orionweller committed on
Commit
ce79d80
1 Parent(s): cdc784b

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
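For context, commits like this are typically produced with the `upload_large_folder` helper in the `huggingface_hub` Python library, which walks a large local folder and pushes it to the Hub as many small, resumable commits. A minimal sketch, assuming a hypothetical repo id and local folder path (not taken from this commit):

from huggingface_hub import HfApi

api = HfApi()
# Hypothetical arguments; substitute the real dataset repo and local folder.
api.upload_large_folder(
    repo_id="user/my-tokenized-dataset",  # assumption, not this repo's actual id
    repo_type="dataset",
    folder_path="./train",
)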
Files changed (50)
  1. .gitattributes +23 -0
  2. train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  3. train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  4. train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds +3 -0
  5. train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
  6. train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds +3 -0
  7. train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds +3 -0
  8. train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds +3 -0
  9. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  10. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  11. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  12. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  13. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  14. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds +3 -0
  15. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds +3 -0
  16. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds +3 -0
  17. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
  18. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds +3 -0
  19. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
  20. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds +3 -0
  21. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds +3 -0
  22. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds +3 -0
  23. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds +3 -0
  24. train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds +3 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10227-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10227-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13301-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13301-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14672-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14672-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16977-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16977-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18610-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18610-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19414-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19414-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19430-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19430-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20212-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20212-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21970-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21970-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2205-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2205-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22724-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22724-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25109-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25109-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31135-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -22160,3 +22160,26 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43972-tokenized-c
  train/algebraic-stack/algebraic_stack_train_0005-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
  train/algebraic-stack/algebraic_stack_train_0005-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
  train/algebraic-stack/algebraic_stack_train_0005-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds filter=lfs diff=lfs merge=lfs -text
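Each added line routes one shard through Git LFS: the attributes `filter=lfs diff=lfs merge=lfs -text` make git store the shard's content in LFS and commit only a small pointer file in its place (those pointer files appear in the per-file diffs below). A stdlib-only sketch, with a hypothetical helper name, for listing which patterns a `.gitattributes` file tracks via LFS:

def lfs_tracked_patterns(gitattributes_text: str):
    # Yield the path pattern of each line whose attributes include filter=lfs.
    for line in gitattributes_text.splitlines():
        parts = line.split()
        if parts and "filter=lfs" in parts[1:]:
            yield parts[0]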
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20000f18ff05e365c13837a60cc2add49959fdc9fab80a1078a1bd7d8ff94f2b
+ size 67107305
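The three `+` lines are the entire committed file: a Git LFS pointer giving the spec version, the SHA-256 of the actual shard content, and its size in bytes (67107305 bytes, just under the 67108864-byte, i.e. 64 MiB, shard size limit recorded in the index files below). A hedged, stdlib-only sketch of parsing such a pointer:

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "key value"; split on the first space only.
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    # e.g. fields["oid"] -> "sha256:20000f18...", int(fields["size"]) -> 67107305
    return fields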
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:563cd058c686dda8c9d9d8fc061f6de2e03698e425fbec6bdd13f14cca5f4b1d
+ size 67108435
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c873c50de5787571d3fba9cbfae082e6c007d2ddda66b3d3a2c95d1add657a8
+ size 67108221
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a741af2e404fe7c12972df00b7b04d1e2096b90ef3d4858a73febaeb8ac8a80
+ size 67107204
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:681b3af8e8f1255b1effa85288da44ebbd9a760c71cde7734438520ae70eecec
+ size 67107597
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a32793e85fd38cdbd67d9291e20752d375e7422c5e5c3076324929d6fde04725
+ size 67107809
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6f44f82e67f7217afdac74294efb6f175a9af390b21f76ec3a56a26a04930f9
+ size 67108102
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57569f3177ff009cd9447ac34781cd8c6aa8b169791d867e4e367b9602aaec9b
+ size 67107892
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b736ee71f69d7d8ea0ab338d1dc62d62775029cd3122fb5a63fb662b94e20d55
+ size 67108850
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e89f6682cc85608a88668270532e89142d470cd57804f9f6afb918dec77a909
+ size 67108224
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c22b668e978cfb32ed56fac8bba34cb9323a17e68257ae5c6af34e715b1a4440
+ size 67107946
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abcfdb0b0088590709d2c2b4e1ceda9b9cf596feed4b77e9bfe4b75c40d404a1
+ size 67108179
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b2c6e1481e9bd4fad2bb659d951ad389cd6b2c0f41697b1f485a05267270b33
+ size 67107974
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66fb7284c6c39507dd78ce378500f7f1a865f3fcba18ae0d9fb4a063dd5b243f
+ size 67106837
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ded3feb0c6d4076ff7ba95f3f71190016d9dd717018b6e1d1c2b7ccd7f0fa0c
+ size 67107161
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72f2725d0a717d3ec3a13241004b67b881e5e7949cb757adf9368148e6168e49
+ size 67108254
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0d1f4e5211535b2fd2b47b81fde3ee67a45bba40b064246775bd22a3085ba404
+ size 67108763
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:250b0b35e8bf0d0f5a75ab915322ad6d8bc10569147dddb0e27b759221c4bef5
+ size 67106883
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02f161472b5d86952ce53e3596c0313b0d907dba1c2a886e5e9a801c21a4297f
+ size 67108582
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d315876f6d643831ffa52cc7e02cc95603a9df78986a6ecb11a79a20d3e519d
+ size 67108691
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:96cbe7b00aadb62e2a24f06e7c9ac55cd62446aad628972adaba882dc7d0bd16
+ size 67107951
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a44b97b00c435861ae95d81b72706b4af77537dd07972a8a256b0d8cb34a66b9
+ size 67108851
train/algebraic-stack/algebraic_stack_train_0001-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b1152150e1060e1a88f2ef32fb3d7d9b044275a627964268ec97de5d9cdb0a3b
+ size 47202849
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10227-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107301, "hashes": {}}, "samples": 43387, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47707430, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16214739, "hashes": {}}, "samples": 10437, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11446784, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10227-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39995073,
+ "num_truncated_tokens": 39962439
+ }
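Reading the two counters as the token totals before and after truncation to the 1024-token window (an interpretation, not documented in this commit), this split loses 39995073 - 39962439 = 32634 tokens, under 0.1%. A one-line check:

stats = {"num_tokens": 39995073, "num_truncated_tokens": 39962439}
lost = stats["num_tokens"] - stats["num_truncated_tokens"]
print(lost, lost / stats["num_tokens"])  # 32634, ~0.000816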
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13301-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108571, "hashes": {}}, "samples": 42670, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47594030, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21400881, "hashes": {}}, "samples": 13539, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15098638, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13301-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42514727,
+ "num_truncated_tokens": 42478348
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14672-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108661, "hashes": {}}, "samples": 43726, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47692870, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13930450, "hashes": {}}, "samples": 9154, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9899750, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14672-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38882978,
+ "num_truncated_tokens": 38851652
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16977-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108791, "hashes": {}}, "samples": 44247, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47706333, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9603578, "hashes": {}}, "samples": 6345, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6845380, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16977-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36790393,
+ "num_truncated_tokens": 36762861
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18610-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108228, "hashes": {}}, "samples": 43460, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47574861, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16655820, "hashes": {}}, "samples": 10726, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11901618, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18610-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40204918,
+ "num_truncated_tokens": 40171097
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19414-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108241, "hashes": {}}, "samples": 43231, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47927808, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16594211, "hashes": {}}, "samples": 10620, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11892481, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19414-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40184241,
+ "num_truncated_tokens": 40150575
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19430-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108811, "hashes": {}}, "samples": 42728, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47618521, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21972524, "hashes": {}}, "samples": 13872, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15618160, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19430-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42788650,
+ "num_truncated_tokens": 42751084
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20212-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108016, "hashes": {}}, "samples": 44595, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47865367, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9566632, "hashes": {}}, "samples": 6293, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6815927, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20212-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36762507,
+ "num_truncated_tokens": 36734315
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21970-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107959, "hashes": {}}, "samples": 44584, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48188488, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8965151, "hashes": {}}, "samples": 5872, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6431020, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21970-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36474980,
+ "num_truncated_tokens": 36447147
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2205-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108427, "hashes": {}}, "samples": 44086, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47830470, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12867412, "hashes": {}}, "samples": 8206, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9136268, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2205-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38369573,
+ "num_truncated_tokens": 38339059
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22724-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107249, "hashes": {}}, "samples": 42994, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47501323, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18248534, "hashes": {}}, "samples": 11854, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12926070, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22724-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40980216,
+ "num_truncated_tokens": 40945839
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25109-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108265, "hashes": {}}, "samples": 43395, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47485133, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15481189, "hashes": {}}, "samples": 10385, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11058459, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25109-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39630270,
+ "num_truncated_tokens": 39597242
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31135-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108291, "hashes": {}}, "samples": 42441, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47525627, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21571036, "hashes": {}}, "samples": 13774, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15424893, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42599462,
+ "num_truncated_tokens": 42562120
+ }