orionweller committed on
Commit
2b9ec0b
1 Parent(s): abeba31

Add files using upload-large-folder tool
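As the commit message notes, the files were pushed with the upload-large-folder tool from huggingface_hub. A minimal sketch of how such an upload is typically invoked; the repository id and local folder path below are placeholders, not values taken from this commit:

from huggingface_hub import HfApi

api = HfApi()
# Resumable, multi-worker upload of a large local folder to a dataset repo.
# repo_id and folder_path are illustrative placeholders only.
api.upload_large_folder(
    repo_id="your-username/your-dataset",
    repo_type="dataset",
    folder_path="./train",
)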

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +35 -0
  2. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14633-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  3. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14633-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  4. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17363-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  5. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17363-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  6. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20284-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  7. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20284-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  8. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20475-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  9. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20475-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  10. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21375-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  11. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  12. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29942-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  13. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29942-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  14. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33884-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  15. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33884-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  16. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37037-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  17. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37037-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  18. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38496-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  19. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38496-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47585-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47585-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49458-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49458-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49592-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49592-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53172-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53172-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53834-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53834-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54181-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54181-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5443-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55000-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55000-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55430-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55430-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_586-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_586-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58798-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58798-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59571-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59571-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64248-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64248-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_65472-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_65472-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68324-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68324-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69148-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
.gitattributes CHANGED
@@ -27809,3 +27809,38 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_44776-tokenized-chun
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_44776-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_45515-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_4824-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_11429-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_4824-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97690-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_16002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_223-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_97690-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_45515-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_35028-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9260-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_9260-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_16002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_50229-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_50229-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_35028-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_11429-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36361-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_45273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_36361-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_42614-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76862-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_11862-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_42614-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10324-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_76862-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_11862-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49963-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_41847-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_46942-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49963-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_41847-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_46942-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_47324-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10324-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_47324-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_81843-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14633-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107794, "hashes": {}}, "samples": 44129, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47689670, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10240384, "hashes": {}}, "samples": 6804, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7315978, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14633-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37097772,
+ "num_truncated_tokens": 37069139
+ }
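The num_tokens.json files each carry two counters for their split, num_tokens and num_truncated_tokens. A small sketch that totals them across all locally downloaded splits; the train/ root below is an assumption about where the files were placed, not something stated in this commit:

import json
from pathlib import Path

def total_tokens(root: str = "train") -> tuple[int, int]:
    # Walk every num_tokens.json under root and sum both counters.
    tokens = truncated = 0
    for path in Path(root).rglob("num_tokens.json"):
        counts = json.loads(path.read_text())
        tokens += counts["num_tokens"]
        truncated += counts["num_truncated_tokens"]
    return tokens, truncated

# Example: total_tokens("train") -> (total num_tokens, total num_truncated_tokens)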
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17363-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108345, "hashes": {}}, "samples": 42934, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47634358, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19315137, "hashes": {}}, "samples": 12253, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13802567, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17363-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41503439,
+ "num_truncated_tokens": 41468340
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20284-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108068, "hashes": {}}, "samples": 42677, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47615173, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20289399, "hashes": {}}, "samples": 13044, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14402031, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20284-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41973906,
+ "num_truncated_tokens": 41938046
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20475-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107709, "hashes": {}}, "samples": 43744, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47589909, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15346444, "hashes": {}}, "samples": 9916, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10859150, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20475-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39566378,
+ "num_truncated_tokens": 39534024
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21375-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107220, "hashes": {}}, "samples": 42630, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47581439, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20910420, "hashes": {}}, "samples": 13398, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14806180, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42274507,
+ "num_truncated_tokens": 42237989
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29942-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108578, "hashes": {}}, "samples": 43925, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47472267, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11593514, "hashes": {}}, "samples": 7541, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8219553, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29942-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37758059,
+ "num_truncated_tokens": 37729716
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33884-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107200, "hashes": {}}, "samples": 42624, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47709072, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20716448, "hashes": {}}, "samples": 13288, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14653196, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33884-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42181135,
+ "num_truncated_tokens": 42144563
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37037-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108730, "hashes": {}}, "samples": 42800, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47653817, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20966182, "hashes": {}}, "samples": 13317, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14866846, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37037-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42300429,
+ "num_truncated_tokens": 42263887
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38496-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107821, "hashes": {}}, "samples": 43926, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47411503, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11941012, "hashes": {}}, "samples": 7761, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8432695, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38496-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37924607,
+ "num_truncated_tokens": 37895746
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47585-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108262, "hashes": {}}, "samples": 42890, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47877275, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19016271, "hashes": {}}, "samples": 12158, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13536654, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47585-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41358190,
+ "num_truncated_tokens": 41322872
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49458-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108015, "hashes": {}}, "samples": 44070, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47751186, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11410698, "hashes": {}}, "samples": 7498, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8140530, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49458-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37663424,
+ "num_truncated_tokens": 37634488
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49592-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108766, "hashes": {}}, "samples": 42694, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47725239, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22867029, "hashes": {}}, "samples": 14284, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16180003, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49592-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 43224136,
+ "num_truncated_tokens": 43185981
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53172-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108398, "hashes": {}}, "samples": 43454, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47962884, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15381438, "hashes": {}}, "samples": 9935, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11040980, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53172-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39592371,
+ "num_truncated_tokens": 39559457
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53834-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107533, "hashes": {}}, "samples": 43046, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47414982, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19212287, "hashes": {}}, "samples": 12395, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13631960, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53834-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41443991,
+ "num_truncated_tokens": 41408375
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54181-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108087, "hashes": {}}, "samples": 44426, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47712648, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8798396, "hashes": {}}, "samples": 5888, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6295106, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54181-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36396187,
+ "num_truncated_tokens": 36368988
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5443-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107698, "hashes": {}}, "samples": 43003, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47835389, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17735952, "hashes": {}}, "samples": 11437, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12685099, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5443-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40736617,
+ "num_truncated_tokens": 40702216
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55000-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107750, "hashes": {}}, "samples": 42488, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47831612, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22096640, "hashes": {}}, "samples": 14000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15686496, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55000-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42853512,
+ "num_truncated_tokens": 42815978
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55430-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107350, "hashes": {}}, "samples": 43838, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47646088, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12300572, "hashes": {}}, "samples": 8064, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8713594, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55430-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38097566,
+ "num_truncated_tokens": 38067413
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_586-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107386, "hashes": {}}, "samples": 43710, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47437897, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11952077, "hashes": {}}, "samples": 7943, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8440641, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_586-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37930962,
+ "num_truncated_tokens": 37901908
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58798-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108613, "hashes": {}}, "samples": 42978, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47497544, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18481238, "hashes": {}}, "samples": 11941, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13137597, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58798-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41095058,
+ "num_truncated_tokens": 41060560
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59571-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107988, "hashes": {}}, "samples": 42503, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47818450, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20472207, "hashes": {}}, "samples": 13271, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14586187, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59571-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42063573,
+ "num_truncated_tokens": 42027056
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64248-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108170, "hashes": {}}, "samples": 44088, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47518623, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10931245, "hashes": {}}, "samples": 7140, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7744034, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64248-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37434225,
+ "num_truncated_tokens": 37405511
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_65472-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108332, "hashes": {}}, "samples": 43201, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47990284, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17451755, "hashes": {}}, "samples": 11160, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12383645, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_65472-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40597383,
+ "num_truncated_tokens": 40563329
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68324-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107532, "hashes": {}}, "samples": 44393, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47573497, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8613083, "hashes": {}}, "samples": 5734, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6106207, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68324-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36308911,
+ "num_truncated_tokens": 36282419
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69148-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108200, "hashes": {}}, "samples": 44391, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47781617, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10806977, "hashes": {}}, "samples": 7024, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7661655, "hashes": {}}}], "version": 2}