orionweller committed
Commit 307fef8
1 Parent(s): e9129b1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +31 -0
  2. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  3. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  4. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  5. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  6. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  7. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds +3 -0
  8. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds +3 -0
  9. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds +3 -0
  10. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
  11. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds +3 -0
  12. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
  13. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds +3 -0
  14. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
  15. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds +3 -0
  16. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
  17. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds +3 -0
  18. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds +3 -0
  19. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds +3 -0
  20. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds +3 -0
  21. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds +3 -0
  22. train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds +3 -0
  23. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  24. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  25. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  26. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds +3 -0
  27. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds +3 -0
  28. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds +3 -0
  29. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds +3 -0
  30. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
  31. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds +3 -0
  32. train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds +3 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1025-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1025-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10762-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10762-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17042-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17042-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19992-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19992-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21426-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21426-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23483-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23483-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23604-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23604-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24126-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24126-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24697-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24697-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -10577,3 +10577,34 @@ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46da2dce1bc45471191dc414b9eb9cd5ea4dfff20b99e30bc0f66e2776927752
+ size 67108489
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1799098862efc9a79e9b3031ee7a4c4038a61b09cd67fec15543474bc3b3cca3
+ size 67108192
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31dd2e2a68a0f6d8ba02112e38293c6ca9ab501a6d16da99ed0f8b48a4d0ca5a
+ size 67108658
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf2059d704ec3409aaf92f21e878ad5a867e6c4df8aa2b00f3f83e48d6a22691
+ size 67108097
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8680a33568233b11a1c92664e20001e8d627acd874f1fa4890c135ac2c49ab50
+ size 67106967
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4f1f07e2e55fc534e1296610cfe385105cb8ab255f31c711ef044c48bf8db8ee
+ size 67108340
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26940408ff622eeaaf6d1adaef5b9fc93ecc66472e2a8c4666fa2dcae48a4abe
+ size 67108681
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:105ca6934f29520140b1f3aa0d2116a7f41053b6cff72213e299d6d144a3e04e
+ size 67108824
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06719b642a30f78136f69ebe5cef99446a2f5a12923e407cf2c1d711ffcb43f4
+ size 67108282
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c3284b404771e18ac1b5d15a20e923e12177e75901e8964dede4f77cf13c17a
+ size 67107732
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c082988d3418961de9192a86bafd2f75cb7485f14e5235e0f413a88a3658bae8
+ size 67108172
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:215d8ae0bea146bdfe5d25dac2d161a8829d8768175ddefd43556f96874f3523
+ size 67108690
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fff3f42760d06b7db53d8553fd789e3ddfc38015b561c390134591ab4ab9f5ba
+ size 67107857
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ecc338409e72b0d122443b9d25d88e001209b4124504a3ab371deabac3afb0b
+ size 67108444
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f67ae039830496e9ac309b143b92bf77e818ddad680451cf186f7b4a4eadcf5
+ size 67108141
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:730014af29a58751478bd9ca8b29fa3f80a50099f03ca80377c6308a3009a1d9
+ size 67108844
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7362b6e3b2fc6452e24ae5d9bcb97fd696c5c757f16ef69f28b7e2ed80ecc53d
+ size 67108589
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d99a619cfd6d6d62f2243c4adbae52af77eacad3b7b9009bce23e000827c58af
+ size 67108766
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:388e436bc2e73667c71b325e5c2c48b61e271baf15426ae386e001f99e57735c
+ size 67108545
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf94dc3ea1aa541a77f750fee287be0b100e62aab12e5f1052354c634799fac8
+ size 67108555
train/cc_en_head/cc_en_head_0109-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29bb744fa2fedb6e96657532c3592b27dad75014bf63b82af22acdbda9395327
+ size 67108057
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01f1d4a3918cef0217bd9e4cf870906b846e9389a8dd444d0fd0bc03be91c9bc
+ size 67108799
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16415aaba78d4afc6b5c5468287a68c0c8bbd3552ea0597185ab9b21d522fb35
+ size 67107838
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:286a3454541b370dc5fb954adc904560870f0324809ca215b108ca8694eaf07a
+ size 67107887
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:976c09237fafbcd7e7158971734a2306c762781ff65283eb49007393e679d9c2
+ size 67107702
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d1f7263a3e9180994c9f0ce70cb2197d6457efec9cf8a3fb39bc1dcd5228db7
+ size 67107766
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cab03dcf2a28c88ab419a7a39a4e12eefb07cc7153ba3f76101a2b5f6aa4d1dd
+ size 67108548
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f103c8b4eb20db9a2bd18199bc0b630b3b42d75cb27ebc8bbe68964fee213900
+ size 67107657
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:28b632244496cfeefc9af6262fbfb091d0c6e3c6a81f9721f6becb6edb805bf1
+ size 67107436
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6623e8de1ca785206f07118572f789dd9ee4598d9bb697b4133e0db5e1b29f44
+ size 67108010
train/cc_en_head/cc_en_head_0147-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf3eaa0a2fcc6d3bd58073c5c5453c0dc5487cc4e3b8b27ae2e87d16d9e54021
+ size 67107058
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1025-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107496, "hashes": {}}, "samples": 43147, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47931980, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16486417, "hashes": {}}, "samples": 10733, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11764308, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1025-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40129130,
+ "num_truncated_tokens": 40095620
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10762-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107942, "hashes": {}}, "samples": 43656, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47427380, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12063488, "hashes": {}}, "samples": 7941, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8573847, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10762-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37988644,
+ "num_truncated_tokens": 37959358
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17042-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108706, "hashes": {}}, "samples": 43202, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48012447, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17294541, "hashes": {}}, "samples": 11026, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12326663, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17042-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40523019,
+ "num_truncated_tokens": 40488870
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19992-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107737, "hashes": {}}, "samples": 43195, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47521806, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16887432, "hashes": {}}, "samples": 10900, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12010144, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19992-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40323162,
+ "num_truncated_tokens": 40289972
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21426-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107289, "hashes": {}}, "samples": 43223, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48099586, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16552630, "hashes": {}}, "samples": 10694, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11852545, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21426-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40160979,
+ "num_truncated_tokens": 40127564
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23483-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107497, "hashes": {}}, "samples": 42099, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47600004, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 25746476, "hashes": {}}, "samples": 16043, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 18315061, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23483-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 44626978,
+ "num_truncated_tokens": 44587548
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23604-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108331, "hashes": {}}, "samples": 42004, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47504951, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21772730, "hashes": {}}, "samples": 14297, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15524797, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23604-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42697649,
+ "num_truncated_tokens": 42661159
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24126-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107352, "hashes": {}}, "samples": 43487, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48115312, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15269498, "hashes": {}}, "samples": 9743, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10900212, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24126-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39540702,
+ "num_truncated_tokens": 39508381
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24697-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107566, "hashes": {}}, "samples": 43691, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47439193, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13003745, "hashes": {}}, "samples": 8464, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9329236, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24697-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38441397,
+ "num_truncated_tokens": 38410922
+ }