orionweller committed
Commit: 472c186
Parent(s): 83bbca3

Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.
- .gitattributes +27 -0
- train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds +3 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10299-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10299-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14446-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14446-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15165-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15165-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19314-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19314-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20845-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20845-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23440-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23440-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24198-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24198-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24654-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24654-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25592-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25592-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2886-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2886-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30927-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30927-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31450-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31450-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32254-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32254-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32651-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
.gitattributes
CHANGED
@@ -27639,3 +27639,30 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_1917-tokenized-chunk
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_78040-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_29579-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_21368-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_236-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_86396-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_78040-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_236-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_21368-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b9e18307f58865ca113db0ab06c283b6c3efc5fda22e6a7b85909ce5c18dfee
+size 67107231
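
Each `.mds` shard in this commit is stored as a Git LFS pointer rather than the raw data: the pointer records only the spec version, the SHA-256 `oid` of the real object, and its `size` in bytes. As a minimal illustration (the `read_lfs_pointer` helper below is hypothetical, not part of this repository's tooling), such a pointer file can be parsed like this:

```python
# Minimal sketch: parse a Git LFS pointer file (version / oid / size),
# e.g. the shard.00000.mds pointer shown above.
from pathlib import Path


def read_lfs_pointer(path: str) -> dict:
    """Return the key/value pairs of a Git LFS pointer file."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


if __name__ == "__main__":
    ptr = read_lfs_pointer(
        "train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds"
    )
    # Expected keys: "version", "oid" (e.g. "sha256:1b9e..."), "size"
    print(ptr["oid"], int(ptr["size"]))
```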
train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92043af9073da3f97e6e7fdc488a271436626290d0e8c89d23b7105d37757a80
+size 67107188
train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb95f13dd5e8ae3baad7e7987c99ed817d09aa367a53a6d95035adef22c9cee6
+size 67107090
train/algebraic-stack/algebraic_stack_train_0006-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93ccd2c2aa84a8afef34345537f8dbf64dc5dac92bfaf45b424ed7addfa11422
+size 25601184
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:044db4db6aaab2ce07f547d0797cb7bcb2ba1e190dec402a2f2ee28e95ae49ef
+size 67108406
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba3fe0276d3c88840feb20cb690f7ad1b126b649c5b51ebf09f245c708091eb1
+size 67107390
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:434dda014060b199422b162ce5b9f7b808ca6751adb00c357e8d3ad619fb1c59
+size 67108039
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41061d50c113450384d4823b46453ce60a7b48b123f74f9968fe250daf040438
+size 67107651
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e938d1aed70951a2f6fce7bdf8549aed7175eb762774f665fa795b7d6f0fcd2b
+size 67107095
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fab6656ff99c545d3be2ffab13669618163416633257422d4d5c137c5e385915
+size 67108548
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ccebf2e320b81aa70f2e7a855b4e5630450f7349965cb55edc621d1ba714207
+size 67108145
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:342063458f9f51dfc320d54e8d4d84227fce5e9f72cc76af494f98a84f153687
+size 67107571
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5aeb698aebf703b55e1590764c59376f57b6e91427ebb01ecf4e9ac981014975
+size 67108660
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ee7a14f4c4083be547618f5d8b075a0f03e0d0bf16e6df8106a799fb8f2109d1
+size 67108644
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f08723ac2fbebac5d2748dab37122a3eb4f515fbc739533aea1816220a910fe8
+size 67108698
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2c4887100cf7ebc826e1a9d034ad898d851477a2101984a8daea3d052a253271
+size 67107798
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28aeb64cb61c16ed4f4d26e2bf07de9263bf26897f669735213557b01ab787b7
+size 67108303
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6a57501b961e777982ca8da0204d75de4e70c625cbf8fa32a69fdb5436d3f690
+size 67106884
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5ba6b956f9f43dcca723a1d662dab45b0e182f631ba3207ca6081738f6b3c8b
+size 67108597
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc9e5ec63e3cd9d4e51f6da8800c54c0a6e40cb66725801df7afb845f2071bdb
+size 67108130
train/algebraic-stack/algebraic_stack_train_0007-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3352de8590b8ccf0a9b507ae076ac490b1a0a2df4598a29b2cd59d9e9a4f58b3
+size 67107564
train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6e7d362396c564f07eec1eb0d5c0825defdb9e1b055b452d05bd8358f3eed593
+size 67107484
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10299-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107477, "hashes": {}}, "samples": 44461, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47843763, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11785329, "hashes": {}}, "samples": 7610, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8370498, "hashes": {}}}], "version": 2}
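
Each `index.json` describes its split directory as an MDS dataset: two zstd-compressed shards with `id` (str) and `input_ids` (uint16 array) columns, plus per-shard sample counts. Assuming the shards were written with MosaicML's `streaming` library (the `mds` format and index schema suggest this, though the commit itself does not say), a minimal read-side sketch might look like:

```python
# Minimal sketch, assuming the shards were produced with MosaicML Streaming
# (pip install mosaicml-streaming). Reads one tokenized split from a local checkout.
from streaming import StreamingDataset

split_dir = (
    "train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/"
    "split_10299-tokenized-chunked-1024-512-128-backfill-nodups"
)

# shuffle=False keeps shard order; samples expose the columns listed in index.json.
dataset = StreamingDataset(local=split_dir, shuffle=False)
print(len(dataset))  # 44461 + 7610 samples according to index.json
sample = dataset[0]
print(sample["id"], sample["input_ids"].dtype)  # id string, uint16 token ids
```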
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10299-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 37834996,
+"num_truncated_tokens": 37804641
+}
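
Each split also carries a small `num_tokens.json` recording its total and truncated token counts. A hypothetical way to total these across whatever splits are present in a local checkout (the glob pattern is an assumption about the layout, taken from the paths above):

```python
# Minimal sketch: sum the per-split token counts recorded in num_tokens.json.
import json
from pathlib import Path

total = 0
for path in Path("train").glob("*/*/num_tokens.json"):
    with path.open() as f:
        total += json.load(f)["num_tokens"]
print(f"{total:,} tokens across the splits found locally")
```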
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14446-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107231, "hashes": {}}, "samples": 43106, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47621793, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17104045, "hashes": {}}, "samples": 11106, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12237627, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14446-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 40427622,
+"num_truncated_tokens": 40394296
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15165-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107942, "hashes": {}}, "samples": 42542, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47644063, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21781727, "hashes": {}}, "samples": 13860, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15399295, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15165-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 42698922,
+"num_truncated_tokens": 42661804
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19314-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108154, "hashes": {}}, "samples": 42886, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47626122, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19586346, "hashes": {}}, "samples": 12624, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13833778, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19314-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 41629005,
+"num_truncated_tokens": 41593558
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20845-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108542, "hashes": {}}, "samples": 43392, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47554528, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15045423, "hashes": {}}, "samples": 10005, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10692811, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20845-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 39424345,
+"num_truncated_tokens": 39391917
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23440-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107159, "hashes": {}}, "samples": 44105, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47760793, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11665915, "hashes": {}}, "samples": 7687, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8268322, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23440-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 37783729,
+"num_truncated_tokens": 37753724
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24198-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108667, "hashes": {}}, "samples": 43001, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47625619, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18489188, "hashes": {}}, "samples": 11840, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13074796, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24198-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 41101367,
+"num_truncated_tokens": 41066534
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24654-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107663, "hashes": {}}, "samples": 43939, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47654583, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11954913, "hashes": {}}, "samples": 7756, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8495017, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24654-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 37931304,
+"num_truncated_tokens": 37901813
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25592-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107456, "hashes": {}}, "samples": 43337, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48085870, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16297816, "hashes": {}}, "samples": 10502, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11671856, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25592-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 40036111,
+"num_truncated_tokens": 40002601
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2886-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108198, "hashes": {}}, "samples": 43944, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47809375, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12561545, "hashes": {}}, "samples": 8265, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8928368, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2886-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 38219100,
+"num_truncated_tokens": 38188379
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30927-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107711, "hashes": {}}, "samples": 45022, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47946352, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7125450, "hashes": {}}, "samples": 4756, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5106591, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30927-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 35576285,
+"num_truncated_tokens": 35549187
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31450-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107210, "hashes": {}}, "samples": 44348, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47503904, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9386949, "hashes": {}}, "samples": 6219, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6663979, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31450-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 36682097,
+"num_truncated_tokens": 36654891
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32254-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108694, "hashes": {}}, "samples": 44010, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47568192, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11093916, "hashes": {}}, "samples": 7307, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7853056, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32254-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
+{
+"num_tokens": 37512960,
+"num_truncated_tokens": 37484415
+}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32651-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108221, "hashes": {}}, "samples": 43117, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47844132, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18109878, "hashes": {}}, "samples": 11522, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12908336, "hashes": {}}}], "version": 2}