orionweller committed
Commit bf6cf44
Parent(s): ce79d80

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +27 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds +3 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12281-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12281-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19826-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19826-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20348-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20348-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20612-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20612-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26044-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26044-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29201-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29201-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29769-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29769-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31954-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31954-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3700-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3700-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39107-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39107-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4393-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4393-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes
CHANGED
@@ -22183,3 +22183,30 @@ train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-
 train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
 train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
 train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f8698ea5c2c47ab218a5348f4fa6870aa56628e235b2814396169928bcbb1f5
size 67107023
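Each shard added in this commit is stored as a Git LFS pointer like the one above: a three-line text stub recording the spec version, the object's SHA-256, and its size in bytes. A minimal sketch of reading those fields from an unsmudged pointer file (the helper name is illustrative):

```python
# Parse a Git LFS pointer file (version / oid / size key-value lines).
# Assumes the file on disk is still the pointer stub, i.e. LFS has not yet
# replaced it with the real ~64 MB shard contents.
from pathlib import Path


def parse_lfs_pointer(path: str) -> dict[str, str]:
    """Return the key/value fields of a Git LFS pointer file."""
    fields: dict[str, str] = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields


ptr = parse_lfs_pointer(
    "train/algebraic-stack/"
    "algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/"
    "shard.00000.mds"
)
print(ptr["oid"], int(ptr["size"]))  # sha256:4f8698ea... 67107023
```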
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9579cb72405b33b3f07b3387a6efaef719c393d6fa553c2a455fe537b7f75ea
size 67107493
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5fa2ccd27e657df5097b8ff0cd352c19993b6595acbd4c2403df59324cd63b97
size 67108739
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b59e3763af319deb2fdde7e1568961630ae35ef936a7b3026ba2198393165790
size 67108387
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:11b6a3e2a6385698946d170504f2dec8d5b0b703028cb569ac178c5c6e7d6957
size 67108613
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d1a55a046beaf037d32ba7265a0c6a25a02bcb1edb6b4a08259a3c09957ca4fd
size 67107068
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:69d193f0208d487abb21b5cfa24a0a04dcee38dd799cc3fab1c6e28447ed5100
size 67107961
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:331163e7a919907f4c038cda2b21fd5320a9fddb7d3f09db50a3ea7c76ae029d
size 67108094
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a19275e86f6b6b28c3ae4882d8f5be19e26c0fdb55c922a888e9f0e4c1adbafb
size 67107100
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:14585265da64c1f400f2b3614f688d1d3324c464fab0ab2b925dcebd1d5e6531
size 67107204
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f9bccd9cad16e671f67370f5f0fd1698e9edc832336730c29b2c1620533a3876
size 67108856
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:40a48a29ec2f60484b7f5290829a333c8b0c321eecf4b205391eaedd749ada05
size 67107798
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:23d4ad4594521b0bd33f93ab79941eb8567abdfd6b7076e71f5ad8dc51744428
size 67108497
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:698adfeab5183de44f8e53c0083ba2c37ceb83e338c04b7c198e7b025d5fb0f4
size 67106825
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9205c392563896334ce41b3fedaae68a6138f9490ea69ef4f7102926b0554175
size 67108610
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c57b83850bdf9137c05f41f05244a9a667cd8a27c24833b0564d202f47dd3cfc
size 67107616
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2607e09f466c5575290896bc4611f46c32ae41d6a83c19251070e386cec0b783
size 67106976
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:397c916c030622a7fbc86c024fae4958d19ced2d57f75561e5769761c7180803
size 67108785
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c7b70487b3843c1c106b2c96cb5dd62e7b27267e6be57f088d47810e2af416da
size 67108646
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cdad481b8eb3ff485e2eabcfd39638260bebf05d8c3de7ad1ca1855f5cf35771
size 67108625
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a584fc35198d3289860dd8f167ed4848b433163b38df055196b494ad34d3965b
size 67108828
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f6b179dd36bdadd84366cfa840b3cc7476acac87c302a003e6258601a6c61fd5
size 67108217
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:044dea0640f9e7e77a3d1b5068b3bdd0cfab58cd55f068c080b7e77dcb48cc12
size 67107435
train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:48f9fbf0aa59a16b5a95e0c796b9c1185d63e3e274fff9f67e9d90ae69cc9e78
size 26349446
train/algebraic-stack/algebraic_stack_train_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:50aaa769b94854d9817c546d5b3e5ae30740ca57728eb350de94f39aec54fe56
size 67108333
train/algebraic-stack/algebraic_stack_train_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7941af459e367b137d222e1766d07e9f267a34c2786ea08f3b30ca969845b444
size 67108568
train/algebraic-stack/algebraic_stack_train_0010-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:785772e6c1064e0f14ef58778b10400c7fc916e414191417f064728be8e2fcfc
size 67108795
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12281-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107147, "hashes": {}}, "samples": 42847, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47562440, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20942635, "hashes": {}}, "samples": 13114, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14822021, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12281-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 42292555,
  "num_truncated_tokens": 42256534
}
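Alongside each index, a num_tokens.json records the split's num_tokens and num_truncated_tokens counts. A small sketch that sums these across the split directories added here (the glob root mirrors the paths above):

```python
# Sum the per-split token counts from the num_tokens.json files in this commit.
import glob
import json

num_tokens = num_truncated_tokens = 0
for path in glob.glob(
    "train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/"
    "split_*-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json"
):
    with open(path) as f:
        counts = json.load(f)
    num_tokens += counts["num_tokens"]
    num_truncated_tokens += counts["num_truncated_tokens"]

print(f"num_tokens={num_tokens:,} num_truncated_tokens={num_truncated_tokens:,}")
```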
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19826-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107479, "hashes": {}}, "samples": 43050, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47816895, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18779045, "hashes": {}}, "samples": 11966, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13279157, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19826-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 41240296,
  "num_truncated_tokens": 41205150
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20348-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107012, "hashes": {}}, "samples": 43762, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47625131, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15316909, "hashes": {}}, "samples": 9909, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10923395, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20348-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 39550898,
  "num_truncated_tokens": 39518097
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20612-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108158, "hashes": {}}, "samples": 43404, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48039191, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16043903, "hashes": {}}, "samples": 10412, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11469036, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20612-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 39910247,
  "num_truncated_tokens": 39876746
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26044-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107424, "hashes": {}}, "samples": 42880, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47952362, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17952431, "hashes": {}}, "samples": 11656, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12831433, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26044-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 40841797,
  "num_truncated_tokens": 40807112
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29201-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108745, "hashes": {}}, "samples": 44575, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47809100, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8767870, "hashes": {}}, "samples": 5952, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6277616, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29201-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 36374702,
  "num_truncated_tokens": 36346713
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29769-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107208, "hashes": {}}, "samples": 42988, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47554796, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18542688, "hashes": {}}, "samples": 11849, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13131916, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29769-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 41127475,
  "num_truncated_tokens": 41093185
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31954-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107659, "hashes": {}}, "samples": 44097, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47695836, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12028429, "hashes": {}}, "samples": 7933, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8595293, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_31954-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 37957862,
  "num_truncated_tokens": 37927589
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3700-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107889, "hashes": {}}, "samples": 44351, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47829767, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10287968, "hashes": {}}, "samples": 6772, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7329737, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3700-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 37115790,
  "num_truncated_tokens": 37088035
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39107-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107715, "hashes": {}}, "samples": 44126, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47833403, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12331097, "hashes": {}}, "samples": 8014, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8773344, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39107-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 38105863,
  "num_truncated_tokens": 38075071
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4393-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107169, "hashes": {}}, "samples": 44395, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47779616, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10169023, "hashes": {}}, "samples": 6775, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7299237, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4393-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 37054627,
  "num_truncated_tokens": 37025711
}