diff --git a/.gitattributes b/.gitattributes
index b429745361f00246ea8f92a22dbfd50631d962fc..9dfe714e99db051370f4db8717d0c4edb11f0bc3 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -22210,3 +22210,37 @@ train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-
 train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
 train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
 train/algebraic-stack/algebraic_stack_train_0008-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
new file mode 100644
index 0000000000000000000000000000000000000000..b9bed2b569aefb140fdb66623dbab25fc0b1dc6d
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:886b11a3935945c6f689a3129b54e48229c178fc21e3c7eb439d90f806c33f1a
+size 67108730
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds
new file mode 100644
index 0000000000000000000000000000000000000000..37bab49d9b062dc6a54a34b6a8f62d590107bdb4
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e594c127c276c198ef3952b7f1426e5a1cfa72039bd661113fa7bdc0d43d0733
+size 67106937
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds
new file mode 100644
index 0000000000000000000000000000000000000000..5d50dbcdf9f7d0bd3ff09f4ad593e7aa320ae23f
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82c2a6605394668f6ebd80436f5743099cbeee29cba8d5f2103a1d088d0f6e63
+size 67108119
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds
new file mode 100644
index 0000000000000000000000000000000000000000..34226859990b49867502c739aae02d9e90455774
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73122bd530d98d2034a3e692e0f8ca2ba4e7e5837a1124a234642464dff5d480
+size 67107288
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds
new file mode 100644
index 0000000000000000000000000000000000000000..8a50f42c893080a1655a3a339e3bfe0cf3db37c8
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6f4ae29cfb30f8069f5883dce0c0696d27bfabfe0a357d21d8b5e5dcf54097c6
+size 67107155
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds
new file mode 100644
index 0000000000000000000000000000000000000000..ef596633fece281eac9f226e01f485b7a80e51df
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f49792bfa549758e3628e4a9f159d7e5807c2ddab1b4d375af99cb203439ef5
+size 67108120
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds
new file mode 100644
index 0000000000000000000000000000000000000000..b5197dbb409e88a63ea4e4e3a5210eb63a1196f6
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11762515a3977e4bbe54f7680e0fbaf3c44a1e1418039401ddd4b8387416ec9f
+size 67108600
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds
new file mode 100644
index 0000000000000000000000000000000000000000..ebfa6e5f9f906342ab08563a4a7d5bf708a06480
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d39f84ed69bd7b94f605d4074a76665881bdef205312ad9f972e701713057c7
+size 67108484
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds
new file mode 100644
index 0000000000000000000000000000000000000000..fafc5c83e107fe8dd3fd7b9d32521bb8ee8ffde1
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3690acc1e93497a604b3b7bf063c938dd14cc2b4a3b89b986a797bd03a3d071c
+size 67107776
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds
new file mode 100644
index 0000000000000000000000000000000000000000..a13708cfcbc9f1f794a171fbe1c399412950ef9d
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4330d8a79fcc891f6e37b99e59f1c93fda31af26639e338ac8552ec05aeef8b5
+size 67107830
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds
new file mode 100644
index 0000000000000000000000000000000000000000..17b4991c96959d1290917ab4d7f76351a2b59f2e
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdeb58f1f5f1ad0470d58894db9af433459650f893a0e56fe17f183c7cb94ea8
+size 67107371
diff --git a/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds
new file mode 100644
index 0000000000000000000000000000000000000000..ae8c24c44abaf1e2d5697624bfd9891e7e1d72bb
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1bf23f57f8753f5fc6c9b0aa7580d0f941d9c520b69d46bf604f3f42d433040
+size 17194487
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds
new file mode 100644
index 0000000000000000000000000000000000000000..6e7f18db3e793f4de26a285876b1b925a469aead
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6425def60ea7612b2c6e45a0a7c90778ae6fde79194362de30ae16684c0cffc3
+size 67108356
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds
new file mode 100644
index 0000000000000000000000000000000000000000..b5c691aba5c0d406899c927a445dc645266196d1
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c70d57dc4f83bc70549f563868c1b1f80e256469e05fa3b67738827102b0d9a
+size 67108312
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds
new file mode 100644
index 0000000000000000000000000000000000000000..7485563a8c178db9cd81295fb2f8a7f8fc5bc987
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ac9112db16f928b6d5513f901bf91e9b79b75e3ccfb7fb634880cb3bd5f3a6d
+size 67107304
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds
new file mode 100644
index 0000000000000000000000000000000000000000..c82ac23ca3b2046aa623ed32033e49baa6234ecf
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e7cc780a0906632a48627579696a394e489721b3f25a30a14b4ac335b69c665
+size 67108489
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds
new file mode 100644
index 0000000000000000000000000000000000000000..ffed62aed222ec0f6b27ab5feb40d11221126cb9
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f27d1953f1b6a115a9a007bb6ef0ff92d50dc6ebd9f3540b9c2ef3a5179a14e
+size 67106996
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds
new file mode 100644
index 0000000000000000000000000000000000000000..785620b6e3026d5ac61272d1118620d5ae88512a
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b00fcfb332a8fdf7e277e671db0d12bc1e21a61a35d468dc81cf222f66067ce
+size 67107314
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds
new file mode 100644
index 0000000000000000000000000000000000000000..3f75b3aac05ee415768c1da96d2bb503a5c1f2af
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fc6d1bacf6310082b98fe4ef6399f520119f2b24e2cea19c983b78d0c5ee0b2
+size 67108649
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds
new file mode 100644
index 0000000000000000000000000000000000000000..e052e897018ca4443498f5ac12ffdc4718039d40
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:08377028ac16e8c687918d302ff617160a07570826554f1138e3f6c9c0de71cb
+size 67107614
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds
new file mode 100644
index 0000000000000000000000000000000000000000..1ecfd02ad5fda6ece08088dec40cfbde4c504b30
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc98cef7ce5a1e6a14d6781a5bc1c4c04b72e06dbaa3d739fea996decef5ad61
+size 67107750
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds
new file mode 100644
index 0000000000000000000000000000000000000000..0c74c68fcf016732fd31959021f604a7a21d09ce
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46de61dda2993c94c7d0a4fc43ce61fbe823ff807d9697f178dbcf7c607a903f
+size 67107279
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds
new file mode 100644
index 0000000000000000000000000000000000000000..5d945f71a0c22c807df04df3772000c00e343e26
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f18760f72b46140473967f4790dee004911d5d22260a8f454e8c61413735ce9
+size 67107523
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds
new file mode 100644
index 0000000000000000000000000000000000000000..13fef746694d4d7e228780274b97bdcd24da9a10
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dea6f3d3dd4be8923000ebc6ee4c580110e5b8baf660146e0f3a8e50aaaaaa83
+size 67108147
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds
new file mode 100644
index 0000000000000000000000000000000000000000..fe67afd1f30085a90f5c6f3d8e1c8c32b7017b75
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2aaff361ef59f4e3b82514c7e4b3778aa1ec366380e08bbe904db1f4b702446f
+size 67107481
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds
new file mode 100644
index 0000000000000000000000000000000000000000..fb0f2db6f2d0fd5d04ddb288970a5d5b0a36047a
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e94b68cb3cccfa970d9c1b23b52561474521e57e8a399af20067936db70cc17d
+size 67107141
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds
new file mode 100644
index 0000000000000000000000000000000000000000..c4df8908ada2ce2bbf8121b2f273c92fefcf5bb7
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:821b6c7ff06b7b7aefedcdff018c3bcf3b84559345b5aa750c32a77a264ab59d
+size 67108160
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds
new file mode 100644
index 0000000000000000000000000000000000000000..a7067d30c85bc9d0791895af1683033e28c29623
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23774e884508d87b6f7fe4121f594992d539b6c8f0eaa826d0840af3d894c0e0
+size 67108445
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds
new file mode 100644
index 0000000000000000000000000000000000000000..08b1710e2da72504a8324f0584700af63fde928f
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2dc57a075478785c07c7d2dbd156b7ccb19bab3ae4036139b18962a582485854
+size 67107678
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds
new file mode 100644
index 0000000000000000000000000000000000000000..35d772530e8a06e2882c568f8728bd598e5822a4
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6c6d7ffcc1c6deb03247f8ebd69f3496d1347121bdc19bc3262ff99550ad93a
+size 67108838
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds
new file mode 100644
index 0000000000000000000000000000000000000000..bf9248348d0716669066a35643ae4468587fb9b7
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7feaef4fe47827ae9122b54c6e068d74d736f56dc2bc6bbd6e0026b1a566377
+size 67108144
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds
new file mode 100644
index 0000000000000000000000000000000000000000..eb385a010f05535ec2ac81da4258f7c64e6a03f4
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:09b960509458b284801193a39f1696d7dfa88dbe30c8aad365c1c3fef3f0f473
+size 67108512
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds
new file mode 100644
index 0000000000000000000000000000000000000000..57af3beb34c653f5b1ad803bf64534ca622eebac
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:688700c2964a1b5e7b27765872aa70c99f3f5eed106e18b310a842e7ce736593
+size 67107840
diff --git a/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds
new file mode 100644
index 0000000000000000000000000000000000000000..b31967cedaf3100d4cd64664ccb3baf6666b2d68
--- /dev/null
+++ b/train/algebraic-stack/algebraic_stack_train_0015-tokenized-chunked-1024-512-128-backfill-nodups/shard.00039.mds
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7c8dda80a7c83ebc254347300b430bfc8bd8c69d775e94228ce5e4e99e6c43a
+size 67106796
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11310-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11310-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..ce7f6c514afdc7c75b350c0b2a747fcea5a394b3
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11310-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108847, "hashes": {}}, "samples": 44414, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47923978, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9765896, "hashes": {}}, "samples": 6600, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6978000, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11310-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11310-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..af3e405dd345c168e9c1014ccdfc1631d4aac052
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11310-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 36858733,
+    "num_truncated_tokens": 36830118
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14196-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14196-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b1a217919c82a8cc94fdf0c579062dec6d83a8d
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14196-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108502, "hashes": {}}, "samples": 43155, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47926464, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17559399, "hashes": {}}, "samples": 11236, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12557087, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14196-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14196-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..4ce4b5445e3c12c98bf3eeeb4eb35358fff70714
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14196-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 40650349,
+    "num_truncated_tokens": 40616018
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16175-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16175-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..e25376b693e515c017cc75c540ceff68403e8d70
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16175-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108849, "hashes": {}}, "samples": 44010, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48109332, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12373959, "hashes": {}}, "samples": 8109, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8879910, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16175-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16175-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..b420c61895ba271d364ddc89a87a10e67ac4b2ea
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16175-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 38128397,
+    "num_truncated_tokens": 38097601
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20886-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20886-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..fd3fbd5c70ca0916cf3acf860ebfd3e07a2f183f
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20886-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108652, "hashes": {}}, "samples": 43827, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47952179, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13524754, "hashes": {}}, "samples": 8751, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9713251, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20886-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20886-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..4fc3260a505c79911b1228bb30fe673ddc8dcb81
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20886-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 38689349,
+    "num_truncated_tokens": 38658447
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21424-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21424-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..ab8fd7e88d3fd2665b35f4d2ee882845bc104ab4
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21424-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107898, "hashes": {}}, "samples": 45050, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47940364, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7012272, "hashes": {}}, "samples": 4683, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5019622, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21424-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21424-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..b0c3506e7b939e3624fdef2e17c546dddfa85d00
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21424-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 35521211,
+    "num_truncated_tokens": 35494279
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23938-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23938-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..7ec17b88bbbada6ede0dc2ab190b689a542fb09a
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23938-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107211, "hashes": {}}, "samples": 43888, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47799967, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11917332, "hashes": {}}, "samples": 7753, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8492701, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23938-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23938-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..58f87279a0e59bd35b7823500f13dd8abd65b032
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_23938-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 37913924,
+    "num_truncated_tokens": 37884447
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30754-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30754-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..acf4bf172feaee06ed9411f9ca10deb5903ea925
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30754-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108056, "hashes": {}}, "samples": 42912, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47865326, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19753908, "hashes": {}}, "samples": 12574, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14007784, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30754-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30754-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..fbbac5ba77d46f3d3afce4e9b731b82fad3d248a
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30754-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 41713457,
+    "num_truncated_tokens": 41677443
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32921-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32921-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..7d303d129549ffc15378e467fea1e6cd3f2f1c51
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32921-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108046, "hashes": {}}, "samples": 42528, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47325820, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21604636, "hashes": {}}, "samples": 13733, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15279818, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32921-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32921-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..16c5c090b43051d030512e8302178309b1e23d71
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_32921-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 42614688,
+    "num_truncated_tokens": 42577871
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34338-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34338-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..6bfa0edf709de5fd10bc74663e7a9ac70dd22085
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34338-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107838, "hashes": {}}, "samples": 43974, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47765463, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12283584, "hashes": {}}, "samples": 8192, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8762860, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34338-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34338-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..d98165b5ca6532b6359ed95a0f1a7743aaa4de6d
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34338-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 38081291,
+    "num_truncated_tokens": 38050734
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38868-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38868-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..e6a1afb6978629a2921f479580cce614b4cdc980
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38868-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108680, "hashes": {}}, "samples": 43011, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47457668, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17950894, "hashes": {}}, "samples": 11507, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12761566, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38868-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38868-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..2fe92576e3bfa00cc5c61259e8b419b9a7e48eab
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38868-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 40842210,
+    "num_truncated_tokens": 40808360
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_41143-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_41143-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..c4c9e7df80d28532dca87ba7d85f1964487f308e
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_41143-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108580, "hashes": {}}, "samples": 44294, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47734507, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9531713, "hashes": {}}, "samples": 6352, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6767515, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_41143-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_41143-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..a92bba6a6fdebf183f8fca76ea68b35f6d9c7c09
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_41143-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 36752756,
+    "num_truncated_tokens": 36725132
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44014-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44014-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..330581632d550f23b880bcb9a4fd10b417c96948
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44014-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107957, "hashes": {}}, "samples": 43224, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47597285, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18494070, "hashes": {}}, "samples": 11745, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13117507, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44014-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44014-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..4076be2a316c8daa2cfd4f4affc928fb5943b2df
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44014-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 41099613,
+    "num_truncated_tokens": 41064691
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44978-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44978-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..39b8429589f533ea4e60a34c51ae8179f80166a2
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44978-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107655, "hashes": {}}, "samples": 44054, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47751741, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10476148, "hashes": {}}, "samples": 7062, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7523341, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44978-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44978-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..72789b86b4169445cb57e95c65f6b6f29e30de9f
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44978-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 37209905,
+    "num_truncated_tokens": 37181194
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47806-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47806-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..bb086780840a5dc79cd11d6f58374294171fc09f
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47806-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108118, "hashes": {}}, "samples": 42748, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47796406, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20226341, "hashes": {}}, "samples": 12957, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14428154, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47806-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47806-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..b8c5b9a5680d68ade56b60df06b444876d8c7261
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47806-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 41942939,
+    "num_truncated_tokens": 41906917
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47912-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47912-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..284fdaf89dd27c18b7d505a7e7ea1bdea74d557b
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47912-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108625, "hashes": {}}, "samples": 42638, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47494419, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22338464, "hashes": {}}, "samples": 13992, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15922475, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47912-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47912-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..0dc8fe7732b8b3b50ed15994c1e6adc92b75e3d2
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_47912-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 42970446,
+    "num_truncated_tokens": 42932893
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48076-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48076-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..ba0da5b13b40d40f82af60ababcd98d5818b01c4
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48076-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107835, "hashes": {}}, "samples": 44471, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47649257, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9049172, "hashes": {}}, "samples": 5950, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6458006, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48076-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48076-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..b235f2547be9a0a478d7d9baf66b00449f1b40e1
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48076-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 36518034,
+    "num_truncated_tokens": 36490609
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_50337-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_50337-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..00c3d87427024bf6ed1830bd0df1dcf8a46b56c6
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_50337-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108248, "hashes": {}}, "samples": 43455, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47593424, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16094271, "hashes": {}}, "samples": 10502, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11455414, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_50337-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_50337-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..18b673417e0e6e262d23c7a6a78fc85a6f021e61
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_50337-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 39931286,
+    "num_truncated_tokens": 39898085
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53506-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53506-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..540f6f85c543ce17d6a1529817b7f027fdb9f0ee
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53506-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107027, "hashes": {}}, "samples": 43935, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47798073, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12306857, "hashes": {}}, "samples": 8129, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8812803, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53506-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53506-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..e43685bafec24a360246f7393cf6a45976929baf
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53506-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 38095676,
+    "num_truncated_tokens": 38065208
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55146-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55146-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..59cbb898c66d1ef88b6badd04d262e033d6fec58
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55146-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106842, "hashes": {}}, "samples": 44003, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47829572, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12911244, "hashes": {}}, "samples": 8453, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9195315, "hashes": {}}}], "version": 2}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55146-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55146-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
new file mode 100644
index 0000000000000000000000000000000000000000..c58450868e14add9b9ed5a680997d1a0be9734c0
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55146-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
@@ -0,0 +1,4 @@
+{
+    "num_tokens": 38385678,
+    "num_truncated_tokens": 38354585
+}
\ No newline at end of file
diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55637-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55637-tokenized-chunked-1024-512-128-backfill-nodups/index.json
new file mode 100644
index 0000000000000000000000000000000000000000..2603966e53ce6d9ed47dac1809051c85a8b420ee
--- /dev/null
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55637-tokenized-chunked-1024-512-128-backfill-nodups/index.json
@@ -0,0 +1 @@
+{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108630, "hashes": {}}, "samples": 43116, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47538324, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18374892, "hashes":
{}}, "samples": 11708, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13025263, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55637-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55637-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c5b6fff19c17644f5c1437e8c24a7ddf9b46e05c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_55637-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41044821, + "num_truncated_tokens": 41010117 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56743-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56743-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..05e4e581df383541f01467fb3f06e858dd122ae0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56743-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107580, "hashes": {}}, "samples": 42689, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47843510, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20014739, "hashes": {}}, "samples": 12857, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14349513, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56743-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56743-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..120e8447686d9d2b32b0d390be40623c8aa48f36 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_56743-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41841718, + "num_truncated_tokens": 41805799 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61602-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61602-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..9cc85121eab770a4d4b1ed1b716bfd47ea2b7aba --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61602-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", 
"format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107841, "hashes": {}}, "samples": 43820, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47822428, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13201743, "hashes": {}}, "samples": 8664, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9428720, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61602-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61602-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1ab9cfae4f97774e31c898c6d5e757ffd8816a5a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61602-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38530474, + "num_truncated_tokens": 38499134 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61865-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61865-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..64326899fcbc6ee5af948d097755e6e6ba51a1b5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61865-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108600, "hashes": {}}, "samples": 44064, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47751761, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11679811, "hashes": {}}, "samples": 7705, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8328700, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61865-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61865-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8e714c0fd7afddac909123ce14ecc1fafcba2d9b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_61865-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37792026, + "num_truncated_tokens": 37762346 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6609-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6609-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..add9e5713abd5735869e467f3bfb3c84214e70a7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6609-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107995, "hashes": {}}, "samples": 43067, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47673589, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18814936, "hashes": {}}, "samples": 12078, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13354402, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6609-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6609-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..413edcbf3410276d7a6a74a05159babe20bff79d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6609-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41254595, + "num_truncated_tokens": 41219868 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6713-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6713-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..01912f2dc42223a10a4631b02b5dbfb347ab6fc1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6713-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107252, "hashes": {}}, "samples": 42416, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47469202, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21144353, "hashes": {}}, "samples": 13628, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15114948, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6713-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6713-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 
0000000000000000000000000000000000000000..b58389e6769c59d4a2f0af2b62f89c4ae3f0748e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_6713-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42390899, + "num_truncated_tokens": 42354142 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67177-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67177-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e973dd127586bede5bcdde97d2863b00faf689bb --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67177-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108364, "hashes": {}}, "samples": 42921, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47402411, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19706482, "hashes": {}}, "samples": 12444, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13989626, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67177-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67177-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..790b976b4d0291d1a5a7f9b55e4b8078fb638a77 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67177-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41693553, + "num_truncated_tokens": 41658282 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69111-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69111-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..9cbdcdb3517bc56db3568d57e0b5af34fcda2e7a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69111-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108425, "hashes": {}}, "samples": 43917, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47441977, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12203630, "hashes": {}}, 
"samples": 7949, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8607126, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71002-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71002-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..001e3c345b5ddc5ae1ba32fae5ce2d6369457df2 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71002-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108034, "hashes": {}}, "samples": 43123, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47887456, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18002190, "hashes": {}}, "samples": 11456, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12829947, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71002-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71002-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..9e3db1169066e3fd4ebd681467fbb10395abf0ab --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71002-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40865600, + "num_truncated_tokens": 40831070 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_76495-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_76495-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..26b69ee23e8422351d64df6d9aab8b80df9d4738 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_76495-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107600, "hashes": {}}, "samples": 44025, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47238223, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13844687, "hashes": {}}, "samples": 8822, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9754991, "hashes": {}}}], "version": 2} \ No 
newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_76495-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_76495-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..ae7125140bbf87d39ea6994ff60c294345680a18 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_76495-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38840676, + "num_truncated_tokens": 38808693 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80430-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80430-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f5fef25d6194e5ed1dd139a769122016bf4a7ab7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80430-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108646, "hashes": {}}, "samples": 42022, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47526300, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 24701860, "hashes": {}}, "samples": 15751, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 17587830, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80430-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80430-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2a93cac6b7935a31f84d4e750876c678e3efc0bf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_80430-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 44116829, + "num_truncated_tokens": 44077743 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_82124-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_82124-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..012a6ae50cf1ba6f38ae70813eaa4d0a2dbf3c9e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_82124-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108721, "hashes": {}}, "samples": 43124, "size_limit": 67108864, "version": 2, 
"zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47907647, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18254677, "hashes": {}}, "samples": 11617, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13005118, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_82124-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_82124-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..355091044fd242f7862b50fdace49021e1bf8088 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_82124-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40987210, + "num_truncated_tokens": 40952567 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85233-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85233-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..6e351ea36d2e75887f520d1d5dfc8d829c277ffb --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85233-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108566, "hashes": {}}, "samples": 43709, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47393318, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12161941, "hashes": {}}, "samples": 8046, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8648307, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85233-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85233-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2b111322a009ebfa10853594adc3dc33f0ca3cab --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85233-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38033316, + "num_truncated_tokens": 38004301 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85997-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85997-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 
0000000000000000000000000000000000000000..2518232999d90f5d2ae5716a69c6b72707cc2147 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85997-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107206, "hashes": {}}, "samples": 43911, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47766572, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14089567, "hashes": {}}, "samples": 9094, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10016902, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85997-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85997-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..791be5fe6184e8ed666c92dbc9596066cc30fbe9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_85997-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38957898, + "num_truncated_tokens": 38925852 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_86491-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_86491-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..2cfbe72c4c6cc2abafa144c339b2d8362a6bd545 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_86491-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107605, "hashes": {}}, "samples": 45002, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47828203, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7342958, "hashes": {}}, "samples": 4947, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5285114, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_86491-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_86491-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..0c30479f13f020139044cda0bdd8f146b0290a53 --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_86491-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35679663, + "num_truncated_tokens": 35652295 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_91375-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_91375-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..32b72a6ffd62d3b515627735081b5190b241b1d0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_91375-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107836, "hashes": {}}, "samples": 41998, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47837621, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 23379980, "hashes": {}}, "samples": 15076, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16731001, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_91375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_91375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..5addbb053f94af4a8b22ec1cc98de9d0b645aa68 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_91375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43477057, + "num_truncated_tokens": 43438292 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93237-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93237-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..881f5a607fcecbdbc1f4d09d9d6b2e153afaae38 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93237-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108292, "hashes": {}}, "samples": 43196, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47931988, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17518256, "hashes": {}}, "samples": 11265, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00001.mds.zstd", "bytes": 12540296, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93237-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93237-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7bfda5cd0b392ad22c8ecf2c9073eca4aced040f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93237-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40627491, + "num_truncated_tokens": 40593278 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93943-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93943-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8c03c36aea17795b5df8966a97286a1d40763c92 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93943-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108338, "hashes": {}}, "samples": 42668, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47445797, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21356293, "hashes": {}}, "samples": 13480, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15132167, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93943-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93943-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e222418e477d71a521d2c4c1fd02a3f34410b08c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_93943-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42494258, + "num_truncated_tokens": 42457589 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9792-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9792-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f594f027a1bd45582b5608f1bf3b5cd6cf26a454 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9792-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 
67108615, "hashes": {}}, "samples": 45020, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47975945, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7701495, "hashes": {}}, "samples": 5095, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5495028, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9792-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9792-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..47d0ac58b849ba5fda140a4ca026e7c74886c06c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9792-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35854343, + "num_truncated_tokens": 35826774 +} \ No newline at end of file