diff --git a/.gitattributes b/.gitattributes
index 128f5ab048f7716dcb9255ed505c9338e839a792..aff940fe6c9abaf734a1ac4f359a398c888b85e8 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -22690,3 +22690,27 @@ train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-n
 train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
 train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
 train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text +train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text +train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text +train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10689-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10689-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..826386e3a594a0cc5a73bcab5f81072614974f0c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10689-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108003, "hashes": {}}, "samples": 44290, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47963924, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10212960, "hashes": {}}, "samples": 6765, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7348716, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10689-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10689-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..f66d0277e6bcec70ffa970961d516a8a76b7323e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10689-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37080424, + "num_truncated_tokens": 37051174 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11290-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11290-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..d162b84fab0ca18ad9cedc6919376534970fd584 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11290-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107073, "hashes": {}}, "samples": 44893, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47816720, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", 
"input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6781916, "hashes": {}}, "samples": 4578, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4858182, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11290-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11290-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..0bf7c0df448fd841fba6755988b6868b46dc72b3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_11290-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35413546, + "num_truncated_tokens": 35387846 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_135-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_135-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..37f5ad27ce4416bbcc0583d3462ba7d072f956d7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_135-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107614, "hashes": {}}, "samples": 44100, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47791101, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11164660, "hashes": {}}, "samples": 7409, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8000389, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..348f2a763148b40e52c51ecfcee74e69a4045942 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37541994, + "num_truncated_tokens": 37513537 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14149-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14149-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..32089c89d7633f3c29628ef3541cf217c420471d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14149-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 
@@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107729, "hashes": {}}, "samples": 44183, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47806563, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10051956, "hashes": {}}, "samples": 6591, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7175730, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14149-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14149-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1d086e9eb285d50954cf51849000f0afc609831a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14149-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37008435, + "num_truncated_tokens": 36980322 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18067-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18067-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2ea61cd1396da07ffd2acd68db1c30969d3a0d61 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_18067-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40068381, + "num_truncated_tokens": 40034608 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19108-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19108-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8abbd4e90e74ea96bb8e2192d496312816fd6fab --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19108-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107885, "hashes": {}}, "samples": 42929, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47500720, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19679306, "hashes": {}}, "samples": 12490, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13866399, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19108-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19108-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c9f7294aa54227c57ef41519185600df73f50cc0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_19108-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41678169, + "num_truncated_tokens": 41642933 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_193-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_193-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c93eb6ee17fc65422fb2871991aaf5da5da84159 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_193-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108704, "hashes": {}}, "samples": 44190, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47836096, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11271996, "hashes": {}}, "samples": 7398, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8017643, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_193-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_193-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2068d3248693866f93acf9cc193b95688aa1082a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_193-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37593788, + "num_truncated_tokens": 37564838 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21650-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21650-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..eec8b1e1f6f93691998b082c1c48b898f6cdde28 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21650-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108612, "hashes": {}}, "samples": 43971, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", 
"bytes": 47701795, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12733376, "hashes": {}}, "samples": 8561, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9079198, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21650-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21650-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..6f4cec17fa659af71eac1ab5941b2374ba8c407b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_21650-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38295307, + "num_truncated_tokens": 38264396 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24248-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24248-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..de4aadc3c8d1f09c9fa90703b53a3c10f1cacc95 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24248-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107998, "hashes": {}}, "samples": 44034, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47832991, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12171076, "hashes": {}}, "samples": 7930, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8656385, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24248-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24248-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..380c451d5f59821a76aa87c8e2dc065542e95e1c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_24248-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38031312, + "num_truncated_tokens": 38001591 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25100-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25100-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..9edbd517858223e10c0fce4e05abb545a4bd5505 --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25100-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108505, "hashes": {}}, "samples": 43604, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47558616, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15937197, "hashes": {}}, "samples": 10246, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11306023, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25100-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25100-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..27612b00f24eabaf5e72bf1b65c15f8fe4a56653 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25100-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39856140, + "num_truncated_tokens": 39823535 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2974-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2974-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3274effda7d2a363adb19189c1d8e04df4b97655 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2974-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108160, "hashes": {}}, "samples": 43615, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47645379, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15090389, "hashes": {}}, "samples": 9996, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10781076, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2974-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2974-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d2d1a4bc8606a4bf9de165dc8c7ca359948ca46b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2974-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + 
"num_tokens": 39440105, + "num_truncated_tokens": 39407667 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30030-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30030-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..61646aef757533b039eb397cd2da922567aa3abf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30030-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108665, "hashes": {}}, "samples": 42709, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47612219, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19394221, "hashes": {}}, "samples": 12697, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13903545, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30030-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30030-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..140f6c37f7fad382c4118d22772a4502f814cb3f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30030-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41536506, + "num_truncated_tokens": 41500532 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30873-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30873-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..15c81316559cb0b76233fd1727486d842282edd0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30873-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107390, "hashes": {}}, "samples": 43676, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47765265, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15234404, "hashes": {}}, "samples": 9758, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10820941, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30873-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30873-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d0c60e2b1f1e4d4990d1fd76fe8ef1cdee380eba --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30873-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39517043, + "num_truncated_tokens": 39484942 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33953-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33953-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..cc885f31f1faef7208ad22bc84246a894983038f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33953-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107904, "hashes": {}}, "samples": 43096, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47973330, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16693438, "hashes": {}}, "samples": 10762, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11937840, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33953-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33953-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2a2b8d29a415d61bddc89d74309e3a9af5426164 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33953-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40233409, + "num_truncated_tokens": 40200213 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36392-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36392-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8661150a0e38f0c1368399a36111c8c7f9bdffce --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36392-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108809, "hashes": {}}, "samples": 44230, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47670075, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9180781, "hashes": {}}, "samples": 6168, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6529372, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36392-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36392-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..aadba0bd9b584a5824589aa24880e872e277e10a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36392-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36585028, + "num_truncated_tokens": 36557647 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40223-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40223-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..6951d69c27ed7082bf272725d3a4197457ebfa9c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40223-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108219, "hashes": {}}, "samples": 42481, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47565791, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22209975, "hashes": {}}, "samples": 14032, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15727942, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40223-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40223-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..4b4df36d53dca09cafd886a9df9b5058c58a13e6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40223-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42909663, + "num_truncated_tokens": 42872631 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40375-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40375-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5e203f179904740d7321ea88854846a91d28e2ba --- /dev/null 
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40375-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107225, "hashes": {}}, "samples": 43900, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47507836, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10313496, "hashes": {}}, "samples": 6862, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7299109, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..49ac2372b74edd9b400d25bcf31596885a737ec6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37139178, + "num_truncated_tokens": 37111678 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40614-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40614-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..ac01e3bc975012827aec095687c8cabec6abae2d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40614-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108593, "hashes": {}}, "samples": 44287, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47570198, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9122974, "hashes": {}}, "samples": 6101, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6472516, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40614-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40614-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..10f01c9e372b566987e963bd130c1b5a329c6f81 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40614-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ 
+ "num_tokens": 36556345, + "num_truncated_tokens": 36529240 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4361-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4361-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..ee64f92668b59faf0a581671abd269ac78a3b8ee --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4361-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108653, "hashes": {}}, "samples": 44370, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47986532, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9387012, "hashes": {}}, "samples": 6327, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6740038, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4361-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4361-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b78a101516784bc9e22b8dbbc395d9a1c002f004 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4361-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36678938, + "num_truncated_tokens": 36650608 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48494-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48494-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c40be7d773c818173e1793721479a92021ac2e63 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48494-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108079, "hashes": {}}, "samples": 42736, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47468537, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20028379, "hashes": {}}, "samples": 12846, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14084741, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48494-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48494-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e713a8447d6bf2e64bbe28c380387486868f307e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_48494-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41847693, + "num_truncated_tokens": 41812132 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54867-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54867-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e5275d1e35f129c92b00ebe5c6e1ed90c0706c61 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54867-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108604, "hashes": {}}, "samples": 42760, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47644581, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20513863, "hashes": {}}, "samples": 13219, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14548650, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54867-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54867-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..49c838b2a2f0fee3b3a5c94dc783a7ca28b5ad3b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54867-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42078541, + "num_truncated_tokens": 42041861 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54964-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54964-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..6ea48a4a712b30b95ae71ba7e62ecff7d1a77d79 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54964-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108514, "hashes": {}}, "samples": 43144, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 48036768, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16871056, "hashes": {}}, "samples": 10971, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12055106, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54964-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54964-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..183271b9afb2a82e66976f8e51a2ca4b384a2630 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_54964-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40314723, + "num_truncated_tokens": 40280878 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5602-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5602-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..28ccb8024568ae902b762e979bf65756a4610299 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5602-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107085, "hashes": {}}, "samples": 43781, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47949396, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13524733, "hashes": {}}, "samples": 8857, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9669230, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5602-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5602-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..23995951b2f29854bfa5f56c1af38496d74d03db --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_5602-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38686771, + "num_truncated_tokens": 38655205 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59687-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59687-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8fd800d98654ede90fdbd27ae7553599be97d07d --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59687-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107585, "hashes": {}}, "samples": 43936, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48022075, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11675749, "hashes": {}}, "samples": 7757, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8359064, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59687-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59687-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..f47244d581c894fbcc77472443b815ffbbba8095 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59687-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37791741, + "num_truncated_tokens": 37761533 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59793-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59793-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..d8a101be7ea3235383b6bd8d17c2fd2ca23edae0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59793-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107493, "hashes": {}}, "samples": 43886, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47403165, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11427563, "hashes": {}}, "samples": 7478, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8131624, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59793-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59793-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a8e534b198621e91d6863c8738ea0b8ca67fe757 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59793-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + 
"num_tokens": 37677725, + "num_truncated_tokens": 37649504 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63462-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63462-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..9be9ad4c16fcaded025b77f6b2a4940b25077345 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63462-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107718, "hashes": {}}, "samples": 42356, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47522614, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21884882, "hashes": {}}, "samples": 14042, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15501000, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63462-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63462-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b243564bdf4d975f2f8b27641c2a753f967a9c31 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63462-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42750458, + "num_truncated_tokens": 42713462 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64079-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64079-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b7efbf5ba88b5c7dd88ae57c3e4820a19f496cdd --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64079-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108495, "hashes": {}}, "samples": 43055, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47833951, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18068307, "hashes": {}}, "samples": 11587, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12887416, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64079-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64079-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..69d93e8a05021ff62a33132b96051abadb33f718 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_64079-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40896985, + "num_truncated_tokens": 40862140 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68184-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68184-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..ee7f3b3bff931d84a81f704bc93d8e4d443a8f3d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68184-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107310, "hashes": {}}, "samples": 42439, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47678165, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22251676, "hashes": {}}, "samples": 14135, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15702065, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68184-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68184-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c727a4cda1ac82cce1527bbfb779ad22ad401ddd --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68184-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42928143, + "num_truncated_tokens": 42890539 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68219-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68219-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3918b5481d9ea10bbb2464b0e3859fd740237c23 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68219-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107198, "hashes": {}}, "samples": 42006, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47710968, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 25616073, "hashes": {}}, "samples": 16209, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 18194064, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68219-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68219-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..9a85779d4c84b697e4e2ac85a1ca212ba06219c9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_68219-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 44559470, + "num_truncated_tokens": 44518617 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72266-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72266-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a0cd6c6a7a0bbf51521caeb73ca03f415e88999a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72266-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108245, "hashes": {}}, "samples": 43082, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47605409, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19427316, "hashes": {}}, "samples": 12318, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13784338, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72266-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72266-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..0ddceaeb1bfcc3086404a49d2599273a89ff17cd --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_72266-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41552977, + "num_truncated_tokens": 41517934 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_74016-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_74016-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..e1a7c697d1d90cebb2517a5691de0411badaa16e --- 
/dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_74016-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108834, "hashes": {}}, "samples": 44427, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48027542, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10104165, "hashes": {}}, "samples": 6600, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7181494, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_74016-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_74016-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..eb64a88c81de70f2f2c83a18ec8566e6b08ea625 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_74016-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37027282, + "num_truncated_tokens": 36998355 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_79255-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_79255-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3cef21ebd9842403739e9171fe832e6f0bee7f3d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_79255-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108366, "hashes": {}}, "samples": 44796, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47936726, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8562589, "hashes": {}}, "samples": 5725, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6108978, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_79255-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_79255-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..84d51f5371afded95ce7cf0afd5d7d73b394aaf3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_79255-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 
+1,4 @@ +{ + "num_tokens": 36272198, + "num_truncated_tokens": 36244376 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_83484-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_83484-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..46ba49202daaed3e6f71ec5cffca337de07398bc --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_83484-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108637, "hashes": {}}, "samples": 43782, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47923849, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13008145, "hashes": {}}, "samples": 8577, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9337157, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_83484-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_83484-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..5a71a2c44902eb1e4f48a109b21aad3f64e7afb7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_83484-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38437842, + "num_truncated_tokens": 38406488 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90799-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90799-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..135fb8abf727dac1181c0f9fc3deff39ee76514a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90799-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107797, "hashes": {}}, "samples": 44610, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47667591, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8534242, "hashes": {}}, "samples": 5584, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6055037, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git 
a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90799-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90799-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..9ffbecbb8eebd88192f70eb6213782ff575962d3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_90799-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36267616, + "num_truncated_tokens": 36240808 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9174-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9174-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8b558127f0756810d74a7eceed815cff2ef6405c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9174-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106827, "hashes": {}}, "samples": 44050, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47748484, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12161683, "hashes": {}}, "samples": 8011, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8690023, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9174-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9174-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..dd16ba3b85eae5106431595ca708480cde604783 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_9174-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38023099, + "num_truncated_tokens": 37993180 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95325-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95325-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f7967a4c50e0a1a29279787d3230fd75ffe695b4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95325-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108817, "hashes": {}}, "samples": 43001, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00000.mds.zstd", "bytes": 47722220, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18958716, "hashes": {}}, "samples": 12182, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13539692, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95325-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95325-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..04517494c233b72cd51ef840ac4d895fef5e357d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95325-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41325717, + "num_truncated_tokens": 41290373 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95830-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95830-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..fea82b6e3d3273db9b7b6de8a9205bf411e40076 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95830-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108538, "hashes": {}}, "samples": 43714, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47975469, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14100039, "hashes": {}}, "samples": 9168, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10053626, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95830-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95830-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c0013e262b6d59a1f0e9ec2a19763171c8301b72 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_95830-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38967503, + "num_truncated_tokens": 38935608 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_97628-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_97628-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..de8bdc07e74f9962bf608a3ac0d4b6d77d359a3a --- /dev/null 
+++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_97628-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107469, "hashes": {}}, "samples": 43674, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47606389, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13896953, "hashes": {}}, "samples": 9187, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9932171, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_97628-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_97628-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a26972ece09dc9277c2ebae07bfd06398d148d47 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_97628-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38866225, + "num_truncated_tokens": 38835272 +} \ No newline at end of file diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..ec85760207e3ed36261e0e7f220fb1587374048f --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88e4440ec6502028058a73313eea62c37c71cae0aaa8c6a62157d262e1feb9fa +size 67107362 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds new file mode 100644 index 0000000000000000000000000000000000000000..33d3e8aaa6e5e7b5d8d37c279203ece376b48b8e --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35d358cecf860619f1241933af6746bd4d540f036933b022cb52fde7efbc37ad +size 67108485 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds new file mode 100644 index 0000000000000000000000000000000000000000..98b64aae82ae862f7faef2e417f7d312bbfe6541 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89237b5c96da3f44419199fbfadec39d53f01b046310b59cd98e63e068589071 +size 67107538 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds 
b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds new file mode 100644 index 0000000000000000000000000000000000000000..f82eb897aa51df307fa9fb374115ffcf6e5d3df8 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1150f084a85f6363301bcbf9c840868e94f4264a37ac7d209dba14c14b300be5 +size 67107979 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds new file mode 100644 index 0000000000000000000000000000000000000000..b2ad4001b71dcf34d6fa562f16a3d0404cb9453d --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e418428b6cdb80281d750ffbcb0bf4a10b45b2142cbe481cd1d623d5665c5f9 +size 67108705 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..7b1f0e6da8d6660da927b32155b2399b32a567f1 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a0448b0abcdf3687380c0c9e678b0e0c8bc61d933261312f6b0d3a697275ed2 +size 67108473 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..e8dacfba4867162d0ebb5d5406561c9b4b499fd0 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50a1a8e0189a25a19f308c5272b0740daa68bdb8727c02dea73e971241ff35ed +size 67108346 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds new file mode 100644 index 0000000000000000000000000000000000000000..d885644490ebc4cc4e08fb8e8a3d118e3febb7c3 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f92984e4de860d832cdfd0e0f862d9d6ea4688f541c626c2d2e515352727f002 +size 67108145 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds new file mode 100644 index 0000000000000000000000000000000000000000..690b07f65ffac073ea03d53670d9c1867b37ed4f --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c28ac14c500792ab213c5cc2ea2391639cb069e1298fa2e2083c950a8c9a7c1 +size 67106938 diff --git 
a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds new file mode 100644 index 0000000000000000000000000000000000000000..ba85b0e8d648e26938353bfeb56596d9faf5c0a1 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6309ca14735ae9101c17362be0e566ebbad50f2d027f2e4aaa39dff99b0aa6df +size 67108310 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds new file mode 100644 index 0000000000000000000000000000000000000000..3e886d4a41c49babe0a3041a18969c3034fb41a9 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dea57c8152cf32db2a168f2eba89df63d59545717d55169a89f9b6fb7f4151a8 +size 67107810 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds new file mode 100644 index 0000000000000000000000000000000000000000..3bbc52c02f5deac4f34d531c8cbd6e8d2168ae96 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93b59ebc31c96bbcabbc91573e97ad9ea897384605ed9fa16a6598691a2e77b5 +size 67108489 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds new file mode 100644 index 0000000000000000000000000000000000000000..39e8e2c26f38e9a03809956724cd03e92e9ee8b3 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf1302123c933dd1df047b5136423e5261a18ee95c119ab03f90f20145937797 +size 67108369 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds new file mode 100644 index 0000000000000000000000000000000000000000..7484625db49bfebd50b22955713cb9fecc68883f --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1bd24cac039ffdc829a2306c36664010250dcc776a52b774f08a353f75ab3cf +size 67108100 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds new file mode 100644 index 0000000000000000000000000000000000000000..93c494b92bc9d0c2fe5096fc62cee9a482411e11 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0dfe0b8a785c47d8b1680bbe32f8aa392c3d5fdce18c9247a16ce01b2aca3938 +size 67106996 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds new file mode 100644 index 0000000000000000000000000000000000000000..68eb250913b89f73b1b814b92058e4d0e078035a --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efa96a8208c93e9c2170364b1ea5d6e32b9821553fd076d1fa4b73049c57ae52 +size 67108409 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds new file mode 100644 index 0000000000000000000000000000000000000000..5b96208fb90653fc8503b4797733b98a253ee171 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb29538aac826e7f134e07f136473b7896fee872607bbd6c3829796df89f186c +size 67107456 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds new file mode 100644 index 0000000000000000000000000000000000000000..72a6bc14397f8eb3cd5fcc1f6eb2d8b1f92dc4ec --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f491282dcfa3407350b83bd9033515d4950cc4447913b7b17e4f53070c5ca92 +size 67108794 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds new file mode 100644 index 0000000000000000000000000000000000000000..b838ffb88018ae72a2797031e5ac3f5c58358a10 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7036878233577514a25395624151ca4965fb3ff1e8866bbbbb48268a1cdbe66d +size 67108418 diff --git a/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds new file mode 100644 index 0000000000000000000000000000000000000000..9e9f6220353e4bcac9c6868a9aa118e9ff5cd183 --- /dev/null +++ b/train/stackexchange/stackexchange_0019-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81494734621213fd62b03f39155584c85f2155cfcbdf9ce7cddbc99367a8ba52 +size 67108693 diff --git a/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..1ea2fe225e8b82efb06720e5e237fd2475f4ad7b --- /dev/null +++ b/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7049d53c90dc32521546f4912e1f7235dfe15a9b1eed37da3bb98e3c700addcb +size 67108668 diff --git a/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..c683ad978f7f4dd50faefc8ef162feb5d27c28c8 --- /dev/null +++ b/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:52b867b2ea6c61218ec368448c3497fa8d1b884df2967554db2ed583c1283fef +size 67106883 diff --git a/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds new file mode 100644 index 0000000000000000000000000000000000000000..2771d7a050fcd013a33e53bad65a283d57a06e06 --- /dev/null +++ b/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8440879c2754bc913952198add30d9a8081c12c0df0b7a0cc09bc1f9c9c3f360 +size 67108499 diff --git a/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..cc972a674c1350eb7b770010872cff7e46b1b0ff --- /dev/null +++ b/train/tulu_flan/tulu_flan_0042-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc12f199b3103131992db7809b15b12781e3488415a7b06a107e4616829e82f9 +size 67107798