diff --git a/.gitattributes b/.gitattributes index 7469edca0134ddfe2b8a63b7c1ff14de31fb5b79..8ea2e50e6417336cb6d9c61d900cb67ea04761e3 100644 --- a/.gitattributes +++ b/.gitattributes @@ -10353,3 +10353,40 @@ train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/ train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text 
+train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0072-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text diff --git a/train/cc_en_head/cc_en_head_0072-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds b/train/cc_en_head/cc_en_head_0072-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds new file mode 100644 index 0000000000000000000000000000000000000000..7daf99136c5c2e982d91b3e8acd70cba32127ab0 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0072-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8281e1624a4f131bf6eac1dc4766549a9a027d405c6d1800bb8a318327b08f7 +size 67107754 diff --git a/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..9a5f6e5ddb964cb4f803f92713987fcd56a3cdef --- /dev/null +++ b/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8459dd02ce717353ec890d10d86db50b6863be8780f7ed56e8f9fe9b9d987f28 +size 67107948 diff --git a/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds b/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds new file mode 100644 index 
0000000000000000000000000000000000000000..0d3a022fd177afc80159e6fd63056d9d8d8bb7ac --- /dev/null +++ b/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:628f362e35ce003704de2db2bfcdd93b4f1cc308bedc36d685ccc0e5231f5b27 +size 67108800 diff --git a/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds b/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds new file mode 100644 index 0000000000000000000000000000000000000000..1082dbda93e01ae8557e4b43ab797917b51f363b --- /dev/null +++ b/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb739ddc533790607bb62c39e6c073f962cb76581e4d53c00cfc0da183ab9989 +size 67108287 diff --git a/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds b/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds new file mode 100644 index 0000000000000000000000000000000000000000..86cb95ab67a632b46910e68e2c3f4b6a9a3fd2de --- /dev/null +++ b/train/cc_en_head/cc_en_head_0203-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a48f9dcf799102ac3dbe8cfaf59f3c97682b047fc9abe91712341a9639fd3a40 +size 67107922 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..e0aa81d28eb622c45b70f6ac0150ee043385424d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9dcda60a9834f895f07bdba4c7d8927026fe34bb02541177b8414d055c4bf1a +size 67108614 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..6e04458d1daf9e0b19892f7f3444a1f960bf1214 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:639e01f467c3b6eefda81e1cad6e39dcdae75123dc0f9fcdc319e1bdb37d429e +size 67108418 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds new file mode 100644 index 0000000000000000000000000000000000000000..94e66cb7b438d44341828a55c34ebcfde0845d3b --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:997645b0bd53adc365df1df3b2699b9ae11969ce7f271e569794b189e42752be +size 67107219 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds new file mode 100644 index 
0000000000000000000000000000000000000000..e75d700a6269e379203a14b88e25b6361393c9f4 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90183af1c2a0c666dd120bc600e900172f2125ada0d4e30c72f043950734837a +size 67107151 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds new file mode 100644 index 0000000000000000000000000000000000000000..cc84482fbb411fd5acfc10321e6bfddfb12bc368 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b02532c5245b6897ddc84e014fb5b20863933a467985dd11c46bf621ad5ec55 +size 67107349 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds new file mode 100644 index 0000000000000000000000000000000000000000..09c56b86bff47b4609b68cab364ee568548724e0 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:855e09c70c952fb4257fbd4c79114a6cd6c914bafa34a3e7e8461d6a9cc401c7 +size 67106949 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..459b335d9c246ff9c44e0be1fce59b9fb4831c58 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c3334d5fa6aa7f0f068f99ccd8b5d03a9fcc7f616a1bd06669d544b7de9bb12 +size 67108713 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds new file mode 100644 index 0000000000000000000000000000000000000000..e7801f8c662829895f61addc71fa0476e24d176e --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d333f56fff374c4fa3cc2ee535dc0fd0f1ccf7e6dc362a4cd860a1b899bbff2 +size 67108305 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..8f48880f83e07cbd06f6d081684f04d9e42db184 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93901b7833ff1135baa3bbffeadcfbfcb1dd50601d92239e43d6571cff43971e +size 67107989 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds new file mode 100644 index 
0000000000000000000000000000000000000000..126f0d57b75c922f9f248d92a5ae3fa5fb14787d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79d52997ec6f8c33e60dfa418a6d66758146b37aeaf044d00ff8240d820a426b +size 67108178 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds new file mode 100644 index 0000000000000000000000000000000000000000..cce8299f9c1d1b37189390fc0aa131525ad4a8d4 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f44e523635c7b1c4314cdab19346d80c885a192794d9676812dbec68147ebb6 +size 67108187 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds new file mode 100644 index 0000000000000000000000000000000000000000..7eb644415ea0632171b67f8cd995a8e660b71279 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c766cddad9e091222104961ac3810dba46ffdd1dc5732154c099a6a36fd1915 +size 67108383 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds new file mode 100644 index 0000000000000000000000000000000000000000..cc8194795c2e0f612ba4065d239d8253e1e18c76 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7e4ac9fac2f6a5b699e32d433e67bf4fcd0991c95c4a3a8745855ae12a2031e +size 67107919 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds new file mode 100644 index 0000000000000000000000000000000000000000..7c37d58706824e57d2d995a2b6258d802520ae11 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef10ebc0787344948f24847ef589ead5cc5dffb7f04327afe2568bcaec6d4954 +size 67107646 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds new file mode 100644 index 0000000000000000000000000000000000000000..b5ab73bd843131d70077232a5b14f899dbc9546c --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3784bea46ecb9f97114f96a7c50aa851c2511b2ff8757d22c7c8da0b383e25c5 +size 67107672 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds new file mode 100644 index 
0000000000000000000000000000000000000000..ef1fa5849cda5e6b6832012c1d790338e9c23983 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27a3e278b4fc9885222f2de024ac30557333d4a6944251bf43d97462479517bf +size 67108122 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds new file mode 100644 index 0000000000000000000000000000000000000000..ae370e6d58ffe8168c2f92f71e143cd4a15672c4 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90ed594e0b4f2bed1f5a0ddb01c2aba73dbecebcb295841a1fed33df3c6219f9 +size 67108019 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds new file mode 100644 index 0000000000000000000000000000000000000000..087ef4141f1ca5a8cc2bb2952c623f3c7e2752e6 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:280f92a19c1c5c8be05f3494f8085ab6a42235307ff070aba4329f0c0891f47e +size 67108836 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds new file mode 100644 index 0000000000000000000000000000000000000000..1f5bb4e3348e2c30fa46e6b54694dadc2fcb9541 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77eb82a2fe52b3b68180f216b49757a11733a98935e2593fc639441ff831c714 +size 67108376 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds new file mode 100644 index 0000000000000000000000000000000000000000..1a7ace1ed8d0b23c78ddcdcde506662ef1d1a989 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:630f2fa27a46cd7fd506b1bb0f53e5b5428de0656eed4bbf2a06cd755dd3d1ad +size 67107165 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds new file mode 100644 index 0000000000000000000000000000000000000000..9a5e5cbdb745aa7fd012eb3cdc6b41fb23e85885 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b5d05793c6326c1796971ad256153982e19d63ee0f8dac19286bfd8aafcca8e2 +size 67108818 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds new file mode 100644 index 
0000000000000000000000000000000000000000..1046d8fb35a01d78d9d4e0b4db1d16e38467d7a8 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a6ab8b8acc7fa127c7be8436e6c5ab62507dc29f941345135e2040cca3206da +size 67107518 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds new file mode 100644 index 0000000000000000000000000000000000000000..9dd8831c81d9f1fd9061b242083b46e5c1e6619d --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb6fb95a642150ffc508150635e12c2808cd89a0ec70302801e3ab4f4de2a23e +size 67108611 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds new file mode 100644 index 0000000000000000000000000000000000000000..b8fa36be4fdd10213bcfd5d756cb8d98af4f8f50 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3aee89c15ac6b8263e712dfc76353dd14f378e3dafbe338bd9b9d2e0679e8040 +size 67107948 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds new file mode 100644 index 0000000000000000000000000000000000000000..006f555d59a652574e97dfad653a693a99f5b1a0 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7167f30a8e26d5b9b8de2c93da31f932c5de630603ec565ed27182da0dea03d6 +size 67107878 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds new file mode 100644 index 0000000000000000000000000000000000000000..44225962b0c235c0b70cd80510b97d19d7d1da06 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0df3cba23ea492f1a59a7b3ed30abb99518dfe03f44634b8e9919036c077e844 +size 67107705 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds new file mode 100644 index 0000000000000000000000000000000000000000..c996f215bd06b3ff734cb74bcfd305244ef2a924 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73bf1ab5ff17947e6674e9ebbc6aff8d157a5138e5e2f0418dd5c2dd45761a2d +size 67108367 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds new file mode 100644 index 
0000000000000000000000000000000000000000..4e3bbf433ed0e03680f2b3e26823ed6fc3d0446a --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7cb17020f57b0cff558002cdc854112b357ebf2f8596551bd8fcee19d456416 +size 67108754 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds new file mode 100644 index 0000000000000000000000000000000000000000..b6dd4cb7928b04dd37023b46c7a23af1776e09b6 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2698bef716121d364b1537981d6848f191294fe83bee86e897f93b2591e720c +size 67107406 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds new file mode 100644 index 0000000000000000000000000000000000000000..853dd722964db16aa5cbd37210bcef98df0e5f50 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ed8fdcf3ad6fefda31cf50fc8d9475cf0bd9da1095edb9f7a6a7ee5e1031fec +size 67108581 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds new file mode 100644 index 0000000000000000000000000000000000000000..1a66b5a42dedb8f05dfabedaf5c4d3bbd6b98f0e --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00b9aa84f985c00c25bd0c5c6812d9335d2acc98ebb817482a7aa37472a89dfb +size 67108105 diff --git a/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds new file mode 100644 index 0000000000000000000000000000000000000000..9f898d56631783ca8b189d590d1a9041a7444560 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f5bcb4dfeab895887677a1db0e68d6e822751972ac4400baa4e8f11fd14d135 +size 48801286 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11012-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11012-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..7e665ca35f30e165a7b70d97de429a362836ee5c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11012-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108109, "hashes": {}}, "samples": 43138, "size_limit": 67108864, "version": 2, 
"zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47986483, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17612597, "hashes": {}}, "samples": 11260, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12538051, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11012-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11012-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..60aebe6b07ae56bd8b2f2043992be3cfee4bedf9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11012-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40676498, + "num_truncated_tokens": 40642075 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11337-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11337-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..2dc1e90538424fb9da08be9a3b585fbf923b29cf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11337-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108393, "hashes": {}}, "samples": 44519, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47891825, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9116525, "hashes": {}}, "samples": 6104, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6527620, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11337-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11337-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..62fa85fae213d9e8565673f1899daa36f99c3187 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11337-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36545911, + "num_truncated_tokens": 36518397 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1419-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1419-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 
0000000000000000000000000000000000000000..d4ce1ad7b050cdfc470c3e70fc79cdc6fb584d38 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1419-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108333, "hashes": {}}, "samples": 43624, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47658867, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15786798, "hashes": {}}, "samples": 10175, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11166838, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1419-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1419-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d17486afac1580710aba9dcccb22756762df2391 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1419-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39782475, + "num_truncated_tokens": 39749547 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15667-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15667-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..44c15b9e22405bff7aa3eadbf45e7cdc9b7f7584 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15667-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108457, "hashes": {}}, "samples": 41899, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47548559, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 24475154, "hashes": {}}, "samples": 15767, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 17469482, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15667-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15667-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e31473432055a86874890aa62fe808c2ca777b5d --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_15667-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 44006702, + "num_truncated_tokens": 43967158 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16021-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16021-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..94c06deff89f2fb0e1f885b7115824f16d3931fb --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16021-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107388, "hashes": {}}, "samples": 44022, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47892608, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12367812, "hashes": {}}, "samples": 8017, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8804902, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16021-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16021-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2a06d92e96f8b7d0548d54e7d5b559fb234b4264 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16021-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38127037, + "num_truncated_tokens": 38097465 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18339-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18339-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4f841dba344ed4271f6fcc973b263d3afb6d4ffd --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18339-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107400, "hashes": {}}, "samples": 42949, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47957562, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18255317, "hashes": {}}, "samples": 11784, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00001.mds.zstd", "bytes": 13049025, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18339-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18339-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2bcfb6283d5ebabfa74dbe0e2c62c6faebecf45e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18339-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40987100, + "num_truncated_tokens": 40952220 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21201-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21201-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5e65f5ea6114ba1c4d080dc4071babca497ebe6b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21201-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108360, "hashes": {}}, "samples": 42926, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47602825, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19734764, "hashes": {}}, "samples": 12572, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13998737, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21201-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21201-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..523e129b795cad508bbf2428e144e27c7486c339 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21201-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41703655, + "num_truncated_tokens": 41668308 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_247-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_247-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4abf9f89a69900d173aef5ff54024d160f677d0d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_247-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107259, 
"hashes": {}}, "samples": 43983, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48007365, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12247613, "hashes": {}}, "samples": 8012, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8728524, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_247-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_247-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..853f7988684a3f5db381091a1d9cb9a647575be9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_247-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38068170, + "num_truncated_tokens": 38037717 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27141-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27141-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1808f3c41beffa6bd7f347a970fdc20fef745957 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27141-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37898238, + "num_truncated_tokens": 37868730 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27391-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27391-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..95da5d596b5cc24925f72ad6b4247c0cdec064e6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27391-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107796, "hashes": {}}, "samples": 43750, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47375134, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12003507, "hashes": {}}, "samples": 7893, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8496113, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27391-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27391-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 
0000000000000000000000000000000000000000..6b8d9953637cb5b7a031770d5a95abb0b1d5179f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27391-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37957236, + "num_truncated_tokens": 37928295 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27684-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27684-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c11b51d0216edcb1cd771f1b9eabcdd34d1add7f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27684-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108351, "hashes": {}}, "samples": 43565, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47890862, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15912366, "hashes": {}}, "samples": 10224, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11367143, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27684-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27684-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..48b46302c0fbb51275a80a0c52427961ebbe9292 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27684-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39845554, + "num_truncated_tokens": 39812144 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28076-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28076-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..835ea046a9d76bc7c016826cd621bac4309be220 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28076-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107672, "hashes": {}}, "samples": 43924, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47708027, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12826137, "hashes": {}}, 
"samples": 8334, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9103110, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28076-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28076-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d295517d802d9254c00c8da6e5807c77363a3a4e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28076-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38349554, + "num_truncated_tokens": 38319139 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32214-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32214-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5f443cb93a2f3ee23d630fe5ba2f2de191ec41e6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32214-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107139, "hashes": {}}, "samples": 44338, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47804166, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8986326, "hashes": {}}, "samples": 6097, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6428106, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32214-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32214-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7e00181383243eaa352e1b311bab29074d007f52 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32214-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36485921, + "num_truncated_tokens": 36458668 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_34059-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_34059-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..edb10ee2254f062b40347f6acd0ab001172ab295 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_34059-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": 
"mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108381, "hashes": {}}, "samples": 44492, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47759530, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9121206, "hashes": {}}, "samples": 6053, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6473855, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_34059-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_34059-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..39729aa653f9bfd586101184e31c97b9e4cdc8ee --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_34059-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36550626, + "num_truncated_tokens": 36522966 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37176-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37176-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..d56bea7320330da92a7387c675aea692250f8874 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37176-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108796, "hashes": {}}, "samples": 44796, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47980022, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8196014, "hashes": {}}, "samples": 5420, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5880221, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37176-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37176-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8c305bf21f4d6d111702712c0adca3a148b6ce1b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37176-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36098491, + "num_truncated_tokens": 36070976 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37401-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37401-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..ccc032b0c57949592002d578b706787f17a4c2f3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37401-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107179, "hashes": {}}, "samples": 43117, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47890710, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17305288, "hashes": {}}, "samples": 11175, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12395996, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37401-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37401-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7611f12767aa047ffa14977f20aec2c32619a241 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37401-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40525652, + "num_truncated_tokens": 40491672 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37431-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37431-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..761ae307c850e79810085abafe3b2561e502c453 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37431-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108515, "hashes": {}}, "samples": 43464, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47899475, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15973270, "hashes": {}}, "samples": 10194, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11450199, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37431-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37431-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 
index 0000000000000000000000000000000000000000..4bdf22dd145ed8b7c5f7d4d7944c7b5b0cf55c82 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37431-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39879967, + "num_truncated_tokens": 39847016 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4226-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4226-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f71379956758f7bc3f4a0dcae322a6a5c04a444a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4226-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107972, "hashes": {}}, "samples": 44339, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47738248, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9403804, "hashes": {}}, "samples": 6191, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6682143, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4226-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4226-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..85c50cf766794faa97c9072b2e179cc4c9f0ccbc --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_4226-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36692035, + "num_truncated_tokens": 36664468 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42555-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42555-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c97e1dd70fc063e3f46a4cb67d43477b3a94a384 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42555-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108810, "hashes": {}}, "samples": 43339, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48029273, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15517120, "hashes": {}}, 
"samples": 9990, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11179361, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42555-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42555-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..960037e51c536a58396bc9397648401ff9cdb933 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_42555-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39662171, + "num_truncated_tokens": 39629251 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43628-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43628-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..821b7c6c583a568bd9d87d8bbc97ba819c4b22ba --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43628-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108542, "hashes": {}}, "samples": 44017, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47775878, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12464366, "hashes": {}}, "samples": 8096, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8850247, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43628-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43628-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8c671b0e6bf9a98c3d79b98c34b7c39205a2dcd3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43628-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38173621, + "num_truncated_tokens": 38143886 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43783-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43783-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4e82f3de1d1d0a03b00fe0ba6ba30f43ad18c58f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43783-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": 
"mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107531, "hashes": {}}, "samples": 45019, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47977053, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7488157, "hashes": {}}, "samples": 4831, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5269969, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43783-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43783-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..aca524a3695302be7623023be294a8fabe999f33 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_43783-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35755178, + "num_truncated_tokens": 35729618 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44054-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44054-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..86f658df55f60f56b57f1af2f3687b9cc5c2f478 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44054-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108619, "hashes": {}}, "samples": 42652, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47668454, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19915475, "hashes": {}}, "samples": 12891, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14209012, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44054-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44054-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a3bde29bfeb4f4c3f6d1d01a28c8cb696a12e37d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_44054-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41792779, + "num_truncated_tokens": 41756984 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6382-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6382-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..0fe7982f3fe268f432f423d420b13501935fdac5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6382-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108156, "hashes": {}}, "samples": 44079, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47732540, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12622649, "hashes": {}}, "samples": 8248, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8966747, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6382-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6382-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a0f577ae06c2d0eb2289d29dac56828b047c9dd5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6382-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38246050, + "num_truncated_tokens": 38215227 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68418-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68418-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..22daf5813ea8b8516bcb8281d8c971a1193f1497 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68418-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107258, "hashes": {}}, "samples": 44000, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47819980, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11432607, "hashes": {}}, "samples": 7613, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8197613, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68418-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68418-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 
0000000000000000000000000000000000000000..ce697e84e92fad74897acfda4e13731fdf358889 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68418-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37672574, + "num_truncated_tokens": 37642510 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6845-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6845-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..95a678c1f40d2a72d70dc00ff0a3be6f276d67a8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6845-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108313, "hashes": {}}, "samples": 44053, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48068503, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12459720, "hashes": {}}, "samples": 7980, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8916537, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6845-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6845-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..54b9f3ba66c2f5cacd2fb85dd0c11904ad58e516 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6845-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38173476, + "num_truncated_tokens": 38143136 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71763-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71763-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b2f8a5f7b59d696978f4c1da54ddf0b7b35b5f59 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71763-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107949, "hashes": {}}, "samples": 42987, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47580389, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18051406, "hashes": {}}, 
"samples": 11692, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12825330, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71763-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71763-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7de7a4a8b61051b862d28bd58924dbc5a0275818 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71763-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40887133, + "num_truncated_tokens": 40853194 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73050-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73050-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4042c719ecb14975cb1c3036702d508345777a43 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73050-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108728, "hashes": {}}, "samples": 44033, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47746254, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11779438, "hashes": {}}, "samples": 7629, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8333106, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73050-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73050-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8dc282faf751813f6606882ff27df321ccf5c4d8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_73050-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37845117, + "num_truncated_tokens": 37815607 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_74099-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_74099-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b07530769072433306a48b8b7c3683f6fc181dd1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_74099-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", 
"format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107470, "hashes": {}}, "samples": 43396, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47769686, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14913245, "hashes": {}}, "samples": 9730, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10644667, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_74099-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_74099-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..7309bf50d1b804b044d3bb3d7c78132c281e3830 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_74099-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39365981, + "num_truncated_tokens": 39333826 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76105-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76105-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..223c61a5202bcde9b798fb92bcb38c5b3836960c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76105-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108804, "hashes": {}}, "samples": 43910, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47473818, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12234323, "hashes": {}}, "samples": 7927, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8661954, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76105-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76105-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..99ffd7c4ebcd66e0e99314f7c0abc8a33d0f651e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76105-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38067216, + "num_truncated_tokens": 38037724 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76830-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76830-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..51ec54dfcd52815d8c3838b588c66cb9d6dc0955 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76830-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107233, "hashes": {}}, "samples": 43101, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47570531, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17652479, "hashes": {}}, "samples": 11405, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12510823, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76830-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76830-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e5c4252f54cee1d6d07dce7938004052edd3bc06 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76830-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40692769, + "num_truncated_tokens": 40658661 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79919-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79919-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..fd822ad36abeec05e07e69c0b7500d8760c29b8b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79919-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106951, "hashes": {}}, "samples": 43695, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47414375, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12582831, "hashes": {}}, "samples": 8258, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8920728, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79919-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79919-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 
index 0000000000000000000000000000000000000000..0cbfb75558418da07cf7e2e7ffdc617e63b527a1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79919-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38236825, + "num_truncated_tokens": 38207510 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80057-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80057-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f7ae2fd4c274d5c08670f60f4fbed1dce4fb937f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80057-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107697, "hashes": {}}, "samples": 43058, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47742055, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19056945, "hashes": {}}, "samples": 12111, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13517822, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80057-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80057-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8f7a3a92d992e0caecbefa8087c5fb11cdc7cb46 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_80057-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41374695, + "num_truncated_tokens": 41339283 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81135-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81135-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..690df28b46d4de234c5e9a75d91349c2e2f5de4b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81135-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108015, "hashes": {}}, "samples": 42172, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47743017, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22404762, 
"hashes": {}}, "samples": 14513, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15882916, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..ffa46486fa10abdbbb05372d7eb8cceb769f47f0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81135-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43001684, + "num_truncated_tokens": 42963617 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88996-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88996-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..394b24bf32fbee40d5e1717574b16d5e5900578c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88996-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107245, "hashes": {}}, "samples": 44060, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47988132, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11686577, "hashes": {}}, "samples": 7742, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8405433, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88996-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88996-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..749c736f458fccb7a524fdf86206dba47dfc5955 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88996-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37793767, + "num_truncated_tokens": 37763626 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91007-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91007-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..a7bf9e0e8d9520114ab3a6037183d2fbb5badad0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91007-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": 
"zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108069, "hashes": {}}, "samples": 44042, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48000461, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11955847, "hashes": {}}, "samples": 7912, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8608482, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91007-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91007-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..456f2633c8913d00e0407f992b57d19bbc12fe21 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91007-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37924095, + "num_truncated_tokens": 37893496 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93607-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93607-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..27b899bef31e9be1c1ac6145dcbfc977686fd562 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93607-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107506, "hashes": {}}, "samples": 44041, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47727250, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11286797, "hashes": {}}, "samples": 7629, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8096044, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93607-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93607-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..e0a33c8f2de3f4f40aa3655d8959a872f83c669a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93607-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37598112, + "num_truncated_tokens": 37568511 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_96529-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_96529-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..41d8cde31209c2bce1590a0b55452d7f38e750dc --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_96529-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108238, "hashes": {}}, "samples": 43241, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47592664, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16978970, "hashes": {}}, "samples": 11097, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12069929, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_96529-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_96529-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1abab977a07b0371d6ed06331106eb09310f629f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_96529-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40361743, + "num_truncated_tokens": 40327385 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9691-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9691-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..0189aa93dff7258dc6156ee5ebb0e7a6fa677ca5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9691-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107896, "hashes": {}}, "samples": 44724, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47987333, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8642399, "hashes": {}}, "samples": 5663, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6216410, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9691-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9691-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 
0000000000000000000000000000000000000000..bafba5b7fc6e334e663aaac1b2763be67296456b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_9691-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36315764, + "num_truncated_tokens": 36288061 +} \ No newline at end of file
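
The `index.json` and `num_tokens.json` files added above share one layout per split: a `shards` list whose entries record `samples`, `raw_data.bytes`, and `zip_data.bytes` for each `.mds` shard, plus a per-split token count. As a minimal sketch (not part of this change set), the snippet below reads those two files for a single split directory and prints a summary; the path passed in is a placeholder for a local checkout and the field names are taken directly from the JSON shown in this diff.

```python
import json
from pathlib import Path


def summarize_split(split_dir: str) -> dict:
    """Summarize one tokenized split from its index.json and num_tokens.json."""
    split = Path(split_dir)

    # index.json lists every .mds shard with its sample count and byte sizes.
    index = json.loads((split / "index.json").read_text())
    shards = index["shards"]
    samples = sum(s["samples"] for s in shards)
    raw_bytes = sum(s["raw_data"]["bytes"] for s in shards)
    zip_bytes = sum(s["zip_data"]["bytes"] for s in shards)

    # num_tokens.json carries the per-split token totals.
    tokens = json.loads((split / "num_tokens.json").read_text())

    return {
        "num_shards": len(shards),
        "num_samples": samples,
        "raw_bytes": raw_bytes,
        "compressed_bytes": zip_bytes,
        "num_tokens": tokens["num_tokens"],
        "num_truncated_tokens": tokens["num_truncated_tokens"],
    }


if __name__ == "__main__":
    # Hypothetical local path; point this at any split directory from the diff.
    print(summarize_split(
        "train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/"
        "split_9691-tokenized-chunked-1024-512-128-backfill-nodups"
    ))
```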