diff --git a/.gitattributes b/.gitattributes index 44d713ad97811a22a4b57ec4fbd512d42d89e1b8..c739b45e37bc131b305fe5b26631bb9c27394d03 100644 --- a/.gitattributes +++ b/.gitattributes @@ -11600,3 +11600,43 @@ train/cc_en_head/cc_en_head_0193-tokenized-chunked-1024-512-128-backfill-nodups/ train/cc_en_head/cc_en_head_0193-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text train/cc_en_head/cc_en_head_0193-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00043.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0193-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds filter=lfs diff=lfs merge=lfs -text 
+train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00042.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds filter=lfs diff=lfs merge=lfs -text +train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds new file mode 100644 index 0000000000000000000000000000000000000000..e81727014e94bb4714577ff05b7116c46dced3fa --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f51b0380b48f183abc55ac029f4798b1d0004f9443e15a607fcb9117e81cc0c9 +size 67108482 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds new file mode 100644 index 0000000000000000000000000000000000000000..17b1c3ba03dbddd669235421d5106276eedcbf98 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds @@ -0,0 
+1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41f9a869a46ac63694a901363b1a9e82bab127c7885a7eb9ffbe65c1ad97f698 +size 67108673 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds new file mode 100644 index 0000000000000000000000000000000000000000..1b28ba501276fad821e1bc290f0cde36a8fbaf42 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af1fcb4932ad2b0d0aac0158015b67e3df60ad8eb9685ca94a53b39ae019bece +size 67108339 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds new file mode 100644 index 0000000000000000000000000000000000000000..6e28b4fc042bd3c288db044c8f2b75d08158182f --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cf90b3bdd39ef5e6e6192a7bb843fa2193330c46f6c455e27fb8dda24612474 +size 67107025 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds new file mode 100644 index 0000000000000000000000000000000000000000..6b209d28d00ca848a6b18112463f27cf1d30acd7 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21f418f5260e2fbfb1e66c63b7d97bd3a93a1d8262bd82fc1794d32f2c6111cc +size 67107945 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds new file mode 100644 index 0000000000000000000000000000000000000000..61e17c4176ab1a4ac82357985b2f535bea11b3a2 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:122fd1798befd01d73f580ffbbd5b567ea7dd211d5aaba47b8b68e14babdc6b3 +size 67107058 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds new file mode 100644 index 0000000000000000000000000000000000000000..36ed4028be3b5c5da89c3154ff68995912481921 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc58f2b26d82411d40f260083d363c645c53fa6c40fa57e0e48e3531b7dffdda +size 67106930 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds new file mode 100644 index 0000000000000000000000000000000000000000..7ea775fe3a91daa51d634e0d0059530cd1206f42 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:53f120433cceef7eea6a73b027afbd73d3dd18439daf65c299854276b15b2d6b +size 67108665 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds new file mode 100644 index 0000000000000000000000000000000000000000..6a8da15a58b41433a492fa4d3f6cd7c6e98bc620 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54f7928f30f2695fa9032477cf21b96a82c1acd74aa54730885a546acd3a39f7 +size 67107502 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds new file mode 100644 index 0000000000000000000000000000000000000000..0e6f946bd85e760ffa2194a552a73b85bbce2bf9 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e764766b9e735a76c6b8e554f775c715754f88446d41680d0a6a781008193b0 +size 67108829 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds new file mode 100644 index 0000000000000000000000000000000000000000..5d80c815a54bc1fc6963fdf23f146c08ceaef9dc --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b54a44e64b162d776c1ea3124d3bad288d737b970969c579481763fd23fc0211 +size 67107734 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds new file mode 100644 index 0000000000000000000000000000000000000000..6b5a9ee48db81a4f3f2cdac9fb9dc785b7704b82 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fc975ff4088aa7d4a470893c9319abe8b1a7fd96dffdcaae529409eaf2aee9f +size 67107332 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds new file mode 100644 index 0000000000000000000000000000000000000000..1a57917cf4509af9d83a0246a8d7d6ec63274885 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e4e6248020088b91721b075776011a17e723eb56bbe5a9194626738c71b8b1e +size 67108836 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds new file mode 100644 index 0000000000000000000000000000000000000000..cfbac5071b0964a436cc1bfe84a680d8c653c187 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:62b5f60ea2657bd95b269d6c506f80aede3305ba65c046c52623be6eb268e56a +size 67108116 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds new file mode 100644 index 0000000000000000000000000000000000000000..772dd1b2024ab7fbe27f6a5ebed78e437374346b --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa41e03fcb32bc3dce53bf68a0bbd980e91a87053840e697ba2af6f5cb22f16c +size 67107632 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds new file mode 100644 index 0000000000000000000000000000000000000000..fe8da0b5a61ff2a30ba0939e1c3021b300cc867e --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7924e916f8a904cc5ee86cc979171f4b5d2d24263ff2267d1d03e7c5698381f +size 67106801 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds new file mode 100644 index 0000000000000000000000000000000000000000..9c6e055cb105f3a3c22f10e096d44ca3286babd3 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6adbfd9b5a4044eb08a683302086d1153fc0b8273144a73bac1ca13cb5cadaa +size 67108131 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds new file mode 100644 index 0000000000000000000000000000000000000000..f8c6b0ff1f05bab44d8e6fa806898869cbab5909 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:982e4fc0b3e50e6d49585bd435b40eee43b4435cb0717283a58a4e9d4ec400f0 +size 67108000 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds new file mode 100644 index 0000000000000000000000000000000000000000..c1a22b910ac32db531b5529a270d2bc929098d03 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7915e3d3ea2e00bf9fd784b9b58bf574fd7bb2b5037a4772db5f66140077ffc3 +size 67108472 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds new file mode 100644 index 0000000000000000000000000000000000000000..f6c3f8538695b90742edfd9e3ebec871581dfa35 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b0ef7db49941090de48d6a781bf26b32bd0262b2b246dc0aba96eaaa57c12e56 +size 67108658 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds new file mode 100644 index 0000000000000000000000000000000000000000..dffcf5babc7d80152256525602213319a8bacda3 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbd9c2a77fe441f387e4531d4c17e7624b123eec298f74f8530bf7b2c728dc61 +size 67108504 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds new file mode 100644 index 0000000000000000000000000000000000000000..2e16ed7095094efe6fcf863271e228456aa5a77e --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e93114f784568e1623ea3d8b3df829c93595caa07722757791704791dafcd243 +size 67107875 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds new file mode 100644 index 0000000000000000000000000000000000000000..28feb4499ed4f93cc046cbb643bb8cbb0ab25bef --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94cb993b53eee35c6fd0a2e45b80e6e97dbf6569b45d472169d7040b2aaa0aad +size 67108551 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds new file mode 100644 index 0000000000000000000000000000000000000000..c70c9b4e6ccc6f4a121d74586d702a29f2c8ea6c --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2620097aefeb8996c55ea5caa03698de681c54635a740ea97158186f7ef47232 +size 67108735 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds new file mode 100644 index 0000000000000000000000000000000000000000..78640ff1d91e8e7fa4351a829f3509d085a920a3 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5136906677fba8cb7d70d9f4f917c028ae524f5b7ce060aa3fd44ac930d3d944 +size 67107483 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds new file mode 100644 index 0000000000000000000000000000000000000000..36fc91b03da18b9f36268248e0e8488f95343d50 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:52198b807c734f7d0ab4e0c0673b562a0db30e762ab895bb487ddcd5e0f34aa8 +size 67108776 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds new file mode 100644 index 0000000000000000000000000000000000000000..9d059e8c5420c8302c527a2bc6ad931029149b8c --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61abd8e87f351622ab3fbc4994703f1f2b6f44673b3390bb6cbb1d9d09fd687f +size 67108828 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds new file mode 100644 index 0000000000000000000000000000000000000000..816d4dfc3c3080da30de60e5e78f7dfd8e889924 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c17c4cac2b8c88e1bea6d1fc2aec94c93c167a3d209ac3b74848e36174ffbb8 +size 67107494 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds new file mode 100644 index 0000000000000000000000000000000000000000..90849e5a2ba62cc0f7abf460ff9e0c0d33041b76 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00034.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:932bd448be02d92dbcc76f60a9d7d98a67c7ec00f048a7a667cb9e0c7bc13e9f +size 67108814 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds new file mode 100644 index 0000000000000000000000000000000000000000..1fabe7453d0a783f6b253d0112388c26ab6504e1 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00035.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a96859b15a182555c3beabb041f28326457d97c4e65a593110431a59bf414ed +size 67108405 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds new file mode 100644 index 0000000000000000000000000000000000000000..b0617cdc6ba5e163a13f8a58a4ab642dd28a6971 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00036.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15a3a8cc82635918211496abc1442860e3f37da938974e669f85686d91f2bed8 +size 67108762 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds new file mode 100644 index 0000000000000000000000000000000000000000..9f2b556de6bd0a9a7fcb34dc01b9177ce81cf71b --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:96aed2c9d7090d5d073d53bf0e10aead6b476086829558a6681c234970db7edc +size 67108028 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds new file mode 100644 index 0000000000000000000000000000000000000000..8423264dab9da5cf0dd15147650806aa12f55e8a --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00038.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9fdd1e148a53abdaa46cebae7f4e3b7b0c8ed9a10a0147ee6986ed23a471da2 +size 67107622 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds new file mode 100644 index 0000000000000000000000000000000000000000..569c5211752a201b931e09dd09c119f8b283dcf3 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00040.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cbfb529a2657783985cc190818beabe32e12d8541eef85a581bbb63e2a5c945 +size 67107804 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds new file mode 100644 index 0000000000000000000000000000000000000000..cf22da0d135264ab886abf4129c1b3da431d5023 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00041.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:100c2c811390e13d8ebd2a8b0e3cee0c8ef3d921ce8b0dd014a31e8bf8cef4f5 +size 67107839 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00042.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00042.mds new file mode 100644 index 0000000000000000000000000000000000000000..74793f4ba3fe7bea16f51107cde699649f3557df --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00042.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f325dcf98a5184972fc9f168f755ecd2d2d6382f8eacb99835e89358ad3e90c +size 67108510 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds new file mode 100644 index 0000000000000000000000000000000000000000..d955e31b225b874ec1b7b3f33b2c99dd78d53864 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00044.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8b3c2ef9895ac3cac83006c763df3b41b8e786a7de973902e0a7665b916b93b +size 67106766 diff --git a/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds new file mode 100644 index 0000000000000000000000000000000000000000..c03778805a1a75d59400519834ea07414f21e345 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0037-tokenized-chunked-1024-512-128-backfill-nodups/shard.00045.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e7610cdbc87411324f1642bc68eb56bcc6fcb0a09dd5f3319a01b00e7d4c5478 +size 39949597 diff --git a/train/cc_en_head/cc_en_head_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds b/train/cc_en_head/cc_en_head_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds new file mode 100644 index 0000000000000000000000000000000000000000..84a7f0360de0bbbeb6891997d7287e2c6c36c385 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0082-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9efdd067c0c5ed72a3cce258109d7847f62ec37a93a1fc597fb6651ca92a1a39 +size 67107819 diff --git a/train/cc_en_head/cc_en_head_0193-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds b/train/cc_en_head/cc_en_head_0193-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds new file mode 100644 index 0000000000000000000000000000000000000000..737193dadabc001779f998466d9c2b6101987907 --- /dev/null +++ b/train/cc_en_head/cc_en_head_0193-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:daadcd3d1de4d697808f7a34d0f0a1cf587773b2cc6f94c3df685cd57eb2cfcc +size 67108108 diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12200-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12200-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5b919a879dc4d69894c784ccc6d1c70f037c6af8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12200-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106936, "hashes": {}}, "samples": 44606, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47950434, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9468126, "hashes": {}}, "samples": 6130, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6693614, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12200-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12200-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..471257784ff2214dfb1ea8f0e759a5695be1c752 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12200-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36717357, + "num_truncated_tokens": 36688567 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12453-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12453-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file 
mode 100644 index 0000000000000000000000000000000000000000..4cdf48245a6df2df0b6ff7985533ab3ab6ae11cf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12453-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107529, "hashes": {}}, "samples": 42265, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47641793, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 23519922, "hashes": {}}, "samples": 14929, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16677543, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12453-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12453-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..9ba3a464533d05a806f753d991ec8ce46c365642 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12453-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43543207, + "num_truncated_tokens": 43504178 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16110-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16110-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f9508afb9b0b74d2facb340bc176ca777f5d3ab5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16110-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108476, "hashes": {}}, "samples": 43361, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47943196, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17136735, "hashes": {}}, "samples": 10873, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12097711, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16110-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16110-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..5fce1b5a4731b7717e9f1b51032f117502000986 --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16110-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40443910, + "num_truncated_tokens": 40409678 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1945-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1945-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..8effdb51b786e806e86591ccf7fe99e1509ada98 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1945-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107921, "hashes": {}}, "samples": 43359, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47636266, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16340556, "hashes": {}}, "samples": 10696, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11649199, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1945-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1945-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..64d58ea3a445ca3775f37516f23debb3450487e2 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1945-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40051257, + "num_truncated_tokens": 40018214 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27542-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27542-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b6c82be1d242b4eb34f6f2c1794baf9dd25c23af --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27542-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108661, "hashes": {}}, "samples": 44678, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47875606, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7768805, "hashes": {}}, "samples": 5231, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", 
"bytes": 5553341, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27542-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27542-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..bce637c2a1b9d3eea90305fbc1f459ca3450d1fc --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27542-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35894244, + "num_truncated_tokens": 35867777 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28217-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28217-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..5a5d088ea55e77353e299b40590ba2fd61244674 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28217-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107637, "hashes": {}}, "samples": 43966, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47566836, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11159625, "hashes": {}}, "samples": 7293, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8012082, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28217-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28217-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..2f482e06547702ab1d0ac0cead723e13173523d4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28217-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37547117, + "num_truncated_tokens": 37518579 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28892-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28892-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..37a9e791a825d099f4c8cb093dfbdb9604fca356 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28892-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107872, "hashes": {}}, 
"samples": 44003, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47521653, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11266365, "hashes": {}}, "samples": 7395, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8001852, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28892-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28892-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d77ce6fd7e9d9089a58edb9c43801bf4d8593807 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28892-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37596372, + "num_truncated_tokens": 37567532 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32804-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32804-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..18aaef1328f7c9a02897bc1d26aaafefacfc6bc7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32804-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108641, "hashes": {}}, "samples": 43063, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47677198, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18707895, "hashes": {}}, "samples": 12019, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13302446, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32804-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32804-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d1fc361b68c34643d95335458d34203ee9f78fed --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_32804-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41203310, + "num_truncated_tokens": 41168589 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36014-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36014-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 
0000000000000000000000000000000000000000..cbf9e27f2df22ceaa653720987c36078c165d656 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36014-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108704, "hashes": {}}, "samples": 42740, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47754263, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20418310, "hashes": {}}, "samples": 12907, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14568676, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36014-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36014-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..09fd2b354f8940f68d14eb92a23d4219d7dcf24c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_36014-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42040919, + "num_truncated_tokens": 42004638 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37319-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37319-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..2052e9fc8f171b3e25c9d23c267de2c170f19e1b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37319-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108456, "hashes": {}}, "samples": 42727, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47510429, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21015098, "hashes": {}}, "samples": 13309, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14979961, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37319-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37319-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..ddb482efe9c667f65f321ad009fd6df605c0f232 --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_37319-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42327098, + "num_truncated_tokens": 42290325 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40986-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40986-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..460f467846d860000433da6a8ad33103718d4c29 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40986-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107862, "hashes": {}}, "samples": 43568, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48086969, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15396446, "hashes": {}}, "samples": 9800, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10998513, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40986-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40986-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a6028ec64bcff17dc87aa426a653ff727b82cbf5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40986-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39600217, + "num_truncated_tokens": 39567800 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45611-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45611-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..580c7489590d5c198dc0540db68018e87b3696d8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45611-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107162, "hashes": {}}, "samples": 42838, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47667055, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19668326, "hashes": {}}, "samples": 12555, "size_limit": 67108864, "version": 2, "zip_data": {"basename": 
"shard.00001.mds.zstd", "bytes": 13982527, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45611-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45611-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..a90bd0aa095cf4bb6721f94372414ce966317e25 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_45611-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41673065, + "num_truncated_tokens": 41637443 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49132-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49132-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..19ef26cd21c33a20c3da5228bf635d95dfd24efa --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49132-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108671, "hashes": {}}, "samples": 44231, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47484323, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10417303, "hashes": {}}, "samples": 6733, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7390050, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49132-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49132-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..ba63b533a8e9b1b34fe02cd22af34836cddb477f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_49132-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37185639, + "num_truncated_tokens": 37157582 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57084-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57084-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..1a55967f770cc1ff0e506c187702819798a12fd1 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57084-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 
67107756, "hashes": {}}, "samples": 43132, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47460424, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18320303, "hashes": {}}, "samples": 11704, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13000400, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57084-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57084-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..c1f41d441020b180e895fbe8bb41745dd3719072 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_57084-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41016703, + "num_truncated_tokens": 40982243 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62373-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62373-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..9589cce0d1db2d1d77fb79f0be901566e93291c0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62373-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107698, "hashes": {}}, "samples": 43453, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47558055, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16712280, "hashes": {}}, "samples": 10638, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11862196, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62373-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62373-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..cb429262fd19d0f3e88209f6b57f3b79a05a69ce --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_62373-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40235734, + "num_truncated_tokens": 40202293 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64122-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64122-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 
100644 index 0000000000000000000000000000000000000000..6589cb0b87aa8d7d6ab800035e545053b993c666 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64122-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108154, "hashes": {}}, "samples": 43142, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47892031, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16860737, "hashes": {}}, "samples": 10949, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12047688, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64122-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64122-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..05e86be1c9bcf775a77f606a8197c0a921ed9bbb --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64122-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40310142, + "num_truncated_tokens": 40276186 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64177-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64177-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..2d1beb9372f72d8cec75d7e28ff4742966b93636 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64177-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106937, "hashes": {}}, "samples": 41970, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47660332, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 27079029, "hashes": {}}, "samples": 16934, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 19298922, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64177-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64177-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..abae1a74b75d0cad1606c603094c55775192b7cd --- /dev/null +++ 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_64177-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 45269433, + "num_truncated_tokens": 45227844 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6532-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6532-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..8dc90a4f0857028bf7e843f06921ddfbd66f731a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_6532-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43064249, + "num_truncated_tokens": 43026227 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65812-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65812-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..fe601a90c8adb624540649599063ab4229d2f4e6 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65812-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107448, "hashes": {}}, "samples": 43618, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48028703, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14595111, "hashes": {}}, "samples": 9480, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10447254, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65812-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65812-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..4ddf484b715c0ff7a49aa649176a98f3db4aba10 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_65812-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39207815, + "num_truncated_tokens": 39175521 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66640-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66640-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..1b86c0c77f62b6546beed4df20702a16ca7d3db3 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66640-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], 
"column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107208, "hashes": {}}, "samples": 44652, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47960099, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8961708, "hashes": {}}, "samples": 5926, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6371451, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66640-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66640-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..402bfaba394b1726047c4bb3699bdd3edeb548ca --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_66640-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 36469294, + "num_truncated_tokens": 36441486 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68241-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68241-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..04e20ab64ea1c918dcae1ab4a82c67a8828fde19 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68241-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107863, "hashes": {}}, "samples": 42729, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47816707, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21506813, "hashes": {}}, "samples": 13532, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15332368, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68241-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68241-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..6bedf9e360d97d2d6987dd9562d7b3f9e7a08536 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_68241-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42565706, + "num_truncated_tokens": 42528508 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_69549-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_69549-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4660681979541a41b4b0a8b506cbc88383f7c742 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_69549-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107994, "hashes": {}}, "samples": 42901, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47586177, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20160581, "hashes": {}}, "samples": 12861, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14205985, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_69549-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_69549-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..f8c1e20229811fefdc7f821cbf9fcafd6e17ab1c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_69549-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 41908331, + "num_truncated_tokens": 41872528 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70887-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70887-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..7da675de78e33af67d3b381fecba932b32e404cf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70887-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106964, "hashes": {}}, "samples": 43721, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47510579, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12403797, "hashes": {}}, "samples": 8084, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8744499, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70887-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70887-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 
index 0000000000000000000000000000000000000000..9143ea8ff7dd1e16642150d5ab012e11de806c2a --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_70887-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38151882, + "num_truncated_tokens": 38122784 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71292-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71292-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..4c4d4513653d012383124c3ea79e0a5fe155a465 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71292-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106987, "hashes": {}}, "samples": 42888, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47624574, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18007433, "hashes": {}}, "samples": 11848, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12772838, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71292-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71292-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..1d9af3bc874fb771e2beab83c25dc9eece96f295 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71292-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40862988, + "num_truncated_tokens": 40828546 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71958-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71958-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..91d5634ec2b30ae3302d917c57f16abf1c7c7c7e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71958-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108046, "hashes": {}}, "samples": 43404, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48009953, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15389155, 
"hashes": {}}, "samples": 9935, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10969159, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71958-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71958-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..d0b9e00cbc98a26b2e3f92a3163d8641635726f5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_71958-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 39597580, + "num_truncated_tokens": 39565035 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72611-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72611-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..10e60495be00ba77dcc940ba51d420f5e2dc19a0 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72611-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106944, "hashes": {}}, "samples": 43719, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47550335, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12667961, "hashes": {}}, "samples": 8328, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8982266, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72611-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72611-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..86b095e556d4f7045149d4917553a967237457a7 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_72611-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38276592, + "num_truncated_tokens": 38246421 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76005-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76005-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..2435bdae4ac4af8b6fe38499a9f4819de913075f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76005-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": 
"zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107859, "hashes": {}}, "samples": 43013, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47887615, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16835432, "hashes": {}}, "samples": 11113, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12065282, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76005-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76005-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..cf68bbedf0a2ddabecc5d24c0cfcc9c11ff4bd0d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76005-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40296289, + "num_truncated_tokens": 40262390 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78276-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78276-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..178b6a92883a873e07fb207288c3c19aed9af16f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78276-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107519, "hashes": {}}, "samples": 44385, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47766058, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11708738, "hashes": {}}, "samples": 7632, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8304316, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78276-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78276-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..97a3818d8471815e6fd9f7b2201bd10e162a81da --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78276-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37798360, + "num_truncated_tokens": 37768616 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78375-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78375-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..d4c9edf8dc673fb3a431475c177b0a54a2033d67 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78375-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107724, "hashes": {}}, "samples": 43571, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47868805, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13610167, "hashes": {}}, "samples": 9082, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9712610, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..44572f0f9aece5b97189ac7507987c8d67a5116e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78375-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38729322, + "num_truncated_tokens": 38697963 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78551-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78551-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..678182b2146b31729488cd96483fd462d1394a8e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78551-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108627, "hashes": {}}, "samples": 43384, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48045784, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16360603, "hashes": {}}, "samples": 10440, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11646054, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78551-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78551-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 
index 0000000000000000000000000000000000000000..f221415d9dcad7afd113c4873967e656306534e4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_78551-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40068500, + "num_truncated_tokens": 40034967 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81616-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81616-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..21ce55276c234aeaa7a3910b8035134cb222819f --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81616-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106912, "hashes": {}}, "samples": 43071, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47435006, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20725894, "hashes": {}}, "samples": 13075, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14675273, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81616-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81616-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..0d6ef476bfcbd9389fd4cf44d00aae62a1e8d308 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81616-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42178608, + "num_truncated_tokens": 42141688 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81675-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81675-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..82058690b6a775e20a86d6a76bfbdd3c145a99e8 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81675-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108594, "hashes": {}}, "samples": 44140, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47832393, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12508947, 
"hashes": {}}, "samples": 8076, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8867615, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81675-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81675-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..fb46af82dcb90a96855af69bf644029bdf7068f5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81675-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 38192839, + "num_truncated_tokens": 38162371 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83276-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83276-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..107f82ee19ae31dd1cc8ac2fc34d0806b526f0c9 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83276-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108732, "hashes": {}}, "samples": 42550, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47681419, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21091534, "hashes": {}}, "samples": 13545, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15035972, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83276-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83276-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..b4d717acb89a80171827dae07ef9b2e38570fe82 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_83276-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 42363728, + "num_truncated_tokens": 42327174 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_85545-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_85545-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..c52864c2881f4f9980751635f164af954cdcb60e --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_85545-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": 
"zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108584, "hashes": {}}, "samples": 42406, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47636833, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22670847, "hashes": {}}, "samples": 14368, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16002864, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_85545-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_85545-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..923e4b4e7d7a3003e3bbed12bf9914bc503d800c --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_85545-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 43132177, + "num_truncated_tokens": 43094638 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90717-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90717-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..b3101f14adb88f85ca4a3f9d39f1c8d5a7010dd5 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90717-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108042, "hashes": {}}, "samples": 44800, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47594767, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7905165, "hashes": {}}, "samples": 5298, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5635616, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90717-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90717-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..31bf5670dc8cd8ec02bd388588ea2e560d0bc1da --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90717-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 35956327, + "num_truncated_tokens": 35929719 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90891-tokenized-chunked-1024-512-128-backfill-nodups/index.json 
b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90891-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..f714da363ee7b49913099e36e43584c7eb5803b4 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90891-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107152, "hashes": {}}, "samples": 43919, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47820226, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11603634, "hashes": {}}, "samples": 7851, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8313477, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90891-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90891-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..28815a6eda47aea1e30c029211577f4889b4953b --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90891-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37753238, + "num_truncated_tokens": 37723168 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91493-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91493-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..3c4beee376e23afc6065812cccec6843298986cf --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91493-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107533, "hashes": {}}, "samples": 43483, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47534167, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16439717, "hashes": {}}, "samples": 10524, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11665835, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91493-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91493-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 
index 0000000000000000000000000000000000000000..853d266a1430f000204957822ab145b44a014a04 --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91493-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 40102043, + "num_truncated_tokens": 40068794 +} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93040-tokenized-chunked-1024-512-128-backfill-nodups/index.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93040-tokenized-chunked-1024-512-128-backfill-nodups/index.json new file mode 100644 index 0000000000000000000000000000000000000000..982c48f36ea014003611070115864da7418b84be --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93040-tokenized-chunked-1024-512-128-backfill-nodups/index.json @@ -0,0 +1 @@ +{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108766, "hashes": {}}, "samples": 44419, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47898489, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10382313, "hashes": {}}, "samples": 6841, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7408501, "hashes": {}}}], "version": 2} \ No newline at end of file diff --git a/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93040-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93040-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json new file mode 100644 index 0000000000000000000000000000000000000000..25f2e58965efe869ec704983051c460df5df526d --- /dev/null +++ b/train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93040-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json @@ -0,0 +1,4 @@ +{ + "num_tokens": 37159218, + "num_truncated_tokens": 37130442 +} \ No newline at end of file
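The index.json payloads added in this commit appear to follow the MosaicML Streaming MDS index layout (format "mds", version 2): each shard entry records its column names and encodings, raw and zstd-compressed byte sizes, and sample count, while the companion num_tokens.json files record per-split token totals. A minimal sketch, assuming the repository is checked out locally with the directory layout shown in these paths and using only the Python standard library, of how the per-split metadata could be tallied:

```python
# Tally samples and token counts across the tokenized splits added above.
# Assumes a local checkout where each split directory contains the
# index.json and num_tokens.json files shown in this diff.
import json
from pathlib import Path

BASE = Path("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2")

total_samples = 0
total_tokens = 0
total_truncated = 0

for split_dir in sorted(BASE.glob("split_*-tokenized-chunked-1024-512-128-backfill-nodups")):
    index_path = split_dir / "index.json"
    tokens_path = split_dir / "num_tokens.json"

    if index_path.exists():
        index = json.loads(index_path.read_text())
        # Each shard entry lists its sample count alongside raw/compressed sizes.
        total_samples += sum(shard["samples"] for shard in index["shards"])

    if tokens_path.exists():
        counts = json.loads(tokens_path.read_text())
        total_tokens += counts["num_tokens"]
        total_truncated += counts["num_truncated_tokens"]

print(f"total samples : {total_samples}")
print(f"total tokens  : {total_tokens} (truncated: {total_truncated})")
```

For example, running the sketch against only the splits listed in this diff would sum the "samples" fields of each shard (e.g. 44231 + 6733 for split_49132) and the "num_tokens" values of each num_tokens.json; loading the shards themselves for training would instead go through a streaming-capable reader of the MDS format rather than raw JSON parsing.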