orionweller committed
Commit 8ddb23f
Parent: 9e9601e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +31 -0
  2. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  3. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  4. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  5. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  6. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  7. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  8. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  9. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  10. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds +3 -0
  11. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds +3 -0
  12. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds +3 -0
  13. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds +3 -0
  14. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds +3 -0
  15. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds +3 -0
  16. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
  17. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds +3 -0
  18. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds +3 -0
  19. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
  20. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds +3 -0
  21. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds +3 -0
  22. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
  23. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds +3 -0
  24. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
  25. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds +3 -0
  26. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds +3 -0
  27. train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds +3 -0
  28. train/cc_en_head/cc_en_head_0142-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds +3 -0
  29. train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  30. train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
  31. train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds +3 -0
  32. train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds +3 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10268-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10268-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12040-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12040-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1796-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1796-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21670-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21670-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25218-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25218-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26618-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26618-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27487-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27487-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30603-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30603-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31197-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31197-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -10757,3 +10757,34 @@ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/
  train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text
  train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
  train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0142-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c3ea9af72192540679ffbfa9ec63be123393053cce9cf7a10c001af365e75c7
+ size 67108232
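Each of the .mds files added in this commit is stored as a Git LFS pointer like the one above: the repository tracks only the spec version, the SHA-256 object id, and the byte size, while the shard bytes live in LFS storage. Below is a minimal sketch, assuming the shard has already been downloaded to the path shown (the local path is hypothetical), for checking a downloaded shard against the oid and size recorded in its pointer:

```python
import hashlib
from pathlib import Path

# Hypothetical local copy of the shard; adjust to wherever the file was downloaded.
shard_path = Path(
    "train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds"
)

# Values copied from the LFS pointer above.
expected_sha256 = "9c3ea9af72192540679ffbfa9ec63be123393053cce9cf7a10c001af365e75c7"
expected_size = 67108232

# Stream the file in 1 MiB chunks so large shards do not need to fit in memory.
digest = hashlib.sha256()
with shard_path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert shard_path.stat().st_size == expected_size, "size mismatch"
assert digest.hexdigest() == expected_sha256, "sha256 mismatch"
print("shard matches its LFS pointer")
```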
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a8a60e6469ad10619a008c8a609ac24801b25e056b9a91f0ae46580ac7bfc53e
+ size 67108787
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a34eb62464c5c1c6dfe7e5ef07434e79226451608fe1a6667c2c5d5bbc3e8c5
+ size 67106941
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4aa4ef672ee80cf9203013d1df6f903addf0245f3eda4bcd300b369c8501124c
+ size 67108106
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f336e9596c4f098aa06114603921ec9730481150bf54a08d6d4d399b1c435fc
+ size 67107635
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d04d84fca8baf0c5920223bf3571620cdb3e55f3a5fa30179bc850d44df4b03
+ size 67107805
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b59d132993b69efeb715eba6299464e2d297dddb99f6512742ccfb8570865d71
+ size 67108032
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5bb6ed19b58dc01a051c0443e1dbf55e87d1227211d4faec5cfbc19d1c1c52c
+ size 67108112
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd4afc7011e9c323e816d1f24f55c5fd1ab01c5fd08e5afb468bdf653b66c411
+ size 67106780
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fb68f7c144c545ada8b4c7815a9a78a40576f4b26a7ea0f2256eff538525508
+ size 67107598
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f62dbe1fa429ace1c1879afa84b3a47dcfaec91172a0f30f7e9572f23a9dcab
+ size 67108032
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f98d3db8e70c01d06b5f89e76cef56ad9775692e6db150fb7787786a09fcba4
+ size 67108813
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e6a386f475e92cdff89b51329efdf73fbfa46aff8b03f17327c337b81b41cc63
+ size 67107901
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85f517eaaf019e1a7ba9320277446564e644e6f7128784deb04ba8ca6edfe8cc
+ size 67108628
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:db1f106c99a00991ce809796d5a9d19b362e3caf96196438653456d661f79bcf
+ size 67107891
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e54d0ddf9959134284b13208140b5bb50a0c2aa4dd1474363d70da0886c8e02b
+ size 67108075
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:642755d2a27d2daeb07ded20ffade2dc348fb41e2de6ddc82831922212e36f8d
+ size 67106761
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e68f1d3b4597d64c83092d8eab151db8e5474f245bf9f2af3553adf9649cbb47
+ size 67106905
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf9ab242acdd1d92216f193c4e66174022675ff9e35e29534677c6f012218ef3
+ size 67108407
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80180e187a3792310bf209a6f2c5889c90a15ad2e5eb3494bac1e5f57ff78a9a
+ size 67107540
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8450ba725900e8cda0a9db6a27eaa37ef491ab07f0c1309ac59ffecbec3468aa
+ size 67108447
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc56d1a82bc0e83b7b36b295df8ae2e3055977a86d231937007220f2957a54c0
+ size 67108688
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1edbb671919d2fc250776ab8b37f30d7de70234453330c87fac14b7ff55467b
+ size 67108784
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cea182d57c854aba773e19cfb53654ece03779bf55a5f2a2f342182a6aa71d29
+ size 67107667
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae8f700eb9bd5d58c948a938d7eb29a856975bf50947af8f89e36ba1eb8a479b
+ size 67108780
train/cc_en_head/cc_en_head_0039-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03cb6b06ec65f1194fe36a77a0b55527cdb43792860aebe07a12dab061ed3eb1
+ size 34038594
train/cc_en_head/cc_en_head_0142-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:761d83432c116b75dc082aaff20a5561e6c789c9c9674df2a8d47937f65f6e41
+ size 67108592
train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88c74197ece190771767db4d977e579d7100dfcd24891cfaf2ccb68f2ae16bbb
+ size 67108819
train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02e6bcae7ced020eb6410a1f9d562d821359d2f85fac5955aa60bc3d1e0618b9
+ size 67107449
train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ddc5f729d40d057d5d37ab9acb2e0f9240d72777ea86297aa9777415ec84249
+ size 67108800
train/cc_en_head/cc_en_head_0175-tokenized-chunked-1024-512-128-backfill-nodups/shard.00053.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24e85ce690c063f650c44df45ca513ce82818dfbb9b0e4df9345e8deceb12b4e
+ size 67108089
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10268-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108117, "hashes": {}}, "samples": 42930, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47841293, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19209358, "hashes": {}}, "samples": 12280, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13725393, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10268-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41449724,
+ "num_truncated_tokens": 41414556
+ }
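Each split also ships a num_tokens.json with its token counts. A small sketch (the local root path and glob are hypothetical) that sums `num_tokens` across all splits of this subset in a local checkout:

```python
import json
from pathlib import Path

# Hypothetical root of a local checkout of this dataset subset.
root = Path("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2")

total = 0
for counts_file in sorted(root.glob("*/num_tokens.json")):
    counts = json.loads(counts_file.read_text())
    total += counts["num_tokens"]
    print(counts_file.parent.name, counts["num_tokens"])

print("total tokens:", total)
```

For split_10268 above, this would read 41,449,724 tokens (41,414,556 after truncation).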
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12040-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107584, "hashes": {}}, "samples": 43576, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47584136, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15234184, "hashes": {}}, "samples": 9898, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10847516, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12040-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39515847,
+ "num_truncated_tokens": 39483429
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1796-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107846, "hashes": {}}, "samples": 42324, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47712112, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 23846691, "hashes": {}}, "samples": 15034, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16989057, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_1796-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 43701656,
+ "num_truncated_tokens": 43662305
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21670-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107711, "hashes": {}}, "samples": 41865, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47402686, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 24639453, "hashes": {}}, "samples": 15792, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 17481426, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21670-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 44088605,
+ "num_truncated_tokens": 44049590
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25218-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106888, "hashes": {}}, "samples": 43533, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47748178, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14405442, "hashes": {}}, "samples": 9245, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10297909, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25218-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39122542,
+ "num_truncated_tokens": 39091307
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26618-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108378, "hashes": {}}, "samples": 42914, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47528630, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20394930, "hashes": {}}, "samples": 13115, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14469902, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26618-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42017552,
+ "num_truncated_tokens": 41980850
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27487-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108258, "hashes": {}}, "samples": 42366, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47781642, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 23805608, "hashes": {}}, "samples": 14932, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16851360, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_27487-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 43683241,
+ "num_truncated_tokens": 43644426
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30603-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107863, "hashes": {}}, "samples": 43355, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47871803, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16663987, "hashes": {}}, "samples": 10590, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11915065, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30603-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40216140,
+ "num_truncated_tokens": 40182418
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31197-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107338, "hashes": {}}, "samples": 43030, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47666386, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17882337, "hashes": {}}, "samples": 11546, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12636740, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_31197-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40805568,
+ "num_truncated_tokens": 40771498
+ }