orionweller committed
Commit a3e6331
1 Parent(s): c1eb80d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +36 -0
  2. train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  3. train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  4. train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  5. train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  6. train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  7. train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  8. train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  9. train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds +3 -0
  10. train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds +3 -0
  11. train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  12. train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  13. train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  14. train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  15. train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  16. train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  17. train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  18. train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  19. train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
  20. train/arxiv/arxiv_0085-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10567-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10567-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12351-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12351-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13754-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13950-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13950-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16661-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16661-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18804-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18804-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19535-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19535-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21317-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21908-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21908-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22428-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22428-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22741-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22741-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23309-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23925-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23925-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25073-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25073-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28616-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29681-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29681-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -13218,3 +13218,39 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_91476-tokenized-c
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_46817-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19360-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19360-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_63529-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_87511-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_87511-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79602-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28616-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13754-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21317-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0085-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_88320-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76731-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30893-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_58243-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30692-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_76731-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_30252-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_93267-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d84e7115327a5ae7ce3dc6360d36d7b09f8c60c6e2967b6cb14216c8e7f55cc
+ size 67108699
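Note: each added .mds file in this commit is stored through Git LFS, so the committed content is a three-line pointer (spec version, sha256 object id, payload size) rather than the shard data itself. A minimal sketch for checking a downloaded shard against its pointer, using only the Python standard library (the local payload path is a hypothetical example):

```python
import hashlib
import os

def verify_lfs_pointer(pointer_text: str, payload_path: str) -> bool:
    """Compare a payload file against the oid/size recorded in a Git LFS pointer."""
    fields = dict(line.split(" ", 1) for line in pointer_text.strip().splitlines())
    expected_oid = fields["oid"].split(":", 1)[1]   # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    digest = hashlib.sha256()
    with open(payload_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)

    return (digest.hexdigest() == expected_oid
            and os.path.getsize(payload_path) == expected_size)

# Pointer text as committed above; the payload file is fetched separately (path hypothetical).
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:1d84e7115327a5ae7ce3dc6360d36d7b09f8c60c6e2967b6cb14216c8e7f55cc
size 67108699"""
# print(verify_lfs_pointer(pointer, "shard.00000.mds"))
```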
train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a7eb3a1513d9cfec9aa911ee6ab34d9843a77138893aedfb7b4e251d412861d0
+ size 67107527
train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa9174b23ddbb96cdab96fc2cc05611d64ddf5fdadcfd4ba23fd8b303b4f407a
+ size 67108157
train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22e523aa97c635e463f11c55cadbbd654036d4557127fd444057d9d29766a2d8
+ size 67107820
train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a09e9fa6915100e08e0bc6b4107aa91c4275a148a7e87477c7ae85ecbf0fb9e0
+ size 67108688
train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:839895efb4284d596cf0f3b0d7d58f2019e1fadd7af44e5d7ecb8d324da8e01e
+ size 67107270
train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:195fe7ee1c2df14e06d43fee974ca518454e9920cb34252abef53dcce7f6b7bd
+ size 67108036
train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80043fd25ead95c9a6f2c17768e0f22ded03ef3abac82e15684d748800770897
+ size 67107169
train/arxiv/arxiv_0012-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:231012f93c1a82c4443e8b1840a0d38087faed1735301de83cfe1f9e476bbc67
+ size 55146453
train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:286af1f4c85973d983462c2da9b6cac6088fb48ec77357def03407b4f26e6e51
+ size 67107044
train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd65ec10b8f1d19e19f36bb6aa73ad45af6abd71e1767ca001d06b52503f5951
+ size 67107072
train/arxiv/arxiv_0026-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cf0b85b8ce56a44fa0c2a08c9f28260ecf735a7735141215f755a9fb36ee992
+ size 67107604
train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8469abf16c05eafa0fcc26d5480ac6203bef275af01a16595cbb3ee5473365ab
+ size 67106905
train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ca5c6e2130fa0b8bedacf597fe9b33105a215d06850d705289b1faaefb0a035
+ size 67107516
train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35f6f659ff7f5c8585515ee2c7009a2407c1d7c74d1d54b584061e200405dd45
+ size 67107156
train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:872fbecd26db927863806b11409dd2a6bb6e65a8e718c7a17cd243f204d9d8bd
+ size 67106986
train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a5ec26d8fd30f0facc16eb8b245804ab896337be4c4546c0968f58b5827591f
+ size 67107969
train/arxiv/arxiv_0076-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e38a5dc23909eb578b328d2a2006561a8c89cfc8bb804b7cc9841f074660f803
+ size 54696685
train/arxiv/arxiv_0085-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2cb00cd19b62a9e461357835c01c583bef68e8b6d17a85dea6295630b2264c7
+ size 67107940
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10567-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107109, "hashes": {}}, "samples": 43040, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47413206, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17861796, "hashes": {}}, "samples": 11531, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12661771, "hashes": {}}}], "version": 2}
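The index.json files added here look like the MosaicML Streaming (MDS) index layout — format "mds", version 2, per-shard raw_data/zip_data entries with zstd compression — though that framing is an inference from the fields, not something stated in the commit. A small stdlib-only sketch that tallies one split's shards from such an index (the local path is hypothetical):

```python
import json

# Hypothetical local copy of one split's index.json from this commit.
path = ("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/"
        "split_10567-tokenized-chunked-1024-512-128-backfill-nodups/index.json")

with open(path) as f:
    index = json.load(f)

samples = sum(s["samples"] for s in index["shards"])
raw_bytes = sum(s["raw_data"]["bytes"] for s in index["shards"])
zip_bytes = sum(s["zip_data"]["bytes"] for s in index["shards"])

# For split_10567 as committed above: 2 shards, 54,571 samples,
# 84,968,905 raw bytes vs 60,074,977 zstd-compressed bytes.
print(len(index["shards"]), samples, raw_bytes, zip_bytes)
```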
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_10567-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40795271,
+ "num_truncated_tokens": 40761567
+ }
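Each num_tokens.json records two counts per split; the small gap between them suggests num_truncated_tokens is the token total remaining after sequence truncation, but that reading is an assumption since the commit does not document the fields. A quick check of the difference for split_10567 (path hypothetical):

```python
import json

path = ("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/"
        "split_10567-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json")

with open(path) as f:
    counts = json.load(f)

dropped = counts["num_tokens"] - counts["num_truncated_tokens"]
# 40795271 - 40761567 = 33704 tokens, roughly 0.08% of the split.
print(dropped, f"{dropped / counts['num_tokens']:.4%}")
```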
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12351-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107312, "hashes": {}}, "samples": 43057, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47906298, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17543987, "hashes": {}}, "samples": 11257, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12548916, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_12351-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40644364,
+ "num_truncated_tokens": 40610419
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13754-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:115f83e5e8fc106cee1d87d416de969cdf23d72e0c7b8c894caa95a9b6453009
+ size 12042972
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13950-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107710, "hashes": {}}, "samples": 44201, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47789591, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13487898, "hashes": {}}, "samples": 8515, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9508053, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_13950-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38666324,
+ "num_truncated_tokens": 38635531
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16661-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107497, "hashes": {}}, "samples": 43233, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47955035, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16330429, "hashes": {}}, "samples": 10583, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11692598, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16661-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40053073,
+ "num_truncated_tokens": 40019747
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18804-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107332, "hashes": {}}, "samples": 43577, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47817370, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14321852, "hashes": {}}, "samples": 9485, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10237998, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18804-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39072280,
+ "num_truncated_tokens": 39040012
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19535-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107611, "hashes": {}}, "samples": 43647, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47778657, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13931789, "hashes": {}}, "samples": 9098, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9921847, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_19535-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38887190,
+ "num_truncated_tokens": 38855521
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21317-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d5a1959a6581fce0f9e3b11d2cedda05ae519baee1a80c23b44163a727c3355
+ size 19920778
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21908-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108544, "hashes": {}}, "samples": 43920, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47796073, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12207017, "hashes": {}}, "samples": 8062, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8674746, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_21908-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38048966,
+ "num_truncated_tokens": 38019251
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22428-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107258, "hashes": {}}, "samples": 44065, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47785835, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11356555, "hashes": {}}, "samples": 7515, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8070943, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22428-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37635596,
+ "num_truncated_tokens": 37606689
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22741-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107700, "hashes": {}}, "samples": 43380, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47902924, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16992376, "hashes": {}}, "samples": 10836, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12052261, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_22741-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40371863,
+ "num_truncated_tokens": 40337679
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23309-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107230, "hashes": {}}, "samples": 42879, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47435441, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19954835, "hashes": {}}, "samples": 12687, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14179026, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23925-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106919, "hashes": {}}, "samples": 44007, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47979078, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12243288, "hashes": {}}, "samples": 7960, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8779971, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_23925-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38066667,
+ "num_truncated_tokens": 38036243
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8fd5b21aecc200a14166a6d70d03d2c91e8d03f92c72d7e6db6a844986f46f25
+ size 67108799
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_24273-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0c52a99be8fae97949790ebc3391b2a297ed2022a7945a7eb3133ea53cea75f1
+ size 11881172
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25073-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108226, "hashes": {}}, "samples": 44808, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47875614, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6540968, "hashes": {}}, "samples": 4409, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4673874, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_25073-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35301520,
+ "num_truncated_tokens": 35275877
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28616-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dd47b4987be2596b67b0ffcf6519fed955d9bc9ea4700e8883269340db37c65
+ size 67108766
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29681-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107492, "hashes": {}}, "samples": 44293, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47825794, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10999543, "hashes": {}}, "samples": 7048, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7817439, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_29681-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37464553,
+ "num_truncated_tokens": 37435884
+ }