orionweller committed on
Commit
dbb76ca
1 parent: c1f1f83

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set of changes.
Files changed (50)
  1. .gitattributes +33 -0
  2. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  3. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  4. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  5. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
  6. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds +3 -0
  7. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds +3 -0
  8. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds +3 -0
  9. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds +3 -0
  10. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds +3 -0
  11. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds +3 -0
  12. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds +3 -0
  13. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds +3 -0
  14. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds +3 -0
  15. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds +3 -0
  16. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds +3 -0
  17. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
  18. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds +3 -0
  19. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
  20. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds +3 -0
  21. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds +3 -0
  22. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds +3 -0
  23. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
  24. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds +3 -0
  25. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds +3 -0
  26. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
  27. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds +3 -0
  28. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds +3 -0
  29. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds +3 -0
  30. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds +3 -0
  31. train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds +3 -0
  32. train/cc_en_head/cc_en_head_0156-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
  33. train/cc_en_head/cc_en_head_0156-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
  34. train/cc_en_head/cc_en_head_0156-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds +3 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11038-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11038-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11139-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11139-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14935-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14935-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16800-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16800-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17962-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17962-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18619-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18619-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28048-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28048-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28806-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28806-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -12190,3 +12190,36 @@ train/cc_en_head/cc_en_head_0126-tokenized-chunked-1024-512-128-backfill-nodups/
train/cc_en_head/cc_en_head_0126-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
train/cc_en_head/cc_en_head_0126-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds filter=lfs diff=lfs merge=lfs -text
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0156-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0156-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/cc_en_head/cc_en_head_0156-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds filter=lfs diff=lfs merge=lfs -text
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f64b8d7d224380a613081e675c46268b3a5957085215a32e71d555b5dab53cb3
+ size 67108237
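
Each shard added in this commit is tracked with Git LFS, so the repository itself stores only a small pointer file (the version, oid, and size lines above) while the ~64 MiB .mds payload lives in LFS storage. As a rough illustration only (not part of this commit), a separately downloaded shard can be checked against its pointer using the Python standard library; the local paths below are assumptions:

import hashlib
import os

def read_pointer(pointer_path):
    # Parse a git-lfs v1 pointer file into a dict of its key/value lines.
    fields = {}
    with open(pointer_path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify_shard(pointer_path, shard_path):
    # Return True if shard_path matches the oid/size recorded in the pointer.
    fields = read_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]   # drop the "sha256:" prefix
    expected_size = int(fields["size"])
    if os.path.getsize(shard_path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(shard_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Hypothetical local paths; adjust to wherever the pointer and LFS object were downloaded.
print(verify_shard("pointers/shard.00000.mds", "data/shard.00000.mds"))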
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b717493a1ef0ef97e4c97de3c23f1ec023d722e719f1be3be8c6715a2bcd852
+ size 67108457
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:543128efd27859f9736433df737f59a14b7c19ab23142032282c5e04515d91c1
+ size 67108593
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24fd2bf65f203dad83d49c0fc57dc7b347636cac434d1f12f2b955978716d68d
+ size 67107300
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e708fe6983df6868ec8f5df5704ee0649bd31d37b0ee0761bb0f6803c1fe0619
+ size 67106976
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e445adea106b2dba46f5e03c7b7cd75af6d54c3f669661ae069d1cbc46487470
+ size 67108103
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93b5112ec9c6cde176acd03f172fbb25643c9ece006f7ef41a66df2b49303a18
+ size 67108711
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:92cb456e7d74b7846f905c826c5bf86e806b0078ac7108df221201c9952af447
+ size 67108798
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59cf02bcbf9787cd9e4944f084333152a235f5c8e2ad335ea86a6c58b8ae2947
+ size 67107161
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:37c8def65617422b7d99c2c4b59742e13fad78de63384662fe5fc5f44f257431
+ size 67108150
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b96b03b84fba305e9edc3fd551e08590fc9b5bb9424827db99690c917a1e2ee8
+ size 67108758
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8975fcbea359d68b1017f3ac41720ea00e2fec4751bf3598b421c3bdb0dffda
+ size 67108344
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6efe3e0efb91ec2ad69ae838f3623ee72e6327795812c98c98b4bcf8f79f6513
+ size 67108290
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68a43e1003cf8b10a1d5a381c64bf31a48b3e9145252550e5b3ff29b70ca3fbe
+ size 67107426
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68c17f32c891a498c8a8263eaef77ce53ebc9af7c70f38ce4f721d9207d8ddf4
+ size 67108033
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:687d73b22db6d05299ccc4fe4e46ab9fdfb191e6dc7feeb970087aefb4d16b1a
+ size 67108066
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3d528c3753bcc94bbd9ca945cb323a5ec252f2c97632d3518a1b28ae88a882c
+ size 67108233
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b8f420c236a60982008460d2b559828b5d3b51cc741e4beadffa5fa68441aa2
+ size 67108274
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ecea6abfd652dc70823f2c0693e22c403f3f05667846a3b4777ee31ff2f1d0fb
+ size 67108602
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c2401b8294494d1cc634382c39ec5d578bff5c8e8a37867679fea6b6b1e104e
+ size 67108228
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eb33b61ea0cc8338c043b663fd20e85ac74d371b08195506916a1d099978e01
+ size 67107385
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ccb5a0ce8349e835999e6d60480f168b540e4460afe11a1bfe59b95e72bddc3
+ size 67108621
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ae112dca2a31ff317e28ced3ed9ac9e06372afedbc740f3e03c45b8dcb904dbd
+ size 67107105
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00026.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd6779158dc5bedb33eb17faa7d57da162abe0d47b16d820e20c3925bc55e902
+ size 67108017
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00224b6cadec61d8f45435094f209007bd8033d0be2575cd3a58b5322d8c46a1
+ size 67107220
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4963aeb878ac74490b127b151a58aae0374787a661b8a6d77f7368b6656ebd15
+ size 67108644
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50426e0c6ec3a9198dba0872bd3d00edd14e44c15659621d593f30198e63b9d3
+ size 67108757
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a055ea687bf8f72818440e00f3d3e9ce31e3ce3c416775a12a291a009c51c7ab
+ size 67108655
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec5198e55b61ae02168dd6944f517c5e3e8287c581142ff03e8528b247adf703
+ size 67106821
train/cc_en_head/cc_en_head_0080-tokenized-chunked-1024-512-128-backfill-nodups/shard.00032.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b635dd6dfd778d7a6bebe2745595331de7f3c24857edf0259cc5dec1d38bfc99
+ size 43849901
train/cc_en_head/cc_en_head_0156-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:407932634ee48af7a5a4c7a7ffaf0882ae01f2efb76fb87e78896de691df0cbb
+ size 67107839
train/cc_en_head/cc_en_head_0156-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:36543b556cf083028c44d9a6d211370de2a0cbbd2700917dce183fad092f4ae9
+ size 67108710
train/cc_en_head/cc_en_head_0156-tokenized-chunked-1024-512-128-backfill-nodups/shard.00037.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c73dcfffabe6ea850231173b8df2940395ba3006d93d7e7cffe1b4dcf845695f
+ size 67107909
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11038-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107882, "hashes": {}}, "samples": 43515, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47533363, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18192910, "hashes": {}}, "samples": 11437, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12877749, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11038-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40949615,
+ "num_truncated_tokens": 40914686
+ }
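
Each split also ships a num_tokens.json with its total and truncated token counts. A small sketch, assuming the files sit under train/ as laid out in this commit, that aggregates those counts across whatever splits are present locally:

# Sum "num_tokens" over every num_tokens.json found under train/ (local layout assumed).
import json
from pathlib import Path

total = 0
for path in Path("train").glob("**/num_tokens.json"):
    with open(path) as f:
        total += json.load(f)["num_tokens"]
print(f"total tokens across downloaded splits: {total:,}")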
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11139-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108818, "hashes": {}}, "samples": 44419, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47797655, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10231975, "hashes": {}}, "samples": 6760, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7300838, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_11139-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37086573,
+ "num_truncated_tokens": 37057693
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14935-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108469, "hashes": {}}, "samples": 43747, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48074985, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14320308, "hashes": {}}, "samples": 9226, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10186532, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_14935-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39074800,
+ "num_truncated_tokens": 39042699
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16800-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108423, "hashes": {}}, "samples": 44318, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47745565, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12455996, "hashes": {}}, "samples": 8092, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8854431, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_16800-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38160329,
+ "num_truncated_tokens": 38129215
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17962-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107331, "hashes": {}}, "samples": 44107, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47760059, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11605486, "hashes": {}}, "samples": 7599, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8263257, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_17962-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37756161,
+ "num_truncated_tokens": 37727108
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18619-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108696, "hashes": {}}, "samples": 44215, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47525607, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9148867, "hashes": {}}, "samples": 6128, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6488854, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_18619-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36570635,
+ "num_truncated_tokens": 36543719
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28048-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107284, "hashes": {}}, "samples": 44828, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48009782, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 7796918, "hashes": {}}, "samples": 5291, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5577524, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28048-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 35901221,
+ "num_truncated_tokens": 35873742
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28806-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108632, "hashes": {}}, "samples": 44502, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47894968, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9411509, "hashes": {}}, "samples": 6204, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6709479, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_28806-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36690890,
+ "num_truncated_tokens": 36662773
+ }