orionweller committed
Commit f60e47d
1 Parent(s): 538c27d

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50)
  1. .gitattributes +31 -0
  2. train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  3. train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds +3 -0
  4. train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds +3 -0
  5. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1146-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  6. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1146-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  7. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12660-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  8. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12660-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  9. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16008-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  10. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16008-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  11. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20955-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  12. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20955-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  13. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22175-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  14. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22175-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  15. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26096-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  16. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26096-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  17. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2820-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  18. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2820-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  19. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29414-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29414-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29621-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29621-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36864-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36864-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44812-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44812-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4626-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4626-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46685-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46685-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49770-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49770-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51208-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51208-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51855-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51855-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53944-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53944-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58340-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58340-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59508-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59508-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62531-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62531-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63412-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63412-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66767-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66767-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67612-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67612-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -22244,3 +22244,34 @@
  train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
  train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
  train/algebraic-stack/algebraic_stack_train_0013-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00004.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00025.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00018.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00009.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00014.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00006.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00022.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0021-tokenized-chunked-1024-512-128-backfill-nodups/shard.00005.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+ train/stackexchange/stackexchange_0011-tokenized-chunked-1024-512-128-backfill-nodups/shard.00023.mds filter=lfs diff=lfs merge=lfs -text
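Each added line above follows the usual Git LFS attribute shape, `<path> filter=lfs diff=lfs merge=lfs -text`, which routes the new .mds shards through LFS rather than storing them as ordinary git blobs. As a hedged aside (not part of this commit), entries of this shape are easy to inspect programmatically; the helper below is a minimal sketch that assumes exactly the one-path-per-line layout shown, and real .gitattributes files may also contain glob patterns.

```python
# Minimal sketch: list LFS-tracked paths from .gitattributes text shaped like the
# entries above ("<path> filter=lfs diff=lfs merge=lfs -text").
# Hypothetical helper, not part of this commit.
def lfs_tracked(gitattributes_text: str) -> list[str]:
    paths = []
    for line in gitattributes_text.splitlines():
        parts = line.split()
        if len(parts) > 1 and "filter=lfs" in parts[1:]:
            paths.append(parts[0])
    return paths

sample = (
    "train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128"
    "-backfill-nodups/shard.00016.mds filter=lfs diff=lfs merge=lfs -text"
)
print(lfs_tracked(sample))  # -> the single .mds path above
```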
train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9dc1da9b110a6e867b85157f3f76e3d2cd034e5f5e12f524cab66343dd68384b
+ size 67108816
train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128-backfill-nodups/shard.00002.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:19b4f9970b27963183fde7a01e0a368dc7e4e3dcea05cb7a81926a54d59e6733
+ size 67107390
train/algebraic-stack/algebraic_stack_train_0003-tokenized-chunked-1024-512-128-backfill-nodups/shard.00016.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74708d6046c2367962311ea76443eec97878029ec32edb5cc57269efc768b0d8
+ size 67106969
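The three .mds diffs above add Git LFS pointer files, not the shard bytes themselves: each pointer records the LFS spec version, the sha256 of the real payload, and its size (here just under the 67108864-byte shard limit that also appears in the index.json files below). A minimal sketch for parsing that three-line format with only the standard library; the `parse_lfs_pointer` name is hypothetical.

```python
# Minimal sketch: parse a Git LFS pointer of the three-line form shown above.
# Hypothetical helper, not part of this commit.
def parse_lfs_pointer(text: str) -> dict[str, str]:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """\
version https://git-lfs.github.com/spec/v1
oid sha256:9dc1da9b110a6e867b85157f3f76e3d2cd034e5f5e12f524cab66343dd68384b
size 67108816
"""

info = parse_lfs_pointer(pointer)
print(info["oid"], int(info["size"]))  # sha256:9dc1... 67108816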
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1146-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108179, "hashes": {}}, "samples": 43965, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47776748, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13688294, "hashes": {}}, "samples": 8987, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9711259, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1146-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38759523,
+ "num_truncated_tokens": 38727293
+ }
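Each num_tokens.json is a small per-split token counter with the same two keys. A hedged sketch for totaling these counts across every split in a local checkout; the glob pattern is an assumption about the directory layout:

```python
import glob
import json

# Assumption: repo checked out locally as train/<source>/<split-dir>/num_tokens.json.
totals = {"num_tokens": 0, "num_truncated_tokens": 0}
for path in glob.glob("train/*/*/num_tokens.json"):
    with open(path) as f:
        counts = json.load(f)
    for key in totals:
        totals[key] += counts.get(key, 0)

print(totals)
```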
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12660-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108549, "hashes": {}}, "samples": 44370, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47961420, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9392074, "hashes": {}}, "samples": 6311, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6691238, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12660-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36681867,
+ "num_truncated_tokens": 36653601
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16008-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107505, "hashes": {}}, "samples": 43078, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47995482, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16137128, "hashes": {}}, "samples": 10523, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11588666, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16008-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39963114,
+ "num_truncated_tokens": 39929951
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20955-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108270, "hashes": {}}, "samples": 43491, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48002673, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15099235, "hashes": {}}, "samples": 9927, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10810439, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20955-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39450338,
+ "num_truncated_tokens": 39417781
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22175-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107100, "hashes": {}}, "samples": 42495, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47329715, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21348196, "hashes": {}}, "samples": 13707, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15111585, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22175-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42487985,
+ "num_truncated_tokens": 42451178
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26096-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108016, "hashes": {}}, "samples": 43075, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47580806, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17932295, "hashes": {}}, "samples": 11757, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12792502, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_26096-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40823127,
+ "num_truncated_tokens": 40788776
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2820-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108245, "hashes": {}}, "samples": 44306, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47735079, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10029238, "hashes": {}}, "samples": 6538, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7152795, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2820-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36995175,
+ "num_truncated_tokens": 36966852
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29414-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108244, "hashes": {}}, "samples": 44357, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47851223, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9516595, "hashes": {}}, "samples": 6409, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6810930, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29414-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36741380,
+ "num_truncated_tokens": 36713880
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29621-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108600, "hashes": {}}, "samples": 44337, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47872065, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9727225, "hashes": {}}, "samples": 6373, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6910026, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_29621-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36848539,
+ "num_truncated_tokens": 36820452
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36864-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108705, "hashes": {}}, "samples": 43728, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47988674, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13320809, "hashes": {}}, "samples": 8883, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9531697, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_36864-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38586469,
+ "num_truncated_tokens": 38555052
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44812-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108260, "hashes": {}}, "samples": 44262, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47849671, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11166505, "hashes": {}}, "samples": 7340, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7959773, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_44812-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37540435,
+ "num_truncated_tokens": 37510644
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4626-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107698, "hashes": {}}, "samples": 44232, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47855456, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11923117, "hashes": {}}, "samples": 7711, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8500902, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4626-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37907913,
+ "num_truncated_tokens": 37877432
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46685-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107825, "hashes": {}}, "samples": 43770, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47457859, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12735799, "hashes": {}}, "samples": 8315, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9031649, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46685-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38309752,
+ "num_truncated_tokens": 38279638
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49770-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108223, "hashes": {}}, "samples": 44204, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47793120, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11328012, "hashes": {}}, "samples": 7446, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8041362, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49770-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37619688,
+ "num_truncated_tokens": 37590048
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51208-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108620, "hashes": {}}, "samples": 42037, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47630659, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 24855470, "hashes": {}}, "samples": 15780, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 17674036, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51208-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 44192276,
+ "num_truncated_tokens": 44153056
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51855-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108623, "hashes": {}}, "samples": 43714, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47786875, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17554036, "hashes": {}}, "samples": 10767, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12436863, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_51855-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40645007,
+ "num_truncated_tokens": 40611136
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53944-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108179, "hashes": {}}, "samples": 43593, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47971370, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14275013, "hashes": {}}, "samples": 9368, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10207700, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53944-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39052429,
+ "num_truncated_tokens": 39020598
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58340-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107636, "hashes": {}}, "samples": 42934, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47925405, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18959696, "hashes": {}}, "samples": 12052, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13470196, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_58340-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41331519,
+ "num_truncated_tokens": 41295994
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59508-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107508, "hashes": {}}, "samples": 44636, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47844770, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8016056, "hashes": {}}, "samples": 5335, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5715710, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_59508-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36015321,
+ "num_truncated_tokens": 35988656
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62531-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107775, "hashes": {}}, "samples": 42540, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47543769, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21531907, "hashes": {}}, "samples": 13682, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15251526, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_62531-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42579417,
+ "num_truncated_tokens": 42542502
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63412-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108510, "hashes": {}}, "samples": 43859, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47742457, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 13397722, "hashes": {}}, "samples": 8764, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9560965, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63412-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38624502,
+ "num_truncated_tokens": 38593350
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66767-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107673, "hashes": {}}, "samples": 44249, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47880740, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11554707, "hashes": {}}, "samples": 7460, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8282493, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_66767-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37730944,
+ "num_truncated_tokens": 37700958
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67612-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108159, "hashes": {}}, "samples": 43899, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48066368, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12989341, "hashes": {}}, "samples": 8403, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 9255440, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_67612-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38429899,
+ "num_truncated_tokens": 38399135
+ }