orionweller committed on
Commit
ef0b5df
1 Parent(s): 0b39846

Add files using upload-large-folder tool
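The commit note above refers to the Hugging Face upload-large-folder tool. As a hedged sketch (the repo id and local folder below are placeholders, not taken from this commit), an upload like this is typically issued through huggingface_hub:

```python
# Hypothetical sketch of producing a commit like this one with huggingface_hub's
# upload_large_folder helper; repo_id and folder_path are placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="user/tokenized-dclm-shards",  # hypothetical dataset repo
    repo_type="dataset",
    folder_path="./train",                 # local folder holding the .mds shards and JSON metadata
)
```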

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +39 -0
  2. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26773-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  3. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_52232-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  4. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79781-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  5. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79781-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  6. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81730-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  7. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90796-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds +3 -0
  8. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94645-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds +3 -0
  9. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10713-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  10. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10713-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  11. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12989-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  12. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12989-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  13. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13215-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  14. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14683-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  15. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14683-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  16. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20247-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  17. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20247-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  18. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25519-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  19. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25519-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  20. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30689-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  21. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30689-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  22. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33349-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  23. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33349-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  24. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3404-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  25. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3404-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  26. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34295-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  27. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34295-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  28. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38924-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  29. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38924-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  30. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39480-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  31. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39480-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  32. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40594-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  33. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40594-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  34. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40957-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  35. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40957-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  36. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46349-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  37. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46349-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  38. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49753-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  39. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49753-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  40. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53784-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  41. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53784-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  42. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60169-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  43. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60169-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  44. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63158-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  45. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63158-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  46. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63880-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  47. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63880-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
  48. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69687-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  49. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71261-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
  50. train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71261-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes CHANGED
@@ -29787,3 +29787,42 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40641-tokenized-c
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94645-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_89236-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
  train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_40641-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79781-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94645-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_52232-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81730-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90796-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26773-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79781-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_27280-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19066-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_19066-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_56669-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_69748-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_77974-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_77974-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_37420-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_69748-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_88329-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_28204-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_37439-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_28204-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_37439-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_73751-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_73751-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_37420-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_86536-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10115-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_73057-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_57405-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49133-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_10115-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_55680-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_45887-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_45887-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_44110-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_49133-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_73057-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_40090-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_18524-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_18524-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_26773-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c723a6de1ca5ece36beaa51cfcfcdaca905cc4625a29f10553ee289091847a8
+ size 8841695
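The shard.*.mds entries in this commit are stored as Git LFS pointers (spec version, SHA-256 oid, byte size) rather than the shard bytes themselves. A minimal sketch of fetching one actual shard via huggingface_hub; the repo id is a placeholder:

```python
# Hypothetical sketch: resolve one LFS-backed shard to a local file.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="user/tokenized-dclm-shards",  # hypothetical dataset repo
    repo_type="dataset",
    filename=(
        "train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/"
        "split_26773-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds"
    ),
)
print(local_path)  # cached local path of the ~8.8 MB shard listed above
```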
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_52232-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25d4b7c95c6410c4e358778785027add9fa8d17694a98e59389e7f86a3ef78d2
+ size 8747683
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79781-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e074fdba9e835620ba2e3a33896b9b0c837105d1fc30877a6fc7d5b500172c1e
+ size 67107077
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_79781-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b236a9c3cb1f54ff02c277ea4b633dac97fe602e7b387eadd6bc9429af99126
+ size 21684936
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_81730-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:df6de67173a9b6cb096e2f3eed883b46f19d754b7c6e72d00dc4a6e7caed2311
+ size 11900362
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_90796-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dca9608e73d73d3d86bd081f2d9c269320a3177ff8c820149136c92ec3cdddd4
+ size 8313940
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v2/split_94645-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11594e61819b036c4a78f3a246f665a835ccb7902d3bfe711322c182d41814bb
+ size 67108580
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10713-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107857, "hashes": {}}, "samples": 42184, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47552025, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 24869043, "hashes": {}}, "samples": 15611, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 17608049, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10713-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 44199311,
+ "num_truncated_tokens": 44159756
+ }
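The index.json files added for each split follow the MosaicML Streaming (MDS) shard-index layout: per shard they record column names and encodings ("id" as str, "input_ids" as ndarray:uint16), raw and zstd-compressed sizes, and sample counts, while num_tokens.json records total and truncated token counts. A minimal sketch of iterating one split with the streaming package, assuming the split directory (index.json plus its shards) is available locally:

```python
# Hypothetical sketch: read samples from one MDS split directory on local disk.
from streaming import StreamingDataset

ds = StreamingDataset(
    local=(
        "train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/"
        "split_10713-tokenized-chunked-1024-512-128-backfill-nodups"
    ),
    shuffle=False,
)
sample = ds[0]
print(sample["id"], len(sample["input_ids"]))  # document id and its uint16 token ids
```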
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12989-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108510, "hashes": {}}, "samples": 44186, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47780950, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12012085, "hashes": {}}, "samples": 7887, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8561161, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12989-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37948822,
+ "num_truncated_tokens": 37918536
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_13215-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37396389,
+ "num_truncated_tokens": 37368151
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14683-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108717, "hashes": {}}, "samples": 44679, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48142319, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8431910, "hashes": {}}, "samples": 5530, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6044040, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_14683-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36216428,
+ "num_truncated_tokens": 36188910
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20247-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108218, "hashes": {}}, "samples": 42702, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47342349, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 19938841, "hashes": {}}, "samples": 12739, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14146010, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_20247-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41807340,
+ "num_truncated_tokens": 41771538
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25519-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107175, "hashes": {}}, "samples": 42870, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47747949, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20037653, "hashes": {}}, "samples": 12605, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14204292, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_25519-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 41855118,
+ "num_truncated_tokens": 41819402
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30689-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108378, "hashes": {}}, "samples": 44096, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47757766, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11242566, "hashes": {}}, "samples": 7311, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7989364, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_30689-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37584456,
+ "num_truncated_tokens": 37555255
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33349-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107899, "hashes": {}}, "samples": 44227, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47738606, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 10162555, "hashes": {}}, "samples": 7007, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7321259, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_33349-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37049787,
+ "num_truncated_tokens": 37021123
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3404-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106803, "hashes": {}}, "samples": 43572, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47965402, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16127841, "hashes": {}}, "samples": 10272, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11466554, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_3404-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39950739,
+ "num_truncated_tokens": 39917726
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34295-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108544, "hashes": {}}, "samples": 43660, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48093297, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15148762, "hashes": {}}, "samples": 9787, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10806182, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_34295-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39474368,
+ "num_truncated_tokens": 39441608
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38924-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108168, "hashes": {}}, "samples": 42474, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47501299, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22189672, "hashes": {}}, "samples": 14161, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15700610, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_38924-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42895764,
+ "num_truncated_tokens": 42858534
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39480-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108008, "hashes": {}}, "samples": 43202, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47957137, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17335574, "hashes": {}}, "samples": 11037, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12337557, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_39480-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 40542800,
+ "num_truncated_tokens": 40508789
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40594-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107901, "hashes": {}}, "samples": 42681, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47488281, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20521162, "hashes": {}}, "samples": 13290, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14677312, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40594-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 42082080,
+ "num_truncated_tokens": 42045387
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40957-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108562, "hashes": {}}, "samples": 44513, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47775719, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11050679, "hashes": {}}, "samples": 7289, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7841761, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_40957-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37476587,
+ "num_truncated_tokens": 37446915
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46349-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107924, "hashes": {}}, "samples": 44455, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47861198, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9851426, "hashes": {}}, "samples": 6543, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7054913, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_46349-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36901525,
+ "num_truncated_tokens": 36872467
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49753-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108155, "hashes": {}}, "samples": 43748, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47373767, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 12543008, "hashes": {}}, "samples": 8115, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8890363, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_49753-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 38220334,
+ "num_truncated_tokens": 38190896
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53784-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108063, "hashes": {}}, "samples": 43789, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47800354, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15767015, "hashes": {}}, "samples": 9906, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11278897, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_53784-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39775635,
+ "num_truncated_tokens": 39742714
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60169-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108605, "hashes": {}}, "samples": 44390, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47564100, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9643654, "hashes": {}}, "samples": 6307, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 6820591, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_60169-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36807139,
+ "num_truncated_tokens": 36779543
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63158-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108502, "hashes": {}}, "samples": 44810, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 48048369, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 8261527, "hashes": {}}, "samples": 5423, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 5931785, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63158-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 36130529,
+ "num_truncated_tokens": 36102550
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63880-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107082, "hashes": {}}, "samples": 43743, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47859308, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 15792799, "hashes": {}}, "samples": 10054, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11200240, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_63880-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 39784912,
+ "num_truncated_tokens": 39751438
+ }
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_69687-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107874, "hashes": {}}, "samples": 42436, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47666616, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 21978392, "hashes": {}}, "samples": 14072, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15685143, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71261-tokenized-chunked-1024-512-128-backfill-nodups/index.json ADDED
@@ -0,0 +1 @@
+ {"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108667, "hashes": {}}, "samples": 43980, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47722098, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11299124, "hashes": {}}, "samples": 7348, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7997285, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_71261-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "num_tokens": 37615315,
+ "num_truncated_tokens": 37586327
+ }