orionweller committed
Commit 514a6e4
Parent(s): f508162
Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +24 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds +3 -0
- train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds +3 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10177-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12788-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12788-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15210-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15210-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1540-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1540-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16091-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16091-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16696-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16696-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16786-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16786-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17642-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17642-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17752-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17752-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22794-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22794-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2581-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2581-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28183-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28183-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_341-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_341-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37636-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37636-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4105-tokenized-chunked-1024-512-128-backfill-nodups/index.json +1 -0
- train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4105-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json +4 -0
.gitattributes
CHANGED
@@ -27334,3 +27334,27 @@ train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_62663-tokenized-chun
 train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_4217-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
 train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
 train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00007.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds filter=lfs diff=lfs merge=lfs -text
+train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_52969-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_81697-tokenized-chunked-1024-512-128-backfill-nodups/shard.00001.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_81697-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
+train/mlfoundations-dclm-baseline-1.0-parquet-sampled/split_52969-tokenized-chunked-1024-512-128-backfill-nodups/shard.00000.mds filter=lfs diff=lfs merge=lfs -text
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0a5014b83942c5f6d8312a89d532b11ad164703e8d4ce4e6afa87233432c345
size 67107260
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00013.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d827657f138393da3c725b82b283e521502b2063739a8f71acbe4b2a37aed32
size 67108720
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e337e9f4a43ae81262905d6005eb1ac90e02f1e8d426da62e131727188b580f0
size 67108751
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00021.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:569d4c041867ae669dfeee19fc3b6c64185a85fca14ba7135d10a4b7286a6105
size 67107200
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00031.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e67d99523e45544b23cb26779063b8319aab76f722988957c0020483d8a27049
size 67107730
train/algebraic-stack/algebraic_stack_train_0000-tokenized-chunked-1024-512-128-backfill-nodups/shard.00033.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9c32bc243aa160f36c575a92dea13849ba3e371c7a8bf665abb5620992df215f
size 67107865
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00003.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51581bfb841523b22de07e85f9629c8b3da13207bb2e75bb51aa5e80c84ae7c4
size 67107894
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00008.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d86e1c9d1b95ef0c4631d8b505930120626bf87a61744c4215dc8018c77410a1
size 67107672
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00010.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67d9d810091af4e1dce6faa5013baab5e0879c50b57e2825ff0130264c146a4e
size 67108373
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00011.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e1d19dfc341ce8676b5522b25f6685cd8843e0d360a2de60bd49b451bdfc100b
size 67108518
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00012.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d51e14928e064340e207ec18819772473562c4486bd3628cbd43ac0cc86be6a6
size 67108691
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00015.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe7a53bc7e178f4f209212fe699f135ebe25701905c7e98b9cd48d6e87b30f08
size 67107905
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00017.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0f42df5ffbd64f1ed4ffea616bab9b80f4522dc689791c4e1587fc109f597905
size 67107768
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00019.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f9cb026e7216c0c16e6fbb0ea3baf8e1e9882576185ecde85bc19cb6e8004d8c
size 67107314
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00020.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a397e98d61d7957ad37aeb65d821aa761d88c381cd45267098a00d389bbf19ff
size 67108341
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00024.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b84d1c0095a03243131a8a407cec4bae30af3b33c02a963d300b95747ab76793
size 67108420
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00027.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6d9f2478339be448bd4648c5d52a109c267a2a28ccffa30f66fb4f247008be75
size 67108838
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00028.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:06314f5c76bdf08fd01783893f8d99eb503f712df38ed1f986aea908ac9821c5
size 67108316
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00029.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:23afff6db11c1ce2fe6386982af354e1c51ca5bb1abf062d8eff502f00dbe624
size 67108793
train/algebraic-stack/algebraic_stack_train_0002-tokenized-chunked-1024-512-128-backfill-nodups/shard.00030.mds
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c5171918317f174df98281012d09f98cd41e61c186ae308b17ac3df5bbc9d5fc
size 67107900
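
Each of the shard.*.mds entries above is a Git LFS pointer (spec version, sha256 oid, byte size) rather than the shard data itself. As a minimal, stdlib-only sketch with hypothetical local paths, a pointer can be parsed and a downloaded shard checked against it:

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path: str) -> dict:
    """Parse a Git LFS pointer file into {'version', 'oid', 'size'}."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify_shard(shard_path: str, pointer_path: str) -> bool:
    """Compare a local shard's size and sha256 digest to its LFS pointer."""
    meta = parse_lfs_pointer(pointer_path)
    data = Path(shard_path).read_bytes()
    return len(data) == meta["size"] and hashlib.sha256(data).hexdigest() == meta["oid"]

Both checks are cheap relative to downloading, so a mismatch usually points to a truncated or partial transfer.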
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_10177-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67106758, "hashes": {}}, "samples": 43367, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47918539, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16013558, "hashes": {}}, "samples": 10169, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11440364, "hashes": {}}}], "version": 2}
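
The index.json above is MosaicML Streaming (MDS) metadata: each shard record lists its column encodings, raw and zstd-compressed sizes, and a sample count. A stdlib-only sketch (the split path is illustrative) for totalling shards, samples, and bytes in one split:

import json
from pathlib import Path

# Illustrative path; point this at any *-tokenized-chunked-* split directory.
split_dir = Path("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/"
                 "split_10177-tokenized-chunked-1024-512-128-backfill-nodups")

index = json.loads((split_dir / "index.json").read_text())
total_samples = sum(shard["samples"] for shard in index["shards"])
total_raw_bytes = sum(shard["raw_data"]["bytes"] for shard in index["shards"])
total_zip_bytes = sum(shard["zip_data"]["bytes"] for shard in index["shards"])

print(f"{len(index['shards'])} shards, {total_samples} samples, "
      f"{total_raw_bytes} raw bytes ({total_zip_bytes} compressed)")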
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12788-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107559, "hashes": {}}, "samples": 42565, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47755907, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 20557134, "hashes": {}}, "samples": 13160, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 14575268, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_12788-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 42107348,
  "num_truncated_tokens": 42071622
}
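
Alongside index.json, most splits carry a num_tokens.json with two token counts, as above. A stdlib-only sketch (the root path is illustrative) that sums these counts across all splits of one source directory:

import json
from pathlib import Path

# Illustrative root; sums the per-split token counts under one source directory.
root = Path("train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3")

totals = {"num_tokens": 0, "num_truncated_tokens": 0}
for counts_file in sorted(root.glob("*/num_tokens.json")):
    counts = json.loads(counts_file.read_text())
    for key in totals:
        totals[key] += counts.get(key, 0)

print(totals)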
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15210-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107753, "hashes": {}}, "samples": 43142, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47626247, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17095171, "hashes": {}}, "samples": 11033, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12097059, "hashes": {}}}], "version": 2}
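
To consume a split rather than just inspect it, the MDS layout described by these index.json files can be read with the mosaicml-streaming package; the following is a sketch under that assumption (the package is not part of this commit, and the path is illustrative):

from streaming import StreamingDataset  # pip install mosaicml-streaming

# Illustrative local path to one tokenized split.
dataset = StreamingDataset(
    local="train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/"
          "split_15210-tokenized-chunked-1024-512-128-backfill-nodups",
    shuffle=False,
)

sample = dataset[0]
# Columns match the index.json: "id" (str) and "input_ids" (uint16 array).
print(sample["id"], len(sample["input_ids"]))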
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_15210-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 40424551,
  "num_truncated_tokens": 40390987
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1540-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107500, "hashes": {}}, "samples": 43459, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47634997, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17042196, "hashes": {}}, "samples": 10806, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12052525, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_1540-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 40395211,
  "num_truncated_tokens": 40361807
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16091-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107534, "hashes": {}}, "samples": 42666, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47748962, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 22105576, "hashes": {}}, "samples": 13904, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 15685514, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16091-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 42855376,
  "num_truncated_tokens": 42817812
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16696-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108536, "hashes": {}}, "samples": 42904, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47986343, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 18721064, "hashes": {}}, "samples": 12063, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 13429722, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16696-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 41213314,
  "num_truncated_tokens": 41177912
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16786-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108170, "hashes": {}}, "samples": 43729, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47708537, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14217134, "hashes": {}}, "samples": 9340, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10134133, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_16786-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 39020282,
  "num_truncated_tokens": 38988321
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17642-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108356, "hashes": {}}, "samples": 44142, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47954385, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 11341473, "hashes": {}}, "samples": 7481, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 8089195, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17642-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 37627268,
  "num_truncated_tokens": 37598060
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17752-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108744, "hashes": {}}, "samples": 43347, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47625619, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 17893620, "hashes": {}}, "samples": 11033, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 12700172, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_17752-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 40817817,
  "num_truncated_tokens": 40784205
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22794-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108091, "hashes": {}}, "samples": 43686, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47906594, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14333829, "hashes": {}}, "samples": 9303, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10217569, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_22794-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 39080964,
  "num_truncated_tokens": 39048813
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2581-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108037, "hashes": {}}, "samples": 43912, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47688243, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 14734147, "hashes": {}}, "samples": 9540, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 10449951, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_2581-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 39266872,
  "num_truncated_tokens": 39234500
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28183-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108770, "hashes": {}}, "samples": 42279, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47463028, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 23381437, "hashes": {}}, "samples": 14813, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 16683752, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_28183-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 43477701,
  "num_truncated_tokens": 43439422
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_341-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108738, "hashes": {}}, "samples": 45111, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47936930, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 6938264, "hashes": {}}, "samples": 4691, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 4960660, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_341-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 35482496,
  "num_truncated_tokens": 35455939
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37636-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67107135, "hashes": {}}, "samples": 44406, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47859882, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 9874462, "hashes": {}}, "samples": 6569, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 7049992, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_37636-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 36913317,
  "num_truncated_tokens": 36884611
}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4105-tokenized-chunked-1024-512-128-backfill-nodups/index.json
ADDED
@@ -0,0 +1 @@
{"shards": [{"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00000.mds", "bytes": 67108107, "hashes": {}}, "samples": 43221, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00000.mds.zstd", "bytes": 47980582, "hashes": {}}}, {"column_encodings": ["str", "ndarray:uint16"], "column_names": ["id", "input_ids"], "column_sizes": [null, null], "compression": "zstd", "format": "mds", "hashes": [], "raw_data": {"basename": "shard.00001.mds", "bytes": 16362485, "hashes": {}}, "samples": 10527, "size_limit": 67108864, "version": 2, "zip_data": {"basename": "shard.00001.mds.zstd", "bytes": 11700502, "hashes": {}}}], "version": 2}
train/mlfoundations-dclm-baseline-1.0-parquet-sampled-v3/split_4105-tokenized-chunked-1024-512-128-backfill-nodups/num_tokens.json
ADDED
@@ -0,0 +1,4 @@
{
  "num_tokens": 40071504,
  "num_truncated_tokens": 40038126
}