Upload dataset_infos.json
Browse files — dataset_infos.json (+1, -0)
dataset_infos.json
ADDED
@@ -0,0 +1 @@
+ 1 |
{"Vlasta--Human_DNA_v0_SentencepieceTokenized_vocab10k": {"description": "", "citation": "", "homepage": "", "license": "", "features": {"input_ids": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "token_type_ids": {"feature": {"dtype": "int8", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "attention_mask": {"feature": {"dtype": "int8", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": null, "config_name": null, "version": null, "splits": {"test": {"name": "test", "num_bytes": 303514944, "num_examples": 98416, "dataset_name": "Human_DNA_v0_SentencepieceTokenized_vocab10k"}, "train": {"name": "train", "num_bytes": 2731825704, "num_examples": 885806, "dataset_name": "Human_DNA_v0_SentencepieceTokenized_vocab10k"}}, "download_checksums": null, "download_size": 887705894, "post_processing_size": null, "dataset_size": 3035340648, "size_in_bytes": 3923046542}}
|