{
"builder_name": "wikitext_document_level",
"citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
"config_name": "wikitext-2-raw-v1",
"dataset_name": "wikitext_document_level",
"dataset_size": 13380433,
"description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
"download_checksums": {
"https://wikitext.smerity.com/wikitext-2-raw-v1.zip": {
"num_bytes": 4721645,
"checksum": null
}
},
"download_size": 4721645,
"features": {
"page": {
"dtype": "string",
"_type": "Value"
}
},
"homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
"license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
"size_in_bytes": 18102078,
"splits": {
"test": {
"name": "test",
"num_bytes": 1290775,
"num_examples": 62,
"dataset_name": "wikitext_document_level"
},
"train": {
"name": "train",
"num_bytes": 10942633,
"num_examples": 629,
"dataset_name": "wikitext_document_level"
},
"validation": {
"name": "validation",
"num_bytes": 1147025,
"num_examples": 60,
"dataset_name": "wikitext_document_level"
}
},
"version": {
"version_str": "1.0.0",
"major": 1,
"minor": 0,
"patch": 0
}
}
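
This metadata mirrors what the Hugging Face datasets library records when it prepares the wikitext_document_level builder with the wikitext-2-raw-v1 config. As a minimal sketch of how the counts above can be checked (the EleutherAI/wikitext_document_level repo id is an assumption for illustration; substitute whichever repository actually hosts these files):

    from datasets import load_dataset

    # Repo id is an assumption; this dataset_info.json corresponds to the
    # "wikitext_document_level" builder with the "wikitext-2-raw-v1" config.
    ds = load_dataset("EleutherAI/wikitext_document_level", "wikitext-2-raw-v1")

    # Row counts should match the "splits" metadata above.
    print(ds["train"].num_rows)       # 629 documents
    print(ds["validation"].num_rows)  # 60 documents
    print(ds["test"].num_rows)        # 62 documents

    # Per "features", each example is a single string field named "page"
    # holding one full Wikipedia article.
    print(ds["validation"][0]["page"][:200])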