wikitext / dataset_infos.json (as of commit 5fddba4, "Add wikitext-103-raw-v1 data files")
{
  "wikitext-103-v1": {
    "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
    "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
    "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
    "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
    "features": {
      "text": {
        "dtype": "string",
        "_type": "Value"
      }
    },
    "builder_name": "wikitext",
    "dataset_name": "wikitext",
    "config_name": "wikitext-103-v1",
    "version": {
      "version_str": "1.0.0",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "test": {
        "name": "test",
        "num_bytes": 1295575,
        "num_examples": 4358,
        "dataset_name": null
      },
      "train": {
        "name": "train",
        "num_bytes": 545141915,
        "num_examples": 1801350,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 1154751,
        "num_examples": 3760,
        "dataset_name": null
      }
    },
    "download_size": 313093838,
    "dataset_size": 547592241,
    "size_in_bytes": 860686079
  },
  "wikitext-2-v1": {
    "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
    "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
    "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
    "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
    "features": {
      "text": {
        "dtype": "string",
        "_type": "Value"
      }
    },
    "builder_name": "wikitext",
    "dataset_name": "wikitext",
    "config_name": "wikitext-2-v1",
    "version": {
      "version_str": "1.0.0",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "test": {
        "name": "test",
        "num_bytes": 1270947,
        "num_examples": 4358,
        "dataset_name": null
      },
      "train": {
        "name": "train",
        "num_bytes": 10918118,
        "num_examples": 36718,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 1134123,
        "num_examples": 3760,
        "dataset_name": null
      }
    },
    "download_size": 7371282,
    "dataset_size": 13323188,
    "size_in_bytes": 20694470
  },
  "wikitext-103-raw-v1": {
    "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
    "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
    "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
    "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
    "features": {
      "text": {
        "dtype": "string",
        "_type": "Value"
      }
    },
    "builder_name": "wikitext",
    "dataset_name": "wikitext",
    "config_name": "wikitext-103-raw-v1",
    "version": {
      "version_str": "1.0.0",
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "test": {
        "name": "test",
        "num_bytes": 1305088,
        "num_examples": 4358,
        "dataset_name": null
      },
      "train": {
        "name": "train",
        "num_bytes": 546500949,
        "num_examples": 1801350,
        "dataset_name": null
      },
      "validation": {
        "name": "validation",
        "num_bytes": 1159288,
        "num_examples": 3760,
        "dataset_name": null
      }
    },
    "download_size": 315466397,
    "dataset_size": 548965325,
    "size_in_bytes": 864431722
  },
  "wikitext-2-raw-v1": {
    "description": " The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified\n Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike\n License.\n",
    "citation": "@misc{merity2016pointer,\n title={Pointer Sentinel Mixture Models},\n author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},\n year={2016},\n eprint={1609.07843},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n",
    "homepage": "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/",
    "license": "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)",
    "features": {
      "text": {
        "dtype": "string",
        "id": null,
        "_type": "Value"
      }
    },
    "post_processed": null,
    "supervised_keys": null,
    "task_templates": null,
    "builder_name": "wikitext",
    "config_name": "wikitext-2-raw-v1",
    "version": {
      "version_str": "1.0.0",
      "description": null,
      "major": 1,
      "minor": 0,
      "patch": 0
    },
    "splits": {
      "test": {
        "name": "test",
        "num_bytes": 1305092,
        "num_examples": 4358,
        "dataset_name": "wikitext"
      },
      "train": {
        "name": "train",
        "num_bytes": 11061733,
        "num_examples": 36718,
        "dataset_name": "wikitext"
      },
      "validation": {
        "name": "validation",
        "num_bytes": 1159292,
        "num_examples": 3760,
        "dataset_name": "wikitext"
      }
    },
    "download_checksums": {
      "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip": {
        "num_bytes": 4721645,
        "checksum": "ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11"
      }
    },
    "download_size": 4721645,
    "post_processing_size": null,
    "dataset_size": 13526117,
    "size_in_bytes": 18247762
  }
}
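
The configurations and split sizes recorded above can be checked directly with the datasets library; a minimal sketch (the config name is one of the "config_name" keys in this file, and the printed lengths should match the corresponding "num_examples" values):

from datasets import load_dataset

# Load one of the configurations described in this file; the second
# argument must match a "config_name" key above.
ds = load_dataset("wikitext", "wikitext-103-raw-v1")

# Split sizes should line up with the "num_examples" entries:
# train=1801350, validation=3760, test=4358.
for split_name, split in ds.items():
    print(split_name, len(split))

# Each example exposes the single "text" feature declared in "features".
print(ds["train"][10]["text"])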