---
dataset_info:
  config_name: gpt-4
  features:
  - name: id
    dtype: string
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: token_length
    dtype: int64
  - name: text_length
    dtype: int64
  splits:
  - name: train
    num_bytes: 19998333901
    num_examples: 6458670
  download_size: 11604627673
  dataset_size: 19998333901
configs:
- config_name: gpt-4
  data_files:
  - split: train
    path: gpt-4/train-*
---
# Dataset Card for "wikipedia_token"
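Each record carries a Wikipedia article together with two precomputed length columns, `token_length` and `text_length`. A minimal loading sketch using the standard `datasets` API; `user/wikipedia_token` is a placeholder repo id, substitute the actual path of the repository this card accompanies:

```python
from datasets import load_dataset

# "user/wikipedia_token" is a hypothetical repo id; replace it with the
# real dataset path. "gpt-4" is the single config defined in the YAML above.
ds = load_dataset("user/wikipedia_token", "gpt-4", split="train")

# Inspect one record: article text plus its precomputed length columns.
example = ds[0]
print(example["title"], example["token_length"], example["text_length"])
```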
## Token count

Number of training examples per `token_length` bucket (out of 6458670 total):

| Token range | Examples | Percent |
| --- | --- | --- |
| ~1024 | 5320881 | 82.38% |
| 1024~2048 | 693911 | 10.74% |
| 2048~4096 | 300935 | 4.66% |
| 4096~8192 | 106221 | 1.64% |
| 8192~16384 | 30611 | 0.47% |
| 16384~32768 | 4812 | 0.07% |
| 32768~65536 | 1253 | 0.02% |
| 65536~128000 | 46 | 0.00% |
| 128000~ | 0 | 0.00% |

## Text count

Number of training examples per `text_length` bucket:

| Text length range | Examples | Percent |
| --- | --- | --- |
| 0~1024 | 2751539 | 42.60% |
| 1024~2048 | 1310778 | 20.29% |
| 2048~4096 | 1179150 | 18.26% |
| 4096~8192 | 722101 | 11.18% |
| 8192~16384 | 329062 | 5.09% |
| 16384~32768 | 121237 | 1.88% |
| 32768~65536 | 36894 | 0.57% |
| 65536~ | 7909 | 0.12% |
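A sketch of how these bucket counts can be reproduced from the precomputed `token_length` column. The half-open interpretation of the ranges (`lo <= n < hi`) is an assumption, as is the placeholder repo id:

```python
from collections import Counter

from datasets import load_dataset

# Placeholder repo id, as above.
ds = load_dataset("user/wikipedia_token", "gpt-4", split="train")

# Bucket edges mirror the ranges in the tables; buckets are assumed to be
# half-open, i.e. lo <= n < hi.
EDGES = [1024, 2048, 4096, 8192, 16384, 32768, 65536, 128000]

def bucket(n: int) -> str:
    """Map a length n to its bucket label, e.g. 1500 -> '1024~2048'."""
    lo = 0
    for hi in EDGES:
        if n < hi:
            return f"{lo}~{hi}" if lo else f"~{hi}"
        lo = hi
    return f"{EDGES[-1]}~"  # everything at or above the last edge

counts = Counter(bucket(n) for n in ds["token_length"])
total = sum(counts.values())

labels = ["~1024", "1024~2048", "2048~4096", "4096~8192", "8192~16384",
          "16384~32768", "32768~65536", "65536~128000", "128000~"]
for label in labels:
    c = counts.get(label, 0)
    print(f"{label}: {c} ({c / total:.2%})")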