---
license: cc-by-4.0
dataset_info:
  features:
  - name: title
    dtype: string
  - name: text
    dtype: string
  - name: contributors
    sequence: string
  - name: id
    dtype: string
  splits:
  - name: train
    num_bytes: 8757569508
    num_examples: 867023
  download_size: 4782924595
  dataset_size: 8757569508
---
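
Both the `raw` and `processed` configurations can be loaded with the `datasets` library; pass a personal or API access token if the repository requires authentication.
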
```python
from datasets import load_dataset

# Load the "raw" and "processed" configurations of the dataset.
# A personal/API access token is needed if the repository is gated or private.
raw_dataset = load_dataset(
    "wisenut-nlp-team/namu",
    "raw",
    use_auth_token="<your personal/api token>",
)

processed_dataset = load_dataset(
    "wisenut-nlp-team/namu",
    "processed",
    use_auth_token="<your personal/api token>",
)
```
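
As a quick check, the loaded splits can be inspected directly. The field names below come from the feature schema listed in the metadata above (`title`, `text`, `contributors`, `id`); this is a minimal sketch assuming the `raw` configuration follows that schema.

```python
# Minimal sketch: inspect the train split of the raw configuration.
# Assumes the feature schema from the metadata (title, text, contributors, id).
train = raw_dataset["train"]
print(train.num_rows)           # number of documents in the split

example = train[0]
print(example["title"])         # document title
print(example["text"][:200])    # first 200 characters of the body text
print(example["contributors"])  # list of contributor names
print(example["id"])            # document identifier
```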