---
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: validation
    path: data/validation-*
  - split: test
    path: data/test-*
dataset_info:
  features:
  - name: boe_text_cleaned
    dtype: string
  - name: tweet_text_cleaned
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 81243274
    num_examples: 2867
  - name: validation
    num_bytes: 8940533
    num_examples: 392
  - name: test
    num_bytes: 10272606
    num_examples: 389
  download_size: 44398951
  dataset_size: 100456413
---
|
# Dataset Card for "BOE_with_BERTIN_for_tokenize_2048" |
|
|
|
[More Information needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |
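
The metadata above declares a single `default` config with `train`, `validation`, and `test` splits and three string features. A minimal loading sketch follows; the repository id is an assumption (the card only gives the dataset name, not the Hub namespace), so substitute the actual `<user-or-org>` before running.

```python
from datasets import load_dataset

# Repo id is a placeholder: the namespace (<user-or-org>) is not stated in this card.
ds = load_dataset("<user-or-org>/BOE_with_BERTIN_for_tokenize_2048")

# Splits declared in the card metadata: train (2867), validation (392), test (389).
print(ds)

# All three features are plain strings per the metadata above.
print(ds["train"].features)  # boe_text_cleaned, tweet_text_cleaned, text
print(ds["train"][0]["text"][:200])
```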