---
license: odc-by
task_categories:
- text-generation
dataset_info:
- config_name: all
  features:
  - name: section
    dtype: string
  - name: filename
    dtype: string
  - name: text
    dtype: string
  - name: lang
    dtype: string
  splits:
  - name: test
    num_bytes: 193159922
    num_examples: 4334
  - name: validation
    num_bytes: 212095720
    num_examples: 4328
  - name: train
    num_bytes: 7445907253
    num_examples: 159185
  download_size: 2899594863
  dataset_size: 7851162895
- config_name: all-filtered
  features:
  - name: filename
    dtype: string
  - name: text
    dtype: string
  - name: flesch_reading_ease
    dtype: float64
  splits:
  - name: test
    num_bytes: 157823888.30671677
    num_examples: 3549
  - name: validation
    num_bytes: 173010266.388928
    num_examples: 3541
  - name: train
    num_bytes: 6018286403.457957
    num_examples: 129653
  download_size: 2428198753
  dataset_size: 6349120558.153602
- config_name: all-filtered-deduped
  features:
  - name: filename
    dtype: string
  - name: text
    dtype: string
  - name: flesch_reading_ease
    dtype: float64
  splits:
  - name: train
    num_bytes: 5593504495.685427
    num_examples: 121120
  - name: validation
    num_bytes: 168820995.24710205
    num_examples: 3403
  - name: test
    num_bytes: 141318723.08888263
    num_examples: 3417
  download_size: 1469473020
  dataset_size: 5903644214.021411
- config_name: doc
  features:
  - name: section
    dtype: string
  - name: filename
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 3373404810.354176
    num_examples: 66804
  - name: validation
    num_bytes: 88773810.7987941
    num_examples: 1758
  - name: test
    num_bytes: 88824307.84703004
    num_examples: 1759
  download_size: 1362475698
  dataset_size: 3551002929.0000005
- config_name: docx
  features:
  - name: section
    dtype: string
  - name: filename
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 4605598.853503184
    num_examples: 141
  - name: validation
    num_bytes: 261310.57324840763
    num_examples: 8
  - name: test
    num_bytes: 261310.57324840763
    num_examples: 8
  download_size: 1788590
  dataset_size: 5128220
- config_name: embeddings-jina-base
  features:
  - name: filename
    dtype: string
  - name: text
    dtype: string
  - name: flesch_reading_ease
    dtype: float64
  - name: embedding
    sequence: float64
  splits:
  - name: train
    num_bytes: 6327123534
    num_examples: 125834
  - name: validation
    num_bytes: 189970363
    num_examples: 3537
  - name: test
    num_bytes: 161101531
    num_examples: 3544
  download_size: 2921652603
  dataset_size: 6678195428
- config_name: logs
  features:
  - name: section
    dtype: string
  - name: filename
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 2350324475.916881
    num_examples: 9223
  - name: validation
    num_bytes: 61924411.541559376
    num_examples: 243
  - name: test
    num_bytes: 61924411.541559376
    num_examples: 243
  download_size: 718096901
  dataset_size: 2474173298.9999995
- config_name: ppt
  features:
  - name: section
    dtype: string
  - name: filename
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: validation
    num_bytes: 11813294
    num_examples: 1230
  - name: train
    num_bytes: 426593595
    num_examples: 43706
  - name: test
    num_bytes: 12242562
    num_examples: 1232
  download_size: 232304159
  dataset_size: 450649451
- config_name: pptx
  features:
  - name: section
    dtype: string
  - name: filename
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 9517778
    num_examples: 963
  - name: validation
    num_bytes: 513930
    num_examples: 53
  - name: test
    num_bytes: 436852
    num_examples: 54
  download_size: 5314310
  dataset_size: 10468560
- config_name: rtf
  features:
  - name: section
    dtype: string
  - name: filename
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 61558658.13180516
    num_examples: 942
  - name: validation
    num_bytes: 3398142.4871060173
    num_examples: 52
  - name: test
    num_bytes: 3463491.3810888254
    num_examples: 53
  download_size: 22547280
  dataset_size: 68420292
- config_name: txt
  features:
  - name: section
    dtype: string
  - name: filename
    dtype: string
  - name: text
    dtype: string
  splits:
  - name: train
    num_bytes: 1358006724.1432111
    num_examples: 41393
  - name: validation
    num_bytes: 35727522.10740843
    num_examples: 1089
  - name: test
    num_bytes: 35760329.749380335
    num_examples: 1090
  download_size: 608912009
  dataset_size: 1429494576
configs:
- config_name: all
  data_files:
  - split: test
    path: all/test-*
  - split: validation
    path: all/validation-*
  - split: train
    path: all/train-*
- config_name: all-filtered
  data_files:
  - split: test
    path: all-filtered/test-*
  - split: validation
    path: all-filtered/validation-*
  - split: train
    path: all-filtered/train-*
- config_name: all-filtered-deduped
  data_files:
  - split: train
    path: all-filtered-deduped/train-*
  - split: validation
    path: all-filtered-deduped/validation-*
  - split: test
    path: all-filtered-deduped/test-*
- config_name: doc
  data_files:
  - split: train
    path: doc/train-*
  - split: validation
    path: doc/validation-*
  - split: test
    path: doc/test-*
- config_name: docx
  data_files:
  - split: train
    path: docx/train-*
  - split: validation
    path: docx/validation-*
  - split: test
    path: docx/test-*
- config_name: embeddings-jina-base
  data_files:
  - split: train
    path: embeddings-jina-base/train-*
  - split: validation
    path: embeddings-jina-base/validation-*
  - split: test
    path: embeddings-jina-base/test-*
- config_name: logs
  data_files:
  - split: train
    path: logs/train-*
  - split: validation
    path: logs/validation-*
  - split: test
    path: logs/test-*
- config_name: ppt
  data_files:
  - split: validation
    path: ppt/validation-*
  - split: train
    path: ppt/train-*
  - split: test
    path: ppt/test-*
- config_name: pptx
  data_files:
  - split: train
    path: pptx/train-*
  - split: validation
    path: pptx/validation-*
  - split: test
    path: pptx/test-*
- config_name: rtf
  data_files:
  - split: train
    path: rtf/train-*
  - split: validation
    path: rtf/validation-*
  - split: test
    path: rtf/test-*
- config_name: txt
  data_files:
  - split: train
    path: txt/train-*
  - split: validation
    path: txt/validation-*
  - split: test
    path: txt/test-*
---
# govdocs1 Dataset: By File Extension

Markdown-parsed versions of documents in [govdocs1](https://digitalcorpora.org/corpora/file-corpora/files/) with light filtering.
## Usage

Load specific file formats (e.g., `.doc` files) parsed to markdown with [pandoc](https://pandoc.org/):

```python
from datasets import load_dataset

# Replace "doc" with desired config name
dataset = load_dataset("BEE-spoke-data/govdocs1-by-extension", "doc")
```
## Configurations

This dataset includes multiple configurations, each corresponding to a different file extension:

- `doc`
- `docx`
- `logs`
- `ppt`
- `pptx`
- `rtf`
- `txt`

In addition, the aggregate configurations `all`, `all-filtered`, `all-filtered-deduped`, and `embeddings-jina-base` combine documents across extensions.

Each configuration contains `train`, `validation`, and `test` splits.
## Dataset Details

- Download Size: Varies by configuration
- Dataset Size: Varies by configuration
- Splits: Train, Validation, Test
- Features: Section, Filename, Text

## Counts

Here's a summary of the number of examples for each configuration in the dataset (thus far):
### DOC Configuration

- Train Examples: 66,804
- Validation Examples: 1,758
- Test Examples: 1,759
DOCX Configuration
- Train Examples: 141
- Validation Examples: 8
- Test Examples: 8
Logs Configuration
- Train Examples: 9,223
- Validation Examples: 243
- Test Examples: 243
### PPT Configuration

- Train Examples: 43,706
- Validation Examples: 1,230
- Test Examples: 1,232
PPTX Configuration
- Train Examples: 963
- Validation Examples: 53
- Test Examples: 54
RTF Configuration
- Train Examples: 942
- Validation Examples: 52
- Test Examples: 53
TXT Configuration
- Train Examples: 41,393
- Validation Examples: 1,089
- Test Examples: 1,090
## Citation

```bibtex
@inproceedings{garfinkel2009bringing,
  title={Bringing Science to Digital Forensics with Standardized Forensic Corpora},
  author={Garfinkel, Simson and others},
  booktitle={Digital Forensic Research Workshop (DFRWS) 2009},
  year={2009},
  address={Montreal, Canada},
  url={https://digitalcorpora.org/corpora/file-corpora/files/}
}
```
For more detailed information on each configuration, refer to the dataset documentation.