docs / huggingface_datatrove.txt
danidarko's picture
Upload 59 files
b1d4de0 verified
# File: datatrove-main/src/datatrove/data.py
""""""
from dataclasses import dataclass, field
from typing import Generator, NewType
class MediaType:
IMAGE = 0
VIDEO = 1
AUDIO = 2
@dataclass
class Media:
type: int
url: str
alt: str | None = None
local_path: str | None = None
@dataclass
class Document:
text: str
id: str
media: list[Media] = field(default_factory=list)
metadata: dict[str, str | int | float | bool] = field(default_factory=dict)
DocumentsPipeline = NewType('DocumentsPipeline', Generator[Document, None, None] | None)
# File: datatrove-main/src/datatrove/executor/base.py
import dataclasses
import json
import random
import time
from abc import ABC, abstractmethod
from collections import deque
from collections.abc import Sequence
from typing import Callable
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.logging import add_task_logger, close_task_logger, get_random_str, get_timestamp, log_pipeline, logger
from datatrove.utils.stats import PipelineStats
class PipelineExecutor(ABC):
@abstractmethod
def __init__(self, pipeline: list[PipelineStep | Callable], logging_dir: DataFolderLike=None, skip_completed: bool=True, randomize_start_duration: int=0):
self.pipeline: list[PipelineStep | Callable] = pipeline
self.logging_dir = get_datafolder(logging_dir if logging_dir else f'logs/{get_timestamp()}_{get_random_str()}')
self.skip_completed = skip_completed
self.randomize_start_duration = randomize_start_duration
@abstractmethod
def run(self):
pass
@property
@abstractmethod
def world_size(self) -> int:
return 0
def _run_for_rank(self, rank: int, local_rank: int=0) -> PipelineStats:
if self.is_rank_completed(rank):
logger.info(f'Skipping rank={rank!r} as it has already been completed.')
return PipelineStats()
logfile = add_task_logger(self.logging_dir, rank, local_rank)
log_pipeline(self.pipeline)
if self.randomize_start_duration > 0:
time.sleep(random.randint(0, self.randomize_start_duration))
try:
pipelined_data = None
for pipeline_step in self.pipeline:
if callable(pipeline_step):
pipelined_data = pipeline_step(pipelined_data, rank, self.world_size)
elif isinstance(pipeline_step, Sequence) and (not isinstance(pipeline_step, str)):
pipelined_data = pipeline_step
else:
raise ValueError
if pipelined_data:
deque(pipelined_data, maxlen=0)
logger.success(f'Processing done for rank={rank!r}')
stats = PipelineStats(self.pipeline)
with self.logging_dir.open(f'stats/{rank:05d}.json', 'w') as f:
stats.save_to_disk(f)
logger.info(stats.get_repr(f'Task {rank}'))
self.mark_rank_as_completed(rank)
except Exception as e:
logger.exception(e)
raise e
finally:
close_task_logger(logfile)
return stats
def is_rank_completed(self, rank: int) -> bool:
return self.skip_completed and self.logging_dir.isfile(f'completions/{rank:05d}')
def mark_rank_as_completed(self, rank: int):
self.logging_dir.open(f'completions/{rank:05d}', 'w').close()
def get_incomplete_ranks(self, ranks=None) -> list[int]:
completed = set(self.logging_dir.list_files('completions'))
return list(filter(lambda rank: not self.skip_completed or f'completions/{rank:05d}' not in completed, ranks if ranks is not None else range(self.world_size)))
def to_json(self, indent=4) -> str:
data = self.__dict__
data['pipeline'] = [{a: b for (a, b) in x.__dict__.items() if a != 'stats'} for x in data['pipeline']]
return json.dumps(data, indent=indent)
def save_executor_as_json(self, indent: int=4):
with self.logging_dir.open('executor.json', 'w') as f:
json.dump(self, f, cls=ExecutorJSONEncoder, indent=indent)
class ExecutorJSONEncoder(json.JSONEncoder):
def default(self, o):
if dataclasses.is_dataclass(o):
return dataclasses.asdict(o)
if isinstance(o, PipelineExecutor):
return o.__dict__ | {'world_size': o.world_size}
if isinstance(o, PipelineStep):
return {a: b for (a, b) in o.__dict__.items() if a != 'stats'}
return str(o)
# File: datatrove-main/src/datatrove/executor/local.py
import time
from copy import deepcopy
from functools import partial
from typing import Callable
import multiprocess
from datatrove.executor.base import PipelineExecutor
from datatrove.io import DataFolderLike
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.logging import logger
from datatrove.utils.stats import PipelineStats
class LocalPipelineExecutor(PipelineExecutor):
def __init__(self, pipeline: list[PipelineStep | Callable], tasks: int=1, workers: int=-1, logging_dir: DataFolderLike=None, depends: 'LocalPipelineExecutor'=None, skip_completed: bool=True, start_method: str='forkserver', local_tasks: int=-1, local_rank_offset: int=0, randomize_start_duration: int=0):
super().__init__(pipeline, logging_dir, skip_completed, randomize_start_duration)
self.tasks = tasks
self.workers = workers if workers != -1 else tasks
self.start_method = start_method
self.local_tasks = local_tasks if local_tasks != -1 else tasks
self.local_rank_offset = local_rank_offset
self.depends = depends
if self.local_rank_offset + self.local_tasks > self.tasks:
raise ValueError(f'Local tasks go beyond the total tasks (local_rank_offset + local_tasks = {self.local_rank_offset + self.local_tasks} > {self.tasks} = tasks)')
self._launched = False
def _launch_run_for_rank(self, rank: int, ranks_q, completed=None, completed_lock=None) -> PipelineStats:
local_rank = ranks_q.get()
try:
return self._run_for_rank(rank, local_rank)
finally:
if completed and completed_lock:
with completed_lock:
completed.value += 1
logger.info(f'{completed.value}/{self.world_size} tasks completed.')
ranks_q.put(local_rank)
def run(self):
assert not self.depends or isinstance(self.depends, LocalPipelineExecutor), 'depends= must be a LocalPipelineExecutor'
if self.depends:
if not self.depends._launched:
logger.info(f'Launching dependency job "{self.depends}"')
self.depends.run()
while (incomplete := len(self.depends.get_incomplete_ranks())) > 0:
logger.info(f'Dependency job still has {incomplete}/{self.depends.world_size} tasks. Waiting...')
time.sleep(2 * 60)
self._launched = True
if all(map(self.is_rank_completed, range(self.local_rank_offset, self.local_rank_offset + self.local_tasks))):
logger.info(f'Not doing anything as all {self.local_tasks} tasks have already been completed.')
return
self.save_executor_as_json()
mg = multiprocess.Manager()
ranks_q = mg.Queue()
for i in range(self.workers):
ranks_q.put(i)
ranks_to_run = self.get_incomplete_ranks(range(self.local_rank_offset, self.local_rank_offset + self.local_tasks))
if (skipped := (self.local_tasks - len(ranks_to_run))) > 0:
logger.info(f'Skipping {skipped} already completed tasks')
if self.workers == 1:
pipeline = self.pipeline
stats = []
for rank in ranks_to_run:
self.pipeline = deepcopy(pipeline)
stats.append(self._launch_run_for_rank(rank, ranks_q))
else:
completed_counter = mg.Value('i', skipped)
completed_lock = mg.Lock()
ctx = multiprocess.get_context(self.start_method)
with ctx.Pool(self.workers) as pool:
stats = list(pool.imap_unordered(partial(self._launch_run_for_rank, ranks_q=ranks_q, completed=completed_counter, completed_lock=completed_lock), ranks_to_run))
stats = sum(stats, start=PipelineStats())
with self.logging_dir.open('stats.json', 'wt') as statsfile:
stats.save_to_disk(statsfile)
logger.success(stats.get_repr(f'All {self.local_tasks} tasks'))
return stats
@property
def world_size(self) -> int:
return self.tasks
# File: datatrove-main/src/datatrove/executor/slurm.py
from __future__ import annotations
import json
import math
import os
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
from copy import deepcopy
from typing import Callable
import dill
from dill import CONTENTS_FMODE
from datatrove.executor.base import PipelineExecutor
from datatrove.io import DataFolderLike
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.logging import get_random_str, get_timestamp, logger
def requeue_handler(signum, _frame):
signame = signal.Signals(signum).name
logger.warning(f'Received signal {signum} ({signame}). Requeueing and exiting...')
subprocess.run(['scontrol', 'requeue', os.environ.get('SLURM_JOB_ID')])
sys.exit(15)
class SlurmPipelineExecutor(PipelineExecutor):
def __init__(self, pipeline: list[PipelineStep | Callable], tasks: int, time: str, partition: str, cpus_per_task: int=1, mem_per_cpu_gb: int=2, workers: int=-1, job_name: str='data_processing', qos: str='normal', env_command: str=None, condaenv: str=None, venv_path: str=None, sbatch_args: dict | None=None, max_array_size: int=1001, depends: SlurmPipelineExecutor | None=None, depends_job_id: str | None=None, logging_dir: DataFolderLike=None, skip_completed: bool=True, slurm_logs_folder: str=None, max_array_launch_parallel: bool=False, stagger_max_array_jobs: int=0, run_on_dependency_fail: bool=False, randomize_start_duration: int=0, requeue_signals: tuple[str] | None=('SIGUSR1',), mail_type: str='ALL', mail_user: str=None, requeue: bool=True, srun_args: dict=None, tasks_per_job: int=1):
super().__init__(pipeline, logging_dir, skip_completed, randomize_start_duration)
self.tasks = tasks
self.workers = workers
self.partition = partition
self.cpus_per_task = cpus_per_task
self.mem_per_cpu_gb = mem_per_cpu_gb
self.tasks_per_job = tasks_per_job
self.time = time
self.job_name = job_name
self.qos = qos
self.env_command = env_command
self.condaenv = condaenv
self.venv_path = venv_path
self.depends = depends
self.depends_job_id = depends_job_id
self._sbatch_args = sbatch_args if sbatch_args else {}
self.max_array_size = max_array_size
self.max_array_launch_parallel = max_array_launch_parallel
self.stagger_max_array_jobs = stagger_max_array_jobs
self.run_on_dependency_fail = run_on_dependency_fail
self.randomize_start_duration = randomize_start_duration
self.job_id = None
self.requeue_signals = requeue_signals
self.mail_type = mail_type
self.mail_user = mail_user
self.srun_args = srun_args
self.slurm_logs_folder = slurm_logs_folder if slurm_logs_folder else f'slurm_logs/{self.job_name}/{get_timestamp()}_{get_random_str()}' if not self.logging_dir.is_local() else self.logging_dir.resolve_paths('slurm_logs')
self.requeue = requeue
def run(self):
if 'SLURM_ARRAY_TASK_ID' in os.environ:
slurm_rank = int(os.environ['SLURM_ARRAY_TASK_ID']) + self.max_array_size * int(os.environ.get('RUN_OFFSET', 0))
ranks_to_run_range = (slurm_rank * self.tasks_per_job, (slurm_rank + 1) * self.tasks_per_job)
with self.logging_dir.open('ranks_to_run.json', 'r') as ranks_to_run_file:
all_ranks = json.load(ranks_to_run_file)
if ranks_to_run_range[0] >= len(all_ranks):
return
for ss in self.requeue_signals or []:
signal.signal(signal.Signals[ss], requeue_handler)
for rank_to_run in range(*ranks_to_run_range):
if rank_to_run >= len(all_ranks):
break
rank = all_ranks[rank_to_run]
self._run_for_rank(rank)
else:
self.launch_job()
def launch_merge_stats(self):
launch_slurm_job(self.get_launch_file_contents({**self.get_sbatch_args(), 'cpus-per-task': 1, 'mem-per-cpu': '1G', 'dependency': f'afterok:{self.job_id}'}, f"merge_stats {self.logging_dir.resolve_paths('stats')} -o {self.logging_dir.resolve_paths('stats.json')}"))
@property
def dependency(self) -> str:
dependency = []
if self.depends_job_id:
dependency.append(f"{('afterany' if self.run_on_dependency_fail else 'afterok')}:{self.depends_job_id}")
if self.job_id and (not self.max_array_launch_parallel):
dependency.append(f'afterany:{self.job_id}')
return ','.join(dependency)
def launch_job(self):
assert not self.depends or isinstance(self.depends, SlurmPipelineExecutor), 'depends= must be a SlurmPipelineExecutor'
if self.depends:
if not self.depends.job_id:
logger.info(f'Launching dependency job "{self.depends.job_name}"')
self.depends.launch_job()
if self.depends.job_id != -1:
self.depends_job_id = self.depends.job_id
self.depends = None
ranks_to_run = self.get_incomplete_ranks()
if len(ranks_to_run) == 0:
logger.info(f'Skipping launch of {self.job_name} as all {self.tasks} tasks have already been completed.')
self.job_id = -1
return
executor = deepcopy(self)
with self.logging_dir.open('executor.pik', 'wb') as executor_f:
dill.dump(executor, executor_f, fmode=CONTENTS_FMODE)
self.save_executor_as_json()
with self.logging_dir.open('ranks_to_run.json', 'w') as ranks_to_run_file:
json.dump(ranks_to_run, ranks_to_run_file)
nb_jobs_to_launch = math.ceil(len(ranks_to_run) / self.tasks_per_job)
max_array = min(nb_jobs_to_launch, self.max_array_size) if self.max_array_size != -1 else nb_jobs_to_launch
srun_args_str = ' '.join([f'--{k}={v}' for (k, v) in self.srun_args.items()]) if self.srun_args else ''
launch_file_contents = self.get_launch_file_contents(self.get_sbatch_args(max_array), f"srun {srun_args_str} -l launch_pickled_pipeline {self.logging_dir.resolve_paths('executor.pik')}")
with self.logging_dir.open('launch_script.slurm', 'w') as launchscript_f:
launchscript_f.write(launch_file_contents)
logger.info(f'''Launching Slurm job {self.job_name} ({len(ranks_to_run)} tasks) with launch script "{self.logging_dir.resolve_paths('launch_script.slurm')}"''')
launched_jobs = 0
while launched_jobs * max_array < nb_jobs_to_launch:
if launched_jobs and self.max_array_launch_parallel and (self.stagger_max_array_jobs > 0):
time.sleep(self.stagger_max_array_jobs)
args = [f'--export=ALL,RUN_OFFSET={launched_jobs}']
if self.dependency:
args.append(f'--dependency={self.dependency}')
self.job_id = launch_slurm_job(launch_file_contents, *args)
launched_jobs += 1
logger.info(f'Slurm job launched successfully with (last) id={self.job_id}.')
self.launch_merge_stats()
def get_sbatch_args(self, max_array: int=1) -> dict:
os.makedirs(self.slurm_logs_folder, exist_ok=True)
slurm_logfile = os.path.join(self.slurm_logs_folder, '%A_%a.out')
sbatch_args = {'cpus-per-task': self.cpus_per_task, 'mem-per-cpu': f'{self.mem_per_cpu_gb}G', 'partition': self.partition, 'job-name': self.job_name, 'time': self.time, 'output': slurm_logfile, 'error': slurm_logfile, 'array': f"0-{max_array - 1}{(f'%{self.workers}' if self.workers != -1 else '')}", **({'mail-type': self.mail_type, 'mail-user': self.mail_user} if self.mail_user else {}), **self._sbatch_args}
if self.requeue:
sbatch_args['requeue'] = ''
if self.qos:
sbatch_args['qos'] = self.qos
return sbatch_args
def get_launch_file_contents(self, sbatch_args: dict, run_script: str) -> str:
args = '\n'.join([f'#SBATCH --{k}={v}' if v else f'#SBATCH --{k}' for (k, v) in sbatch_args.items()])
env_command = self.env_command if self.env_command else f'conda init bash\n conda activate {self.condaenv}\n source ~/.bashrc' if self.condaenv else f'source {self.venv_path}' if self.venv_path else ''
return '#!/bin/bash\n' + args + textwrap.dedent(f'\n echo "Starting data processing job {self.job_name}"\n {env_command}\n set -xe\n export PYTHONUNBUFFERED=TRUE\n {run_script}\n ')
@property
def world_size(self) -> int:
return self.tasks
def launch_slurm_job(launch_file_contents, *args):
with tempfile.NamedTemporaryFile('w') as f:
f.write(launch_file_contents)
f.flush()
return subprocess.check_output(['sbatch', *args, f.name]).decode('utf-8').split()[-1]
# File: datatrove-main/src/datatrove/io.py
import os.path
from glob import has_magic
from typing import IO, Callable, TypeAlias
from fsspec import AbstractFileSystem
from fsspec import open as fsspec_open
from fsspec.callbacks import NoOpCallback, TqdmCallback
from fsspec.core import get_fs_token_paths, strip_protocol, url_to_fs
from fsspec.implementations.cached import CachingFileSystem
from fsspec.implementations.dirfs import DirFileSystem
from fsspec.implementations.local import LocalFileSystem
from huggingface_hub import HfFileSystem, cached_assets_path
from datatrove.utils._import_utils import check_required_dependencies
from datatrove.utils.logging import logger
class OutputFileManager:
def __init__(self, fs, mode: str='wt', compression: str | None='infer'):
self.fs = fs
self.mode = mode
self.compression = compression
self._output_files = {}
def get_file(self, filename):
if filename not in self._output_files:
self._output_files[filename] = self.fs.open(filename, mode=self.mode, compression=self.compression)
return self._output_files[filename]
def get_open_files(self):
return self._output_files
def pop(self, filename):
file = self.get_file(filename)
self._output_files.pop(filename)
return file
def write(self, filename, data):
self.get_file(filename).write(data)
def __enter__(self):
return self
def close(self):
for file in self._output_files.values():
file.close()
self._output_files.clear()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class DataFolder(DirFileSystem):
def __init__(self, path: str, fs: AbstractFileSystem | None=None, auto_mkdir: bool=True, **storage_options):
super().__init__(path=path, fs=fs if fs else url_to_fs(path, **storage_options)[0])
self.auto_mkdir = auto_mkdir
def list_files(self, subdirectory: str='', recursive: bool=True, glob_pattern: str | None=None, include_directories: bool=False) -> list[str]:
if glob_pattern and (not has_magic(glob_pattern)):
glob_pattern = f'*{glob_pattern}'
extra_options = {}
if isinstance(_get_true_fs(self.fs), HfFileSystem):
extra_options['expand_info'] = False
if include_directories and (not glob_pattern):
extra_options['withdirs'] = True
return sorted([f for (f, info) in (self.find(subdirectory, maxdepth=1 if not recursive else None, detail=True, **extra_options) if not glob_pattern else self.glob(self.fs.sep.join([subdirectory, glob_pattern]) if subdirectory else glob_pattern, maxdepth=1 if not recursive else None, detail=True, **extra_options)).items() if include_directories or info['type'] != 'directory'])
def get_shard(self, rank: int, world_size: int, **kwargs) -> list[str]:
return self.list_files(**kwargs)[rank::world_size]
def resolve_paths(self, paths) -> list[str] | str:
if isinstance(paths, str):
if isinstance(self.fs, LocalFileSystem):
return self.fs._strip_protocol(self._join(paths))
return self.fs.unstrip_protocol(self._join(paths))
return list(map(self.resolve_paths, paths))
def get_output_file_manager(self, **kwargs) -> OutputFileManager:
return OutputFileManager(self, **kwargs)
def open_files(self, paths, mode='rb', **kwargs):
return [self.open(path, mode=mode, **kwargs) for path in paths]
def open(self, path, mode='rb', *args, **kwargs):
if self.auto_mkdir and ('w' in mode or 'a' in mode):
self.fs.makedirs(self.fs._parent(self._join(path)), exist_ok=True)
return super().open(path, *args, mode=mode, **kwargs)
def is_local(self):
return isinstance(self.fs, LocalFileSystem)
def get_datafolder(data: DataFolder | str | tuple[str, dict] | tuple[str, AbstractFileSystem]) -> DataFolder:
if isinstance(data, DataFolder):
return data
if isinstance(data, str):
return DataFolder(data)
if isinstance(data, tuple) and isinstance(data[0], str) and isinstance(data[1], dict):
return DataFolder(data[0], **data[1])
if isinstance(data, tuple) and isinstance(data[0], str) and isinstance(data[1], AbstractFileSystem):
return DataFolder(data[0], fs=data[1])
raise ValueError('You must pass a DataFolder instance, a str path, a (str path, fs_init_kwargs) or (str path, fs object)')
def open_file(file: IO | str, mode='rt', **kwargs):
if isinstance(file, str):
return fsspec_open(file, mode, **kwargs)
return file
def file_exists(path: str):
(fs, a, fpath) = get_fs_token_paths(path)
return fs.exists(fpath[0])
def download_file(remote_path: str, local_path: str, progress: bool=True):
(fs, _, paths) = get_fs_token_paths(remote_path)
fs.get_file(paths[0], local_path, callback=TqdmCallback(tqdm_kwargs={'desc': f'↓ Downloading {os.path.basename(remote_path)}', 'unit': 'B', 'unit_scale': True, 'unit_divisor': 1024, 'miniters': 1}) if progress else NoOpCallback())
def safely_create_file(file_to_lock: str, do_processing: Callable):
check_required_dependencies('io', ['fasteners'])
from fasteners import InterProcessLock
completed_file = f'{file_to_lock}.completed'
if os.path.exists(completed_file):
return
with InterProcessLock(f'{file_to_lock}.lock'):
if not os.path.exists(completed_file):
do_processing()
open(completed_file, 'a').close()
def cached_asset_path_or_download(remote_path: str, progress: bool=True, namespace: str='default', subfolder: str='default', desc: str='file'):
download_dir = cached_assets_path(library_name='datatrove', namespace=namespace, subfolder=subfolder)
local_path = os.path.join(download_dir, strip_protocol(remote_path).replace('/', '_'))
def do_download_file():
logger.info(f'⬇️ Downloading {desc} from "{remote_path}"...')
download_file(remote_path, local_path, progress)
logger.info(f'⬇️ Downloaded {desc} to "{local_path}".')
safely_create_file(local_path, do_download_file)
return local_path
DataFolderLike: TypeAlias = str | tuple[str, dict] | DataFolder
DataFileLike: TypeAlias = str | tuple[str, dict]
def get_shard_from_paths_file(paths_file: DataFileLike, rank: int, world_size):
kwargs = {}
if isinstance(paths_file, tuple):
(paths_file, kwargs) = paths_file
with open_file(paths_file, mode='rt', **kwargs) as f:
for (pathi, path) in enumerate(f):
if (pathi - rank) % world_size == 0:
yield path.strip()
def _get_true_fs(fs: AbstractFileSystem):
if isinstance(fs, CachingFileSystem):
return fs.fs
return fs
# File: datatrove-main/src/datatrove/pipeline/base.py
from abc import ABC, abstractmethod
from itertools import chain
from datatrove.data import Document, DocumentsPipeline
from datatrove.utils._import_utils import check_required_dependencies
from datatrove.utils.stats import Stats
class PipelineStep(ABC):
name: str = None
type: str = None
def __new__(cls, *args, **kwargs):
required_dependencies = chain.from_iterable((getattr(t, '_requires_dependencies', []) for t in cls.mro()))
if required_dependencies:
check_required_dependencies(cls.__name__, required_dependencies)
return super().__new__(cls)
def __init__(self):
super().__init__()
self.stats = Stats(str(self))
def stat_update(self, *labels, value: int=1, unit: str=None):
for label in labels:
self.stats[label].update(value, unit)
def update_doc_stats(self, document: Document):
self.stat_update('doc_len', value=len(document.text), unit='doc')
if (token_count := document.metadata.get('token_count', None)):
self.stat_update('doc_len_tokens', value=token_count, unit='doc')
def track_time(self, unit: str=None):
if unit:
self.stats.time_stats.unit = unit
return self.stats.time_stats
def __repr__(self):
return f'{self.type}: {self.name}'
@abstractmethod
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
if data:
yield from data
def __call__(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1) -> DocumentsPipeline:
return self.run(data, rank, world_size)
# File: datatrove-main/src/datatrove/pipeline/decont/n_grams.py
""""""
import os
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from typing import Tuple
import numpy as np
from datatrove.data import Document, DocumentsPipeline
from datatrove.io import DataFolderLike, file_exists, get_datafolder, open_file
from datatrove.pipeline.base import PipelineStep
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.binaryio import read_np_from_file
from datatrove.utils.hashing import HashConfig, create_hash_func
from datatrove.utils.logging import logger
from datatrove.utils.text import TextNormConfig, ngrams, simplify_text
from datatrove.utils.typeshelper import Languages
from datatrove.utils.word_tokenizers import load_word_tokenizer
@dataclass
class NGramsDecontConfig:
n_grams: int = 12
find_query_ngrams: bool = False
find_overlap_ngrams: bool = True
norm_config: TextNormConfig = field(default_factory=TextNormConfig)
hash_config: HashConfig = field(default_factory=HashConfig)
class NGramsDecontIndexer(PipelineStep):
type = '🦠 - DECONT'
name = '💥 N-grams build index'
_requires_dependencies = ['lighteval']
def __init__(self, output_folder: DataFolderLike, lighteval_tasks: str | list[str] | None=None, custom_lighteval_tasks: str | None=None, config: NGramsDecontConfig=None, language: str=Languages.english):
super().__init__()
self.output_folder = get_datafolder(output_folder)
if isinstance(lighteval_tasks, str):
if file_exists(lighteval_tasks):
with open_file(lighteval_tasks, 'rt') as f:
self.lighteval_tasks = f.read().strip().splitlines()
else:
self.lighteval_tasks = [lighteval_tasks]
else:
self.lighteval_tasks = lighteval_tasks
self.custom_lighteval_tasks = custom_lighteval_tasks
self.config = config or NGramsDecontConfig()
self.tokenizer = load_word_tokenizer(language)
self.hash_func = create_hash_func(self.config.hash_config)
def compute_hashes(self, label: str, query: str | None=None) -> list[int]:
label_tokens = self.tokenizer.word_tokenize(simplify_text(label, self.config.norm_config))
ngrams_to_compute = list(ngrams(label_tokens, self.config.n_grams))
if query is not None:
query_tokens = self.tokenizer.word_tokenize(simplify_text(query, self.config.norm_config))
if self.config.find_query_ngrams:
ngrams_to_compute.extend(ngrams(query_tokens, self.config.n_grams))
if self.config.find_overlap_ngrams:
''
ngrams_to_compute.extend([query_tokens[-self.config.n_grams + 1 + i:] + label_tokens[:i + 1] for i in range(self.config.n_grams - 1) if len(query_tokens) >= self.config.n_grams - 1 - i and len(label_tokens) >= i + 1])
return list(map(self.hash_func, map(' '.join, ngrams_to_compute)))
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1):
if world_size != 1:
raise ValueError('Decontamination index building requires a single worker.')
hashes = defaultdict(set)
if data:
for doc in data:
if not self.config.find_query_ngrams and 'query' not in doc.metadata:
raise ValueError("only_label_ngrams is False but could not find 'query' field in documents metadata")
hashes[doc.metadata.get('task', 'input')].update(self.compute_hashes(doc.text, doc.metadata.get('query', None)))
from lighteval.tasks.lighteval_task import LightevalTask
from lighteval.tasks.registry import Registry
task_dict = Registry(cache_dir=os.getenv('HF_HOME')).get_task_dict(self.lighteval_tasks, custom_tasks=self.custom_lighteval_tasks)
LightevalTask.load_datasets(task_dict.values())
for (task_name, task) in task_dict.items():
for eval_doc in task.eval_docs():
try:
golds = eval_doc.get_golds()
query = eval_doc.query
except Exception as e:
logger.warning(f'Error while fetching doc data: {e}')
continue
for gold in golds:
hashes[task_name].update(self.compute_hashes(gold, query))
for (task_name, task_hashes) in hashes.items():
hashes_array = np.array(list(task_hashes), dtype=self.config.hash_config.np_descr)
logger.info(f'Saving {len(task_hashes)} hashes for {task_name}')
with self.output_folder.open(f"{task_name.replace(' ', '_')}.index.hashes", mode='wb') as f:
if self.output_folder.is_local():
hashes_array.tofile(f)
else:
f.write(hashes_array.tobytes())
class NGramsDecontFilter(BaseFilter):
type = '🦠 - DECONT'
name = '💥 N-grams decontaminate'
def __init__(self, index_folder: DataFolderLike, config: NGramsDecontConfig=None, exclusion_writer: DiskWriter=None, language: str=Languages.english):
super().__init__()
self.index_folder = get_datafolder(index_folder)
self.config = config or NGramsDecontConfig()
self.exclusion_writer = exclusion_writer
self.language = language
self._index_hashes = None
self.hash_func = create_hash_func(self.config.hash_config)
self.tokenizer = load_word_tokenizer(language)
def load_index_hashes(self):
def load_index_from_file(file):
with self.index_folder.open(file, mode='rb') as f:
return (file, read_np_from_file(f, np.dtype(self.config.hash_config.np_descr), self.index_folder.is_local()).tolist())
with ThreadPoolExecutor() as pool:
hashes = pool.map(load_index_from_file, self.index_folder.list_files())
self._index_hashes = {}
for (filename, hashlist) in hashes:
taskname = filename.removesuffix('.index.hashes')
logger.info(f'Loading {len(hashlist)} hashes for {taskname}')
for hash in hashlist:
self._index_hashes[hash] = taskname
def filter(self, doc: Document) -> bool | Tuple[bool, str]:
if self._index_hashes is None:
self.load_index_hashes()
text_tokens = self.tokenizer.word_tokenize(simplify_text(doc.text, self.config.norm_config))
ngrams_to_compute = list(ngrams(text_tokens, self.config.n_grams))
for n_gram in map(' '.join, ngrams_to_compute):
task = self._index_hashes.get(self.hash_func(n_gram), None)
if task is not None:
doc.metadata['contaminated_ngram'] = n_gram
doc.metadata['contaminated_task'] = task
self.stat_update(f'contaminated_{task}')
if ':' in task:
self.stat_update(f"contaminated_tg_{task[:task.index(':')]}")
return (False, 'contaminated')
return True
# File: datatrove-main/src/datatrove/pipeline/dedup/__init__.py
from .bloom_filter import SingleBloomFilter
from .exact_substrings import ESDatasetToSequence, ESMergeSequences, ESRangeRemover
from .minhash import MinhashBuildIndex, MinhashConfig, MinhashDedupBuckets, MinhashDedupCluster, MinhashDedupFilter, MinhashDedupSignature
from .sentence_dedup import SentDedupConfig, SentenceDedupFilter, SentenceDedupSignature, SentenceFindDedups
from .url_dedup import UrlDedupConfig, UrlDedupFilter, UrlDedupSignature, UrlFindDedups
# File: datatrove-main/src/datatrove/pipeline/dedup/bloom_filter.py
import contextlib
import math
from dataclasses import dataclass, field
import numpy as np
from datatrove.data import Document, DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.hashing import HashConfig, create_hash_func
from datatrove.utils.logging import logger
from datatrove.utils.text import TextNormConfig, ngrams, simplify_text
from datatrove.utils.typeshelper import Languages, StatHints
from datatrove.utils.word_tokenizers import load_word_tokenizer
_mersenne_prime = np.uint64((1 << 61) - 1)
MAX_HASH = 1 << 32 - 1
@dataclass
class BloomFilterConfig:
m_bytes: int
k: int = None
expected_elements: int = None
duplicate_threshold: float = 0.8
n_grams: int = 13
seed: int = 0
norm_config: TextNormConfig = field(default_factory=TextNormConfig)
hash_config: HashConfig = field(default_factory=lambda : HashConfig(precision=32))
@property
def m(self):
return self.m_bytes * 8
def __post_init__(self):
if self.k is None:
self.k = get_optimal_k(self.m, expected_elements=self.expected_elements)
def get_optimal_k(size_in_bytes: int, expected_elements: int) -> int:
assert expected_elements, f'if expected_elements={expected_elements!r} then k must be given'
m = size_in_bytes * 8
k = m / expected_elements * np.log(2)
return math.ceil(k)
def get_false_positive_prob(size_in_bytes: int, n: int, k: int) -> float:
m = size_in_bytes * 8
return (1.0 - (1.0 - 1.0 / m) ** (k * n)) ** k
class SingleBloomFilter(PipelineStep):
type = '🫂 - DEDUPS'
name = '\U0001fab7 Bloom-filter'
def __init__(self, output_folder: DataFolderLike, config: BloomFilterConfig, save_bloom_filter: bool=False, exclusion_writer: DiskWriter=None, language: str=Languages.english):
super().__init__()
self.output_folder = get_datafolder(output_folder)
self.tokenizer = load_word_tokenizer(language)
self.config = config
self.bit_vector = bytearray([0] * self.config.m_bytes)
self.save_bloom_filter = save_bloom_filter
self.exclusion_writer = exclusion_writer
assert self.config.hash_config.precision == 32, 'Bloom filter only supports 32-bit hashes'
self.hash_fc = create_hash_func(self.config.hash_config)
assert self.config.m < MAX_HASH
self.total_shingles = 0
self._parameters = None
assert self.config.m_bytes < MAX_HASH, f'MAX_HASH={MAX_HASH!r} is smaller than self.config.m_bytes={self.config.m_bytes!r}'
if self.config.expected_elements:
fp = get_false_positive_prob(self.config.m_bytes, n=self.config.expected_elements, k=self.config.k)
if fp > 0.05:
logger.warning(f'False probability = {fp:.3}')
else:
logger.info(f'False probability = {fp:.3}')
self.language = language
@property
def parameters(self):
if self._parameters is None:
gen = np.random.RandomState(self.config.seed)
self._parameters = (gen.randint(1, _mersenne_prime, dtype=np.uint64, size=(1, self.config.k)), gen.randint(0, _mersenne_prime, dtype=np.uint64, size=(1, self.config.k)))
return self._parameters
def get_shingles(self, text: str) -> np.ndarray:
return np.fromiter([self.hash_fc(' '.join(x)) for x in ngrams(self.tokenizer.word_tokenize(simplify_text(text, self.config.norm_config)), self.config.n_grams)], dtype=np.uint64).reshape((-1, 1))
def get_indexes(self, shingles: np.ndarray) -> list[list[int]]:
(a, b) = self.parameters
phv = np.bitwise_and((shingles * a + b) % _mersenne_prime, self.config.m_bytes)
return phv.tolist()
def update_bf(self, indexes: list[int]):
for index in indexes:
(byte_index, bit_index) = divmod(index, 8)
mask = 1 << bit_index
self.bit_vector[byte_index] |= mask
def query(self, indexes: list[int]) -> bool:
for idx in indexes:
(byte_index, bit_index) = divmod(idx, 8)
mask = 1 << bit_index
if self.bit_vector[byte_index] & mask == 0:
return False
return True
def step(self, doc: Document) -> bool:
shingles = self.get_shingles(doc.text)
self.total_shingles += shingles.size
if shingles.size == 0:
return True
shingle_indexes = self.get_indexes(shingles)
duplicate_shingles = 0
indexes_to_update = []
for indexes in shingle_indexes:
if self.query(indexes):
duplicate_shingles += 1
else:
indexes_to_update.extend(indexes)
self.update_bf(indexes_to_update)
if duplicate_shingles / len(shingles) > self.config.duplicate_threshold:
self.stat_update(StatHints.dropped)
return False
return True
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1):
with self.exclusion_writer if self.exclusion_writer else contextlib.nullcontext() as writer:
for (doc_idx, doc) in enumerate(data):
with self.track_time():
self.stat_update(StatHints.total)
if not self.step(doc):
self.stat_update(StatHints.dropped)
if self.exclusion_writer:
writer.write(doc, rank)
continue
self.stat_update(StatHints.forwarded)
yield doc
if self.save_bloom_filter:
with self.output_folder.open('bloom_filter.bloom', mode='wb') as f:
f.write(self.bit_vector)
logger.info(f'self.total_shingles={self.total_shingles!r}')
logger.info(f'False probability = {get_false_positive_prob(self.config.m_bytes, n=self.total_shingles, k=self.config.k):.3}')
logger.info(f'Optimal K given total shingles = {get_optimal_k(self.config.m_bytes, self.total_shingles)}')
# File: datatrove-main/src/datatrove/pipeline/dedup/exact_substrings.py
""""""
import struct
from typing import BinaryIO, Generator
import numpy as np
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import DocumentsPipeline, PipelineStep
from datatrove.utils.logging import logger
from ...utils.tokenization import PipelineStepWithTokenizer
from ...utils.typeshelper import ExtensionHelperES as EH
from ...utils.typeshelper import Languages
from ...utils.word_tokenizers import load_word_tokenizer
SEPARATOR_BYTES = 12
def prepare_doc(tokenizer, doc: str, rank: int, doc_id: int):
tokens = tokenizer.encode(doc).ids
tokens = np.fromiter(tokens, dtype=np.uint16, count=len(tokens))
b_doc = b'\xff\xff' + struct.pack('<I', doc_id) + b'\xff\xff' + struct.pack('<I', rank) + tokens.tobytes()
return b_doc
class ESDatasetToSequence(PipelineStepWithTokenizer):
type = '🫂 - DEDUP'
name = '🪞 - exact-substrings stage 1'
def __init__(self, output_folder: DataFolderLike, tokenizer_name_or_path: str='gpt2'):
super().__init__()
self.output_folder = get_datafolder(output_folder)
self.tokenizer_name_or_path = tokenizer_name_or_path
def save_sizes(self, doc_lens: list[int], rank: int):
with self.output_folder.open(f'{rank:05d}{EH.stage_1_sequence_size}', mode='wb') as f_lens:
f_lens.write(struct.pack('Q' * len(doc_lens), *doc_lens))
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1):
doc_lens = []
with self.output_folder.open(f'{rank:05d}{EH.stage_1_sequence}', mode='wb') as f_sequence:
i = -1
for (i, doc) in enumerate(data):
with self.stats.time_stats:
b_doc = prepare_doc(tokenizer=self.tokenizer, doc=doc.text, rank=rank, doc_id=i)
doc_lens.append(len(b_doc))
f_sequence.write(b_doc)
assert i < 2 ** 32, 'doc ID overflow'
assert i + 1 == len(doc_lens), f'i={i!r} but len(doc_lens)={len(doc_lens)!r}'
self.save_sizes(doc_lens, rank)
class ESMergeSequences(PipelineStep):
type = '🫂 - DEDUP'
name = '🪞 - exact-substrings stage 2'
def __init__(self, data_folder: DataFolderLike, tasks_stage_1: int, bytes_per_batch: int=int(500000000.0)):
super().__init__()
self.data_folder = get_datafolder(data_folder)
self.tasks_stage_1 = tasks_stage_1
self.bytes_per_batch = bytes_per_batch
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1):
bytes_per_sequence = [0]
with self.stats.time_stats:
assert world_size == 1, f"world_size={world_size!r} can't be greater than 1!"
all_files: list[str] = self.data_folder.list_files(glob_pattern=EH.stage_1_sequence)
assert len(all_files) == self.tasks_stage_1
with self.data_folder.open(f'dataset{EH.stage_2_big_sequence}', mode='wb') as f_sequence:
for file in all_files:
len_sequence = 0
with self.data_folder.open(file, 'rb') as f:
while True:
sequence = f.read(self.bytes_per_batch)
f_sequence.write(sequence)
len_sequence += len(sequence)
if len(sequence) != self.bytes_per_batch:
break
bytes_per_sequence.append(bytes_per_sequence[-1] + len_sequence)
with self.data_folder.open(f'bytes_offsets{EH.stage_2_bytes_offset}', mode='wb') as f_bytes:
f_bytes.write(np.array([bytes_per_sequence], np.uint32).tobytes())
def read_bytes(x):
return np.frombuffer(x[SEPARATOR_BYTES:], dtype=np.uint16).tolist()
def sequence_reader(file: BinaryIO, size_file: BinaryIO) -> Generator[list, None, None]:
with size_file as f_size:
with file as f:
while True:
n_bytes = f_size.read(struct.calcsize('<Q'))
if len(n_bytes) == 0:
break
assert len(n_bytes) == 8
n_bytes = struct.unpack('<Q', n_bytes)[0]
yield f.read(n_bytes)
class ESRangeRemover(PipelineStepWithTokenizer):
type = '🫂 - DEDUP'
name = '🪞 - exact-substrings stage 3'
def __init__(self, sequence_folder: DataFolderLike, tokenizer_name_or_path: str='gpt2', min_doc_words: int=50, language: str=Languages.english):
super().__init__()
self.sequence_folder = get_datafolder(sequence_folder)
self.tokenizer_name_or_path = tokenizer_name_or_path
self.min_doc_words = min_doc_words
self.sequence_bytes_offset = None
self.dup_ranges = None
self.rank = None
self.exhausted_ranges = False
self.bytes_counter = 0
self.range_idx = 0
self.language = language
self.word_tokenizer = load_word_tokenizer(language)
def reset(self):
self.bytes_counter = 0
self.range_idx = 0
self.exhausted_ranges = False
self.sequence_bytes_offset = None
self.dup_ranges = None
self.rank = None
def get_sequence_bytes_offset(self):
offset_array_file: str = self.sequence_folder.list_files(glob_pattern=EH.stage_2_bytes_offset)[0]
with self.sequence_folder.open(offset_array_file, 'rb') as f:
offset_array = f.read()
self.sequence_bytes_offset = np.frombuffer(offset_array, dtype=np.uint32)
logger.info(f'self.rank={self.rank!r}, -> self.sequence_bytes_offset[self.rank]={self.sequence_bytes_offset[self.rank]!r}')
def get_bytearange(self, bytes_range_file: BinaryIO):
with bytes_range_file as f:
dup_ranges = f.read()
dup_ranges = dup_ranges.split('\n')
i = 0
for (i, x) in enumerate(dup_ranges):
if x == 'out':
break
dup_ranges = dup_ranges[i + 1:-1]
rank_dup_ranges = []
for br in dup_ranges:
(a, b) = br.split(' ')
(a, b) = (int(a), int(b))
if b > self.sequence_bytes_offset[self.rank + 1] + SEPARATOR_BYTES:
break
if b > self.sequence_bytes_offset[self.rank] + SEPARATOR_BYTES:
(a, b) = (a - self.sequence_bytes_offset[self.rank], b - self.sequence_bytes_offset[self.rank])
rank_dup_ranges.append((a, b))
self.dup_ranges = rank_dup_ranges
def get_all_files(self, rank: int, world_size: int):
self.get_sequence_bytes_offset()
sequence_file = self.sequence_folder.get_shard(rank, world_size, glob_pattern=EH.stage_1_sequence)
docs_sizes_file = self.sequence_folder.get_shard(rank, world_size, glob_pattern=EH.stage_1_sequence_size)
byte_range_file = self.sequence_folder.list_files(glob_pattern=EH.stage_3_bytes_ranges)
assert all([len(sequence_file) == 1, len(docs_sizes_file) == 1, len(byte_range_file) == 1]), f'Need to run with n_tasks = n_files. len(sequence_file)={len(sequence_file)!r}, len(sequence_file)={len(sequence_file)!r}, len(byte_range_file)={len(byte_range_file)!r}'
(sequence_file, docs_sizes_file, byte_range_file) = (sequence_file[0], docs_sizes_file[0], byte_range_file[0])
self.get_bytearange(self.sequence_folder.open(byte_range_file, 'rt'))
return (sequence_file, docs_sizes_file)
def normalize_range(self, a, b, bytes_len):
(a, b) = (a - self.bytes_counter, b - self.bytes_counter)
a = max(SEPARATOR_BYTES, a)
b = min(bytes_len, b)
assert SEPARATOR_BYTES <= a < b <= bytes_len, f'SEPARATOR_BYTES={SEPARATOR_BYTES!r} < a={a!r} < b={b!r} < bytes_len={bytes_len!r} is NOT satisfied'
if b % 2 == 1:
b -= 1
if a % 2 == 1:
a += 1
b = max(a, b)
return (a, b)
def get_duplicate_range(self, bytes_len: int):
ranges = []
upper_limit = self.bytes_counter + bytes_len + SEPARATOR_BYTES
if self.exhausted_ranges:
return ranges
while True:
(a, b) = (self.dup_ranges[self.range_idx][0], self.dup_ranges[self.range_idx][1])
left = a < self.bytes_counter and self.bytes_counter + SEPARATOR_BYTES < b <= upper_limit
centre = self.bytes_counter <= a < b <= upper_limit
right = self.bytes_counter <= a < upper_limit - SEPARATOR_BYTES and upper_limit < b
outside = a < self.bytes_counter < upper_limit < b
if not any([left, centre, right, outside]):
break
assert sum([left, centre, right, outside]) == 1, f'left={left!r}, centre={centre!r}, right={right!r}, outside={outside!r}'
if left:
self.range_idx += 1
a = self.bytes_counter
if centre:
self.range_idx += 1
if right:
ranges.append(self.normalize_range(a, upper_limit, bytes_len))
break
if outside:
ranges.append(self.normalize_range(self.bytes_counter, upper_limit, bytes_len))
break
ranges.append(self.normalize_range(a, b, bytes_len))
if self.range_idx == len(self.dup_ranges):
self.exhausted_ranges = True
break
return ranges
def remove_duplicate(self, doc, bytes_content):
n_bytes = len(bytes_content)
duplicates_ranges = self.get_duplicate_range(n_bytes)
duplicates = []
for (byte_a, byte_b) in duplicates_ranges:
dup_sentence = self.tokenizer.decode(np.frombuffer(bytes_content[byte_a:byte_b], dtype=np.uint16).tolist())
duplicates.append(dup_sentence)
if duplicates:
text = doc.text
for d in duplicates:
text = text.replace(d, '')
doc.text = text
self.bytes_counter += len(bytes_content)
if len(self.word_tokenizer.word_tokenize(doc.text)) < self.min_doc_words:
return False
return True
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1) -> DocumentsPipeline:
self.reset()
self.rank = rank
(sequence_file, size_file) = self.get_all_files(rank=self.rank, world_size=world_size)
if not self.dup_ranges:
return
for (doc, doc_content) in zip(data, sequence_reader(self.sequence_folder.open(sequence_file, 'rb'), self.sequence_folder.open(size_file, 'rb'))):
with self.stats.time_stats:
assert doc.text == self.tokenizer.decode(read_bytes(doc_content), skip_special_tokens=False), f'{doc.text}\n\n{self.tokenizer.decode(read_bytes(doc_content))}'
to_yield = self.remove_duplicate(doc, doc_content)
if to_yield:
self.update_doc_stats(doc)
yield doc
assert self.bytes_counter == self.sequence_bytes_offset[rank + 1] - self.sequence_bytes_offset[rank], f'got self.bytes_counter={self.bytes_counter!r}, expected = {self.sequence_bytes_offset[rank + 1] - self.sequence_bytes_offset[rank]}'
assert self.exhausted_ranges, 'One or more duplicate ranges have not been used'
# File: datatrove-main/src/datatrove/pipeline/dedup/minhash.py
import contextlib
import heapq
import os
import re
import struct
from dataclasses import dataclass, field
from pathlib import Path
from typing import Generator
import numpy as np
from fsspec.spec import AbstractBufferedFile
from datatrove.data import DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.binaryio import read_tuples_from_file, seek_to_start
from datatrove.utils.hashing import HashConfig, create_hash_func
from datatrove.utils.logging import logger
from datatrove.utils.text import TextNormConfig, ngrams, simplify_text
from datatrove.utils.typeshelper import Languages, StatHints
from datatrove.utils.word_tokenizers import load_word_tokenizer
_mersenne_prime = np.uint64((1 << 61) - 1)
''
SENTINEL = (1 << 32) - 1
@dataclass
class MinhashConfig:
n_grams: int = 5
num_buckets: int = 14
hashes_per_bucket: int = 8
seed: int = 1
norm_config: TextNormConfig = field(default_factory=TextNormConfig)
hash_config: HashConfig = field(default_factory=HashConfig)
def __str__(self):
return f'{self.n_grams}ng_{self.num_buckets}bs_{self.hashes_per_bucket}hs_{self.hash_config}'
@dataclass(order=True)
class HashSig:
sig: tuple[int]
file_id: int
file_stem: str
doc_id: int
reader_id: int
def is_from_index(self):
return self.reader_id != self.file_id
def read_sigs(file: AbstractBufferedFile, reader_id: int, config: MinhashConfig, index_file: bool=False, min_hash: int=0, max_hash: int=_mersenne_prime, ensure_order: bool=True, lines_to_buffer: int=5) -> Generator:
line_format = f"{config.hashes_per_bucket}{config.hash_config.struct_format}{('I' if not index_file else '')}"
with file as f:
if f.size == 0:
return
seek_to_start(f, min_hash, line_format, config.hash_config.struct_format)
last = None
file_stem = Path(file.path).name.removesuffix('.minhash.sig')
for data in read_tuples_from_file(f, line_format, lines_to_buffer=lines_to_buffer):
sigdata = data if index_file else data[:-1]
assert sigdata[0] >= min_hash and (ensure_order is False or last is None or sigdata >= last), f'Hash order error. f.tell()={f.tell()!r}, min_hash={min_hash!r}, sigdata={sigdata!r}, last={last!r}'
if sigdata[0] >= max_hash:
break
last = sigdata
yield (HashSig(sig=sigdata, doc_id=-1, file_id=-1, reader_id=reader_id, file_stem=file_stem) if index_file else HashSig(sig=sigdata, doc_id=data[-1], file_id=reader_id, reader_id=reader_id, file_stem=file_stem))
class MinhashDedupSignature(PipelineStep):
type = '🫂 - DEDUP'
name = '🎯 MinHash stage 1'
def __init__(self, output_folder: DataFolderLike, config: MinhashConfig=None, language: str=Languages.english):
super().__init__()
self.output_folder = get_datafolder(output_folder)
self.config = config or MinhashConfig()
self.num_hashes = self.config.num_buckets * self.config.hashes_per_bucket
self._parameters = None
self._hash_func = create_hash_func(self.config.hash_config)
self.language = language
self.word_tokenizer = load_word_tokenizer(language)
@property
def parameters(self):
if self._parameters is None:
gen = np.random.RandomState(self.config.seed)
self._parameters = (gen.randint(1, _mersenne_prime, dtype=np.uint64, size=(1, self.num_hashes)), gen.randint(0, _mersenne_prime, dtype=np.uint64, size=(1, self.num_hashes)))
return self._parameters
def get_signature(self, shingles: np.ndarray) -> list[list[int]]:
(a, b) = self.parameters
phv = (shingles * a + b) % _mersenne_prime
if self.config.hash_config.precision == 32:
phv = np.bitwise_and(phv, self.config.hash_config.max)
return [x.tolist() for x in np.split(np.min(phv, axis=0).astype(self.config.hash_config.np_dtype), self.config.num_buckets)]
def get_shingles(self, text: str) -> np.ndarray:
return np.fromiter([self._hash_func(' '.join(x)) for x in ngrams(self.word_tokenizer.word_tokenize(simplify_text(text, self.config.norm_config)), self.config.n_grams)], dtype=np.uint64).reshape((-1, 1))
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1):
buckets = [self.output_folder.open(f'bucket_{bi:03d}/{rank:05d}.minhash.sig', mode='wb') for bi in range(self.config.num_buckets)]
with self.track_time():
for (doc_idx, doc) in enumerate(data):
self.stat_update(StatHints.total)
shingles = self.get_shingles(doc.text)
if shingles.size != 0:
sig = self.get_signature(shingles)
for (bi, (bucket, bucket_sig)) in enumerate(zip(buckets, sig)):
bucket.write(struct.pack(f'<{self.config.hashes_per_bucket}{self.config.hash_config.struct_format}I', *bucket_sig, doc_idx))
for file in buckets:
file.close()
logger.info('Sorting buckets...')
for bi in range(len(buckets)):
sigs = sorted(read_sigs(self.output_folder.open(f'bucket_{bi:03d}/{rank:05d}.minhash.sig', mode='rb'), -1, self.config, ensure_order=False, lines_to_buffer=-1))
with self.output_folder.open(f'bucket_{bi:03d}/{rank:05d}.minhash.sig', mode='wb') as fo:
for sig in sigs:
fo.write(struct.pack(f'<{self.config.hashes_per_bucket}{self.config.hash_config.struct_format}I', *sig.sig, sig.doc_id))
class MinhashDedupBuckets(PipelineStep):
type = '🫂 - DEDUP'
name = '🎯 MinHash stage 2'
def __init__(self, input_folder: DataFolderLike, output_folder: DataFolderLike, index_folder: DataFolderLike=None, config: MinhashConfig=None, only_dedup_in_index: bool=True, create_index_name: str=None, lines_to_buffer: int=5):
super().__init__()
self.input_folder = get_datafolder(input_folder)
self.output_folder = get_datafolder(output_folder)
self.index_folder = get_datafolder(index_folder) if index_folder else None
self.config = config or MinhashConfig()
self.only_dedup_in_index = only_dedup_in_index
self.create_index_name = create_index_name
self.lines_to_buffer = lines_to_buffer
def get_worker_hash_range(self, sig_files, rank, world_size):
workers_per_bucket = world_size // self.config.num_buckets
(bucket, bucket_worker) = divmod(rank, workers_per_bucket)
(hash_min, hash_max) = (0, _mersenne_prime if self.config.hash_config.precision == 64 else self.config.hash_config.max)
if workers_per_bucket > 1 and len(sig_files):
with self.input_folder.open(sig_files[0], mode='rb') as f:
line_size = struct.calcsize(f'{self.config.hashes_per_bucket}{self.config.hash_config.struct_format}I')
(L, rem) = divmod(f.size, line_size)
assert rem == 0, 'file size not divisible by line size'
assert L >= workers_per_bucket, f'tried to use workers_per_bucket={workers_per_bucket!r} but there are only {L} lines'
if bucket_worker > 0:
f.seek(line_size * (L // workers_per_bucket) * bucket_worker, os.SEEK_SET)
hash_min = struct.unpack(self.config.hash_config.struct_format, f.read(struct.calcsize(self.config.hash_config.struct_format)))[0]
if bucket_worker + 1 < workers_per_bucket:
f.seek(line_size * (L // workers_per_bucket) * (bucket_worker + 1), os.SEEK_SET)
hash_max = struct.unpack(self.config.hash_config.struct_format, f.read(struct.calcsize(self.config.hash_config.struct_format)))[0]
return (hash_min, hash_max)
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1):
assert data is None, 'You should not use an input block before MinhashDedupBuckets'
assert world_size % self.config.num_buckets == 0, 'Number of tasks must be divisible by num_buckets'
workers_per_bucket = world_size // self.config.num_buckets
(bucket, bucket_worker) = divmod(rank, workers_per_bucket)
with self.track_time():
sig_files = self.input_folder.list_files(subdirectory=f'bucket_{bucket:03d}')
(hash_min, hash_max) = self.get_worker_hash_range(sig_files, rank, world_size)
logger.info(f'Running worker {bucket_worker + 1}/{workers_per_bucket} on bucket {bucket:03d}. Hash range: {[hash_min, hash_max]}')
sig_readers = [read_sigs(file, file_i, self.config, min_hash=hash_min, max_hash=hash_max, lines_to_buffer=self.lines_to_buffer) for (file_i, file) in enumerate(self.input_folder.open_files(sig_files, mode='rb'))]
own_index_regex = re.compile(f'bucket_{bucket:03d}/{self.create_index_name}_\\d{{2}}.minhash.index')
index_files = [filename for filename in self.index_folder.list_files(subdirectory=f'bucket_{bucket:03d}') if not self.create_index_name or not own_index_regex.fullmatch(filename)] if self.index_folder else None
if index_files:
logger.info(f"Found {len(index_files)} index file(s): {', '.join(index_files)}")
sig_readers.extend([read_sigs(file, len(sig_readers) + file_i, self.config, index_file=True, min_hash=hash_min, max_hash=hash_max, lines_to_buffer=self.lines_to_buffer) for (file_i, file) in enumerate(self.index_folder.open_files(index_files, mode='rb'))])
pq = [x for x in [next(sig_reader, None) for sig_reader in sig_readers] if x is not None]
heapq.heapify(pq)
logger.info('Finished initializing signatures priority queue.')
out_index = None
if self.index_folder and self.create_index_name:
out_index = self.index_folder.open(f'bucket_{bucket:03d}/{self.create_index_name}_{bucket_worker:02d}.minhash.index', mode='wb')
with self.output_folder.open(f'{bucket:05d}_{bucket_worker:02d}.dups', mode='wb') as out_f:
last: HashSig | None = None
while pq:
v: HashSig = heapq.heappop(pq)
assert last is None or v >= last, f'Sig queue sort error. v={v!r} < last={last!r}'
if not v.is_from_index():
if last and last.sig == v.sig:
if last.is_from_index():
out_f.write(struct.pack('<4I', SENTINEL, SENTINEL, int(v.file_stem), v.doc_id))
self.stat_update('index_match', 'total_matches')
elif not index_files or not self.only_dedup_in_index:
out_f.write(struct.pack('<4I', int(last.file_stem), last.doc_id, int(v.file_stem), v.doc_id))
self.stat_update('total_matches')
elif out_index:
out_index.write(struct.pack(f'<%d{self.config.hash_config.struct_format}' % self.config.hashes_per_bucket, *v.sig))
last = v
next_sig = next(sig_readers[v.reader_id], None)
if next_sig:
assert next_sig >= v, f'Next sig sort error. next_sig={next_sig!r} < v={v!r}'
heapq.heappush(pq, next_sig)
if out_index:
out_index.close()
class MinhashDedupCluster(PipelineStep):
type = '🫂 - DEDUP'
name = '🎯 MinHash stage 3'
def __init__(self, input_folder: DataFolderLike, output_folder: DataFolderLike, config: MinhashConfig=None, save_cluster_id: bool=False, ignore_index_matches: bool=False, lines_to_buffer: int=5):
super().__init__()
self.input_folder = get_datafolder(input_folder)
self.output_folder = get_datafolder(output_folder)
self.config = config or MinhashConfig()
self.save_cluster_id = save_cluster_id
self.ignore_index_matches = ignore_index_matches
self.lines_to_buffer = lines_to_buffer
def run(self, data: DocumentsPipeline=None, _: int=0, world_size: int=1):
dup_files = self.input_folder.list_files(glob_pattern='*.dups')
assert len(dup_files) % self.config.num_buckets == 0, 'Number of .dups files should be divisible by number of buckets'
assert world_size == 1, 'World size must be 1 for clustering'
union_set = {}
def parent(x):
if x not in union_set or union_set[x] == x:
return x
union_set[x] = parent(union_set[x])
return union_set[x]
with self.track_time():
for dup_file in dup_files:
with self.input_folder.open(dup_file, 'rb') as dupf:
for (f1, d1, f2, d2) in read_tuples_from_file(dupf, '4I', lines_to_buffer=self.lines_to_buffer):
(a, b) = ((f1, d1), (f2, d2))
if self.ignore_index_matches and a == (SENTINEL, SENTINEL):
continue
union_set[parent(b)] = parent(a)
ci = 0
cluster_ids = {}
with self.output_folder.get_output_file_manager(mode='wb') as output_mg:
for node in sorted(union_set.keys()):
self.stat_update('duplicates')
(file, doc) = node
p = parent(node)
if node != p:
output_mg.write(f'{file:06d}.remove', struct.pack('<I', doc))
self.stat_update('to_remove')
if self.save_cluster_id:
if p not in cluster_ids:
cluster_ids[p] = ci
ci += 1
self.stat_update('clusters')
output_mg.write(f'{file:06d}.clusters', struct.pack('<I', doc))
output_mg.write(f'{file:06d}.clusters', struct.pack('<I', cluster_ids[p]))
class MinhashDedupFilter(PipelineStep):
type = '🫂 - DEDUP'
name = '🎯 MinHash stage 4'
def __init__(self, input_folder: DataFolderLike, exclusion_writer: DiskWriter=None, load_cluster_ids: bool=False, lines_to_buffer: int=5):
super().__init__()
self.data_folder = get_datafolder(input_folder)
self.exclusion_writer = exclusion_writer
self.load_cluster_ids = load_cluster_ids
self.lines_to_buffer = lines_to_buffer
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1):
clusters_data = self.data_folder.get_shard(rank, world_size, glob_pattern='*.clusters')
assert not self.load_cluster_ids or len(clusters_data) <= 1, f'Must have exactly one .clusters file per task. Found {len(clusters_data)} files.'
if not self.data_folder.isfile(f'{rank:06d}.remove'):
logger.warning(f'No .remove file for rank={rank!r}.')
for doc in data:
self.stat_update(StatHints.total, StatHints.forwarded)
yield doc
return
with self.data_folder.open(f'{rank:06d}.remove', 'rb') as f:
with self.exclusion_writer if self.exclusion_writer else contextlib.nullcontext() as exc_writer:
def get_next():
data = f.read(struct.calcsize('I'))
if data:
return struct.unpack('<I', data)[0]
def load_clusters():
if clusters_data:
with self.data_folder.open(clusters_data[0], 'rb') as clustersf:
yield from read_tuples_from_file(clustersf, '2I', lines_to_buffer=self.lines_to_buffer)
if self.load_cluster_ids:
cluster_loader = load_clusters()
next_cluster = next(cluster_loader, None)
next_removal = get_next()
for (idx, doc) in enumerate(data):
with self.track_time():
if self.load_cluster_ids:
if next_cluster and idx == next_cluster[0]:
doc.metadata['minhash_cluster'] = next_cluster[1]
next_cluster = next(cluster_loader, None)
self.stat_update(StatHints.total)
if next_removal == idx:
self.stat_update(StatHints.dropped)
if self.exclusion_writer:
exc_writer.write(doc, rank)
next_removal = get_next()
continue
self.stat_update(StatHints.forwarded)
yield doc
class MinhashBuildIndex(PipelineStep):
type = '🫂 - DEDUP'
name = '🎯 MinHash build index'
def __init__(self, input_folder: DataFolderLike, output_folder: DataFolderLike, index_name: str, config: MinhashConfig=None, lines_to_buffer: int=5):
super().__init__()
self.input_folder = input_folder
self.output_folder = output_folder
self.config = config or MinhashConfig()
self.index_name = index_name
self.lines_to_buffer = lines_to_buffer
def run(self, data: DocumentsPipeline=None, bucket: int=0, world_size: int=1):
assert data is None, 'You should not use an input block before MinhashBuildIndex'
assert world_size == self.config.num_buckets, 'You must run exactly one task per bucket'
sig_files = self.input_folder.list_files(subdirectory=f'bucket_{bucket:03d}')
sig_readers = [read_sigs(file, file_i, self.config, lines_to_buffer=self.lines_to_buffer) for (file_i, file) in enumerate(self.input_folder.open_files(sig_files, mode='rb'))]
pq = [next(sig_reader) for sig_reader in sig_readers]
heapq.heapify(pq)
out_f = self.output_folder.open(f'bucket_{bucket:03d}/{self.index_name}.minhash.index', mode='wb')
last: HashSig | None = None
with self.track_time():
while pq:
v: HashSig = heapq.heappop(pq)
if not last or last.sig != v.sig:
out_f.write(struct.pack(f'<%d{self.config.hash_config.struct_format}' % self.config.hashes_per_bucket, *v.sig))
last = v
next_sig = next(sig_readers[v.file_id], None)
if next_sig:
heapq.heappush(pq, next_sig)
out_f.close()
# File: datatrove-main/src/datatrove/pipeline/dedup/sentence_dedup.py
""""""
import contextlib
import dataclasses
import heapq
import struct
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from pathlib import Path
from typing import BinaryIO, Generator
import numpy as np
from fsspec.spec import AbstractBufferedFile
from tqdm import tqdm
from datatrove.data import Document, DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.binaryio import read_np_from_file, read_tuples_from_file
from datatrove.utils.hashing import HashConfig, create_hash_func
from datatrove.utils.logging import logger
from datatrove.utils.text import SPLIT_TEXT_SENTENCES, TextNormConfig, ngrams, simplify_text, split_into_parts
from datatrove.utils.typeshelper import ExtensionHelperSD, Languages, StatHints
from ...utils.word_tokenizers import load_word_tokenizer
from ..writers.disk_base import DiskWriter
@dataclass
class SentDedupConfig:
n_sentences: int = 3
split_sentences: bool = True
only_dedup_in_index: bool = True
min_doc_words: int = 50
min_num_sentences: int = 3
min_words_to_remove_span: int = 0
norm_config: TextNormConfig = field(default_factory=TextNormConfig)
hash_config: HashConfig = field(default_factory=HashConfig)
@dataclass(order=True)
class HashSig:
hash_value: int
doc_id: int
file_id: int = None
sent_id: int = None
file_stem: str = None
def is_from_index(self):
return self.doc_id == self.sent_id == -1
class SentenceDedupSignature(PipelineStep):
type = '🫂 - DEDUPS'
name = '💥 sentence-deduplication stage 1'
def __init__(self, output_folder: DataFolderLike, finder_workers: int=1, config: SentDedupConfig=None, language: str=Languages.english):
super().__init__()
self.output_folder = get_datafolder(output_folder)
if finder_workers <= 0:
raise ValueError('finder_workers must be >= 1')
elif finder_workers > 1:
logger.warning(f'Remember to also set the name of tasks of the finder block to finder_workers={finder_workers!r}!')
self.finder_workers = finder_workers
self.config = config or SentDedupConfig()
self.hash_fc = create_hash_func(config.hash_config)
self.language = language
self.tokenizer = load_word_tokenizer(language)
def save_hashes(self, rank: int, signatures):
signatures = np.array(signatures, dtype=[('hash', self.config.hash_config.np_descr), ('doc', '<u4'), ('sent', '<u2')])
signatures.sort(axis=0)
hashes_per_worker = self.config.hash_config.max // self.finder_workers
left_idx = 0
for hash_i in range(self.finder_workers):
with self.output_folder.open(f'{hash_i:04d}/{rank:05d}{ExtensionHelperSD.stage_1_signature}', mode='wb') as f:
right_hash = (hash_i + 1) * hashes_per_worker if hash_i != self.finder_workers - 1 else self.config.hash_config.max
right_idx = left_idx + signatures['hash'][left_idx:].searchsorted(right_hash, side='right')
if right_idx > left_idx:
if self.output_folder.is_local():
signatures[left_idx:right_idx].tofile(f)
else:
f.write(signatures[left_idx:right_idx].tobytes())
left_idx = right_idx
if right_idx >= len(signatures):
break
def get_hashes(self, doc: Document, doc_idx: int) -> list[None] | list[tuple[int, int, int]]:
sentences = self.tokenizer.sent_tokenize(doc.text) if self.config.split_sentences else doc.text.splitlines()
if len(sentences) < self.config.n_sentences:
return []
sentences_tokens = [simplify_text(sent, self.config.norm_config) for sent in sentences]
n_sent_grams: list = [' '.join(x) for x in ngrams(sentences_tokens, self.config.n_sentences)]
hashes = [(self.hash_fc(n_sent_gram), doc_idx, sentence_idx) for (sentence_idx, n_sent_gram) in enumerate(n_sent_grams) if n_sent_gram.strip() != '']
return hashes
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1):
signatures = []
for (doc_idx, doc) in enumerate(data):
with self.stats.time_stats:
self.stat_update(StatHints.total)
signatures.extend(self.get_hashes(doc, doc_idx))
self.save_hashes(rank, signatures)
def read_sigs(file: AbstractBufferedFile, file_id: int, config: SentDedupConfig, index_file: bool=False, lines_to_buffer: int=5) -> Generator[HashSig, None, None]:
line_format = f'{config.hash_config.struct_format}IH' if not index_file else config.hash_config.struct_format
file_stem = Path(file.path).name.removesuffix(ExtensionHelperSD.stage_1_signature)
last = None
with file as f:
for data in read_tuples_from_file(f, line_format, lines_to_buffer=lines_to_buffer):
assert last is None or data[0] >= last, f'Hash order error. f.tell()={f.tell()!r}, data[0]={data[0]!r}, last={last!r}'
last = data[0]
yield (HashSig(hash_value=data[0], doc_id=-1, file_id=file_id, sent_id=-1, file_stem=file_stem) if index_file else HashSig(file_id=file_id, hash_value=data[0], doc_id=data[1], sent_id=data[2], file_stem=file_stem))
class SentenceFindDedups(PipelineStep):
type = '🫂 - DEDUPS'
name = '💥 sentence-deduplication stage 2'
def __init__(self, data_folder: DataFolderLike, output_folder: DataFolderLike, index_folder: DataFolderLike=None, config: SentDedupConfig=None, lines_to_buffer: int=5):
super().__init__()
self.data_folder = get_datafolder(data_folder)
self.output_folder = get_datafolder(output_folder)
self.index_folder = get_datafolder(index_folder) if index_folder else None
self.config = config or SentDedupConfig()
self.lines_to_buffer = lines_to_buffer
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1):
with self.stats.time_stats:
if world_size == 1:
sig_files = self.data_folder.list_files(glob_pattern='*/*' + ExtensionHelperSD.stage_1_signature)
if any((not sig_file.startswith('0000/') for sig_file in sig_files)):
raise ValueError(f'world_size={world_size!r} but found sig files for different hash buckets. Set tasks=finder_workers')
else:
sig_files = self.data_folder.list_files(subdirectory=f'{rank:04d}', glob_pattern=ExtensionHelperSD.stage_1_signature)
sig_readers = [read_sigs(file, file_i, config=self.config, lines_to_buffer=self.lines_to_buffer) for (file_i, file) in enumerate(self.data_folder.open_files(sig_files))]
index_files = self.index_folder.list_files() if self.index_folder else None
if index_files:
logger.info(f"Found index file(s): {', '.join(index_files)}")
sig_readers.extend([read_sigs(file, len(sig_readers) + file_i, config=self.config, index_file=True, lines_to_buffer=self.lines_to_buffer) for (file_i, file) in enumerate(self.data_folder.open_files(index_files))])
logger.info(f'Initializing pq with {len(sig_readers)} files.')
with ThreadPoolExecutor() as executor:
pq = [x for x in tqdm(executor.map(lambda x: next(x, None), sig_readers), total=len(sig_readers), desc='Initializing pq...') if x]
heapq.heapify(pq)
logger.info('PQ initialized.')
output_mg = self.output_folder.get_output_file_manager(mode='wb')
packer = struct.Struct('<IH')
last: HashSig | None = None
while pq:
v: HashSig = heapq.heappop(pq)
if last and last.hash_value == v.hash_value and (not v.is_from_index()):
out_filename = f'{rank:04d}/{v.file_stem}{ExtensionHelperSD.stage_2_duplicates}'
if last.is_from_index() or not index_files or (not self.config.only_dedup_in_index):
output_mg.write(out_filename, packer.pack(v.doc_id, v.sent_id))
last = v
new_v = next(sig_readers[v.file_id], None)
if new_v:
heapq.heappush(pq, new_v)
output_mg.close()
class SentenceDedupFilter(PipelineStep):
type = '🫂 - DEDUPS'
name = '💥 sentence-deduplication stage 3'
def __init__(self, data_folder: DataFolderLike, config: SentDedupConfig=None, exclusion_writer: DiskWriter=None, language: str=Languages.english):
super().__init__()
self.data_folder = get_datafolder(data_folder)
self.config = config or SentDedupConfig()
self.tokenizer = load_word_tokenizer(language)
self.exclusion_writer = exclusion_writer
self.language = language
def read_duplicates(self, file: BinaryIO) -> np.ndarray:
return read_np_from_file(file, dtype=np.dtype([('doc', '<u4'), ('sent', '<u2')]), is_local_file=self.data_folder.is_local())
def remove_dup_sentences(self, doc: Document, du_lines: np.ndarray) -> tuple[str, str]:
sentence_spans = list(self.tokenizer.span_tokenize(doc.text)) if self.config.split_sentences else doc.text.splitlines()
kept_sentences = []
original_formatted = []
last_s = 0
du_line_idx = 0
drop_until = 0
removed_span = []
for (idx, s) in enumerate(sentence_spans):
line_text = doc.text[last_s:s[1]] if self.config.split_sentences else s
if du_line_idx < len(du_lines):
if du_lines[du_line_idx] < idx:
raise ValueError('Error with duplicate line index')
elif du_lines[du_line_idx] == idx:
drop_until = idx + self.config.n_sentences
du_line_idx += 1
if idx >= drop_until:
if removed_span:
original_formatted.append('<<<')
if self.config.min_words_to_remove_span > 0 and len(self.tokenizer.word_tokenize('\n'.join(removed_span))) < self.config.min_words_to_remove_span:
kept_sentences.extend(removed_span)
removed_span.clear()
kept_sentences.append(line_text)
elif not removed_span:
removed_span.append(line_text)
original_formatted.append('>>>')
original_formatted.append(line_text)
if self.config.split_sentences:
last_s = s[1]
if removed_span:
original_formatted.append('<<<')
if self.config.min_words_to_remove_span > 0 and len(self.tokenizer.word_tokenize('\n'.join(removed_span))) < self.config.min_words_to_remove_span:
kept_sentences.extend(removed_span)
if len(kept_sentences) < len(sentence_spans):
self.stat_update('removed_sentences', value=len(sentence_spans) - len(kept_sentences))
self.stat_update('original_sentences', value=len(sentence_spans))
merge_char = '' if self.config.split_sentences else '\n'
return (merge_char.join(kept_sentences).lstrip(), merge_char.join(original_formatted))
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
folders = self.data_folder.list_files(include_directories=True, recursive=False)
files = [f for f in [f'{folder}/{rank:05d}{ExtensionHelperSD.stage_2_duplicates}' for folder in folders] if self.data_folder.exists(f)]
logger.info(f'Loading duplicate indexes from {len(files)} results files.')
all_dups = np.array([], dtype=[('doc', '<u4'), ('sent', '<u2')])
if files:
with ThreadPoolExecutor() as pool:
all_dups = np.concatenate(list(tqdm(pool.map(self.read_duplicates, self.data_folder.open_files(files)), total=len(files))), axis=0)
all_dups.sort()
(_, doc_starts) = np.unique(all_dups['doc'], return_index=True)
logger.info('Loaded duplicate indexes.')
dups_doc_i = 0
with self.exclusion_writer if self.exclusion_writer else contextlib.nullcontext() as writer:
for (doc_idx, doc) in enumerate(data):
self.stat_update(StatHints.total)
with self.stats.time_stats:
if dups_doc_i >= len(doc_starts) or all_dups['doc'][doc_starts[dups_doc_i]] > doc_idx:
(filtered_text, original_formatted) = (doc.text, None)
else:
(sents_span_l, sents_span_r) = (doc_starts[dups_doc_i], doc_starts[dups_doc_i + 1] if dups_doc_i + 1 < len(doc_starts) else None)
(filtered_text, original_formatted) = self.remove_dup_sentences(doc, all_dups['sent'][sents_span_l:sents_span_r])
dups_doc_i += 1
if (filtered_text == doc.text or ((self.config.min_doc_words <= 0 or len(self.tokenizer.word_tokenize(filtered_text)) >= self.config.min_doc_words) and (self.config.min_num_sentences <= 0 or len(split_into_parts(filtered_text, SPLIT_TEXT_SENTENCES, self.language)) >= self.config.min_num_sentences))) and filtered_text:
self.update_doc_stats(doc)
if not filtered_text == doc.text and writer:
writer.write(dataclasses.replace(doc, text=original_formatted), rank=rank)
doc.text = filtered_text
yield doc
elif writer:
doc.text = original_formatted
writer.write(doc, rank=rank)
class SentenceDedupBuildIndex(PipelineStep):
type = '🫂 - DEDUP'
name = '💥 sentence-deduplication build index'
def __init__(self, data_folder: DataFolderLike, output_folder: DataFolderLike, index_name: str, config: SentDedupConfig=None, lines_to_buffer: int=5):
super().__init__()
self.data_folder = get_datafolder(data_folder)
self.output_folder = get_datafolder(output_folder)
self.index_name = index_name
self.lines_to_buffer = lines_to_buffer
self.config = config or SentDedupConfig()
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1):
assert world_size == 1, 'SentenceDedupBuildIndex can only run on a single worker.'
with self.stats.time_stats:
sig_files = self.data_folder.list_files(glob_pattern=ExtensionHelperSD.stage_1_signature)
sig_readers = [read_sigs(file, file_i, self.config, lines_to_buffer=self.lines_to_buffer) for (file_i, file) in enumerate(self.data_folder.open_files(sig_files))]
pq = [next(sig_reader) for sig_reader in sig_readers]
heapq.heapify(pq)
with self.output_folder.open(f'{self.index_name}.{ExtensionHelperSD.index}', mode='wb') as out_f:
last = None
while pq:
v: HashSig = heapq.heappop(pq)
if last != v.hash_value:
out_f.write(struct.pack(f'<{self.config.hash_config.struct_format}', v.hash_value))
last = v.hash_value
new_v = next(sig_readers[v.file_id], None)
if new_v:
heapq.heappush(pq, new_v)
# File: datatrove-main/src/datatrove/pipeline/dedup/url_dedup.py
""""""
import contextlib
import heapq
import struct
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import BinaryIO, Callable, Generator
import numpy as np
from fsspec.spec import AbstractBufferedFile
from tqdm import tqdm
from datatrove.data import Document, DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.binaryio import read_np_from_file, read_tuples_from_file
from datatrove.utils.hashing import HashConfig, create_hash_func
from datatrove.utils.logging import logger
from datatrove.utils.typeshelper import ExtensionHelperSD, StatHints
from ..writers.disk_base import DiskWriter
@dataclass
class UrlDedupConfig:
url_normalizer: Callable[[str], str] | None = None
document_priority: Callable[[Document], int] | None = None
hash_config: HashConfig = field(default_factory=HashConfig)
only_dedup_in_index: bool = True
@dataclass(order=False)
class HashSig:
hash_value: int
priority: int
doc_id: int
file_id: int
file_stem: str
def is_from_index(self):
return self.doc_id == -1 and self.priority == 1
def __lt__(self, other: 'HashSig') -> bool:
return (self.hash_value, -self.priority, self.doc_id) < (other.hash_value, -other.priority, other.doc_id)
def get_sig_dtype(config: HashConfig) -> np.dtype:
return np.dtype([('hash', config.np_dtype), ('priority', '<u2'), ('doc', '<u4')])
class UrlDedupSignature(PipelineStep):
type = '🫂 - DEDUPS'
name = '💥 url-deduplication stage 1'
def __init__(self, output_folder: DataFolderLike, finder_workers: int=1, config: UrlDedupConfig | None=None):
super().__init__()
self.output_folder = get_datafolder(output_folder)
if finder_workers <= 0:
raise ValueError('finder_workers must be >= 1')
elif finder_workers > 1:
logger.warning(f'Remember to also set the number of tasks of the finder block to finder_workers={finder_workers!r}!')
self.finder_workers = finder_workers
self.config = config or UrlDedupConfig()
self.hash_fc = create_hash_func(self.config.hash_config)
def save_hashes(self, rank: int, signatures):
sig_dtype = get_sig_dtype(self.config.hash_config)
priority_max = np.iinfo(sig_dtype['priority']).max
assert all((sig[1] >= 1 and sig[1] <= priority_max for sig in signatures)), f'priority must be between 1 and {priority_max}'
signatures = np.array(signatures, dtype=sig_dtype)
signatures['priority'] = -signatures['priority']
signatures.sort(axis=0)
signatures['priority'] = -signatures['priority']
hashes_per_worker = self.config.hash_config.max // self.finder_workers
left_idx = 0
for hash_i in range(self.finder_workers):
with self.output_folder.open(f'{hash_i:04d}/{rank:05d}{ExtensionHelperSD.stage_1_signature}', mode='wb') as f:
right_hash = (hash_i + 1) * hashes_per_worker if hash_i != self.finder_workers - 1 else np.iinfo(np.uint64).max
right_idx = left_idx + signatures['hash'][left_idx:].searchsorted(right_hash, side='right')
if right_idx > left_idx:
bts = signatures[left_idx:right_idx].tobytes()
f.write(bts)
left_idx = right_idx
if right_idx >= len(signatures):
break
def get_hashes(self, doc: Document, doc_idx: int) -> list[None] | list[tuple[int, int, int]]:
normalized_url: str = self.config.url_normalizer(doc.metadata['url']) if self.config.url_normalizer else doc.metadata['url']
priority = self.config.document_priority(doc) if self.config.document_priority else 1
hashes = [(self.hash_fc(normalized_url), priority, doc_idx)]
return hashes
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1):
signatures = []
for (doc_idx, doc) in enumerate(data):
with self.stats.time_stats:
self.stat_update(StatHints.total)
signatures.extend(self.get_hashes(doc, doc_idx))
self.save_hashes(rank, signatures)
def read_sigs(file: AbstractBufferedFile, file_id: int, hash_config: HashConfig, index_file: bool=False, lines_to_buffer: int=5) -> Generator[HashSig, None, None]:
last = None
line_format = f'{hash_config.struct_format}HI' if not index_file else hash_config.struct_format
with file as f:
file_stem = Path(f.path).name.removesuffix(ExtensionHelperSD.stage_1_signature)
for data in read_tuples_from_file(f, line_format, lines_to_buffer=lines_to_buffer):
assert last is None or data[0] >= last, f'Hash order error. f.tell()={f.tell()!r}, data[0]={data[0]!r}, last={last!r}'
last = data[0]
yield (HashSig(hash_value=data[0], doc_id=-1, file_id=file_id, priority=-1, file_stem=file_stem) if index_file else HashSig(file_id=file_id, file_stem=file_stem, hash_value=data[0], priority=data[1], doc_id=data[2]))
class UrlFindDedups(PipelineStep):
type = '🫂 - DEDUPS'
name = '💥 url-deduplication stage 2'
def __init__(self, data_folder: DataFolderLike, output_folder: DataFolderLike, index_folder: DataFolderLike | None=None, config: UrlDedupConfig | None=None, lines_to_buffer: int=5):
super().__init__()
self.data_folder = get_datafolder(data_folder)
self.output_folder = get_datafolder(output_folder)
self.index_folder = get_datafolder(index_folder) if index_folder else None
self.config = config or UrlDedupConfig()
self.lines_to_buffer = lines_to_buffer
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1):
with self.stats.time_stats:
if world_size == 1:
sig_files = self.data_folder.list_files(glob_pattern='*/*' + ExtensionHelperSD.stage_1_signature)
if any((not sig_file.startswith('0000/') for sig_file in sig_files)):
raise ValueError(f'world_size={world_size!r} but found sig files for different hash buckets. Set tasks=finder_workers')
else:
sig_files = self.data_folder.list_files(subdirectory=f'{rank:04d}', glob_pattern=ExtensionHelperSD.stage_1_signature)
sig_readers = [read_sigs(file, file_i, self.config.hash_config, lines_to_buffer=self.lines_to_buffer) for (file_i, file) in enumerate(self.data_folder.open_files(sig_files))]
index_files = self.index_folder.list_files() if self.index_folder else None
if index_files:
logger.info(f"Found index file(s): {', '.join(index_files)}")
sig_readers.extend([read_sigs(file, len(sig_readers) + file_i, self.config.hash_config, index_file=True, lines_to_buffer=self.lines_to_buffer) for (file_i, file) in enumerate(self.data_folder.open_files(index_files))])
logger.info(f'Initializing pq with {len(sig_readers)} files.')
with ThreadPoolExecutor() as executor:
pq = [x for x in tqdm(executor.map(lambda x: next(x, None), sig_readers), total=len(sig_readers), desc='Initializing pq...') if x]
heapq.heapify(pq)
logger.info('PQ initialized.')
output_mg = self.output_folder.get_output_file_manager(mode='wb')
last: HashSig | None = None
packer = struct.Struct('<I')
while pq:
v: HashSig = heapq.heappop(pq)
if last and last.hash_value == v.hash_value and (not v.is_from_index()):
out_filename = f'{rank:04d}/{v.file_stem}{ExtensionHelperSD.stage_2_duplicates}'
if not index_files or last.is_from_index() or (not self.config.only_dedup_in_index):
doc_id_bytes = packer.pack(v.doc_id)
output_mg.write(out_filename, doc_id_bytes)
last = v
new_v = next(sig_readers[v.file_id], None)
if new_v:
heapq.heappush(pq, new_v)
output_mg.close()
class UrlDedupFilter(PipelineStep):
type = '🫂 - DEDUPS'
name = '💥 url-deduplication stage 3'
def __init__(self, data_folder: DataFolderLike, config: UrlDedupConfig | None=None, exclusion_writer: DiskWriter | None=None):
super().__init__()
self.data_folder = get_datafolder(data_folder)
self.config = config or UrlDedupConfig()
self.exclusion_writer = exclusion_writer
def read_duplicates(self, file: BinaryIO, dup_dtype: np.dtype) -> np.ndarray:
with file as f:
return read_np_from_file(f, dtype=dup_dtype, is_local_file=self.data_folder.is_local())
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1):
folders = self.data_folder.list_files(include_directories=True, recursive=False)
files = [f for f in [f'{folder}/{rank:05d}{ExtensionHelperSD.stage_2_duplicates}' for folder in folders] if self.data_folder.exists(f)]
logger.info(f'Loading duplicate indexes from {len(files)} results files.')
dup_dtype = get_sig_dtype(self.config.hash_config)[2]
all_dups = np.array([], dtype=dup_dtype)
if files:
with ThreadPoolExecutor() as pool:
read_partial = partial(self.read_duplicates, dup_dtype=dup_dtype)
all_dups = np.concatenate(list(tqdm(pool.map(read_partial, self.data_folder.open_files(files)), total=len(files))), axis=0)
all_dups.sort()
logger.info('Loaded duplicate indexes.')
dups_doc_i = 0
with self.exclusion_writer if self.exclusion_writer else contextlib.nullcontext() as writer:
with self.stats.time_stats:
for (doc_idx, doc) in enumerate(data):
self.stat_update(StatHints.total)
with self.stats.time_stats:
if dups_doc_i < all_dups.shape[0] and all_dups[dups_doc_i] == doc_idx:
if writer:
writer.write(doc, rank=rank)
self.stat_update(StatHints.dropped)
dups_doc_i += 1
else:
self.stat_update(StatHints.forwarded)
self.update_doc_stats(doc)
yield doc
class UrlDedupBuildIndex(PipelineStep):
type = '🫂 - DEDUP'
name = '💥 url-deduplication build index'
def __init__(self, data_folder: DataFolderLike, output_folder: DataFolderLike, index_name: str, config: UrlDedupConfig | None=None, lines_to_buffer: int=5):
super().__init__()
self.data_folder = get_datafolder(data_folder)
self.output_folder = get_datafolder(output_folder)
self.index_name = index_name
self.lines_to_buffer = lines_to_buffer
self.config = config or UrlDedupConfig()
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1):
assert world_size == 1, 'UrlDedupBuildIndex can only run on a single worker.'
with self.stats.time_stats:
sig_files = self.data_folder.list_files(glob_pattern=ExtensionHelperSD.stage_1_signature)
sig_readers = [read_sigs(file, file_i, self.config.hash_config, lines_to_buffer=self.lines_to_buffer) for (file_i, file) in enumerate(self.data_folder.open_files(sig_files))]
pq = [next(sig_reader) for sig_reader in sig_readers]
heapq.heapify(pq)
with self.output_folder.open(f'{self.index_name}.{ExtensionHelperSD.index}', mode='wb') as out_f:
last = None
while pq:
v: HashSig = heapq.heappop(pq)
if last != v.hash_value:
out_f.write(struct.pack(f'<{self.config.hash_config.struct_format}', v.hash_value))
last = v.hash_value
new_v = next(sig_readers[v.file_id], None)
if new_v:
heapq.heappush(pq, new_v)
# File: datatrove-main/src/datatrove/pipeline/extractors/base.py
from abc import abstractmethod
from concurrent.futures import ThreadPoolExecutor
from datatrove.data import DocumentsPipeline
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.logging import logger
from datatrove.utils.typeshelper import StatHints
class BaseExtractor(PipelineStep):
type = '🛢 - EXTRAC'
@abstractmethod
def __init__(self, timeout: float=0.1):
super().__init__()
self.timeout = timeout
@abstractmethod
def extract(self, text: str) -> str:
pass
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
with ThreadPoolExecutor() as executor:
for doc in data:
self.stat_update(StatHints.total)
with self.track_time():
future = executor.submit(self.extract, doc.text)
try:
doc.text = future.result(timeout=self.timeout)
except TimeoutError:
logger.warning('⏰ Timeout while cleaning record text. Skipping record.')
continue
except Exception as e:
logger.warning(f'❌ Error "{e}" while cleaning record text. Skipping record.')
continue
if doc.text:
self.stat_update(StatHints.forwarded)
self.update_doc_stats(doc)
yield doc
else:
self.stat_update(StatHints.dropped)
# File: datatrove-main/src/datatrove/pipeline/extractors/modular.py
import re
from .base import BaseExtractor
class ReadabilityInscriptis(BaseExtractor):
_requires_dependencies = ['inscriptis', ('readability', 'readability-lxml @ git+https://github.com/huggingface/python-readability.git@speedup')]
def __init__(self, max_new_lines: int=2, min_text_length=25, min_text_score=20, timeout: float=0.1):
from inscriptis.css_profiles import CSS_PROFILES
from inscriptis.model.config import ParserConfig
super().__init__(timeout)
self.min_text_length = min_text_length
self.min_text_score = min_text_score
self.new_line_chars = '\n' * max_new_lines
self.regex_excessive_lines = re.compile('(' + self.new_line_chars + '\n+)')
self._parser_config = ParserConfig(css=CSS_PROFILES['strict'])
def extract(self, text: str) -> str:
from inscriptis import get_text
from readability import Document as _Document
parsed_doc = _Document(text, min_text_length=self.min_text_length, min_text_score=self.min_text_score)
clean_html = parsed_doc.summary(html_partial=True)
text = get_text(clean_html, self._parser_config).strip()
return self.regex_excessive_lines.sub(self.new_line_chars, text)
# File: datatrove-main/src/datatrove/pipeline/extractors/trafilatura.py
from .base import BaseExtractor
class Trafilatura(BaseExtractor):
name = '⛏ Trafilatura'
_requires_dependencies = ['trafilatura']
def __init__(self, favour_precision: bool=True, include_images: bool=False, timeout: float=0.1, deduplicate: bool=True, **kwargs):
super().__init__(timeout)
self.favour_precision = favour_precision
self.include_images = include_images
self.deduplicate = deduplicate
self.kwargs = kwargs
if self.include_images:
raise NotImplementedError
def extract(self, text: str) -> str:
from trafilatura import extract
return extract(text, favor_precision=self.favour_precision, include_comments=False, deduplicate=self.deduplicate, **self.kwargs)
# File: datatrove-main/src/datatrove/pipeline/filters/__init__.py
from .c4_filters import C4BadWordsFilter, C4ParagraphFilter, C4QualityFilter
from .fasttext_filter import FastTextClassifierFilter
from .fineweb_quality_filter import FineWebQualityFilter
from .gopher_quality_filter import GopherQualityFilter
from .gopher_repetition_filter import GopherRepetitionFilter
from .lambda_filter import LambdaFilter
from .language_filter import LanguageFilter
from .regex_filter import RegexFilter
from .sampler_filter import SamplerFilter
from .unigram_log_probs import UnigramLogProbFilter
from .url_filter import URLFilter
# File: datatrove-main/src/datatrove/pipeline/filters/base_filter.py
import contextlib
from abc import ABC, abstractmethod
from typing import List, Tuple
from loguru import logger
from datatrove.data import Document, DocumentsPipeline
from datatrove.pipeline.base import PipelineStep
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.batching import batched
from datatrove.utils.typeshelper import StatHints
def get_filter_result(res):
(result, reason) = (res, None)
if isinstance(result, tuple):
(result, reason) = res
return (result, reason)
class BaseFilter(PipelineStep, ABC):
type = '🔻 - FILTER'
def __init__(self, exclusion_writer: DiskWriter=None, batch_size: int=1):
super().__init__()
self.exclusion_writer = exclusion_writer
self.batch_size = batch_size
if self.batch_size > 1 and type(self).filter_batch == BaseFilter.filter_batch:
logger.warning(f'batch_size={batch_size!r} > 1 but {self} does not implement a custom filter_batch method.')
@abstractmethod
def filter(self, doc: Document) -> bool | Tuple[bool, str]:
raise NotImplementedError
def filter_batch(self, batch: List[Document]) -> List[bool | Tuple[bool, str]]:
return list(map(self.filter, batch))
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
with self.exclusion_writer if self.exclusion_writer else contextlib.nullcontext() as writer:
for batch in batched(data, self.batch_size):
if self.batch_size > 1:
self.stat_update('batches')
with self.track_time('batch' if self.batch_size > 1 else None):
batch_filter_result = self.filter_batch(batch)
for (doc, doc_filter_result) in zip(batch, batch_filter_result):
self.stat_update(StatHints.total)
(filter_result, reason) = get_filter_result(doc_filter_result)
if filter_result:
self.stat_update(StatHints.forwarded)
self.update_doc_stats(doc)
yield doc
else:
self.stat_update(StatHints.dropped)
if reason:
self.stat_update(f'dropped_{reason}')
if self.exclusion_writer:
if reason:
doc.metadata['filter_reason'] = reason
writer.write(doc, rank)
# File: datatrove-main/src/datatrove/pipeline/filters/c4_filters.py
import heapq
import re
from numpy.random import default_rng
from datatrove.data import Document
from datatrove.io import cached_asset_path_or_download
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.typeshelper import Languages
from datatrove.utils.word_tokenizers import load_word_tokenizer
CITATION_REGEX = re.compile('\\[\\d*]|\\[edit]|\\[citation needed]')
END_PUNCTUATION = ('.', '?', '!', '"', "'")
ELLIPSIS = '...'
POLICY_SUBSTRINGS = ['terms of use', 'privacy policy', 'cookie policy', 'uses cookies', 'use of cookies', 'use cookies']
class C4QualityFilter(BaseFilter):
name = '⛰ C4 Quality'
def __init__(self, exclusion_writer: DiskWriter=None, split_paragraph: bool=True, remove_citations: bool=True, filter_no_terminal_punct: bool=True, min_num_sentences: int=5, min_words_per_line: int=3, max_word_length: int=1000, filter_lorem_ipsum: bool=True, filter_javascript: bool=True, filter_curly_bracket: bool=True, filter_policy: bool=True, language: str=Languages.english):
super().__init__(exclusion_writer)
self.split_paragraph = split_paragraph
self.remove_citations = remove_citations
self.filter_no_terminal_punct = filter_no_terminal_punct
self.min_num_sentences = min_num_sentences
self.min_words_per_line = min_words_per_line
self.max_word_length = max_word_length
self.filter_lorem_ipsum = filter_lorem_ipsum
self.filter_javascript = filter_javascript
self.filter_curly_bracket = filter_curly_bracket
self.filter_policy = filter_policy
self.tokenizer = load_word_tokenizer(language)
def filter(self, doc: Document) -> bool | tuple[bool, str]:
lines = doc.text.splitlines() if self.split_paragraph else self.tokenizer.sent_tokenize(doc.text)
num_sentences = 0
kept_lines = []
for line in lines:
line = line.strip()
words = line.split()
self.stat_update('line-total')
if self.max_word_length != -1 and any((len(word) > self.max_word_length for word in words)):
self.stat_update('line-filter-too_long_word')
continue
if self.remove_citations:
line = CITATION_REGEX.sub('', line)
if self.filter_no_terminal_punct and (not line.endswith(END_PUNCTUATION) or line.endswith(ELLIPSIS)):
self.stat_update('line-filter-no_terminal_punc')
continue
if len(words) < self.min_words_per_line:
self.stat_update('line-filter-too_few_words')
continue
line_l = line.lower()
if self.filter_lorem_ipsum and 'lorem ipsum' in line_l:
return (False, 'lorem_ipsum')
if self.filter_javascript and 'javascript' in line_l:
self.stat_update('line-filter-javascript')
continue
if self.filter_curly_bracket and '{' in line:
return (False, 'curly_bracket')
if self.filter_policy and any((p in line_l for p in POLICY_SUBSTRINGS)):
self.stat_update('line-filter-policy')
continue
if self.min_num_sentences != -1:
num_sentences += len(self.tokenizer.sent_tokenize(line)) if self.split_paragraph else 1
kept_lines.append(line)
self.stat_update('line-kept')
if num_sentences < self.min_num_sentences:
return (False, 'too_few_sentences')
doc.text = ('\n' if self.split_paragraph else ' ').join(kept_lines).strip()
return True
class C4ParagraphFilter(BaseFilter):
name = '⛰ C4 Paragraph'
def __init__(self, exclusion_writer: DiskWriter=None):
super().__init__(exclusion_writer)
self.min_paragraphs = 3
self.min_paragraph_len = 200
self.line_delimiter = '\n'
def paragraph_filter(self, page):
lines = page.split(self.line_delimiter)
if len(lines) < self.min_paragraphs or min(heapq.nlargest(3, [len(line) for line in lines])) < self.min_paragraph_len:
return False
return True
def filter(self, doc: Document) -> bool | tuple[bool, str]:
if not self.paragraph_filter(doc.text):
return (False, f'< {self.min_paragraphs} paragraphs')
return True
_EN_BADWORDS_URL = 'https://raw.githubusercontent.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words/25e679f03d96baa721cde20db9944649e8d0a844/en'
_BADWORDS_URL = 'https://raw.githubusercontent.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words/5faf2ba42d7b1c0977169ec3611df25a3c08eb13/'
_BADWORDS_LANGS = ['ar', 'cs', 'da', 'de', 'en', 'eo', 'es', 'fa', 'fi', 'fil', 'fr', 'fr-CA-u-sd-caqc', 'hi', 'hu', 'it', 'ja', 'kab', 'ko', 'nl', 'no', 'pl', 'pt', 'ru', 'sv', 'th', 'tlh', 'tr', 'zh']
_BADWORDS_ALLOWLIST = {'ja': {'sm', 'グロ', '女の子'}, 'zh': {'性'}}
class C4BadWordsFilter(BaseFilter):
name = '⛰ C4 Badwords'
def __init__(self, keep_fraction: float=0.0, fail_on_missing_language: bool=True, seed: int=None, default_language: str='en', exclusion_writer: DiskWriter=None):
super().__init__(exclusion_writer)
self.keep_fraction = keep_fraction
self.fail_on_missing_language = fail_on_missing_language
self._badwords_regex: dict[str, re.Pattern] = {}
self.uniform = default_rng(seed).uniform
self.default_language = default_language
def _get_badwords(self, lang: str):
if lang not in self._badwords_regex:
if lang not in _BADWORDS_LANGS:
if self.fail_on_missing_language:
raise ValueError(f'There is not badwords list available for "{lang}". Set fail_on_missing_language=False to continue anyway.')
else:
return None
local_path = cached_asset_path_or_download(_BADWORDS_URL + lang if lang != 'en' else _EN_BADWORDS_URL, namespace='filters', subfolder='c4_badwords')
badwords: set[str] = set()
with open(local_path, 'rt') as f:
badwords.update((line.strip() for line in f))
for (lang, allowlist) in _BADWORDS_ALLOWLIST.items():
badwords -= allowlist
words = [re.escape(w) for w in badwords]
self._badwords_regex[lang] = re.compile('|'.join(words)) if lang in ('ja', 'th', 'zh') else re.compile('(?:\\W|^)({})(?:\\W|$)'.format('|'.join(words)))
return self._badwords_regex[lang]
def filter(self, doc: Document) -> bool | tuple[bool, str]:
lang: str = doc.metadata.get('language', self.default_language)
badwords_regex = self._get_badwords(lang)
if badwords_regex is None:
self.stat_update('missing_badwords_lang', f'missing_badwords_lang_{lang}')
return True
badwords_found = badwords_regex.search(doc.text.lower())
if badwords_found is not None:
self.stat_update('documents_with_badwords', f'documents_with_badwords_{lang}')
if self.keep_fraction > 0.0 and self.uniform() < self.keep_fraction:
self.stat_update('document_kept_with_badwords', f'document_kept_with_badwords_{lang}')
return True
self.stat_update(f'document_removed_with_badwords_{lang}')
return (False, 'document_removed_with_badwords')
return True
# File: datatrove-main/src/datatrove/pipeline/filters/fasttext_filter.py
from collections import defaultdict
from typing import Tuple
import numpy as np
from datatrove.data import Document
from datatrove.io import cached_asset_path_or_download
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.text import SPLIT_TEXT_DOCUMENTS, split_into_parts
class FastTextClassifierFilter(BaseFilter):
name = '🤖 fastText'
_requires_dependencies = [('fasttext', 'fasttext-wheel'), 'fasteners']
def __init__(self, model_url: str, keep_labels: Tuple[str, float] | list[Tuple[str, float]] | None=None, remove_labels: Tuple[str, float] | list[Tuple[str, float]] | None=None, save_labels_in_metadata: bool=True, exclusion_writer: DiskWriter | None=None, newline_replacement='', filter_mode: str=SPLIT_TEXT_DOCUMENTS):
super().__init__(exclusion_writer)
self.model_url = model_url
self.keep_labels = keep_labels
self.remove_labels = remove_labels
self.filter_mode = filter_mode
if keep_labels and remove_labels:
raise ValueError('You can only supply one of `keep_labels` or `remove_labels`.')
self.newline_replacement = newline_replacement
if keep_labels and isinstance(keep_labels[0], str):
self.keep_labels = [keep_labels]
if remove_labels and isinstance(remove_labels[0], str):
self.remove_labels = [remove_labels]
self.save_labels_in_metadata = save_labels_in_metadata
self._model = None
@property
def model(self):
if self._model is None:
from fasttext.FastText import _FastText
model_file = cached_asset_path_or_download(self.model_url, namespace='filters', subfolder='fasttext', desc='fast-text model')
self._model = _FastText(model_file)
available_labels = [x.removeprefix('__label__') for x in self._model.labels]
for (label, _) in self.keep_labels or [] + self.remove_labels or []:
if label not in available_labels:
raise ValueError(f"Label '{label}' passed as keep_labels or remove_labels is not available in this FastText model. Available labels: {available_labels}")
return self._model
def filter(self, doc: Document) -> bool:
def check_label_scores(unit_scores):
if self.keep_labels:
return any((unit_scores.get(f'__label__{label}', -9000000000.0) >= min_score for (label, min_score) in self.keep_labels))
else:
return not self.remove_labels or not any((unit_scores.get(f'__label__{label}', -9000000000.0) >= min_score for (label, min_score) in self.remove_labels))
units = split_into_parts(doc.text, mode=self.filter_mode)
kept_spans = []
label_scores = defaultdict(list)
for unit in units:
(labels, scores) = self.model.predict(unit.strip().replace('\n', self.newline_replacement), k=-1)
if self.save_labels_in_metadata:
for (label, score) in zip(labels, scores):
label_scores[label].append(score)
if check_label_scores(dict(zip(labels, scores))):
kept_spans.append(unit)
self.stat_update('kept_span')
else:
self.stat_update('removed_span')
doc.text = ''.join(kept_spans)
if self.save_labels_in_metadata:
doc.metadata.update({label: np.mean(scores).item() for (label, scores) in label_scores.items()})
return not not doc.text.strip()
# File: datatrove-main/src/datatrove/pipeline/filters/fineweb_quality_filter.py
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.filters.gopher_repetition_filter import find_duplicates
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.typeshelper import Languages
from datatrove.utils.word_tokenizers import load_word_tokenizer
class FineWebQualityFilter(BaseFilter):
name = '🍷 FineWeb Quality'
def __init__(self, exclusion_writer: DiskWriter=None, line_punct_thr: float=0.12, line_punct_exclude_zero: bool=False, short_line_thr: float=0.67, short_line_length: int=30, char_duplicates_ratio: float=0.01, new_line_ratio: float=0.3, language: str=Languages.english):
super().__init__(exclusion_writer)
self.line_punct_thr = line_punct_thr
self.line_punct_exclude_zero = line_punct_exclude_zero
self.short_line_threshold = short_line_thr
self.short_line_length = short_line_length
self.char_duplicates_ratio = char_duplicates_ratio
self.new_line_ratio = new_line_ratio
self.tokenizer = load_word_tokenizer(language)
def filter(self, doc) -> bool | tuple[bool, str]:
stop_chars = ('.', "'", '"', '!', '?')
lines = doc.text.split('\n')
ratio = sum((1 for line in lines if line.endswith(stop_chars))) / len(lines)
if ratio <= self.line_punct_thr and (not (ratio == 0 and self.line_punct_exclude_zero)):
return (False, 'line_punct_ratio')
ratio = sum((1 for line in lines if len(line) <= self.short_line_length)) / len(lines)
if ratio >= self.short_line_threshold:
return (False, 'short_line_ratio')
non_empty_lines = [line for line in lines if line.strip() != '']
ratio = find_duplicates(non_empty_lines)[1] / len(doc.text.replace('\n', ''))
if ratio >= self.char_duplicates_ratio:
return (False, 'char_dup_ratio')
words = self.tokenizer.word_tokenize(doc.text)
new_line = doc.text.count('\n')
if new_line / len(words) > self.new_line_ratio:
return (False, 'list_ratio')
return True
# File: datatrove-main/src/datatrove/pipeline/filters/gopher_quality_filter.py
import numpy as np
from datatrove.data import Document
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.text import PUNCTUATION_SET
from datatrove.utils.typeshelper import Languages
from datatrove.utils.word_tokenizers import load_word_tokenizer
STOP_WORDS = ['the', 'be', 'to', 'of', 'and', 'that', 'have', 'with']
class GopherQualityFilter(BaseFilter):
name = '🥇 Gopher Quality'
def __init__(self, min_doc_words: int | None=50, max_doc_words: int | None=100000, min_avg_word_length: int | None=3, max_avg_word_length: int | None=10, max_symbol_word_ratio: float | None=0.1, max_bullet_lines_ratio: float | None=0.9, max_ellipsis_lines_ratio: float | None=0.3, max_non_alpha_words_ratio: float | None=0.8, min_stop_words: int | None=2, stop_words: list[str] | None=None, exclusion_writer: DiskWriter=None, language: str=Languages.english):
super().__init__(exclusion_writer)
self.min_doc_words = min_doc_words
self.max_doc_words = max_doc_words
self.min_avg_word_length = min_avg_word_length
self.max_avg_word_length = max_avg_word_length
self.max_symbol_word_ratio = max_symbol_word_ratio
self.max_bullet_lines_ratio = max_bullet_lines_ratio
self.max_ellipsis_lines_ratio = max_ellipsis_lines_ratio
self.max_non_alpha_words_ratio = max_non_alpha_words_ratio
self.min_stop_words = min_stop_words
self.stop_words = set(STOP_WORDS if stop_words is None else stop_words)
self.tokenizer = load_word_tokenizer(language)
def filter(self, doc: Document) -> bool | tuple[bool, str]:
text = doc.text
words = self.tokenizer.word_tokenize(text)
n_words = len(words)
non_symbol_words = [w for w in words if any((ch not in PUNCTUATION_SET for ch in w))]
n_non_symbol_words_words = len(non_symbol_words)
if self.min_doc_words and n_non_symbol_words_words < self.min_doc_words:
return (False, 'gopher_short_doc')
if self.max_doc_words and n_non_symbol_words_words > self.max_doc_words:
return (False, 'gopher_long_doc')
avg_n_words = np.mean([len(w) for w in non_symbol_words])
if self.min_avg_word_length and avg_n_words < self.min_avg_word_length:
return (False, 'gopher_below_avg_threshold')
if self.max_avg_word_length and avg_n_words > self.max_avg_word_length:
return (False, 'gopher_above_avg_threshold')
if self.max_symbol_word_ratio and text.count('#') / n_words > self.max_symbol_word_ratio:
return (False, 'gopher_too_many_hashes')
if self.max_symbol_word_ratio and (text.count('...') + text.count('…')) / n_words > self.max_symbol_word_ratio:
return (False, 'gopher_too_many_ellipsis')
lines = text.splitlines()
if self.max_bullet_lines_ratio and sum((s.lstrip().startswith('•') or s.lstrip().startswith('-') for s in lines)) / len(lines) > self.max_bullet_lines_ratio:
return (False, 'gopher_too_many_bullets')
if self.max_ellipsis_lines_ratio and sum((s.rstrip().endswith('...') or s.rstrip().endswith('…') for s in lines)) / len(lines) > self.max_ellipsis_lines_ratio:
return (False, 'gopher_too_many_end_ellipsis')
if self.max_non_alpha_words_ratio and sum([any((c.isalpha() for c in w)) for w in words]) / n_words < self.max_non_alpha_words_ratio:
return (False, 'gopher_below_alpha_threshold')
if self.min_stop_words and sum((w in self.stop_words for w in words)) < self.min_stop_words:
return (False, 'gopher_enough_stop_words')
return True
# File: datatrove-main/src/datatrove/pipeline/filters/gopher_repetition_filter.py
import re
from collections import Counter
from datatrove.data import Document
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.typeshelper import Languages
from datatrove.utils.word_tokenizers import load_word_tokenizer
''
def get_n_grams(words: list[str], n: int) -> list[str]:
return [' '.join(words[i:i + n]) for i in range(len(words) - n + 1)]
def find_duplicates(x: list[str]) -> tuple[int, int]:
unique_x = set()
duplicate_chars = 0
duplicate_elements = 0
for element in x:
if element in unique_x:
duplicate_chars += len(element)
duplicate_elements += 1
else:
unique_x.add(element)
return (duplicate_elements, duplicate_chars)
def find_top_duplicate(x: list[str]) -> int:
counter = Counter()
for element in x:
counter[element] += 1
top_n_gram = counter.most_common(1)[0]
return len(top_n_gram[0]) * top_n_gram[1]
def find_all_duplicate(words: list[str], n: int) -> int:
n_words = len(words)
unique = set()
(repeated_chars, idx) = (0, 0)
while idx < n_words - n + 1:
n_gram = ''.join(words[idx:idx + n])
if n_gram in unique:
repeated_chars += len(n_gram)
idx += n
else:
unique.add(n_gram)
idx += 1
assert repeated_chars <= len(''.join(words))
return repeated_chars
class GopherRepetitionFilter(BaseFilter):
name = '👯 Gopher Repetition'
def __init__(self, dup_line_frac: float | None=0.3, dup_para_frac: float | None=0.3, dup_line_char_frac: float | None=0.2, dup_para_char_frac: float | None=0.2, top_n_grams: tuple[tuple[int, float]]=((2, 0.2), (3, 0.18), (4, 0.16)), dup_n_grams: tuple[tuple[int, float]]=((5, 0.15), (6, 0.14), (7, 0.13), (8, 0.12), (9, 0.11), (10, 0.1)), exclusion_writer: DiskWriter=None, language: str=Languages.english):
super().__init__(exclusion_writer)
self.dup_line_frac = dup_line_frac
self.dup_para_frac = dup_para_frac
self.dup_line_char_frac = dup_line_char_frac
self.dup_para_char_frac = dup_para_char_frac
self.top_n_grams = top_n_grams
self.dup_n_grams = dup_n_grams
self.paragraph_exp = re.compile('\\n{2,}')
self._line_splitter = re.compile('\n+')
self.tokenizer = load_word_tokenizer(language)
def filter(self, doc: Document) -> bool | tuple[bool, str]:
text = doc.text
paragraphs = self.paragraph_exp.split(text.strip())
(paragraphs_duplicates, char_duplicates) = find_duplicates(paragraphs)
if self.dup_para_frac and paragraphs_duplicates / len(paragraphs) > self.dup_para_frac:
return (False, 'dup_para_frac')
if self.dup_para_char_frac and char_duplicates / len(text) > self.dup_para_char_frac:
return (False, 'dup_para_char_frac')
lines = self._line_splitter.split(text)
(line_duplicates, char_duplicates) = find_duplicates(lines)
if self.dup_line_frac and line_duplicates / len(lines) > self.dup_line_frac:
return (False, 'dup_line_frac')
if self.dup_line_char_frac and char_duplicates / len(text) > self.dup_line_char_frac:
return (False, 'dup_line_char_frac')
words = self.tokenizer.word_tokenize(text)
for (n, n_frac) in self.top_n_grams:
n_grams = get_n_grams(words, n)
if not n_grams:
continue
top_char_length = find_top_duplicate(n_grams)
if top_char_length / len(text) > n_frac:
return (False, f'top_{n}_gram')
for (n, n_frac) in self.dup_n_grams:
n_duplicates_char = find_all_duplicate(words, n)
if n_duplicates_char / len(text) > n_frac:
return (False, f'duplicated_{n}_n_grams')
return True
# File: datatrove-main/src/datatrove/pipeline/filters/lambda_filter.py
from typing import Callable
from datatrove.data import Document
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
class LambdaFilter(BaseFilter):
name = '👤 Lambda'
def __init__(self, filter_function: Callable[[Document], bool], exclusion_writer: DiskWriter=None):
super().__init__(exclusion_writer)
self.filter_function = filter_function
def filter(self, doc: Document) -> bool:
return self.filter_function(doc)
# File: datatrove-main/src/datatrove/pipeline/filters/language_filter.py
from typing import Literal
from datatrove.data import Document
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.lid import FT176LID, GlotLID
class LanguageFilter(BaseFilter):
name = '🌍 Language ID'
_requires_dependencies = [('fasttext', 'fasttext-wheel'), 'fasteners']
def __init__(self, languages: list[str] | str | None=None, language_threshold: float=0.65, exclusion_writer: DiskWriter=None, backend: Literal['ft176', 'glotlid']='ft176', label_only: bool=False, keep_top_pairs_threshold: float=-1):
super().__init__(exclusion_writer)
self.language_threshold = language_threshold
if isinstance(languages, str):
languages = list(languages)
self.languages = languages
self.backend = backend
self.model = FT176LID(languages) if backend == 'ft176' else GlotLID(languages)
self.label_only = label_only
self.keep_top_pairs_threshold = keep_top_pairs_threshold
def filter(self, doc: Document) -> bool:
(best_lang_pair, lang_pairs) = self.model.predict(doc)
(lang, lang_score) = best_lang_pair
if self.backend == 'glotlid':
(lang, script) = lang.split('_')
doc.metadata['language_script'] = script
doc.metadata['language'] = lang
doc.metadata['language_score'] = lang_score
if self.keep_top_pairs_threshold != -1:
for (key, value) in lang_pairs.items():
if value > self.keep_top_pairs_threshold:
doc.metadata[f'top_language_{key}_score'] = value
return self.label_only or (self.languages and any((score > self.language_threshold for score in lang_pairs.values()))) or (self.languages is None and lang_score > self.language_threshold)
# File: datatrove-main/src/datatrove/pipeline/filters/regex_filter.py
import re
from datatrove.data import Document
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
class RegexFilter(BaseFilter):
name = '🕵 Regex'
def __init__(self, regex_exp: str, exclusion_writer: DiskWriter=None):
super().__init__(exclusion_writer)
self.regex = re.compile(regex_exp)
def filter(self, doc: Document) -> bool:
return not self.regex.search(doc.text)
# File: datatrove-main/src/datatrove/pipeline/filters/sampler_filter.py
from numpy.random import default_rng
from datatrove.data import Document
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
class SamplerFilter(BaseFilter):
name = '🎲 Sampler'
def __init__(self, rate: float | None=0.5, seed: int=None, exclusion_writer: DiskWriter=None):
""""""
super().__init__(exclusion_writer)
self.rate = rate
self.uniform = default_rng(seed).uniform
def filter(self, doc: Document) -> bool | tuple[bool, str]:
return self.uniform() < self.rate
# File: datatrove-main/src/datatrove/pipeline/filters/unigram_log_probs.py
import csv
import os
import urllib.request
import numpy as np
from huggingface_hub import cached_assets_path
from datatrove.data import Document
from datatrove.pipeline.filters.base_filter import BaseFilter
from datatrove.pipeline.writers.disk_base import DiskWriter
from datatrove.utils.logging import logger
from datatrove.utils.typeshelper import Languages
from datatrove.utils.word_tokenizers import load_word_tokenizer
UNIGRAM_DOWNLOAD = 'https://ai2-s2-research-public.s3-us-west-2.amazonaws.com/lucas/google-1T-unigram/unigram_freq.csv'
class UnigramLogProbFilter(BaseFilter):
name = '🧑\u200d🍳 Unigram log-prob filter'
def __init__(self, logprobs_threshold: float=-10, exclusion_writer: DiskWriter=None, language: str=Languages.english):
super().__init__(exclusion_writer)
self.logprobs_threshold = logprobs_threshold
self.unigram_frequencies = self.get_frequencies()
self.tokenizer = load_word_tokenizer(language)
def get_frequencies(self):
download_dir = cached_assets_path(library_name='datatrove', namespace='filters', subfolder='unigram_logprob_filter')
unigram_freq_file = os.path.join(download_dir, 'unigram_freq.csv')
if not os.path.isfile(unigram_freq_file):
logger.info('⬇️ Downloading unigram-frequencies ...')
urllib.request.urlretrieve(UNIGRAM_DOWNLOAD, unigram_freq_file)
words = []
counts = []
with open(unigram_freq_file, encoding='utf-8', newline='') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
words.append(row['word'])
counts.append(int(row['count']))
total_count = sum(counts)
return {word: count / total_count for (word, count) in zip(words, counts)}
def get_logprob(self, doc):
words = self.tokenizer.word_tokenize(doc.text)
freqs = [self.unigram_frequencies.get(word.lower(), 1e-09) for word in words]
if len(freqs) == 0:
return 0
return sum([np.log(f) for f in freqs]) / len(freqs)
def filter(self, doc: Document) -> bool:
return self.get_logprob(doc) > self.logprobs_threshold
# File: datatrove-main/src/datatrove/pipeline/filters/url_filter.py
import os
import re
import tarfile
from typing import Iterable
from huggingface_hub import cached_assets_path
from datatrove.data import Document
from datatrove.io import safely_create_file
from datatrove.utils._import_utils import ASSETS_PATH
from datatrove.utils.logging import logger
from ..writers.disk_base import DiskWriter
from .base_filter import BaseFilter
normalizer = re.compile('[^a-zA-Z0-9]+')
def normalize(text, replace=''):
return normalizer.sub(replace, text).lower()
def parse_list(line, do_normalize=True):
return {normalize(x) if do_normalize else x.strip() for x in line if x[0] != '#'}
def get_list(abs_path: str, file_name: str, extra: set, do_normalize: bool=True):
with open(os.path.join(abs_path, file_name)) as f:
return parse_list(f, do_normalize).union(extra)
class URLFilter(BaseFilter):
name = '😈 Url-filter'
_requires_dependencies = ['tldextract', 'fasteners', ('ahocorasick', 'pyahocorasick')]
def __init__(self, soft_word_threshold: int=2, extra_domains: Iterable=None, extra_urls: Iterable=None, banned_words: Iterable=None, banned_subwords: Iterable=None, soft_banned_words: Iterable=None, use_integrated_lists: bool=True, exclusion_writer: DiskWriter=None):
import ahocorasick
from tldextract import TLDExtract
super().__init__(exclusion_writer)
self.soft_word_threshold = soft_word_threshold
self.block_listed_domains = parse_list(extra_domains, do_normalize=False) if extra_domains else set()
self.block_listed_url = parse_list(extra_urls, do_normalize=False) if extra_urls else set()
self.banned_words = parse_list(banned_words) if banned_words else set()
self.banned_subwords = parse_list(banned_subwords) if banned_subwords else set()
self.soft_banned_words = parse_list(soft_banned_words) if soft_banned_words else set()
self.use_integrated_lists = use_integrated_lists
self._downloaded = False
self.tldextractor = TLDExtract()
self.banned_subwords_automaton = ahocorasick.Automaton(ahocorasick.STORE_INTS)
for word in self.banned_subwords:
self.banned_subwords_automaton.add_word(word, len(self.banned_subwords_automaton))
if not self.use_integrated_lists:
self.banned_subwords_automaton.make_automaton()
def download_data(self):
if self._downloaded or not self.use_integrated_lists:
return
download_dir = cached_assets_path(library_name='datatrove', namespace='filters', subfolder='url_filter')
file_to_lock = os.path.join(download_dir, 'url_filterblacklists.tar.gz')
def do_extract():
logger.info('💥 Extracting url filter blacklists...')
with tarfile.open(os.path.join(ASSETS_PATH, 'url_filterblacklists.tar.gz'), 'r:gz') as tar:
tar.extractall(download_dir)
logger.info('💥 Extracted url filter blacklists.')
safely_create_file(file_to_lock, do_extract)
self.block_listed_domains = get_list(download_dir, 'adult/domains', self.block_listed_domains, do_normalize=False)
self.block_listed_url = get_list(download_dir, 'adult/urls', self.block_listed_url, do_normalize=False)
self.banned_words = get_list(ASSETS_PATH, 'banned_words.txt', self.banned_words)
self.banned_subwords = get_list(ASSETS_PATH, 'banned_subwords.txt', self.banned_subwords)
self.soft_banned_words = get_list(ASSETS_PATH, 'soft_banned_words.txt', self.soft_banned_words)
for word in self.banned_subwords:
self.banned_subwords_automaton.add_word(word, len(self.banned_subwords_automaton))
self.banned_subwords_automaton.make_automaton()
self._downloaded = True
def filter(self, document: Document) -> bool | tuple[bool, str]:
self.download_data()
url = document.metadata.get('url')
assert url, 'Document does not have url in its metadata'
url_info = self.tldextractor(url)
if url_info.registered_domain in self.block_listed_domains:
return (False, 'domain')
if url_info.fqdn in self.block_listed_domains:
return (False, 'subdomain')
if url in self.block_listed_url:
return (False, 'url')
url_words = set(normalizer.split(url))
if any((word in url_words for word in self.banned_words)):
return (False, 'hard_blacklisted')
nb_soft_words = sum([word in url_words for word in self.soft_banned_words])
if nb_soft_words >= self.soft_word_threshold:
return (False, 'soft_blacklisted')
normalized_space = normalize(url)
if self.banned_subwords and next(self.banned_subwords_automaton.iter(normalized_space), False):
return (False, 'blacklisted_subword')
return True
# File: datatrove-main/src/datatrove/pipeline/formatters/base.py
from abc import ABC, abstractmethod
from datatrove.data import DocumentsPipeline
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.typeshelper import StatHints
class BaseFormatter(PipelineStep, ABC):
type = '✂️ - FORMAT'
def __init__(self):
super().__init__()
@abstractmethod
def format(self, text: str) -> str:
return text
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
for doc in data:
self.stat_update(StatHints.total)
with self.track_time():
doc.text = self.format(doc.text)
yield doc
# File: datatrove-main/src/datatrove/pipeline/formatters/pii.py
import ipaddress
import re
from functools import partial
from typing import Callable
from datatrove.pipeline.formatters.base import BaseFormatter
class PIIReplacer:
def __init__(self, regex: str, replacements: tuple[str, ...] | str, validator: Callable[[str], bool] | None=None):
self.regex: re.Pattern = re.compile(regex)
self.replacements = replacements if type(replacements) is tuple else tuple(replacements) if not isinstance(replacements, str) else (replacements,)
self.validator = validator
self._replace_i = 0
def replace(self, text: str):
def get_replacement(matchobj):
if self.validator and (not self.validator(matchobj.group(0))):
return matchobj.group(0)
replacement = self.replacements[self._replace_i]
self._replace_i = (self._replace_i + 1) % len(self.replacements)
return replacement
return self.regex.sub(get_replacement, text)
def public_ip_validator(ip, public_only: bool=True) -> bool:
try:
ip = ipaddress.ip_address(ip)
return not public_only or ip.is_global
except ValueError:
return False
class PIIFormatter(BaseFormatter):
name = '📞 PII'
def __init__(self, remove_emails: bool=True, remove_ips: bool=True, only_remove_public_ips: bool=True, email_replacement: tuple[str, ...] | str=('email@example.com', 'firstname.lastname@example.org'), ip_replacement: tuple[str, ...] | str=('22.214.171.124', '126.96.36.199', '188.8.131.52', '184.108.40.206', '220.127.116.11', '18.104.22.168')):
super().__init__()
self.remove_emails = remove_emails
self.remove_ips = remove_ips
self.emails_replacer = PIIReplacer("\\b[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[A-Za-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:(?:[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?\\.)+[A-Za-z0-9](?:[A-Za-z0-9-]*[A-Za-z0-9])?|\\[(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[A-Za-z0-9-]*[A-Za-z0-9]:)])", email_replacement)
self.ip_replacer = PIIReplacer('(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)', validator=partial(public_ip_validator, public_only=only_remove_public_ips), replacements=ip_replacement)
def format(self, text: str) -> str:
if self.remove_emails:
text = self.emails_replacer.replace(text)
if self.remove_ips:
text = self.ip_replacer.replace(text)
return text
# File: datatrove-main/src/datatrove/pipeline/formatters/symbol_lines_remover.py
from ...utils.text import PUNCTUATION_SET
from .base import BaseFormatter
class SymbolLinesFormatter(BaseFormatter):
name = ' ⚞ Symbol Lines Remover'
def __init__(self, replace_char: str=''):
super().__init__()
self.replace_char = replace_char
def format(self, text: str) -> str:
formatted = []
in_removed_span = False
for line in text.splitlines():
chars_line = line.strip() != '' and all((c in PUNCTUATION_SET or c == ' ' for c in line))
if chars_line and (not in_removed_span):
if self.replace_char:
formatted.append(self.replace_char)
in_removed_span = True
elif not chars_line:
formatted.append(line)
in_removed_span = False
return '\n'.join(formatted)
# File: datatrove-main/src/datatrove/pipeline/readers/base.py
import random
from abc import abstractmethod
from types import MethodType
from typing import Callable
from tqdm import tqdm
from datatrove.data import Document, DocumentsPipeline
from datatrove.io import DataFileLike, DataFolderLike, get_datafolder, get_shard_from_paths_file
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.logging import logger
class BaseReader(PipelineStep):
type = '📖 - READER'
def __init__(self, limit: int=-1, skip: int=0, adapter: Callable=None, text_key: str='text', id_key: str='id', default_metadata: dict=None):
super().__init__()
self.limit = limit
self.skip = skip
self.text_key = text_key
self.id_key = id_key
self.adapter = MethodType(adapter, self) if adapter else self._default_adapter
self._empty_warning = False
self.default_metadata = default_metadata
def _default_adapter(self, data: dict, path: str, id_in_file: int | str):
return {'text': data.pop(self.text_key, ''), 'id': data.pop(self.id_key, f'{path}/{id_in_file}'), 'media': data.pop('media', []), 'metadata': data.pop('metadata', {}) | data}
def get_document_from_dict(self, data: dict, source_file: str, id_in_file: int | str):
parsed_data = self.adapter(data, source_file, id_in_file)
if not parsed_data.get('text', None):
if not self._empty_warning:
self._empty_warning = True
logger.warning(f'Found document without text, skipping. Is your `text_key` ("{self.text_key}") correct? Available keys: {list(data.keys())}')
return None
document = Document(**parsed_data)
if self.default_metadata:
document.metadata = self.default_metadata | document.metadata
return document
@abstractmethod
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1) -> DocumentsPipeline:
raise NotImplementedError
class BaseDiskReader(BaseReader):
type = '📖 - READER'
def __init__(self, data_folder: DataFolderLike, paths_file: DataFileLike | None=None, limit: int=-1, skip: int=0, file_progress: bool=False, doc_progress: bool=False, adapter: Callable=None, text_key: str='text', id_key: str='id', default_metadata: dict=None, recursive: bool=True, glob_pattern: str | None=None, shuffle_files: bool=False):
super().__init__(limit, skip, adapter, text_key, id_key, default_metadata)
self.data_folder = get_datafolder(data_folder)
self.paths_file = paths_file
self.recursive = recursive
self.glob_pattern = glob_pattern
self.shuffle_files = shuffle_files
self.file_progress = file_progress
self.doc_progress = doc_progress
def get_document_from_dict(self, data: dict, source_file: str, id_in_file: int):
document = super().get_document_from_dict(data, source_file, id_in_file)
if document:
document.metadata.setdefault('file_path', self.data_folder.resolve_paths(source_file))
return document
@abstractmethod
def read_file(self, filepath: str) -> DocumentsPipeline:
raise NotImplementedError
def read_files_shard(self, shard: list[str]) -> DocumentsPipeline:
li = 0
skipped = 0
with tqdm(total=self.limit if self.limit != -1 else None, desc='Document progress', unit='doc', disable=not self.doc_progress) as doc_pbar, tqdm(total=len(shard), desc='File progress', unit='file', disable=not self.file_progress) as file_pbar:
for (i, filepath) in enumerate(shard):
self.stat_update('input_files')
logger.info(f'Reading input file {filepath}, {i + 1}/{len(shard)}')
di = 0
ndocs = 0
for (di, document) in enumerate(self.read_file(filepath)):
if skipped < self.skip:
skipped += 1
continue
if self.limit != -1 and li >= self.limit:
break
yield document
doc_pbar.update()
li += 1
ndocs += 1
file_pbar.update()
self.stat_update('documents', value=ndocs, unit='input_file')
if self.limit != -1 and li >= self.limit:
break
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1) -> DocumentsPipeline:
if data:
yield from data
files_shard = self.data_folder.get_shard(rank, world_size, recursive=self.recursive, glob_pattern=self.glob_pattern) if not self.paths_file else list(get_shard_from_paths_file(self.paths_file, rank, world_size))
if len(files_shard) == 0:
if rank == 0:
raise RuntimeError(f'No files found on {self.data_folder.path}!')
logger.warning(f'No files found on {self.data_folder.path} for rank={rank!r}')
if self.shuffle_files:
random.shuffle(files_shard)
for doc in self.read_files_shard(files_shard):
self.update_doc_stats(doc)
yield doc
# File: datatrove-main/src/datatrove/pipeline/readers/csv.py
import csv
from typing import Callable, Literal
from datatrove.io import DataFileLike, DataFolderLike
from datatrove.pipeline.readers.base import BaseDiskReader
class CsvReader(BaseDiskReader):
name = '🔢 Csv'
def __init__(self, data_folder: DataFolderLike, paths_file: DataFileLike | None=None, compression: Literal['infer', 'gzip', 'zstd'] | None='infer', limit: int=-1, skip: int=0, file_progress: bool=False, doc_progress: bool=False, adapter: Callable=None, text_key: str='text', id_key: str='id', default_metadata: dict=None, recursive: bool=True, glob_pattern: str | None=None, shuffle_files: bool=False):
super().__init__(data_folder, paths_file, limit, skip, file_progress, doc_progress, adapter, text_key, id_key, default_metadata, recursive, glob_pattern, shuffle_files)
self.compression = compression
self.empty_warning = False
def read_file(self, filepath: str):
with self.data_folder.open(filepath, 'r', compression=self.compression) as f:
csv_reader = csv.DictReader(f)
for (di, d) in enumerate(csv_reader):
with self.track_time():
document = self.get_document_from_dict(d, filepath, di)
if not document:
continue
yield document
CSVReader = CsvReader
# File: datatrove-main/src/datatrove/pipeline/readers/huggingface.py
import copy
from typing import Callable
from loguru import logger
from tqdm import tqdm
from datatrove.data import DocumentsPipeline
from datatrove.pipeline.readers.base import BaseReader
class HuggingFaceDatasetReader(BaseReader):
name = '🤗 HuggingFace'
_requires_dependencies = ['datasets']
def __init__(self, dataset: str, dataset_options: dict | None=None, streaming: bool=False, limit: int=-1, skip: int=0, batch_size: int=1000, doc_progress: bool=False, adapter: Callable=None, text_key: str='text', id_key: str='id', default_metadata: dict=None, shuffle_files: bool=False):
super().__init__(limit, skip, adapter, text_key, id_key, default_metadata)
self.dataset = dataset
self.dataset_options = dataset_options or {}
self.batch_size = batch_size
self.doc_progress = doc_progress
self.streaming = streaming
self.shuffle_files = shuffle_files
def get_document_from_dict(self, data: dict, source: str, id_in_file: int | str):
document = super().get_document_from_dict(data, source, id_in_file)
if document:
document.metadata.setdefault('dataset', source)
return document
def _get_dataset_shard(self, dst, rank: int, world_size: int):
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
if isinstance(dst, Dataset):
return dst.shard(world_size, rank, contiguous=True)
elif isinstance(dst, IterableDataset) and dst.n_shards > 1:
if rank >= dst.n_shards:
logger.warning(f'Requested shard {rank} of a streaming dataset, but it only has {dst.n_shards} shards.')
return None
ex_iterable = dst._ex_iterable.shard_data_sources(rank, world_size)
return IterableDataset(ex_iterable=ex_iterable, info=dst._info.copy(), split=dst._split, formatting=dst._formatting, shuffling=copy.deepcopy(dst._shuffling), distributed=copy.deepcopy(dst._distributed), token_per_repo_id=dst._token_per_repo_id)
else:
return split_dataset_by_node(dst, rank, world_size)
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1) -> DocumentsPipeline:
from datasets import load_dataset
if data:
yield from data
ds = load_dataset(self.dataset, **self.dataset_options, streaming=self.streaming)
if self.shuffle_files:
if not self.streaming:
ds = ds.shuffle(seed=42)
else:
ds = ds.shuffle(seed=42, buffer_size=1000)
if isinstance(ds, dict):
raise ValueError(f"You forgot to specify the split of the dataset. Update your dataset_options to include 'split'. Available splits: {list(ds.keys())}")
shard = self._get_dataset_shard(ds, rank, world_size)
if not shard:
return
with tqdm(total=self.limit if self.limit != -1 else None, disable=not self.doc_progress) as pbar:
li = 0
for batch in shard.iter(self.batch_size):
if self.limit != -1 and li >= self.limit:
break
documents = []
with self.track_time('batch'):
for line in (dict(zip(batch, t)) for t in zip(*batch.values())):
if self.limit != -1 and li >= self.limit:
break
document = self.get_document_from_dict(line, self.dataset, f'{rank:05d}/{li}')
if not document:
continue
documents.append(document)
self.update_doc_stats(document)
self.stat_update('documents')
li += 1
pbar.update()
yield from documents
# File: datatrove-main/src/datatrove/pipeline/readers/ipc.py
from typing import Callable
from datatrove.io import DataFileLike, DataFolderLike
from datatrove.pipeline.readers.base import BaseDiskReader
class IpcReader(BaseDiskReader):
name = '🪶 Ipc'
_requires_dependencies = ['pyarrow']
def __init__(self, data_folder: DataFolderLike, paths_file: DataFileLike | None=None, limit: int=-1, skip: int=0, stream: bool=False, file_progress: bool=False, doc_progress: bool=False, adapter: Callable=None, text_key: str='text', id_key: str='id', default_metadata: dict=None, recursive: bool=True, glob_pattern: str | None=None, shuffle_files: bool=False):
super().__init__(data_folder, paths_file, limit, skip, file_progress, doc_progress, adapter, text_key, id_key, default_metadata, recursive, glob_pattern, shuffle_files)
self.stream = stream
def _iter_file_batches(self, filepath: str):
import pyarrow as pa
with self.data_folder.open(filepath, 'rb') as f:
with pa.ipc.open_file(f) as ipc_reader:
for i in range(ipc_reader.num_record_batches):
yield ipc_reader.get_batch(i)
def _iter_stream_batches(self, filepath: str):
import pyarrow as pa
with self.data_folder.open(filepath, 'rb') as f:
with pa.ipc.open_stream(f) as ipc_stream_reader:
for batch in ipc_stream_reader:
yield batch
def read_file(self, filepath: str):
batch_iter = self._iter_file_batches(filepath) if not self.stream else self._iter_stream_batches(filepath)
li = 0
for batch in batch_iter:
documents = []
with self.track_time('batch'):
for line in batch.to_pylist():
document = self.get_document_from_dict(line, filepath, li)
if not document:
continue
documents.append(document)
li += 1
yield from documents
# File: datatrove-main/src/datatrove/pipeline/readers/jsonl.py
from typing import Callable, Literal
from datatrove.io import DataFileLike, DataFolderLike
from datatrove.pipeline.readers.base import BaseDiskReader
from datatrove.utils.logging import logger
class JsonlReader(BaseDiskReader):
name = '🐿 Jsonl'
_requires_dependencies = ['orjson']
def __init__(self, data_folder: DataFolderLike, paths_file: DataFileLike | None=None, compression: Literal['infer', 'gzip', 'zstd'] | None='infer', limit: int=-1, skip: int=0, file_progress: bool=False, doc_progress: bool=False, adapter: Callable=None, text_key: str='text', id_key: str='id', default_metadata: dict=None, recursive: bool=True, glob_pattern: str | None=None, shuffle_files: bool=False):
super().__init__(data_folder, paths_file, limit, skip, file_progress, doc_progress, adapter, text_key, id_key, default_metadata, recursive, glob_pattern, shuffle_files)
self.compression = compression
def read_file(self, filepath: str):
import orjson
from orjson import JSONDecodeError
with self.data_folder.open(filepath, 'r', compression=self.compression) as f:
try:
for (li, line) in enumerate(f):
with self.track_time():
try:
document = self.get_document_from_dict(orjson.loads(line), filepath, li)
if not document:
continue
except (EOFError, JSONDecodeError) as e:
logger.warning(f'Error when reading `{filepath}`: {e}')
continue
yield document
except UnicodeDecodeError as e:
logger.warning(f'File `{filepath}` may be corrupted: raised UnicodeDecodeError ({e})')
# File: datatrove-main/src/datatrove/pipeline/readers/parquet.py
from typing import Callable
from datatrove.io import DataFileLike, DataFolderLike
from datatrove.pipeline.readers.base import BaseDiskReader
class ParquetReader(BaseDiskReader):
name = '📒 Parquet'
_requires_dependencies = ['pyarrow']
def __init__(self, data_folder: DataFolderLike, paths_file: DataFileLike | None=None, limit: int=-1, skip: int=0, batch_size: int=1000, read_metadata: bool=True, file_progress: bool=False, doc_progress: bool=False, adapter: Callable=None, text_key: str='text', id_key: str='id', default_metadata: dict=None, recursive: bool=True, glob_pattern: str | None=None, shuffle_files: bool=False):
super().__init__(data_folder, paths_file, limit, skip, file_progress, doc_progress, adapter, text_key, id_key, default_metadata, recursive, glob_pattern, shuffle_files)
self.batch_size = batch_size
self.read_metadata = read_metadata
def read_file(self, filepath: str):
import pyarrow.parquet as pq
with self.data_folder.open(filepath, 'rb') as f:
with pq.ParquetFile(f) as pqf:
li = 0
columns = [self.text_key, self.id_key] if not self.read_metadata else None
for batch in pqf.iter_batches(batch_size=self.batch_size, columns=columns):
documents = []
with self.track_time('batch'):
for line in batch.to_pylist():
document = self.get_document_from_dict(line, filepath, li)
if not document:
continue
documents.append(document)
li += 1
yield from documents
# File: datatrove-main/src/datatrove/pipeline/readers/warc.py
from typing import TYPE_CHECKING, Callable, Literal
from datatrove.io import DataFileLike, DataFolderLike
from datatrove.pipeline.readers.base import BaseDiskReader
if TYPE_CHECKING:
from warcio.recordloader import ArcWarcRecord
class WarcReader(BaseDiskReader):
name = '🕷 Warc'
_requires_dependencies = ['warcio', ('cchardet', 'faust-cchardet'), ('magic', 'python-magic')]
def __init__(self, data_folder: DataFolderLike, paths_file: DataFileLike | None=None, compression: Literal['infer', 'gzip', 'zstd'] | None='infer', limit: int=-1, skip: int=0, file_progress: bool=False, doc_progress: bool=False, adapter: Callable=None, text_key: str='text', id_key: str='id', default_metadata: dict=None, recursive: bool=True, glob_pattern: str | None=None, shuffle_files: bool=False):
self.compression = compression
super().__init__(data_folder, paths_file, limit, skip, file_progress, doc_progress, adapter, text_key, id_key, default_metadata, recursive, glob_pattern, shuffle_files)
def read_file(self, filepath: str):
from warcio.archiveiterator import ArchiveIterator
with self.data_folder.open(filepath, 'rb', compression=self.compression) as f:
for (ri, record) in enumerate(ArchiveIterator(f)):
with self.track_time():
extracted_data = process_record(record)
if not extracted_data:
continue
document = self.get_document_from_dict(extracted_data, filepath, ri)
if not document:
continue
yield document
def process_record(record: 'ArcWarcRecord') -> dict | None:
import cchardet
import magic
if record.rec_type != 'response' and record.rec_type != 'conversion':
return
mime_type = record.rec_headers.get('WARC-Identified-Payload-Type', None)
if mime_type is not None and (mime_type != 'text/html' and (record.rec_type != 'conversion' or mime_type != 'text/plain')):
return
content_bytes = record.content_stream().read()
if mime_type is None:
mime_type = magic.from_buffer(content_bytes, mime=True)
if mime_type != 'text/html' and (record.rec_type != 'conversion' or mime_type != 'text/plain'):
return
charset = 'UTF-8'
try:
html = content_bytes.decode(charset)
except UnicodeDecodeError:
encoding_det = cchardet.detect(content_bytes)['encoding']
if not encoding_det or encoding_det == charset:
return
charset = encoding_det
try:
html = content_bytes.decode(charset)
except (UnicodeDecodeError, LookupError):
return
id_ = record.rec_headers['WARC-Record-ID']
url = record.rec_headers.get('WARC-Target-URI', None)
date = record.rec_headers.get('WARC-Date', None)
if not url:
url = dict(record.rec_headers.headers)['uri']
if not date:
date = dict(record.rec_headers.headers)['archive-date']
return {'text': html, 'id': id_, 'url': url, 'date': date}
# File: datatrove-main/src/datatrove/pipeline/stats/__init__.py
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, STAT_TYPE, TopKConfig
from datatrove.pipeline.stats.contamination_stats import WordsContaminationStats
from datatrove.pipeline.stats.doc_stats import DocStats
from datatrove.pipeline.stats.lang_stats import LangStats
from datatrove.pipeline.stats.line_stats import LineStats
from datatrove.pipeline.stats.merger import STATS_MERGED_NAME, StatsMerger
from datatrove.pipeline.stats.paragraph_stats import ParagraphStats
from datatrove.pipeline.stats.perplexity_stats import CCNetPerplexityStats
from datatrove.pipeline.stats.sentence_stats import SentenceStats
from datatrove.pipeline.stats.token_stats import TokenStats
from datatrove.pipeline.stats.word_stats import WordStats
# File: datatrove-main/src/datatrove/pipeline/stats/base.py
import heapq
import json
from abc import abstractmethod
from collections import defaultdict
from typing import get_args
from loguru import logger
from datatrove.data import Document, DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, STAT_TYPE, TopKConfig
from datatrove.utils.stats import MetricStatsDict
class BaseStats(PipelineStep):
type = '📊 - STATS'
name = '👑 Summary stats'
_requires_dependencies = ['tldextract']
def __init__(self, output_folder: DataFolderLike, groups_to_compute: list[GROUP] | None=None, histogram_round_digits: int=3, top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
from tldextract import TLDExtract
super().__init__()
self.output_folder = get_datafolder(output_folder)
self.groups = groups_to_compute or list(get_args(GROUP))
self.histogram_round_digits = histogram_round_digits
self.top_k_cfg = top_k_config
self.tld_extractor = TLDExtract()
@abstractmethod
def extract_stats(self, doc: Document) -> dict[str, int | float]:
raise NotImplementedError()
def get_kv(self, doc: Document, value: STAT_TYPE, group_name: GROUP) -> tuple[str, STAT_TYPE | dict[str, STAT_TYPE]]:
if group_name == 'histogram':
return (str(round(value, self.histogram_round_digits)), {'': 1, 'chars': len(doc.text), **({'tokens': doc.metadata['token_count']} if 'token_count' in doc.metadata else {})})
elif group_name == 'summary':
return ('summary', value)
elif group_name == 'fqdn':
fqdn = doc.metadata.get('fqdn')
if fqdn is None:
fqdn = self.tld_extractor.extract_str(doc.metadata['url']).fqdn
doc.metadata['fqdn'] = fqdn
return (fqdn, value)
elif group_name == 'suffix':
suffix = doc.metadata.get('suffix')
if suffix is None:
suffix = self.tld_extractor.extract_str(doc.metadata['url']).suffix
doc.metadata['suffix'] = suffix
return (suffix, value)
else:
raise ValueError(f'Unknown group name: {group_name}')
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
groups_dicts: dict[GROUP, dict[str, MetricStatsDict]] = {group: defaultdict(MetricStatsDict) for group in self.groups}
for doc in data:
with self.track_time():
try:
doc_stats = self.extract_stats(doc)
except Exception as e:
logger.error(f'Error while extracting stats from document {doc.id}', exc_info=e)
raise e
for (group, counters) in groups_dicts.items():
for (stat, value) in doc_stats.items():
(key, value) = self.get_kv(doc, value, group)
if not isinstance(value, dict):
counters[stat][key] += value
else:
for (suffix, val) in value.items():
stat_name = stat if not suffix else f'{stat}__{suffix}'
counters[stat_name][key] += val
doc.metadata.update(doc_stats)
yield doc
for (group, stats_dict) in groups_dicts.items():
group_top_k_keys = None
for (stat_name, stat_values) in stats_dict.items():
if group in self.top_k_cfg.top_k_groups:
if group_top_k_keys is None:
group_top_k_keys = heapq.nlargest(self.top_k_cfg.top_k, stat_values, key=lambda x: stat_values[x].n)
stat_values = MetricStatsDict(init={s: stat_values[s] for s in group_top_k_keys})
with self.output_folder.open(f'{group}/{stat_name}/{rank:05d}.json', 'wt') as f:
json.dump(stat_values.to_dict(), f)
del groups_dicts
# File: datatrove-main/src/datatrove/pipeline/stats/config.py
from dataclasses import dataclass
from typing import Literal
GROUP = Literal['summary', 'histogram', 'fqdn', 'suffix']
@dataclass(frozen=True)
class TopKConfig:
top_k_groups: list[Literal['fqdn', 'suffix']]
top_k: int
DEFAULT_TOP_K_CONFIG = TopKConfig(top_k_groups=['fqdn', 'suffix'], top_k=100000)
STAT_TYPE = int | float
# File: datatrove-main/src/datatrove/pipeline/stats/contamination_stats.py
from typing import get_args
from datatrove.data import Document
from datatrove.io import DataFolderLike
from datatrove.pipeline.stats.base import BaseStats
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, TopKConfig
from datatrove.utils.text import TextNormConfig, simplify_text
from datatrove.utils.typeshelper import Languages
from datatrove.utils.word_tokenizers import load_word_tokenizer
class WordsContaminationStats(BaseStats):
name = '😷 Words contamination'
def __init__(self, output_folder: DataFolderLike, words: list[str], norm_config: TextNormConfig=TextNormConfig(), language: str=Languages.english, groups_to_compute: list[GROUP]=list(get_args(GROUP)), histogram_round_digits: int=3, top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
super().__init__(output_folder, groups_to_compute, histogram_round_digits, top_k_config=top_k_config)
if len(words) == 0:
raise ValueError('At least one word must be provided')
self.norm_config = norm_config
self.language = language
self.words = words
def extract_stats(self, doc: Document) -> dict[str, int | float]:
word_tokenizer = load_word_tokenizer(self.language)
doc_words = word_tokenizer.word_tokenize(simplify_text(doc.text, self.norm_config))
return {f'words_contamination_{self.words[0]}': sum([1 for word in doc_words if word in self.words]) / len(doc_words)}
# File: datatrove-main/src/datatrove/pipeline/stats/doc_stats.py
import re
from typing import get_args
from datatrove.data import Document
from datatrove.io import DataFolderLike
from datatrove.pipeline.stats.base import BaseStats
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, TopKConfig
from datatrove.utils.text import PUNCTUATION
ELIPSIS = ['...', '…']
class DocStats(BaseStats):
name = '📜 Doc stats'
def __init__(self, output_folder: DataFolderLike, groups_to_compute: list[GROUP]=list(get_args(GROUP)), histogram_round_digits: int=3, top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
super().__init__(output_folder, groups_to_compute, histogram_round_digits, top_k_config)
self.elipsis_regex = re.compile('|'.join([f'(?:{re.escape(elipsis)})' for elipsis in ELIPSIS]))
self.punc_regex = re.compile('|'.join([f'(?:{re.escape(punc)})' for punc in PUNCTUATION]))
def extract_stats(self, doc: Document) -> dict[str, int | float]:
return {'length': len(doc.text), 'white_space_ratio': sum([1 for c in doc.text if c.isspace()]) / len(doc.text), 'non_alpha_digit_ratio': sum([1 for c in doc.text if not c.isalpha() and (not c.isdigit())]) / len(doc.text), 'digit_ratio': sum([1 for c in doc.text if c.isdigit()]) / len(doc.text), 'uppercase_ratio': sum([1 for c in doc.text if c.isupper()]) / len(doc.text), 'elipsis_ratio': sum((len(elipsis) for elipsis in self.elipsis_regex.findall(doc.text))) / len(doc.text), 'punctuation_ratio': sum((len(punc) for punc in self.punc_regex.findall(doc.text))) / len(doc.text)}
# File: datatrove-main/src/datatrove/pipeline/stats/lang_stats.py
from typing import get_args
from datatrove.data import Document
from datatrove.io import DataFolderLike
from datatrove.pipeline.stats.base import BaseStats
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, TopKConfig
from datatrove.utils.lid import FT176LID
class LangStats(BaseStats):
name = '🎤 Language stats'
def __init__(self, output_folder: DataFolderLike, language: str, groups_to_compute: list[GROUP]=list(get_args(GROUP)), histogram_round_digits: int=3, top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
super().__init__(output_folder, groups_to_compute, histogram_round_digits, top_k_config)
self.fasttext = FT176LID([language])
self.language = language
def extract_stats(self, doc: Document) -> dict[str, int | float]:
language_score = 0
if doc.metadata.get('language') == self.language and 'language_score' in doc.metadata:
language_score = doc.metadata['language_score']
else:
language_score = self.fasttext.predict(doc)[1][self.language]
return {f'fasttext_{self.language}': language_score}
# File: datatrove-main/src/datatrove/pipeline/stats/line_stats.py
from typing import get_args
from datatrove.data import Document
from datatrove.io import DataFolderLike
from datatrove.pipeline.filters.c4_filters import END_PUNCTUATION
from datatrove.pipeline.filters.gopher_repetition_filter import find_duplicates
from datatrove.pipeline.stats.base import BaseStats
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, TopKConfig
def get_max_chars_per_line_ratio(lines, chars: int) -> float:
return sum([1 for line in lines if len(line) <= chars]) / len(lines)
def get_min_chars_per_line_ratio(lines, chars: int) -> float:
return sum([1 for line in lines if len(line) >= chars]) / len(lines)
def is_bullet_line(line: str):
if len(line.strip()) == 0:
return False
return line.strip()[0] in '-*•'
class LineStats(BaseStats):
name = '🎼 Line stats'
def __init__(self, output_folder: DataFolderLike, max_k_chars_per_line_tresholds: list[int] | None=None, min_k_chars_per_line_thresholds: list[int] | None=None, groups_to_compute: list[GROUP]=list(get_args(GROUP)), ignore_empty_lines: bool=False, histogram_round_digits: int=3, top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
super().__init__(output_folder, groups_to_compute, histogram_round_digits, top_k_config)
self.short_max_chars = max_k_chars_per_line_tresholds if max_k_chars_per_line_tresholds is not None else [10, 30]
self.long_max_chars = min_k_chars_per_line_thresholds if min_k_chars_per_line_thresholds is not None else [2000, 10000]
self.ignore_empty_lines = ignore_empty_lines
def extract_stats(self, doc: Document):
lines: list[str] = doc.metadata.get('lines') or doc.text.split('\n')
n_lines = len(lines)
lines = [line for line in lines if len(line.strip()) > 0] if self.ignore_empty_lines else lines
(line_dups, char_dups) = find_duplicates(lines)
return {'n_lines': n_lines, 'avg_line_length': sum([len(line) for line in lines]) / len(lines), **{f'short_line_ratio_chars_{chars}': get_max_chars_per_line_ratio(lines, chars) for chars in self.short_max_chars}, **{f'long_line_ratio_chars_{chars}': get_min_chars_per_line_ratio(lines, chars) for chars in self.long_max_chars}, 'lines_ending_with_terminal_mark_ratio': sum((1 for line in lines if line.endswith(END_PUNCTUATION))) / len(lines), 'bullet_point_lines_ratio': sum((1 for line in lines if is_bullet_line(line))) / len(lines), 'line_duplicates': line_dups / len(lines), 'line_char_duplicates': char_dups / sum((len(line) for line in lines))}
# File: datatrove-main/src/datatrove/pipeline/stats/merger.py
import heapq
import json
from pathlib import Path
from loguru import logger
from tqdm import tqdm
from datatrove.data import DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, TopKConfig
from datatrove.utils.stats import MetricStats, MetricStatsDict
STATS_MERGED_NAME = 'metric.json'
class StatsMerger(PipelineStep):
type = '📊 - STATS'
name = '🔗 Merging stats'
def __init__(self, input_folder: DataFolderLike, output_folder: DataFolderLike, remove_input: bool=False, top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
super().__init__()
self.input_folder = get_datafolder(input_folder)
self.output_folder = get_datafolder(output_folder)
self.remove_input = remove_input
self.top_k_config = top_k_config
def get_leaf_non_empty_folders(self):
return sorted([path for (path, folders, files) in self.input_folder.walk('') if not folders and files])
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
folders_shard = self.get_leaf_non_empty_folders()[rank::world_size]
logger.info(f'Merging {len(folders_shard)} stat folders')
with self.track_time():
for folder in tqdm(folders_shard):
input_files = self.input_folder.glob(f'{folder}/[0-9][0-9][0-9][0-9][0-9].json')
logger.info(f'Processing folder {folder} with {len(input_files)} files')
stat = MetricStatsDict()
for file in tqdm(input_files):
with self.input_folder.open(file, 'rt') as f:
for (key, item) in json.load(f).items():
stat[key] += MetricStats.from_dict(item)
with self.output_folder.open(f'{folder}/{STATS_MERGED_NAME}', 'wt') as f:
group_name = Path(folder).parent.name
if group_name in self.top_k_config.top_k_groups:
top_k_keys = heapq.nlargest(self.top_k_config.top_k, stat, key=lambda x: stat.get(x).n)
stat = MetricStatsDict(init={s: stat.get(s) for s in top_k_keys})
json.dump(stat.to_dict(), f)
if self.remove_input:
for file in input_files:
self.input_folder.rm(file)
if data:
yield from data
# File: datatrove-main/src/datatrove/pipeline/stats/paragraph_stats.py
from typing import get_args
from datatrove.data import Document
from datatrove.io import DataFolderLike
from datatrove.pipeline.filters.gopher_repetition_filter import find_duplicates
from datatrove.pipeline.stats.base import BaseStats
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, TopKConfig
def get_short_paragraph_ratio(paragraphs: list[str], threshold: int) -> float:
return sum([1 for paragraph in paragraphs if len(paragraph) <= threshold]) / len(paragraphs)
def get_long_paragraph_ratio(paragraphs: list[str], threshold: int) -> float:
return sum([1 for paragraph in paragraphs if len(paragraph) >= threshold]) / len(paragraphs)
class ParagraphStats(BaseStats):
type = '📊 - STATS'
name = '📄 Paragraph stats'
def __init__(self, output_folder: DataFolderLike, short_paragraph_max_chars_threshold: list[int] | None=None, long_paragraph_max_chars_threshold: list[int] | None=None, ignore_empty_paragraphs: bool=False, histogram_round_digits: int=3, groups_to_compute: list[GROUP]=list(get_args(GROUP)), top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
super().__init__(output_folder, groups_to_compute, histogram_round_digits, top_k_config)
self.ignore_empty_paragraphs = ignore_empty_paragraphs
self.short_paragraph_max_chars_threshold = short_paragraph_max_chars_threshold or [100]
self.long_paragraph_max_chars_threshold = long_paragraph_max_chars_threshold or [1000]
def extract_stats(self, doc: Document) -> dict[str, int | float]:
paragraphs = [p for p in doc.text.split('\n\n') if p.strip()]
n_paragraphs = len(paragraphs)
paragraphs = [p for p in paragraphs if p.strip()] if self.ignore_empty_paragraphs else paragraphs
(paragraph_dups, paragraph_char_dups) = find_duplicates(paragraphs)
return {'n_paragraphs': n_paragraphs, 'avg_paragraph_length': sum([len(p) for p in paragraphs]) / n_paragraphs, **{f'short_paragraph_ratio_{chars}': get_short_paragraph_ratio(paragraphs, chars) for chars in self.short_paragraph_max_chars_threshold}, **{f'long_paragraph_ratio_{chars}': get_long_paragraph_ratio(paragraphs, chars) for chars in self.long_paragraph_max_chars_threshold}, 'paragraph_duplicates': paragraph_dups / n_paragraphs, 'paragraph_char_duplicates': paragraph_char_dups / sum((len(p) for p in paragraphs))}
# File: datatrove-main/src/datatrove/pipeline/stats/perplexity_stats.py
from typing import get_args
from datatrove.data import Document
from datatrove.io import DataFolderLike
from datatrove.pipeline.stats.base import BaseStats
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, TopKConfig
from datatrove.utils.perplexity import KenlmModel
from datatrove.utils.typeshelper import Languages
class CCNetPerplexityStats(BaseStats):
name = '🤯 CCNet perplexity stats'
_requires_dependencies = BaseStats._requires_dependencies + ['kenlm']
def __init__(self, output_folder: DataFolderLike, model_dataset: str, language: str=Languages.english, histogram_round_digits: int=3, groups_to_compute: list[GROUP]=list(get_args(GROUP)), top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
super().__init__(output_folder, groups_to_compute, histogram_round_digits, top_k_config)
self.model = KenlmModel(model_dataset=model_dataset, language=language)
def extract_stats(self, doc: Document) -> dict[str, int | float]:
return {f'ccnet_perplexity_{self.model.model_dataset}_{self.model.language}': self.model.get_perplexity(doc.text)}
# File: datatrove-main/src/datatrove/pipeline/stats/sentence_stats.py
from typing import get_args
from datatrove.data import Document
from datatrove.io import DataFolderLike
from datatrove.pipeline.stats.base import BaseStats
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, TopKConfig
from datatrove.utils.typeshelper import Languages
from datatrove.utils.word_tokenizers import load_word_tokenizer
def get_short_sentence_ratio(sentences: list[str], threshold: int) -> float:
return sum([1 for sentence in sentences if len(sentence) <= threshold]) / len(sentences)
def get_long_sentence_ratio(sentences: list[str], threshold: int) -> float:
return sum([1 for sentence in sentences if len(sentence) >= threshold]) / len(sentences)
class SentenceStats(BaseStats):
name = '🈂️ Sentence stats'
def __init__(self, output_folder: DataFolderLike, short_sentence_max_chars_threshold: list[int] | None=None, long_sentence_max_chars_threshold: list[int] | None=None, language: str=Languages.english, histogram_round_digits: int=3, groups_to_compute: list[GROUP]=list(get_args(GROUP)), top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
super().__init__(output_folder, groups_to_compute, histogram_round_digits, top_k_config)
self.short_sentence_max_chars_threshold = short_sentence_max_chars_threshold or [20]
self.long_sentence_max_chars_threshold = long_sentence_max_chars_threshold or [75]
self.language = language
def extract_stats(self, doc: Document) -> dict[str, int | float]:
word_tokenizer = load_word_tokenizer(self.language)
sentences = [s for s in word_tokenizer.sent_tokenize(doc.text) if s.strip()]
return {'n_sentences': len(sentences), 'avg_sentence_length': sum([len(s) for s in sentences]) / len(sentences), **{f'short_sentence_ratio_{chars}': get_short_sentence_ratio(sentences, chars) for chars in self.short_sentence_max_chars_threshold}, **{f'long_sentence_ratio_{chars}': get_long_sentence_ratio(sentences, chars) for chars in self.long_sentence_max_chars_threshold}}
# File: datatrove-main/src/datatrove/pipeline/stats/token_stats.py
from datatrove.data import Document
from datatrove.io import DataFolderLike
from datatrove.pipeline.stats.base import BaseStats
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, TopKConfig
from datatrove.utils.tokenization import PipelineStepWithTokenizer
class TokenStats(BaseStats, PipelineStepWithTokenizer):
name = '🔗 Token counter'
_requires_dependencies = ['tokenizers'] + BaseStats._requires_dependencies
def __init__(self, output_folder: DataFolderLike, tokenizer_name_or_path: str='gpt2', groups_to_compute: list[GROUP]=['fqdn', 'suffix', 'summary', 'histogram'], histogram_rounding: int=3, top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
BaseStats.__init__(self, output_folder, groups_to_compute, histogram_rounding, top_k_config)
PipelineStepWithTokenizer.__init__(self)
self.tokenizer_name_or_path = tokenizer_name_or_path
def extract_stats(self, doc: Document) -> dict[str, int | float]:
tokens_count = doc.metadata.get('token_count', None)
if tokens_count is None:
tokens_count = len(self.tokenizer.encode(doc.text).tokens)
return {'token_count': tokens_count}
# File: datatrove-main/src/datatrove/pipeline/stats/word_stats.py
from typing import get_args
from datatrove.data import Document
from datatrove.io import DataFolderLike
from datatrove.pipeline.filters.gopher_quality_filter import STOP_WORDS
from datatrove.pipeline.stats.base import BaseStats
from datatrove.pipeline.stats.config import DEFAULT_TOP_K_CONFIG, GROUP, TopKConfig
from datatrove.utils.typeshelper import Languages
from datatrove.utils.word_tokenizers import load_word_tokenizer
def get_short_word_ratio(words: list[str], threshold: int) -> float:
return sum([1 for word in words if len(word) <= threshold]) / len(words)
def get_long_word_ratio(words: list[str], threshold: int) -> float:
return sum([1 for word in words if len(word) >= threshold]) / len(words)
class WordStats(BaseStats):
name = '🈂️ Word stats'
def __init__(self, output_folder: DataFolderLike, stop_words: list[str]=STOP_WORDS, short_word_max_chars_threshold: list[int] | None=None, long_word_max_chars_threshold: list[int] | None=None, language: str=Languages.english, groups_to_compute: list[GROUP]=list(get_args(GROUP)), histogram_round_digits: int=3, top_k_config: TopKConfig=DEFAULT_TOP_K_CONFIG) -> None:
super().__init__(output_folder, groups_to_compute, histogram_round_digits, top_k_config)
self.short_word_max_chars_threshold = short_word_max_chars_threshold or [3]
self.long_word_max_chars_threshold = long_word_max_chars_threshold or [7]
self.language = language
self.stop_words = stop_words
def extract_stats(self, doc: Document) -> dict[str, int | float]:
word_tokenizer = load_word_tokenizer(self.language)
words = word_tokenizer.word_tokenize(doc.text)
lines = doc.text.splitlines()
return {'n_words': len(words), 'avg_word_length': sum([len(word) for word in words]) / len(words), 'avg_words_per_line': len(words) / len(lines), **{f'short_word_ratio_{chars}': get_short_word_ratio(words, chars) for chars in self.short_word_max_chars_threshold}, **{f'long_word_ratio_{chars}': get_long_word_ratio(words, chars) for chars in self.long_word_max_chars_threshold}, 'type_token_ratio': len(set(words)) / len(words), 'uppercase_word_ratio': sum([1 for word in words if word.isupper()]) / len(words), 'capitalized_word_ratio': sum([1 for word in words if word.istitle()]) / len(words), 'stop_word_ratio': sum([1 for word in words if word in self.stop_words]) / len(words)}
# File: datatrove-main/src/datatrove/pipeline/tokens/context_shuffler.py
import mmap
import numpy as np
from numpy.random import default_rng
from datatrove.data import DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.pipeline.tokens.merger import load_doc_ends
from datatrove.utils.logging import logger
class DocumentTokenizerContextShuffler(PipelineStep):
name = '🗃 Context Shuffler'
type = '🔢 - TOKENIZER'
def __init__(self, input_folder: DataFolderLike, output_folder: DataFolderLike, window_size: int=2048 + 1, seed: int=None, token_size: int=2):
super().__init__()
self.input_folder = get_datafolder(input_folder)
self.output_folder = get_datafolder(output_folder)
self.window_size = window_size
self.token_size = token_size
self.rand = default_rng(seed)
def get_ordering(self, all_doc_ends):
doc_ids = np.concatenate([np.ones(len(doc_ends), dtype=int) * i for (i, doc_ends) in enumerate(all_doc_ends)])
return self.rand.permutation(doc_ids)
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1) -> DocumentsPipeline:
datafiles = self.input_folder.get_shard(rank, world_size, glob_pattern='*.ds')
datafiles_index = self.input_folder.get_shard(rank, world_size, glob_pattern='*.ds.index')
for (datafile, index) in zip(datafiles, datafiles_index):
logger.info(f'Context shuffling {datafile} with a {self.window_size} token window')
total_len = load_doc_ends(self.input_folder.open(index, 'rb'))[-1]
nr_windows = total_len // self.window_size
ordering = self.rand.permutation(np.arange(0, nr_windows, dtype=int))
with self.output_folder.open(datafile, 'wb') as fout:
with self.input_folder.open(datafile, 'rb') as f:
with mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ) as unshuf:
with self.track_time():
for windowi in ordering:
(start, end) = (windowi * self.window_size * self.token_size, (windowi + 1) * self.window_size * self.token_size)
fout.write(unshuf[start:end])
# File: datatrove-main/src/datatrove/pipeline/tokens/counter.py
from datatrove.data import DocumentsPipeline
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.batching import batched
from datatrove.utils.tokenization import PipelineStepWithTokenizer
class TokensCounter(PipelineStepWithTokenizer):
name = '📊 Counter'
type = '🔢 - TOKENIZER'
def __init__(self, tokenizer_name_or_path: str='gpt2', count_eos_token: bool=False, batch_size: int=10000):
super().__init__()
self.tokenizer_name_or_path = tokenizer_name_or_path
self.count_eos_token = count_eos_token
self.batch_size = batch_size
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
from tokenizers import Encoding
for batch in batched(data, self.batch_size):
with self.track_time(unit='batch'):
encoded_batch: list[Encoding] = self.tokenizer.encode_batch([document.text for document in batch])
for (document, encoded) in zip(batch, encoded_batch):
count = len(encoded.ids)
if self.count_eos_token:
count += 1
document.metadata['token_count'] = count
self.stat_update('tokens', value=count)
yield document
class LengthCounter(PipelineStep):
name = '📊 Document length counter'
type = '🔢 - TOKENIZER'
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
for document in data:
count = document.metadata['token_count']
self.stats[count].update(1)
yield document
# File: datatrove-main/src/datatrove/pipeline/tokens/merger.py
from functools import partial
from typing import BinaryIO, Generator
import numpy as np
from numpy.random import default_rng
from tqdm import tqdm
from datatrove.data import DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.pipeline.tokens.tokenizer import TokenizedFile
class DocumentTokenizerMerger(PipelineStep):
name = '🗃 Document Merger'
type = '🔢 - TOKENIZER'
def __init__(self, input_folder: DataFolderLike, output_folder: DataFolderLike, save_filename: str, max_tokens_per_file: int=100000000000.0, max_tokens: int=-1, shuffle: bool=True, upload_block_size: int=20 * 2 ** 20, seed: int=None, save_loss_metadata: bool=False, save_final_metadata: bool=True, progress: bool=True):
super().__init__()
self.input_folder = get_datafolder(input_folder)
self.output_folder = get_datafolder(output_folder)
self.save_filename = save_filename
self.max_tokens_per_file = max_tokens_per_file
self.max_tokens = max_tokens
self.shuffle = shuffle
self.save_loss_metadata = save_loss_metadata
self.rand = default_rng(seed)
self.save_final_metadata = save_final_metadata
self.upload_block_size = upload_block_size
self.progress = progress
def get_ordering(self, all_doc_ends):
doc_ids = np.concatenate([np.ones(len(doc_ends), dtype=int) * i for (i, doc_ends) in enumerate(all_doc_ends)])
return doc_ids if not self.shuffle else self.rand.permutation(doc_ids)
def run(self, data: DocumentsPipeline=None, rank: int=0, world_size: int=1) -> DocumentsPipeline:
assert world_size == 1, 'world_size must be 1 for DocumentTokenizerMerger'
datafiles = self.input_folder.list_files(glob_pattern='*.ds')
datafiles_index = self.input_folder.list_files(glob_pattern='*.ds.index')
datafiles_loss = self.input_folder.list_files(glob_pattern='*.ds.loss') if self.save_loss_metadata else [None] * len(datafiles)
assert len(datafiles) == len(datafiles_index) == len(datafiles_loss), f'Mismatch between number of .ds, .ds.index and/or .ds.loss files({len(datafiles)} vs {len(datafiles_index)} vs {len(datafiles_loss)})'
(tokenizer_name_or_path, token_size) = (None, 2)
if self.save_final_metadata:
if self.input_folder.isfile(f'{datafiles[0]}.metadata'):
with self.input_folder.open(f'{datafiles[0]}.metadata', 'rt') as f:
tokenizer_name_or_path = f.read().splitlines()[0]
if '|' in tokenizer_name_or_path:
(tokenizer_name_or_path, token_size) = tokenizer_name_or_path.split('|')
token_size = int(token_size)
doc_ends = [load_doc_ends(self.input_folder.open(file, 'rb')) for file in datafiles_index]
token_inputs = list(map(partial(get_data_reader, nb_bytes=token_size), self.input_folder.open_files(datafiles), doc_ends))
loss_inputs = list(map(partial(get_data_reader, nb_bytes=1), self.input_folder.open_files(datafiles_loss), doc_ends)) if self.save_loss_metadata else None
ordering = self.get_ordering(doc_ends)
file_ct = 0
output_file = TokenizedFile(output_folder=self.output_folder, filename=f'{file_ct:03d}_{self.save_filename}.ds', save_loss_metadata=self.save_loss_metadata, upload_block_size=self.upload_block_size, tokenizer_name_or_path=tokenizer_name_or_path, save_final_metadata=self.save_final_metadata, token_size=token_size)
for input_file_id in tqdm(ordering, desc='Merging documents', unit='documents', total=len(ordering), disable=not self.progress):
if 0 < self.max_tokens <= self.stats['tokens'].total:
break
if 0 < self.max_tokens_per_file <= len(output_file):
output_file.close()
file_ct += 1
output_file = TokenizedFile(output_folder=self.output_folder, filename=f'{file_ct:03d}_{self.save_filename}.ds', save_loss_metadata=self.save_loss_metadata, upload_block_size=self.upload_block_size, tokenizer_name_or_path=tokenizer_name_or_path, save_final_metadata=self.save_final_metadata, token_size=token_size)
tokens = next(token_inputs[input_file_id])
output_file.write_bytes(tokens)
if loss_inputs:
output_file.write_loss_bytes(next(loss_inputs[input_file_id]))
self.stat_update('tokens', value=len(tokens) // token_size)
output_file.close()
if self.save_final_metadata:
output_file.write_final_metadata(self.stats['tokens'].total, filename=f'{self.save_filename}.ds')
def load_doc_ends(file: BinaryIO) -> np.ndarray:
with file as f:
return np.frombuffer(f.read(), dtype=np.uint64).astype(int)
def get_data_reader(file: BinaryIO, doc_ends: list, nb_bytes: int=1, start_e: int=0) -> Generator[bytes, None, None]:
with file as f:
if start_e != 0:
f.seek(int(start_e) * nb_bytes)
for r_e in doc_ends:
yield f.read((r_e - start_e) * nb_bytes)
start_e = r_e
# File: datatrove-main/src/datatrove/pipeline/tokens/tokenizer.py
import struct
from typing import TYPE_CHECKING
import humanize
import numpy as np
from numpy.random import default_rng
from datatrove.data import Document, DocumentsPipeline
from datatrove.io import DataFolder, DataFolderLike, get_datafolder
from datatrove.utils.batching import batched
from datatrove.utils.logging import logger
from datatrove.utils.tokenization import PipelineStepWithTokenizer
SHUFFLING_READ_BLOCK_SIZE = 50000
SHUFFLING_CACHE_TYPE = 'none'
if TYPE_CHECKING:
from tokenizers import Encoding
class TokenizedFile:
def __init__(self, output_folder: DataFolderLike, filename: str, save_index: bool=True, save_loss_metadata: bool=False, upload_block_size: int | None=None, tokenizer_name_or_path: str | None=None, save_final_metadata: bool=False, token_size: int=2):
self.output_folder = get_datafolder(output_folder)
self.filename = filename
self.save_index = save_index
self.save_loss_metadata = save_loss_metadata
self.upload_block_size = upload_block_size
self.write_idx = 0
self.token_size = token_size
self.token_format = 'I' if self.token_size == 4 else 'H'
self.doc_ends = []
self.tokenizer_name_or_path = tokenizer_name_or_path
self.save_final_metadata = save_final_metadata
self.tokens_file = self.output_folder.open(self.filename, mode='wb', block_size=upload_block_size)
self.loss_file: DataFolderLike | None = None
if self.save_loss_metadata:
self.loss_file = self.output_folder.open(f'{self.filename}.loss', mode='wb', block_size=upload_block_size)
def __len__(self):
return self.doc_ends[-1] if self.doc_ends else 0
def close(self):
if self.tokens_file:
self.tokens_file.close()
if self.loss_file:
self.loss_file.close()
if self.save_index:
index_file = self.output_folder.open(f'{self.filename}.index', mode='wb')
index_file.write(struct.pack('<%sQ' % len(self.doc_ends), *self.doc_ends))
index_file.close()
if self.save_final_metadata:
self.write_final_metadata()
def cleanup(self):
self.doc_ends = []
self.output_folder.rm_file(self.filename)
if self.loss_file:
self.output_folder.rm_file(f'{self.filename}.loss')
if self.save_final_metadata and self.output_folder.exists(f'{self.filename}.metadata'):
self.output_folder.rm_file(f'{self.filename}.metadata')
def write_bytes(self, tk_bytes: bytes, doc_ends: list[int]=None):
self.tokens_file.write(tk_bytes)
if doc_ends is not None:
self.doc_ends.extend([d + self.write_idx for d in doc_ends])
self.write_idx += len(tk_bytes) // self.token_size
else:
self.write_idx += len(tk_bytes) // self.token_size
self.doc_ends.append(self.write_idx)
def write_loss_bytes(self, l_bytes: bytes):
if self.save_loss_metadata:
self.loss_file.write(l_bytes)
def write(self, tokens: list[int], loss_values: np.ndarray | None):
self.write_bytes(struct.pack(f'<%s{self.token_format}' % len(tokens), *tokens))
if loss_values is not None:
self.write_loss_bytes(struct.pack('<%s?' % len(loss_values), *loss_values))
def copy(self, save_filename: str, ordering: np.ndarray, new_output_folder: DataFolder=None, rank: int=0, max_tokens_per_file: int=None) -> 'TokenizedFile':
with self.output_folder.open(self.filename, mode='rb', cache_type=SHUFFLING_CACHE_TYPE, block_size=SHUFFLING_READ_BLOCK_SIZE) as tokens_file:
loss_file = None if not self.loss_file else self.output_folder.open(f'{self.filename}.loss', mode='rb', cache_type=SHUFFLING_CACHE_TYPE, block_size=SHUFFLING_READ_BLOCK_SIZE // 2)
sub_rank = 0
destination = get_output_filename(save_filename, rank, 'shuffled', sub_rank)
new_file = TokenizedFile(self.output_folder if not new_output_folder else new_output_folder, destination, save_loss_metadata=self.save_loss_metadata, upload_block_size=self.upload_block_size, tokenizer_name_or_path=self.tokenizer_name_or_path, save_final_metadata=self.save_final_metadata, token_size=self.token_size)
logger.info(f'Shuffling in {destination}...')
total_tokens_written = 0
for doc_id in ordering:
(start, end) = (self.doc_ends[doc_id - 1] if doc_id > 0 else 0, self.doc_ends[doc_id])
tokens_file.seek(start * self.token_size)
new_file.write_bytes(tokens_file.read((end - start) * self.token_size))
if loss_file:
loss_file.seek(start)
new_file.write_loss_bytes(loss_file.read(end - start))
total_tokens_written += end - start
if max_tokens_per_file and total_tokens_written > max_tokens_per_file:
new_file.close()
sub_rank += 1
destination = get_output_filename(save_filename, rank, 'shuffled', sub_rank)
new_file = TokenizedFile(self.output_folder if not new_output_folder else new_output_folder, destination, save_loss_metadata=self.save_loss_metadata, upload_block_size=self.upload_block_size, tokenizer_name_or_path=self.tokenizer_name_or_path, save_final_metadata=self.save_final_metadata, token_size=self.token_size)
logger.info(f'Shuffling in {destination}...')
total_tokens_written = 0
if loss_file:
loss_file.close()
new_file.close()
return new_file
def write_final_metadata(self, token_count: int=-1, filename: str=None):
tokenizer_name = self.tokenizer_name_or_path
if not tokenizer_name:
tokenizer_name = 'Unknown Tokenizer' + '|' + str(self.token_size)
if filename is None:
filename = self.filename
with self.output_folder.open(f'{filename}.metadata', 'wt') as f:
if token_count == -1:
token_count = self.write_idx
f.write('\n'.join([tokenizer_name + '|' + str(self.token_size), str(token_count), humanize.metric(token_count, unit='T')]))
def get_output_filename(save_filename, rank: int, name: str, sub_rank: int=None):
if sub_rank is not None:
return '_'.join([x for x in [save_filename, f'{rank:05d}', f'{sub_rank:05d}', f'{name}.ds'] if x])
return '_'.join([x for x in [save_filename, f'{rank:05d}', f'{name}.ds'] if x])
class DocumentTokenizer(PipelineStepWithTokenizer):
name = '✍️ Writer'
type = '🔢 - TOKENIZER'
def __init__(self, output_folder: DataFolderLike, local_working_dir: DataFolderLike | None=None, save_filename: str=None, tokenizer_name_or_path: str='gpt2', eos_token: str='<|endoftext|>', save_loss_metadata: bool=False, shuffle: bool=True, batch_size: int=10000, max_tokens_per_file: int=None, seed: int=None, save_final_metadata: bool=True, upload_block_size: int | None=None):
super().__init__()
self.output_folder = get_datafolder(output_folder)
self.local_working_dir = get_datafolder(local_working_dir) if local_working_dir else None
if self.local_working_dir and (not self.local_working_dir.is_local()):
raise ValueError('local_working_dir must be a local path')
if self.local_working_dir is None and shuffle and (not self.output_folder.is_local()):
logger.warning('local_working_dir is not set and output folder is not local. This may slow down the process.')
self.save_filename = save_filename
self.tokenizer_name_or_path = tokenizer_name_or_path
self.eos_token = eos_token
self.save_loss_metadata = save_loss_metadata
self.shuffle = shuffle
self.batch_size = batch_size
self.rand = default_rng(seed)
self.save_final_metadata = save_final_metadata
self.upload_block_size = upload_block_size
self.max_tokens_per_file = max_tokens_per_file
def get_loss_values(self, document: Document, encoded: 'Encoding'):
if self.save_loss_metadata:
loss_values = np.ones(len(encoded.ids))
if (no_loss := document.metadata.get('no_loss_ranges', None)):
for (start, end) in no_loss:
(t_start, t_end) = (encoded.char_to_token(start), encoded.char_to_token(end))
loss_values[t_start:t_end] = 0
if t_end is None or t_end >= len(encoded.ids):
loss_values = loss_values[:t_start]
return loss_values
def write_unshuffled(self, data: DocumentsPipeline, filename: str):
from tokenizers import Encoding
unshuff = TokenizedFile(self.output_folder if not self.shuffle or not self.local_working_dir else self.local_working_dir, filename, save_index=not self.shuffle, save_loss_metadata=self.save_loss_metadata, upload_block_size=self.upload_block_size, tokenizer_name_or_path=self.tokenizer_name_or_path, save_final_metadata=self.save_final_metadata, token_size=self.token_size)
for batch in batched(data, self.batch_size):
with self.track_time(unit='batch'):
encoded_batch: list[Encoding] = self.tokenizer.encode_batch([document.text for document in batch])
for (document, encoded) in zip(batch, encoded_batch):
tokens = encoded.ids
loss_values = self.get_loss_values(document, encoded)
if loss_values is not None and len(loss_values) < len(tokens):
tokens = tokens[:len(loss_values)]
unshuff.write(tokens, loss_values)
self.stat_update('tokens', value=len(tokens))
unshuff.close()
return unshuff
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
unshuf_filename = get_output_filename(self.save_filename, rank, 'unshuffled')
logger.info(f'Tokenizing in "{unshuf_filename}"...')
outputfile: TokenizedFile = self.write_unshuffled(data, unshuf_filename)
if len(outputfile) == 0:
logger.warning('No data saved.')
return
if self.shuffle:
logger.info('Shuffling...')
outputfile.copy(self.save_filename, self.rand.permutation(len(outputfile.doc_ends)), self.output_folder, max_tokens_per_file=self.max_tokens_per_file, rank=rank)
outputfile.cleanup()
# File: datatrove-main/src/datatrove/pipeline/writers/disk_base.py
import dataclasses
import os.path
from abc import ABC, abstractmethod
from collections import Counter
from string import Template
from types import MethodType
from typing import IO, Callable
from datatrove.data import Document, DocumentsPipeline
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.base import PipelineStep
from datatrove.utils.typeshelper import StatHints
class DiskWriter(PipelineStep, ABC):
default_output_filename: str = None
type = '💽 - WRITER'
def __init__(self, output_folder: DataFolderLike, output_filename: str=None, compression: str | None='infer', adapter: Callable=None, mode: str='wt', expand_metadata: bool=False, max_file_size: int=-1):
super().__init__()
self.compression = compression
self.output_folder = get_datafolder(output_folder)
output_filename = output_filename or self.default_output_filename
if self.compression == 'gzip' and (not output_filename.endswith('.gz')):
output_filename += '.gz'
elif self.compression == 'zstd' and (not output_filename.endswith('.zst')):
output_filename += '.zst'
self.max_file_size = max_file_size
self.file_id_counter = Counter()
if self.max_file_size > 0 and mode != 'wb':
raise ValueError('Can only specify `max_file_size` when writing in binary mode!')
self.output_filename = Template(output_filename)
self.output_mg = self.output_folder.get_output_file_manager(mode=mode, compression=compression)
self.adapter = MethodType(adapter, self) if adapter else self._default_adapter
self.expand_metadata = expand_metadata
def _default_adapter(self, document: Document) -> dict:
data = {key: val for (key, val) in dataclasses.asdict(document).items() if val}
if self.expand_metadata and 'metadata' in data:
data |= data.pop('metadata')
return data
def __enter__(self):
return self
def close(self):
self.output_mg.close()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _get_output_filename(self, document: Document, rank: int | str=0, **kwargs) -> str:
return self.output_filename.substitute({'rank': str(rank).zfill(5), 'id': document.id, **document.metadata, **kwargs})
@abstractmethod
def _write(self, document: dict, file_handler: IO, filename: str):
raise NotImplementedError
def _on_file_switch(self, _original_name, old_filename, _new_filename):
self.output_mg.pop(old_filename).close()
def _get_filename_with_file_id(self, filename):
if os.path.dirname(filename):
return f'{os.path.dirname(filename)}/{self.file_id_counter[filename]:03d}_{os.path.basename(filename)}'
return f'{self.file_id_counter[filename]:03d}_{os.path.basename(filename)}'
def write(self, document: Document, rank: int=0, **kwargs):
original_name = output_filename = self._get_output_filename(document, rank, **kwargs)
if self.max_file_size > 0:
output_filename = self._get_filename_with_file_id(original_name)
if self.output_mg.get_file(output_filename).tell() >= self.max_file_size:
self.file_id_counter[original_name] += 1
new_output_filename = self._get_filename_with_file_id(original_name)
self._on_file_switch(original_name, output_filename, new_output_filename)
output_filename = new_output_filename
self._write(self.adapter(document), self.output_mg.get_file(output_filename), original_name)
self.stat_update(self._get_output_filename(document, 'XXXXX', **kwargs))
self.stat_update(StatHints.total)
self.update_doc_stats(document)
def run(self, data: DocumentsPipeline, rank: int=0, world_size: int=1) -> DocumentsPipeline:
with self:
for document in data:
with self.track_time():
self.write(document, rank)
yield document
# File: datatrove-main/src/datatrove/pipeline/writers/huggingface.py
import os
import random
import tempfile
import time
from typing import Callable
from huggingface_hub import CommitOperationAdd, create_commit, create_repo, preupload_lfs_files
from huggingface_hub.utils import HfHubHTTPError
from datatrove.io import DataFolderLike, get_datafolder
from datatrove.pipeline.writers import ParquetWriter
from datatrove.utils.logging import logger
MAX_RETRIES = 12
BASE_DELAY = 0.1
class HuggingFaceDatasetWriter(ParquetWriter):
default_output_filename: str = 'data/${rank}.parquet'
name = '🤗 HuggingFace'
def __init__(self, dataset: str, private: bool=True, local_working_dir: DataFolderLike | None=None, output_filename: str=None, compression: str | None=None, adapter: Callable=None, cleanup: bool=True, expand_metadata: bool=True, max_file_size: int=round(4.5 * 2 ** 30)):
self.dataset = dataset
self.private = private
self.local_working_dir = get_datafolder(local_working_dir if local_working_dir else tempfile.TemporaryDirectory())
self.cleanup = cleanup
if not self.local_working_dir.is_local():
raise ValueError('local_working_dir must be a local path')
if os.environ.get('HF_HUB_ENABLE_HF_TRANSFER', '0') != '1':
logger.warning('HF_HUB_ENABLE_HF_TRANSFER is not set to "1". Install hf_transfer and set the env variable for faster uploads:\npip install hf-transfer\nexport HF_HUB_ENABLE_HF_TRANSFER=1')
super().__init__(output_folder=local_working_dir, output_filename=output_filename, compression=compression, adapter=adapter, expand_metadata=expand_metadata, max_file_size=max_file_size)
self.operations = []
self._repo_init = False
def upload_files(self, *filenames):
if not self._repo_init:
create_repo(self.dataset, private=self.private, repo_type='dataset', exist_ok=True)
self._repo_init = True
additions = [CommitOperationAdd(path_in_repo=filename, path_or_fileobj=self.local_working_dir.resolve_paths(filename)) for filename in filenames]
logger.info(f"Uploading {','.join(filenames)} to the hub...")
preupload_lfs_files(self.dataset, repo_type='dataset', additions=additions)
logger.info(f"Upload of {','.join(filenames)} to the hub complete!")
if self.cleanup:
for filename in filenames:
self.local_working_dir.rm(filename)
self.operations.extend(additions)
def close(self, rank: int=0):
filelist = list(self.output_mg.get_open_files().keys())
super().close()
if filelist:
logger.info(f'Starting upload of {len(filelist)} files to {self.dataset}')
self.upload_files(*filelist)
retries = 0
while True:
try:
create_commit(self.dataset, repo_type='dataset', operations=self.operations, commit_message=f'DataTrove upload ({len(self.operations)} files)')
break
except HfHubHTTPError as e:
if 'A commit has happened since' in e.server_message:
if retries >= MAX_RETRIES:
logger.error(f'Failed to create commit after MAX_RETRIES={MAX_RETRIES!r}. Giving up.')
raise e
logger.info('Commit creation race condition issue. Waiting...')
time.sleep(BASE_DELAY * 2 ** retries + random.uniform(0, 2))
retries += 1
else:
raise e
def _on_file_switch(self, original_name, old_filename, new_filename):
super()._on_file_switch(original_name, old_filename, new_filename)
self.upload_files(old_filename)
# File: datatrove-main/src/datatrove/pipeline/writers/jsonl.py
from typing import IO, Callable
from datatrove.io import DataFolderLike
from datatrove.pipeline.writers.disk_base import DiskWriter
class JsonlWriter(DiskWriter):
default_output_filename: str = '${rank}.jsonl'
name = '🐿 Jsonl'
_requires_dependencies = ['orjson']
def __init__(self, output_folder: DataFolderLike, output_filename: str=None, compression: str | None='gzip', adapter: Callable=None, expand_metadata: bool=False, max_file_size: int=-1):
super().__init__(output_folder, output_filename=output_filename, compression=compression, adapter=adapter, expand_metadata=expand_metadata, mode='wb', max_file_size=max_file_size)
def _write(self, document: dict, file_handler: IO, _filename: str):
import orjson
file_handler.write(orjson.dumps(document, option=orjson.OPT_APPEND_NEWLINE))
# File: datatrove-main/src/datatrove/pipeline/writers/parquet.py
from collections import Counter, defaultdict
from typing import IO, Callable, Literal
from datatrove.io import DataFolderLike
from datatrove.pipeline.writers.disk_base import DiskWriter
class ParquetWriter(DiskWriter):
default_output_filename: str = '${rank}.parquet'
name = '📒 Parquet'
_requires_dependencies = ['pyarrow']
def __init__(self, output_folder: DataFolderLike, output_filename: str=None, compression: Literal['snappy', 'gzip', 'brotli', 'lz4', 'zstd'] | None=None, adapter: Callable=None, batch_size: int=1000, expand_metadata: bool=False, max_file_size: int=5 * 2 ** 30):
if compression not in {'snappy', 'gzip', 'brotli', 'lz4', 'zstd', None}:
raise ValueError("Invalid compression type. Allowed types are 'snappy', 'gzip', 'brotli', 'lz4', 'zstd', or None.")
super().__init__(output_folder, output_filename, compression=None, adapter=adapter, mode='wb', expand_metadata=expand_metadata, max_file_size=max_file_size)
self._writers = {}
self._batches = defaultdict(list)
self._file_counter = Counter()
self.compression = compression
self.batch_size = batch_size
def _on_file_switch(self, original_name, old_filename, new_filename):
self._writers.pop(original_name).close()
super()._on_file_switch(original_name, old_filename, new_filename)
def _write_batch(self, filename):
if not self._batches[filename]:
return
import pyarrow as pa
batch = pa.RecordBatch.from_pylist(self._batches.pop(filename))
self._writers[filename].write_batch(batch)
def _write(self, document: dict, file_handler: IO, filename: str):
import pyarrow as pa
import pyarrow.parquet as pq
if filename not in self._writers:
self._writers[filename] = pq.ParquetWriter(file_handler, schema=pa.RecordBatch.from_pylist([document]).schema, compression=self.compression)
self._batches[filename].append(document)
if len(self._batches[filename]) == self.batch_size:
self._write_batch(filename)
def close(self):
for filename in list(self._batches.keys()):
self._write_batch(filename)
for writer in self._writers.values():
writer.close()
self._batches.clear()
self._writers.clear()
super().close()
# File: datatrove-main/src/datatrove/tools/check_dataset.py
import argparse
import os
import struct
from typing import IO
import numpy as np
from tqdm import tqdm
from datatrove.io import DataFolder, get_datafolder
from datatrove.utils.tokenization import load_tokenizer
parser = argparse.ArgumentParser()
parser.add_argument('data', type=str, help='path to folder with dataset to check', nargs='?', default=os.getcwd())
parser.add_argument('-t', '--tokenizer', type=str, help='tokenizer to use', default='gpt2')
parser.add_argument('--eos', type=str, help='eos token', default='<|endoftext|>')
''
def load_doc_ends(file: IO):
with file as f:
return np.frombuffer(f.read(), dtype=np.uint64).tolist()
def load_dataset_bytes(file, doc_ends, bytes_per_value: int=2):
with file as f:
for (start, end) in zip([0] + doc_ends[:-1], doc_ends):
data = f.read((end - start) * bytes_per_value)
assert len(data) == (end - start) * bytes_per_value, 'Could not read correct number of bytes'
yield data
assert f.read(1) == b'', 'Dataset should be exhausted but there is more data to read'
def check_dataset(input_folder: DataFolder, tokenizer: str='gpt2', eos_token: str='<|endoftext|>'):
tokenizer = load_tokenizer(tokenizer)
eos_token = tokenizer.token_to_id(eos_token)
def open_file(path):
return input_folder.open(path, 'rb')
datafiles = input_folder.list_files(glob_pattern='*.ds')
datafiles_index = input_folder.list_files(glob_pattern='*.ds.index')
datafiles_loss = input_folder.list_files(glob_pattern='*.ds.loss')
check_loss = bool(datafiles_loss)
assert len(datafiles) == len(datafiles_index) and (not check_loss or len(datafiles) == len(datafiles_loss)), 'Mismatch between number of .ds, .ds.index and/or .ds.loss files'
doc_ends = [load_doc_ends(open_file(file)) for file in datafiles_index]
token_inputs = [load_dataset_bytes(open_file(path), ends) for (path, ends) in zip(datafiles, doc_ends)]
loss_inputs = [load_dataset_bytes(open_file(path), ends, bytes_per_value=1) for (path, ends) in zip(datafiles_loss, doc_ends)] if check_loss else [None] * len(token_inputs)
for (filei, (file_doc_ends, file_token_inputs, file_loss_inputs)) in enumerate(zip(doc_ends, token_inputs, loss_inputs)):
for (doci, tokens) in tqdm(enumerate(file_token_inputs), total=len(file_doc_ends)):
last_token = struct.unpack('<H', tokens[-2:])[0]
assert last_token == eos_token, f'no EOS at doc end of doc {doci}'
if __name__ == '__main__':
args = parser.parse_args()
input_folder: DataFolder = get_datafolder(args.data)
check_dataset(input_folder, args.tokenizer, args.eos)
print('All checks ok')
# File: datatrove-main/src/datatrove/tools/failed_logs.py
import argparse
import json
import os.path
import re
from rich.console import Console
from rich.prompt import Confirm
from datatrove.io import get_datafolder
from datatrove.utils._import_utils import is_rich_available
from datatrove.utils.logging import logger
if not is_rich_available():
raise ImportError('Please install `rich` to run this command (`pip install rich`).')
parser = argparse.ArgumentParser('Fetch the log files of failed tasks.')
parser.add_argument('path', type=str, nargs='?', help='Path to the logging folder. Defaults to current directory.', default=os.getcwd())
RANK_FROM_LOG_FILENAME_REGEX = re.compile('logs/task_(\\d{5})\\.log')
def main():
args = parser.parse_args()
console = Console()
logger.remove()
logging_dir = get_datafolder(args.path)
if not logging_dir.isfile('executor.json'):
console.log('Could not find "executor.json" in the given directory. Are you sure it is a logging folder?', style='red')
return
with logging_dir.open('executor.json', 'rt') as f:
world_size = json.load(f).get('world_size', None)
if not world_size:
console.log('Could not get the total number of tasks, please try relaunching the run.', style='red')
return
console.log(f'Found executor config: {world_size} tasks')
with console.status('Fetching list of incomplete tasks'):
completed = set(logging_dir.list_files('completions'))
incomplete = set(filter(lambda rank: f'completions/{rank:05d}' not in completed, range(world_size)))
console.log(f'Found {len(incomplete)}/{world_size} incomplete tasks.')
with console.status('Looking for log files'):
incomplete_logs = list(filter(lambda file: int(RANK_FROM_LOG_FILENAME_REGEX.search(file).group(1)) in incomplete, logging_dir.list_files('logs')))
console.log(f'Found {len(incomplete_logs)} log files for incomplete tasks.')
first = True
for incomplete_log in incomplete_logs:
if not first and (not Confirm.ask(f'Show next log ([i]{incomplete_log}[/i])?', default=True)):
break
with console.pager():
with logging_dir.open(incomplete_log, 'rt') as f:
console.print(f.read())
first = False
if __name__ == '__main__':
main()
# File: datatrove-main/src/datatrove/tools/inspect_data.py
import argparse
import os.path
import sys
from rich.console import Console
from rich.panel import Panel
from rich.prompt import Confirm, Prompt
from datatrove.io import DataFolder, get_datafolder
from datatrove.pipeline.filters import SamplerFilter
from datatrove.pipeline.readers import CSVReader, JsonlReader, ParquetReader, WarcReader
from datatrove.pipeline.writers import JsonlWriter
from datatrove.utils._import_utils import is_rich_available
''
if not is_rich_available():
raise ImportError('Please install `rich` to run this command (`pip install rich`).')
parser = argparse.ArgumentParser("Manually inspect some RefinedWeb samples. Any unknown parameters will be passed to the reader (example: 'text_key=text').")
parser.add_argument('path', type=str, nargs='?', help='Path to the data folder. Defaults to current directory.', default=os.getcwd())
parser.add_argument('-r', '--reader', type=str, help="The type of Reader to use to read the data. By default it will be guessed from the file extension. Can be ('jsonl', 'parquet', 'csv' or 'warc')")
parser.add_argument('-s', '--sample', type=float, help='Randomly sample a given % of samples. 1.0 to see all samples', default=1.0)
parser.add_argument('-l', '--label', type=str, help='Label the examples as good/bad and store at this location', default='')
console = Console()
def reader_class_from_name(reader_type):
match reader_type:
case 'jsonl':
return JsonlReader
case 'csv':
return CSVReader
case 'parquet':
return ParquetReader
case 'warc':
return WarcReader
case other:
console.log(f'[red]Unknwon reader type {other}')
sys.exit(-1)
def reader_factory(data_folder: DataFolder, reader_type: str=None, **kwargs):
data_files = data_folder.list_files()
if not data_files:
console.log(f'[red]Could not find any files in "{data_folder.path}"')
sys.exit(-1)
if not reader_type:
match data_files[0][data_files[0].index('.'):]:
case '.jsonl.gz' | '.jsonl' | '.json':
reader_type = 'jsonl'
case '.csv':
reader_type = 'csv'
case '.parquet':
reader_type = 'parquet'
case '.warc.gz' | 'arc.gz' | '.warc':
reader_type = 'warc'
case other:
console.log(f'[red]Could not find a matching reader for file extension "{other}"')
sys.exit(-1)
return reader_class_from_name(reader_type)(data_folder, **kwargs)
def get_filter_expr(text=None):
return (lambda x: eval(text)) if text else lambda x: True
def main():
""""""
(args, extra_args) = parser.parse_known_args()
kwargs = dict((extra_arg.split('=') for extra_arg in extra_args))
data_folder = get_datafolder(args.path)
label_folder = get_datafolder(args.label) if args.label else None
reader = reader_factory(data_folder, args.reader, **kwargs)
sampler = SamplerFilter(args.sample)
console.print(f'''Loading samples from "{data_folder.path}" with {reader} and sampling_rate={args.sample}.\nSamples are displayed full page one by one.\nIf you don't see any color you may run "export PAGER='less -r'".''')
filter_expr_text = None
if Confirm.ask("Would you like to add a filtering expression? (ex: x.metadata['token_count'] > 5000)", default=False):
filter_expr_text = Confirm.get_input(console, 'Type your filtering expression: ', password=False)
filter_expr = get_filter_expr(filter_expr_text)
good_samples = []
bad_samples = []
iterator = sampler(reader())
try:
for sample in iterator:
if not filter_expr(sample):
continue
with console.pager(styles=True):
console.print(Panel(f'[yellow]Data ID:[reset] {sample.id}\n[yellow]Metadata:[reset]\n' + '\n'.join((f'- [blue]{field}: [reset] {value}' for (field, value) in sample.metadata.items()))))
console.print(sample.text)
if label_folder:
result = Prompt.ask("To label as good/bad example enter 'g'/'b'. Enter 'q' to skip labelling and move to the next sample. Enter 'e' (exit) to leave:", console=console, choices=['g', 'b', 'e', 'q'])
if result == 'g':
good_samples.append(sample)
elif result == 'b':
bad_samples.append(sample)
elif result == 'e':
break
except Exception:
console.print_exception()
finally:
if good_samples and label_folder:
with JsonlWriter(label_folder, 'good_samples.jsonl', compression=None) as writer:
for sample in good_samples:
writer.write(sample)
if bad_samples and label_folder:
with JsonlWriter(label_folder, 'bad_samples.jsonl', compression=None) as writer:
for sample in bad_samples:
writer.write(sample)
if __name__ == '__main__':
main()
# File: datatrove-main/src/datatrove/tools/jobs_status.py
import argparse
import json
import os.path
from rich.console import Console
from datatrove.io import get_datafolder
from datatrove.utils._import_utils import is_rich_available
from datatrove.utils.logging import logger
if not is_rich_available():
raise ImportError('Please install `rich` to run this command (`pip install rich`).')
parser = argparse.ArgumentParser('Fetch all jobs that are running or complete.')
parser.add_argument('path', type=str, nargs='?', help='Path to the logging folder. Defaults to current directory.', default=os.getcwd())
parser.add_argument('-p', '--log_prefix', type=str, nargs='?', help='Prefix of logging folders to be scanned.', default='')
parser.add_argument('-hc', '--hide_complete', help='Hide all jobs that are already complete.', action='store_true')
def main():
args = parser.parse_args()
console = Console()
main_folder = get_datafolder(args.path)
logging_dirs = [f for (f, info) in main_folder.glob(f'{args.log_prefix}*', detail=True, maxdepth=1).items() if info['type'] == 'directory']
logger.remove()
complete_jobs = 0
incomplete_jobs = 0
complete_tasks = 0
incomplete_tasks = 0
for path in logging_dirs:
logging_dir = get_datafolder(main_folder.resolve_paths(path))
if not logging_dir.isfile('executor.json'):
console.log(f'Could not find "executor.json" in the given directory ({path}). Are you sure it is a logging folder?', style='red')
continue
with logging_dir.open('executor.json', 'rt') as f:
world_size = json.load(f).get('world_size', None)
if not world_size:
console.log(f'Could not get the total number of tasks in {path}, please try relaunching the run.', style='red')
continue
with console.status('Fetching list of incomplete tasks'):
completed = set(logging_dir.list_files('completions'))
incomplete = set(filter(lambda rank: f'completions/{rank:05d}' not in completed, range(world_size)))
complete_tasks += len(completed)
incomplete_tasks += len(incomplete)
if len(incomplete) == 0:
emoji = '✅'
complete_jobs += 1
else:
emoji = '❌'
incomplete_jobs += 1
if len(incomplete) > 0 or not args.hide_complete:
console.log(f"{emoji} {path + ':': <50}{len(completed)}/{world_size} ({len(completed) / world_size:.0%}) completed tasks.")
if complete_jobs + incomplete_jobs > 0:
console.log(f'Summary: {complete_jobs}/{complete_jobs + incomplete_jobs} ({complete_jobs / (complete_jobs + incomplete_jobs):.0%}) jobs completed, {complete_tasks}/{complete_tasks + incomplete_tasks} ({complete_tasks / (complete_tasks + incomplete_tasks):.0%}) tasks completed.')
else:
console.log('No jobs found.')
if __name__ == '__main__':
main()
# File: datatrove-main/src/datatrove/tools/launch_pickled_pipeline.py
import argparse
import dill
from datatrove.executor.base import PipelineExecutor
from datatrove.io import open_file
parser = argparse.ArgumentParser('Loads a pickled pipeline executor and launches it.')
parser.add_argument('path', type=str, help='Path to the pickled file (usually a file called executor.pik)')
def main():
args = parser.parse_args()
with open_file(args.path, 'rb') as f:
executor: PipelineExecutor = dill.load(f)
executor.run()
if __name__ == '__main__':
main()
# File: datatrove-main/src/datatrove/tools/merge_stats.py
import argparse
import json
import os.path
from tqdm import tqdm
from datatrove.io import get_datafolder, open_file
from datatrove.utils.logging import logger
from datatrove.utils.stats import PipelineStats
parser = argparse.ArgumentParser('Combine and average per task statistics into a single file.')
parser.add_argument('path', type=str, nargs='?', help='Path to the stats folder. Defaults to current directory.', default=os.getcwd())
parser.add_argument('--output', '-o', type=str, help="Save file location. Defaults to 'merged_stats.json'.", default='merged_stats.json')
def main():
args = parser.parse_args()
stats_folder = get_datafolder(args.path)
path = args.output
stats = []
for file in tqdm(stats_folder.list_files()):
with stats_folder.open(file, 'rt') as f:
stats.append(PipelineStats.from_json(json.load(f)))
merged = sum(tqdm(stats), start=PipelineStats())
with open_file(path, mode='wt') as f:
merged.save_to_disk(f)
logger.info(f'Processing complete. Results saved to {path}.')
logger.info(merged)
if __name__ == '__main__':
main()