diff --git "a/huggingface_hugginface_datasets.txt" "b/huggingface_hugginface_datasets.txt" new file mode 100644--- /dev/null +++ "b/huggingface_hugginface_datasets.txt" @@ -0,0 +1,13876 @@ +# File: datasets-main/src/datasets/__init__.py +__version__ = '3.0.1.dev0' +from .arrow_dataset import Dataset +from .arrow_reader import ReadInstruction +from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder +from .combine import concatenate_datasets, interleave_datasets +from .dataset_dict import DatasetDict, IterableDatasetDict +from .download import * +from .features import * +from .fingerprint import disable_caching, enable_caching, is_caching_enabled +from .info import DatasetInfo +from .inspect import get_dataset_config_info, get_dataset_config_names, get_dataset_default_config_name, get_dataset_infos, get_dataset_split_names +from .iterable_dataset import IterableDataset +from .load import load_dataset, load_dataset_builder, load_from_disk +from .splits import NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent +from .utils import * +from .utils import logging + +# File: datasets-main/src/datasets/arrow_dataset.py +"""""" +import contextlib +import copy +import fnmatch +import itertools +import json +import math +import os +import posixpath +import re +import shutil +import sys +import tempfile +import time +import warnings +import weakref +from collections import Counter +from collections.abc import Mapping +from copy import deepcopy +from functools import partial, wraps +from io import BytesIO +from math import ceil, floor +from pathlib import Path +from random import sample +from typing import TYPE_CHECKING, Any, BinaryIO, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, overload +from typing import Sequence as Sequence_ +import fsspec +import numpy as np +import pandas as pd +import pyarrow as pa +import pyarrow.compute as pc +from fsspec.core import url_to_fs +from huggingface_hub import CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi +from huggingface_hub.hf_api import RepoFile +from multiprocess import Pool +from tqdm.contrib.concurrent import thread_map +from . 
import config +from .arrow_reader import ArrowReader +from .arrow_writer import ArrowWriter, OptimizedTypedSequence +from .data_files import sanitize_patterns +from .download.streaming_download_manager import xgetsize +from .features import Audio, ClassLabel, Features, Image, Sequence, Value +from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, generate_from_arrow_type, pandas_types_mapper, require_decoding +from .filesystems import is_remote_filesystem +from .fingerprint import fingerprint_transform, format_kwargs_for_fingerprint, format_transform_for_fingerprint, generate_fingerprint, generate_random_fingerprint, get_temporary_cache_files_directory, is_caching_enabled, maybe_register_dataset_for_temp_dir_deletion, update_fingerprint, validate_fingerprint +from .formatting import format_table, get_format_type_from_alias, get_formatter, query_table +from .formatting.formatting import LazyDict, _is_range_contiguous +from .info import DatasetInfo, DatasetInfosDict +from .naming import _split_re +from .search import IndexableMixin +from .splits import NamedSplit, Split, SplitDict, SplitInfo +from .table import InMemoryTable, MemoryMappedTable, Table, _memory_mapped_record_batch_reader_from_file, cast_array_to_feature, concat_tables, embed_table_storage, list_table_cache_files, table_cast, table_iter, table_visitor +from .utils import logging +from .utils import tqdm as hf_tqdm +from .utils.file_utils import estimate_dataset_size +from .utils.info_utils import is_small_dataset +from .utils.metadata import MetadataConfigs +from .utils.py_utils import Literal, asdict, convert_file_size_to_int, glob_pattern_to_regex, iflatmap_unordered, string_to_dict +from .utils.stratify import stratified_shuffle_split_generate_indices +from .utils.tf_utils import dataset_to_tf, minimal_tf_collate_fn, multiprocess_dataset_to_tf +from .utils.typing import ListLike, PathLike +if TYPE_CHECKING: + import sqlite3 + import polars as pl + import pyspark + import sqlalchemy + from .dataset_dict import DatasetDict + from .iterable_dataset import IterableDataset +logger = logging.get_logger(__name__) +PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED = 'data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.parquet' + +class DatasetInfoMixin: + + def __init__(self, info: DatasetInfo, split: Optional[NamedSplit]): + self._info = info + self._split = split + + @property + def info(self): + return self._info + + @property + def split(self): + return self._split + + @property + def builder_name(self) -> str: + return self._info.builder_name + + @property + def citation(self) -> str: + return self._info.citation + + @property + def config_name(self) -> str: + return self._info.config_name + + @property + def dataset_size(self) -> Optional[int]: + return self._info.dataset_size + + @property + def description(self) -> str: + return self._info.description + + @property + def download_checksums(self) -> Optional[dict]: + return self._info.download_checksums + + @property + def download_size(self) -> Optional[int]: + return self._info.download_size + + @property + def features(self) -> Optional[Features]: + return self._info.features.copy() if self._info.features is not None else None + + @property + def homepage(self) -> Optional[str]: + return self._info.homepage + + @property + def license(self) -> Optional[str]: + return self._info.license + + @property + def size_in_bytes(self) -> Optional[int]: + return self._info.size_in_bytes + + @property + def 
supervised_keys(self): + return self._info.supervised_keys + + @property + def version(self): + return self._info.version + +class TensorflowDatasetMixin: + _TF_DATASET_REFS = set() + + @staticmethod + def _get_output_signature(dataset: 'Dataset', collate_fn: Callable, collate_fn_args: dict, cols_to_retain: Optional[List[str]]=None, batch_size: Optional[int]=None, num_test_batches: int=20): + if config.TF_AVAILABLE: + import tensorflow as tf + else: + raise ImportError('Called a Tensorflow-specific function but Tensorflow is not installed.') + if len(dataset) == 0: + raise ValueError('Unable to get the output signature because the dataset is empty.') + if batch_size is not None: + batch_size = min(len(dataset), batch_size) + test_batch_size = 1 + if cols_to_retain is not None: + cols_to_retain = list(set(cols_to_retain + ['label_ids', 'label', 'labels'])) + test_batches = [] + for _ in range(num_test_batches): + indices = sample(range(len(dataset)), test_batch_size) + test_batch = dataset[indices] + if cols_to_retain is not None: + test_batch = {key: value for (key, value) in test_batch.items() if key in cols_to_retain} + test_batch = [{key: value[i] for (key, value) in test_batch.items()} for i in range(test_batch_size)] + test_batch = collate_fn(test_batch, **collate_fn_args) + test_batches.append(test_batch) + tf_columns_to_signatures = {} + np_columns_to_dtypes = {} + for column in test_batches[0].keys(): + raw_arrays = [batch[column] for batch in test_batches] + np_arrays = [] + for array in raw_arrays: + if isinstance(array, np.ndarray): + np_arrays.append(array) + elif isinstance(array, tf.Tensor): + np_arrays.append(array.numpy()) + else: + np_arrays.append(np.array(array)) + if np.issubdtype(np_arrays[0].dtype, np.integer) or np_arrays[0].dtype == bool: + tf_dtype = tf.int64 + np_dtype = np.int64 + elif np.issubdtype(np_arrays[0].dtype, np.number): + tf_dtype = tf.float32 + np_dtype = np.float32 + elif np_arrays[0].dtype.kind == 'U': + np_dtype = np.unicode_ + tf_dtype = tf.string + else: + raise RuntimeError(f'Unrecognized array dtype {np_arrays[0].dtype}. \nNested types and image/audio types are not supported yet.') + shapes = [array.shape for array in np_arrays] + static_shape = [] + for dim in range(len(shapes[0])): + sizes = {shape[dim] for shape in shapes} + if dim == 0: + static_shape.append(batch_size) + continue + if len(sizes) == 1: + static_shape.append(sizes.pop()) + else: + static_shape.append(None) + tf_columns_to_signatures[column] = tf.TensorSpec(shape=static_shape, dtype=tf_dtype) + np_columns_to_dtypes[column] = np_dtype + return (tf_columns_to_signatures, np_columns_to_dtypes) + + def to_tf_dataset(self, batch_size: Optional[int]=None, columns: Optional[Union[str, List[str]]]=None, shuffle: bool=False, collate_fn: Optional[Callable]=None, drop_remainder: bool=False, collate_fn_args: Optional[Dict[str, Any]]=None, label_cols: Optional[Union[str, List[str]]]=None, prefetch: bool=True, num_workers: int=0, num_test_batches: int=20): + if config.TF_AVAILABLE: + import tensorflow as tf + else: + raise ImportError('Called a Tensorflow-specific function but Tensorflow is not installed.') + if isinstance(columns, list) and len(columns) == 1 or (isinstance(label_cols, list) and len(label_cols) == 1): + warnings.warn("The output of `to_tf_dataset` will change when a passing single element list for `labels` or `columns` in the next datasets version. 
To return a tuple structure rather than dict, pass a single string.\nOld behaviour: columns=['a'], labels=['labels'] -> (tf.Tensor, tf.Tensor) \n : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) \nNew behaviour: columns=['a'],labels=['labels'] -> ({'a': tf.Tensor}, {'labels': tf.Tensor}) \n : columns='a', labels='labels' -> (tf.Tensor, tf.Tensor) ", FutureWarning) + if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy): + logger.warning('Note that to_tf_dataset() loads the data with a generator rather than a full tf.data pipeline and is not compatible with remote TPU connections. If you encounter errors, please try using a TPU VM or, if your data can fit in memory, loading it into memory as a dict of Tensors instead of streaming with to_tf_dataset().') + if collate_fn is None: + collate_fn = minimal_tf_collate_fn + if collate_fn_args is None: + collate_fn_args = {} + if label_cols and (not columns): + raise ValueError('Cannot specify label_cols without specifying columns!') + if label_cols is None: + label_cols = [] + elif isinstance(label_cols, str): + label_cols = [label_cols] + if len(set(label_cols)) < len(label_cols): + raise ValueError('List of label_cols contains duplicates.') + if columns: + if isinstance(columns, str): + columns = [columns] + if len(set(columns)) < len(columns): + raise ValueError('List of columns contains duplicates.') + cols_to_retain = list(set(columns + label_cols)) + else: + cols_to_retain = None + columns = [] + if self.format['type'] not in ['custom', 'numpy']: + dataset = self.with_format('numpy') + else: + dataset = self + (output_signature, columns_to_np_types) = dataset._get_output_signature(dataset, collate_fn=collate_fn, collate_fn_args=collate_fn_args, cols_to_retain=cols_to_retain, batch_size=batch_size if drop_remainder else None, num_test_batches=num_test_batches) + if 'labels' in output_signature: + if ('label_ids' in columns or 'label' in columns) and 'labels' not in columns: + columns = [col for col in columns if col not in ['label_ids', 'label']] + ['labels'] + if ('label_ids' in label_cols or 'label' in label_cols) and 'labels' not in label_cols: + label_cols = [col for col in label_cols if col not in ['label_ids', 'label']] + ['labels'] + for col in columns: + if col not in output_signature: + raise ValueError(f'Column {col} not found in dataset!') + for col in label_cols: + if col not in output_signature: + raise ValueError(f'Label column {col} not found in dataset!') + if num_workers == 0: + tf_dataset = dataset_to_tf(dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, output_signature=output_signature, shuffle=shuffle, batch_size=batch_size, drop_remainder=drop_remainder) + elif num_workers > 0: + if batch_size is None: + raise NotImplementedError('`batch_size` must be specified when using multiple workers, as unbatched multiprocessing is not supported yet. 
Please provide a `batch_size` if `num_workers` is greater than 0.') + tf_dataset = multiprocess_dataset_to_tf(dataset=dataset, cols_to_retain=cols_to_retain, collate_fn=collate_fn, collate_fn_args=collate_fn_args, columns_to_np_types=columns_to_np_types, output_signature=output_signature, shuffle=shuffle, batch_size=batch_size, drop_remainder=drop_remainder, num_workers=num_workers) + else: + raise ValueError('num_workers must be >= 0') + + def split_features_and_labels(input_batch): + features = {key: tensor for (key, tensor) in input_batch.items() if key in columns} + labels = {key: tensor for (key, tensor) in input_batch.items() if key in label_cols} + if len(features) == 1: + features = list(features.values())[0] + if len(labels) == 1: + labels = list(labels.values())[0] + if isinstance(labels, dict) and len(labels) == 0: + return features + else: + return (features, labels) + if cols_to_retain is not None: + tf_dataset = tf_dataset.map(split_features_and_labels) + if prefetch: + tf_dataset = tf_dataset.prefetch(tf.data.experimental.AUTOTUNE) + + def cleanup_callback(ref): + dataset.__del__() + self._TF_DATASET_REFS.remove(ref) + self._TF_DATASET_REFS.add(weakref.ref(tf_dataset, cleanup_callback)) + return tf_dataset + +class DatasetTransformationNotAllowedError(Exception): + pass + +def transmit_format(func): + + @wraps(func) + def wrapper(*args, **kwargs): + if args: + self: 'Dataset' = args[0] + args = args[1:] + else: + self: 'Dataset' = kwargs.pop('self') + unformatted_columns = set(self.column_names) - set(self._format_columns or []) + self_format = {'type': self._format_type, 'format_kwargs': self._format_kwargs, 'columns': self._format_columns, 'output_all_columns': self._output_all_columns} + out: Union['Dataset', 'DatasetDict'] = func(self, *args, **kwargs) + datasets: List['Dataset'] = list(out.values()) if isinstance(out, dict) else [out] + for dataset in datasets: + new_format = self_format.copy() + if new_format['columns'] is not None: + new_format['columns'] = sorted(set(dataset.column_names) - unformatted_columns) + out_format = {'type': dataset._format_type, 'format_kwargs': dataset._format_kwargs, 'columns': sorted(dataset._format_columns) if dataset._format_columns is not None else None, 'output_all_columns': dataset._output_all_columns} + if out_format != new_format: + fingerprint = dataset._fingerprint + dataset.set_format(**new_format) + dataset._fingerprint = fingerprint + return out + wrapper._decorator_name_ = 'transmit_format' + return wrapper + +def update_metadata_with_features(table: Table, features: Features): + features = Features({col_name: features[col_name] for col_name in table.column_names}) + if table.schema.metadata is None or b'huggingface' not in table.schema.metadata: + pa_metadata = ArrowWriter._build_metadata(DatasetInfo(features=features)) + else: + metadata = json.loads(table.schema.metadata[b'huggingface'].decode()) + if 'info' not in metadata: + metadata['info'] = asdict(DatasetInfo(features=features)) + else: + metadata['info']['features'] = asdict(DatasetInfo(features=features))['features'] + pa_metadata = {'huggingface': json.dumps(metadata)} + table = table.replace_schema_metadata(pa_metadata) + return table + +def _check_table(table) -> Table: + if isinstance(table, pa.Table): + return InMemoryTable(table) + elif isinstance(table, Table): + return table + else: + raise TypeError(f'Expected a pyarrow.Table or a datasets.table.Table object, but got {table}.') + +def _check_column_names(column_names: List[str]): + counter = 
Counter(column_names) + if not all((count == 1 for count in counter.values())): + duplicated_columns = [col for col in counter if counter[col] > 1] + raise ValueError(f"The table can't have duplicated columns but columns {duplicated_columns} are duplicated.") + +def _check_valid_indices_value(index, size): + if index < 0 and index + size < 0 or index >= size: + raise IndexError(f'Index {index} out of range for dataset of size {size}.') + +class NonExistentDatasetError(Exception): + pass + +class Dataset(DatasetInfoMixin, IndexableMixin, TensorflowDatasetMixin): + + def __init__(self, arrow_table: Table, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, indices_table: Optional[Table]=None, fingerprint: Optional[str]=None): + info = info.copy() if info is not None else DatasetInfo() + DatasetInfoMixin.__init__(self, info=info, split=split) + IndexableMixin.__init__(self) + self._data: Table = _check_table(arrow_table) + self._indices: Optional[Table] = _check_table(indices_table) if indices_table is not None else None + maybe_register_dataset_for_temp_dir_deletion(self) + self._format_type: Optional[str] = None + self._format_kwargs: dict = {} + self._format_columns: Optional[list] = None + self._output_all_columns: bool = False + self._fingerprint: str = fingerprint + if self._data.schema.metadata is not None and b'huggingface' in self._data.schema.metadata: + metadata = json.loads(self._data.schema.metadata[b'huggingface'].decode()) + if 'fingerprint' in metadata and self._fingerprint is None: + self._fingerprint = metadata['fingerprint'] + inferred_features = Features.from_arrow_schema(arrow_table.schema) + if self.info.features is None: + self.info.features = inferred_features + else: + try: + self.info.features = self.info.features.reorder_fields_as(inferred_features) + except ValueError as e: + raise ValueError(f"{e}\nThe 'source' features come from dataset_info.json, and the 'target' ones are those of the dataset arrow file.") + if self.data.schema != self.info.features.arrow_schema: + self._data = self.data.cast(self.info.features.arrow_schema) + if self._fingerprint is None: + self._fingerprint = generate_fingerprint(self) + if self._info.features is None: + raise ValueError("Features can't be None in a Dataset object") + if self._fingerprint is None: + raise ValueError("Fingerprint can't be None in a Dataset object") + if self.info.features.type != inferred_features.type: + raise ValueError(f"External features info don't match the dataset:\nGot\n{self.info.features}\nwith type\n{self.info.features.type}\n\nbut expected something like\n{inferred_features}\nwith type\n{inferred_features.type}") + if self._indices is not None: + if not pa.types.is_unsigned_integer(self._indices.column(0).type): + raise ValueError(f'indices must be an Arrow table of unsigned integers, current type is {self._indices.column(0).type}') + _check_column_names(self._data.column_names) + self._data = update_metadata_with_features(self._data, self._info.features) + + @property + def features(self) -> Features: + features = super().features + if features is None: + raise ValueError("Features can't be None in a Dataset object") + return features + + @classmethod + def from_file(cls, filename: str, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, indices_filename: Optional[str]=None, in_memory: bool=False) -> 'Dataset': + table = ArrowReader.read_table(filename, in_memory=in_memory) + if indices_filename is not None: + indices_pa_table = 
ArrowReader.read_table(indices_filename, in_memory=in_memory) + else: + indices_pa_table = None + return cls(arrow_table=table, info=info, split=split, indices_table=indices_pa_table) + + @classmethod + def from_buffer(cls, buffer: pa.Buffer, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, indices_buffer: Optional[pa.Buffer]=None) -> 'Dataset': + table = InMemoryTable.from_buffer(buffer) + if indices_buffer is not None: + indices_table = InMemoryTable.from_buffer(buffer) + else: + indices_table = None + return cls(table, info=info, split=split, indices_table=indices_table) + + @classmethod + def from_pandas(cls, df: pd.DataFrame, features: Optional[Features]=None, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, preserve_index: Optional[bool]=None) -> 'Dataset': + if info is not None and features is not None and (info.features != features): + raise ValueError(f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}") + features = features if features is not None else info.features if info is not None else None + if info is None: + info = DatasetInfo() + info.features = features + table = InMemoryTable.from_pandas(df=df, preserve_index=preserve_index) + if features is not None: + table = table.cast(features.arrow_schema) + return cls(table, info=info, split=split) + + @classmethod + def from_polars(cls, df: 'pl.DataFrame', features: Optional[Features]=None, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None) -> 'Dataset': + if info is not None and features is not None and (info.features != features): + raise ValueError(f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}") + features = features if features is not None else info.features if info is not None else None + if info is None: + info = DatasetInfo() + info.features = features + table = InMemoryTable(df.to_arrow()) + if features is not None: + table = table.cast(features.arrow_schema) + return cls(table, info=info, split=split) + + @classmethod + def from_dict(cls, mapping: dict, features: Optional[Features]=None, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None) -> 'Dataset': + if info is not None and features is not None and (info.features != features): + raise ValueError(f"Features specified in `features` and `info.features` can't be different:\n{features}\n{info.features}") + features = features if features is not None else info.features if info is not None else None + arrow_typed_mapping = {} + for (col, data) in mapping.items(): + if isinstance(data, (pa.Array, pa.ChunkedArray)): + data = cast_array_to_feature(data, features[col]) if features is not None else data + else: + data = OptimizedTypedSequence(features.encode_column(data, col) if features is not None else data, type=features[col] if features is not None else None, col=col) + arrow_typed_mapping[col] = data + mapping = arrow_typed_mapping + pa_table = InMemoryTable.from_pydict(mapping=mapping) + if info is None: + info = DatasetInfo() + info.features = features + if info.features is None: + info.features = Features({col: generate_from_arrow_type(data.type) if isinstance(data, (pa.Array, pa.ChunkedArray)) else data.get_inferred_type() for (col, data) in mapping.items()}) + return cls(pa_table, info=info, split=split) + + @classmethod + def from_list(cls, mapping: List[dict], features: Optional[Features]=None, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None) -> 'Dataset': + mapping = 
{k: [r.get(k) for r in mapping] for k in mapping[0]} if mapping else {} + return cls.from_dict(mapping, features, info, split) + + @staticmethod + def from_csv(path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit]=None, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, num_proc: Optional[int]=None, **kwargs): + from .io.csv import CsvDatasetReader + return CsvDatasetReader(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs).read() + + @staticmethod + def from_generator(generator: Callable, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, gen_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, split: NamedSplit=Split.TRAIN, **kwargs): + from .io.generator import GeneratorDatasetInputStream + return GeneratorDatasetInputStream(generator=generator, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, gen_kwargs=gen_kwargs, num_proc=num_proc, split=split, **kwargs).read() + + @staticmethod + def from_json(path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit]=None, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, field: Optional[str]=None, num_proc: Optional[int]=None, **kwargs): + from .io.json import JsonDatasetReader + return JsonDatasetReader(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, field=field, num_proc=num_proc, **kwargs).read() + + @staticmethod + def from_parquet(path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit]=None, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, columns: Optional[List[str]]=None, num_proc: Optional[int]=None, **kwargs): + from .io.parquet import ParquetDatasetReader + return ParquetDatasetReader(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, num_proc=num_proc, **kwargs).read() + + @staticmethod + def from_text(path_or_paths: Union[PathLike, List[PathLike]], split: Optional[NamedSplit]=None, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, num_proc: Optional[int]=None, **kwargs): + from .io.text import TextDatasetReader + return TextDatasetReader(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, num_proc=num_proc, **kwargs).read() + + @staticmethod + def from_spark(df: 'pyspark.sql.DataFrame', split: Optional[NamedSplit]=None, features: Optional[Features]=None, keep_in_memory: bool=False, cache_dir: str=None, working_dir: str=None, load_from_cache_file: bool=True, **kwargs): + from .io.spark import SparkDatasetReader + if sys.platform == 'win32': + raise EnvironmentError('Dataset.from_spark is not currently supported on Windows') + return SparkDatasetReader(df, split=split, features=features, streaming=False, cache_dir=cache_dir, keep_in_memory=keep_in_memory, working_dir=working_dir, load_from_cache_file=load_from_cache_file, **kwargs).read() + + @staticmethod + def from_sql(sql: Union[str, 'sqlalchemy.sql.Selectable'], con: Union[str, 'sqlalchemy.engine.Connection', 'sqlalchemy.engine.Engine', 'sqlite3.Connection'], features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, **kwargs): + from .io.sql import SqlDatasetReader + return SqlDatasetReader(sql, con, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, 
**kwargs).read() + + def __setstate__(self, state): + self.__dict__.update(state) + maybe_register_dataset_for_temp_dir_deletion(self) + return self + + def __del__(self): + if hasattr(self, '_data'): + del self._data + if hasattr(self, '_indices'): + del self._indices + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.__del__() + + def save_to_disk(self, dataset_path: PathLike, max_shard_size: Optional[Union[str, int]]=None, num_shards: Optional[int]=None, num_proc: Optional[int]=None, storage_options: Optional[dict]=None): + if max_shard_size is not None and num_shards is not None: + raise ValueError('Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both.') + if self.list_indexes(): + raise ValueError('please remove all the indexes using `dataset.drop_index` before saving a dataset') + if num_shards is None: + dataset_nbytes = self._estimate_nbytes() + max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) + num_shards = int(dataset_nbytes / max_shard_size) + 1 + num_shards = max(num_shards, num_proc or 1) + num_proc = num_proc if num_proc is not None else 1 + num_shards = num_shards if num_shards is not None else num_proc + fs: fsspec.AbstractFileSystem + (fs, _) = url_to_fs(dataset_path, **storage_options or {}) + if not is_remote_filesystem(fs): + parent_cache_files_paths = {Path(cache_filename['filename']).resolve().parent for cache_filename in self.cache_files} + if Path(dataset_path).expanduser().resolve() in parent_cache_files_paths: + raise PermissionError(f"Tried to overwrite {Path(dataset_path).expanduser().resolve()} but a dataset can't overwrite itself.") + fs.makedirs(dataset_path, exist_ok=True) + state = {key: self.__dict__[key] for key in ['_fingerprint', '_format_columns', '_format_kwargs', '_format_type', '_output_all_columns']} + state['_split'] = str(self.split) if self.split is not None else self.split + state['_data_files'] = [{'filename': f'data-{shard_idx:05d}-of-{num_shards:05d}.arrow'} for shard_idx in range(num_shards)] + for k in state['_format_kwargs'].keys(): + try: + json.dumps(state['_format_kwargs'][k]) + except TypeError as e: + raise TypeError(str(e) + f"\nThe format kwargs must be JSON serializable, but key '{k}' isn't.") from None + dataset_info = asdict(self._info) + shards_done = 0 + pbar = hf_tqdm(unit=' examples', total=len(self), desc=f'Saving the dataset ({shards_done}/{num_shards} shards)') + kwargs_per_job = ({'job_id': shard_idx, 'shard': self.shard(num_shards=num_shards, index=shard_idx, contiguous=True), 'fpath': posixpath.join(dataset_path, f'data-{shard_idx:05d}-of-{num_shards:05d}.arrow'), 'storage_options': storage_options} for shard_idx in range(num_shards)) + shard_lengths = [None] * num_shards + shard_sizes = [None] * num_shards + if num_proc > 1: + with Pool(num_proc) as pool: + with pbar: + for (job_id, done, content) in iflatmap_unordered(pool, Dataset._save_to_disk_single, kwargs_iterable=kwargs_per_job): + if done: + shards_done += 1 + pbar.set_description(f'Saving the dataset ({shards_done}/{num_shards} shards)') + logger.debug(f'Finished writing shard number {job_id} of {num_shards}.') + (shard_lengths[job_id], shard_sizes[job_id]) = content + else: + pbar.update(content) + else: + with pbar: + for kwargs in kwargs_per_job: + for (job_id, done, content) in Dataset._save_to_disk_single(**kwargs): + if done: + shards_done += 1 + pbar.set_description(f'Saving the dataset ({shards_done}/{num_shards} shards)') + 
logger.debug(f'Finished writing shard number {job_id} of {num_shards}.') + (shard_lengths[job_id], shard_sizes[job_id]) = content + else: + pbar.update(content) + with fs.open(posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME), 'w', encoding='utf-8') as state_file: + json.dump(state, state_file, indent=2, sort_keys=True) + with fs.open(posixpath.join(dataset_path, config.DATASET_INFO_FILENAME), 'w', encoding='utf-8') as dataset_info_file: + sorted_keys_dataset_info = {key: dataset_info[key] for key in sorted(dataset_info)} + json.dump(sorted_keys_dataset_info, dataset_info_file, indent=2) + + @staticmethod + def _save_to_disk_single(job_id: int, shard: 'Dataset', fpath: str, storage_options: Optional[dict]): + batch_size = config.DEFAULT_MAX_BATCH_SIZE + num_examples_progress_update = 0 + writer = ArrowWriter(features=shard.features, path=fpath, storage_options=storage_options, embed_local_files=True) + try: + _time = time.time() + for pa_table in shard.with_format('arrow').iter(batch_size): + writer.write_table(pa_table) + num_examples_progress_update += len(pa_table) + if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: + _time = time.time() + yield (job_id, False, num_examples_progress_update) + num_examples_progress_update = 0 + finally: + yield (job_id, False, num_examples_progress_update) + (num_examples, num_bytes) = writer.finalize() + writer.close() + yield (job_id, True, (num_examples, num_bytes)) + + @staticmethod + def _build_local_temp_path(uri_or_path: str) -> Path: + src_dataset_path = Path(uri_or_path) + tmp_dir = get_temporary_cache_files_directory() + return Path(tmp_dir, src_dataset_path.relative_to(src_dataset_path.anchor)) + + @staticmethod + def load_from_disk(dataset_path: PathLike, keep_in_memory: Optional[bool]=None, storage_options: Optional[dict]=None) -> 'Dataset': + fs: fsspec.AbstractFileSystem + (fs, dataset_path) = url_to_fs(dataset_path, **storage_options or {}) + dest_dataset_path = dataset_path + dataset_dict_json_path = posixpath.join(dest_dataset_path, config.DATASETDICT_JSON_FILENAME) + dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) + dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) + dataset_dict_is_file = fs.isfile(dataset_dict_json_path) + dataset_info_is_file = fs.isfile(dataset_info_path) + dataset_state_is_file = fs.isfile(dataset_state_json_path) + if not dataset_info_is_file and (not dataset_state_is_file): + if dataset_dict_is_file: + raise FileNotFoundError(f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead.") + raise FileNotFoundError(f"No such files: '{dataset_info_path}', nor '{dataset_state_json_path}' found. Expected to load a `Dataset` object but provided path is not a `Dataset`.") + if not dataset_info_is_file: + if dataset_dict_is_file: + raise FileNotFoundError(f"No such file: '{dataset_info_path}' found. Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead.") + raise FileNotFoundError(f"No such file: '{dataset_info_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`.") + if not dataset_state_is_file: + if dataset_dict_is_file: + raise FileNotFoundError(f"No such file: '{dataset_state_json_path}' found. 
Expected to load a `Dataset` object, but got a `DatasetDict`. Please use either `datasets.load_from_disk` or `DatasetDict.load_from_disk` instead.") + raise FileNotFoundError(f"No such file: '{dataset_state_json_path}'. Expected to load a `Dataset` object but provided path is not a `Dataset`.") + if is_remote_filesystem(fs): + src_dataset_path = dest_dataset_path + dest_dataset_path = Dataset._build_local_temp_path(src_dataset_path) + fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True) + dataset_state_json_path = posixpath.join(dest_dataset_path, config.DATASET_STATE_JSON_FILENAME) + dataset_info_path = posixpath.join(dest_dataset_path, config.DATASET_INFO_FILENAME) + with open(dataset_state_json_path, encoding='utf-8') as state_file: + state = json.load(state_file) + with open(dataset_info_path, encoding='utf-8') as dataset_info_file: + dataset_info = DatasetInfo.from_dict(json.load(dataset_info_file)) + dataset_size = estimate_dataset_size((Path(dest_dataset_path, data_file['filename']) for data_file in state['_data_files'])) + keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(dataset_size) + table_cls = InMemoryTable if keep_in_memory else MemoryMappedTable + arrow_table = concat_tables(thread_map(table_cls.from_file, [posixpath.join(dest_dataset_path, data_file['filename']) for data_file in state['_data_files']], tqdm_class=hf_tqdm, desc='Loading dataset from disk', disable=len(state['_data_files']) <= 16 or None)) + split = state['_split'] + split = Split(split) if split is not None else split + dataset = Dataset(arrow_table=arrow_table, info=dataset_info, split=split, fingerprint=state['_fingerprint']) + format = {'type': state['_format_type'], 'format_kwargs': state['_format_kwargs'], 'columns': state['_format_columns'], 'output_all_columns': state['_output_all_columns']} + dataset = dataset.with_format(**format) + return dataset + + @property + def data(self) -> Table: + return self._data + + @property + def cache_files(self) -> List[dict]: + cache_files = list_table_cache_files(self._data) + if self._indices is not None: + cache_files += list_table_cache_files(self._indices) + return [{'filename': cache_filename} for cache_filename in cache_files] + + @property + def num_columns(self) -> int: + return self._data.num_columns + + @property + def num_rows(self) -> int: + if self._indices is not None: + return self._indices.num_rows + return self._data.num_rows + + @property + def column_names(self) -> List[str]: + return self._data.column_names + + @property + def shape(self) -> Tuple[int, int]: + if self._indices is not None: + return (self._indices.num_rows, self._data.num_columns) + return self._data.shape + + def unique(self, column: str) -> List: + if column not in self._data.column_names: + raise ValueError(f'Column ({column}) not in table columns ({self._data.column_names}).') + if self._indices is not None and self._indices.num_rows != self._data.num_rows: + dataset = self.flatten_indices() + else: + dataset = self + return dataset._data.column(column).unique().to_pylist() + + def class_encode_column(self, column: str, include_nulls: bool=False) -> 'Dataset': + if column not in self._data.column_names: + raise ValueError(f'Column ({column}) not in table columns ({self._data.column_names}).') + src_feat = self._info.features[column] + if not isinstance(src_feat, Value): + raise ValueError(f'Class encoding is only supported for {Value.__name__} column, and column {column} is {type(src_feat).__name__}.') + if 
src_feat.dtype != 'string' or (include_nulls and None in self.unique(column)): + + def stringify_column(batch): + batch[column] = [str(sample) if include_nulls or sample is not None else None for sample in batch[column]] + return batch + dset = self.map(stringify_column, batched=True, desc='Stringifying the column') + else: + dset = self + class_names = sorted((str(sample) for sample in dset.unique(column) if include_nulls or sample is not None)) + dst_feat = ClassLabel(names=class_names) + + def cast_to_class_labels(batch): + batch[column] = [dst_feat.str2int(str(sample)) if include_nulls or sample is not None else None for sample in batch[column]] + return batch + new_features = dset.features.copy() + new_features[column] = dst_feat + dset = dset.map(cast_to_class_labels, batched=True, features=new_features, desc='Casting to class labels') + return dset + + @fingerprint_transform(inplace=False) + def flatten(self, new_fingerprint: Optional[str]=None, max_depth=16) -> 'Dataset': + dataset = copy.deepcopy(self) + for depth in range(1, max_depth): + if any((isinstance(field.type, pa.StructType) for field in dataset._data.schema)): + dataset._data = dataset._data.flatten() + else: + break + dataset.info.features = self._info.features.flatten(max_depth=max_depth) + dataset.info.features = Features({col: dataset.info.features[col] for col in dataset.data.column_names}) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + logger.info(f"Flattened dataset from depth {depth} to depth {(1 if depth + 1 < max_depth else 'unknown')}.") + dataset._fingerprint = new_fingerprint + return dataset + + def cast(self, features: Features, batch_size: Optional[int]=1000, keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, num_proc: Optional[int]=None) -> 'Dataset': + if sorted(features) != sorted(self._data.column_names): + raise ValueError(f'The columns in features ({list(features)}) must be identical as the columns in the dataset: {self._data.column_names}') + schema = features.arrow_schema + format = self.format + dataset = self.with_format('arrow') + dataset = dataset.map(partial(table_cast, schema=schema), batched=True, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, num_proc=num_proc, features=features, desc='Casting the dataset') + dataset = dataset.with_format(**format) + return dataset + + @fingerprint_transform(inplace=False) + def cast_column(self, column: str, feature: FeatureType, new_fingerprint: Optional[str]=None) -> 'Dataset': + if hasattr(feature, 'decode_example'): + dataset = copy.deepcopy(self) + dataset._info.features[column] = feature + dataset._fingerprint = new_fingerprint + dataset._data = dataset._data.cast(dataset.features.arrow_schema) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + return dataset + else: + features = self.features + features[column] = feature + return self.cast(features) + + @transmit_format + @fingerprint_transform(inplace=False) + def remove_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str]=None) -> 'Dataset': + dataset = copy.deepcopy(self) + if isinstance(column_names, str): + column_names = [column_names] + missing_columns = set(column_names) - set(self._data.column_names) + if missing_columns: + raise ValueError(f'Column name {list(missing_columns)} not 
in the dataset. Current columns in the dataset: {dataset._data.column_names}') + for column_name in column_names: + del dataset._info.features[column_name] + dataset._data = dataset._data.drop(column_names) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + dataset._fingerprint = new_fingerprint + return dataset + + @fingerprint_transform(inplace=False) + def rename_column(self, original_column_name: str, new_column_name: str, new_fingerprint: Optional[str]=None) -> 'Dataset': + dataset = copy.deepcopy(self) + if original_column_name not in dataset._data.column_names: + raise ValueError(f'Original column name {original_column_name} not in the dataset. Current columns in the dataset: {dataset._data.column_names}') + if new_column_name in dataset._data.column_names: + raise ValueError(f'New column name {new_column_name} already in the dataset. Please choose a column name which is not already in the dataset. Current columns in the dataset: {dataset._data.column_names}') + if not new_column_name: + raise ValueError('New column name is empty.') + + def rename(columns): + return [new_column_name if col == original_column_name else col for col in columns] + new_column_names = rename(self._data.column_names) + if self._format_columns is not None: + dataset._format_columns = rename(self._format_columns) + dataset._info.features = Features({new_column_name if col == original_column_name else col: feature for (col, feature) in self._info.features.items()}) + dataset._data = dataset._data.rename_columns(new_column_names) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + dataset._fingerprint = new_fingerprint + return dataset + + @fingerprint_transform(inplace=False) + def rename_columns(self, column_mapping: Dict[str, str], new_fingerprint: Optional[str]=None) -> 'Dataset': + dataset = copy.deepcopy(self) + extra_columns = set(column_mapping.keys()) - set(dataset.column_names) + if extra_columns: + raise ValueError(f'Original column names {extra_columns} not in the dataset. 
Current columns in the dataset: {dataset._data.column_names}') + number_of_duplicates_in_new_columns = len(column_mapping.values()) - len(set(column_mapping.values())) + if number_of_duplicates_in_new_columns != 0: + raise ValueError(f'New column names must all be different, but this column mapping has {number_of_duplicates_in_new_columns} duplicates') + empty_new_columns = [new_col for new_col in column_mapping.values() if not new_col] + if empty_new_columns: + raise ValueError(f'New column names {empty_new_columns} are empty.') + + def rename(columns): + return [column_mapping[col] if col in column_mapping else col for col in columns] + new_column_names = rename(self._data.column_names) + if self._format_columns is not None: + dataset._format_columns = rename(self._format_columns) + dataset._info.features = Features({column_mapping[col] if col in column_mapping else col: feature for (col, feature) in (self._info.features or {}).items()}) + dataset._data = dataset._data.rename_columns(new_column_names) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + dataset._fingerprint = new_fingerprint + return dataset + + @transmit_format + @fingerprint_transform(inplace=False) + def select_columns(self, column_names: Union[str, List[str]], new_fingerprint: Optional[str]=None) -> 'Dataset': + if isinstance(column_names, str): + column_names = [column_names] + missing_columns = set(column_names) - set(self._data.column_names) + if missing_columns: + raise ValueError(f'Column name {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}.') + dataset = copy.deepcopy(self) + dataset._data = dataset._data.select(column_names) + dataset._info.features = Features({col: self._info.features[col] for col in dataset._data.column_names}) + dataset._data = update_metadata_with_features(dataset._data, dataset.features) + dataset._fingerprint = new_fingerprint + return dataset + + def __len__(self): + return self.num_rows + + def __iter__(self): + if self._indices is None: + format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} + formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) + batch_size = config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER + for pa_subtable in table_iter(self.data, batch_size=batch_size): + for i in range(pa_subtable.num_rows): + pa_subtable_ex = pa_subtable.slice(i, 1) + formatted_output = format_table(pa_subtable_ex, 0, formatter=formatter, format_columns=self._format_columns, output_all_columns=self._output_all_columns) + yield formatted_output + else: + for i in range(self.num_rows): + yield self._getitem(i) + + def iter(self, batch_size: int, drop_last_batch: bool=False): + if self._indices is None: + format_kwargs = self._format_kwargs if self._format_kwargs is not None else {} + formatter = get_formatter(self._format_type, features=self._info.features, **format_kwargs) + for pa_subtable in table_iter(self.data, batch_size=batch_size, drop_last_batch=drop_last_batch): + formatted_batch = format_table(pa_subtable, range(pa_subtable.num_rows), formatter=formatter, format_columns=self._format_columns, output_all_columns=self._output_all_columns) + yield formatted_batch + else: + num_rows = self.num_rows if not drop_last_batch else self.num_rows // batch_size * batch_size + for i in range(0, num_rows, batch_size): + yield self._getitem(slice(i, i + batch_size)) + + def __repr__(self): + return f'Dataset({{\n features: 
{list(self._info.features.keys())},\n num_rows: {self.num_rows}\n}})' + + @property + def format(self): + return {'type': self._format_type, 'format_kwargs': self._format_kwargs, 'columns': self.column_names if self._format_columns is None else self._format_columns, 'output_all_columns': self._output_all_columns} + + @contextlib.contextmanager + def formatted_as(self, type: Optional[str]=None, columns: Optional[List]=None, output_all_columns: bool=False, **format_kwargs): + old_format_type = self._format_type + old_format_kwargs = self._format_kwargs + old_format_columns = self._format_columns + old_output_all_columns = self._output_all_columns + try: + self.set_format(type, columns, output_all_columns, **format_kwargs) + yield + finally: + self.set_format(old_format_type, old_format_columns, old_output_all_columns, **old_format_kwargs) + + @fingerprint_transform(inplace=True) + def set_format(self, type: Optional[str]=None, columns: Optional[List]=None, output_all_columns: bool=False, **format_kwargs): + format_kwargs.update(format_kwargs.pop('format_kwargs', {})) + type = get_format_type_from_alias(type) + get_formatter(type, features=self._info.features, **format_kwargs) + if isinstance(columns, str): + columns = [columns] + if isinstance(columns, tuple): + columns = list(columns) + if columns is not None: + missing_columns = set(columns) - set(self._data.column_names) + if missing_columns: + raise ValueError(f'Columns {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}') + if columns is not None: + columns = columns.copy() + self._format_type = type + self._format_kwargs = format_kwargs + self._format_columns = columns + self._output_all_columns = output_all_columns + logger.debug('Set __getitem__(key) output type to %s for %s columns (when key is int or slice) and %s output other (un-formatted) columns.', 'python objects' if type is None else type, 'no' if columns is None else str(columns), 'do' if output_all_columns else "don't") + + def reset_format(self): + self.set_format() + + def set_transform(self, transform: Optional[Callable], columns: Optional[List]=None, output_all_columns: bool=False): + self.set_format('custom', columns=columns, output_all_columns=output_all_columns, transform=transform) + + def with_format(self, type: Optional[str]=None, columns: Optional[List]=None, output_all_columns: bool=False, **format_kwargs): + dataset = copy.deepcopy(self) + dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) + return dataset + + def with_transform(self, transform: Optional[Callable], columns: Optional[List]=None, output_all_columns: bool=False): + dataset = copy.deepcopy(self) + dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) + return dataset + + def _getitem(self, key: Union[int, slice, str, ListLike[int]], **kwargs) -> Union[Dict, List]: + if isinstance(key, bool): + raise TypeError('dataset index must be int, str, slice or collection of int, not bool') + format_type = kwargs['format_type'] if 'format_type' in kwargs else self._format_type + format_columns = kwargs['format_columns'] if 'format_columns' in kwargs else self._format_columns + output_all_columns = kwargs['output_all_columns'] if 'output_all_columns' in kwargs else self._output_all_columns + format_kwargs = kwargs['format_kwargs'] if 'format_kwargs' in kwargs else self._format_kwargs + format_kwargs = format_kwargs if format_kwargs is not None else {} + formatter = 
get_formatter(format_type, features=self._info.features, **format_kwargs) + pa_subtable = query_table(self._data, key, indices=self._indices) + formatted_output = format_table(pa_subtable, key, formatter=formatter, format_columns=format_columns, output_all_columns=output_all_columns) + return formatted_output + + @overload + def __getitem__(self, key: Union[int, slice, Iterable[int]]) -> Dict: + ... + + @overload + def __getitem__(self, key: str) -> List: + ... + + def __getitem__(self, key): + return self._getitem(key) + + def __getitems__(self, keys: List) -> List: + batch = self.__getitem__(keys) + n_examples = len(batch[next(iter(batch))]) + return [{col: array[i] for (col, array) in batch.items()} for i in range(n_examples)] + + def cleanup_cache_files(self) -> int: + current_cache_files = [os.path.abspath(cache_file['filename']) for cache_file in self.cache_files] + if not current_cache_files: + return 0 + cache_directory = os.path.dirname(current_cache_files[0]) + logger.info(f'Listing files in {cache_directory}') + files: List[str] = os.listdir(cache_directory) + files_to_remove = [] + for f_name in files: + full_name = os.path.abspath(os.path.join(cache_directory, f_name)) + if f_name.startswith('cache-') and f_name.endswith('.arrow'): + if full_name in current_cache_files: + logger.info(f'Keeping currently used cache file at {full_name}') + continue + files_to_remove.append(full_name) + for file_path in files_to_remove: + logger.info(f'Removing {file_path}') + os.remove(file_path) + return len(files_to_remove) + + def _get_cache_file_path(self, fingerprint): + if is_caching_enabled() and self.cache_files: + cache_file_name = 'cache-' + fingerprint + '.arrow' + cache_directory = os.path.dirname(self.cache_files[0]['filename']) + else: + cache_file_name = 'cache-' + generate_random_fingerprint() + '.arrow' + cache_directory = get_temporary_cache_files_directory() + cache_file_path = os.path.join(cache_directory, cache_file_name) + return cache_file_path + + @transmit_format + def map(self, function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, suffix_template: str='_{rank:05d}_of_{num_proc:05d}', new_fingerprint: Optional[str]=None, desc: Optional[str]=None) -> 'Dataset': + if keep_in_memory and cache_file_name is not None: + raise ValueError('Please use either `keep_in_memory` or `cache_file_name` but not both.') + if num_proc is not None and num_proc <= 0: + raise ValueError('num_proc must be an integer > 0.') + if len(self) == 0: + if self._indices is not None: + self = Dataset(self.data.slice(0, 0), info=self.info.copy(), split=self.split, fingerprint=new_fingerprint) + if remove_columns: + return self.remove_columns(remove_columns) + else: + return self + if function is None: + function = lambda x: x + if isinstance(input_columns, str): + input_columns = [input_columns] + if input_columns is not None: + missing_columns = set(input_columns) - set(self._data.column_names) + if missing_columns: + raise ValueError(f'Input column {list(missing_columns)} not in the dataset. 
Current columns in the dataset: {self._data.column_names}') + if isinstance(remove_columns, str): + remove_columns = [remove_columns] + if remove_columns is not None: + missing_columns = set(remove_columns) - set(self._data.column_names) + if missing_columns: + raise ValueError(f'Column to remove {list(missing_columns)} not in the dataset. Current columns in the dataset: {self._data.column_names}') + load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() + if fn_kwargs is None: + fn_kwargs = {} + if num_proc is not None and num_proc > len(self): + num_proc = len(self) + logger.warning(f'num_proc must be <= {len(self)}. Reducing num_proc to {num_proc} for dataset of size {len(self)}.') + dataset_kwargs = {'shard': self, 'function': function, 'with_indices': with_indices, 'with_rank': with_rank, 'input_columns': input_columns, 'batched': batched, 'batch_size': batch_size, 'drop_last_batch': drop_last_batch, 'remove_columns': remove_columns, 'keep_in_memory': keep_in_memory, 'writer_batch_size': writer_batch_size, 'features': features, 'disable_nullable': disable_nullable, 'fn_kwargs': fn_kwargs} + if new_fingerprint is None: + transform = format_transform_for_fingerprint(Dataset._map_single) + kwargs_for_fingerprint = format_kwargs_for_fingerprint(Dataset._map_single, (), dataset_kwargs) + kwargs_for_fingerprint['fingerprint_name'] = 'new_fingerprint' + new_fingerprint = update_fingerprint(self._fingerprint, transform, kwargs_for_fingerprint) + else: + validate_fingerprint(new_fingerprint) + dataset_kwargs['new_fingerprint'] = new_fingerprint + if self.cache_files: + if cache_file_name is None: + cache_file_name = self._get_cache_file_path(new_fingerprint) + dataset_kwargs['cache_file_name'] = cache_file_name + + def load_processed_shard_from_cache(shard_kwargs): + shard = shard_kwargs['shard'] + if shard_kwargs['cache_file_name'] is not None: + if os.path.exists(shard_kwargs['cache_file_name']) and load_from_cache_file: + info = shard.info.copy() + info.features = features + return Dataset.from_file(shard_kwargs['cache_file_name'], info=info, split=shard.split) + raise NonExistentDatasetError + num_shards = num_proc if num_proc is not None else 1 + if batched and drop_last_batch: + pbar_total = len(self) // num_shards // batch_size * num_shards * batch_size + else: + pbar_total = len(self) + shards_done = 0 + if num_proc is None or num_proc == 1: + transformed_dataset = None + try: + transformed_dataset = load_processed_shard_from_cache(dataset_kwargs) + logger.info(f"Loading cached processed dataset at {dataset_kwargs['cache_file_name']}") + except NonExistentDatasetError: + pass + if transformed_dataset is None: + with hf_tqdm(unit=' examples', total=pbar_total, desc=desc or 'Map') as pbar: + for (rank, done, content) in Dataset._map_single(**dataset_kwargs): + if done: + shards_done += 1 + logger.debug(f'Finished processing shard number {rank} of {num_shards}.') + transformed_dataset = content + else: + pbar.update(content) + assert transformed_dataset is not None, 'Failed to retrieve the result from map' + if transformed_dataset._fingerprint != self._fingerprint: + transformed_dataset._fingerprint = new_fingerprint + return transformed_dataset + else: + + def format_cache_file_name(cache_file_name: Optional[str], rank: Union[int, Literal['*']]) -> Optional[str]: + if not cache_file_name: + return cache_file_name + sep = cache_file_name.rindex('.') + (base_name, extension) = (cache_file_name[:sep], cache_file_name[sep:]) + if 
isinstance(rank, int): + cache_file_name = base_name + suffix_template.format(rank=rank, num_proc=num_proc) + extension + logger.info(f'Process #{rank} will write at {cache_file_name}') + else: + cache_file_name = base_name + suffix_template.replace('{rank:05d}', '{rank}').format(rank=rank, num_proc=num_proc) + extension + return cache_file_name + + def format_new_fingerprint(new_fingerprint: str, rank: int) -> str: + new_fingerprint = new_fingerprint + suffix_template.format(rank=rank, num_proc=num_proc) + validate_fingerprint(new_fingerprint) + return new_fingerprint + prev_env = deepcopy(os.environ) + if prev_env.get('TOKENIZERS_PARALLELISM', 'false').lower() not in ('', 'off', 'false', 'f', 'no', 'n', '0'): + logger.warning('Setting TOKENIZERS_PARALLELISM=false for forked processes.') + os.environ['TOKENIZERS_PARALLELISM'] = 'false' + shards = [self.shard(num_shards=num_proc, index=rank, contiguous=True, keep_in_memory=keep_in_memory) for rank in range(num_proc)] + kwargs_per_job = [{**dataset_kwargs, 'shard': shards[rank], 'cache_file_name': format_cache_file_name(cache_file_name, rank), 'rank': rank, 'offset': sum((len(s) for s in shards[:rank])), 'new_fingerprint': format_new_fingerprint(new_fingerprint, rank)} for rank in range(num_shards)] + transformed_shards = [None] * num_shards + for rank in range(num_shards): + try: + transformed_shards[rank] = load_processed_shard_from_cache(kwargs_per_job[rank]) + kwargs_per_job[rank] = None + except NonExistentDatasetError: + pass + kwargs_per_job = [kwargs for kwargs in kwargs_per_job if kwargs is not None] + if kwargs_per_job: + if len(kwargs_per_job) < num_shards: + logger.info(f'Reprocessing {len(kwargs_per_job)}/{num_shards} shards because some of them were missing from the cache.') + with Pool(len(kwargs_per_job)) as pool: + os.environ = prev_env + logger.info(f'Spawning {num_proc} processes') + with hf_tqdm(unit=' examples', total=pbar_total, desc=(desc or 'Map') + f' (num_proc={num_proc})') as pbar: + for (rank, done, content) in iflatmap_unordered(pool, Dataset._map_single, kwargs_iterable=kwargs_per_job): + if done: + shards_done += 1 + logger.debug(f'Finished processing shard number {rank} of {num_shards}.') + transformed_shards[rank] = content + else: + pbar.update(content) + for kwargs in kwargs_per_job: + del kwargs['shard'] + else: + logger.info(f"Loading cached processed dataset at {format_cache_file_name(cache_file_name, '*')}") + assert None not in transformed_shards, f'Failed to retrieve results from map: result list {transformed_shards} still contains None - at least one worker failed to return its results' + logger.info(f'Concatenating {num_proc} shards') + result = _concatenate_map_style_datasets(transformed_shards) + if any((transformed_shard._fingerprint != shard._fingerprint for (transformed_shard, shard) in zip(transformed_shards, shards))): + result._fingerprint = new_fingerprint + else: + result._fingerprint = self._fingerprint + return result + + @staticmethod + def _map_single(shard: 'Dataset', function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[List[str]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[List[str]]=None, keep_in_memory: bool=False, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, new_fingerprint: Optional[str]=None, rank: Optional[int]=None, offset: 
int=0) -> Iterable[Tuple[int, bool, Union[int, 'Dataset']]]: + if fn_kwargs is None: + fn_kwargs = {} + if batched and (batch_size is None or batch_size <= 0): + batch_size = shard.num_rows + update_data = None + format_kwargs = shard._format_kwargs.copy() + if not input_columns and shard._format_type is None: + format_kwargs['lazy'] = True + input_formatter = get_formatter(shard._format_type, features=shard.features, **format_kwargs) + + class NumExamplesMismatchError(Exception): + pass + + def validate_function_output(processed_inputs, indices): + allowed_processed_inputs_types = (Mapping, pa.Table, pd.DataFrame) + if config.POLARS_AVAILABLE and 'polars' in sys.modules: + import polars as pl + allowed_processed_inputs_types += (pl.DataFrame,) + if processed_inputs is not None and (not isinstance(processed_inputs, allowed_processed_inputs_types)): + raise TypeError(f'Provided `function` which is applied to all elements of table returns a variable of type {type(processed_inputs)}. Make sure provided `function` returns a variable of type `dict` (or a pyarrow table) to update the dataset or `None` if you are only interested in side effects.') + elif isinstance(indices, list) and isinstance(processed_inputs, Mapping): + allowed_batch_return_types = (list, np.ndarray, pd.Series) + if config.POLARS_AVAILABLE and 'polars' in sys.modules: + import polars as pl + allowed_batch_return_types += (pl.Series, pl.DataFrame) + if config.TF_AVAILABLE and 'tensorflow' in sys.modules: + import tensorflow as tf + allowed_batch_return_types += (tf.Tensor,) + if config.TORCH_AVAILABLE and 'torch' in sys.modules: + import torch + allowed_batch_return_types += (torch.Tensor,) + if config.JAX_AVAILABLE and 'jax' in sys.modules: + import jax.numpy as jnp + allowed_batch_return_types += (jnp.ndarray,) + all_dict_values_are_lists = all((isinstance(value, allowed_batch_return_types) for value in processed_inputs.values())) + if all_dict_values_are_lists is False: + raise TypeError(f'Provided `function` which is applied to all elements of table returns a `dict` of types {[type(x) for x in processed_inputs.values()]}. 
When using `batched=True`, make sure provided `function` returns a `dict` of types like `{allowed_batch_return_types}`.') + + def apply_function_on_filtered_inputs(pa_inputs, indices, check_same_num_examples=False, offset=0): + nonlocal update_data + inputs = format_table(pa_inputs, 0 if not batched else range(pa_inputs.num_rows), format_columns=input_columns, formatter=input_formatter) + fn_args = [inputs] if input_columns is None else [inputs[col] for col in input_columns] + if offset == 0: + effective_indices = indices + else: + effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset + additional_args = () + if with_indices: + additional_args += (effective_indices,) + if with_rank: + additional_args += (rank,) + processed_inputs = function(*fn_args, *additional_args, **fn_kwargs) + if isinstance(processed_inputs, LazyDict): + processed_inputs = {k: v for (k, v) in processed_inputs.data.items() if k not in processed_inputs.keys_to_format} + returned_lazy_dict = True + else: + returned_lazy_dict = False + if update_data is None: + updatable_types = (Mapping, pa.Table, pd.DataFrame) + if config.POLARS_AVAILABLE and 'polars' in sys.modules: + import polars as pl + updatable_types += (pl.DataFrame,) + update_data = isinstance(processed_inputs, updatable_types) + validate_function_output(processed_inputs, indices) + if not update_data: + return None + if shard._format_type or input_columns: + inputs_to_merge = dict(zip(pa_inputs.column_names, pa_inputs.itercolumns())) + elif isinstance(inputs, LazyDict): + inputs_to_merge = {k: v if k not in inputs.keys_to_format else pa_inputs[k] for (k, v) in inputs.data.items()} + else: + inputs_to_merge = inputs + if remove_columns is not None: + for column in remove_columns: + if column in inputs_to_merge: + inputs_to_merge.pop(column) + if returned_lazy_dict and column in processed_inputs: + processed_inputs.pop(column) + if check_same_num_examples: + input_num_examples = len(pa_inputs) + processed_inputs_num_examples = len(processed_inputs[next(iter(processed_inputs.keys()))]) + if input_num_examples != processed_inputs_num_examples: + raise NumExamplesMismatchError() + if isinstance(inputs, Mapping) and isinstance(processed_inputs, Mapping): + return {**inputs_to_merge, **processed_inputs} + else: + return processed_inputs + + def init_buffer_and_writer(): + writer_features = features + if writer_features is None: + writer_features = shard.features + update_features = True + else: + update_features = False + if keep_in_memory or cache_file_name is None: + buf_writer = pa.BufferOutputStream() + tmp_file = None + writer = ArrowWriter(features=writer_features, stream=buf_writer, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable) + else: + buf_writer = None + logger.info(f'Caching processed dataset at {cache_file_name}') + cache_dir = os.path.dirname(cache_file_name) + os.makedirs(cache_dir, exist_ok=True) + tmp_file = tempfile.NamedTemporaryFile('wb', dir=cache_dir, delete=False) + writer = ArrowWriter(features=writer_features, path=tmp_file.name, writer_batch_size=writer_batch_size, update_features=update_features, fingerprint=new_fingerprint, disable_nullable=disable_nullable) + return (buf_writer, writer, tmp_file) + num_examples_progress_update = 0 + (buf_writer, writer, tmp_file) = (None, None, None) + if config.POLARS_AVAILABLE and 'polars' in sys.modules: + import polars as pl + with contextlib.ExitStack() as stack: + 
try: + arrow_formatted_shard = shard.with_format('arrow') + if not batched: + shard_iterable = enumerate(arrow_formatted_shard) + else: + num_rows = len(shard) if not drop_last_batch else len(shard) // batch_size * batch_size + shard_iterable = zip(range(0, num_rows, batch_size), arrow_formatted_shard.iter(batch_size, drop_last_batch=drop_last_batch)) + if not batched: + _time = time.time() + for (i, example) in shard_iterable: + example = apply_function_on_filtered_inputs(example, i, offset=offset) + if update_data: + if i == 0: + (buf_writer, writer, tmp_file) = init_buffer_and_writer() + stack.enter_context(writer) + if isinstance(example, pa.Table): + writer.write_row(example) + elif isinstance(example, pd.DataFrame): + writer.write_row(pa.Table.from_pandas(example)) + elif config.POLARS_AVAILABLE and 'polars' in sys.modules and isinstance(example, pl.DataFrame): + writer.write_row(example.to_arrow()) + else: + writer.write(example) + num_examples_progress_update += 1 + if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: + _time = time.time() + yield (rank, False, num_examples_progress_update) + num_examples_progress_update = 0 + else: + _time = time.time() + for (i, batch) in shard_iterable: + num_examples_in_batch = len(batch) + indices = list(range(*slice(i, i + batch_size).indices(shard.num_rows))) + try: + batch = apply_function_on_filtered_inputs(batch, indices, check_same_num_examples=len(shard.list_indexes()) > 0, offset=offset) + except NumExamplesMismatchError: + raise DatasetTransformationNotAllowedError("Using `.map` in batched mode on a dataset with attached indexes is allowed only if it doesn't create or remove existing examples. You can first run `.drop_index() to remove your index and then re-add it.") from None + if update_data: + if i == 0: + (buf_writer, writer, tmp_file) = init_buffer_and_writer() + stack.enter_context(writer) + if isinstance(batch, pa.Table): + writer.write_table(batch) + elif isinstance(batch, pd.DataFrame): + writer.write_table(pa.Table.from_pandas(batch)) + elif config.POLARS_AVAILABLE and 'polars' in sys.modules and isinstance(batch, pl.DataFrame): + writer.write_table(batch.to_arrow()) + else: + writer.write_batch(batch) + num_examples_progress_update += num_examples_in_batch + if time.time() > _time + config.PBAR_REFRESH_TIME_INTERVAL: + _time = time.time() + yield (rank, False, num_examples_progress_update) + num_examples_progress_update = 0 + if update_data and writer is not None: + writer.finalize() + except (Exception, KeyboardInterrupt): + yield (rank, False, num_examples_progress_update) + if update_data: + if writer is not None: + writer.finalize() + if tmp_file is not None: + tmp_file.close() + if os.path.exists(tmp_file.name): + os.remove(tmp_file.name) + raise + yield (rank, False, num_examples_progress_update) + if update_data and tmp_file is not None: + tmp_file.close() + shutil.move(tmp_file.name, cache_file_name) + umask = os.umask(438) + os.umask(umask) + os.chmod(cache_file_name, 438 & ~umask) + if update_data: + info = shard.info.copy() + info.features = writer._features + if buf_writer is None: + yield (rank, True, Dataset.from_file(cache_file_name, info=info, split=shard.split)) + else: + yield (rank, True, Dataset.from_buffer(buf_writer.getvalue(), info=info, split=shard.split)) + else: + yield (rank, True, shard) + + @transmit_format + @fingerprint_transform(inplace=False) + def batch(self, batch_size: int, drop_last_batch: bool=False, num_proc: Optional[int]=None, new_fingerprint: Optional[str]=None) -> 
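[Editorial note, not part of the library source] `_map_single` above accepts several return types from the user function: a `dict`, a `pyarrow.Table`, a `pandas.DataFrame` (or a Polars frame when available), or `None` for side-effect-only calls; tables go through `write_table`/`write_row`, plain dicts through `write`/`write_batch`. A hedged sketch of those cases, with illustrative column names:

import pyarrow as pa
from datasets import Dataset

ds = Dataset.from_dict({"x": [1, 2, 3]})

# Returning a dict of lists in batched mode adds/updates columns.
ds_dict = ds.map(lambda batch: {"y": [v + 1 for v in batch["x"]]}, batched=True)

# Returning a pyarrow.Table replaces the batch entirely (columns come from the table).
ds_table = ds.map(lambda batch: pa.table({"z": batch["x"]}), batched=True)

# Returning None leaves the dataset unchanged (side effects only).
ds_same = ds.map(lambda example: None)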
'Dataset': + + def batch_fn(example): + return {k: [v] for (k, v) in example.items()} + return self.map(batch_fn, batched=True, batch_size=batch_size, drop_last_batch=drop_last_batch, num_proc=num_proc, new_fingerprint=new_fingerprint, desc='Batching examples') + + @transmit_format + @fingerprint_transform(inplace=False, ignore_kwargs=['load_from_cache_file', 'cache_file_name', 'desc'], version='2.0.1') + def filter(self, function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, suffix_template: str='_{rank:05d}_of_{num_proc:05d}', new_fingerprint: Optional[str]=None, desc: Optional[str]=None) -> 'Dataset': + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError('Using `.filter` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.`') + if function is None: + function = lambda x: True + if len(self) == 0: + return self + indices = self.map(function=partial(get_indices_from_mask_function, function, batched, with_indices, with_rank, input_columns, self._indices), with_indices=True, with_rank=True, features=Features({'indices': Value('uint64')}), batched=True, batch_size=batch_size, remove_columns=self.column_names, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, fn_kwargs=fn_kwargs, num_proc=num_proc, suffix_template=suffix_template, new_fingerprint=new_fingerprint, input_columns=input_columns, desc=desc or 'Filter') + new_dataset = copy.deepcopy(self) + new_dataset._indices = indices.data + new_dataset._fingerprint = new_fingerprint + return new_dataset + + @transmit_format + @fingerprint_transform(inplace=False, ignore_kwargs=['cache_file_name']) + def flatten_indices(self, keep_in_memory: bool=False, cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, num_proc: Optional[int]=None, new_fingerprint: Optional[str]=None) -> 'Dataset': + return self.map(batched=True, keep_in_memory=keep_in_memory, cache_file_name=cache_file_name, writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, new_fingerprint=new_fingerprint, desc='Flattening the indices', num_proc=num_proc) + + def _new_dataset_with_indices(self, indices_cache_file_name: Optional[str]=None, indices_buffer: Optional[pa.Buffer]=None, fingerprint: Optional[str]=None) -> 'Dataset': + if indices_cache_file_name is None and indices_buffer is None: + raise ValueError('At least one of indices_cache_file_name or indices_buffer must be provided.') + if fingerprint is None: + raise ValueError('please specify a fingerprint for the dataset with indices') + if indices_cache_file_name is not None: + indices_table = MemoryMappedTable.from_file(indices_cache_file_name) + else: + indices_table = InMemoryTable.from_buffer(indices_buffer) + return Dataset(self._data, info=self.info.copy(), split=self.split, indices_table=indices_table, fingerprint=fingerprint) + + @transmit_format + @fingerprint_transform(inplace=False, ignore_kwargs=['indices_cache_file_name']) + def select(self, indices: Iterable, 
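[Editorial note, not part of the library source] `filter` above is built on top of `map`: it runs `get_indices_from_mask_function` to produce an indices mapping instead of rewriting the Arrow data, and `batch` is a thin batched `map`. A small sketch with illustrative predicates:

from datasets import Dataset

ds = Dataset.from_dict({"text": ["short", "a much longer sentence", ""], "label": [0, 1, 0]})

# Keep only non-empty texts; only an indices table is created, the Arrow data is untouched.
kept = ds.filter(lambda example: len(example["text"]) > 0)

# Batched predicate: must return one boolean per example in the batch.
kept_batched = ds.filter(lambda batch: [t.startswith("a") for t in batch["text"]], batched=True)

# Dataset.batch groups consecutive rows into list-valued columns.
batched_ds = ds.batch(batch_size=2)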
keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None) -> 'Dataset': + if keep_in_memory and indices_cache_file_name is not None: + raise ValueError('Please use either `keep_in_memory` or `indices_cache_file_name` but not both.') + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError('Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.') + if len(self) == 0: + return self + if isinstance(indices, (pa.Array, pa.ChunkedArray)): + indices = indices.to_numpy().astype(np.int64) + if isinstance(indices, Iterator): + indices = list(indices) + if isinstance(indices, range): + if _is_range_contiguous(indices) and indices.start >= 0: + (start, length) = (indices.start, indices.stop - indices.start) + return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) + else: + try: + start = next(iter(indices)) + except StopIteration: + return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) + if start >= 0: + counter_from_start = itertools.count(start=start) + if all((i == j for (i, j) in zip(indices, counter_from_start))): + length = next(counter_from_start) - start + return self._select_contiguous(start, length, new_fingerprint=new_fingerprint) + return self._select_with_indices_mapping(indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint) + + @transmit_format + @fingerprint_transform(inplace=False) + def _select_contiguous(self, start: int, length: int, new_fingerprint: Optional[str]=None) -> 'Dataset': + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError('Using `.select` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.') + if len(self) == 0: + return self + _check_valid_indices_value(start, len(self)) + _check_valid_indices_value(start + length - 1, len(self)) + if self._indices is None or length == 0: + return Dataset(self.data.slice(start, length), info=self.info.copy(), split=self.split, fingerprint=new_fingerprint) + else: + return Dataset(self.data, info=self.info.copy(), split=self.split, indices_table=self._indices.slice(start, length), fingerprint=new_fingerprint) + + @transmit_format + @fingerprint_transform(inplace=False, ignore_kwargs=['indices_cache_file_name']) + def _select_with_indices_mapping(self, indices: Iterable, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None) -> 'Dataset': + if keep_in_memory and indices_cache_file_name is not None: + raise ValueError('Please use either `keep_in_memory` or `indices_cache_file_name` but not both.') + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError('Using `.select` on a dataset with attached indexes is not allowed. 
You can first run `.drop_index() to remove your index and then re-add it.') + if len(self) == 0: + return self + if keep_in_memory or indices_cache_file_name is None: + buf_writer = pa.BufferOutputStream() + tmp_file = None + writer = ArrowWriter(stream=buf_writer, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit='indices') + else: + buf_writer = None + logger.info(f'Caching indices mapping at {indices_cache_file_name}') + cache_dir = os.path.dirname(indices_cache_file_name) + os.makedirs(cache_dir, exist_ok=True) + tmp_file = tempfile.NamedTemporaryFile('wb', dir=cache_dir, delete=False) + writer = ArrowWriter(path=tmp_file.name, writer_batch_size=writer_batch_size, fingerprint=new_fingerprint, unit='indices') + indices = indices if isinstance(indices, list) else list(indices) + size = len(self) + if indices: + _check_valid_indices_value(int(max(indices)), size=size) + _check_valid_indices_value(int(min(indices)), size=size) + else: + return self._select_contiguous(0, 0, new_fingerprint=new_fingerprint) + indices_array = pa.array(indices, type=pa.uint64()) + if self._indices is not None: + indices_array = self._indices.column(0).take(indices_array) + indices_table = pa.Table.from_arrays([indices_array], names=['indices']) + with writer: + try: + writer.write_table(indices_table) + writer.finalize() + except (Exception, KeyboardInterrupt): + if tmp_file is not None: + tmp_file.close() + if os.path.exists(tmp_file.name): + os.remove(tmp_file.name) + raise + if tmp_file is not None: + tmp_file.close() + shutil.move(tmp_file.name, indices_cache_file_name) + umask = os.umask(438) + os.umask(umask) + os.chmod(indices_cache_file_name, 438 & ~umask) + if buf_writer is None: + return self._new_dataset_with_indices(indices_cache_file_name=indices_cache_file_name, fingerprint=new_fingerprint) + else: + return self._new_dataset_with_indices(indices_buffer=buf_writer.getvalue(), fingerprint=new_fingerprint) + + def skip(self, n: int) -> 'Dataset': + return self.select(range(n, len(self))) + + def take(self, n: int) -> 'Dataset': + return self.select(range(n)) + + @transmit_format + @fingerprint_transform(inplace=False, ignore_kwargs=['load_from_cache_file', 'indices_cache_file_name']) + def sort(self, column_names: Union[str, Sequence_[str]], reverse: Union[bool, Sequence_[bool]]=False, null_placement: str='at_end', keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None) -> 'Dataset': + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError('Using `.sort` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.') + if len(self) == 0: + return self + if isinstance(column_names, str): + column_names = [column_names] + if not isinstance(reverse, bool): + if len(reverse) != len(column_names): + raise ValueError("Parameter 'reverse' should be either a boolean or a list of booleans with the same length as 'column_names'.") + else: + reverse = [reverse] * len(column_names) + for column in column_names: + if not isinstance(column, str) or column not in self._data.column_names: + raise ValueError(f"Column '{column}' not found in the dataset. 
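[Editorial note, not part of the library source] `select` above special-cases contiguous ranges (a zero-copy table slice) and otherwise writes an indices mapping; `take` and `skip` are thin wrappers over contiguous selection. A minimal sketch with illustrative indices:

from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(10))})

first_three = ds.take(3)         # same as ds.select(range(3)) -> contiguous slice
rest = ds.skip(3)                # same as ds.select(range(3, len(ds)))
picked = ds.select([9, 2, 5])    # non-contiguous -> an indices mapping is written
contig = ds.select(range(4, 8))  # detected as contiguous -> no indices mapping needed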
Please provide a column selected in: {self._data.column_names}") + if null_placement not in ['at_start', 'at_end']: + if null_placement == 'first': + null_placement = 'at_start' + elif null_placement == 'last': + null_placement = 'at_end' + else: + raise ValueError(f"null_placement '{null_placement}' is an invalid parameter value. Must be either 'last', 'at_end', 'first' or 'at_start'.") + load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() + if self.cache_files: + if indices_cache_file_name is None: + indices_cache_file_name = self._get_cache_file_path(new_fingerprint) + if os.path.exists(indices_cache_file_name) and load_from_cache_file: + logger.info(f'Loading cached sorted indices for dataset at {indices_cache_file_name}') + return self._new_dataset_with_indices(fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name) + sort_table = query_table(table=self._data, key=slice(0, len(self)), indices=self._indices) + sort_keys = [(col, 'ascending' if not col_reverse else 'descending') for (col, col_reverse) in zip(column_names, reverse)] + indices = pc.sort_indices(sort_table, sort_keys=sort_keys, null_placement=null_placement) + return self.select(indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint) + + @transmit_format + @fingerprint_transform(inplace=False, randomized_function=True, ignore_kwargs=['load_from_cache_file', 'indices_cache_file_name']) + def shuffle(self, seed: Optional[int]=None, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, new_fingerprint: Optional[str]=None) -> 'Dataset': + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError('Using `.shuffle` on a dataset with attached indexes is not allowed. You can first run `.drop_index() to remove your index and then re-add it.') + if len(self) == 0: + return self + if keep_in_memory and indices_cache_file_name is not None: + raise ValueError('Please use either `keep_in_memory` or `indices_cache_file_name` but not both.') + if seed is not None and generator is not None: + raise ValueError('Both `seed` and `generator` were provided. 
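[Editorial note, not part of the library source] `sort` above computes sort indices with `pyarrow.compute.sort_indices` and applies them through `select`, so the underlying data is never rewritten. A hedged sketch with illustrative columns:

from datasets import Dataset

ds = Dataset.from_dict({"score": [3, None, 1, 2], "name": ["c", "d", "a", "b"]})

# Single column, descending, NULLs last ("last" is normalized to "at_end" above).
by_score = ds.sort("score", reverse=True, null_placement="last")

# Multiple columns with a per-column sort direction.
by_both = ds.sort(["score", "name"], reverse=[False, True])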
Please specify just one of them.') + if generator is not None and (not isinstance(generator, np.random.Generator)): + raise ValueError('The provided generator must be an instance of numpy.random.Generator') + load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() + if generator is None: + if seed is None: + (_, seed, pos, *_) = np.random.get_state() + seed = seed[pos] if pos < 624 else seed[0] + _ = np.random.random() + generator = np.random.default_rng(seed) + if self.cache_files: + if indices_cache_file_name is None: + indices_cache_file_name = self._get_cache_file_path(new_fingerprint) + if os.path.exists(indices_cache_file_name) and load_from_cache_file: + logger.info(f'Loading cached shuffled indices for dataset at {indices_cache_file_name}') + return self._new_dataset_with_indices(fingerprint=new_fingerprint, indices_cache_file_name=indices_cache_file_name) + permutation = generator.permutation(len(self)) + return self.select(indices=permutation, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name if not keep_in_memory else None, writer_batch_size=writer_batch_size, new_fingerprint=new_fingerprint) + + @transmit_format + @fingerprint_transform(inplace=False, randomized_function=True, fingerprint_names=['train_new_fingerprint', 'test_new_fingerprint'], ignore_kwargs=['load_from_cache_file', 'train_indices_cache_file_name', 'test_indices_cache_file_name']) + def train_test_split(self, test_size: Union[float, int, None]=None, train_size: Union[float, int, None]=None, shuffle: bool=True, stratify_by_column: Optional[str]=None, seed: Optional[int]=None, generator: Optional[np.random.Generator]=None, keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, train_indices_cache_file_name: Optional[str]=None, test_indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000, train_new_fingerprint: Optional[str]=None, test_new_fingerprint: Optional[str]=None) -> 'DatasetDict': + from .dataset_dict import DatasetDict + if len(self.list_indexes()) > 0: + raise DatasetTransformationNotAllowedError('Using `.train_test_split` on a dataset with attached indexes is not allowed. 
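[Editorial note, not part of the library source] `shuffle` above only writes a permutation as an indices mapping (via `select`), so subsequent row access becomes non-contiguous; `flatten_indices` rewrites the data when sequential read speed matters. Sketch, with an illustrative seed:

from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(1000))})

shuffled = ds.shuffle(seed=42)     # stores a permutation as an indices mapping
flat = shuffled.flatten_indices()  # materializes the permuted rows contiguously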
You can first run `.drop_index() to remove your index and then re-add it.') + if len(self) == 0: + return DatasetDict({'train': self, 'test': self}) + if test_size is None and train_size is None: + test_size = 0.25 + n_samples = len(self) + if isinstance(test_size, int) and (test_size >= n_samples or test_size <= 0) or (isinstance(test_size, float) and (test_size <= 0 or test_size >= 1)): + raise ValueError(f'test_size={test_size} should be either positive and smaller than the number of samples {n_samples} or a float in the (0, 1) range') + if isinstance(train_size, int) and (train_size >= n_samples or train_size <= 0) or (isinstance(train_size, float) and (train_size <= 0 or train_size >= 1)): + raise ValueError(f'train_size={train_size} should be either positive and smaller than the number of samples {n_samples} or a float in the (0, 1) range') + if train_size is not None and (not isinstance(train_size, (int, float))): + raise ValueError(f'Invalid value for train_size: {train_size} of type {type(train_size)}') + if test_size is not None and (not isinstance(test_size, (int, float))): + raise ValueError(f'Invalid value for test_size: {test_size} of type {type(test_size)}') + if isinstance(train_size, float) and isinstance(test_size, float) and (train_size + test_size > 1): + raise ValueError(f'The sum of test_size and train_size = {train_size + test_size}, should be in the (0, 1) range. Reduce test_size and/or train_size.') + if isinstance(test_size, float): + n_test = ceil(test_size * n_samples) + elif isinstance(test_size, int): + n_test = float(test_size) + if isinstance(train_size, float): + n_train = floor(train_size * n_samples) + elif isinstance(train_size, int): + n_train = float(train_size) + if train_size is None: + n_train = n_samples - n_test + elif test_size is None: + n_test = n_samples - n_train + if n_train + n_test > n_samples: + raise ValueError(f'The sum of train_size and test_size = {n_train + n_test}, should be smaller than the number of samples {n_samples}. Reduce test_size and/or train_size.') + (n_train, n_test) = (int(n_train), int(n_test)) + if n_train == 0: + raise ValueError(f'With n_samples={n_samples}, test_size={test_size} and train_size={train_size}, the resulting train set will be empty. 
Adjust any of the aforementioned parameters.') + load_from_cache_file = load_from_cache_file if load_from_cache_file is not None else is_caching_enabled() + if generator is None and shuffle is True: + if seed is None: + (_, seed, pos, *_) = np.random.get_state() + seed = seed[pos] if pos < 624 else seed[0] + _ = np.random.random() + generator = np.random.default_rng(seed) + if self.cache_files: + if train_indices_cache_file_name is None or test_indices_cache_file_name is None: + if train_indices_cache_file_name is None: + train_indices_cache_file_name = self._get_cache_file_path(train_new_fingerprint) + if test_indices_cache_file_name is None: + test_indices_cache_file_name = self._get_cache_file_path(test_new_fingerprint) + if os.path.exists(train_indices_cache_file_name) and os.path.exists(test_indices_cache_file_name) and load_from_cache_file: + logger.info(f'Loading cached split indices for dataset at {train_indices_cache_file_name} and {test_indices_cache_file_name}') + return DatasetDict({'train': self._new_dataset_with_indices(fingerprint=train_new_fingerprint, indices_cache_file_name=train_indices_cache_file_name), 'test': self._new_dataset_with_indices(fingerprint=test_new_fingerprint, indices_cache_file_name=test_indices_cache_file_name)}) + if not shuffle: + if stratify_by_column is not None: + raise ValueError('Stratified train/test split is not implemented for `shuffle=False`') + train_indices = np.arange(n_train) + test_indices = np.arange(n_train, n_train + n_test) + elif stratify_by_column is not None: + if stratify_by_column not in self._info.features.keys(): + raise ValueError(f'Key {stratify_by_column} not found in {self._info.features.keys()}') + if not isinstance(self._info.features[stratify_by_column], ClassLabel): + raise ValueError(f'Stratifying by column is only supported for {ClassLabel.__name__} column, and column {stratify_by_column} is {type(self._info.features[stratify_by_column]).__name__}.') + try: + (train_indices, test_indices) = next(stratified_shuffle_split_generate_indices(self.with_format('numpy')[stratify_by_column], n_train, n_test, rng=generator)) + except Exception as error: + if str(error) == 'Minimum class count error': + raise ValueError(f'The least populated class in {stratify_by_column} column has only 1 member, which is too few. 
The minimum number of groups for any class cannot be less than 2.') + else: + raise error + else: + permutation = generator.permutation(len(self)) + test_indices = permutation[:n_test] + train_indices = permutation[n_test:n_test + n_train] + train_split = self.select(indices=train_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=train_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=train_new_fingerprint) + test_split = self.select(indices=test_indices, keep_in_memory=keep_in_memory, indices_cache_file_name=test_indices_cache_file_name, writer_batch_size=writer_batch_size, new_fingerprint=test_new_fingerprint) + return DatasetDict({'train': train_split, 'test': test_split}) + + def shard(self, num_shards: int, index: int, contiguous: bool=False, keep_in_memory: bool=False, indices_cache_file_name: Optional[str]=None, writer_batch_size: Optional[int]=1000) -> 'Dataset': + if not 0 <= index < num_shards: + raise ValueError('index should be in [0, num_shards-1]') + if contiguous: + div = len(self) // num_shards + mod = len(self) % num_shards + start = div * index + min(index, mod) + end = start + div + (1 if index < mod else 0) + indices = range(start, end) + else: + indices = np.arange(index, len(self), num_shards) + return self.select(indices=indices, keep_in_memory=keep_in_memory, indices_cache_file_name=indices_cache_file_name, writer_batch_size=writer_batch_size) + + def to_csv(self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int]=None, num_proc: Optional[int]=None, storage_options: Optional[dict]=None, **to_csv_kwargs) -> int: + from .io.csv import CsvDatasetWriter + return CsvDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, storage_options=storage_options, **to_csv_kwargs).write() + + def to_dict(self, batch_size: Optional[int]=None) -> Union[dict, Iterator[dict]]: + return query_table(table=self._data, key=slice(0, len(self)), indices=self._indices).to_pydict() + + def to_list(self) -> list: + return query_table(table=self._data, key=slice(0, len(self)), indices=self._indices).to_pylist() + + def to_json(self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int]=None, num_proc: Optional[int]=None, storage_options: Optional[dict]=None, **to_json_kwargs) -> int: + from .io.json import JsonDatasetWriter + return JsonDatasetWriter(self, path_or_buf, batch_size=batch_size, num_proc=num_proc, storage_options=storage_options, **to_json_kwargs).write() + + def to_pandas(self, batch_size: Optional[int]=None, batched: bool=False) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]: + if not batched: + return query_table(table=self._data, key=slice(0, len(self)), indices=self._indices).to_pandas(types_mapper=pandas_types_mapper) + else: + batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + return (query_table(table=self._data, key=slice(offset, offset + batch_size), indices=self._indices).to_pandas(types_mapper=pandas_types_mapper) for offset in range(0, len(self), batch_size)) + + def to_polars(self, batch_size: Optional[int]=None, batched: bool=False, schema_overrides: Optional[dict]=None, rechunk: bool=True) -> Union['pl.DataFrame', Iterator['pl.DataFrame']]: + if config.POLARS_AVAILABLE: + import polars as pl + if not batched: + return pl.from_arrow(query_table(table=self._data, key=slice(0, len(self)), indices=self._indices if self._indices is not None else None), schema_overrides=schema_overrides, rechunk=rechunk) + else: + batch_size = batch_size if batch_size else 
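[Editorial note, not part of the library source] `train_test_split` above returns a `DatasetDict` with "train" and "test" splits and supports stratification on a `ClassLabel` column; `shard(contiguous=True)` is the same contiguous partitioning used by multiprocessed `map`. Sketch with illustrative sizes and labels:

from datasets import ClassLabel, Dataset, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
ds = Dataset.from_dict(
    {"text": ["a", "b", "c", "d", "e", "f"], "label": [0, 1, 0, 1, 0, 1]},
    features=features,
)

splits = ds.train_test_split(test_size=0.5, stratify_by_column="label", seed=0)
train_ds, test_ds = splits["train"], splits["test"]

# Contiguous sharding: shard sizes differ by at most one row.
shard_0 = ds.shard(num_shards=3, index=0, contiguous=True)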
config.DEFAULT_MAX_BATCH_SIZE + return (pl.from_arrow(query_table(table=self._data, key=slice(offset, offset + batch_size), indices=self._indices if self._indices is not None else None), schema_overrides=schema_overrides, rechunk=rechunk) for offset in range(0, len(self), batch_size)) + else: + raise ValueError('Polars needs to be installed to be able to return Polars dataframes.') + + def to_parquet(self, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int]=None, storage_options: Optional[dict]=None, **parquet_writer_kwargs) -> int: + from .io.parquet import ParquetDatasetWriter + return ParquetDatasetWriter(self, path_or_buf, batch_size=batch_size, storage_options=storage_options, **parquet_writer_kwargs).write() + + def to_sql(self, name: str, con: Union[str, 'sqlalchemy.engine.Connection', 'sqlalchemy.engine.Engine', 'sqlite3.Connection'], batch_size: Optional[int]=None, **sql_writer_kwargs) -> int: + from .io.sql import SqlDatasetWriter + return SqlDatasetWriter(self, name, con, batch_size=batch_size, **sql_writer_kwargs).write() + + def _estimate_nbytes(self) -> int: + dataset_nbytes = self.data.nbytes + decodable_columns = [k for (k, v) in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)] + if decodable_columns: + extra_nbytes = 0 + + def extra_nbytes_visitor(array, feature): + nonlocal extra_nbytes + if isinstance(feature, (Audio, Image)): + for x in array.to_pylist(): + if x is not None and x['bytes'] is None and (x['path'] is not None): + size = xgetsize(x['path']) + extra_nbytes += size + extra_nbytes -= array.field('path').nbytes + table = self.with_format('arrow')[:1000] + table_visitor(table, extra_nbytes_visitor) + extra_nbytes = extra_nbytes * len(self.data) / len(table) + dataset_nbytes = dataset_nbytes + extra_nbytes + if self._indices is not None: + dataset_nbytes = dataset_nbytes * len(self._indices) / len(self.data) + return dataset_nbytes + + @staticmethod + def _generate_tables_from_shards(shards: List['Dataset'], batch_size: int): + for (shard_idx, shard) in enumerate(shards): + for pa_table in shard.with_format('arrow').iter(batch_size): + yield (shard_idx, pa_table) + + @staticmethod + def _generate_tables_from_cache_file(filename: str): + for (batch_idx, batch) in enumerate(_memory_mapped_record_batch_reader_from_file(filename)): + yield (batch_idx, pa.Table.from_batches([batch])) + + def to_iterable_dataset(self, num_shards: Optional[int]=1) -> 'IterableDataset': + from .iterable_dataset import ArrowExamplesIterable, IterableDataset + if self._format_type is not None: + raise NotImplementedError('Converting a formatted dataset to a formatted iterable dataset is not implemented yet. Please run `my_dataset = my_dataset.with_format(None)` before calling to_iterable_dataset') + if num_shards > len(self): + raise ValueError(f'Unable to shard a dataset of size {len(self)} into {num_shards} shards (the number of shards exceeds the number of samples).') + if self._indices is not None: + logger.info('Converting an Arrow dataset to iterable but it has an indices mapping that can make it slower. 
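[Editorial note, not part of the library source] The export helpers above stream the Arrow table out in batches. A hedged sketch of a few of them; the file paths are illustrative:

from datasets import Dataset

ds = Dataset.from_dict({"x": [1, 2, 3], "y": ["a", "b", "c"]})

df = ds.to_pandas()               # whole table as one DataFrame
ds.to_csv("tmp_example.csv")      # returns an int (amount written)
ds.to_json("tmp_example.jsonl")   # JSON Lines by default
ds.to_parquet("tmp_example.parquet")
records = ds.to_list()            # list of dicts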
You can use `ds = ds.flatten_indices()` to write your dataset in contiguous chunks of data and have optimal speed.') + shards = [copy.deepcopy(self)] if num_shards == 1 else [self.shard(num_shards=num_shards, index=shard_idx, contiguous=True) for shard_idx in range(num_shards)] + ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_shards, kwargs={'shards': shards, 'batch_size': config.DEFAULT_MAX_BATCH_SIZE}) + return IterableDataset(ex_iterable, info=DatasetInfo(features=self.features)) + + def _push_parquet_shards_to_hub(self, repo_id: str, data_dir: str='data', split: Optional[str]=None, token: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=False, max_shard_size: Optional[Union[int, str]]=None, num_shards: Optional[int]=None, embed_external_files: bool=True) -> Tuple[str, str, int, int, List[str], int]: + decodable_columns = [k for (k, v) in self._info.features.items() if require_decoding(v, ignore_decode_attribute=True)] if embed_external_files else [] + dataset_nbytes = self._estimate_nbytes() + if num_shards is None: + max_shard_size = convert_file_size_to_int(max_shard_size or config.MAX_SHARD_SIZE) + num_shards = int(dataset_nbytes / max_shard_size) + 1 + num_shards = max(num_shards, 1) + shards = (self.shard(num_shards=num_shards, index=i, contiguous=True) for i in range(num_shards)) + if decodable_columns: + + def shards_with_embedded_external_files(shards): + for shard in shards: + format = shard.format + shard = shard.with_format('arrow') + shard = shard.map(embed_table_storage, batched=True, batch_size=1000, keep_in_memory=True) + shard = shard.with_format(**format) + yield shard + shards = shards_with_embedded_external_files(shards) + api = HfApi(endpoint=config.HF_ENDPOINT, token=token) + uploaded_size = 0 + additions = [] + for (index, shard) in hf_tqdm(enumerate(shards), desc='Uploading the dataset shards', total=num_shards): + shard_path_in_repo = f'{data_dir}/{split}-{index:05d}-of-{num_shards:05d}.parquet' + buffer = BytesIO() + shard.to_parquet(buffer) + uploaded_size += buffer.tell() + shard_addition = CommitOperationAdd(path_in_repo=shard_path_in_repo, path_or_fileobj=buffer) + api.preupload_lfs_files(repo_id=repo_id, additions=[shard_addition], repo_type='dataset', revision=revision, create_pr=create_pr) + additions.append(shard_addition) + return (additions, uploaded_size, dataset_nbytes) + + def push_to_hub(self, repo_id: str, config_name: str='default', set_default: Optional[bool]=None, split: Optional[str]=None, data_dir: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, private: Optional[bool]=False, token: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=False, max_shard_size: Optional[Union[int, str]]=None, num_shards: Optional[int]=None, embed_external_files: bool=True) -> CommitInfo: + if config_name == 'data': + raise ValueError("`config_name` cannot be 'data'. 
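[Editorial note, not part of the library source] `to_iterable_dataset` above wraps contiguous shards in an `ArrowExamplesIterable`; more shards allow parallel consumption when the iterable dataset is read with multiple workers. Sketch, with an illustrative shard count:

from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(100))})

# num_shards must not exceed len(ds); shards are contiguous slices of the dataset.
ids = ds.to_iterable_dataset(num_shards=4)
for example in ids.take(3):
    print(example)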
Please, choose another name for configuration.") + if max_shard_size is not None and num_shards is not None: + raise ValueError('Failed to push_to_hub: please specify either max_shard_size or num_shards, but not both.') + if split is None: + split = str(self.split) if self.split is not None else 'train' + if not re.match(_split_re, split): + raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.") + api = HfApi(endpoint=config.HF_ENDPOINT, token=token) + repo_url = api.create_repo(repo_id, token=token, repo_type='dataset', private=private, exist_ok=True) + repo_id = repo_url.repo_id + if revision is not None and (not revision.startswith('refs/pr/')): + api.create_branch(repo_id, branch=revision, token=token, repo_type='dataset', exist_ok=True) + if not data_dir: + data_dir = config_name if config_name != 'default' else 'data' + (additions, uploaded_size, dataset_nbytes) = self._push_parquet_shards_to_hub(repo_id=repo_id, data_dir=data_dir, split=split, token=token, revision=revision, max_shard_size=max_shard_size, num_shards=num_shards, create_pr=create_pr, embed_external_files=embed_external_files) + (repo_with_dataset_card, repo_with_dataset_infos) = (False, False) + (deletions, deleted_size) = ([], 0) + repo_splits = [] + repo_files_to_add = [addition.path_in_repo for addition in additions] + for repo_file in api.list_repo_tree(repo_id=repo_id, revision=revision, repo_type='dataset', token=token, recursive=True): + if not isinstance(repo_file, RepoFile): + continue + if repo_file.rfilename == config.REPOCARD_FILENAME: + repo_with_dataset_card = True + elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME: + repo_with_dataset_infos = True + elif repo_file.rfilename.startswith(f'{data_dir}/{split}-') and repo_file.rfilename not in repo_files_to_add: + deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename)) + deleted_size += repo_file.size + elif fnmatch.fnmatch(repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace('{split}', '*')): + repo_split = string_to_dict(repo_file.rfilename, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED))['split'] + if repo_split not in repo_splits: + repo_splits.append(repo_split) + (organization, dataset_name) = repo_id.split('/') if '/' in repo_id else (None, repo_id) + info_to_dump = self.info.copy() + info_to_dump.download_checksums = None + info_to_dump.download_size = uploaded_size + info_to_dump.dataset_size = dataset_nbytes + info_to_dump.size_in_bytes = uploaded_size + dataset_nbytes + info_to_dump.config_name = config_name + info_to_dump.splits = SplitDict({split: SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name)}) + if repo_with_dataset_card: + dataset_card_path = api.hf_hub_download(repo_id, config.REPOCARD_FILENAME, repo_type='dataset', revision=revision) + dataset_card = DatasetCard.load(Path(dataset_card_path)) + dataset_card_data = dataset_card.data + metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) + dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card_data) + if dataset_infos and config_name in dataset_infos: + repo_info = dataset_infos[config_name] + else: + repo_info = None + elif repo_with_dataset_infos: + dataset_card = None + dataset_card_data = DatasetCardData() + metadata_configs = MetadataConfigs() + dataset_infos_path = api.hf_hub_download(repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type='dataset', 
revision=revision) + with open(dataset_infos_path, encoding='utf-8') as f: + dataset_infos: dict = json.load(f) + dataset_info = dataset_infos.get(config_name, None) if dataset_infos else None + repo_info = DatasetInfo.from_dict(dataset_info) if dataset_info else None + else: + dataset_card = None + dataset_card_data = DatasetCardData() + metadata_configs = MetadataConfigs() + repo_info = None + if repo_info is not None: + logger.info('Updating downloaded metadata with the new split.') + if repo_info.splits and list(repo_info.splits) != [split]: + if self._info.features != repo_info.features: + raise ValueError(f"Features of the new split don't match the features of the existing splits on the hub: {self._info.features} != {repo_info.features}") + if split in repo_info.splits: + repo_info.download_size -= deleted_size + repo_info.dataset_size -= repo_info.splits.get(split, SplitInfo()).num_bytes or 0 + repo_info.download_checksums = None + repo_info.download_size = (repo_info.download_size or 0) + uploaded_size + repo_info.dataset_size = (repo_info.dataset_size or 0) + dataset_nbytes + repo_info.size_in_bytes = repo_info.download_size + repo_info.dataset_size + repo_info.splits.pop(split, None) + repo_info.splits[split] = SplitInfo(split, num_bytes=dataset_nbytes, num_examples=len(self), dataset_name=dataset_name) + info_to_dump = repo_info + if not metadata_configs and repo_splits: + default_metadata_configs_to_dump = {'data_files': [{'split': split, 'path': f'data/{split}-*'} for split in repo_splits]} + MetadataConfigs({'default': default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) + if config_name in metadata_configs: + metadata_config = metadata_configs[config_name] + if 'data_files' in metadata_config: + data_files_to_dump = sanitize_patterns(metadata_config['data_files']) + else: + data_files_to_dump = {} + data_files_to_dump[split] = [f'{data_dir}/{split}-*'] + metadata_config_to_dump = {'data_files': [{'split': _split, 'path': _pattern[0] if len(_pattern) == 1 else _pattern} for (_split, _pattern) in data_files_to_dump.items()]} + else: + metadata_config_to_dump = {'data_files': [{'split': split, 'path': f'{data_dir}/{split}-*'}]} + if set_default and config_name != 'default': + if metadata_configs: + default_config_name = metadata_configs.get_default_config_name() + if default_config_name == 'default': + raise ValueError("There exists a configuration named 'default'. 
To set a different configuration as default, rename the 'default' one first.") + else: + _ = metadata_configs[default_config_name].pop('default') + metadata_config_to_dump['default'] = True + if repo_with_dataset_infos: + dataset_infos_path = api.hf_hub_download(repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type='dataset', revision=revision) + with open(dataset_infos_path, encoding='utf-8') as f: + dataset_infos: dict = json.load(f) + dataset_infos[config_name] = asdict(info_to_dump) + buffer = BytesIO() + buffer.write(json.dumps(dataset_infos, indent=4).encode('utf-8')) + additions.append(CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)) + DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data) + MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data) + dataset_card = DatasetCard(f'---\n{dataset_card_data}\n---\n') if dataset_card is None else dataset_card + additions.append(CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())) + commit_message = commit_message if commit_message is not None else 'Upload dataset' + if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: + commit_info = api.create_commit(repo_id, operations=additions + deletions, commit_message=commit_message, commit_description=commit_description, token=token, repo_type='dataset', revision=revision, create_pr=create_pr) + else: + logger.info(f'Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. Splitting the push into multiple commits.') + num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT) + for i in range(0, num_commits): + operations = additions[i * config.UPLOADS_MAX_NUMBER_PER_COMMIT:(i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT] + (deletions if i == 0 else []) + commit_info = api.create_commit(repo_id, operations=operations, commit_message=commit_message + f' (part {i:05d}-of-{num_commits:05d})', commit_description=commit_description, token=token, repo_type='dataset', revision=revision, create_pr=create_pr) + logger.info(f'Commit #{i + 1} completed' + (f' (still {num_commits - i - 1} to go)' if num_commits - i - 1 else '') + '.') + return commit_info + + @transmit_format + @fingerprint_transform(inplace=False) + def add_column(self, name: str, column: Union[list, np.array], new_fingerprint: str): + column_table = InMemoryTable.from_pydict({name: column}) + _check_column_names(self._data.column_names + column_table.column_names) + dataset = self.flatten_indices() if self._indices is not None else self + table = concat_tables([dataset._data, column_table], axis=1) + info = dataset.info.copy() + info.features.update(Features.from_arrow_schema(column_table.schema)) + table = update_metadata_with_features(table, info.features) + return Dataset(table, info=info, split=self.split, indices_table=None, fingerprint=new_fingerprint) + + def add_faiss_index(self, column: str, index_name: Optional[str]=None, device: Optional[int]=None, string_factory: Optional[str]=None, metric_type: Optional[int]=None, custom_index: Optional['faiss.Index']=None, batch_size: int=1000, train_size: Optional[int]=None, faiss_verbose: bool=False, dtype=np.float32): + with self.formatted_as(type='numpy', columns=[column], dtype=dtype): + super().add_faiss_index(column=column, index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, batch_size=batch_size, 
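[Editorial note, not part of the library source] `push_to_hub` above uploads the dataset as Parquet shards, then updates the dataset card metadata (or `dataset_infos.json` when present) and splits the push into multiple commits if there are too many files. A hedged sketch; the repo id is hypothetical and the call requires authentication:

from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})

ds.push_to_hub(
    "username/my-demo-dataset",  # hypothetical repo id; needs `huggingface-cli login` or token=...
    config_name="default",
    split="train",
    max_shard_size="500MB",      # alternatively num_shards=..., but not both
    private=True,
)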
train_size=train_size, faiss_verbose=faiss_verbose) + return self + + def add_faiss_index_from_external_arrays(self, external_arrays: np.array, index_name: str, device: Optional[int]=None, string_factory: Optional[str]=None, metric_type: Optional[int]=None, custom_index: Optional['faiss.Index']=None, batch_size: int=1000, train_size: Optional[int]=None, faiss_verbose: bool=False, dtype=np.float32): + super().add_faiss_index_from_external_arrays(external_arrays=external_arrays.astype(dtype), index_name=index_name, device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose) + + def add_elasticsearch_index(self, column: str, index_name: Optional[str]=None, host: Optional[str]=None, port: Optional[int]=None, es_client: Optional['elasticsearch.Elasticsearch']=None, es_index_name: Optional[str]=None, es_index_config: Optional[dict]=None): + with self.formatted_as(type=None, columns=[column]): + super().add_elasticsearch_index(column=column, index_name=index_name, host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config) + return self + + @transmit_format + @fingerprint_transform(inplace=False) + def add_item(self, item: dict, new_fingerprint: str): + item_table = InMemoryTable.from_pydict({k: [v] for (k, v) in item.items()}) + (dset_features, item_features) = _align_features([self._info.features, Features.from_arrow_schema(item_table.schema)]) + table = concat_tables([self._data.cast(dset_features.arrow_schema) if self._info.features != dset_features else self._data, item_table.cast(item_features.arrow_schema)]) + if self._indices is None: + indices_table = None + else: + item_indices_array = pa.array([len(self._data)], type=pa.uint64()) + item_indices_table = InMemoryTable.from_arrays([item_indices_array], names=['indices']) + indices_table = concat_tables([self._indices, item_indices_table]) + info = self.info.copy() + info.features.update(item_features) + table = update_metadata_with_features(table, info.features) + return Dataset(table, info=info, split=self.split, indices_table=indices_table, fingerprint=new_fingerprint) + + def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> 'Dataset': + if label_column not in self._data.column_names: + raise ValueError(f'Column ({label_column}) not in table columns ({self._data.column_names}).') + label_feature = self._info.features[label_column] + if not (isinstance(label_feature, ClassLabel) or (isinstance(label_feature, Sequence) and isinstance(label_feature.feature, ClassLabel))): + raise ValueError(f'Aligning labels with a mapping is only supported for {ClassLabel.__name__} column or {Sequence.__name__} column with the inner type {ClassLabel.__name__}, and column {label_feature} is of type {type(label_feature).__name__}.') + label2id = dict(sorted(label2id.items(), key=lambda item: item[1])) + label_names = list(label2id.keys()) + label2id = {k.lower(): v for (k, v) in label2id.items()} + int2str_function = label_feature.int2str if isinstance(label_feature, ClassLabel) else label_feature.feature.int2str + if isinstance(label_feature, ClassLabel): + + def process_label_ids(batch): + dset_label_names = [int2str_function(label_id).lower() if label_id is not None else None for label_id in batch[label_column]] + batch[label_column] = [label2id[label_name] if label_name is not None else None for label_name in dset_label_names] + return batch + else: + + def 
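[Editorial note, not part of the library source] `add_column` and `add_item` above rebuild the table (aligning features for `add_item`), while `add_faiss_index` builds an index over a numpy-formatted column. Sketch with illustrative column names and vector sizes; the last two calls assume the `faiss` package is installed:

import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"]})

ds = ds.add_column("length", [1, 1, 1])
ds = ds.add_item({"text": "d", "length": 1})

ds = ds.add_column("embeddings", [np.random.rand(8).tolist() for _ in range(len(ds))])
ds.add_faiss_index(column="embeddings")  # indexes the column as float32 vectors
scores, examples = ds.get_nearest_examples("embeddings", np.random.rand(8).astype(np.float32), k=2)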
process_label_ids(batch): + dset_label_names = [[int2str_function(label_id).lower() if label_id is not None else None for label_id in seq] for seq in batch[label_column]] + batch[label_column] = [[label2id[label_name] if label_name is not None else None for label_name in seq] for seq in dset_label_names] + return batch + features = self.features + features[label_column] = ClassLabel(num_classes=len(label_names), names=label_names) if isinstance(label_feature, ClassLabel) else Sequence(ClassLabel(num_classes=len(label_names), names=label_names)) + return self.map(process_label_ids, features=features, batched=True, desc='Aligning the labels') + +def _concatenate_map_style_datasets(dsets: List[Dataset], info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, axis: int=0): + if any((dset.num_rows > 0 for dset in dsets)): + dsets = [dset for dset in dsets if dset.num_rows > 0] + else: + return dsets[0] + if axis == 0: + _check_if_features_can_be_aligned([dset.features for dset in dsets]) + else: + if not all((dset.num_rows == dsets[0].num_rows for dset in dsets)): + raise ValueError('Number of rows must match for all datasets') + _check_column_names([col_name for dset in dsets for col_name in dset._data.column_names]) + format = dsets[0].format + if any((dset.format != format for dset in dsets)): + format = {} + logger.info('Some of the datasets have disparate format. Resetting the format of the concatenated dataset.') + + def apply_offset_to_indices_table(table, offset): + if offset == 0: + return table + else: + array = table['indices'] + new_array = pc.add(array, pa.scalar(offset, type=pa.uint64())) + return InMemoryTable.from_arrays([new_array], names=['indices']) + if any((dset._indices is not None for dset in dsets)): + if axis == 0: + indices_tables = [] + for i in range(len(dsets)): + if dsets[i]._indices is None: + dsets[i] = dsets[i]._select_with_indices_mapping(range(len(dsets[i]))) + indices_tables.append(dsets[i]._indices) + offset = 0 + for i in range(len(dsets)): + indices_tables[i] = apply_offset_to_indices_table(indices_tables[i], offset) + offset += len(dsets[i]._data) + indices_tables = [t for t in indices_tables if len(t) > 0] + if indices_tables: + indices_table = concat_tables(indices_tables) + else: + indices_table = InMemoryTable.from_batches([], schema=pa.schema({'indices': pa.int64()})) + elif len(dsets) == 1: + indices_table = dsets[0]._indices + else: + for i in range(len(dsets)): + dsets[i] = dsets[i].flatten_indices() + indices_table = None + else: + indices_table = None + table = concat_tables([dset._data for dset in dsets], axis=axis) + if axis == 0: + features_list = _align_features([dset.features for dset in dsets]) + else: + features_list = [dset.features for dset in dsets] + table = update_metadata_with_features(table, {k: v for features in features_list for (k, v) in features.items()}) + if info is None: + info = DatasetInfo.from_merge([dset.info for dset in dsets]) + fingerprint = update_fingerprint(''.join((dset._fingerprint for dset in dsets)), _concatenate_map_style_datasets, {'info': info, 'split': split}) + concatenated_dataset = Dataset(table, info=info, split=split, indices_table=indices_table, fingerprint=fingerprint) + concatenated_dataset.set_format(**format) + return concatenated_dataset + +def _interleave_map_style_datasets(datasets: List['Dataset'], probabilities: Optional[List[float]]=None, seed: Optional[int]=None, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, stopping_strategy: Literal['first_exhausted', 
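[Editorial note, not part of the library source] `align_labels_with_mapping` above remaps integer labels to follow a given `label2id` dict (matching label names case-insensitively) and rewrites the `ClassLabel` feature accordingly. Sketch with illustrative label names:

from datasets import ClassLabel, Dataset, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["entailment", "neutral", "contradiction"])})
ds = Dataset.from_dict({"text": ["x", "y"], "label": [0, 2]}, features=features)

# Ordering we want the dataset to follow (e.g. a model config's label2id; illustrative).
label2id = {"CONTRADICTION": 0, "NEUTRAL": 1, "ENTAILMENT": 2}
ds = ds.align_labels_with_mapping(label2id, "label")
print(ds.features["label"].names)  # ['CONTRADICTION', 'NEUTRAL', 'ENTAILMENT']
print(ds["label"])                 # [2, 0]  (entailment -> 2, contradiction -> 0)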
'all_exhausted']='first_exhausted', **kwargs) -> 'Dataset': + if stopping_strategy not in ['first_exhausted', 'all_exhausted']: + raise ValueError(f'{stopping_strategy} stopping strategy in `interleave_datasets` is not implemented yet with a list of {type(datasets[0])}') + concatenated_datasets = _concatenate_map_style_datasets(datasets, info=info, split=split) + lengths = [len(dset) for dset in datasets] + offsets = np.cumsum([0] + lengths[:-1]) + oversampling = stopping_strategy == 'all_exhausted' + if probabilities is None and (not oversampling): + indices = (offsets.reshape(1, -1) + np.arange(min(lengths)).reshape(-1, 1)).flatten().tolist() + elif probabilities is None: + indices = np.mod(np.arange(max(lengths)).reshape(-1, 1), np.array(lengths).reshape(1, -1)) + indices = (indices + offsets).flatten().tolist() + else: + is_exhausted = np.full(len(lengths), False) + bool_strategy_func = np.all if oversampling else np.any + + def iter_random_indices(): + rng = np.random.default_rng(seed) + while True: + yield from (int(i) for i in rng.choice(len(datasets), size=1000, p=probabilities)) + current_index = [0] * len(datasets) + indices = [] + for source_idx in iter_random_indices(): + if bool_strategy_func(is_exhausted): + break + indices.append(current_index[source_idx] + offsets[source_idx]) + current_index[source_idx] += 1 + if current_index[source_idx] >= lengths[source_idx]: + is_exhausted[source_idx] = True + current_index[source_idx] = 0 + return concatenated_datasets.select(indices, **kwargs) + +def _split_by_node_map_style_dataset(dataset: Dataset, rank: int, world_size: int) -> Dataset: + return dataset.shard(num_shards=world_size, index=rank, contiguous=True) + +def get_indices_from_mask_function(function: Callable, batched: bool, with_indices: bool, with_rank: bool, input_columns: Optional[Union[str, List[str]]], indices_mapping: Optional[Table]=None, *args, **fn_kwargs): + if batched: + (*inputs, indices, rank) = args + additional_args = () + if with_indices: + additional_args += (indices,) + if with_rank: + additional_args += (rank,) + mask = function(*inputs, *additional_args, **fn_kwargs) + else: + (*inputs, indices, rank) = args + mask = [] + if input_columns is None: + batch: dict = inputs[0] + num_examples = len(batch[next(iter(batch.keys()))]) + for i in range(num_examples): + example = {key: batch[key][i] for key in batch} + additional_args = () + if with_indices: + additional_args += (indices[i],) + if with_rank: + additional_args += (rank,) + mask.append(function(example, *additional_args, **fn_kwargs)) + else: + columns: List[List] = inputs + num_examples = len(columns[0]) + for i in range(num_examples): + input = [column[i] for column in columns] + additional_args = () + if with_indices: + additional_args += (indices[i],) + if with_rank: + additional_args += (rank,) + mask.append(function(*input, *additional_args, **fn_kwargs)) + indices_array = [i for (i, to_keep) in zip(indices, mask) if to_keep] + if indices_mapping is not None: + indices_array = pa.array(indices_array, type=pa.uint64()) + indices_array = indices_mapping.column(0).take(indices_array) + indices_array = indices_array.to_pylist() + return {'indices': indices_array} + +# File: datasets-main/src/datasets/arrow_reader.py +"""""" +import copy +import math +import os +import re +from dataclasses import dataclass +from functools import partial +from typing import TYPE_CHECKING, List, Optional, Union +import pyarrow as pa +import pyarrow.parquet as pq +from tqdm.contrib.concurrent import thread_map +from 
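[Editorial note, not part of the library source] `_concatenate_map_style_datasets` and `_interleave_map_style_datasets` above back the public `concatenate_datasets` and `interleave_datasets` helpers. Sketch with illustrative data, probabilities, and stopping strategy:

from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({"x": [0, 1, 2]})
d2 = Dataset.from_dict({"x": [10, 11]})

both = concatenate_datasets([d1, d2])  # rows of d1 followed by rows of d2
cols = concatenate_datasets([d1, Dataset.from_dict({"y": ["a", "b", "c"]})], axis=1)

# Alternate sources; "first_exhausted" stops when the shortest dataset runs out.
mixed = interleave_datasets([d1, d2], stopping_strategy="first_exhausted")
sampled = interleave_datasets([d1, d2], probabilities=[0.7, 0.3], seed=42)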
.download.download_config import DownloadConfig +from .naming import _split_re, filenames_for_dataset_split +from .table import InMemoryTable, MemoryMappedTable, Table, concat_tables +from .utils import logging +from .utils import tqdm as hf_tqdm +if TYPE_CHECKING: + from .info import DatasetInfo + from .splits import Split, SplitInfo +logger = logging.get_logger(__name__) +HF_GCP_BASE_URL = 'https://storage.googleapis.com/huggingface-nlp/cache/datasets' +_SUB_SPEC_RE = re.compile(f'\n^\n (?P<split>{_split_re[1:-1]})\n (\\[\n ((?P<from>-?\\d+)\n (?P<from_pct>%)?)?\n :\n ((?P<to>-?\\d+)\n (?P<to_pct>%)?)?\n \\])?(\\((?P<rounding>[^\\)]*)\\))?\n$\n', re.X) +_ADDITION_SEP_RE = re.compile('\\s*\\+\\s*') + +class DatasetNotOnHfGcsError(ConnectionError): + pass + +class MissingFilesOnHfGcsError(ConnectionError): + pass + +@dataclass(frozen=True) +class FileInstructions: + num_examples: int + file_instructions: List[dict] + +def make_file_instructions(name: str, split_infos: List['SplitInfo'], instruction: Union[str, 'ReadInstruction'], filetype_suffix: Optional[str]=None, prefix_path: Optional[str]=None) -> FileInstructions: + if not isinstance(name, str): + raise TypeError(f"Expected str 'name', but got: {type(name).__name__}") + elif not name: + raise ValueError("Expected non-empty str 'name'") + name2len = {info.name: info.num_examples for info in split_infos} + name2shard_lengths = {info.name: info.shard_lengths for info in split_infos} + name2filenames = {info.name: filenames_for_dataset_split(path=prefix_path, dataset_name=name, split=info.name, filetype_suffix=filetype_suffix, shard_lengths=name2shard_lengths[info.name]) for info in split_infos} + if not isinstance(instruction, ReadInstruction): + instruction = ReadInstruction.from_spec(instruction) + absolute_instructions = instruction.to_absolute(name2len) + file_instructions = [] + num_examples = 0 + for abs_instr in absolute_instructions: + split_length = name2len[abs_instr.splitname] + filenames = name2filenames[abs_instr.splitname] + shard_lengths = name2shard_lengths[abs_instr.splitname] + from_ = 0 if abs_instr.from_ is None else abs_instr.from_ + to = split_length if abs_instr.to is None else abs_instr.to + if shard_lengths is None: + for filename in filenames: + take = to - from_ + if take == 0: + continue + num_examples += take + file_instructions.append({'filename': filename, 'skip': from_, 'take': take}) + else: + index_start = 0 + index_end = 0 + for (filename, shard_length) in zip(filenames, shard_lengths): + index_end += shard_length + if from_ < index_end and to > index_start: + skip = from_ - index_start if from_ > index_start else 0 + take = to - index_start - skip if to < index_end else -1 + if take == 0: + continue + file_instructions.append({'filename': filename, 'skip': skip, 'take': take}) + num_examples += shard_length - skip if take == -1 else take + index_start += shard_length + return FileInstructions(num_examples=num_examples, file_instructions=file_instructions) + +class BaseReader: + + def __init__(self, path: str, info: Optional['DatasetInfo']): + self._path: str = path + self._info: Optional['DatasetInfo'] = info + self._filetype_suffix: Optional[str] = None + + def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table: + raise NotImplementedError + + def _read_files(self, files, in_memory=False) -> Table: + if len(files) == 0 or not all((isinstance(f, dict) for f in files)): + raise ValueError('please provide valid file informations') + files = copy.deepcopy(files) + for f in files: + f['filename'] = os.path.join(self._path, 
f['filename']) + pa_tables = thread_map(partial(self._get_table_from_filename, in_memory=in_memory), files, tqdm_class=hf_tqdm, desc='Loading dataset shards', disable=len(files) <= 16 or None) + pa_tables = [t for t in pa_tables if len(t) > 0] + if not pa_tables and (self._info is None or self._info.features is None): + raise ValueError('Tried to read an empty table. Please specify at least info.features to create an empty table with the right type.') + pa_tables = pa_tables or [InMemoryTable.from_batches([], schema=pa.schema(self._info.features.type))] + pa_table = concat_tables(pa_tables) if len(pa_tables) != 1 else pa_tables[0] + return pa_table + + def get_file_instructions(self, name, instruction, split_infos): + file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix=self._filetype_suffix, prefix_path=self._path) + files = file_instructions.file_instructions + return files + + def read(self, name, instructions, split_infos, in_memory=False): + files = self.get_file_instructions(name, instructions, split_infos) + if not files: + msg = f'Instruction "{instructions}" corresponds to no data!' + raise ValueError(msg) + return self.read_files(files=files, original_instructions=instructions, in_memory=in_memory) + + def read_files(self, files: List[dict], original_instructions: Union[None, 'ReadInstruction', 'Split']=None, in_memory=False): + pa_table = self._read_files(files, in_memory=in_memory) + if original_instructions is not None: + from .splits import Split + split = Split(str(original_instructions)) + else: + split = None + dataset_kwargs = {'arrow_table': pa_table, 'info': self._info, 'split': split} + return dataset_kwargs + +class ArrowReader(BaseReader): + + def __init__(self, path: str, info: Optional['DatasetInfo']): + super().__init__(path, info) + self._filetype_suffix = 'arrow' + + def _get_table_from_filename(self, filename_skip_take, in_memory=False) -> Table: + (filename, skip, take) = (filename_skip_take['filename'], filename_skip_take['skip'] if 'skip' in filename_skip_take else None, filename_skip_take['take'] if 'take' in filename_skip_take else None) + table = ArrowReader.read_table(filename, in_memory=in_memory) + if take == -1: + take = len(table) - skip + if skip is not None and take is not None and (not (skip == 0 and take == len(table))): + table = table.slice(skip, take) + return table + + @staticmethod + def read_table(filename, in_memory=False) -> Table: + table_cls = InMemoryTable if in_memory else MemoryMappedTable + return table_cls.from_file(filename) + +class ParquetReader(BaseReader): + + def __init__(self, path: str, info: Optional['DatasetInfo']): + super().__init__(path, info) + self._filetype_suffix = 'parquet' + + def _get_table_from_filename(self, filename_skip_take, **kwargs): + (filename, skip, take) = (filename_skip_take['filename'], filename_skip_take['skip'] if 'skip' in filename_skip_take else None, filename_skip_take['take'] if 'take' in filename_skip_take else None) + pa_table = pq.read_table(filename, memory_map=True) + if skip is not None and take is not None and (not (skip == 0 and take == len(pa_table))): + pa_table = pa_table.slice(skip, take) + return pa_table + +@dataclass(frozen=True) +class _AbsoluteInstruction: + splitname: str + from_: int + to: int + +@dataclass(frozen=True) +class _RelativeInstruction: + splitname: str + from_: Optional[int] = None + to: Optional[int] = None + unit: Optional[str] = None + rounding: Optional[str] = None + + def __post_init__(self): + if self.unit is not None 
and self.unit not in ['%', 'abs']: + raise ValueError('unit must be either % or abs') + if self.rounding is not None and self.rounding not in ['closest', 'pct1_dropremainder']: + raise ValueError('rounding must be either closest or pct1_dropremainder') + if self.unit != '%' and self.rounding is not None: + raise ValueError('It is forbidden to specify rounding if not using percent slicing.') + if self.unit == '%' and self.from_ is not None and (abs(self.from_) > 100): + raise ValueError('Percent slice boundaries must be > -100 and < 100.') + if self.unit == '%' and self.to is not None and (abs(self.to) > 100): + raise ValueError('Percent slice boundaries must be > -100 and < 100.') + self.__dict__['rounding'] = 'closest' if self.rounding is None and self.unit == '%' else self.rounding + +def _str_to_read_instruction(spec): + res = _SUB_SPEC_RE.match(spec) + if not res: + raise ValueError(f'Unrecognized instruction format: {spec}') + unit = '%' if res.group('from_pct') or res.group('to_pct') else 'abs' + return ReadInstruction(split_name=res.group('split'), rounding=res.group('rounding'), from_=int(res.group('from')) if res.group('from') else None, to=int(res.group('to')) if res.group('to') else None, unit=unit) + +def _pct_to_abs_pct1(boundary, num_examples): + if num_examples < 100: + msg = 'Using "pct1_dropremainder" rounding on a split with less than 100 elements is forbidden: it always results in an empty dataset.' + raise ValueError(msg) + return boundary * math.trunc(num_examples / 100.0) + +def _pct_to_abs_closest(boundary, num_examples): + return int(round(boundary * num_examples / 100.0)) + +def _rel_to_abs_instr(rel_instr, name2len): + pct_to_abs = _pct_to_abs_closest if rel_instr.rounding == 'closest' else _pct_to_abs_pct1 + split = rel_instr.splitname + if split not in name2len: + raise ValueError(f'Unknown split "{split}". 
Should be one of {list(name2len)}.') + num_examples = name2len[split] + from_ = rel_instr.from_ + to = rel_instr.to + if rel_instr.unit == '%': + from_ = 0 if from_ is None else pct_to_abs(from_, num_examples) + to = num_examples if to is None else pct_to_abs(to, num_examples) + else: + from_ = 0 if from_ is None else from_ + to = num_examples if to is None else to + if from_ < 0: + from_ = max(num_examples + from_, 0) + if to < 0: + to = max(num_examples + to, 0) + from_ = min(from_, num_examples) + to = min(to, num_examples) + return _AbsoluteInstruction(split, from_, to) + +class ReadInstruction: + + def _init(self, relative_instructions): + self._relative_instructions = relative_instructions + + @classmethod + def _read_instruction_from_relative_instructions(cls, relative_instructions): + result = cls.__new__(cls) + result._init(relative_instructions) + return result + + def __init__(self, split_name, rounding=None, from_=None, to=None, unit=None): + self._init([_RelativeInstruction(split_name, from_, to, unit, rounding)]) + + @classmethod + def from_spec(cls, spec): + spec = str(spec) + subs = _ADDITION_SEP_RE.split(spec) + if not subs: + raise ValueError(f'No instructions could be built out of {spec}') + instruction = _str_to_read_instruction(subs[0]) + return sum((_str_to_read_instruction(sub) for sub in subs[1:]), instruction) + + def to_spec(self): + rel_instr_specs = [] + for rel_instr in self._relative_instructions: + rel_instr_spec = rel_instr.splitname + if rel_instr.from_ is not None or rel_instr.to is not None: + from_ = rel_instr.from_ + to = rel_instr.to + unit = rel_instr.unit + rounding = rel_instr.rounding + unit = unit if unit == '%' else '' + from_ = str(from_) + unit if from_ is not None else '' + to = str(to) + unit if to is not None else '' + slice_str = f'[{from_}:{to}]' + rounding_str = f'({rounding})' if unit == '%' and rounding is not None and (rounding != 'closest') else '' + rel_instr_spec += slice_str + rounding_str + rel_instr_specs.append(rel_instr_spec) + return '+'.join(rel_instr_specs) + + def __add__(self, other): + if not isinstance(other, ReadInstruction): + msg = 'ReadInstruction can only be added to another ReadInstruction obj.' + raise TypeError(msg) + self_ris = self._relative_instructions + other_ris = other._relative_instructions + if self_ris[0].unit != 'abs' and other_ris[0].unit != 'abs' and (self._relative_instructions[0].rounding != other_ris[0].rounding): + raise ValueError('It is forbidden to sum ReadInstruction instances with different rounding values.') + return self._read_instruction_from_relative_instructions(self_ris + other_ris) + + def __str__(self): + return self.to_spec() + + def __repr__(self): + return f'ReadInstruction({self._relative_instructions})' + + def to_absolute(self, name2len): + return [_rel_to_abs_instr(rel_instr, name2len) for rel_instr in self._relative_instructions] + +# File: datasets-main/src/datasets/arrow_writer.py +"""""" +import json +import sys +from typing import Any, Dict, Iterable, List, Optional, Tuple, Union +import fsspec +import numpy as np +import pyarrow as pa +import pyarrow.parquet as pq +from fsspec.core import url_to_fs +from . 
import config +from .features import Features, Image, Value +from .features.features import FeatureType, _ArrayXDExtensionType, cast_to_python_objects, generate_from_arrow_type, get_nested_type, list_of_np_array_to_pyarrow_listarray, numpy_to_pyarrow_listarray, to_pyarrow_listarray +from .filesystems import is_remote_filesystem +from .info import DatasetInfo +from .keyhash import DuplicatedKeysError, KeyHasher +from .table import array_cast, cast_array_to_feature, embed_table_storage, table_cast +from .utils import logging +from .utils.py_utils import asdict, first_non_null_value +logger = logging.get_logger(__name__) +type_ = type + +class SchemaInferenceError(ValueError): + pass + +class TypedSequence: + + def __init__(self, data: Iterable, type: Optional[FeatureType]=None, try_type: Optional[FeatureType]=None, optimized_int_type: Optional[FeatureType]=None): + if type is not None and try_type is not None: + raise ValueError('You cannot specify both type and try_type') + self.data = data + self.type = type + self.try_type = try_type + self.optimized_int_type = optimized_int_type + self.trying_type = self.try_type is not None + self.trying_int_optimization = optimized_int_type is not None and type is None and (try_type is None) + self._inferred_type = None + + def get_inferred_type(self) -> FeatureType: + if self._inferred_type is None: + self._inferred_type = generate_from_arrow_type(pa.array(self).type) + return self._inferred_type + + @staticmethod + def _infer_custom_type_and_encode(data: Iterable) -> Tuple[Iterable, Optional[FeatureType]]: + if config.PIL_AVAILABLE and 'PIL' in sys.modules: + import PIL.Image + (non_null_idx, non_null_value) = first_non_null_value(data) + if isinstance(non_null_value, PIL.Image.Image): + return ([Image().encode_example(value) if value is not None else None for value in data], Image()) + return (data, None) + + def __arrow_array__(self, type: Optional[pa.DataType]=None): + if type is not None: + raise ValueError('TypedSequence is supposed to be used with pa.array(typed_sequence, type=None)') + del type + data = self.data + if self.type is None and self.try_type is None: + (data, self._inferred_type) = self._infer_custom_type_and_encode(data) + if self._inferred_type is None: + type = self.try_type if self.trying_type else self.type + else: + type = self._inferred_type + pa_type = get_nested_type(type) if type is not None else None + optimized_int_pa_type = get_nested_type(self.optimized_int_type) if self.optimized_int_type is not None else None + trying_cast_to_python_objects = False + try: + if isinstance(pa_type, _ArrayXDExtensionType): + storage = to_pyarrow_listarray(data, pa_type) + return pa.ExtensionArray.from_storage(pa_type, storage) + if isinstance(data, np.ndarray): + out = numpy_to_pyarrow_listarray(data) + elif isinstance(data, list) and data and isinstance(first_non_null_value(data)[1], np.ndarray): + out = list_of_np_array_to_pyarrow_listarray(data) + else: + trying_cast_to_python_objects = True + out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True)) + if self.trying_int_optimization: + if pa.types.is_int64(out.type): + out = out.cast(optimized_int_pa_type) + elif pa.types.is_list(out.type): + if pa.types.is_int64(out.type.value_type): + out = array_cast(out, pa.list_(optimized_int_pa_type)) + elif pa.types.is_list(out.type.value_type) and pa.types.is_int64(out.type.value_type.value_type): + out = array_cast(out, pa.list_(pa.list_(optimized_int_pa_type))) + elif type is not None: + out = cast_array_to_feature(out, type, 
allow_primitive_to_str=not self.trying_type, allow_decimal_to_str=not self.trying_type) + return out + except (TypeError, pa.lib.ArrowInvalid, pa.lib.ArrowNotImplementedError) as e: + if not self.trying_type and isinstance(e, pa.lib.ArrowNotImplementedError): + raise + if self.trying_type: + try: + if isinstance(data, np.ndarray): + return numpy_to_pyarrow_listarray(data) + elif isinstance(data, list) and data and any((isinstance(value, np.ndarray) for value in data)): + return list_of_np_array_to_pyarrow_listarray(data) + else: + trying_cast_to_python_objects = True + return pa.array(cast_to_python_objects(data, only_1d_for_numpy=True)) + except pa.lib.ArrowInvalid as e: + if 'overflow' in str(e): + raise OverflowError(f'There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})') from None + elif self.trying_int_optimization and 'not in range' in str(e): + optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name + logger.info(f'Failed to cast a sequence to {optimized_int_pa_type_str}. Falling back to int64.') + return out + elif trying_cast_to_python_objects and 'Could not convert' in str(e): + out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)) + if type is not None: + out = cast_array_to_feature(out, type, allow_primitive_to_str=True, allow_decimal_to_str=True) + return out + else: + raise + elif 'overflow' in str(e): + raise OverflowError(f'There was an overflow with type {type_(data)}. Try to reduce writer_batch_size to have batches smaller than 2GB.\n({e})') from None + elif self.trying_int_optimization and 'not in range' in str(e): + optimized_int_pa_type_str = np.dtype(optimized_int_pa_type.to_pandas_dtype()).name + logger.info(f'Failed to cast a sequence to {optimized_int_pa_type_str}. 
Falling back to int64.') + return out + elif trying_cast_to_python_objects and 'Could not convert' in str(e): + out = pa.array(cast_to_python_objects(data, only_1d_for_numpy=True, optimize_list_casting=False)) + if type is not None: + out = cast_array_to_feature(out, type, allow_primitive_to_str=True, allow_decimal_to_str=True) + return out + else: + raise + +class OptimizedTypedSequence(TypedSequence): + + def __init__(self, data, type: Optional[FeatureType]=None, try_type: Optional[FeatureType]=None, col: Optional[str]=None, optimized_int_type: Optional[FeatureType]=None): + optimized_int_type_by_col = {'attention_mask': Value('int8'), 'special_tokens_mask': Value('int8'), 'input_ids': Value('int32'), 'token_type_ids': Value('int8')} + if type is None and try_type is None: + optimized_int_type = optimized_int_type_by_col.get(col, None) + super().__init__(data, type=type, try_type=try_type, optimized_int_type=optimized_int_type) + +class ArrowWriter: + _WRITER_CLASS = pa.RecordBatchStreamWriter + + def __init__(self, schema: Optional[pa.Schema]=None, features: Optional[Features]=None, path: Optional[str]=None, stream: Optional[pa.NativeFile]=None, fingerprint: Optional[str]=None, writer_batch_size: Optional[int]=None, hash_salt: Optional[str]=None, check_duplicates: Optional[bool]=False, disable_nullable: bool=False, update_features: bool=False, with_metadata: bool=True, unit: str='examples', embed_local_files: bool=False, storage_options: Optional[dict]=None): + if path is None and stream is None: + raise ValueError('At least one of path and stream must be provided.') + if features is not None: + self._features = features + self._schema = None + elif schema is not None: + self._schema: pa.Schema = schema + self._features = Features.from_arrow_schema(self._schema) + else: + self._features = None + self._schema = None + if hash_salt is not None: + self._hasher = KeyHasher(hash_salt) + else: + self._hasher = KeyHasher('') + self._check_duplicates = check_duplicates + self._disable_nullable = disable_nullable + if stream is None: + (fs, path) = url_to_fs(path, **storage_options or {}) + self._fs: fsspec.AbstractFileSystem = fs + self._path = path if not is_remote_filesystem(self._fs) else self._fs.unstrip_protocol(path) + self.stream = self._fs.open(path, 'wb') + self._closable_stream = True + else: + self._fs = None + self._path = None + self.stream = stream + self._closable_stream = False + self.fingerprint = fingerprint + self.disable_nullable = disable_nullable + self.writer_batch_size = writer_batch_size or config.DEFAULT_MAX_BATCH_SIZE + self.update_features = update_features + self.with_metadata = with_metadata + self.unit = unit + self.embed_local_files = embed_local_files + self._num_examples = 0 + self._num_bytes = 0 + self.current_examples: List[Tuple[Dict[str, Any], str]] = [] + self.current_rows: List[pa.Table] = [] + self.pa_writer: Optional[pa.RecordBatchStreamWriter] = None + self.hkey_record = [] + + def __len__(self): + return self._num_examples + len(self.current_examples) + len(self.current_rows) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def close(self): + if self.pa_writer: + try: + self.pa_writer.close() + except Exception: + pass + if self._closable_stream and (not self.stream.closed): + self.stream.close() + + def _build_writer(self, inferred_schema: pa.Schema): + schema = self.schema + inferred_features = Features.from_arrow_schema(inferred_schema) + if self._features is not None: + if 
self.update_features: + fields = {field.name: field for field in self._features.type} + for inferred_field in inferred_features.type: + name = inferred_field.name + if name in fields: + if inferred_field == fields[name]: + inferred_features[name] = self._features[name] + self._features = inferred_features + schema: pa.Schema = inferred_schema + else: + self._features = inferred_features + schema: pa.Schema = inferred_features.arrow_schema + if self.disable_nullable: + schema = pa.schema((pa.field(field.name, field.type, nullable=False) for field in schema)) + if self.with_metadata: + schema = schema.with_metadata(self._build_metadata(DatasetInfo(features=self._features), self.fingerprint)) + else: + schema = schema.with_metadata({}) + self._schema = schema + self.pa_writer = self._WRITER_CLASS(self.stream, schema) + + @property + def schema(self): + _schema = self._schema if self._schema is not None else pa.schema(self._features.type) if self._features is not None else None + if self._disable_nullable and _schema is not None: + _schema = pa.schema((pa.field(field.name, field.type, nullable=False) for field in _schema)) + return _schema if _schema is not None else [] + + @staticmethod + def _build_metadata(info: DatasetInfo, fingerprint: Optional[str]=None) -> Dict[str, str]: + info_keys = ['features'] + info_as_dict = asdict(info) + metadata = {} + metadata['info'] = {key: info_as_dict[key] for key in info_keys} + if fingerprint is not None: + metadata['fingerprint'] = fingerprint + return {'huggingface': json.dumps(metadata)} + + def write_examples_on_file(self): + if not self.current_examples: + return + if self.schema: + schema_cols = set(self.schema.names) + examples_cols = self.current_examples[0][0].keys() + common_cols = [col for col in self.schema.names if col in examples_cols] + extra_cols = [col for col in examples_cols if col not in schema_cols] + cols = common_cols + extra_cols + else: + cols = list(self.current_examples[0][0]) + batch_examples = {} + for col in cols: + if all((isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) for row in self.current_examples)): + arrays = [row[0][col] for row in self.current_examples] + arrays = [chunk for array in arrays for chunk in (array.chunks if isinstance(array, pa.ChunkedArray) else [array])] + batch_examples[col] = pa.concat_arrays(arrays) + else: + batch_examples[col] = [row[0][col].to_pylist()[0] if isinstance(row[0][col], (pa.Array, pa.ChunkedArray)) else row[0][col] for row in self.current_examples] + self.write_batch(batch_examples=batch_examples) + self.current_examples = [] + + def write_rows_on_file(self): + if not self.current_rows: + return + table = pa.concat_tables(self.current_rows) + self.write_table(table) + self.current_rows = [] + + def write(self, example: Dict[str, Any], key: Optional[Union[str, int, bytes]]=None, writer_batch_size: Optional[int]=None): + if self._check_duplicates: + hash = self._hasher.hash(key) + self.current_examples.append((example, hash)) + self.hkey_record.append((hash, key)) + else: + self.current_examples.append((example, '')) + if writer_batch_size is None: + writer_batch_size = self.writer_batch_size + if writer_batch_size is not None and len(self.current_examples) >= writer_batch_size: + if self._check_duplicates: + self.check_duplicate_keys() + self.hkey_record = [] + self.write_examples_on_file() + + def check_duplicate_keys(self): + tmp_record = set() + for (hash, key) in self.hkey_record: + if hash in tmp_record: + duplicate_key_indices = [str(self._num_examples + index) for 
(index, (duplicate_hash, _)) in enumerate(self.hkey_record) if duplicate_hash == hash] + raise DuplicatedKeysError(key, duplicate_key_indices) + else: + tmp_record.add(hash) + + def write_row(self, row: pa.Table, writer_batch_size: Optional[int]=None): + if len(row) != 1: + raise ValueError(f'Only single-row pyarrow tables are allowed but got table with {len(row)} rows.') + self.current_rows.append(row) + if writer_batch_size is None: + writer_batch_size = self.writer_batch_size + if writer_batch_size is not None and len(self.current_rows) >= writer_batch_size: + self.write_rows_on_file() + + def write_batch(self, batch_examples: Dict[str, List], writer_batch_size: Optional[int]=None): + if batch_examples and len(next(iter(batch_examples.values()))) == 0: + return + features = None if self.pa_writer is None and self.update_features else self._features + try_features = self._features if self.pa_writer is None and self.update_features else None + arrays = [] + inferred_features = Features() + if self.schema: + schema_cols = set(self.schema.names) + batch_cols = batch_examples.keys() + common_cols = [col for col in self.schema.names if col in batch_cols] + extra_cols = [col for col in batch_cols if col not in schema_cols] + cols = common_cols + extra_cols + else: + cols = list(batch_examples) + for col in cols: + col_values = batch_examples[col] + col_type = features[col] if features else None + if isinstance(col_values, (pa.Array, pa.ChunkedArray)): + array = cast_array_to_feature(col_values, col_type) if col_type is not None else col_values + arrays.append(array) + inferred_features[col] = generate_from_arrow_type(col_values.type) + else: + col_try_type = try_features[col] if try_features is not None and col in try_features else None + typed_sequence = OptimizedTypedSequence(col_values, type=col_type, try_type=col_try_type, col=col) + arrays.append(pa.array(typed_sequence)) + inferred_features[col] = typed_sequence.get_inferred_type() + schema = inferred_features.arrow_schema if self.pa_writer is None else self.schema + pa_table = pa.Table.from_arrays(arrays, schema=schema) + self.write_table(pa_table, writer_batch_size) + + def write_table(self, pa_table: pa.Table, writer_batch_size: Optional[int]=None): + if writer_batch_size is None: + writer_batch_size = self.writer_batch_size + if self.pa_writer is None: + self._build_writer(inferred_schema=pa_table.schema) + pa_table = pa_table.combine_chunks() + pa_table = table_cast(pa_table, self._schema) + if self.embed_local_files: + pa_table = embed_table_storage(pa_table) + self._num_bytes += pa_table.nbytes + self._num_examples += pa_table.num_rows + self.pa_writer.write_table(pa_table, writer_batch_size) + + def finalize(self, close_stream=True): + self.write_rows_on_file() + if self._check_duplicates: + self.check_duplicate_keys() + self.hkey_record = [] + self.write_examples_on_file() + if self.pa_writer is None and self.schema: + self._build_writer(self.schema) + if self.pa_writer is not None: + self.pa_writer.close() + self.pa_writer = None + if close_stream: + self.stream.close() + else: + if close_stream: + self.stream.close() + raise SchemaInferenceError('Please pass `features` or at least one example when writing data') + logger.debug(f"Done writing {self._num_examples} {self.unit} in {self._num_bytes} bytes {(self._path if self._path else '')}.") + return (self._num_examples, self._num_bytes) + +class ParquetWriter(ArrowWriter): + _WRITER_CLASS = pq.ParquetWriter + +# File: datasets-main/src/datasets/combine.py +from typing import 
List, Optional, TypeVar +from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets +from .dataset_dict import DatasetDict, IterableDatasetDict +from .info import DatasetInfo +from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets +from .splits import NamedSplit +from .utils import logging +from .utils.py_utils import Literal +logger = logging.get_logger(__name__) +DatasetType = TypeVar('DatasetType', Dataset, IterableDataset) + +def interleave_datasets(datasets: List[DatasetType], probabilities: Optional[List[float]]=None, seed: Optional[int]=None, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, stopping_strategy: Literal['first_exhausted', 'all_exhausted']='first_exhausted') -> DatasetType: + from .arrow_dataset import Dataset + from .iterable_dataset import IterableDataset + if not datasets: + raise ValueError('Unable to interleave an empty list of datasets.') + for (i, dataset) in enumerate(datasets): + if not isinstance(dataset, (Dataset, IterableDataset)): + if isinstance(dataset, (DatasetDict, IterableDatasetDict)): + if not dataset: + raise ValueError(f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is an empty dataset dictionary.') + raise ValueError(f"Dataset at position {i} has at least one split: {list(dataset)}\nPlease pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']") + raise ValueError(f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.') + if i == 0: + (dataset_type, other_type) = (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset) + elif not isinstance(dataset, dataset_type): + raise ValueError(f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.') + if stopping_strategy not in ['first_exhausted', 'all_exhausted']: + raise ValueError(f'{stopping_strategy} is not supported. 
Please enter a valid stopping_strategy.') + if dataset_type is Dataset: + return _interleave_map_style_datasets(datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy) + else: + return _interleave_iterable_datasets(datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy) + +def concatenate_datasets(dsets: List[DatasetType], info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, axis: int=0) -> DatasetType: + if not dsets: + raise ValueError('Unable to concatenate an empty list of datasets.') + for (i, dataset) in enumerate(dsets): + if not isinstance(dataset, (Dataset, IterableDataset)): + if isinstance(dataset, (DatasetDict, IterableDatasetDict)): + if not dataset: + raise ValueError(f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is an empty dataset dictionary.') + raise ValueError(f"Dataset at position {i} has at least one split: {list(dataset)}\nPlease pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']") + raise ValueError(f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.') + if i == 0: + (dataset_type, other_type) = (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset) + elif not isinstance(dataset, dataset_type): + raise ValueError(f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.') + if dataset_type is Dataset: + return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis) + else: + return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis) + +# File: datasets-main/src/datasets/commands/__init__.py +from abc import ABC, abstractmethod +from argparse import ArgumentParser + +class BaseDatasetsCLICommand(ABC): + + @staticmethod + @abstractmethod + def register_subcommand(parser: ArgumentParser): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() + +# File: datasets-main/src/datasets/commands/convert.py +import os +import re +import shutil +from argparse import ArgumentParser, Namespace +from datasets.commands import BaseDatasetsCLICommand +from datasets.utils.logging import get_logger +HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: ' +HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n' +TO_HIGHLIGHT = ['TextEncoderConfig', 'ByteTextEncoder', 'SubwordTextEncoder', 'encoder_config', 'maybe_build_from_corpus', 'manual_dir'] +TO_CONVERT = [('tfds\\.core', 'datasets'), ('tf\\.io\\.gfile\\.GFile', 'open'), ('tf\\.([\\w\\d]+)', "datasets.Value('\\1')"), ('tfds\\.features\\.Text\\(\\)', "datasets.Value('string')"), ('tfds\\.features\\.Text\\(', "datasets.Value('string'),"), ('features\\s*=\\s*tfds.features.FeaturesDict\\(', 'features=datasets.Features('), ('tfds\\.features\\.FeaturesDict\\(', 'dict('), ('The TensorFlow Datasets Authors', 'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'), ('tfds\\.', 'datasets.'), ('dl_manager\\.manual_dir', 'self.config.data_dir'), ('self\\.builder_config', 'self.config')] + +def convert_command_factory(args: Namespace): + return ConvertCommand(args.tfds_path, args.datasets_directory) + +class ConvertCommand(BaseDatasetsCLICommand): + + @staticmethod + def register_subcommand(parser: 
ArgumentParser): + train_parser = parser.add_parser('convert', help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.') + train_parser.add_argument('--tfds_path', type=str, required=True, help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.') + train_parser.add_argument('--datasets_directory', type=str, required=True, help='Path to the HuggingFace Datasets folder.') + train_parser.set_defaults(func=convert_command_factory) + + def __init__(self, tfds_path: str, datasets_directory: str, *args): + self._logger = get_logger('datasets-cli/converting') + self._tfds_path = tfds_path + self._datasets_directory = datasets_directory + + def run(self): + if os.path.isdir(self._tfds_path): + abs_tfds_path = os.path.abspath(self._tfds_path) + elif os.path.isfile(self._tfds_path): + abs_tfds_path = os.path.dirname(self._tfds_path) + else: + raise ValueError('--tfds_path is neither a directory nor a file. Please check path.') + abs_datasets_path = os.path.abspath(self._datasets_directory) + self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}') + utils_files = [] + with_manual_update = [] + imports_to_builder_map = {} + if os.path.isdir(self._tfds_path): + file_names = os.listdir(abs_tfds_path) + else: + file_names = [os.path.basename(self._tfds_path)] + for f_name in file_names: + self._logger.info(f'Looking at file {f_name}') + input_file = os.path.join(abs_tfds_path, f_name) + output_file = os.path.join(abs_datasets_path, f_name) + if not os.path.isfile(input_file) or '__init__' in f_name or '_test' in f_name or ('.py' not in f_name): + self._logger.info('Skipping file') + continue + with open(input_file, encoding='utf-8') as f: + lines = f.readlines() + out_lines = [] + is_builder = False + needs_manual_update = False + tfds_imports = [] + for line in lines: + out_line = line + if 'import tensorflow.compat.v2 as tf' in out_line: + continue + elif '@tfds.core' in out_line: + continue + elif 'builder=self' in out_line: + continue + elif 'import tensorflow_datasets.public_api as tfds' in out_line: + out_line = 'import datasets\n' + elif 'import tensorflow' in out_line: + out_line = '' + continue + elif 'from absl import logging' in out_line: + out_line = 'from datasets import logging\n' + elif 'getLogger' in out_line: + out_line = out_line.replace('getLogger', 'get_logger') + elif any((expression in out_line for expression in TO_HIGHLIGHT)): + needs_manual_update = True + to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT)) + out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + '\n') + out_lines.append(out_line) + out_lines.append(HIGHLIGHT_MESSAGE_POST) + continue + else: + for (pattern, replacement) in TO_CONVERT: + out_line = re.sub(pattern, replacement, out_line) + if 'tensorflow_datasets' in out_line: + match = re.match('from\\stensorflow_datasets.*import\\s([^\\.\\r\\n]+)', out_line) + tfds_imports.extend((imp.strip() for imp in match.group(1).split(','))) + out_line = 'from . import ' + match.group(1) + if 'tf.' in out_line or 'tfds.' 
in out_line or 'tensorflow_datasets' in out_line: + raise ValueError(f'Error converting {out_line.strip()}') + if 'GeneratorBasedBuilder' in out_line: + is_builder = True + out_lines.append(out_line) + if is_builder or 'wmt' in f_name: + dir_name = f_name.replace('.py', '') + output_dir = os.path.join(abs_datasets_path, dir_name) + output_file = os.path.join(output_dir, f_name) + os.makedirs(output_dir, exist_ok=True) + self._logger.info(f'Adding directory {output_dir}') + imports_to_builder_map.update({imp: output_dir for imp in tfds_imports}) + else: + utils_files.append(output_file) + if needs_manual_update: + with_manual_update.append(output_file) + with open(output_file, 'w', encoding='utf-8') as f: + f.writelines(out_lines) + self._logger.info(f'Converted in {output_file}') + for utils_file in utils_files: + try: + f_name = os.path.basename(utils_file) + dest_folder = imports_to_builder_map[f_name.replace('.py', '')] + self._logger.info(f'Moving {dest_folder} to {utils_file}') + shutil.copy(utils_file, dest_folder) + except KeyError: + self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.') + if with_manual_update: + for file_path in with_manual_update: + self._logger.warning(f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.") + +# File: datasets-main/src/datasets/commands/convert_to_parquet.py +from argparse import ArgumentParser +from typing import Optional +from datasets.commands import BaseDatasetsCLICommand +from datasets.hub import convert_to_parquet + +def _command_factory(args): + return ConvertToParquetCommand(args.dataset_id, args.token, args.revision, args.trust_remote_code) + +class ConvertToParquetCommand(BaseDatasetsCLICommand): + + @staticmethod + def register_subcommand(parser): + parser: ArgumentParser = parser.add_parser('convert_to_parquet', help='Convert dataset to Parquet') + parser.add_argument('dataset_id', help='source dataset ID, e.g. 
USERNAME/DATASET_NAME or ORGANIZATION/DATASET_NAME') + parser.add_argument('--token', help="access token to the Hugging Face Hub (defaults to logged-in user's one)") + parser.add_argument('--revision', help='source revision') + parser.add_argument('--trust_remote_code', action='store_true', help='whether to trust the code execution of the load script') + parser.set_defaults(func=_command_factory) + + def __init__(self, dataset_id: str, token: Optional[str], revision: Optional[str], trust_remote_code: bool): + self._dataset_id = dataset_id + self._token = token + self._revision = revision + self._trust_remote_code = trust_remote_code + + def run(self) -> None: + _ = convert_to_parquet(self._dataset_id, revision=self._revision, token=self._token, trust_remote_code=self._trust_remote_code) + +# File: datasets-main/src/datasets/commands/datasets_cli.py +from argparse import ArgumentParser +from datasets.commands.convert import ConvertCommand +from datasets.commands.convert_to_parquet import ConvertToParquetCommand +from datasets.commands.delete_from_hub import DeleteFromHubCommand +from datasets.commands.env import EnvironmentCommand +from datasets.commands.test import TestCommand +from datasets.utils.logging import set_verbosity_info + +def parse_unknown_args(unknown_args): + return {key.lstrip('-'): value for (key, value) in zip(unknown_args[::2], unknown_args[1::2])} + +def main(): + parser = ArgumentParser('HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False) + commands_parser = parser.add_subparsers(help='datasets-cli command helpers') + set_verbosity_info() + ConvertCommand.register_subcommand(commands_parser) + EnvironmentCommand.register_subcommand(commands_parser) + TestCommand.register_subcommand(commands_parser) + ConvertToParquetCommand.register_subcommand(commands_parser) + DeleteFromHubCommand.register_subcommand(commands_parser) + (args, unknown_args) = parser.parse_known_args() + if not hasattr(args, 'func'): + parser.print_help() + exit(1) + kwargs = parse_unknown_args(unknown_args) + service = args.func(args, **kwargs) + service.run() +if __name__ == '__main__': + main() + +# File: datasets-main/src/datasets/commands/delete_from_hub.py +from argparse import ArgumentParser +from typing import Optional +from datasets.commands import BaseDatasetsCLICommand +from datasets.hub import delete_from_hub + +def _command_factory(args): + return DeleteFromHubCommand(args.dataset_id, args.config_name, args.token, args.revision) + +class DeleteFromHubCommand(BaseDatasetsCLICommand): + + @staticmethod + def register_subcommand(parser): + parser: ArgumentParser = parser.add_parser('delete_from_hub', help='Delete dataset config from the Hub') + parser.add_argument('dataset_id', help='source dataset ID, e.g. 
USERNAME/DATASET_NAME or ORGANIZATION/DATASET_NAME') + parser.add_argument('config_name', help='config name to delete') + parser.add_argument('--token', help='access token to the Hugging Face Hub') + parser.add_argument('--revision', help='source revision') + parser.set_defaults(func=_command_factory) + + def __init__(self, dataset_id: str, config_name: str, token: Optional[str], revision: Optional[str]): + self._dataset_id = dataset_id + self._config_name = config_name + self._token = token + self._revision = revision + + def run(self) -> None: + _ = delete_from_hub(self._dataset_id, self._config_name, revision=self._revision, token=self._token) + +# File: datasets-main/src/datasets/commands/env.py +import platform +from argparse import ArgumentParser +import fsspec +import huggingface_hub +import pandas +import pyarrow +from datasets import __version__ as version +from datasets.commands import BaseDatasetsCLICommand + +def info_command_factory(_): + return EnvironmentCommand() + +class EnvironmentCommand(BaseDatasetsCLICommand): + + @staticmethod + def register_subcommand(parser: ArgumentParser): + download_parser = parser.add_parser('env', help='Print relevant system environment info.') + download_parser.set_defaults(func=info_command_factory) + + def run(self): + info = {'`datasets` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), '`huggingface_hub` version': huggingface_hub.__version__, 'PyArrow version': pyarrow.__version__, 'Pandas version': pandas.__version__, '`fsspec` version': fsspec.__version__} + print('\nCopy-and-paste the text below in your GitHub issue.\n') + print(self.format_dict(info)) + return info + + @staticmethod + def format_dict(d): + return '\n'.join([f'- {prop}: {val}' for (prop, val) in d.items()]) + '\n' + +# File: datasets-main/src/datasets/config.py +import importlib +import importlib.metadata +import logging +import os +import platform +from pathlib import Path +from typing import Optional +from huggingface_hub import constants +from packaging import version +logger = logging.getLogger(__name__.split('.', 1)[0]) +S3_DATASETS_BUCKET_PREFIX = 'https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets' +CLOUDFRONT_DATASETS_DISTRIB_PREFIX = 'https://cdn-datasets.huggingface.co/datasets/datasets' +REPO_DATASETS_URL = 'https://raw.githubusercontent.com/huggingface/datasets/{revision}/datasets/{path}/{name}' +HF_ENDPOINT = os.environ.get('HF_ENDPOINT', 'https://huggingface.co') +HUB_DATASETS_URL = HF_ENDPOINT + '/datasets/{repo_id}/resolve/{revision}/{path}' +HUB_DATASETS_HFFS_URL = 'hf://datasets/{repo_id}@{revision}/{path}' +HUB_DEFAULT_VERSION = 'main' +PY_VERSION = version.parse(platform.python_version()) +ENV_VARS_TRUE_VALUES = {'1', 'ON', 'YES', 'TRUE'} +ENV_VARS_FALSE_VALUES = {'0', 'OFF', 'NO', 'FALSE'} +ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({'AUTO'}) +ENV_VARS_FALSE_AND_AUTO_VALUES = ENV_VARS_FALSE_VALUES.union({'AUTO'}) +DILL_VERSION = version.parse(importlib.metadata.version('dill')) +FSSPEC_VERSION = version.parse(importlib.metadata.version('fsspec')) +PANDAS_VERSION = version.parse(importlib.metadata.version('pandas')) +PYARROW_VERSION = version.parse(importlib.metadata.version('pyarrow')) +HF_HUB_VERSION = version.parse(importlib.metadata.version('huggingface_hub')) +USE_TF = os.environ.get('USE_TF', 'AUTO').upper() +USE_TORCH = os.environ.get('USE_TORCH', 'AUTO').upper() +USE_JAX = os.environ.get('USE_JAX', 'AUTO').upper() +TORCH_VERSION = 'N/A' +TORCH_AVAILABLE = False +if 
USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: + TORCH_AVAILABLE = importlib.util.find_spec('torch') is not None + if TORCH_AVAILABLE: + try: + TORCH_VERSION = version.parse(importlib.metadata.version('torch')) + logger.info(f'PyTorch version {TORCH_VERSION} available.') + except importlib.metadata.PackageNotFoundError: + pass +else: + logger.info('Disabling PyTorch because USE_TF is set') +POLARS_VERSION = 'N/A' +POLARS_AVAILABLE = importlib.util.find_spec('polars') is not None +if POLARS_AVAILABLE: + try: + POLARS_VERSION = version.parse(importlib.metadata.version('polars')) + logger.info(f'Polars version {POLARS_VERSION} available.') + except importlib.metadata.PackageNotFoundError: + pass +TF_VERSION = 'N/A' +TF_AVAILABLE = False +if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: + TF_AVAILABLE = importlib.util.find_spec('tensorflow') is not None + if TF_AVAILABLE: + for package in ['tensorflow', 'tensorflow-cpu', 'tensorflow-gpu', 'tf-nightly', 'tf-nightly-cpu', 'tf-nightly-gpu', 'intel-tensorflow', 'tensorflow-rocm', 'tensorflow-macos']: + try: + TF_VERSION = version.parse(importlib.metadata.version(package)) + except importlib.metadata.PackageNotFoundError: + continue + else: + break + else: + TF_AVAILABLE = False + if TF_AVAILABLE: + if TF_VERSION.major < 2: + logger.info(f'TensorFlow found but with version {TF_VERSION}. `datasets` requires version 2 minimum.') + TF_AVAILABLE = False + else: + logger.info(f'TensorFlow version {TF_VERSION} available.') +else: + logger.info('Disabling Tensorflow because USE_TORCH is set') +JAX_VERSION = 'N/A' +JAX_AVAILABLE = False +if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: + JAX_AVAILABLE = importlib.util.find_spec('jax') is not None and importlib.util.find_spec('jaxlib') is not None + if JAX_AVAILABLE: + try: + JAX_VERSION = version.parse(importlib.metadata.version('jax')) + logger.info(f'JAX version {JAX_VERSION} available.') + except importlib.metadata.PackageNotFoundError: + pass +else: + logger.info('Disabling JAX because USE_JAX is set to False') +SQLALCHEMY_AVAILABLE = importlib.util.find_spec('sqlalchemy') is not None +PIL_AVAILABLE = importlib.util.find_spec('PIL') is not None +IS_OPUS_SUPPORTED = importlib.util.find_spec('soundfile') is not None and version.parse(importlib.import_module('soundfile').__libsndfile_version__) >= version.parse('1.0.31') +IS_MP3_SUPPORTED = importlib.util.find_spec('soundfile') is not None and version.parse(importlib.import_module('soundfile').__libsndfile_version__) >= version.parse('1.1.0') +RARFILE_AVAILABLE = importlib.util.find_spec('rarfile') is not None +ZSTANDARD_AVAILABLE = importlib.util.find_spec('zstandard') is not None +LZ4_AVAILABLE = importlib.util.find_spec('lz4') is not None +PY7ZR_AVAILABLE = importlib.util.find_spec('py7zr') is not None +DEFAULT_XDG_CACHE_HOME = '~/.cache' +XDG_CACHE_HOME = os.getenv('XDG_CACHE_HOME', DEFAULT_XDG_CACHE_HOME) +DEFAULT_HF_CACHE_HOME = os.path.join(XDG_CACHE_HOME, 'huggingface') +HF_CACHE_HOME = os.path.expanduser(os.getenv('HF_HOME', DEFAULT_HF_CACHE_HOME)) +DEFAULT_HF_DATASETS_CACHE = os.path.join(HF_CACHE_HOME, 'datasets') +HF_DATASETS_CACHE = Path(os.getenv('HF_DATASETS_CACHE', DEFAULT_HF_DATASETS_CACHE)) +DEFAULT_HF_MODULES_CACHE = os.path.join(HF_CACHE_HOME, 'modules') +HF_MODULES_CACHE = Path(os.getenv('HF_MODULES_CACHE', DEFAULT_HF_MODULES_CACHE)) +DOWNLOADED_DATASETS_DIR = 'downloads' +DEFAULT_DOWNLOADED_DATASETS_PATH = os.path.join(HF_DATASETS_CACHE, DOWNLOADED_DATASETS_DIR) 
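For reference, a minimal sketch of how the cache-path constants defined above compose at import time. It assumes `datasets` is installed, that `HF_DATASETS_CACHE` and `HF_DATASETS_DOWNLOADED_DATASETS_PATH` are not set in the environment, and that `/tmp/hf_home` is only a placeholder value, not a recommended location:

import os
os.environ['HF_HOME'] = '/tmp/hf_home'  # placeholder value; must be set before datasets.config is imported
from datasets import config  # config.py (above) reads HF_HOME when the module is imported
print(config.HF_CACHE_HOME)                      # /tmp/hf_home
print(str(config.HF_DATASETS_CACHE))             # /tmp/hf_home/datasets
print(config.DEFAULT_DOWNLOADED_DATASETS_PATH)   # /tmp/hf_home/datasets/downloads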
+DOWNLOADED_DATASETS_PATH = Path(os.getenv('HF_DATASETS_DOWNLOADED_DATASETS_PATH', DEFAULT_DOWNLOADED_DATASETS_PATH)) +EXTRACTED_DATASETS_DIR = 'extracted' +DEFAULT_EXTRACTED_DATASETS_PATH = os.path.join(DEFAULT_DOWNLOADED_DATASETS_PATH, EXTRACTED_DATASETS_DIR) +EXTRACTED_DATASETS_PATH = Path(os.getenv('HF_DATASETS_EXTRACTED_DATASETS_PATH', DEFAULT_EXTRACTED_DATASETS_PATH)) +HF_UPDATE_DOWNLOAD_COUNTS = os.environ.get('HF_UPDATE_DOWNLOAD_COUNTS', 'AUTO').upper() in ENV_VARS_TRUE_AND_AUTO_VALUES +HF_DATASETS_MULTITHREADING_MAX_WORKERS = 16 +__HF_DATASETS_TRUST_REMOTE_CODE = os.environ.get('HF_DATASETS_TRUST_REMOTE_CODE', 'ask') +HF_DATASETS_TRUST_REMOTE_CODE: Optional[bool] = True if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_TRUE_VALUES else False if __HF_DATASETS_TRUST_REMOTE_CODE.upper() in ENV_VARS_FALSE_VALUES else None +TIME_OUT_REMOTE_CODE = 15 +USE_PARQUET_EXPORT = True +DEFAULT_MAX_BATCH_SIZE = 1000 +ARROW_READER_BATCH_SIZE_IN_DATASET_ITER = 10 +MAX_SHARD_SIZE = '500MB' +PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS = 100 +PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS = 100 +PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS = 100 +_offline = os.environ.get('HF_DATASETS_OFFLINE') +HF_HUB_OFFLINE = constants.HF_HUB_OFFLINE if _offline is None else _offline.upper() in ENV_VARS_TRUE_VALUES +HF_DATASETS_OFFLINE = HF_HUB_OFFLINE +__HF_DATASETS_DISABLE_PROGRESS_BARS = os.environ.get('HF_DATASETS_DISABLE_PROGRESS_BARS') +HF_DATASETS_DISABLE_PROGRESS_BARS: Optional[bool] = __HF_DATASETS_DISABLE_PROGRESS_BARS.upper() in ENV_VARS_TRUE_VALUES if __HF_DATASETS_DISABLE_PROGRESS_BARS is not None else None +DEFAULT_IN_MEMORY_MAX_SIZE = 0 +IN_MEMORY_MAX_SIZE = float(os.environ.get('HF_DATASETS_IN_MEMORY_MAX_SIZE', DEFAULT_IN_MEMORY_MAX_SIZE)) +DATASET_ARROW_FILENAME = 'dataset.arrow' +DATASET_INDICES_FILENAME = 'indices.arrow' +DATASET_STATE_JSON_FILENAME = 'state.json' +DATASET_INFO_FILENAME = 'dataset_info.json' +DATASETDICT_INFOS_FILENAME = 'dataset_infos.json' +LICENSE_FILENAME = 'LICENSE' +DATASETDICT_JSON_FILENAME = 'dataset_dict.json' +METADATA_CONFIGS_FIELD = 'configs' +REPOCARD_FILENAME = 'README.md' +REPOYAML_FILENAME = '.huggingface.yaml' +MODULE_NAME_FOR_DYNAMIC_MODULES = 'datasets_modules' +MAX_DATASET_CONFIG_ID_READABLE_LENGTH = 255 +TEMP_CACHE_DIR_PREFIX = 'hf_datasets-' +STREAMING_READ_MAX_RETRIES = 20 +STREAMING_READ_RETRY_INTERVAL = 5 +DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 +GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 10 +ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE = 200 +PBAR_REFRESH_TIME_INTERVAL = 0.05 +UPLOADS_MAX_NUMBER_PER_COMMIT = 50 +MAX_TABLE_NBYTES_FOR_PICKLING = 4 << 30 + +# File: datasets-main/src/datasets/data_files.py +import os +import re +from functools import partial +from glob import has_magic +from pathlib import Path, PurePath +from typing import Callable, Dict, List, Optional, Set, Tuple, Union +import huggingface_hub +from fsspec.core import url_to_fs +from fsspec.implementations.http import HTTPFileSystem +from huggingface_hub import HfFileSystem +from packaging import version +from tqdm.contrib.concurrent import thread_map +from . 
import config +from .download import DownloadConfig +from .naming import _split_re +from .splits import Split +from .utils import logging +from .utils import tqdm as hf_tqdm +from .utils.file_utils import _prepare_path_and_storage_options, is_local_path, is_relative_path, xbasename, xjoin +from .utils.py_utils import glob_pattern_to_regex, string_to_dict +SingleOriginMetadata = Union[Tuple[str, str], Tuple[str], Tuple[()]] +SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN) +logger = logging.get_logger(__name__) + +class Url(str): + pass + +class EmptyDatasetError(FileNotFoundError): + pass +SPLIT_PATTERN_SHARDED = 'data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*' +SPLIT_KEYWORDS = {Split.TRAIN: ['train', 'training'], Split.VALIDATION: ['validation', 'valid', 'dev', 'val'], Split.TEST: ['test', 'testing', 'eval', 'evaluation']} +NON_WORDS_CHARS = '-._ 0-9' +if config.FSSPEC_VERSION < version.parse('2023.9.0'): + KEYWORDS_IN_FILENAME_BASE_PATTERNS = ['**[{sep}/]{keyword}[{sep}]*', '{keyword}[{sep}]*'] + KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = ['{keyword}/**', '{keyword}[{sep}]*/**', '**[{sep}/]{keyword}/**', '**[{sep}/]{keyword}[{sep}]*/**'] +elif config.FSSPEC_VERSION < version.parse('2023.12.0'): + KEYWORDS_IN_FILENAME_BASE_PATTERNS = ['**/*[{sep}/]{keyword}[{sep}]*', '{keyword}[{sep}]*'] + KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = ['{keyword}/**/*', '{keyword}[{sep}]*/**/*', '**/*[{sep}/]{keyword}/**/*', '**/*[{sep}/]{keyword}[{sep}]*/**/*'] +else: + KEYWORDS_IN_FILENAME_BASE_PATTERNS = ['**/{keyword}[{sep}]*', '**/*[{sep}]{keyword}[{sep}]*'] + KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = ['**/{keyword}/**', '**/{keyword}[{sep}]*/**', '**/*[{sep}]{keyword}/**', '**/*[{sep}]{keyword}[{sep}]*/**'] +DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST] +DEFAULT_PATTERNS_SPLIT_IN_FILENAME = {split: [pattern.format(keyword=keyword, sep=NON_WORDS_CHARS) for keyword in SPLIT_KEYWORDS[split] for pattern in KEYWORDS_IN_FILENAME_BASE_PATTERNS] for split in DEFAULT_SPLITS} +DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME = {split: [pattern.format(keyword=keyword, sep=NON_WORDS_CHARS) for keyword in SPLIT_KEYWORDS[split] for pattern in KEYWORDS_IN_DIR_NAME_BASE_PATTERNS] for split in DEFAULT_SPLITS} +DEFAULT_PATTERNS_ALL = {Split.TRAIN: ['**']} +ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED] +ALL_DEFAULT_PATTERNS = [DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME, DEFAULT_PATTERNS_SPLIT_IN_FILENAME, DEFAULT_PATTERNS_ALL] +if config.FSSPEC_VERSION < version.parse('2023.9.0'): + METADATA_PATTERNS = ['metadata.csv', '**/metadata.csv', 'metadata.jsonl', '**/metadata.jsonl'] +else: + METADATA_PATTERNS = ['**/metadata.csv', '**/metadata.jsonl'] +WILDCARD_CHARACTERS = '*[]' +FILES_TO_IGNORE = ['README.md', 'config.json', 'dataset_info.json', 'dataset_infos.json', 'dummy_data.zip', 'dataset_dict.json'] + +def contains_wildcards(pattern: str) -> bool: + return any((wilcard_character in pattern for wilcard_character in WILDCARD_CHARACTERS)) + +def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], 'DataFilesList']]: + if isinstance(patterns, dict): + return {str(key): value if isinstance(value, list) else [value] for (key, value) in patterns.items()} + elif isinstance(patterns, str): + return {SANITIZED_DEFAULT_SPLIT: [patterns]} + elif isinstance(patterns, list): + if any((isinstance(pattern, dict) for pattern in patterns)): + for pattern in patterns: + if not (isinstance(pattern, dict) and len(pattern) == 2 and ('split' in pattern) and isinstance(pattern.get('path'), (str, list))): + raise 
ValueError(f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}") + splits = [pattern['split'] for pattern in patterns] + if len(set(splits)) != len(splits): + raise ValueError(f'Some splits are duplicated in data_files: {splits}') + return {str(pattern['split']): pattern['path'] if isinstance(pattern['path'], list) else [pattern['path']] for pattern in patterns} + else: + return {SANITIZED_DEFAULT_SPLIT: patterns} + else: + return sanitize_patterns(list(patterns)) + +def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool: + data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith('__')] + data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith('__')] + return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern) + +def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool: + hidden_directories_in_path = [part for part in PurePath(matched_rel_path).parts if part.startswith('.') and (not set(part) == {'.'})] + hidden_directories_in_pattern = [part for part in PurePath(pattern).parts if part.startswith('.') and (not set(part) == {'.'})] + return len(hidden_directories_in_path) != len(hidden_directories_in_pattern) + +def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]: + for split_pattern in ALL_SPLIT_PATTERNS: + pattern = split_pattern.replace('{split}', '*') + try: + data_files = pattern_resolver(pattern) + except FileNotFoundError: + continue + if len(data_files) > 0: + splits: Set[str] = {string_to_dict(xbasename(p), glob_pattern_to_regex(xbasename(split_pattern)))['split'] for p in data_files} + if any((not re.match(_split_re, split) for split in splits)): + raise ValueError(f"Split name should match '{_split_re}'' but got '{splits}'.") + sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted(splits - set(DEFAULT_SPLITS)) + return {split: [split_pattern.format(split=split)] for split in sorted_splits} + for patterns_dict in ALL_DEFAULT_PATTERNS: + non_empty_splits = [] + for (split, patterns) in patterns_dict.items(): + for pattern in patterns: + try: + data_files = pattern_resolver(pattern) + except FileNotFoundError: + continue + if len(data_files) > 0: + non_empty_splits.append(split) + break + if non_empty_splits: + return {split: patterns_dict[split] for split in non_empty_splits} + raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}") + +def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]: + non_empty_patterns = [] + for pattern in METADATA_PATTERNS: + try: + metadata_files = pattern_resolver(pattern) + if len(metadata_files) > 0: + non_empty_patterns.append(pattern) + except FileNotFoundError: + pass + if non_empty_patterns: + return non_empty_patterns + raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}") + +def resolve_pattern(pattern: str, base_path: str, allowed_extensions: Optional[List[str]]=None, download_config: Optional[DownloadConfig]=None) -> List[str]: + if is_relative_path(pattern): + pattern = xjoin(base_path, pattern) + elif is_local_path(pattern): + base_path = os.path.splitdrive(pattern)[0] + os.sep + else: + base_path = '' + (pattern, storage_options) = _prepare_path_and_storage_options(pattern, 
download_config=download_config) + (fs, fs_pattern) = url_to_fs(pattern, **storage_options) + files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)} + protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0] + protocol_prefix = protocol + '://' if protocol != 'file' else '' + glob_kwargs = {} + if protocol == 'hf' and config.HF_HUB_VERSION >= version.parse('0.20.0'): + glob_kwargs['expand_info'] = False + matched_paths = [filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath for (filepath, info) in fs.glob(pattern, detail=True, **glob_kwargs).items() if info['type'] == 'file' and xbasename(filepath) not in files_to_ignore and (not _is_inside_unrequested_special_dir(filepath, fs_pattern)) and (not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(filepath, fs_pattern))] + if allowed_extensions is not None: + out = [filepath for filepath in matched_paths if any(('.' + suffix in allowed_extensions for suffix in xbasename(filepath).split('.')[1:]))] + if len(out) < len(matched_paths): + invalid_matched_files = list(set(matched_paths) - set(out)) + logger.info(f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}") + else: + out = matched_paths + if not out: + error_msg = f"Unable to find '{pattern}'" + if allowed_extensions is not None: + error_msg += f' with any supported extension {list(allowed_extensions)}' + raise FileNotFoundError(error_msg) + return out + +def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig]=None) -> Dict[str, List[str]]: + resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config) + try: + return _get_data_files_patterns(resolver) + except FileNotFoundError: + raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None + +def get_metadata_patterns(base_path: str, download_config: Optional[DownloadConfig]=None) -> List[str]: + resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config) + try: + return _get_metadata_files_patterns(resolver) + except FileNotFoundError: + raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None + +def _get_single_origin_metadata(data_file: str, download_config: Optional[DownloadConfig]=None) -> SingleOriginMetadata: + (data_file, storage_options) = _prepare_path_and_storage_options(data_file, download_config=download_config) + (fs, *_) = url_to_fs(data_file, **storage_options) + if isinstance(fs, HfFileSystem): + resolved_path = fs.resolve_path(data_file) + return (resolved_path.repo_id, resolved_path.revision) + elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT): + hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token) + data_file = 'hf://' + data_file[len(config.HF_ENDPOINT) + 1:].replace('/resolve/', '@', 1) + resolved_path = hffs.resolve_path(data_file) + return (resolved_path.repo_id, resolved_path.revision) + info = fs.info(data_file) + for key in ['ETag', 'etag', 'mtime']: + if key in info: + return (str(info[key]),) + return () + +def _get_origin_metadata(data_files: List[str], download_config: Optional[DownloadConfig]=None, max_workers: Optional[int]=None) -> List[SingleOriginMetadata]: + max_workers = max_workers if max_workers is not None else config.HF_DATASETS_MULTITHREADING_MAX_WORKERS + return thread_map(partial(_get_single_origin_metadata, download_config=download_config), data_files, 
max_workers=max_workers, tqdm_class=hf_tqdm, desc='Resolving data files', disable=len(data_files) <= 16 or None) + +class DataFilesList(List[str]): + + def __init__(self, data_files: List[str], origin_metadata: List[SingleOriginMetadata]) -> None: + super().__init__(data_files) + self.origin_metadata = origin_metadata + + def __add__(self, other: 'DataFilesList') -> 'DataFilesList': + return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata) + + @classmethod + def from_hf_repo(cls, patterns: List[str], dataset_info: huggingface_hub.hf_api.DatasetInfo, base_path: Optional[str]=None, allowed_extensions: Optional[List[str]]=None, download_config: Optional[DownloadConfig]=None) -> 'DataFilesList': + base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip('/') + return cls.from_patterns(patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config) + + @classmethod + def from_local_or_remote(cls, patterns: List[str], base_path: Optional[str]=None, allowed_extensions: Optional[List[str]]=None, download_config: Optional[DownloadConfig]=None) -> 'DataFilesList': + base_path = base_path if base_path is not None else Path().resolve().as_posix() + return cls.from_patterns(patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config) + + @classmethod + def from_patterns(cls, patterns: List[str], base_path: Optional[str]=None, allowed_extensions: Optional[List[str]]=None, download_config: Optional[DownloadConfig]=None) -> 'DataFilesList': + base_path = base_path if base_path is not None else Path().resolve().as_posix() + data_files = [] + for pattern in patterns: + try: + data_files.extend(resolve_pattern(pattern, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config)) + except FileNotFoundError: + if not has_magic(pattern): + raise + origin_metadata = _get_origin_metadata(data_files, download_config=download_config) + return cls(data_files, origin_metadata) + + def filter_extensions(self, extensions: List[str]) -> 'DataFilesList': + pattern = '|'.join(('\\' + ext for ext in extensions)) + pattern = re.compile(f'.*({pattern})(\\..+)?$') + return DataFilesList([data_file for data_file in self if pattern.match(data_file)], origin_metadata=self.origin_metadata) + +class DataFilesDict(Dict[str, DataFilesList]): + + @classmethod + def from_local_or_remote(cls, patterns: Dict[str, Union[List[str], DataFilesList]], base_path: Optional[str]=None, allowed_extensions: Optional[List[str]]=None, download_config: Optional[DownloadConfig]=None) -> 'DataFilesDict': + out = cls() + for (key, patterns_for_key) in patterns.items(): + out[key] = patterns_for_key if isinstance(patterns_for_key, DataFilesList) else DataFilesList.from_local_or_remote(patterns_for_key, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config) + return out + + @classmethod + def from_hf_repo(cls, patterns: Dict[str, Union[List[str], DataFilesList]], dataset_info: huggingface_hub.hf_api.DatasetInfo, base_path: Optional[str]=None, allowed_extensions: Optional[List[str]]=None, download_config: Optional[DownloadConfig]=None) -> 'DataFilesDict': + out = cls() + for (key, patterns_for_key) in patterns.items(): + out[key] = patterns_for_key if isinstance(patterns_for_key, DataFilesList) else DataFilesList.from_hf_repo(patterns_for_key, dataset_info=dataset_info, base_path=base_path, allowed_extensions=allowed_extensions, 
download_config=download_config) + return out + + @classmethod + def from_patterns(cls, patterns: Dict[str, Union[List[str], DataFilesList]], base_path: Optional[str]=None, allowed_extensions: Optional[List[str]]=None, download_config: Optional[DownloadConfig]=None) -> 'DataFilesDict': + out = cls() + for (key, patterns_for_key) in patterns.items(): + out[key] = patterns_for_key if isinstance(patterns_for_key, DataFilesList) else DataFilesList.from_patterns(patterns_for_key, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config) + return out + + def filter_extensions(self, extensions: List[str]) -> 'DataFilesDict': + out = type(self)() + for (key, data_files_list) in self.items(): + out[key] = data_files_list.filter_extensions(extensions) + return out + +class DataFilesPatternsList(List[str]): + + def __init__(self, patterns: List[str], allowed_extensions: List[Optional[List[str]]]): + super().__init__(patterns) + self.allowed_extensions = allowed_extensions + + def __add__(self, other): + return DataFilesList([*self, *other], self.allowed_extensions + other.allowed_extensions) + + @classmethod + def from_patterns(cls, patterns: List[str], allowed_extensions: Optional[List[str]]=None) -> 'DataFilesPatternsList': + return cls(patterns, [allowed_extensions] * len(patterns)) + + def resolve(self, base_path: str, download_config: Optional[DownloadConfig]=None) -> 'DataFilesList': + base_path = base_path if base_path is not None else Path().resolve().as_posix() + data_files = [] + for (pattern, allowed_extensions) in zip(self, self.allowed_extensions): + try: + data_files.extend(resolve_pattern(pattern, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config)) + except FileNotFoundError: + if not has_magic(pattern): + raise + origin_metadata = _get_origin_metadata(data_files, download_config=download_config) + return DataFilesList(data_files, origin_metadata) + + def filter_extensions(self, extensions: List[str]) -> 'DataFilesPatternsList': + return DataFilesPatternsList(self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions]) + +class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]): + + @classmethod + def from_patterns(cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]]=None) -> 'DataFilesPatternsDict': + out = cls() + for (key, patterns_for_key) in patterns.items(): + out[key] = patterns_for_key if isinstance(patterns_for_key, DataFilesPatternsList) else DataFilesPatternsList.from_patterns(patterns_for_key, allowed_extensions=allowed_extensions) + return out + + def resolve(self, base_path: str, download_config: Optional[DownloadConfig]=None) -> 'DataFilesDict': + out = DataFilesDict() + for (key, data_files_patterns_list) in self.items(): + out[key] = data_files_patterns_list.resolve(base_path, download_config) + return out + + def filter_extensions(self, extensions: List[str]) -> 'DataFilesPatternsDict': + out = type(self)() + for (key, data_files_patterns_list) in self.items(): + out[key] = data_files_patterns_list.filter_extensions(extensions) + return out + +# File: datasets-main/src/datasets/dataset_dict.py +import contextlib +import copy +import fnmatch +import json +import math +import posixpath +import re +from io import BytesIO +from pathlib import Path +from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union +import fsspec +import numpy as np +from fsspec.core import url_to_fs +from huggingface_hub import CommitInfo, 
CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi +from huggingface_hub.hf_api import RepoFile +from . import config +from .arrow_dataset import PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED, Dataset +from .features import Features +from .features.features import FeatureType +from .info import DatasetInfo, DatasetInfosDict +from .naming import _split_re +from .splits import NamedSplit, Split, SplitDict, SplitInfo +from .table import Table +from .utils import logging +from .utils.doc_utils import is_documented_by +from .utils.metadata import MetadataConfigs +from .utils.py_utils import asdict, glob_pattern_to_regex, string_to_dict +from .utils.typing import PathLike +logger = logging.get_logger(__name__) + +class DatasetDict(dict): + + def _check_values_type(self): + for dataset in self.values(): + if not isinstance(dataset, Dataset): + raise TypeError(f"Values in `DatasetDict` should be of type `Dataset` but got type '{type(dataset)}'") + + def _check_values_features(self): + items = list(self.items()) + for (item_a, item_b) in zip(items[:-1], items[1:]): + if item_a[1].features != item_b[1].features: + raise ValueError(f"All datasets in `DatasetDict` should have the same features but features for '{item_a[0]}' and '{item_b[0]}' don't match: {item_a[1].features} != {item_b[1].features}") + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + for dataset in self.values(): + if hasattr(dataset, '_data'): + del dataset._data + if hasattr(dataset, '_indices'): + del dataset._indices + + def __getitem__(self, k) -> Dataset: + if isinstance(k, (str, NamedSplit)) or len(self) == 0: + return super().__getitem__(k) + else: + available_suggested_splits = [split for split in (Split.TRAIN, Split.TEST, Split.VALIDATION) if split in self] + suggested_split = available_suggested_splits[0] if available_suggested_splits else list(self)[0] + raise KeyError(f"Invalid key: {k}. Please first select a split. For example: `my_dataset_dictionary['{suggested_split}'][{k}]`. 
Available splits: {sorted(self)}") + + @property + def data(self) -> Dict[str, Table]: + self._check_values_type() + return {k: dataset.data for (k, dataset) in self.items()} + + @property + def cache_files(self) -> Dict[str, Dict]: + self._check_values_type() + return {k: dataset.cache_files for (k, dataset) in self.items()} + + @property + def num_columns(self) -> Dict[str, int]: + self._check_values_type() + return {k: dataset.num_columns for (k, dataset) in self.items()} + + @property + def num_rows(self) -> Dict[str, int]: + self._check_values_type() + return {k: dataset.num_rows for (k, dataset) in self.items()} + + @property + def column_names(self) -> Dict[str, List[str]]: + self._check_values_type() + return {k: dataset.column_names for (k, dataset) in self.items()} + + @property + def shape(self) -> Dict[str, Tuple[int]]: + self._check_values_type() + return {k: dataset.shape for (k, dataset) in self.items()} + + def flatten(self, max_depth=16) -> 'DatasetDict': + self._check_values_type() + return DatasetDict({k: dataset.flatten(max_depth=max_depth) for (k, dataset) in self.items()}) + + def unique(self, column: str) -> Dict[str, List]: + self._check_values_type() + return {k: dataset.unique(column) for (k, dataset) in self.items()} + + def cleanup_cache_files(self) -> Dict[str, int]: + self._check_values_type() + return {k: dataset.cleanup_cache_files() for (k, dataset) in self.items()} + + def __repr__(self): + repr = '\n'.join([f'{k}: {v}' for (k, v) in self.items()]) + repr = re.sub('^', ' ' * 4, repr, 0, re.M) + return f'DatasetDict({{\n{repr}\n}})' + + def cast(self, features: Features) -> 'DatasetDict': + self._check_values_type() + return DatasetDict({k: dataset.cast(features=features) for (k, dataset) in self.items()}) + + def cast_column(self, column: str, feature) -> 'DatasetDict': + self._check_values_type() + return DatasetDict({k: dataset.cast_column(column=column, feature=feature) for (k, dataset) in self.items()}) + + def remove_columns(self, column_names: Union[str, List[str]]) -> 'DatasetDict': + self._check_values_type() + return DatasetDict({k: dataset.remove_columns(column_names=column_names) for (k, dataset) in self.items()}) + + def rename_column(self, original_column_name: str, new_column_name: str) -> 'DatasetDict': + self._check_values_type() + return DatasetDict({k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name) for (k, dataset) in self.items()}) + + def rename_columns(self, column_mapping: Dict[str, str]) -> 'DatasetDict': + self._check_values_type() + return DatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for (k, dataset) in self.items()}) + + def select_columns(self, column_names: Union[str, List[str]]) -> 'DatasetDict': + self._check_values_type() + return DatasetDict({k: dataset.select_columns(column_names=column_names) for (k, dataset) in self.items()}) + + def class_encode_column(self, column: str, include_nulls: bool=False) -> 'DatasetDict': + self._check_values_type() + return DatasetDict({k: dataset.class_encode_column(column=column, include_nulls=include_nulls) for (k, dataset) in self.items()}) + + @contextlib.contextmanager + def formatted_as(self, type: Optional[str]=None, columns: Optional[List]=None, output_all_columns: bool=False, **format_kwargs): + self._check_values_type() + old_format_type = {k: dataset._format_type for (k, dataset) in self.items()} + old_format_kwargs = {k: dataset._format_kwargs for (k, dataset) in self.items()} + old_format_columns = {k: 
dataset._format_columns for (k, dataset) in self.items()} + old_output_all_columns = {k: dataset._output_all_columns for (k, dataset) in self.items()} + try: + self.set_format(type, columns, output_all_columns, **format_kwargs) + yield + finally: + for (k, dataset) in self.items(): + dataset.set_format(old_format_type[k], old_format_columns[k], old_output_all_columns[k], **old_format_kwargs[k]) + + def set_format(self, type: Optional[str]=None, columns: Optional[List]=None, output_all_columns: bool=False, **format_kwargs): + self._check_values_type() + for dataset in self.values(): + dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) + + def reset_format(self): + self._check_values_type() + for dataset in self.values(): + dataset.set_format() + + def set_transform(self, transform: Optional[Callable], columns: Optional[List]=None, output_all_columns: bool=False): + self._check_values_type() + for dataset in self.values(): + dataset.set_format('custom', columns=columns, output_all_columns=output_all_columns, transform=transform) + + def with_format(self, type: Optional[str]=None, columns: Optional[List]=None, output_all_columns: bool=False, **format_kwargs) -> 'DatasetDict': + dataset = copy.deepcopy(self) + dataset.set_format(type=type, columns=columns, output_all_columns=output_all_columns, **format_kwargs) + return dataset + + def with_transform(self, transform: Optional[Callable], columns: Optional[List]=None, output_all_columns: bool=False) -> 'DatasetDict': + dataset = copy.deepcopy(self) + dataset.set_transform(transform=transform, columns=columns, output_all_columns=output_all_columns) + return dataset + + def map(self, function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, desc: Optional[str]=None) -> 'DatasetDict': + self._check_values_type() + if cache_file_names is None: + cache_file_names = {k: None for k in self} + return DatasetDict({k: dataset.map(function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_names[k], writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, fn_kwargs=fn_kwargs, num_proc=num_proc, desc=desc) for (k, dataset) in self.items()}) + + def filter(self, function: Optional[Callable]=None, with_indices: bool=False, with_rank: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000, fn_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, desc: Optional[str]=None) -> 'DatasetDict': + self._check_values_type() + if cache_file_names is None: + cache_file_names = {k: None for k in self} + return 
DatasetDict({k: dataset.filter(function=function, with_indices=with_indices, with_rank=with_rank, input_columns=input_columns, batched=batched, batch_size=batch_size, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, cache_file_name=cache_file_names[k], writer_batch_size=writer_batch_size, fn_kwargs=fn_kwargs, num_proc=num_proc, desc=desc) for (k, dataset) in self.items()}) + + def flatten_indices(self, keep_in_memory: bool=False, cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000, features: Optional[Features]=None, disable_nullable: bool=False, num_proc: Optional[int]=None, new_fingerprint: Optional[str]=None) -> 'DatasetDict': + self._check_values_type() + if cache_file_names is None: + cache_file_names = {k: None for k in self} + return DatasetDict({k: dataset.flatten_indices(keep_in_memory=keep_in_memory, cache_file_name=cache_file_names[k], writer_batch_size=writer_batch_size, features=features, disable_nullable=disable_nullable, num_proc=num_proc, new_fingerprint=new_fingerprint) for (k, dataset) in self.items()}) + + def sort(self, column_names: Union[str, Sequence[str]], reverse: Union[bool, Sequence[bool]]=False, null_placement: str='at_end', keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, indices_cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000) -> 'DatasetDict': + self._check_values_type() + if indices_cache_file_names is None: + indices_cache_file_names = {k: None for k in self} + return DatasetDict({k: dataset.sort(column_names=column_names, reverse=reverse, null_placement=null_placement, keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, indices_cache_file_name=indices_cache_file_names[k], writer_batch_size=writer_batch_size) for (k, dataset) in self.items()}) + + def shuffle(self, seeds: Optional[Union[int, Dict[str, Optional[int]]]]=None, seed: Optional[int]=None, generators: Optional[Dict[str, np.random.Generator]]=None, keep_in_memory: bool=False, load_from_cache_file: Optional[bool]=None, indices_cache_file_names: Optional[Dict[str, Optional[str]]]=None, writer_batch_size: Optional[int]=1000) -> 'DatasetDict': + self._check_values_type() + if seed is not None and seeds is not None: + raise ValueError('Please specify seed or seeds, but not both') + seeds = seed if seed is not None else seeds + if seeds is None: + seeds = {k: None for k in self} + elif not isinstance(seeds, dict): + seeds = {k: seeds for k in self} + if generators is None: + generators = {k: None for k in self} + if indices_cache_file_names is None: + indices_cache_file_names = {k: None for k in self} + return DatasetDict({k: dataset.shuffle(seed=seeds[k], generator=generators[k], keep_in_memory=keep_in_memory, load_from_cache_file=load_from_cache_file, indices_cache_file_name=indices_cache_file_names[k], writer_batch_size=writer_batch_size) for (k, dataset) in self.items()}) + + def save_to_disk(self, dataset_dict_path: PathLike, max_shard_size: Optional[Union[str, int]]=None, num_shards: Optional[Dict[str, int]]=None, num_proc: Optional[int]=None, storage_options: Optional[dict]=None): + fs: fsspec.AbstractFileSystem + (fs, _) = url_to_fs(dataset_dict_path, **storage_options or {}) + if num_shards is None: + num_shards = {k: None for k in self} + elif not isinstance(num_shards, dict): + raise ValueError("Please provide one `num_shards` per dataset in the dataset dictionary, e.g. 
{{'train': 128, 'test': 4}}") + fs.makedirs(dataset_dict_path, exist_ok=True) + with fs.open(posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME), 'w', encoding='utf-8') as f: + json.dump({'splits': list(self)}, f) + for (k, dataset) in self.items(): + dataset.save_to_disk(posixpath.join(dataset_dict_path, k), num_shards=num_shards.get(k), max_shard_size=max_shard_size, num_proc=num_proc, storage_options=storage_options) + + @staticmethod + def load_from_disk(dataset_dict_path: PathLike, keep_in_memory: Optional[bool]=None, storage_options: Optional[dict]=None) -> 'DatasetDict': + fs: fsspec.AbstractFileSystem + (fs, dataset_dict_path) = url_to_fs(dataset_dict_path, **storage_options or {}) + dataset_dict_json_path = posixpath.join(dataset_dict_path, config.DATASETDICT_JSON_FILENAME) + dataset_state_json_path = posixpath.join(dataset_dict_path, config.DATASET_STATE_JSON_FILENAME) + dataset_info_path = posixpath.join(dataset_dict_path, config.DATASET_INFO_FILENAME) + if not fs.isfile(dataset_dict_json_path): + if fs.isfile(dataset_info_path) and fs.isfile(dataset_state_json_path): + raise FileNotFoundError(f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but got a `Dataset`. Please use either `datasets.load_from_disk` or `Dataset.load_from_disk` instead.") + raise FileNotFoundError(f"No such file: '{dataset_dict_json_path}'. Expected to load a `DatasetDict` object, but provided path is not a `DatasetDict`.") + with fs.open(dataset_dict_json_path, 'r', encoding='utf-8') as f: + splits = json.load(f)['splits'] + dataset_dict = DatasetDict() + for k in splits: + dataset_dict_split_path = posixpath.join(fs.unstrip_protocol(dataset_dict_path), k) + dataset_dict[k] = Dataset.load_from_disk(dataset_dict_split_path, keep_in_memory=keep_in_memory, storage_options=storage_options) + return dataset_dict + + @staticmethod + def from_csv(path_or_paths: Dict[str, PathLike], features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, **kwargs) -> 'DatasetDict': + from .io.csv import CsvDatasetReader + return CsvDatasetReader(path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs).read() + + @staticmethod + def from_json(path_or_paths: Dict[str, PathLike], features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, **kwargs) -> 'DatasetDict': + from .io.json import JsonDatasetReader + return JsonDatasetReader(path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs).read() + + @staticmethod + def from_parquet(path_or_paths: Dict[str, PathLike], features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, columns: Optional[List[str]]=None, **kwargs) -> 'DatasetDict': + from .io.parquet import ParquetDatasetReader + return ParquetDatasetReader(path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, columns=columns, **kwargs).read() + + @staticmethod + def from_text(path_or_paths: Dict[str, PathLike], features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, **kwargs) -> 'DatasetDict': + from .io.text import TextDatasetReader + return TextDatasetReader(path_or_paths, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs).read() + + @is_documented_by(Dataset.align_labels_with_mapping) + def align_labels_with_mapping(self, label2id: Dict, label_column: str) -> 'DatasetDict': + self._check_values_type() + return DatasetDict({k: 
dataset.align_labels_with_mapping(label2id=label2id, label_column=label_column) for (k, dataset) in self.items()}) + + def push_to_hub(self, repo_id, config_name: str='default', set_default: Optional[bool]=None, data_dir: Optional[str]=None, commit_message: Optional[str]=None, commit_description: Optional[str]=None, private: Optional[bool]=False, token: Optional[str]=None, revision: Optional[str]=None, create_pr: Optional[bool]=False, max_shard_size: Optional[Union[int, str]]=None, num_shards: Optional[Dict[str, int]]=None, embed_external_files: bool=True) -> CommitInfo: + if num_shards is None: + num_shards = {k: None for k in self} + elif not isinstance(num_shards, dict): + raise ValueError("Please provide one `num_shards` per dataset in the dataset dictionary, e.g. {{'train': 128, 'test': 4}}") + self._check_values_type() + self._check_values_features() + total_uploaded_size = 0 + total_dataset_nbytes = 0 + info_to_dump: DatasetInfo = next(iter(self.values())).info.copy() + info_to_dump.config_name = config_name + info_to_dump.splits = SplitDict() + for split in self.keys(): + if not re.match(_split_re, split): + raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.") + api = HfApi(endpoint=config.HF_ENDPOINT, token=token) + repo_url = api.create_repo(repo_id, token=token, repo_type='dataset', private=private, exist_ok=True) + repo_id = repo_url.repo_id + if revision is not None and (not revision.startswith('refs/pr/')): + api.create_branch(repo_id, branch=revision, token=token, repo_type='dataset', exist_ok=True) + if not data_dir: + data_dir = config_name if config_name != 'default' else 'data' + additions = [] + for split in self.keys(): + logger.info(f'Pushing split {split} to the Hub.') + (split_additions, uploaded_size, dataset_nbytes) = self[split]._push_parquet_shards_to_hub(repo_id, data_dir=data_dir, split=split, token=token, revision=revision, create_pr=create_pr, max_shard_size=max_shard_size, num_shards=num_shards.get(split), embed_external_files=embed_external_files) + additions += split_additions + total_uploaded_size += uploaded_size + total_dataset_nbytes += dataset_nbytes + info_to_dump.splits[split] = SplitInfo(str(split), num_bytes=dataset_nbytes, num_examples=len(self[split])) + info_to_dump.download_checksums = None + info_to_dump.download_size = total_uploaded_size + info_to_dump.dataset_size = total_dataset_nbytes + info_to_dump.size_in_bytes = total_uploaded_size + total_dataset_nbytes + (repo_with_dataset_card, repo_with_dataset_infos) = (False, False) + repo_splits = [] + deletions = [] + repo_files_to_add = [addition.path_in_repo for addition in additions] + for repo_file in api.list_repo_tree(repo_id=repo_id, revision=revision, repo_type='dataset', token=token, recursive=True): + if not isinstance(repo_file, RepoFile): + continue + if repo_file.rfilename == config.REPOCARD_FILENAME: + repo_with_dataset_card = True + elif repo_file.rfilename == config.DATASETDICT_INFOS_FILENAME: + repo_with_dataset_infos = True + elif repo_file.rfilename.startswith(tuple((f'{data_dir}/{split}-' for split in self.keys()))) and repo_file.rfilename not in repo_files_to_add: + deletions.append(CommitOperationDelete(path_in_repo=repo_file.rfilename)) + elif fnmatch.fnmatch(repo_file.rfilename, PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED.replace('{split}', '*')): + repo_split = string_to_dict(repo_file.rfilename, glob_pattern_to_regex(PUSH_TO_HUB_WITHOUT_METADATA_CONFIGS_SPLIT_PATTERN_SHARDED))['split'] + if repo_split not in repo_splits: + 
repo_splits.append(split) + if repo_with_dataset_card: + dataset_card_path = api.hf_hub_download(repo_id, config.REPOCARD_FILENAME, repo_type='dataset', revision=revision) + dataset_card = DatasetCard.load(Path(dataset_card_path)) + dataset_card_data = dataset_card.data + metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) + elif repo_with_dataset_infos: + dataset_card = None + dataset_card_data = DatasetCardData() + metadata_configs = MetadataConfigs() + else: + dataset_card = None + dataset_card_data = DatasetCardData() + metadata_configs = MetadataConfigs() + if not metadata_configs and repo_splits: + default_metadata_configs_to_dump = {'data_files': [{'split': split, 'path': f'data/{split}-*'} for split in repo_splits]} + MetadataConfigs({'default': default_metadata_configs_to_dump}).to_dataset_card_data(dataset_card_data) + metadata_config_to_dump = {'data_files': [{'split': split, 'path': f'{data_dir}/{split}-*'} for split in self.keys()]} + if set_default and config_name != 'default': + if metadata_configs: + default_config_name = metadata_configs.get_default_config_name() + if default_config_name == 'default': + raise ValueError("There exists a configuration named 'default'. To set a different configuration as default, rename the 'default' one first.") + else: + _ = metadata_configs[default_config_name].pop('default') + metadata_config_to_dump['default'] = True + if repo_with_dataset_infos: + dataset_infos_path = api.hf_hub_download(repo_id, config.DATASETDICT_INFOS_FILENAME, repo_type='dataset', revision=revision) + with open(dataset_infos_path, encoding='utf-8') as f: + dataset_infos: dict = json.load(f) + dataset_infos[config_name] = asdict(info_to_dump) + buffer = BytesIO() + buffer.write(json.dumps(dataset_infos, indent=4).encode('utf-8')) + additions.append(CommitOperationAdd(path_in_repo=config.DATASETDICT_INFOS_FILENAME, path_or_fileobj=buffer)) + DatasetInfosDict({config_name: info_to_dump}).to_dataset_card_data(dataset_card_data) + MetadataConfigs({config_name: metadata_config_to_dump}).to_dataset_card_data(dataset_card_data) + dataset_card = DatasetCard(f'---\n{dataset_card_data}\n---\n') if dataset_card is None else dataset_card + additions.append(CommitOperationAdd(path_in_repo=config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())) + commit_message = commit_message if commit_message is not None else 'Upload dataset' + if len(additions) <= config.UPLOADS_MAX_NUMBER_PER_COMMIT: + commit_info = api.create_commit(repo_id, operations=additions + deletions, commit_message=commit_message, commit_description=commit_description, token=token, repo_type='dataset', revision=revision, create_pr=create_pr) + else: + logger.info(f'Number of files to upload is larger than {config.UPLOADS_MAX_NUMBER_PER_COMMIT}. 
Splitting the push into multiple commits.') + num_commits = math.ceil(len(additions) / config.UPLOADS_MAX_NUMBER_PER_COMMIT) + for i in range(0, num_commits): + operations = additions[i * config.UPLOADS_MAX_NUMBER_PER_COMMIT:(i + 1) * config.UPLOADS_MAX_NUMBER_PER_COMMIT] + (deletions if i == 0 else []) + commit_info = api.create_commit(repo_id, operations=operations, commit_message=commit_message + f' (part {i:05d}-of-{num_commits:05d})', commit_description=commit_description, token=token, repo_type='dataset', revision=revision, create_pr=create_pr) + logger.info(f'Commit #{i + 1} completed' + (f' (still {num_commits - i - 1} to go)' if num_commits - i - 1 else '') + '.') + return commit_info + +class IterableDatasetDict(dict): + + def __repr__(self): + repr = '\n'.join([f'{k}: {v}' for (k, v) in self.items()]) + repr = re.sub('^', ' ' * 4, repr, 0, re.M) + return f'IterableDatasetDict({{\n{repr}\n}})' + + def with_format(self, type: Optional[str]=None) -> 'IterableDatasetDict': + return IterableDatasetDict({k: dataset.with_format(type=type) for (k, dataset) in self.items()}) + + def map(self, function: Optional[Callable]=None, with_indices: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: int=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, fn_kwargs: Optional[dict]=None) -> 'IterableDatasetDict': + return IterableDatasetDict({k: dataset.map(function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, fn_kwargs=fn_kwargs) for (k, dataset) in self.items()}) + + def filter(self, function: Optional[Callable]=None, with_indices=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, fn_kwargs: Optional[dict]=None) -> 'IterableDatasetDict': + return IterableDatasetDict({k: dataset.filter(function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs) for (k, dataset) in self.items()}) + + def shuffle(self, seed=None, generator: Optional[np.random.Generator]=None, buffer_size: int=1000) -> 'IterableDatasetDict': + return IterableDatasetDict({k: dataset.shuffle(seed=seed, generator=generator, buffer_size=buffer_size) for (k, dataset) in self.items()}) + + def rename_column(self, original_column_name: str, new_column_name: str) -> 'IterableDatasetDict': + return IterableDatasetDict({k: dataset.rename_column(original_column_name=original_column_name, new_column_name=new_column_name) for (k, dataset) in self.items()}) + + def rename_columns(self, column_mapping: Dict[str, str]) -> 'IterableDatasetDict': + return IterableDatasetDict({k: dataset.rename_columns(column_mapping=column_mapping) for (k, dataset) in self.items()}) + + def remove_columns(self, column_names: Union[str, List[str]]) -> 'IterableDatasetDict': + return IterableDatasetDict({k: dataset.remove_columns(column_names) for (k, dataset) in self.items()}) + + def select_columns(self, column_names: Union[str, List[str]]) -> 'IterableDatasetDict': + return IterableDatasetDict({k: dataset.select_columns(column_names) for (k, dataset) in self.items()}) + + def cast_column(self, column: str, feature: FeatureType) -> 'IterableDatasetDict': + return IterableDatasetDict({k: dataset.cast_column(column=column, feature=feature) for (k, dataset) in self.items()}) + + def cast(self, features: Features) -> 
'IterableDatasetDict': + return IterableDatasetDict({k: dataset.cast(features=features) for (k, dataset) in self.items()}) + +# File: datasets-main/src/datasets/distributed.py +from typing import TypeVar +from .arrow_dataset import Dataset, _split_by_node_map_style_dataset +from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset +DatasetType = TypeVar('DatasetType', Dataset, IterableDataset) + +def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType: + if isinstance(dataset, Dataset): + return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size) + else: + return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size) + +# File: datasets-main/src/datasets/download/download_config.py +import copy +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Dict, Optional, Union +from .. import config + +@dataclass +class DownloadConfig: + cache_dir: Optional[Union[str, Path]] = None + force_download: bool = False + resume_download: bool = False + local_files_only: bool = False + proxies: Optional[Dict] = None + user_agent: Optional[str] = None + extract_compressed_file: bool = False + force_extract: bool = False + delete_extracted: bool = False + extract_on_the_fly: bool = False + use_etag: bool = True + num_proc: Optional[int] = None + max_retries: int = 1 + token: Optional[Union[str, bool]] = None + storage_options: Dict[str, Any] = field(default_factory=dict) + download_desc: Optional[str] = None + disable_tqdm: bool = False + + def copy(self) -> 'DownloadConfig': + return self.__class__(**{k: copy.deepcopy(v) for (k, v) in self.__dict__.items()}) + + def __setattr__(self, name, value): + if name == 'token' and getattr(self, 'storage_options', None) is not None: + if 'hf' not in self.storage_options: + self.storage_options['hf'] = {'token': value, 'endpoint': config.HF_ENDPOINT} + elif getattr(self.storage_options['hf'], 'token', None) is None: + self.storage_options['hf']['token'] = value + super().__setattr__(name, value) + +# File: datasets-main/src/datasets/download/download_manager.py +"""""" +import enum +import io +import multiprocessing +import os +from datetime import datetime +from functools import partial +from typing import Dict, List, Optional, Union +import fsspec +from fsspec.core import url_to_fs +from tqdm.contrib.concurrent import thread_map +from .. 
import config +from ..utils import tqdm as hf_tqdm +from ..utils.file_utils import ArchiveIterable, FilesIterable, cached_path, is_relative_path, stack_multiprocessing_download_progress_bars, url_or_path_join +from ..utils.info_utils import get_size_checksum_dict +from ..utils.logging import get_logger, tqdm +from ..utils.py_utils import NestedDataStructure, map_nested +from ..utils.track import tracked_str +from .download_config import DownloadConfig +logger = get_logger(__name__) + +class DownloadMode(enum.Enum): + REUSE_DATASET_IF_EXISTS = 'reuse_dataset_if_exists' + REUSE_CACHE_IF_EXISTS = 'reuse_cache_if_exists' + FORCE_REDOWNLOAD = 'force_redownload' + +class DownloadManager: + is_streaming = False + + def __init__(self, dataset_name: Optional[str]=None, data_dir: Optional[str]=None, download_config: Optional[DownloadConfig]=None, base_path: Optional[str]=None, record_checksums=True): + self._dataset_name = dataset_name + self._data_dir = data_dir + self._base_path = base_path or os.path.abspath('.') + self._recorded_sizes_checksums: Dict[str, Dict[str, Optional[Union[int, str]]]] = {} + self.record_checksums = record_checksums + self.download_config = download_config or DownloadConfig() + self.downloaded_paths = {} + self.extracted_paths = {} + + @property + def manual_dir(self): + return self._data_dir + + @property + def downloaded_size(self): + return sum((checksums_dict['num_bytes'] for checksums_dict in self._recorded_sizes_checksums.values())) + + def _record_sizes_checksums(self, url_or_urls: NestedDataStructure, downloaded_path_or_paths: NestedDataStructure): + delay = 5 + for (url, path) in hf_tqdm(list(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())), delay=delay, desc='Computing checksums'): + self._recorded_sizes_checksums[str(url)] = get_size_checksum_dict(path, record_checksum=self.record_checksums) + + def download(self, url_or_urls): + download_config = self.download_config.copy() + download_config.extract_compressed_file = False + if download_config.download_desc is None: + download_config.download_desc = 'Downloading data' + download_func = partial(self._download_batched, download_config=download_config) + start_time = datetime.now() + with stack_multiprocessing_download_progress_bars(): + downloaded_path_or_paths = map_nested(download_func, url_or_urls, map_tuple=True, num_proc=download_config.num_proc, desc='Downloading data files', batched=True, batch_size=-1) + duration = datetime.now() - start_time + logger.info(f'Downloading took {duration.total_seconds() // 60} min') + url_or_urls = NestedDataStructure(url_or_urls) + downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths) + self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten()))) + start_time = datetime.now() + self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) + duration = datetime.now() - start_time + logger.info(f'Checksum Computation took {duration.total_seconds() // 60} min') + return downloaded_path_or_paths.data + + def _download_batched(self, url_or_filenames: List[str], download_config: DownloadConfig) -> List[str]: + if len(url_or_filenames) >= 16: + download_config = download_config.copy() + download_config.disable_tqdm = True + download_func = partial(self._download_single, download_config=download_config) + fs: fsspec.AbstractFileSystem + (fs, path) = url_to_fs(url_or_filenames[0], **download_config.storage_options) + size = 0 + try: + size = fs.info(path).get('size', 0) + except Exception: + pass + 
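+            # Descriptive note (editor-added comment): the line below enables multithreaded downloads
+            # (HF_DATASETS_MULTITHREADING_MAX_WORKERS workers) only when the first file looks small
+            # (< 20 MiB, i.e. 20 << 20 bytes); larger files fall back to a single worker per batch.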
max_workers = config.HF_DATASETS_MULTITHREADING_MAX_WORKERS if size < 20 << 20 else 1 + return thread_map(download_func, url_or_filenames, desc=download_config.download_desc or 'Downloading', unit='files', position=multiprocessing.current_process()._identity[-1] if os.environ.get('HF_DATASETS_STACK_MULTIPROCESSING_DOWNLOAD_PROGRESS_BARS') == '1' and multiprocessing.current_process()._identity else None, max_workers=max_workers, tqdm_class=tqdm) + else: + return [self._download_single(url_or_filename, download_config=download_config) for url_or_filename in url_or_filenames] + + def _download_single(self, url_or_filename: str, download_config: DownloadConfig) -> str: + url_or_filename = str(url_or_filename) + if is_relative_path(url_or_filename): + url_or_filename = url_or_path_join(self._base_path, url_or_filename) + out = cached_path(url_or_filename, download_config=download_config) + out = tracked_str(out) + out.set_origin(url_or_filename) + return out + + def iter_archive(self, path_or_buf: Union[str, io.BufferedReader]): + if hasattr(path_or_buf, 'read'): + return ArchiveIterable.from_buf(path_or_buf) + else: + return ArchiveIterable.from_urlpath(path_or_buf) + + def iter_files(self, paths: Union[str, List[str]]): + return FilesIterable.from_urlpaths(paths) + + def extract(self, path_or_paths): + download_config = self.download_config.copy() + download_config.extract_compressed_file = True + extract_func = partial(self._download_single, download_config=download_config) + extracted_paths = map_nested(extract_func, path_or_paths, num_proc=download_config.num_proc, desc='Extracting data files') + path_or_paths = NestedDataStructure(path_or_paths) + extracted_paths = NestedDataStructure(extracted_paths) + self.extracted_paths.update(dict(zip(path_or_paths.flatten(), extracted_paths.flatten()))) + return extracted_paths.data + + def download_and_extract(self, url_or_urls): + return self.extract(self.download(url_or_urls)) + + def get_recorded_sizes_checksums(self): + return self._recorded_sizes_checksums.copy() + + def delete_extracted_files(self): + paths_to_delete = set(self.extracted_paths.values()) - set(self.downloaded_paths.values()) + for (key, path) in list(self.extracted_paths.items()): + if path in paths_to_delete and os.path.isfile(path): + os.remove(path) + del self.extracted_paths[key] + + def manage_extracted_files(self): + if self.download_config.delete_extracted: + self.delete_extracted_files() + +# File: datasets-main/src/datasets/download/streaming_download_manager.py +import io +import os +from typing import Iterable, List, Optional, Tuple, Union +from ..utils.file_utils import SINGLE_FILE_COMPRESSION_PROTOCOLS, ArchiveIterable, FilesIterable, _get_extraction_protocol, _get_path_extension, _prepare_path_and_storage_options, is_relative_path, url_or_path_join, xbasename, xdirname, xet_parse, xexists, xgetsize, xglob, xgzip_open, xisdir, xisfile, xjoin, xlistdir, xnumpy_load, xopen, xpandas_read_csv, xpandas_read_excel, xPath, xpyarrow_parquet_read_table, xrelpath, xsio_loadmat, xsplit, xsplitext, xwalk, xxml_dom_minidom_parse +from ..utils.logging import get_logger +from ..utils.py_utils import map_nested +from .download_config import DownloadConfig +logger = get_logger(__name__) + +class StreamingDownloadManager: + is_streaming = True + + def __init__(self, dataset_name: Optional[str]=None, data_dir: Optional[str]=None, download_config: Optional[DownloadConfig]=None, base_path: Optional[str]=None): + self._dataset_name = dataset_name + self._data_dir = data_dir + 
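+        # Descriptive note (editor-added comment): when no base_path is given, the current working
+        # directory is used below, and relative paths passed to download() are resolved against it.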
self._base_path = base_path or os.path.abspath('.') + self.download_config = download_config or DownloadConfig() + + @property + def manual_dir(self): + return self._data_dir + + def download(self, url_or_urls): + url_or_urls = map_nested(self._download_single, url_or_urls, map_tuple=True) + return url_or_urls + + def _download_single(self, urlpath: str) -> str: + urlpath = str(urlpath) + if is_relative_path(urlpath): + urlpath = url_or_path_join(self._base_path, urlpath) + return urlpath + + def extract(self, url_or_urls): + urlpaths = map_nested(self._extract, url_or_urls, map_tuple=True) + return urlpaths + + def _extract(self, urlpath: str) -> str: + urlpath = str(urlpath) + protocol = _get_extraction_protocol(urlpath, download_config=self.download_config) + path = urlpath.split('::')[0] + extension = _get_path_extension(path) + if extension in ['tgz', 'tar'] or path.endswith(('.tar.gz', '.tar.bz2', '.tar.xz')): + raise NotImplementedError(f"Extraction protocol for TAR archives like '{urlpath}' is not implemented in streaming mode. Please use `dl_manager.iter_archive` instead.\n\nExample usage:\n\n\turl = dl_manager.download(url)\n\ttar_archive_iterator = dl_manager.iter_archive(url)\n\n\tfor filename, file in tar_archive_iterator:\n\t\t...") + if protocol is None: + return urlpath + elif protocol in SINGLE_FILE_COMPRESSION_PROTOCOLS: + inner_file = os.path.basename(urlpath.split('::')[0]) + inner_file = inner_file[:inner_file.rindex('.')] if '.' in inner_file else inner_file + return f'{protocol}://{inner_file}::{urlpath}' + else: + return f'{protocol}://::{urlpath}' + + def download_and_extract(self, url_or_urls): + return self.extract(self.download(url_or_urls)) + + def iter_archive(self, urlpath_or_buf: Union[str, io.BufferedReader]) -> Iterable[Tuple]: + if hasattr(urlpath_or_buf, 'read'): + return ArchiveIterable.from_buf(urlpath_or_buf) + else: + return ArchiveIterable.from_urlpath(urlpath_or_buf, download_config=self.download_config) + + def iter_files(self, urlpaths: Union[str, List[str]]) -> Iterable[str]: + return FilesIterable.from_urlpaths(urlpaths, download_config=self.download_config) + +# File: datasets-main/src/datasets/exceptions.py +from typing import Any, Dict, List, Optional, Union +from huggingface_hub import HfFileSystem +from . 
import config
+from .table import CastError
+from .utils.track import TrackedIterableFromGenerator, tracked_list, tracked_str
+
+class DatasetsError(Exception):
+    pass
+
+class DefunctDatasetError(DatasetsError):
+    pass
+
+class FileNotFoundDatasetsError(DatasetsError, FileNotFoundError):
+    pass
+
+class DataFilesNotFoundError(FileNotFoundDatasetsError):
+    pass
+
+class DatasetNotFoundError(FileNotFoundDatasetsError):
+    pass
+
+class DatasetBuildError(DatasetsError):
+    pass
+
+class ManualDownloadError(DatasetBuildError):
+    pass
+
+class FileFormatError(DatasetBuildError):
+    pass
+
+class DatasetGenerationError(DatasetBuildError):
+    pass
+
+class DatasetGenerationCastError(DatasetGenerationError):
+
+    @classmethod
+    def from_cast_error(cls, cast_error: CastError, builder_name: str, gen_kwargs: Dict[str, Any], token: Optional[Union[bool, str]]) -> 'DatasetGenerationCastError':
+        explanation_message = f'\n\nAll the data files must have the same columns, but at some point {cast_error.details()}'
+        formatted_tracked_gen_kwargs: List[str] = []
+        for gen_kwarg in gen_kwargs.values():
+            if not isinstance(gen_kwarg, (tracked_str, tracked_list, TrackedIterableFromGenerator)):
+                continue
+            while isinstance(gen_kwarg, (tracked_list, TrackedIterableFromGenerator)) and gen_kwarg.last_item is not None:
+                gen_kwarg = gen_kwarg.last_item
+            if isinstance(gen_kwarg, tracked_str):
+                gen_kwarg = gen_kwarg.get_origin()
+            if isinstance(gen_kwarg, str) and gen_kwarg.startswith('hf://'):
+                resolved_path = HfFileSystem(endpoint=config.HF_ENDPOINT, token=token).resolve_path(gen_kwarg)
+                gen_kwarg = 'hf://' + resolved_path.unresolve()
+                if '@' + resolved_path.revision in gen_kwarg:
+                    gen_kwarg = gen_kwarg.replace('@' + resolved_path.revision, '', 1) + f' (at revision {resolved_path.revision})'
+            formatted_tracked_gen_kwargs.append(str(gen_kwarg))
+        if formatted_tracked_gen_kwargs:
+            explanation_message += f"\n\nThis happened while the {builder_name} dataset builder was generating data using\n\n{', '.join(formatted_tracked_gen_kwargs)}"
+        help_message = '\n\nPlease either edit the data files to have matching columns, or separate them into different configurations (see docs at https://hf.co/docs/hub/datasets-manual-configuration#multiple-configurations)'
+        return cls('An error occurred while generating the dataset' + explanation_message + help_message)
+
+class ChecksumVerificationError(DatasetsError):
+    pass
+
+class UnexpectedDownloadedFileError(ChecksumVerificationError):
+    pass
+
+class ExpectedMoreDownloadedFilesError(ChecksumVerificationError):
+    pass
+
+class NonMatchingChecksumError(ChecksumVerificationError):
+    pass
+
+class SplitsVerificationError(DatasetsError):
+    pass
+
+class UnexpectedSplitsError(SplitsVerificationError):
+    pass
+
+class ExpectedMoreSplitsError(SplitsVerificationError):
+    pass
+
+class NonMatchingSplitsSizesError(SplitsVerificationError):
+    pass
+
+# File: datasets-main/src/datasets/features/__init__.py
+__all__ = ['Audio', 'Array2D', 'Array3D', 'Array4D', 'Array5D', 'ClassLabel', 'Features', 'LargeList', 'Sequence', 'Value', 'Image', 'Translation', 'TranslationVariableLanguages']
+from .audio import Audio
+from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, LargeList, Sequence, Value
+from .image import Image
+from .translation import Translation, TranslationVariableLanguages
+
+# File: datasets-main/src/datasets/features/audio.py
+import os
+from dataclasses import dataclass, field
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
+import numpy as np
+import pyarrow as pa
+from ..
import config +from ..download.download_config import DownloadConfig +from ..table import array_cast +from ..utils.file_utils import xopen, xsplitext +from ..utils.py_utils import no_op_if_value_is_null, string_to_dict +if TYPE_CHECKING: + from .features import FeatureType + +@dataclass +class Audio: + sampling_rate: Optional[int] = None + mono: bool = True + decode: bool = True + id: Optional[str] = None + dtype: ClassVar[str] = 'dict' + pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()}) + _type: str = field(default='Audio', init=False, repr=False) + + def __call__(self): + return self.pa_type + + def encode_example(self, value: Union[str, bytes, dict]) -> dict: + try: + import soundfile as sf + except ImportError as err: + raise ImportError("To support encoding audio data, please install 'soundfile'.") from err + if isinstance(value, str): + return {'bytes': None, 'path': value} + elif isinstance(value, bytes): + return {'bytes': value, 'path': None} + elif 'array' in value: + buffer = BytesIO() + sf.write(buffer, value['array'], value['sampling_rate'], format='wav') + return {'bytes': buffer.getvalue(), 'path': None} + elif value.get('path') is not None and os.path.isfile(value['path']): + if value['path'].endswith('pcm'): + if value.get('sampling_rate') is None: + raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object") + if value.get('bytes'): + bytes_value = np.frombuffer(value['bytes'], dtype=np.int16).astype(np.float32) / 32767 + else: + bytes_value = np.memmap(value['path'], dtype='h', mode='r').astype(np.float32) / 32767 + buffer = BytesIO(bytes()) + sf.write(buffer, bytes_value, value['sampling_rate'], format='wav') + return {'bytes': buffer.getvalue(), 'path': None} + else: + return {'bytes': None, 'path': value.get('path')} + elif value.get('bytes') is not None or value.get('path') is not None: + return {'bytes': value.get('bytes'), 'path': value.get('path')} + else: + raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.") + + def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]]=None) -> dict: + if not self.decode: + raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.') + (path, file) = (value['path'], BytesIO(value['bytes'])) if value['bytes'] is not None else (value['path'], None) + if path is None and file is None: + raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.") + try: + import librosa + import soundfile as sf + except ImportError as err: + raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err + audio_format = xsplitext(path)[1][1:].lower() if path is not None else None + if not config.IS_OPUS_SUPPORTED and audio_format == 'opus': + raise RuntimeError('Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ') + elif not config.IS_MP3_SUPPORTED and audio_format == 'mp3': + raise RuntimeError('Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. 
') + if file is None: + token_per_repo_id = token_per_repo_id or {} + source_url = path.split('::')[-1] + pattern = config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL + try: + repo_id = string_to_dict(source_url, pattern)['repo_id'] + token = token_per_repo_id[repo_id] + except (ValueError, KeyError): + token = None + download_config = DownloadConfig(token=token) + with xopen(path, 'rb', download_config=download_config) as f: + (array, sampling_rate) = sf.read(f) + else: + (array, sampling_rate) = sf.read(file) + array = array.T + if self.mono: + array = librosa.to_mono(array) + if self.sampling_rate and self.sampling_rate != sampling_rate: + array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate) + sampling_rate = self.sampling_rate + return {'path': path, 'array': array, 'sampling_rate': sampling_rate} + + def flatten(self) -> Union['FeatureType', Dict[str, 'FeatureType']]: + from .features import Value + if self.decode: + raise ValueError('Cannot flatten a decoded Audio feature.') + return {'bytes': Value('binary'), 'path': Value('string')} + + def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray: + if pa.types.is_string(storage.type): + bytes_array = pa.array([None] * len(storage), type=pa.binary()) + storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null()) + elif pa.types.is_binary(storage.type): + path_array = pa.array([None] * len(storage), type=pa.string()) + storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null()) + elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('array'): + storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()]) + elif pa.types.is_struct(storage.type): + if storage.type.get_field_index('bytes') >= 0: + bytes_array = storage.field('bytes') + else: + bytes_array = pa.array([None] * len(storage), type=pa.binary()) + if storage.type.get_field_index('path') >= 0: + path_array = storage.field('path') + else: + path_array = pa.array([None] * len(storage), type=pa.string()) + storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null()) + return array_cast(storage, self.pa_type) + + def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: + + @no_op_if_value_is_null + def path_to_bytes(path): + with xopen(path, 'rb') as f: + bytes_ = f.read() + return bytes_ + bytes_array = pa.array([(path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist()], type=pa.binary()) + path_array = pa.array([os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()], type=pa.string()) + storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null()) + return array_cast(storage, self.pa_type) + +# File: datasets-main/src/datasets/features/features.py +"""""" +import copy +import json +import re +import sys +from collections.abc import Iterable, Mapping +from collections.abc import Sequence as SequenceABC +from dataclasses import InitVar, dataclass, field, fields +from functools import reduce, wraps +from operator import mul +from typing import Any, Callable, ClassVar, Dict, List, Optional, Tuple, Union +from typing import Sequence as Sequence_ +import numpy as np +import pandas as pd +import pyarrow as pa 
+import pyarrow.compute as pc +import pyarrow.types +from pandas.api.extensions import ExtensionArray as PandasExtensionArray +from pandas.api.extensions import ExtensionDtype as PandasExtensionDtype +from .. import config +from ..naming import camelcase_to_snakecase, snakecase_to_camelcase +from ..table import array_cast +from ..utils import experimental, logging +from ..utils.py_utils import asdict, first_non_null_value, zip_dict +from .audio import Audio +from .image import Image, encode_pil_image +from .translation import Translation, TranslationVariableLanguages +logger = logging.get_logger(__name__) + +def _arrow_to_datasets_dtype(arrow_type: pa.DataType) -> str: + if pyarrow.types.is_null(arrow_type): + return 'null' + elif pyarrow.types.is_boolean(arrow_type): + return 'bool' + elif pyarrow.types.is_int8(arrow_type): + return 'int8' + elif pyarrow.types.is_int16(arrow_type): + return 'int16' + elif pyarrow.types.is_int32(arrow_type): + return 'int32' + elif pyarrow.types.is_int64(arrow_type): + return 'int64' + elif pyarrow.types.is_uint8(arrow_type): + return 'uint8' + elif pyarrow.types.is_uint16(arrow_type): + return 'uint16' + elif pyarrow.types.is_uint32(arrow_type): + return 'uint32' + elif pyarrow.types.is_uint64(arrow_type): + return 'uint64' + elif pyarrow.types.is_float16(arrow_type): + return 'float16' + elif pyarrow.types.is_float32(arrow_type): + return 'float32' + elif pyarrow.types.is_float64(arrow_type): + return 'float64' + elif pyarrow.types.is_time32(arrow_type): + return f'time32[{pa.type_for_alias(str(arrow_type)).unit}]' + elif pyarrow.types.is_time64(arrow_type): + return f'time64[{pa.type_for_alias(str(arrow_type)).unit}]' + elif pyarrow.types.is_timestamp(arrow_type): + if arrow_type.tz is None: + return f'timestamp[{arrow_type.unit}]' + elif arrow_type.tz: + return f'timestamp[{arrow_type.unit}, tz={arrow_type.tz}]' + else: + raise ValueError(f'Unexpected timestamp object {arrow_type}.') + elif pyarrow.types.is_date32(arrow_type): + return 'date32' + elif pyarrow.types.is_date64(arrow_type): + return 'date64' + elif pyarrow.types.is_duration(arrow_type): + return f'duration[{arrow_type.unit}]' + elif pyarrow.types.is_decimal128(arrow_type): + return f'decimal128({arrow_type.precision}, {arrow_type.scale})' + elif pyarrow.types.is_decimal256(arrow_type): + return f'decimal256({arrow_type.precision}, {arrow_type.scale})' + elif pyarrow.types.is_binary(arrow_type): + return 'binary' + elif pyarrow.types.is_large_binary(arrow_type): + return 'large_binary' + elif pyarrow.types.is_string(arrow_type): + return 'string' + elif pyarrow.types.is_large_string(arrow_type): + return 'large_string' + elif pyarrow.types.is_dictionary(arrow_type): + return _arrow_to_datasets_dtype(arrow_type.value_type) + else: + raise ValueError(f'Arrow type {arrow_type} does not have a datasets dtype equivalent.') + +def string_to_arrow(datasets_dtype: str) -> pa.DataType: + + def _dtype_error_msg(dtype, pa_dtype, examples=None, urls=None): + msg = f'{dtype} is not a validly formatted string representation of the pyarrow {pa_dtype} type.' + if examples: + examples = ', '.join(examples[:-1]) + ' or ' + examples[-1] if len(examples) > 1 else examples[0] + msg += f'\nValid examples include: {examples}.' + if urls: + urls = ', '.join(urls[:-1]) + ' and ' + urls[-1] if len(urls) > 1 else urls[0] + msg += f'\nFor more insformation, see: {urls}.' 
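+ # The assembled message points the user at valid example strings and the relevant pyarrow documentation.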
+ return msg + if datasets_dtype in pa.__dict__: + return pa.__dict__[datasets_dtype]() + if datasets_dtype + '_' in pa.__dict__: + return pa.__dict__[datasets_dtype + '_']() + timestamp_matches = re.search('^timestamp\\[(.*)\\]$', datasets_dtype) + if timestamp_matches: + timestamp_internals = timestamp_matches.group(1) + internals_matches = re.search('^(s|ms|us|ns),\\s*tz=([a-zA-Z0-9/_+\\-:]*)$', timestamp_internals) + if timestamp_internals in ['s', 'ms', 'us', 'ns']: + return pa.timestamp(timestamp_internals) + elif internals_matches: + return pa.timestamp(internals_matches.group(1), internals_matches.group(2)) + else: + raise ValueError(_dtype_error_msg(datasets_dtype, 'timestamp', examples=['timestamp[us]', 'timestamp[us, tz=America/New_York'], urls=['https://arrow.apache.org/docs/python/generated/pyarrow.timestamp.html'])) + duration_matches = re.search('^duration\\[(.*)\\]$', datasets_dtype) + if duration_matches: + duration_internals = duration_matches.group(1) + if duration_internals in ['s', 'ms', 'us', 'ns']: + return pa.duration(duration_internals) + else: + raise ValueError(_dtype_error_msg(datasets_dtype, 'duration', examples=['duration[s]', 'duration[us]'], urls=['https://arrow.apache.org/docs/python/generated/pyarrow.duration.html'])) + time_matches = re.search('^time(.*)\\[(.*)\\]$', datasets_dtype) + if time_matches: + time_internals_bits = time_matches.group(1) + if time_internals_bits == '32': + time_internals_unit = time_matches.group(2) + if time_internals_unit in ['s', 'ms']: + return pa.time32(time_internals_unit) + else: + raise ValueError(f'{time_internals_unit} is not a valid unit for the pyarrow time32 type. Supported units: s (second) and ms (millisecond).') + elif time_internals_bits == '64': + time_internals_unit = time_matches.group(2) + if time_internals_unit in ['us', 'ns']: + return pa.time64(time_internals_unit) + else: + raise ValueError(f'{time_internals_unit} is not a valid unit for the pyarrow time64 type. 
Supported units: us (microsecond) and ns (nanosecond).') + else: + raise ValueError(_dtype_error_msg(datasets_dtype, 'time', examples=['time32[s]', 'time64[us]'], urls=['https://arrow.apache.org/docs/python/generated/pyarrow.time32.html', 'https://arrow.apache.org/docs/python/generated/pyarrow.time64.html'])) + decimal_matches = re.search('^decimal(.*)\\((.*)\\)$', datasets_dtype) + if decimal_matches: + decimal_internals_bits = decimal_matches.group(1) + if decimal_internals_bits == '128': + decimal_internals_precision_and_scale = re.search('^(\\d+),\\s*(-?\\d+)$', decimal_matches.group(2)) + if decimal_internals_precision_and_scale: + precision = decimal_internals_precision_and_scale.group(1) + scale = decimal_internals_precision_and_scale.group(2) + return pa.decimal128(int(precision), int(scale)) + else: + raise ValueError(_dtype_error_msg(datasets_dtype, 'decimal128', examples=['decimal128(10, 2)', 'decimal128(4, -2)'], urls=['https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html'])) + elif decimal_internals_bits == '256': + decimal_internals_precision_and_scale = re.search('^(\\d+),\\s*(-?\\d+)$', decimal_matches.group(2)) + if decimal_internals_precision_and_scale: + precision = decimal_internals_precision_and_scale.group(1) + scale = decimal_internals_precision_and_scale.group(2) + return pa.decimal256(int(precision), int(scale)) + else: + raise ValueError(_dtype_error_msg(datasets_dtype, 'decimal256', examples=['decimal256(30, 2)', 'decimal256(38, -4)'], urls=['https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html'])) + else: + raise ValueError(_dtype_error_msg(datasets_dtype, 'decimal', examples=['decimal128(12, 3)', 'decimal256(40, 6)'], urls=['https://arrow.apache.org/docs/python/generated/pyarrow.decimal128.html', 'https://arrow.apache.org/docs/python/generated/pyarrow.decimal256.html'])) + raise ValueError(f"Neither {datasets_dtype} nor {datasets_dtype + '_'} seems to be a pyarrow data type. 
Please make sure to use a correct data type, see: https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions") + +def _cast_to_python_objects(obj: Any, only_1d_for_numpy: bool, optimize_list_casting: bool) -> Tuple[Any, bool]: + if config.TF_AVAILABLE and 'tensorflow' in sys.modules: + import tensorflow as tf + if config.TORCH_AVAILABLE and 'torch' in sys.modules: + import torch + if config.JAX_AVAILABLE and 'jax' in sys.modules: + import jax.numpy as jnp + if config.PIL_AVAILABLE and 'PIL' in sys.modules: + import PIL.Image + if isinstance(obj, np.ndarray): + if obj.ndim == 0: + return (obj[()], True) + elif not only_1d_for_numpy or obj.ndim == 1: + return (obj, False) + else: + return ([_cast_to_python_objects(x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0] for x in obj], True) + elif config.TORCH_AVAILABLE and 'torch' in sys.modules and isinstance(obj, torch.Tensor): + if obj.dtype == torch.bfloat16: + return (_cast_to_python_objects(obj.detach().to(torch.float).cpu().numpy(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0], True) + if obj.ndim == 0: + return (obj.detach().cpu().numpy()[()], True) + elif not only_1d_for_numpy or obj.ndim == 1: + return (obj.detach().cpu().numpy(), True) + else: + return ([_cast_to_python_objects(x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0] for x in obj.detach().cpu().numpy()], True) + elif config.TF_AVAILABLE and 'tensorflow' in sys.modules and isinstance(obj, tf.Tensor): + if obj.ndim == 0: + return (obj.numpy()[()], True) + elif not only_1d_for_numpy or obj.ndim == 1: + return (obj.numpy(), True) + else: + return ([_cast_to_python_objects(x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0] for x in obj.numpy()], True) + elif config.JAX_AVAILABLE and 'jax' in sys.modules and isinstance(obj, jnp.ndarray): + if obj.ndim == 0: + return (np.asarray(obj)[()], True) + elif not only_1d_for_numpy or obj.ndim == 1: + return (np.asarray(obj), True) + else: + return ([_cast_to_python_objects(x, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0] for x in np.asarray(obj)], True) + elif config.PIL_AVAILABLE and 'PIL' in sys.modules and isinstance(obj, PIL.Image.Image): + return (encode_pil_image(obj), True) + elif isinstance(obj, pd.Series): + return (_cast_to_python_objects(obj.tolist(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0], True) + elif isinstance(obj, pd.DataFrame): + return ({key: _cast_to_python_objects(value, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0] for (key, value) in obj.to_dict('series').items()}, True) + elif isinstance(obj, pd.Timestamp): + return (obj.to_pydatetime(), True) + elif isinstance(obj, pd.Timedelta): + return (obj.to_pytimedelta(), True) + elif isinstance(obj, Mapping): + has_changed = not isinstance(obj, dict) + output = {} + for (k, v) in obj.items(): + (casted_v, has_changed_v) = _cast_to_python_objects(v, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting) + has_changed |= has_changed_v + output[k] = casted_v + return (output if has_changed else obj, has_changed) + elif hasattr(obj, '__array__'): + return (_cast_to_python_objects(obj.__array__(), only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0], True) + elif isinstance(obj, (list, tuple)): + if len(obj) > 0: + for first_elmt in obj: 
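+ # Probe the first non-null, non-empty element as a representative; the list is only
+ # re-cast element by element if casting that element changes it or if
+ # optimize_list_casting is disabled, otherwise the list/tuple is returned as-is.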
+ if _check_non_null_non_empty_recursive(first_elmt): + break + (casted_first_elmt, has_changed_first_elmt) = _cast_to_python_objects(first_elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting) + if has_changed_first_elmt or not optimize_list_casting: + return ([_cast_to_python_objects(elmt, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0] for elmt in obj], True) + elif isinstance(obj, (list, tuple)): + return (obj, False) + else: + return (list(obj), True) + else: + return (obj, False) + else: + return (obj, False) + +def cast_to_python_objects(obj: Any, only_1d_for_numpy=False, optimize_list_casting=True) -> Any: + return _cast_to_python_objects(obj, only_1d_for_numpy=only_1d_for_numpy, optimize_list_casting=optimize_list_casting)[0] + +@dataclass +class Value: + dtype: str + id: Optional[str] = None + pa_type: ClassVar[Any] = None + _type: str = field(default='Value', init=False, repr=False) + + def __post_init__(self): + if self.dtype == 'double': + self.dtype = 'float64' + if self.dtype == 'float': + self.dtype = 'float32' + self.pa_type = string_to_arrow(self.dtype) + + def __call__(self): + return self.pa_type + + def encode_example(self, value): + if pa.types.is_boolean(self.pa_type): + return bool(value) + elif pa.types.is_integer(self.pa_type): + return int(value) + elif pa.types.is_floating(self.pa_type): + return float(value) + elif pa.types.is_string(self.pa_type): + return str(value) + else: + return value + +class _ArrayXD: + + def __post_init__(self): + self.shape = tuple(self.shape) + + def __call__(self): + pa_type = globals()[self.__class__.__name__ + 'ExtensionType'](self.shape, self.dtype) + return pa_type + + def encode_example(self, value): + return value + +@dataclass +class Array2D(_ArrayXD): + shape: tuple + dtype: str + id: Optional[str] = None + _type: str = field(default='Array2D', init=False, repr=False) + +@dataclass +class Array3D(_ArrayXD): + shape: tuple + dtype: str + id: Optional[str] = None + _type: str = field(default='Array3D', init=False, repr=False) + +@dataclass +class Array4D(_ArrayXD): + shape: tuple + dtype: str + id: Optional[str] = None + _type: str = field(default='Array4D', init=False, repr=False) + +@dataclass +class Array5D(_ArrayXD): + shape: tuple + dtype: str + id: Optional[str] = None + _type: str = field(default='Array5D', init=False, repr=False) + +class _ArrayXDExtensionType(pa.ExtensionType): + ndims: Optional[int] = None + + def __init__(self, shape: tuple, dtype: str): + if self.ndims is None or self.ndims <= 1: + raise ValueError('You must instantiate an array type with a value for dim that is > 1') + if len(shape) != self.ndims: + raise ValueError(f"shape={shape} and ndims={self.ndims} don't match") + for dim in range(1, self.ndims): + if shape[dim] is None: + raise ValueError(f'Support only dynamic size on first dimension. 
Got: {shape}') + self.shape = tuple(shape) + self.value_type = dtype + self.storage_dtype = self._generate_dtype(self.value_type) + pa.ExtensionType.__init__(self, self.storage_dtype, f'{self.__class__.__module__}.{self.__class__.__name__}') + + def __arrow_ext_serialize__(self): + return json.dumps((self.shape, self.value_type)).encode() + + @classmethod + def __arrow_ext_deserialize__(cls, storage_type, serialized): + args = json.loads(serialized) + return cls(*args) + + def __reduce__(self): + return (self.__arrow_ext_deserialize__, (self.storage_type, self.__arrow_ext_serialize__())) + + def __hash__(self): + return hash((self.__class__, self.shape, self.value_type)) + + def __arrow_ext_class__(self): + return ArrayExtensionArray + + def _generate_dtype(self, dtype): + dtype = string_to_arrow(dtype) + for d in reversed(self.shape): + dtype = pa.list_(dtype) + return dtype + + def to_pandas_dtype(self): + return PandasArrayExtensionDtype(self.value_type) + +class Array2DExtensionType(_ArrayXDExtensionType): + ndims = 2 + +class Array3DExtensionType(_ArrayXDExtensionType): + ndims = 3 + +class Array4DExtensionType(_ArrayXDExtensionType): + ndims = 4 + +class Array5DExtensionType(_ArrayXDExtensionType): + ndims = 5 +pa.register_extension_type(Array2DExtensionType((1, 2), 'int64')) +pa.register_extension_type(Array3DExtensionType((1, 2, 3), 'int64')) +pa.register_extension_type(Array4DExtensionType((1, 2, 3, 4), 'int64')) +pa.register_extension_type(Array5DExtensionType((1, 2, 3, 4, 5), 'int64')) + +def _is_zero_copy_only(pa_type: pa.DataType, unnest: bool=False) -> bool: + + def _unnest_pa_type(pa_type: pa.DataType) -> pa.DataType: + if pa.types.is_list(pa_type): + return _unnest_pa_type(pa_type.value_type) + return pa_type + if unnest: + pa_type = _unnest_pa_type(pa_type) + return pa.types.is_primitive(pa_type) and (not (pa.types.is_boolean(pa_type) or pa.types.is_temporal(pa_type))) + +class ArrayExtensionArray(pa.ExtensionArray): + + def __array__(self): + zero_copy_only = _is_zero_copy_only(self.storage.type, unnest=True) + return self.to_numpy(zero_copy_only=zero_copy_only) + + def __getitem__(self, i): + return self.storage[i] + + def to_numpy(self, zero_copy_only=True): + storage: pa.ListArray = self.storage + null_mask = storage.is_null().to_numpy(zero_copy_only=False) + if self.type.shape[0] is not None: + size = 1 + null_indices = np.arange(len(storage))[null_mask] - np.arange(np.sum(null_mask)) + for i in range(self.type.ndims): + size *= self.type.shape[i] + storage = storage.flatten() + numpy_arr = storage.to_numpy(zero_copy_only=zero_copy_only) + numpy_arr = numpy_arr.reshape(len(self) - len(null_indices), *self.type.shape) + if len(null_indices): + numpy_arr = np.insert(numpy_arr.astype(np.float64), null_indices, np.nan, axis=0) + else: + shape = self.type.shape + ndims = self.type.ndims + arrays = [] + first_dim_offsets = np.array([off.as_py() for off in storage.offsets]) + for (i, is_null) in enumerate(null_mask): + if is_null: + arrays.append(np.nan) + else: + storage_el = storage[i:i + 1] + first_dim = first_dim_offsets[i + 1] - first_dim_offsets[i] + for _ in range(ndims): + storage_el = storage_el.flatten() + numpy_arr = storage_el.to_numpy(zero_copy_only=zero_copy_only) + arrays.append(numpy_arr.reshape(first_dim, *shape[1:])) + if len(np.unique(np.diff(first_dim_offsets))) > 1: + numpy_arr = np.empty(len(arrays), dtype=object) + numpy_arr[:] = arrays + else: + numpy_arr = np.array(arrays) + return numpy_arr + + def to_pylist(self): + zero_copy_only = 
_is_zero_copy_only(self.storage.type, unnest=True) + numpy_arr = self.to_numpy(zero_copy_only=zero_copy_only) + if self.type.shape[0] is None and numpy_arr.dtype == object: + return [arr.tolist() for arr in numpy_arr.tolist()] + else: + return numpy_arr.tolist() + +class PandasArrayExtensionDtype(PandasExtensionDtype): + _metadata = 'value_type' + + def __init__(self, value_type: Union['PandasArrayExtensionDtype', np.dtype]): + self._value_type = value_type + + def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]): + if isinstance(array, pa.ChunkedArray): + array = array.type.wrap_array(pa.concat_arrays([chunk.storage for chunk in array.chunks])) + zero_copy_only = _is_zero_copy_only(array.storage.type, unnest=True) + numpy_arr = array.to_numpy(zero_copy_only=zero_copy_only) + return PandasArrayExtensionArray(numpy_arr) + + @classmethod + def construct_array_type(cls): + return PandasArrayExtensionArray + + @property + def type(self) -> type: + return np.ndarray + + @property + def kind(self) -> str: + return 'O' + + @property + def name(self) -> str: + return f'array[{self.value_type}]' + + @property + def value_type(self) -> np.dtype: + return self._value_type + +class PandasArrayExtensionArray(PandasExtensionArray): + + def __init__(self, data: np.ndarray, copy: bool=False): + self._data = data if not copy else np.array(data) + self._dtype = PandasArrayExtensionDtype(data.dtype) + + def __array__(self, dtype=None): + if dtype == np.dtype(object): + out = np.empty(len(self._data), dtype=object) + for i in range(len(self._data)): + out[i] = self._data[i] + return out + if dtype is None: + return self._data + else: + return self._data.astype(dtype) + + def copy(self, deep: bool=False) -> 'PandasArrayExtensionArray': + return PandasArrayExtensionArray(self._data, copy=True) + + @classmethod + def _from_sequence(cls, scalars, dtype: Optional[PandasArrayExtensionDtype]=None, copy: bool=False) -> 'PandasArrayExtensionArray': + if len(scalars) > 1 and all((isinstance(x, np.ndarray) and x.shape == scalars[0].shape and (x.dtype == scalars[0].dtype) for x in scalars)): + data = np.array(scalars, dtype=dtype if dtype is None else dtype.value_type, copy=copy) + else: + data = np.empty(len(scalars), dtype=object) + data[:] = scalars + return cls(data, copy=copy) + + @classmethod + def _concat_same_type(cls, to_concat: Sequence_['PandasArrayExtensionArray']) -> 'PandasArrayExtensionArray': + if len(to_concat) > 1 and all((va._data.shape == to_concat[0]._data.shape and va._data.dtype == to_concat[0]._data.dtype for va in to_concat)): + data = np.vstack([va._data for va in to_concat]) + else: + data = np.empty(len(to_concat), dtype=object) + data[:] = [va._data for va in to_concat] + return cls(data, copy=False) + + @property + def dtype(self) -> PandasArrayExtensionDtype: + return self._dtype + + @property + def nbytes(self) -> int: + return self._data.nbytes + + def isna(self) -> np.ndarray: + return np.array([pd.isna(arr).any() for arr in self._data]) + + def __setitem__(self, key: Union[int, slice, np.ndarray], value: Any) -> None: + raise NotImplementedError() + + def __getitem__(self, item: Union[int, slice, np.ndarray]) -> Union[np.ndarray, 'PandasArrayExtensionArray']: + if isinstance(item, int): + return self._data[item] + return PandasArrayExtensionArray(self._data[item], copy=False) + + def take(self, indices: Sequence_[int], allow_fill: bool=False, fill_value: bool=None) -> 'PandasArrayExtensionArray': + indices: np.ndarray = np.asarray(indices, dtype=int) + if allow_fill: + 
fill_value = self.dtype.na_value if fill_value is None else np.asarray(fill_value, dtype=self.dtype.value_type) + mask = indices == -1 + if (indices < -1).any(): + raise ValueError('Invalid value in `indices`, must be all >= -1 for `allow_fill` is True') + elif len(self) > 0: + pass + elif not np.all(mask): + raise IndexError('Invalid take for empty PandasArrayExtensionArray, must be all -1.') + else: + data = np.array([fill_value] * len(indices), dtype=self.dtype.value_type) + return PandasArrayExtensionArray(data, copy=False) + took = self._data.take(indices, axis=0) + if allow_fill and mask.any(): + took[mask] = [fill_value] * np.sum(mask) + return PandasArrayExtensionArray(took, copy=False) + + def __len__(self) -> int: + return len(self._data) + + def __eq__(self, other) -> np.ndarray: + if not isinstance(other, PandasArrayExtensionArray): + raise NotImplementedError(f'Invalid type to compare to: {type(other)}') + return (self._data == other._data).all() + +def pandas_types_mapper(dtype): + if isinstance(dtype, _ArrayXDExtensionType): + return PandasArrayExtensionDtype(dtype.value_type) + +@dataclass +class ClassLabel: + num_classes: InitVar[Optional[int]] = None + names: List[str] = None + names_file: InitVar[Optional[str]] = None + id: Optional[str] = None + dtype: ClassVar[str] = 'int64' + pa_type: ClassVar[Any] = pa.int64() + _str2int: ClassVar[Dict[str, int]] = None + _int2str: ClassVar[Dict[int, int]] = None + _type: str = field(default='ClassLabel', init=False, repr=False) + + def __post_init__(self, num_classes, names_file): + self.num_classes = num_classes + self.names_file = names_file + if self.names_file is not None and self.names is not None: + raise ValueError('Please provide either names or names_file but not both.') + if self.names is None: + if self.names_file is not None: + self.names = self._load_names_from_file(self.names_file) + elif self.num_classes is not None: + self.names = [str(i) for i in range(self.num_classes)] + else: + raise ValueError('Please provide either num_classes, names or names_file.') + elif not isinstance(self.names, SequenceABC): + raise TypeError(f'Please provide names as a list, is {type(self.names)}') + if self.num_classes is None: + self.num_classes = len(self.names) + elif self.num_classes != len(self.names): + raise ValueError(f'ClassLabel number of names do not match the defined num_classes. Got {len(self.names)} names VS {self.num_classes} num_classes') + self._int2str = [str(name) for name in self.names] + self._str2int = {name: i for (i, name) in enumerate(self._int2str)} + if len(self._int2str) != len(self._str2int): + raise ValueError('Some label names are duplicated. 
Each label name should be unique.') + + def __call__(self): + return self.pa_type + + def str2int(self, values: Union[str, Iterable]) -> Union[int, Iterable]: + if not isinstance(values, str) and (not isinstance(values, Iterable)): + raise ValueError(f'Values {values} should be a string or an Iterable (list, numpy array, pytorch, tensorflow tensors)') + return_list = True + if isinstance(values, str): + values = [values] + return_list = False + output = [self._strval2int(value) for value in values] + return output if return_list else output[0] + + def _strval2int(self, value: str) -> int: + failed_parse = False + value = str(value) + int_value = self._str2int.get(value) + if int_value is None: + int_value = self._str2int.get(value.strip()) + if int_value is None: + try: + int_value = int(value) + except ValueError: + failed_parse = True + else: + if int_value < -1 or int_value >= self.num_classes: + failed_parse = True + if failed_parse: + raise ValueError(f'Invalid string class label {value}') + return int_value + + def int2str(self, values: Union[int, Iterable]) -> Union[str, Iterable]: + if not isinstance(values, int) and (not isinstance(values, Iterable)): + raise ValueError(f'Values {values} should be an integer or an Iterable (list, numpy array, pytorch, tensorflow tensors)') + return_list = True + if isinstance(values, int): + values = [values] + return_list = False + for v in values: + if not 0 <= v < self.num_classes: + raise ValueError(f'Invalid integer class label {v:d}') + output = [self._int2str[int(v)] for v in values] + return output if return_list else output[0] + + def encode_example(self, example_data): + if self.num_classes is None: + raise ValueError('Trying to use ClassLabel feature with undefined number of class. Please set ClassLabel.names or num_classes.') + if isinstance(example_data, str): + example_data = self.str2int(example_data) + if not -1 <= example_data < self.num_classes: + raise ValueError(f'Class label {example_data:d} greater than configured num_classes {self.num_classes}') + return example_data + + def cast_storage(self, storage: Union[pa.StringArray, pa.IntegerArray]) -> pa.Int64Array: + if isinstance(storage, pa.IntegerArray) and len(storage) > 0: + min_max = pc.min_max(storage).as_py() + if min_max['max'] is not None and min_max['max'] >= self.num_classes: + raise ValueError(f"Class label {min_max['max']} greater than configured num_classes {self.num_classes}") + elif isinstance(storage, pa.StringArray): + storage = pa.array([self._strval2int(label) if label is not None else None for label in storage.to_pylist()]) + return array_cast(storage, self.pa_type) + + @staticmethod + def _load_names_from_file(names_filepath): + with open(names_filepath, encoding='utf-8') as f: + return [name.strip() for name in f.read().split('\n') if name.strip()] + +@dataclass +class Sequence: + feature: Any + length: int = -1 + id: Optional[str] = None + dtype: ClassVar[str] = 'list' + pa_type: ClassVar[Any] = None + _type: str = field(default='Sequence', init=False, repr=False) + +@dataclass +class LargeList: + feature: Any + id: Optional[str] = None + pa_type: ClassVar[Any] = None + _type: str = field(default='LargeList', init=False, repr=False) +FeatureType = Union[dict, list, tuple, Value, ClassLabel, Translation, TranslationVariableLanguages, LargeList, Sequence, Array2D, Array3D, Array4D, Array5D, Audio, Image] + +def _check_non_null_non_empty_recursive(obj, schema: Optional[FeatureType]=None) -> bool: + if obj is None: + return False + elif isinstance(obj, (list, 
tuple)) and (schema is None or isinstance(schema, (list, tuple, LargeList, Sequence))): + if len(obj) > 0: + if schema is None: + pass + elif isinstance(schema, (list, tuple)): + schema = schema[0] + else: + schema = schema.feature + return _check_non_null_non_empty_recursive(obj[0], schema) + else: + return False + else: + return True + +def get_nested_type(schema: FeatureType) -> pa.DataType: + if isinstance(schema, Features): + return pa.struct({key: get_nested_type(schema[key]) for key in schema}) + elif isinstance(schema, dict): + return pa.struct({key: get_nested_type(schema[key]) for key in schema}) + elif isinstance(schema, (list, tuple)): + if len(schema) != 1: + raise ValueError('When defining list feature, you should just provide one example of the inner type') + value_type = get_nested_type(schema[0]) + return pa.list_(value_type) + elif isinstance(schema, LargeList): + value_type = get_nested_type(schema.feature) + return pa.large_list(value_type) + elif isinstance(schema, Sequence): + value_type = get_nested_type(schema.feature) + if isinstance(schema.feature, dict): + data_type = pa.struct({f.name: pa.list_(f.type, schema.length) for f in value_type}) + else: + data_type = pa.list_(value_type, schema.length) + return data_type + return schema() + +def encode_nested_example(schema, obj, level=0): + if isinstance(schema, dict): + if level == 0 and obj is None: + raise ValueError('Got None but expected a dictionary instead') + return {k: encode_nested_example(schema[k], obj.get(k), level=level + 1) for k in schema} if obj is not None else None + elif isinstance(schema, (list, tuple)): + sub_schema = schema[0] + if obj is None: + return None + elif isinstance(obj, np.ndarray): + return encode_nested_example(schema, obj.tolist()) + else: + if len(obj) > 0: + for first_elmt in obj: + if _check_non_null_non_empty_recursive(first_elmt, sub_schema): + break + if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt: + return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj] + return list(obj) + elif isinstance(schema, LargeList): + if obj is None: + return None + else: + if len(obj) > 0: + sub_schema = schema.feature + for first_elmt in obj: + if _check_non_null_non_empty_recursive(first_elmt, sub_schema): + break + if encode_nested_example(sub_schema, first_elmt, level=level + 1) != first_elmt: + return [encode_nested_example(sub_schema, o, level=level + 1) for o in obj] + return list(obj) + elif isinstance(schema, Sequence): + if obj is None: + return None + if isinstance(schema.feature, dict): + list_dict = {} + if isinstance(obj, (list, tuple)): + for k in schema.feature: + list_dict[k] = [encode_nested_example(schema.feature[k], o.get(k), level=level + 1) for o in obj] + return list_dict + else: + for k in schema.feature: + list_dict[k] = [encode_nested_example(schema.feature[k], o, level=level + 1) for o in obj[k]] if k in obj else None + return list_dict + if isinstance(obj, str): + raise ValueError(f"Got a string but expected a list instead: '{obj}'") + else: + if len(obj) > 0: + for first_elmt in obj: + if _check_non_null_non_empty_recursive(first_elmt, schema.feature): + break + if not isinstance(first_elmt, list) or encode_nested_example(schema.feature, first_elmt, level=level + 1) != first_elmt: + return [encode_nested_example(schema.feature, o, level=level + 1) for o in obj] + return list(obj) + elif isinstance(schema, (Audio, Image, ClassLabel, TranslationVariableLanguages, Value, _ArrayXD)): + return schema.encode_example(obj) 
if obj is not None else None + return obj + +def decode_nested_example(schema, obj, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]]=None): + if isinstance(schema, dict): + return {k: decode_nested_example(sub_schema, sub_obj) for (k, (sub_schema, sub_obj)) in zip_dict(schema, obj)} if obj is not None else None + elif isinstance(schema, (list, tuple)): + sub_schema = schema[0] + if obj is None: + return None + else: + if len(obj) > 0: + for first_elmt in obj: + if _check_non_null_non_empty_recursive(first_elmt, sub_schema): + break + if decode_nested_example(sub_schema, first_elmt) != first_elmt: + return [decode_nested_example(sub_schema, o) for o in obj] + return list(obj) + elif isinstance(schema, LargeList): + if obj is None: + return None + else: + sub_schema = schema.feature + if len(obj) > 0: + for first_elmt in obj: + if _check_non_null_non_empty_recursive(first_elmt, sub_schema): + break + if decode_nested_example(sub_schema, first_elmt) != first_elmt: + return [decode_nested_example(sub_schema, o) for o in obj] + return list(obj) + elif isinstance(schema, Sequence): + if isinstance(schema.feature, dict): + return {k: decode_nested_example([schema.feature[k]], obj[k]) for k in schema.feature} + else: + return decode_nested_example([schema.feature], obj) + elif isinstance(schema, (Audio, Image)): + if obj is not None and schema.decode: + return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) + return obj +_FEATURE_TYPES: Dict[str, FeatureType] = {Value.__name__: Value, ClassLabel.__name__: ClassLabel, Translation.__name__: Translation, TranslationVariableLanguages.__name__: TranslationVariableLanguages, LargeList.__name__: LargeList, Sequence.__name__: Sequence, Array2D.__name__: Array2D, Array3D.__name__: Array3D, Array4D.__name__: Array4D, Array5D.__name__: Array5D, Audio.__name__: Audio, Image.__name__: Image} + +@experimental +def register_feature(feature_cls: type, feature_type: str): + if feature_type in _FEATURE_TYPES: + logger.warning(f"Overwriting feature type '{feature_type}' ({_FEATURE_TYPES[feature_type].__name__} -> {feature_cls.__name__})") + _FEATURE_TYPES[feature_type] = feature_cls + +def generate_from_dict(obj: Any): + if isinstance(obj, list): + return [generate_from_dict(value) for value in obj] + if '_type' not in obj or isinstance(obj['_type'], dict): + return {key: generate_from_dict(value) for (key, value) in obj.items()} + obj = dict(obj) + _type = obj.pop('_type') + class_type = _FEATURE_TYPES.get(_type, None) or globals().get(_type, None) + if class_type is None: + raise ValueError(f"Feature type '{_type}' not found. 
Available feature types: {list(_FEATURE_TYPES.keys())}") + if class_type == LargeList: + feature = obj.pop('feature') + return LargeList(feature=generate_from_dict(feature), **obj) + if class_type == Sequence: + feature = obj.pop('feature') + return Sequence(feature=generate_from_dict(feature), **obj) + field_names = {f.name for f in fields(class_type)} + return class_type(**{k: v for (k, v) in obj.items() if k in field_names}) + +def generate_from_arrow_type(pa_type: pa.DataType) -> FeatureType: + if isinstance(pa_type, pa.StructType): + return {field.name: generate_from_arrow_type(field.type) for field in pa_type} + elif isinstance(pa_type, pa.FixedSizeListType): + return Sequence(feature=generate_from_arrow_type(pa_type.value_type), length=pa_type.list_size) + elif isinstance(pa_type, pa.ListType): + feature = generate_from_arrow_type(pa_type.value_type) + if isinstance(feature, (dict, tuple, list)): + return [feature] + return Sequence(feature=feature) + elif isinstance(pa_type, pa.LargeListType): + feature = generate_from_arrow_type(pa_type.value_type) + return LargeList(feature=feature) + elif isinstance(pa_type, _ArrayXDExtensionType): + array_feature = [None, None, Array2D, Array3D, Array4D, Array5D][pa_type.ndims] + return array_feature(shape=pa_type.shape, dtype=pa_type.value_type) + elif isinstance(pa_type, pa.DataType): + return Value(dtype=_arrow_to_datasets_dtype(pa_type)) + else: + raise ValueError(f'Cannot convert {pa_type} to a Feature type.') + +def numpy_to_pyarrow_listarray(arr: np.ndarray, type: pa.DataType=None) -> pa.ListArray: + arr = np.array(arr) + values = pa.array(arr.flatten(), type=type) + for i in range(arr.ndim - 1): + n_offsets = reduce(mul, arr.shape[:arr.ndim - i - 1], 1) + step_offsets = arr.shape[arr.ndim - i - 1] + offsets = pa.array(np.arange(n_offsets + 1) * step_offsets, type=pa.int32()) + values = pa.ListArray.from_arrays(offsets, values) + return values + +def list_of_pa_arrays_to_pyarrow_listarray(l_arr: List[Optional[pa.Array]]) -> pa.ListArray: + null_mask = np.array([arr is None for arr in l_arr]) + null_indices = np.arange(len(null_mask))[null_mask] - np.arange(np.sum(null_mask)) + l_arr = [arr for arr in l_arr if arr is not None] + offsets = np.cumsum([0] + [len(arr) for arr in l_arr], dtype=object) + offsets = np.insert(offsets, null_indices, None) + offsets = pa.array(offsets, type=pa.int32()) + values = pa.concat_arrays(l_arr) + return pa.ListArray.from_arrays(offsets, values) + +def list_of_np_array_to_pyarrow_listarray(l_arr: List[np.ndarray], type: pa.DataType=None) -> pa.ListArray: + if len(l_arr) > 0: + return list_of_pa_arrays_to_pyarrow_listarray([numpy_to_pyarrow_listarray(arr, type=type) if arr is not None else None for arr in l_arr]) + else: + return pa.array([], type=type) + +def contains_any_np_array(data: Any): + if isinstance(data, np.ndarray): + return True + elif isinstance(data, list): + return contains_any_np_array(first_non_null_value(data)[1]) + else: + return False + +def any_np_array_to_pyarrow_listarray(data: Union[np.ndarray, List], type: pa.DataType=None) -> pa.ListArray: + if isinstance(data, np.ndarray): + return numpy_to_pyarrow_listarray(data, type=type) + elif isinstance(data, list): + return list_of_pa_arrays_to_pyarrow_listarray([any_np_array_to_pyarrow_listarray(i, type=type) for i in data]) + +def to_pyarrow_listarray(data: Any, pa_type: _ArrayXDExtensionType) -> pa.Array: + if contains_any_np_array(data): + return any_np_array_to_pyarrow_listarray(data, type=pa_type.value_type) + else: + return 
pa.array(data, pa_type.storage_dtype) + +def _visit(feature: FeatureType, func: Callable[[FeatureType], Optional[FeatureType]]) -> FeatureType: + if isinstance(feature, dict): + out = func({k: _visit(f, func) for (k, f) in feature.items()}) + elif isinstance(feature, (list, tuple)): + out = func([_visit(feature[0], func)]) + elif isinstance(feature, LargeList): + out = func(LargeList(_visit(feature.feature, func))) + elif isinstance(feature, Sequence): + out = func(Sequence(_visit(feature.feature, func), length=feature.length)) + else: + out = func(feature) + return feature if out is None else out + +def require_decoding(feature: FeatureType, ignore_decode_attribute: bool=False) -> bool: + if isinstance(feature, dict): + return any((require_decoding(f) for f in feature.values())) + elif isinstance(feature, (list, tuple)): + return require_decoding(feature[0]) + elif isinstance(feature, LargeList): + return require_decoding(feature.feature) + elif isinstance(feature, Sequence): + return require_decoding(feature.feature) + else: + return hasattr(feature, 'decode_example') and (feature.decode if not ignore_decode_attribute else True) + +def require_storage_cast(feature: FeatureType) -> bool: + if isinstance(feature, dict): + return any((require_storage_cast(f) for f in feature.values())) + elif isinstance(feature, (list, tuple)): + return require_storage_cast(feature[0]) + elif isinstance(feature, LargeList): + return require_storage_cast(feature.feature) + elif isinstance(feature, Sequence): + return require_storage_cast(feature.feature) + else: + return hasattr(feature, 'cast_storage') + +def require_storage_embed(feature: FeatureType) -> bool: + if isinstance(feature, dict): + return any((require_storage_cast(f) for f in feature.values())) + elif isinstance(feature, (list, tuple)): + return require_storage_cast(feature[0]) + elif isinstance(feature, LargeList): + return require_storage_cast(feature.feature) + elif isinstance(feature, Sequence): + return require_storage_cast(feature.feature) + else: + return hasattr(feature, 'embed_storage') + +def keep_features_dicts_synced(func): + + @wraps(func) + def wrapper(*args, **kwargs): + if args: + self: 'Features' = args[0] + args = args[1:] + else: + self: 'Features' = kwargs.pop('self') + out = func(self, *args, **kwargs) + assert hasattr(self, '_column_requires_decoding') + self._column_requires_decoding = {col: require_decoding(feature) for (col, feature) in self.items()} + return out + wrapper._decorator_name_ = '_keep_dicts_synced' + return wrapper + +class Features(dict): + + def __init__(*args, **kwargs): + if not args: + raise TypeError("descriptor '__init__' of 'Features' object needs an argument") + (self, *args) = args + super(Features, self).__init__(*args, **kwargs) + self._column_requires_decoding: Dict[str, bool] = {col: require_decoding(feature) for (col, feature) in self.items()} + __setitem__ = keep_features_dicts_synced(dict.__setitem__) + __delitem__ = keep_features_dicts_synced(dict.__delitem__) + update = keep_features_dicts_synced(dict.update) + setdefault = keep_features_dicts_synced(dict.setdefault) + pop = keep_features_dicts_synced(dict.pop) + popitem = keep_features_dicts_synced(dict.popitem) + clear = keep_features_dicts_synced(dict.clear) + + def __reduce__(self): + return (Features, (dict(self),)) + + @property + def type(self): + return get_nested_type(self) + + @property + def arrow_schema(self): + hf_metadata = {'info': {'features': self.to_dict()}} + return pa.schema(self.type).with_metadata({'huggingface': 
json.dumps(hf_metadata)}) + + @classmethod + def from_arrow_schema(cls, pa_schema: pa.Schema) -> 'Features': + metadata_features = Features() + if pa_schema.metadata is not None and 'huggingface'.encode('utf-8') in pa_schema.metadata: + metadata = json.loads(pa_schema.metadata['huggingface'.encode('utf-8')].decode()) + if 'info' in metadata and 'features' in metadata['info'] and (metadata['info']['features'] is not None): + metadata_features = Features.from_dict(metadata['info']['features']) + metadata_features_schema = metadata_features.arrow_schema + obj = {field.name: metadata_features[field.name] if field.name in metadata_features and metadata_features_schema.field(field.name) == field else generate_from_arrow_type(field.type) for field in pa_schema} + return cls(**obj) + + @classmethod + def from_dict(cls, dic) -> 'Features': + obj = generate_from_dict(dic) + return cls(**obj) + + def to_dict(self): + return asdict(self) + + def _to_yaml_list(self) -> list: + yaml_data = self.to_dict() + + def simplify(feature: dict) -> dict: + if not isinstance(feature, dict): + raise TypeError(f'Expected a dict but got a {type(feature)}: {feature}') + for list_type in ['large_list', 'list', 'sequence']: + if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ['dtype']: + feature[list_type] = feature[list_type]['dtype'] + if isinstance(feature.get(list_type), dict) and list(feature[list_type]) == ['struct']: + feature[list_type] = feature[list_type]['struct'] + if isinstance(feature.get('class_label'), dict) and isinstance(feature['class_label'].get('names'), list): + feature['class_label']['names'] = {str(label_id): label_name for (label_id, label_name) in enumerate(feature['class_label']['names'])} + return feature + + def to_yaml_inner(obj: Union[dict, list]) -> dict: + if isinstance(obj, dict): + _type = obj.pop('_type', None) + if _type == 'LargeList': + _feature = obj.pop('feature') + return simplify({'large_list': to_yaml_inner(_feature), **obj}) + elif _type == 'Sequence': + _feature = obj.pop('feature') + return simplify({'sequence': to_yaml_inner(_feature), **obj}) + elif _type == 'Value': + return obj + elif _type and (not obj): + return {'dtype': camelcase_to_snakecase(_type)} + elif _type: + return {'dtype': simplify({camelcase_to_snakecase(_type): obj})} + else: + return {'struct': [{'name': name, **to_yaml_inner(_feature)} for (name, _feature) in obj.items()]} + elif isinstance(obj, list): + return simplify({'list': simplify(to_yaml_inner(obj[0]))}) + elif isinstance(obj, tuple): + return to_yaml_inner(list(obj)) + else: + raise TypeError(f'Expected a dict or a list but got {type(obj)}: {obj}') + + def to_yaml_types(obj: dict) -> dict: + if isinstance(obj, dict): + return {k: to_yaml_types(v) for (k, v) in obj.items()} + elif isinstance(obj, list): + return [to_yaml_types(v) for v in obj] + elif isinstance(obj, tuple): + return to_yaml_types(list(obj)) + else: + return obj + return to_yaml_types(to_yaml_inner(yaml_data)['struct']) + + @classmethod + def _from_yaml_list(cls, yaml_data: list) -> 'Features': + yaml_data = copy.deepcopy(yaml_data) + + def unsimplify(feature: dict) -> dict: + if not isinstance(feature, dict): + raise TypeError(f'Expected a dict but got a {type(feature)}: {feature}') + for list_type in ['large_list', 'list', 'sequence']: + if isinstance(feature.get(list_type), str): + feature[list_type] = {'dtype': feature[list_type]} + if isinstance(feature.get('class_label'), dict) and isinstance(feature['class_label'].get('names'), dict): + label_ids 
= sorted(feature['class_label']['names'], key=int) + if label_ids and [int(label_id) for label_id in label_ids] != list(range(int(label_ids[-1]) + 1)): + raise ValueError(f'ClassLabel expected a value for all label ids [0:{int(label_ids[-1]) + 1}] but some ids are missing.') + feature['class_label']['names'] = [feature['class_label']['names'][label_id] for label_id in label_ids] + return feature + + def from_yaml_inner(obj: Union[dict, list]) -> Union[dict, list]: + if isinstance(obj, dict): + if not obj: + return {} + _type = next(iter(obj)) + if _type == 'large_list': + _feature = unsimplify(obj).pop(_type) + return {'feature': from_yaml_inner(_feature), **obj, '_type': 'LargeList'} + if _type == 'sequence': + _feature = unsimplify(obj).pop(_type) + return {'feature': from_yaml_inner(_feature), **obj, '_type': 'Sequence'} + if _type == 'list': + return [from_yaml_inner(unsimplify(obj)[_type])] + if _type == 'struct': + return from_yaml_inner(obj['struct']) + elif _type == 'dtype': + if isinstance(obj['dtype'], str): + try: + Value(obj['dtype']) + return {**obj, '_type': 'Value'} + except ValueError: + return {'_type': snakecase_to_camelcase(obj['dtype'])} + else: + return from_yaml_inner(obj['dtype']) + else: + return {'_type': snakecase_to_camelcase(_type), **unsimplify(obj)[_type]} + elif isinstance(obj, list): + names = [_feature.pop('name') for _feature in obj] + return {name: from_yaml_inner(_feature) for (name, _feature) in zip(names, obj)} + else: + raise TypeError(f'Expected a dict or a list but got {type(obj)}: {obj}') + return cls.from_dict(from_yaml_inner(yaml_data)) + + def encode_example(self, example): + example = cast_to_python_objects(example) + return encode_nested_example(self, example) + + def encode_column(self, column, column_name: str): + column = cast_to_python_objects(column) + return [encode_nested_example(self[column_name], obj, level=1) for obj in column] + + def encode_batch(self, batch): + encoded_batch = {} + if set(batch) != set(self): + raise ValueError(f'Column mismatch between batch {set(batch)} and features {set(self)}') + for (key, column) in batch.items(): + column = cast_to_python_objects(column) + encoded_batch[key] = [encode_nested_example(self[key], obj, level=1) for obj in column] + return encoded_batch + + def decode_example(self, example: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]]=None): + return {column_name: decode_nested_example(feature, value, token_per_repo_id=token_per_repo_id) if self._column_requires_decoding[column_name] else value for (column_name, (feature, value)) in zip_dict({key: value for (key, value) in self.items() if key in example}, example)} + + def decode_column(self, column: list, column_name: str): + return [decode_nested_example(self[column_name], value) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column + + def decode_batch(self, batch: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]]=None): + decoded_batch = {} + for (column_name, column) in batch.items(): + decoded_batch[column_name] = [decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id) if value is not None else None for value in column] if self._column_requires_decoding[column_name] else column + return decoded_batch + + def copy(self) -> 'Features': + return copy.deepcopy(self) + + def reorder_fields_as(self, other: 'Features') -> 'Features': + + def recursive_reorder(source, target, stack=''): + stack_position = ' at ' + 
stack[1:] if stack else '' + if isinstance(target, Sequence): + target = target.feature + if isinstance(target, dict): + target = {k: [v] for (k, v) in target.items()} + else: + target = [target] + if isinstance(source, Sequence): + sequence_kwargs = vars(source).copy() + source = sequence_kwargs.pop('feature') + if isinstance(source, dict): + source = {k: [v] for (k, v) in source.items()} + reordered = recursive_reorder(source, target, stack) + return Sequence({k: v[0] for (k, v) in reordered.items()}, **sequence_kwargs) + else: + source = [source] + reordered = recursive_reorder(source, target, stack) + return Sequence(reordered[0], **sequence_kwargs) + elif isinstance(source, dict): + if not isinstance(target, dict): + raise ValueError(f'Type mismatch: between {source} and {target}' + stack_position) + if sorted(source) != sorted(target): + message = f'Keys mismatch: between {source} (source) and {target} (target).\n{source.keys() - target.keys()} are missing from target and {target.keys() - source.keys()} are missing from source' + stack_position + raise ValueError(message) + return {key: recursive_reorder(source[key], target[key], stack + f'.{key}') for key in target} + elif isinstance(source, list): + if not isinstance(target, list): + raise ValueError(f'Type mismatch: between {source} and {target}' + stack_position) + if len(source) != len(target): + raise ValueError(f'Length mismatch: between {source} and {target}' + stack_position) + return [recursive_reorder(source[i], target[i], stack + '.') for i in range(len(target))] + elif isinstance(source, LargeList): + if not isinstance(target, LargeList): + raise ValueError(f'Type mismatch: between {source} and {target}' + stack_position) + return LargeList(recursive_reorder(source.feature, target.feature, stack)) + else: + return source + return Features(recursive_reorder(self, other)) + + def flatten(self, max_depth=16) -> 'Features': + for depth in range(1, max_depth): + no_change = True + flattened = self.copy() + for (column_name, subfeature) in self.items(): + if isinstance(subfeature, dict): + no_change = False + flattened.update({f'{column_name}.{k}': v for (k, v) in subfeature.items()}) + del flattened[column_name] + elif isinstance(subfeature, Sequence) and isinstance(subfeature.feature, dict): + no_change = False + flattened.update({f'{column_name}.{k}': Sequence(v) if not isinstance(v, dict) else [v] for (k, v) in subfeature.feature.items()}) + del flattened[column_name] + elif hasattr(subfeature, 'flatten') and subfeature.flatten() != subfeature: + no_change = False + flattened.update({f'{column_name}.{k}': v for (k, v) in subfeature.flatten().items()}) + del flattened[column_name] + self = flattened + if no_change: + break + return self + +def _align_features(features_list: List[Features]) -> List[Features]: + name2feature = {} + for features in features_list: + for (k, v) in features.items(): + if k in name2feature and isinstance(v, dict): + name2feature[k] = _align_features([name2feature[k], v])[0] + elif k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == 'null'): + name2feature[k] = v + return [Features({k: name2feature[k] for k in features.keys()}) for features in features_list] + +def _check_if_features_can_be_aligned(features_list: List[Features]): + name2feature = {} + for features in features_list: + for (k, v) in features.items(): + if k not in name2feature or (isinstance(name2feature[k], Value) and name2feature[k].dtype == 'null'): + name2feature[k] = v + for features in 
features_list: + for (k, v) in features.items(): + if isinstance(v, dict) and isinstance(name2feature[k], dict): + _check_if_features_can_be_aligned([name2feature[k], v]) + elif not (isinstance(v, Value) and v.dtype == 'null') and name2feature[k] != v: + raise ValueError(f"""The features can't be aligned because the key {k} of features {features} has unexpected type - {v} (expected either {name2feature[k]} or Value("null").""") + +# File: datasets-main/src/datasets/features/image.py +import os +import sys +import warnings +from dataclasses import dataclass, field +from io import BytesIO +from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union +import numpy as np +import pyarrow as pa +from .. import config +from ..download.download_config import DownloadConfig +from ..table import array_cast +from ..utils.file_utils import is_local_path, xopen +from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict +if TYPE_CHECKING: + import PIL.Image + from .features import FeatureType +_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None +_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>' +_VALID_IMAGE_ARRAY_DTPYES = [np.dtype('|b1'), np.dtype('|u1'), np.dtype('u2'), np.dtype('i2'), np.dtype('u4'), np.dtype('i4'), np.dtype('f4'), np.dtype('f8')] + +@dataclass +class Image: + mode: Optional[str] = None + decode: bool = True + id: Optional[str] = None + dtype: ClassVar[str] = 'PIL.Image.Image' + pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()}) + _type: str = field(default='Image', init=False, repr=False) + + def __call__(self): + return self.pa_type + + def encode_example(self, value: Union[str, bytes, dict, np.ndarray, 'PIL.Image.Image']) -> dict: + if config.PIL_AVAILABLE: + import PIL.Image + else: + raise ImportError("To support encoding images, please install 'Pillow'.") + if isinstance(value, list): + value = np.array(value) + if isinstance(value, str): + return {'path': value, 'bytes': None} + elif isinstance(value, bytes): + return {'path': None, 'bytes': value} + elif isinstance(value, np.ndarray): + return encode_np_array(value) + elif isinstance(value, PIL.Image.Image): + return encode_pil_image(value) + elif value.get('path') is not None and os.path.isfile(value['path']): + return {'bytes': None, 'path': value.get('path')} + elif value.get('bytes') is not None or value.get('path') is not None: + return {'bytes': value.get('bytes'), 'path': value.get('path')} + else: + raise ValueError(f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.") + + def decode_example(self, value: dict, token_per_repo_id=None) -> 'PIL.Image.Image': + if not self.decode: + raise RuntimeError('Decoding is disabled for this feature. 
Please use Image(decode=True) instead.') + if config.PIL_AVAILABLE: + import PIL.Image + import PIL.ImageOps + else: + raise ImportError("To support decoding images, please install 'Pillow'.") + if token_per_repo_id is None: + token_per_repo_id = {} + (path, bytes_) = (value['path'], value['bytes']) + if bytes_ is None: + if path is None: + raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.") + elif is_local_path(path): + image = PIL.Image.open(path) + else: + source_url = path.split('::')[-1] + pattern = config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL + try: + repo_id = string_to_dict(source_url, pattern)['repo_id'] + token = token_per_repo_id.get(repo_id) + except ValueError: + token = None + download_config = DownloadConfig(token=token) + with xopen(path, 'rb', download_config=download_config) as f: + bytes_ = BytesIO(f.read()) + image = PIL.Image.open(bytes_) + else: + image = PIL.Image.open(BytesIO(bytes_)) + image.load() + if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None: + image = PIL.ImageOps.exif_transpose(image) + if self.mode and self.mode != image.mode: + image = image.convert(self.mode) + return image + + def flatten(self) -> Union['FeatureType', Dict[str, 'FeatureType']]: + from .features import Value + return self if self.decode else {'bytes': Value('binary'), 'path': Value('string')} + + def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray: + if pa.types.is_string(storage.type): + bytes_array = pa.array([None] * len(storage), type=pa.binary()) + storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null()) + elif pa.types.is_binary(storage.type): + path_array = pa.array([None] * len(storage), type=pa.string()) + storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null()) + elif pa.types.is_struct(storage.type): + if storage.type.get_field_index('bytes') >= 0: + bytes_array = storage.field('bytes') + else: + bytes_array = pa.array([None] * len(storage), type=pa.binary()) + if storage.type.get_field_index('path') >= 0: + path_array = storage.field('path') + else: + path_array = pa.array([None] * len(storage), type=pa.string()) + storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null()) + elif pa.types.is_list(storage.type): + bytes_array = pa.array([encode_np_array(np.array(arr))['bytes'] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary()) + path_array = pa.array([None] * len(storage), type=pa.string()) + storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null()) + return array_cast(storage, self.pa_type) + + def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: + + @no_op_if_value_is_null + def path_to_bytes(path): + with xopen(path, 'rb') as f: + bytes_ = f.read() + return bytes_ + bytes_array = pa.array([(path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist()], type=pa.binary()) + path_array = pa.array([os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()], type=pa.string()) + storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null()) + return array_cast(storage, self.pa_type) + +def list_image_compression_formats() -> 
List[str]: + if config.PIL_AVAILABLE: + import PIL.Image + else: + raise ImportError("To support encoding images, please install 'Pillow'.") + global _IMAGE_COMPRESSION_FORMATS + if _IMAGE_COMPRESSION_FORMATS is None: + PIL.Image.init() + _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys())) + return _IMAGE_COMPRESSION_FORMATS + +def image_to_bytes(image: 'PIL.Image.Image') -> bytes: + buffer = BytesIO() + if image.format in list_image_compression_formats(): + format = image.format + else: + format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF' + image.save(buffer, format=format) + return buffer.getvalue() + +def encode_pil_image(image: 'PIL.Image.Image') -> dict: + if hasattr(image, 'filename') and image.filename != '': + return {'path': image.filename, 'bytes': None} + else: + return {'path': None, 'bytes': image_to_bytes(image)} + +def encode_np_array(array: np.ndarray) -> dict: + if config.PIL_AVAILABLE: + import PIL.Image + else: + raise ImportError("To support encoding images, please install 'Pillow'.") + dtype = array.dtype + dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER + dtype_kind = dtype.kind + dtype_itemsize = dtype.itemsize + dest_dtype = None + if array.shape[2:]: + if dtype_kind not in ['u', 'i']: + raise TypeError(f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.') + dest_dtype = np.dtype('|u1') + if dtype != dest_dtype: + warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") + elif dtype in _VALID_IMAGE_ARRAY_DTPYES: + dest_dtype = dtype + else: + while dtype_itemsize >= 1: + dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize) + if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES: + dest_dtype = np.dtype(dtype_str) + warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") + break + else: + dtype_itemsize //= 2 + if dest_dtype is None: + raise TypeError(f'Cannot downcast dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}') + image = PIL.Image.fromarray(array.astype(dest_dtype)) + return {'path': None, 'bytes': image_to_bytes(image)} + +def objects_to_list_of_image_dicts(objs: Union[List[str], List[dict], List[np.ndarray], List['PIL.Image.Image']]) -> List[dict]: + if config.PIL_AVAILABLE: + import PIL.Image + else: + raise ImportError("To support encoding images, please install 'Pillow'.") + if objs: + (_, obj) = first_non_null_value(objs) + if isinstance(obj, str): + return [{'path': obj, 'bytes': None} if obj is not None else None for obj in objs] + if isinstance(obj, np.ndarray): + obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array) + return [obj_to_image_dict_func(obj) for obj in objs] + elif isinstance(obj, PIL.Image.Image): + obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image) + return [obj_to_image_dict_func(obj) for obj in objs] + else: + return objs + else: + return objs + +# File: datasets-main/src/datasets/features/translation.py +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union +import pyarrow as pa +if TYPE_CHECKING: + from .features import FeatureType + +@dataclass +class Translation: + languages: List[str] + id: Optional[str] = None + dtype: ClassVar[str] = 'dict' + pa_type: ClassVar[Any] = None + _type: str = field(default='Translation', init=False, repr=False) + + def __call__(self): + return pa.struct({lang: pa.string() for lang in sorted(self.languages)}) + + def flatten(self) -> Union['FeatureType', Dict[str, 'FeatureType']]: + from .features import Value + return {k: Value('string') for k in sorted(self.languages)} + +@dataclass +class TranslationVariableLanguages: + languages: Optional[List] = None + num_languages: Optional[int] = None + id: Optional[str] = None + dtype: ClassVar[str] = 'dict' + pa_type: ClassVar[Any] = None + _type: str = field(default='TranslationVariableLanguages', init=False, repr=False) + + def __post_init__(self): + self.languages = sorted(set(self.languages)) if self.languages else None + self.num_languages = len(self.languages) if self.languages else None + + def __call__(self): + return pa.struct({'language': pa.list_(pa.string()), 'translation': pa.list_(pa.string())}) + + def encode_example(self, translation_dict): + lang_set = set(self.languages) + if set(translation_dict) == {'language', 'translation'}: + return translation_dict + elif self.languages and set(translation_dict) - lang_set: + raise ValueError(f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)}).") + translation_tuples = [] + for (lang, text) in translation_dict.items(): + if isinstance(text, str): + translation_tuples.append((lang, text)) + else: + translation_tuples.extend([(lang, el) for el in text]) + (languages, translations) = zip(*sorted(translation_tuples)) + return {'language': languages, 'translation': translations} + + def flatten(self) -> Union['FeatureType', Dict[str, 'FeatureType']]: + from .features import Sequence, Value + return {'language': Sequence(Value('string')), 'translation': Sequence(Value('string'))} + +# File: datasets-main/src/datasets/filesystems/__init__.py +import importlib +import shutil +import warnings +from typing import List +import fsspec +import fsspec.asyn +from fsspec.implementations.local import LocalFileSystem +from . 
import compression +COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [compression.Bz2FileSystem, compression.GzipFileSystem, compression.Lz4FileSystem, compression.XzFileSystem, compression.ZstdFileSystem] +for fs_class in COMPRESSION_FILESYSTEMS: + if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: + warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') + fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) + +def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool: + return not isinstance(fs, LocalFileSystem) + +def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str): + if not is_remote_filesystem(fs): + shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst)) + else: + fs.mv(src, dst, recursive=True) + +# File: datasets-main/src/datasets/filesystems/compression.py +import os +from functools import partial +from typing import Optional +import fsspec +from fsspec.archive import AbstractArchiveFileSystem + +class BaseCompressedFileFileSystem(AbstractArchiveFileSystem): + root_marker = '' + protocol: str = None + compression: str = None + extension: str = None + + def __init__(self, fo: str='', target_protocol: Optional[str]=None, target_options: Optional[dict]=None, **kwargs): + super().__init__(self, **kwargs) + self.fo = fo.__fspath__() if hasattr(fo, '__fspath__') else fo + self._open_with_fsspec = partial(fsspec.open, self.fo, mode='rb', protocol=target_protocol, compression=self.compression, client_kwargs={'requote_redirect_url': False, 'trust_env': True, **(target_options or {}).pop('client_kwargs', {})}, **target_options or {}) + self.compressed_name = os.path.basename(self.fo.split('::')[0]) + self.uncompressed_name = self.compressed_name[:self.compressed_name.rindex('.')] if '.' in self.compressed_name else self.compressed_name + self.dir_cache = None + + @classmethod + def _strip_protocol(cls, path): + return super()._strip_protocol(path).lstrip('/') + + def _get_dirs(self): + if self.dir_cache is None: + f = {**self._open_with_fsspec().fs.info(self.fo), 'name': self.uncompressed_name} + self.dir_cache = {f['name']: f} + + def cat(self, path: str): + with self._open_with_fsspec().open() as f: + return f.read() + + def _open(self, path: str, mode: str='rb', block_size=None, autocommit=True, cache_options=None, **kwargs): + path = self._strip_protocol(path) + if mode != 'rb': + raise ValueError(f"Tried to read with mode {mode} on file {self.fo} opened with mode 'rb'") + return self._open_with_fsspec().open() + +class Bz2FileSystem(BaseCompressedFileFileSystem): + protocol = 'bz2' + compression = 'bz2' + extension = '.bz2' + +class GzipFileSystem(BaseCompressedFileFileSystem): + protocol = 'gzip' + compression = 'gzip' + extension = '.gz' + +class Lz4FileSystem(BaseCompressedFileFileSystem): + protocol = 'lz4' + compression = 'lz4' + extension = '.lz4' + +class XzFileSystem(BaseCompressedFileFileSystem): + protocol = 'xz' + compression = 'xz' + extension = '.xz' + +class ZstdFileSystem(BaseCompressedFileFileSystem): + protocol = 'zstd' + compression = 'zstd' + extension = '.zst' + +# File: datasets-main/src/datasets/fingerprint.py +import inspect +import os +import random +import shutil +import tempfile +import weakref +from functools import wraps +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union +import numpy as np +import xxhash +from . 
import config +from .naming import INVALID_WINDOWS_CHARACTERS_IN_PATH +from .utils._dill import dumps +from .utils.logging import get_logger +if TYPE_CHECKING: + from .arrow_dataset import Dataset +logger = get_logger(__name__) +_CACHING_ENABLED = True +_TEMP_DIR_FOR_TEMP_CACHE_FILES: Optional['_TempCacheDir'] = None +_DATASETS_WITH_TABLE_IN_TEMP_DIR: Optional[weakref.WeakSet] = None + +class _TempCacheDir: + + def __init__(self): + self.name = tempfile.mkdtemp(prefix=config.TEMP_CACHE_DIR_PREFIX) + self._finalizer = weakref.finalize(self, self._cleanup) + + def _cleanup(self): + for dset in get_datasets_with_cache_file_in_temp_dir(): + dset.__del__() + if os.path.exists(self.name): + try: + shutil.rmtree(self.name) + except Exception as e: + raise OSError(f'An error occurred while trying to delete temporary cache directory {self.name}. Please delete it manually.') from e + + def cleanup(self): + if self._finalizer.detach(): + self._cleanup() + +def maybe_register_dataset_for_temp_dir_deletion(dataset): + if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None: + return + global _DATASETS_WITH_TABLE_IN_TEMP_DIR + if _DATASETS_WITH_TABLE_IN_TEMP_DIR is None: + _DATASETS_WITH_TABLE_IN_TEMP_DIR = weakref.WeakSet() + if any((Path(_TEMP_DIR_FOR_TEMP_CACHE_FILES.name) in Path(cache_file['filename']).parents for cache_file in dataset.cache_files)): + _DATASETS_WITH_TABLE_IN_TEMP_DIR.add(dataset) + +def get_datasets_with_cache_file_in_temp_dir(): + return list(_DATASETS_WITH_TABLE_IN_TEMP_DIR) if _DATASETS_WITH_TABLE_IN_TEMP_DIR is not None else [] + +def enable_caching(): + global _CACHING_ENABLED + _CACHING_ENABLED = True + +def disable_caching(): + global _CACHING_ENABLED + _CACHING_ENABLED = False + +def is_caching_enabled() -> bool: + global _CACHING_ENABLED + return bool(_CACHING_ENABLED) + +def get_temporary_cache_files_directory() -> str: + global _TEMP_DIR_FOR_TEMP_CACHE_FILES + if _TEMP_DIR_FOR_TEMP_CACHE_FILES is None: + _TEMP_DIR_FOR_TEMP_CACHE_FILES = _TempCacheDir() + return _TEMP_DIR_FOR_TEMP_CACHE_FILES.name + +class Hasher: + dispatch: Dict = {} + + def __init__(self): + self.m = xxhash.xxh64() + + @classmethod + def hash_bytes(cls, value: Union[bytes, List[bytes]]) -> str: + value = [value] if isinstance(value, bytes) else value + m = xxhash.xxh64() + for x in value: + m.update(x) + return m.hexdigest() + + @classmethod + def hash(cls, value: Any) -> str: + return cls.hash_bytes(dumps(value)) + + def update(self, value: Any) -> None: + header_for_update = f'=={type(value)}==' + value_for_update = self.hash(value) + self.m.update(header_for_update.encode('utf8')) + self.m.update(value_for_update.encode('utf-8')) + + def hexdigest(self) -> str: + return self.m.hexdigest() +fingerprint_rng = random.Random() +fingerprint_warnings: Dict[str, bool] = {} + +def generate_fingerprint(dataset: 'Dataset') -> str: + state = dataset.__dict__ + hasher = Hasher() + for key in sorted(state): + if key == '_fingerprint': + continue + hasher.update(key) + hasher.update(state[key]) + for cache_file in dataset.cache_files: + hasher.update(os.path.getmtime(cache_file['filename'])) + return hasher.hexdigest() + +def generate_random_fingerprint(nbits: int=64) -> str: + return f'{fingerprint_rng.getrandbits(nbits):0{nbits // 4}x}' + +def update_fingerprint(fingerprint, transform, transform_args): + global fingerprint_warnings + hasher = Hasher() + hasher.update(fingerprint) + try: + hasher.update(transform) + except: + if _CACHING_ENABLED: + if not fingerprint_warnings.get('update_fingerprint_transform_hash_failed', 
False): + logger.warning(f"Transform {transform} couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only shown once. Subsequent hashing failures won't be shown.") + fingerprint_warnings['update_fingerprint_transform_hash_failed'] = True + else: + logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead.") + else: + logger.info(f"Transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled.") + return generate_random_fingerprint() + for key in sorted(transform_args): + hasher.update(key) + try: + hasher.update(transform_args[key]) + except: + if _CACHING_ENABLED: + if not fingerprint_warnings.get('update_fingerprint_transform_hash_failed', False): + logger.warning(f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only shown once. Subsequent hashing failures won't be shown.") + fingerprint_warnings['update_fingerprint_transform_hash_failed'] = True + else: + logger.info(f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead.") + else: + logger.info(f"Parameter '{key}'={transform_args[key]} of the transform {transform} couldn't be hashed properly, a random hash was used instead. This doesn't affect caching since it's disabled.") + return generate_random_fingerprint() + return hasher.hexdigest() + +def validate_fingerprint(fingerprint: str, max_length=64): + if not isinstance(fingerprint, str) or not fingerprint: + raise ValueError(f"Invalid fingerprint '{fingerprint}': it should be a non-empty string.") + for invalid_char in INVALID_WINDOWS_CHARACTERS_IN_PATH: + if invalid_char in fingerprint: + raise ValueError(f"Invalid fingerprint. Bad characters from blacklist '{INVALID_WINDOWS_CHARACTERS_IN_PATH}' found in '{fingerprint}'. They could create issues when creating cache files.") + if len(fingerprint) > max_length: + raise ValueError(f"Invalid fingerprint. 
Maximum length is {max_length} but '{fingerprint}' has length {len(fingerprint)}. It could create issues when creating cache files.") + +def format_transform_for_fingerprint(func: Callable, version: Optional[str]=None) -> str: + transform = f'{func.__module__}.{func.__qualname__}' + if version is not None: + transform += f'@{version}' + return transform + +def format_kwargs_for_fingerprint(func: Callable, args: Tuple, kwargs: Dict[str, Any], use_kwargs: Optional[List[str]]=None, ignore_kwargs: Optional[List[str]]=None, randomized_function: bool=False) -> Dict[str, Any]: + kwargs_for_fingerprint = kwargs.copy() + if args: + params = [p.name for p in inspect.signature(func).parameters.values() if p != p.VAR_KEYWORD] + args = args[1:] + params = params[1:] + kwargs_for_fingerprint.update(zip(params, args)) + else: + del kwargs_for_fingerprint[next(iter(inspect.signature(func).parameters))] + if use_kwargs: + kwargs_for_fingerprint = {k: v for (k, v) in kwargs_for_fingerprint.items() if k in use_kwargs} + if ignore_kwargs: + kwargs_for_fingerprint = {k: v for (k, v) in kwargs_for_fingerprint.items() if k not in ignore_kwargs} + if randomized_function: + if kwargs_for_fingerprint.get('seed') is None and kwargs_for_fingerprint.get('generator') is None: + (_, seed, pos, *_) = np.random.get_state() + seed = seed[pos] if pos < 624 else seed[0] + kwargs_for_fingerprint['generator'] = np.random.default_rng(seed) + default_values = {p.name: p.default for p in inspect.signature(func).parameters.values() if p.default != inspect._empty} + for (default_varname, default_value) in default_values.items(): + if default_varname in kwargs_for_fingerprint and kwargs_for_fingerprint[default_varname] == default_value: + kwargs_for_fingerprint.pop(default_varname) + return kwargs_for_fingerprint + +def fingerprint_transform(inplace: bool, use_kwargs: Optional[List[str]]=None, ignore_kwargs: Optional[List[str]]=None, fingerprint_names: Optional[List[str]]=None, randomized_function: bool=False, version: Optional[str]=None): + if use_kwargs is not None and (not isinstance(use_kwargs, list)): + raise ValueError(f'use_kwargs is supposed to be a list, not {type(use_kwargs)}') + if ignore_kwargs is not None and (not isinstance(ignore_kwargs, list)): + raise ValueError(f'ignore_kwargs is supposed to be a list, not {type(ignore_kwargs)}') + if inplace and fingerprint_names: + raise ValueError('fingerprint_names are only used when inplace is False') + fingerprint_names = fingerprint_names if fingerprint_names is not None else ['new_fingerprint'] + + def _fingerprint(func): + if not inplace and (not all((name in func.__code__.co_varnames for name in fingerprint_names))): + raise ValueError(f'function {func} is missing parameters {fingerprint_names} in signature') + if randomized_function: + if 'seed' not in func.__code__.co_varnames: + raise ValueError(f"'seed' must be in {func}'s signature") + if 'generator' not in func.__code__.co_varnames: + raise ValueError(f"'generator' must be in {func}'s signature") + transform = format_transform_for_fingerprint(func, version=version) + + @wraps(func) + def wrapper(*args, **kwargs): + kwargs_for_fingerprint = format_kwargs_for_fingerprint(func, args, kwargs, use_kwargs=use_kwargs, ignore_kwargs=ignore_kwargs, randomized_function=randomized_function) + if args: + dataset: Dataset = args[0] + args = args[1:] + else: + dataset: Dataset = kwargs.pop(next(iter(inspect.signature(func).parameters))) + if inplace: + new_fingerprint = update_fingerprint(dataset._fingerprint, transform, 
kwargs_for_fingerprint) + else: + for fingerprint_name in fingerprint_names: + if kwargs.get(fingerprint_name) is None: + kwargs_for_fingerprint['fingerprint_name'] = fingerprint_name + kwargs[fingerprint_name] = update_fingerprint(dataset._fingerprint, transform, kwargs_for_fingerprint) + else: + validate_fingerprint(kwargs[fingerprint_name]) + out = func(dataset, *args, **kwargs) + if inplace: + dataset._fingerprint = new_fingerprint + return out + wrapper._decorator_name_ = 'fingerprint' + return wrapper + return _fingerprint + +# File: datasets-main/src/datasets/formatting/__init__.py +from typing import Dict, List, Optional, Type +from .. import config +from ..utils import logging +from .formatting import ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table +from .np_formatter import NumpyFormatter +logger = logging.get_logger(__name__) +_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {} +_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {} +_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {} + +def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]]=None): + aliases = aliases if aliases is not None else [] + if format_type in _FORMAT_TYPES: + logger.warning(f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})") + _FORMAT_TYPES[format_type] = formatter_cls + for alias in set(aliases + [format_type]): + if alias in _FORMAT_TYPES_ALIASES: + logger.warning(f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})") + _FORMAT_TYPES_ALIASES[alias] = format_type + +def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]]=None): + aliases = aliases if aliases is not None else [] + for alias in set(aliases + [format_type]): + _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error +_register_formatter(PythonFormatter, None, aliases=['python']) +_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) +_register_formatter(NumpyFormatter, 'numpy', aliases=['np']) +_register_formatter(PandasFormatter, 'pandas', aliases=['pd']) +_register_formatter(CustomFormatter, 'custom') +if config.POLARS_AVAILABLE: + from .polars_formatter import PolarsFormatter + _register_formatter(PolarsFormatter, 'polars', aliases=['pl']) +else: + _polars_error = ValueError('Polars needs to be installed to be able to return Polars dataframes.') + _register_unavailable_formatter(_polars_error, 'polars', aliases=['pl']) +if config.TORCH_AVAILABLE: + from .torch_formatter import TorchFormatter + _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) +else: + _torch_error = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') + _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) +if config.TF_AVAILABLE: + from .tf_formatter import TFFormatter + _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) +else: + _tf_error = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') + _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) +if config.JAX_AVAILABLE: + from .jax_formatter import JaxFormatter + _register_formatter(JaxFormatter, 'jax', aliases=[]) +else: + _jax_error = ValueError('JAX needs to be installed to be able to return JAX arrays.') + 
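# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the upstream file above.
# It shows how the module-level registry in formatting/__init__.py is meant to
# be used: _register_formatter() maps a format type (plus aliases) to a
# Formatter class, and get_formatter() resolves either name to an instance.
# 'UppercaseFormatter', 'uppercase' and 'uc' are made-up names for this sketch,
# which assumes the `datasets` package itself is importable.
import pyarrow as pa
from datasets.formatting import PythonFormatter, _register_formatter, get_formatter

class UppercaseFormatter(PythonFormatter):
    # Reuse the plain-Python formatter, but upper-case every string value in a row.
    def format_row(self, pa_table: pa.Table) -> dict:
        row = super().format_row(pa_table)
        return {k: v.upper() if isinstance(v, str) else v for k, v in row.items()}

_register_formatter(UppercaseFormatter, 'uppercase', aliases=['uc'])
formatter = get_formatter('uc')  # the alias resolves to the 'uppercase' format type
print(formatter.format_row(pa.table({'text': ['hello']})))  # {'text': 'HELLO'}
# ---------------------------------------------------------------------------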
_register_unavailable_formatter(_jax_error, 'jax', aliases=[]) + +def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]: + if format_type in _FORMAT_TYPES_ALIASES: + return _FORMAT_TYPES_ALIASES[format_type] + else: + return format_type + +def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter: + format_type = get_format_type_from_alias(format_type) + if format_type in _FORMAT_TYPES: + return _FORMAT_TYPES[format_type](**format_kwargs) + if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: + raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] + else: + raise ValueError(f"Format type should be one of {list(_FORMAT_TYPES.keys())}, but got '{format_type}'") + +# File: datasets-main/src/datasets/formatting/formatting.py +import operator +from collections.abc import Mapping, MutableMapping +from functools import partial +from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union +import numpy as np +import pandas as pd +import pyarrow as pa +from packaging import version +from .. import config +from ..features import Features +from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper +from ..table import Table +from ..utils.py_utils import no_op_if_value_is_null +T = TypeVar('T') +RowFormat = TypeVar('RowFormat') +ColumnFormat = TypeVar('ColumnFormat') +BatchFormat = TypeVar('BatchFormat') + +def _is_range_contiguous(key: range) -> bool: + return key.step == 1 and key.stop >= key.start + +def _raise_bad_key_type(key: Any): + raise TypeError(f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable.") + +def _query_table_with_indices_mapping(table: Table, key: Union[int, slice, range, str, Iterable], indices: Table) -> pa.Table: + if isinstance(key, int): + key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py() + return _query_table(table, key) + if isinstance(key, slice): + key = range(*key.indices(indices.num_rows)) + if isinstance(key, range): + if _is_range_contiguous(key) and key.start >= 0: + return _query_table(table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]) + else: + pass + if isinstance(key, str): + table = table.select([key]) + return _query_table(table, indices.column(0).to_pylist()) + if isinstance(key, Iterable): + return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key]) + _raise_bad_key_type(key) + +def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table: + if isinstance(key, int): + return table.fast_slice(key % table.num_rows, 1) + if isinstance(key, slice): + key = range(*key.indices(table.num_rows)) + if isinstance(key, range): + if _is_range_contiguous(key) and key.start >= 0: + return table.fast_slice(key.start, key.stop - key.start) + else: + pass + if isinstance(key, str): + return table.table.drop([column for column in table.column_names if column != key]) + if isinstance(key, Iterable): + key = np.fromiter(key, np.int64) + if len(key) == 0: + return table.table.slice(0, 0) + return table.fast_gather(key % table.num_rows) + _raise_bad_key_type(key) + +def _is_array_with_nulls(pa_array: pa.Array) -> bool: + return pa_array.null_count > 0 + +class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]): + + def extract_row(self, pa_table: pa.Table) -> RowFormat: + raise NotImplementedError + + def extract_column(self, pa_table: pa.Table) -> ColumnFormat: + raise 
NotImplementedError + + def extract_batch(self, pa_table: pa.Table) -> BatchFormat: + raise NotImplementedError + +def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]: + return {key: array[0] for (key, array) in py_dict.items()} + +class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]): + + def extract_row(self, pa_table: pa.Table) -> pa.Table: + return pa_table + + def extract_column(self, pa_table: pa.Table) -> pa.Array: + return pa_table.column(0) + + def extract_batch(self, pa_table: pa.Table) -> pa.Table: + return pa_table + +class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]): + + def extract_row(self, pa_table: pa.Table) -> dict: + return _unnest(pa_table.to_pydict()) + + def extract_column(self, pa_table: pa.Table) -> list: + return pa_table.column(0).to_pylist() + + def extract_batch(self, pa_table: pa.Table) -> dict: + return pa_table.to_pydict() + +class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]): + + def __init__(self, **np_array_kwargs): + self.np_array_kwargs = np_array_kwargs + + def extract_row(self, pa_table: pa.Table) -> dict: + return _unnest(self.extract_batch(pa_table)) + + def extract_column(self, pa_table: pa.Table) -> np.ndarray: + return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]]) + + def extract_batch(self, pa_table: pa.Table) -> dict: + return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names} + + def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray: + if isinstance(pa_array, pa.ChunkedArray): + if isinstance(pa_array.type, _ArrayXDExtensionType): + zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) + array: List = [row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)] + else: + zero_copy_only = _is_zero_copy_only(pa_array.type) and all((not _is_array_with_nulls(chunk) for chunk in pa_array.chunks)) + array: List = [row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)] + elif isinstance(pa_array.type, _ArrayXDExtensionType): + zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True) + array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only) + else: + zero_copy_only = _is_zero_copy_only(pa_array.type) and (not _is_array_with_nulls(pa_array)) + array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist() + if len(array) > 0: + if any((isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape) or (isinstance(x, float) and np.isnan(x)) for x in array)): + if np.lib.NumpyVersion(np.__version__) >= '2.0.0b1': + return np.asarray(array, dtype=object) + return np.array(array, copy=False, dtype=object) + if np.lib.NumpyVersion(np.__version__) >= '2.0.0b1': + return np.asarray(array) + else: + return np.array(array, copy=False) + +class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]): + + def extract_row(self, pa_table: pa.Table) -> pd.DataFrame: + return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper) + + def extract_column(self, pa_table: pa.Table) -> pd.Series: + return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]] + + def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame: + return pa_table.to_pandas(types_mapper=pandas_types_mapper) + +class PythonFeaturesDecoder: + + def __init__(self, features: Optional[Features]): + self.features = features + + def decode_row(self, row: dict) -> dict: + 
return self.features.decode_example(row) if self.features else row + + def decode_column(self, column: list, column_name: str) -> list: + return self.features.decode_column(column, column_name) if self.features else column + + def decode_batch(self, batch: dict) -> dict: + return self.features.decode_batch(batch) if self.features else batch + +class PandasFeaturesDecoder: + + def __init__(self, features: Optional[Features]): + self.features = features + + def decode_row(self, row: pd.DataFrame) -> pd.DataFrame: + decode = {column_name: no_op_if_value_is_null(partial(decode_nested_example, feature)) for (column_name, feature) in self.features.items() if self.features._column_requires_decoding[column_name]} if self.features else {} + if decode: + row[list(decode.keys())] = row.transform(decode) + return row + + def decode_column(self, column: pd.Series, column_name: str) -> pd.Series: + decode = no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name])) if self.features and column_name in self.features and self.features._column_requires_decoding[column_name] else None + if decode: + column = column.transform(decode) + return column + + def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame: + return self.decode_row(batch) + +class LazyDict(MutableMapping): + + def __init__(self, pa_table: pa.Table, formatter: 'Formatter'): + self.pa_table = pa_table + self.formatter = formatter + self.data = {key: None for key in pa_table.column_names} + self.keys_to_format = set(self.data.keys()) + + def __len__(self): + return len(self.data) + + def __getitem__(self, key): + value = self.data[key] + if key in self.keys_to_format: + value = self.format(key) + self.data[key] = value + self.keys_to_format.remove(key) + return value + + def __setitem__(self, key, value): + if key in self.keys_to_format: + self.keys_to_format.remove(key) + self.data[key] = value + + def __delitem__(self, key) -> None: + if key in self.keys_to_format: + self.keys_to_format.remove(key) + del self.data[key] + + def __iter__(self): + return iter(self.data) + + def __contains__(self, key): + return key in self.data + + def __repr__(self): + self._format_all() + return repr(self.data) + if config.PY_VERSION >= version.parse('3.9'): + + def __or__(self, other): + if isinstance(other, LazyDict): + inst = self.copy() + other = other.copy() + other._format_all() + inst.keys_to_format -= other.data.keys() + inst.data = inst.data | other.data + return inst + if isinstance(other, dict): + inst = self.copy() + inst.keys_to_format -= other.keys() + inst.data = inst.data | other + return inst + return NotImplemented + + def __ror__(self, other): + if isinstance(other, LazyDict): + inst = self.copy() + other = other.copy() + other._format_all() + inst.keys_to_format -= other.data.keys() + inst.data = other.data | inst.data + return inst + if isinstance(other, dict): + inst = self.copy() + inst.keys_to_format -= other.keys() + inst.data = other | inst.data + return inst + return NotImplemented + + def __ior__(self, other): + if isinstance(other, LazyDict): + other = other.copy() + other._format_all() + self.keys_to_format -= other.data.keys() + self.data |= other.data + else: + self.keys_to_format -= other.keys() + self.data |= other + return self + + def __copy__(self): + inst = self.__class__.__new__(self.__class__) + inst.__dict__.update(self.__dict__) + inst.__dict__['data'] = self.__dict__['data'].copy() + inst.__dict__['keys_to_format'] = self.__dict__['keys_to_format'].copy() + return inst + + def copy(self): 
+ import copy + return copy.copy(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + raise NotImplementedError + + def format(self, key): + raise NotImplementedError + + def _format_all(self): + for key in self.keys_to_format: + self.data[key] = self.format(key) + self.keys_to_format.clear() + +class LazyRow(LazyDict): + + def format(self, key): + return self.formatter.format_column(self.pa_table.select([key]))[0] + +class LazyBatch(LazyDict): + + def format(self, key): + return self.formatter.format_column(self.pa_table.select([key])) + +class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]): + simple_arrow_extractor = SimpleArrowExtractor + python_arrow_extractor = PythonArrowExtractor + numpy_arrow_extractor = NumpyArrowExtractor + pandas_arrow_extractor = PandasArrowExtractor + + def __init__(self, features: Optional[Features]=None): + self.features = features + self.python_features_decoder = PythonFeaturesDecoder(self.features) + self.pandas_features_decoder = PandasFeaturesDecoder(self.features) + + def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]: + if query_type == 'row': + return self.format_row(pa_table) + elif query_type == 'column': + return self.format_column(pa_table) + elif query_type == 'batch': + return self.format_batch(pa_table) + + def format_row(self, pa_table: pa.Table) -> RowFormat: + raise NotImplementedError + + def format_column(self, pa_table: pa.Table) -> ColumnFormat: + raise NotImplementedError + + def format_batch(self, pa_table: pa.Table) -> BatchFormat: + raise NotImplementedError + +class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]): + + def recursive_tensorize(self, data_struct: dict): + raise NotImplementedError + +class ArrowFormatter(Formatter[pa.Table, pa.Array, pa.Table]): + + def format_row(self, pa_table: pa.Table) -> pa.Table: + return self.simple_arrow_extractor().extract_row(pa_table) + + def format_column(self, pa_table: pa.Table) -> pa.Array: + return self.simple_arrow_extractor().extract_column(pa_table) + + def format_batch(self, pa_table: pa.Table) -> pa.Table: + return self.simple_arrow_extractor().extract_batch(pa_table) + +class PythonFormatter(Formatter[Mapping, list, Mapping]): + + def __init__(self, features=None, lazy=False): + super().__init__(features) + self.lazy = lazy + + def format_row(self, pa_table: pa.Table) -> Mapping: + if self.lazy: + return LazyRow(pa_table, self) + row = self.python_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> list: + column = self.python_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + if self.lazy: + return LazyBatch(pa_table, self) + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return batch + +class PandasFormatter(Formatter[pd.DataFrame, pd.Series, pd.DataFrame]): + + def format_row(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_row(pa_table) + row = self.pandas_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> pd.Series: + column = self.pandas_arrow_extractor().extract_column(pa_table) + column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0]) 
+ return column + + def format_batch(self, pa_table: pa.Table) -> pd.DataFrame: + row = self.pandas_arrow_extractor().extract_batch(pa_table) + row = self.pandas_features_decoder.decode_batch(row) + return row + +class CustomFormatter(Formatter[dict, ColumnFormat, dict]): + + def __init__(self, transform: Callable[[dict], dict], features=None, **kwargs): + super().__init__(features=features) + self.transform = transform + + def format_row(self, pa_table: pa.Table) -> dict: + formatted_batch = self.format_batch(pa_table) + try: + return _unnest(formatted_batch) + except Exception as exc: + raise TypeError(f'Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}') from exc + + def format_column(self, pa_table: pa.Table) -> ColumnFormat: + formatted_batch = self.format_batch(pa_table) + if hasattr(formatted_batch, 'keys'): + if len(formatted_batch.keys()) > 1: + raise TypeError(f'Tried to query a column but the custom formatting function returns too many columns. Only one column was expected but got columns {list(formatted_batch.keys())}.') + else: + raise TypeError(f'Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}') + try: + return formatted_batch[pa_table.column_names[0]] + except Exception as exc: + raise TypeError(f'Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}') from exc + + def format_batch(self, pa_table: pa.Table) -> dict: + batch = self.python_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + return self.transform(batch) + +def _check_valid_column_key(key: str, columns: List[str]) -> None: + if key not in columns: + raise KeyError(f'Column {key} not in the dataset. 
Current columns in the dataset: {columns}') + +def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None: + if isinstance(key, int): + if key < 0 and key + size < 0 or key >= size: + raise IndexError(f'Invalid key: {key} is out of bounds for size {size}') + return + elif isinstance(key, slice): + pass + elif isinstance(key, range): + if len(key) > 0: + _check_valid_index_key(max(key), size=size) + _check_valid_index_key(min(key), size=size) + elif isinstance(key, Iterable): + if len(key) > 0: + _check_valid_index_key(int(max(key)), size=size) + _check_valid_index_key(int(min(key)), size=size) + else: + _raise_bad_key_type(key) + +def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str: + if isinstance(key, int): + return 'row' + elif isinstance(key, str): + return 'column' + elif isinstance(key, (slice, range, Iterable)): + return 'batch' + _raise_bad_key_type(key) + +def query_table(table: Table, key: Union[int, slice, range, str, Iterable], indices: Optional[Table]=None) -> pa.Table: + if not isinstance(key, (int, slice, range, str, Iterable)): + try: + key = operator.index(key) + except TypeError: + _raise_bad_key_type(key) + if isinstance(key, str): + _check_valid_column_key(key, table.column_names) + else: + size = indices.num_rows if indices is not None else table.num_rows + _check_valid_index_key(key, size) + if indices is None: + pa_subtable = _query_table(table, key) + else: + pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices) + return pa_subtable + +def format_table(table: Table, key: Union[int, slice, range, str, Iterable], formatter: Formatter, format_columns: Optional[list]=None, output_all_columns=False): + if isinstance(table, Table): + pa_table = table.table + else: + pa_table = table + query_type = key_to_query_type(key) + python_formatter = PythonFormatter(features=formatter.features) + if format_columns is None: + return formatter(pa_table, query_type=query_type) + elif query_type == 'column': + if key in format_columns: + return formatter(pa_table, query_type) + else: + return python_formatter(pa_table, query_type=query_type) + else: + pa_table_to_format = pa_table.drop((col for col in pa_table.column_names if col not in format_columns)) + formatted_output = formatter(pa_table_to_format, query_type=query_type) + if output_all_columns: + if isinstance(formatted_output, MutableMapping): + pa_table_with_remaining_columns = pa_table.drop((col for col in pa_table.column_names if col in format_columns)) + remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type) + formatted_output.update(remaining_columns_dict) + else: + raise TypeError(f'Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}') + return formatted_output + +# File: datasets-main/src/datasets/formatting/jax_formatter.py +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING, Dict, Optional +import numpy as np +import pyarrow as pa +from .. 
import config +from ..utils.logging import get_logger +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter +if TYPE_CHECKING: + import jax + import jaxlib +logger = get_logger() +DEVICE_MAPPING: Optional[dict] = None + +class JaxFormatter(TensorFormatter[Mapping, 'jax.Array', Mapping]): + + def __init__(self, features=None, device=None, **jnp_array_kwargs): + super().__init__(features=features) + import jax + from jaxlib.xla_client import Device + if isinstance(device, Device): + raise ValueError(f'Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` is not serializable neither with `pickle` nor with `dill`. Instead you can surround the device with `str()` to get its string identifier that will be internally mapped to the actual `jaxlib.xla_extension.Device`.') + self.device = device if isinstance(device, str) else str(jax.devices()[0]) + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + if self.device not in list(DEVICE_MAPPING.keys()): + logger.warning(f'Device with string identifier {self.device} not listed among the available devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default device: {str(jax.devices()[0])}.') + self.device = str(jax.devices()[0]) + self.jnp_array_kwargs = jnp_array_kwargs + + @staticmethod + def _map_devices_to_str() -> Dict[str, 'jaxlib.xla_extension.Device']: + import jax + return {str(device): device for device in jax.devices()} + + def _consolidate(self, column): + import jax + import jax.numpy as jnp + if isinstance(column, list) and column: + if all((isinstance(x, jax.Array) and x.shape == column[0].shape and (x.dtype == column[0].dtype) for x in column)): + return jnp.stack(column, axis=0) + return column + + def _tensorize(self, value): + import jax + import jax.numpy as jnp + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value.tolist() + default_dtype = {} + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + if jax.config.jax_enable_x64: + default_dtype = {'dtype': jnp.int64} + else: + default_dtype = {'dtype': jnp.int32} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {'dtype': jnp.float32} + elif config.PIL_AVAILABLE and 'PIL' in sys.modules: + import PIL.Image + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + global DEVICE_MAPPING + if DEVICE_MAPPING is None: + DEVICE_MAPPING = self._map_devices_to_str() + with jax.default_device(DEVICE_MAPPING[self.device]): + return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + import jax + if config.TORCH_AVAILABLE and 'torch' in sys.modules: + import torch + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, '__array__') and (not isinstance(data_struct, jax.Array)): + data_struct = data_struct.__array__() + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: 
dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> 'jax.Array': + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch + +# File: datasets-main/src/datasets/formatting/np_formatter.py +import sys +from collections.abc import Mapping +import numpy as np +import pyarrow as pa +from .. import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter + +class NumpyFormatter(TensorFormatter[Mapping, np.ndarray, Mapping]): + + def __init__(self, features=None, **np_array_kwargs): + super().__init__(features=features) + self.np_array_kwargs = np_array_kwargs + + def _consolidate(self, column): + if isinstance(column, list): + if column and all((isinstance(x, np.ndarray) and x.shape == column[0].shape and (x.dtype == column[0].dtype) for x in column)): + return np.stack(column) + else: + out = np.empty(len(column), dtype=object) + out[:] = column + return out + return column + + def _tensorize(self, value): + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value + elif isinstance(value, np.number): + return value + default_dtype = {} + if isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.integer): + default_dtype = {'dtype': np.int64} + elif isinstance(value, np.ndarray) and np.issubdtype(value.dtype, np.floating): + default_dtype = {'dtype': np.float32} + elif config.PIL_AVAILABLE and 'PIL' in sys.modules: + import PIL.Image + if isinstance(value, PIL.Image.Image): + return np.asarray(value, **self.np_array_kwargs) + return np.asarray(value, **{**default_dtype, **self.np_array_kwargs}) + + def _recursive_tensorize(self, data_struct): + if config.TORCH_AVAILABLE and 'torch' in sys.modules: + import torch + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, '__array__') and (not isinstance(data_struct, (np.ndarray, np.character, np.number))): + data_struct = data_struct.__array__() + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + if isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) 
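# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the upstream file above.
# It shows what NumpyFormatter returns for each query type; the two-column
# table is made up for the example. This is the formatter behind
# Dataset.set_format('numpy') / with_format('numpy'), applied via format_table().
# Note the default dtype coercion in _tensorize: integer arrays become int64
# and float arrays become float32 unless np_array_kwargs overrides the dtype.
import pyarrow as pa
from datasets.formatting import NumpyFormatter

table = pa.table({'ids': [1, 2, 3], 'scores': [0.5, 1.5, 2.5]})
formatter = NumpyFormatter()
formatter.format_row(table.slice(0, 1))  # {'ids': 1, 'scores': 0.5} as NumPy scalars
formatter.format_column(table)           # array([1, 2, 3]) -- the first column only
formatter.format_batch(table)            # {'ids': int64 array, 'scores': float32 array}
# ---------------------------------------------------------------------------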
+ + def format_column(self, pa_table: pa.Table) -> np.ndarray: + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch + +# File: datasets-main/src/datasets/formatting/polars_formatter.py +import sys +from collections.abc import Mapping +from functools import partial +from typing import TYPE_CHECKING, Optional +import pyarrow as pa +from .. import config +from ..features import Features +from ..features.features import decode_nested_example +from ..utils.py_utils import no_op_if_value_is_null +from .formatting import BaseArrowExtractor, TensorFormatter +if TYPE_CHECKING: + import polars as pl + +class PolarsArrowExtractor(BaseArrowExtractor['pl.DataFrame', 'pl.Series', 'pl.DataFrame']): + + def extract_row(self, pa_table: pa.Table) -> 'pl.DataFrame': + if config.POLARS_AVAILABLE: + if 'polars' not in sys.modules: + import polars + else: + polars = sys.modules['polars'] + return polars.from_arrow(pa_table.slice(length=1)) + else: + raise ValueError('Polars needs to be installed to be able to return Polars dataframes.') + + def extract_column(self, pa_table: pa.Table) -> 'pl.Series': + if config.POLARS_AVAILABLE: + if 'polars' not in sys.modules: + import polars + else: + polars = sys.modules['polars'] + return polars.from_arrow(pa_table.select([0]))[pa_table.column_names[0]] + else: + raise ValueError('Polars needs to be installed to be able to return Polars dataframes.') + + def extract_batch(self, pa_table: pa.Table) -> 'pl.DataFrame': + if config.POLARS_AVAILABLE: + if 'polars' not in sys.modules: + import polars + else: + polars = sys.modules['polars'] + return polars.from_arrow(pa_table) + else: + raise ValueError('Polars needs to be installed to be able to return Polars dataframes.') + +class PolarsFeaturesDecoder: + + def __init__(self, features: Optional[Features]): + self.features = features + import polars as pl + + def decode_row(self, row: 'pl.DataFrame') -> 'pl.DataFrame': + decode = {column_name: no_op_if_value_is_null(partial(decode_nested_example, feature)) for (column_name, feature) in self.features.items() if self.features._column_requires_decoding[column_name]} if self.features else {} + if decode: + row[list(decode.keys())] = row.map_rows(decode) + return row + + def decode_column(self, column: 'pl.Series', column_name: str) -> 'pl.Series': + decode = no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name])) if self.features and column_name in self.features and self.features._column_requires_decoding[column_name] else None + if decode: + column = column.map_elements(decode) + return column + + def decode_batch(self, batch: 'pl.DataFrame') -> 'pl.DataFrame': + return self.decode_row(batch) + +class PolarsFormatter(TensorFormatter[Mapping, 'pl.DataFrame', Mapping]): + + def __init__(self, features=None, **np_array_kwargs): + super().__init__(features=features) + self.np_array_kwargs = np_array_kwargs + self.polars_arrow_extractor = PolarsArrowExtractor + self.polars_features_decoder = PolarsFeaturesDecoder(features) + import polars as pl 
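# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the upstream file above.
# Assuming polars is installed, the 'polars' format type (alias 'pl') registered
# in formatting/__init__.py resolves to this formatter, so batches come back as
# polars.DataFrame objects and columns as polars.Series. The one-column table
# below is made up for the example.
import pyarrow as pa
from datasets.formatting import get_formatter

formatter = get_formatter('pl')                        # -> PolarsFormatter instance
df = formatter.format_batch(pa.table({'a': [1, 2]}))   # polars.DataFrame with column 'a'
s = formatter.format_column(pa.table({'a': [1, 2]}))   # polars.Series named 'a'
# ---------------------------------------------------------------------------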
+ + def format_row(self, pa_table: pa.Table) -> 'pl.DataFrame': + row = self.polars_arrow_extractor().extract_row(pa_table) + row = self.polars_features_decoder.decode_row(row) + return row + + def format_column(self, pa_table: pa.Table) -> 'pl.Series': + column = self.polars_arrow_extractor().extract_column(pa_table) + column = self.polars_features_decoder.decode_column(column, pa_table.column_names[0]) + return column + + def format_batch(self, pa_table: pa.Table) -> 'pl.DataFrame': + row = self.polars_arrow_extractor().extract_batch(pa_table) + row = self.polars_features_decoder.decode_batch(row) + return row + +# File: datasets-main/src/datasets/formatting/tf_formatter.py +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING +import numpy as np +import pyarrow as pa +from .. import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter +if TYPE_CHECKING: + import tensorflow as tf + +class TFFormatter(TensorFormatter[Mapping, 'tf.Tensor', Mapping]): + + def __init__(self, features=None, **tf_tensor_kwargs): + super().__init__(features=features) + self.tf_tensor_kwargs = tf_tensor_kwargs + import tensorflow as tf + + def _consolidate(self, column): + import tensorflow as tf + if isinstance(column, list) and column: + if all((isinstance(x, tf.Tensor) and x.shape == column[0].shape and (x.dtype == column[0].dtype) for x in column)): + return tf.stack(column) + elif all((isinstance(x, (tf.Tensor, tf.RaggedTensor)) and x.ndim == 1 and (x.dtype == column[0].dtype) for x in column)): + return tf.ragged.stack(column) + return column + + def _tensorize(self, value): + import tensorflow as tf + if value is None: + return value + default_dtype = {} + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + default_dtype = {'dtype': tf.int64} + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {'dtype': tf.float32} + elif config.PIL_AVAILABLE and 'PIL' in sys.modules: + import PIL.Image + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + return tf.convert_to_tensor(value, **{**default_dtype, **self.tf_tensor_kwargs}) + + def _recursive_tensorize(self, data_struct): + import tensorflow as tf + if config.TORCH_AVAILABLE and 'torch' in sys.modules: + import torch + if isinstance(data_struct, torch.Tensor): + return self._tensorize(data_struct.detach().cpu().numpy()[()]) + if hasattr(data_struct, '__array__') and (not isinstance(data_struct, tf.Tensor)): + data_struct = data_struct.__array__() + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> 'tf.Tensor': + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + 
column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch + +# File: datasets-main/src/datasets/formatting/torch_formatter.py +import sys +from collections.abc import Mapping +from typing import TYPE_CHECKING +import numpy as np +import pyarrow as pa +from .. import config +from ..utils.py_utils import map_nested +from .formatting import TensorFormatter +if TYPE_CHECKING: + import torch + +class TorchFormatter(TensorFormatter[Mapping, 'torch.Tensor', Mapping]): + + def __init__(self, features=None, **torch_tensor_kwargs): + super().__init__(features=features) + self.torch_tensor_kwargs = torch_tensor_kwargs + import torch + + def _consolidate(self, column): + import torch + if isinstance(column, list) and column: + if all((isinstance(x, torch.Tensor) and x.shape == column[0].shape and (x.dtype == column[0].dtype) for x in column)): + return torch.stack(column) + return column + + def _tensorize(self, value): + import torch + if isinstance(value, (str, bytes, type(None))): + return value + elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character): + return value.tolist() + default_dtype = {} + if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer): + default_dtype = {'dtype': torch.int64} + if value.dtype in [np.uint16, np.uint32]: + value = value.astype(np.int64) + elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating): + default_dtype = {'dtype': torch.float32} + elif config.PIL_AVAILABLE and 'PIL' in sys.modules: + import PIL.Image + if isinstance(value, PIL.Image.Image): + value = np.asarray(value) + if value.ndim == 2: + value = value[:, :, np.newaxis] + value = value.transpose((2, 0, 1)) + return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs}) + + def _recursive_tensorize(self, data_struct): + import torch + if hasattr(data_struct, '__array__') and (not isinstance(data_struct, torch.Tensor)): + data_struct = data_struct.__array__() + if isinstance(data_struct, np.ndarray): + if data_struct.dtype == object: + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + elif isinstance(data_struct, (list, tuple)): + return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct]) + return self._tensorize(data_struct) + + def recursive_tensorize(self, data_struct: dict): + return map_nested(self._recursive_tensorize, data_struct, map_list=False) + + def format_row(self, pa_table: pa.Table) -> Mapping: + row = self.numpy_arrow_extractor().extract_row(pa_table) + row = self.python_features_decoder.decode_row(row) + return self.recursive_tensorize(row) + + def format_column(self, pa_table: pa.Table) -> 'torch.Tensor': + column = self.numpy_arrow_extractor().extract_column(pa_table) + column = self.python_features_decoder.decode_column(column, pa_table.column_names[0]) + column = self.recursive_tensorize(column) + column = self._consolidate(column) + return column + + def format_batch(self, pa_table: pa.Table) -> Mapping: + batch = self.numpy_arrow_extractor().extract_batch(pa_table) + batch = self.python_features_decoder.decode_batch(batch) + batch = self.recursive_tensorize(batch) + for 
column_name in batch: + batch[column_name] = self._consolidate(batch[column_name]) + return batch + +# File: datasets-main/src/datasets/hub.py +import time +from itertools import chain +from typing import Optional, Union +from huggingface_hub import CommitInfo, CommitOperationAdd, CommitOperationDelete, DatasetCard, DatasetCardData, HfApi, HfFileSystem +from huggingface_hub.utils import HfHubHTTPError +import datasets.config +from datasets.info import DatasetInfosDict +from datasets.inspect import get_dataset_config_names, get_dataset_default_config_name +from datasets.load import load_dataset, load_dataset_builder +from datasets.utils.metadata import MetadataConfigs + +def convert_to_parquet(repo_id: str, revision: Optional[str]=None, token: Optional[Union[bool, str]]=None, trust_remote_code: Optional[bool]=None) -> CommitInfo: + print(f'{repo_id}') + configs = get_dataset_config_names(repo_id, token=token, revision=revision, trust_remote_code=trust_remote_code) + print(f'configs = {configs!r}') + default_config = get_dataset_default_config_name(repo_id, token=token, revision=revision, trust_remote_code=trust_remote_code) + print(f'default_config = {default_config!r}') + if default_config: + config = default_config + configs.remove(default_config) + else: + config = configs.pop(0) + print(f'config = {config!r}') + dataset = load_dataset(repo_id, config, revision=revision, trust_remote_code=trust_remote_code) + commit_info = dataset.push_to_hub(repo_id, config_name=config, commit_message='Convert dataset to Parquet', commit_description='Convert dataset to Parquet.', create_pr=True, token=token, set_default=default_config is not None) + time.sleep(5) + (pr_revision, pr_url) = (commit_info.pr_revision, commit_info.pr_url) + for config in configs: + print(f'config = {config!r}') + dataset = load_dataset(repo_id, config, revision=revision, trust_remote_code=trust_remote_code) + dataset.push_to_hub(repo_id, config_name=config, commit_message=f"Add '{config}' config data files", revision=pr_revision, token=token) + time.sleep(5) + _delete_files(repo_id, revision=pr_revision, token=token) + if not revision: + api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) + try: + api.create_branch(repo_id, branch='script', repo_type='dataset', token=token, exist_ok=True) + except HfHubHTTPError: + pass + print(f'You can find your PR to convert the dataset to Parquet at: {pr_url}') + return commit_info + +def delete_from_hub(repo_id: str, config_name: str, revision: Optional[str]=None, token: Optional[Union[bool, str]]=None) -> CommitInfo: + operations = [] + fs = HfFileSystem(endpoint=datasets.config.HF_ENDPOINT, token=token) + builder = load_dataset_builder(repo_id, config_name, revision=revision, token=token, trust_remote_code=False) + for data_file in chain(*builder.config.data_files.values()): + data_file_resolved_path = fs.resolve_path(data_file) + if data_file_resolved_path.repo_id == repo_id: + operations.append(CommitOperationDelete(path_in_repo=data_file_resolved_path.path_in_repo)) + dataset_card = DatasetCard.load(repo_id) + if dataset_card.data.get('config_names', None) and config_name in dataset_card.data['config_names']: + dataset_card.data['config_names'].remove(config_name) + metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card.data) + if metadata_configs: + _ = metadata_configs.pop(config_name, None) + dataset_card_data = DatasetCardData() + metadata_configs.to_dataset_card_data(dataset_card_data) + if datasets.config.METADATA_CONFIGS_FIELD in dataset_card_data: 
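+            # copy the rebuilt configs metadata back onto the dataset card; the else branch below drops the field when no configs remain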
+ dataset_card.data[datasets.config.METADATA_CONFIGS_FIELD] = dataset_card_data[datasets.config.METADATA_CONFIGS_FIELD] + else: + _ = dataset_card.data.pop(datasets.config.METADATA_CONFIGS_FIELD, None) + dataset_infos: DatasetInfosDict = DatasetInfosDict.from_dataset_card_data(dataset_card.data) + if dataset_infos: + _ = dataset_infos.pop(config_name, None) + dataset_card_data = DatasetCardData() + dataset_infos.to_dataset_card_data(dataset_card_data) + if 'dataset_info' in dataset_card_data: + dataset_card.data['dataset_info'] = dataset_card_data['dataset_info'] + else: + _ = dataset_card.data.pop('dataset_info', None) + operations.append(CommitOperationAdd(path_in_repo=datasets.config.REPOCARD_FILENAME, path_or_fileobj=str(dataset_card).encode())) + api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) + commit_info = api.create_commit(repo_id, operations=operations, commit_message=f"Delete '{config_name}' config", commit_description=f"Delete '{config_name}' config.", token=token, repo_type='dataset', revision=revision, create_pr=True) + print(f'You can find your PR to delete the dataset config at: {commit_info.pr_url}') + return commit_info + +def _delete_files(dataset_id, revision=None, token=None): + dataset_name = dataset_id.split('/')[-1] + hf_api = HfApi(endpoint=datasets.config.HF_ENDPOINT, token=token) + repo_files = hf_api.list_repo_files(dataset_id, repo_type='dataset') + if repo_files: + legacy_json_file = [] + python_files = [] + data_files = [] + for filename in repo_files: + if filename in {'.gitattributes', 'README.md'}: + continue + elif filename == f'{dataset_name}.py': + hf_api.delete_file(filename, dataset_id, repo_type='dataset', revision=revision, commit_message='Delete loading script') + elif filename == 'dataset_infos.json': + legacy_json_file.append(filename) + elif filename.endswith('.py'): + python_files.append(filename) + else: + data_files.append(filename) + if legacy_json_file: + hf_api.delete_file('dataset_infos.json', dataset_id, repo_type='dataset', revision=revision, commit_message='Delete legacy dataset_infos.json') + if python_files: + for filename in python_files: + hf_api.delete_file(filename, dataset_id, repo_type='dataset', revision=revision, commit_message='Delete loading script auxiliary file') + if data_files: + for filename in data_files: + hf_api.delete_file(filename, dataset_id, repo_type='dataset', revision=revision, commit_message='Delete data file') + +# File: datasets-main/src/datasets/info.py +"""""" +import copy +import dataclasses +import json +import os +import posixpath +from dataclasses import dataclass +from pathlib import Path +from typing import ClassVar, Dict, List, Optional, Union +import fsspec +from fsspec.core import url_to_fs +from huggingface_hub import DatasetCard, DatasetCardData +from . 
import config
+from .features import Features
+from .splits import SplitDict
+from .utils import Version
+from .utils.logging import get_logger
+from .utils.py_utils import asdict, unique_values
+logger = get_logger(__name__)
+
+@dataclass
+class SupervisedKeysData:
+    input: str = ''
+    output: str = ''
+
+@dataclass
+class DownloadChecksumsEntryData:
+    key: str = ''
+    value: str = ''
+
+class MissingCachedSizesConfigError(Exception):
+    pass
+
+class NonMatchingCachedSizesError(Exception):
+    pass
+
+@dataclass
+class PostProcessedInfo:
+    features: Optional[Features] = None
+    resources_checksums: Optional[dict] = None
+
+    def __post_init__(self):
+        if self.features is not None and (not isinstance(self.features, Features)):
+            self.features = Features.from_dict(self.features)
+
+    @classmethod
+    def from_dict(cls, post_processed_info_dict: dict) -> 'PostProcessedInfo':
+        field_names = {f.name for f in dataclasses.fields(cls)}
+        return cls(**{k: v for (k, v) in post_processed_info_dict.items() if k in field_names})
+
+@dataclass
+class DatasetInfo:
+    description: str = dataclasses.field(default_factory=str)
+    citation: str = dataclasses.field(default_factory=str)
+    homepage: str = dataclasses.field(default_factory=str)
+    license: str = dataclasses.field(default_factory=str)
+    features: Optional[Features] = None
+    post_processed: Optional[PostProcessedInfo] = None
+    supervised_keys: Optional[SupervisedKeysData] = None
+    builder_name: Optional[str] = None
+    dataset_name: Optional[str] = None
+    config_name: Optional[str] = None
+    version: Optional[Union[str, Version]] = None
+    splits: Optional[dict] = None
+    download_checksums: Optional[dict] = None
+    download_size: Optional[int] = None
+    post_processing_size: Optional[int] = None
+    dataset_size: Optional[int] = None
+    size_in_bytes: Optional[int] = None
+    _INCLUDED_INFO_IN_YAML: ClassVar[List[str]] = ['config_name', 'download_size', 'dataset_size', 'features', 'splits']
+
+    def __post_init__(self):
+        if self.features is not None and (not isinstance(self.features, Features)):
+            self.features = Features.from_dict(self.features)
+        if self.post_processed is not None and (not isinstance(self.post_processed, PostProcessedInfo)):
+            self.post_processed = PostProcessedInfo.from_dict(self.post_processed)
+        if self.version is not None and (not isinstance(self.version, Version)):
+            if isinstance(self.version, str):
+                self.version = Version(self.version)
+            else:
+                self.version = Version.from_dict(self.version)
+        if self.splits is not None and (not isinstance(self.splits, SplitDict)):
+            self.splits = SplitDict.from_split_dict(self.splits)
+        if self.supervised_keys is not None and (not isinstance(self.supervised_keys, SupervisedKeysData)):
+            if isinstance(self.supervised_keys, (tuple, list)):
+                self.supervised_keys = SupervisedKeysData(*self.supervised_keys)
+            else:
+                self.supervised_keys = SupervisedKeysData(**self.supervised_keys)
+
+    def write_to_directory(self, dataset_info_dir, pretty_print=False, storage_options: Optional[dict]=None):
+        fs: fsspec.AbstractFileSystem
+        (fs, *_) = url_to_fs(dataset_info_dir, **storage_options or {})
+        with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), 'wb') as f:
+            self._dump_info(f, pretty_print=pretty_print)
+        if self.license:
+            with fs.open(posixpath.join(dataset_info_dir, config.LICENSE_FILENAME), 'wb') as f:
+                self._dump_license(f)
+
+    def _dump_info(self, file, pretty_print=False):
+        file.write(json.dumps(asdict(self), indent=4 if pretty_print else None).encode('utf-8'))
+
+    def _dump_license(self, file):
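+        # Usage sketch for DatasetInfo serialization (illustrative path and values, not part of the library):
+        #   info = DatasetInfo(description='demo', license='mit', version='1.0.0')
+        #   info.write_to_directory('./my_dataset_info', pretty_print=True)
+        #   reloaded = DatasetInfo.from_directory('./my_dataset_info')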
file.write(self.license.encode('utf-8')) + + @classmethod + def from_merge(cls, dataset_infos: List['DatasetInfo']): + dataset_infos = [dset_info.copy() for dset_info in dataset_infos if dset_info is not None] + if len(dataset_infos) > 0 and all((dataset_infos[0] == dset_info for dset_info in dataset_infos)): + return dataset_infos[0] + description = '\n\n'.join(unique_values((info.description for info in dataset_infos))).strip() + citation = '\n\n'.join(unique_values((info.citation for info in dataset_infos))).strip() + homepage = '\n\n'.join(unique_values((info.homepage for info in dataset_infos))).strip() + license = '\n\n'.join(unique_values((info.license for info in dataset_infos))).strip() + features = None + supervised_keys = None + return cls(description=description, citation=citation, homepage=homepage, license=license, features=features, supervised_keys=supervised_keys) + + @classmethod + def from_directory(cls, dataset_info_dir: str, storage_options: Optional[dict]=None) -> 'DatasetInfo': + fs: fsspec.AbstractFileSystem + (fs, *_) = url_to_fs(dataset_info_dir, **storage_options or {}) + logger.info(f'Loading Dataset info from {dataset_info_dir}') + if not dataset_info_dir: + raise ValueError('Calling DatasetInfo.from_directory() with undefined dataset_info_dir.') + with fs.open(posixpath.join(dataset_info_dir, config.DATASET_INFO_FILENAME), 'r', encoding='utf-8') as f: + dataset_info_dict = json.load(f) + return cls.from_dict(dataset_info_dict) + + @classmethod + def from_dict(cls, dataset_info_dict: dict) -> 'DatasetInfo': + field_names = {f.name for f in dataclasses.fields(cls)} + return cls(**{k: v for (k, v) in dataset_info_dict.items() if k in field_names}) + + def update(self, other_dataset_info: 'DatasetInfo', ignore_none=True): + self_dict = self.__dict__ + self_dict.update(**{k: copy.deepcopy(v) for (k, v) in other_dataset_info.__dict__.items() if v is not None or not ignore_none}) + + def copy(self) -> 'DatasetInfo': + return self.__class__(**{k: copy.deepcopy(v) for (k, v) in self.__dict__.items()}) + + def _to_yaml_dict(self) -> dict: + yaml_dict = {} + dataset_info_dict = asdict(self) + for key in dataset_info_dict: + if key in self._INCLUDED_INFO_IN_YAML: + value = getattr(self, key) + if hasattr(value, '_to_yaml_list'): + yaml_dict[key] = value._to_yaml_list() + elif hasattr(value, '_to_yaml_string'): + yaml_dict[key] = value._to_yaml_string() + else: + yaml_dict[key] = value + return yaml_dict + + @classmethod + def _from_yaml_dict(cls, yaml_data: dict) -> 'DatasetInfo': + yaml_data = copy.deepcopy(yaml_data) + if yaml_data.get('features') is not None: + yaml_data['features'] = Features._from_yaml_list(yaml_data['features']) + if yaml_data.get('splits') is not None: + yaml_data['splits'] = SplitDict._from_yaml_list(yaml_data['splits']) + field_names = {f.name for f in dataclasses.fields(cls)} + return cls(**{k: v for (k, v) in yaml_data.items() if k in field_names}) + +class DatasetInfosDict(Dict[str, DatasetInfo]): + + def write_to_directory(self, dataset_infos_dir, overwrite=False, pretty_print=False) -> None: + total_dataset_infos = {} + dataset_infos_path = os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME) + dataset_readme_path = os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME) + if not overwrite: + total_dataset_infos = self.from_directory(dataset_infos_dir) + total_dataset_infos.update(self) + if os.path.exists(dataset_infos_path): + with open(dataset_infos_path, 'w', encoding='utf-8') as f: + dataset_infos_dict = {config_name: 
asdict(dset_info) for (config_name, dset_info) in total_dataset_infos.items()} + json.dump(dataset_infos_dict, f, indent=4 if pretty_print else None) + if os.path.exists(dataset_readme_path): + dataset_card = DatasetCard.load(dataset_readme_path) + dataset_card_data = dataset_card.data + else: + dataset_card = None + dataset_card_data = DatasetCardData() + if total_dataset_infos: + total_dataset_infos.to_dataset_card_data(dataset_card_data) + dataset_card = DatasetCard('---\n' + str(dataset_card_data) + '\n---\n') if dataset_card is None else dataset_card + dataset_card.save(Path(dataset_readme_path)) + + @classmethod + def from_directory(cls, dataset_infos_dir) -> 'DatasetInfosDict': + logger.info(f'Loading Dataset Infos from {dataset_infos_dir}') + if os.path.exists(os.path.join(dataset_infos_dir, config.REPOCARD_FILENAME)): + dataset_card_data = DatasetCard.load(Path(dataset_infos_dir) / config.REPOCARD_FILENAME).data + if 'dataset_info' in dataset_card_data: + return cls.from_dataset_card_data(dataset_card_data) + if os.path.exists(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME)): + with open(os.path.join(dataset_infos_dir, config.DATASETDICT_INFOS_FILENAME), encoding='utf-8') as f: + return cls({config_name: DatasetInfo.from_dict(dataset_info_dict) for (config_name, dataset_info_dict) in json.load(f).items()}) + else: + return cls() + + @classmethod + def from_dataset_card_data(cls, dataset_card_data: DatasetCardData) -> 'DatasetInfosDict': + if isinstance(dataset_card_data.get('dataset_info'), (list, dict)): + if isinstance(dataset_card_data['dataset_info'], list): + return cls({dataset_info_yaml_dict.get('config_name', 'default'): DatasetInfo._from_yaml_dict(dataset_info_yaml_dict) for dataset_info_yaml_dict in dataset_card_data['dataset_info']}) + else: + dataset_info = DatasetInfo._from_yaml_dict(dataset_card_data['dataset_info']) + dataset_info.config_name = dataset_card_data['dataset_info'].get('config_name', 'default') + return cls({dataset_info.config_name: dataset_info}) + else: + return cls() + + def to_dataset_card_data(self, dataset_card_data: DatasetCardData) -> None: + if self: + if 'dataset_info' in dataset_card_data and isinstance(dataset_card_data['dataset_info'], dict): + dataset_metadata_infos = {dataset_card_data['dataset_info'].get('config_name', 'default'): dataset_card_data['dataset_info']} + elif 'dataset_info' in dataset_card_data and isinstance(dataset_card_data['dataset_info'], list): + dataset_metadata_infos = {config_metadata['config_name']: config_metadata for config_metadata in dataset_card_data['dataset_info']} + else: + dataset_metadata_infos = {} + total_dataset_infos = {**dataset_metadata_infos, **{config_name: dset_info._to_yaml_dict() for (config_name, dset_info) in self.items()}} + for (config_name, dset_info_yaml_dict) in total_dataset_infos.items(): + dset_info_yaml_dict['config_name'] = config_name + if len(total_dataset_infos) == 1: + dataset_card_data['dataset_info'] = next(iter(total_dataset_infos.values())) + config_name = dataset_card_data['dataset_info'].pop('config_name', None) + if config_name != 'default': + dataset_card_data['dataset_info'] = {'config_name': config_name, **dataset_card_data['dataset_info']} + else: + dataset_card_data['dataset_info'] = [] + for (config_name, dataset_info_yaml_dict) in sorted(total_dataset_infos.items()): + dataset_info_yaml_dict.pop('config_name', None) + dataset_info_yaml_dict = {'config_name': config_name, **dataset_info_yaml_dict} + 
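+                    # re-insert config_name first so it leads each entry in the README YAML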
dataset_card_data['dataset_info'].append(dataset_info_yaml_dict) + +# File: datasets-main/src/datasets/inspect.py +"""""" +import os +from typing import Dict, List, Mapping, Optional, Sequence, Union +from .download.download_config import DownloadConfig +from .download.download_manager import DownloadMode +from .download.streaming_download_manager import StreamingDownloadManager +from .info import DatasetInfo +from .load import dataset_module_factory, get_dataset_builder_class, load_dataset_builder +from .utils.logging import get_logger +from .utils.version import Version +logger = get_logger(__name__) + +class SplitsNotFoundError(ValueError): + pass + +def get_dataset_infos(path: str, data_files: Optional[Union[Dict, List, str]]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, revision: Optional[Union[str, Version]]=None, token: Optional[Union[bool, str]]=None, **config_kwargs): + config_names = get_dataset_config_names(path=path, revision=revision, download_config=download_config, download_mode=download_mode, data_files=data_files, token=token) + return {config_name: get_dataset_config_info(path=path, config_name=config_name, data_files=data_files, download_config=download_config, download_mode=download_mode, revision=revision, token=token, **config_kwargs) for config_name in config_names} + +def get_dataset_config_names(path: str, revision: Optional[Union[str, Version]]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, dynamic_modules_path: Optional[str]=None, data_files: Optional[Union[Dict, List, str]]=None, **download_kwargs): + dataset_module = dataset_module_factory(path, revision=revision, download_config=download_config, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path, data_files=data_files, **download_kwargs) + builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path)) + return list(builder_cls.builder_configs.keys()) or [dataset_module.builder_kwargs.get('config_name', builder_cls.DEFAULT_CONFIG_NAME or 'default')] + +def get_dataset_default_config_name(path: str, revision: Optional[Union[str, Version]]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, dynamic_modules_path: Optional[str]=None, data_files: Optional[Union[Dict, List, str]]=None, **download_kwargs) -> Optional[str]: + dataset_module = dataset_module_factory(path, revision=revision, download_config=download_config, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path, data_files=data_files, **download_kwargs) + builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basename(path)) + builder_configs = list(builder_cls.builder_configs.keys()) + if builder_configs: + default_config_name = builder_configs[0] if len(builder_configs) == 1 else None + else: + default_config_name = 'default' + return builder_cls.DEFAULT_CONFIG_NAME or default_config_name + +def get_dataset_config_info(path: str, config_name: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, revision: Optional[Union[str, Version]]=None, token: Optional[Union[bool, str]]=None, **config_kwargs) -> DatasetInfo: + builder = load_dataset_builder(path, name=config_name, data_files=data_files, download_config=download_config, 
download_mode=download_mode, revision=revision, token=token, **config_kwargs) + info = builder.info + if info.splits is None: + download_config = download_config.copy() if download_config else DownloadConfig() + if token is not None: + download_config.token = token + builder._check_manual_download(StreamingDownloadManager(base_path=builder.base_path, download_config=download_config)) + try: + info.splits = {split_generator.name: {'name': split_generator.name, 'dataset_name': path} for split_generator in builder._split_generators(StreamingDownloadManager(base_path=builder.base_path, download_config=download_config))} + except Exception as err: + raise SplitsNotFoundError('The split names could not be parsed from the dataset config.') from err + return info + +def get_dataset_split_names(path: str, config_name: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, revision: Optional[Union[str, Version]]=None, token: Optional[Union[bool, str]]=None, **config_kwargs): + info = get_dataset_config_info(path, config_name=config_name, data_files=data_files, download_config=download_config, download_mode=download_mode, revision=revision, token=token, **config_kwargs) + return list(info.splits.keys()) + +# File: datasets-main/src/datasets/io/abc.py +from abc import ABC, abstractmethod +from typing import Optional, Union +from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit +from ..utils.typing import NestedDataStructureLike, PathLike + +class AbstractDatasetReader(ABC): + + def __init__(self, path_or_paths: Optional[NestedDataStructureLike[PathLike]]=None, split: Optional[NamedSplit]=None, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, streaming: bool=False, num_proc: Optional[int]=None, **kwargs): + self.path_or_paths = path_or_paths + self.split = split if split or isinstance(path_or_paths, dict) else 'train' + self.features = features + self.cache_dir = cache_dir + self.keep_in_memory = keep_in_memory + self.streaming = streaming + self.num_proc = num_proc + self.kwargs = kwargs + + @abstractmethod + def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: + pass + +class AbstractDatasetInputStream(ABC): + + def __init__(self, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, streaming: bool=False, num_proc: Optional[int]=None, **kwargs): + self.features = features + self.cache_dir = cache_dir + self.keep_in_memory = keep_in_memory + self.streaming = streaming + self.num_proc = num_proc + self.kwargs = kwargs + + @abstractmethod + def read(self) -> Union[Dataset, IterableDataset]: + pass + +# File: datasets-main/src/datasets/io/csv.py +import multiprocessing +import os +from typing import BinaryIO, Optional, Union +import fsspec +from .. 
import Dataset, Features, NamedSplit, config +from ..formatting import query_table +from ..packaged_modules.csv.csv import Csv +from ..utils import tqdm as hf_tqdm +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + +class CsvDatasetReader(AbstractDatasetReader): + + def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit]=None, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, streaming: bool=False, num_proc: Optional[int]=None, **kwargs): + super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs) + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + self.builder = Csv(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs) + + def read(self): + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc) + dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory) + return dataset + +class CsvDatasetWriter: + + def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int]=None, num_proc: Optional[int]=None, storage_options: Optional[dict]=None, **to_csv_kwargs): + if num_proc is not None and num_proc <= 0: + raise ValueError(f'num_proc {num_proc} must be an integer > 0.') + self.dataset = dataset + self.path_or_buf = path_or_buf + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.encoding = 'utf-8' + self.storage_options = storage_options or {} + self.to_csv_kwargs = to_csv_kwargs + + def write(self) -> int: + _ = self.to_csv_kwargs.pop('path_or_buf', None) + header = self.to_csv_kwargs.pop('header', True) + index = self.to_csv_kwargs.pop('index', False) + if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): + with fsspec.open(self.path_or_buf, 'wb', **self.storage_options or {}) as buffer: + written = self._write(file_obj=buffer, header=header, index=index, **self.to_csv_kwargs) + else: + written = self._write(file_obj=self.path_or_buf, header=header, index=index, **self.to_csv_kwargs) + return written + + def _batch_csv(self, args): + (offset, header, index, to_csv_kwargs) = args + batch = query_table(table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices) + csv_str = batch.to_pandas().to_csv(path_or_buf=None, header=header if offset == 0 else False, index=index, **to_csv_kwargs) + return csv_str.encode(self.encoding) + + def _write(self, file_obj: BinaryIO, header, index, **to_csv_kwargs) -> int: + written = 0 + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm(range(0, len(self.dataset), self.batch_size), unit='ba', desc='Creating CSV from Arrow format'): + csv_str = self._batch_csv((offset, header, index, to_csv_kwargs)) + written += file_obj.write(csv_str) + else: + (num_rows, batch_size) = (len(self.dataset), self.batch_size) + with multiprocessing.Pool(self.num_proc) as pool: + for csv_str in hf_tqdm(pool.imap(self._batch_csv, [(offset, header, index, 
to_csv_kwargs) for offset in range(0, num_rows, batch_size)]), total=num_rows // batch_size + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', desc='Creating CSV from Arrow format'): + written += file_obj.write(csv_str) + return written + +# File: datasets-main/src/datasets/io/generator.py +from typing import Callable, Optional +from .. import Features, NamedSplit, Split +from ..packaged_modules.generator.generator import Generator +from .abc import AbstractDatasetInputStream + +class GeneratorDatasetInputStream(AbstractDatasetInputStream): + + def __init__(self, generator: Callable, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, streaming: bool=False, gen_kwargs: Optional[dict]=None, num_proc: Optional[int]=None, split: NamedSplit=Split.TRAIN, **kwargs): + super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs) + self.builder = Generator(cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, split=split, **kwargs) + + def read(self): + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.builder.config.split) + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc) + dataset = self.builder.as_dataset(split=self.builder.config.split, verification_mode=verification_mode, in_memory=self.keep_in_memory) + return dataset + +# File: datasets-main/src/datasets/io/json.py +import multiprocessing +import os +from typing import BinaryIO, Optional, Union +import fsspec +from .. 
import Dataset, Features, NamedSplit, config +from ..formatting import query_table +from ..packaged_modules.json.json import Json +from ..utils import tqdm as hf_tqdm +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + +class JsonDatasetReader(AbstractDatasetReader): + + def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit]=None, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, streaming: bool=False, field: Optional[str]=None, num_proc: Optional[int]=None, **kwargs): + super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs) + self.field = field + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + self.builder = Json(cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs) + + def read(self): + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc) + dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory) + return dataset + +class JsonDatasetWriter: + + def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int]=None, num_proc: Optional[int]=None, storage_options: Optional[dict]=None, **to_json_kwargs): + if num_proc is not None and num_proc <= 0: + raise ValueError(f'num_proc {num_proc} must be an integer > 0.') + self.dataset = dataset + self.path_or_buf = path_or_buf + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.encoding = 'utf-8' + self.storage_options = storage_options or {} + self.to_json_kwargs = to_json_kwargs + + def write(self) -> int: + _ = self.to_json_kwargs.pop('path_or_buf', None) + orient = self.to_json_kwargs.pop('orient', 'records') + lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False) + if 'index' not in self.to_json_kwargs and orient in ['split', 'table']: + self.to_json_kwargs['index'] = False + default_compression = 'infer' if isinstance(self.path_or_buf, (str, bytes, os.PathLike)) else None + compression = self.to_json_kwargs.pop('compression', default_compression) + if compression not in [None, 'infer', 'gzip', 'bz2', 'xz']: + raise NotImplementedError(f'`datasets` currently does not support {compression} compression') + if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): + with fsspec.open(self.path_or_buf, 'wb', compression=compression, **self.storage_options or {}) as buffer: + written = self._write(file_obj=buffer, orient=orient, lines=lines, **self.to_json_kwargs) + else: + if compression: + raise NotImplementedError(f'The compression parameter is not supported when writing to a buffer, but compression={compression} was passed. 
Please provide a local path instead.') + written = self._write(file_obj=self.path_or_buf, orient=orient, lines=lines, **self.to_json_kwargs) + return written + + def _batch_json(self, args): + (offset, orient, lines, to_json_kwargs) = args + batch = query_table(table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices) + json_str = batch.to_pandas().to_json(path_or_buf=None, orient=orient, lines=lines, **to_json_kwargs) + if not json_str.endswith('\n'): + json_str += '\n' + return json_str.encode(self.encoding) + + def _write(self, file_obj: BinaryIO, orient, lines, **to_json_kwargs) -> int: + written = 0 + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm(range(0, len(self.dataset), self.batch_size), unit='ba', desc='Creating json from Arrow format'): + json_str = self._batch_json((offset, orient, lines, to_json_kwargs)) + written += file_obj.write(json_str) + else: + (num_rows, batch_size) = (len(self.dataset), self.batch_size) + with multiprocessing.Pool(self.num_proc) as pool: + for json_str in hf_tqdm(pool.imap(self._batch_json, [(offset, orient, lines, to_json_kwargs) for offset in range(0, num_rows, batch_size)]), total=num_rows // batch_size + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', desc='Creating json from Arrow format'): + written += file_obj.write(json_str) + return written + +# File: datasets-main/src/datasets/io/parquet.py +import os +from typing import BinaryIO, Optional, Union +import fsspec +import numpy as np +import pyarrow.parquet as pq +from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config +from ..features.features import FeatureType, _visit +from ..formatting import query_table +from ..packaged_modules import _PACKAGED_DATASETS_MODULES +from ..packaged_modules.parquet.parquet import Parquet +from ..utils import tqdm as hf_tqdm +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + +def get_writer_batch_size(features: Features) -> Optional[int]: + batch_size = np.inf + + def set_batch_size(feature: FeatureType) -> None: + nonlocal batch_size + if isinstance(feature, Image): + batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS) + elif isinstance(feature, Audio): + batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS) + elif isinstance(feature, Value) and feature.dtype == 'binary': + batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS) + _visit(features, set_batch_size) + return None if batch_size is np.inf else batch_size + +class ParquetDatasetReader(AbstractDatasetReader): + + def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit]=None, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, streaming: bool=False, num_proc: Optional[int]=None, **kwargs): + super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs) + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + hash = _PACKAGED_DATASETS_MODULES['parquet'][1] + self.builder = Parquet(cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs) + + def read(self): + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + else: + download_config = None + download_mode = None + verification_mode = None + 
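+            # leaving these as None lets download_and_prepare fall back to the builder defaults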
base_path = None + self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc) + dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory) + return dataset + +class ParquetDatasetWriter: + + def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int]=None, storage_options: Optional[dict]=None, **parquet_writer_kwargs): + self.dataset = dataset + self.path_or_buf = path_or_buf + self.batch_size = batch_size or get_writer_batch_size(dataset.features) + self.storage_options = storage_options or {} + self.parquet_writer_kwargs = parquet_writer_kwargs + + def write(self) -> int: + batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE + if isinstance(self.path_or_buf, (str, bytes, os.PathLike)): + with fsspec.open(self.path_or_buf, 'wb', **self.storage_options or {}) as buffer: + written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs) + else: + written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs) + return written + + def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int: + written = 0 + _ = parquet_writer_kwargs.pop('path_or_buf', None) + schema = self.dataset.features.arrow_schema + writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs) + for offset in hf_tqdm(range(0, len(self.dataset), batch_size), unit='ba', desc='Creating parquet from Arrow format'): + batch = query_table(table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices) + writer.write_table(batch) + written += batch.nbytes + writer.close() + return written + +# File: datasets-main/src/datasets/io/spark.py +from typing import Optional +import pyspark +from .. import Features, NamedSplit +from ..download import DownloadMode +from ..packaged_modules.spark.spark import Spark +from .abc import AbstractDatasetReader + +class SparkDatasetReader(AbstractDatasetReader): + + def __init__(self, df: pyspark.sql.DataFrame, split: Optional[NamedSplit]=None, features: Optional[Features]=None, streaming: bool=True, cache_dir: str=None, keep_in_memory: bool=False, working_dir: str=None, load_from_cache_file: bool=True, file_format: str='arrow', **kwargs): + super().__init__(split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs) + self._load_from_cache_file = load_from_cache_file + self._file_format = file_format + self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs) + + def read(self): + if self.streaming: + return self.builder.as_streaming_dataset(split=self.split) + download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD + self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format) + return self.builder.as_dataset(split=self.split) + +# File: datasets-main/src/datasets/io/sql.py +import multiprocessing +from typing import TYPE_CHECKING, Optional, Union +from .. 
import Dataset, Features, config +from ..formatting import query_table +from ..packaged_modules.sql.sql import Sql +from ..utils import tqdm as hf_tqdm +from .abc import AbstractDatasetInputStream +if TYPE_CHECKING: + import sqlite3 + import sqlalchemy + +class SqlDatasetReader(AbstractDatasetInputStream): + + def __init__(self, sql: Union[str, 'sqlalchemy.sql.Selectable'], con: Union[str, 'sqlalchemy.engine.Connection', 'sqlalchemy.engine.Engine', 'sqlite3.Connection'], features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, **kwargs): + super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs) + self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs) + + def read(self): + download_config = None + download_mode = None + verification_mode = None + base_path = None + self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path) + dataset = self.builder.as_dataset(split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory) + return dataset + +class SqlDatasetWriter: + + def __init__(self, dataset: Dataset, name: str, con: Union[str, 'sqlalchemy.engine.Connection', 'sqlalchemy.engine.Engine', 'sqlite3.Connection'], batch_size: Optional[int]=None, num_proc: Optional[int]=None, **to_sql_kwargs): + if num_proc is not None and num_proc <= 0: + raise ValueError(f'num_proc {num_proc} must be an integer > 0.') + self.dataset = dataset + self.name = name + self.con = con + self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE + self.num_proc = num_proc + self.to_sql_kwargs = to_sql_kwargs + + def write(self) -> int: + _ = self.to_sql_kwargs.pop('sql', None) + _ = self.to_sql_kwargs.pop('con', None) + index = self.to_sql_kwargs.pop('index', False) + written = self._write(index=index, **self.to_sql_kwargs) + return written + + def _batch_sql(self, args): + (offset, index, to_sql_kwargs) = args + to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs + batch = query_table(table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices) + df = batch.to_pandas() + num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs) + return num_rows or len(df) + + def _write(self, index, **to_sql_kwargs) -> int: + written = 0 + if self.num_proc is None or self.num_proc == 1: + for offset in hf_tqdm(range(0, len(self.dataset), self.batch_size), unit='ba', desc='Creating SQL from Arrow format'): + written += self._batch_sql((offset, index, to_sql_kwargs)) + else: + (num_rows, batch_size) = (len(self.dataset), self.batch_size) + with multiprocessing.Pool(self.num_proc) as pool: + for num_rows in hf_tqdm(pool.imap(self._batch_sql, [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)]), total=num_rows // batch_size + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', desc='Creating SQL from Arrow format'): + written += num_rows + return written + +# File: datasets-main/src/datasets/io/text.py +from typing import Optional +from .. 
import Features, NamedSplit +from ..packaged_modules.text.text import Text +from ..utils.typing import NestedDataStructureLike, PathLike +from .abc import AbstractDatasetReader + +class TextDatasetReader(AbstractDatasetReader): + + def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit]=None, features: Optional[Features]=None, cache_dir: str=None, keep_in_memory: bool=False, streaming: bool=False, num_proc: Optional[int]=None, **kwargs): + super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs) + path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths} + self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs) + + def read(self): + if self.streaming: + dataset = self.builder.as_streaming_dataset(split=self.split) + else: + download_config = None + download_mode = None + verification_mode = None + base_path = None + self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc) + dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory) + return dataset + +# File: datasets-main/src/datasets/iterable_dataset.py +import copy +import itertools +import sys +from collections import Counter +from copy import deepcopy +from dataclasses import dataclass +from functools import partial +from itertools import cycle, islice +from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union +import fsspec.asyn +import numpy as np +import pyarrow as pa +from . 
import config +from .arrow_dataset import Dataset, DatasetInfoMixin +from .features import Features +from .features.features import FeatureType, _align_features, _check_if_features_can_be_aligned, cast_to_python_objects +from .formatting import PythonFormatter, TensorFormatter, get_format_type_from_alias, get_formatter +from .info import DatasetInfo +from .splits import NamedSplit, Split +from .table import cast_table_to_features, read_schema_from_file, table_cast +from .utils.logging import get_logger +from .utils.py_utils import Literal +from .utils.sharding import _merge_gen_kwargs, _number_of_shards_in_gen_kwargs, _shuffle_gen_kwargs, _split_gen_kwargs +if TYPE_CHECKING: + import torch +logger = get_logger(__name__) +Key = Union[int, str] + +def identity_func(x): + return x + +def _rename_columns_fn(example: Dict, column_mapping: Dict[str, str]): + if any((col not in example for col in column_mapping)): + raise ValueError(f'Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(column_mapping) - set(example)} are not in the dataset.') + if any((col in example for col in column_mapping.values())): + raise ValueError(f'Error when renaming {list(column_mapping)} to {list(column_mapping.values())}: columns {set(example) - set(column_mapping.values())} are already in the dataset.') + return {new_column_name: example[original_column_name] for (original_column_name, new_column_name) in column_mapping.items()} + +def add_column_fn(example: Dict, idx: int, name: str, column: List[Dict]): + if name in example: + raise ValueError(f'Error when adding {name}: column {name} is already in the dataset.') + return {name: column[idx]} + +def _infer_features_from_batch(batch: Dict[str, list], try_features: Optional[Features]=None) -> Features: + pa_table = pa.Table.from_pydict(batch) + if try_features is not None: + try: + pa_table = table_cast(pa_table, pa.schema(try_features.type)) + except (TypeError, pa.ArrowInvalid, pa.ArrowNotImplementedError): + pass + return Features.from_arrow_schema(pa_table.schema) + +def _examples_to_batch(examples: List[Dict[str, Any]]) -> Dict[str, list]: + cols = {col: None for example in examples for col in example} + arrays = [[example.get(col) for example in examples] for col in cols] + return dict(zip(cols, arrays)) + +def _batch_to_examples(batch: Dict[str, list]) -> Iterator[Dict[str, Any]]: + n_examples = len(batch[next(iter(batch))]) + for i in range(n_examples): + yield {col: array[i] for (col, array) in batch.items()} + +def _convert_to_arrow(iterable: Iterable[Tuple[Key, dict]], batch_size: int, drop_last_batch: bool=False) -> Iterator[Tuple[Key, pa.Table]]: + if batch_size is None or batch_size <= 0: + yield ('all', pa.Table.from_pylist(cast_to_python_objects([example for (_, example) in iterable], only_1d_for_numpy=True))) + return + iterator = iter(iterable) + for (key, example) in iterator: + iterator_batch = islice(iterator, batch_size - 1) + key_examples_list = [(key, example)] + list(iterator_batch) + if len(key_examples_list) < batch_size and drop_last_batch: + return + (keys, examples) = zip(*key_examples_list) + new_key = '_'.join((str(key) for key in keys)) + yield (new_key, pa.Table.from_pylist(cast_to_python_objects(examples, only_1d_for_numpy=True))) + +class _BaseExamplesIterable: + + def __init__(self) -> None: + self._state_dict: Optional[Union[list, dict]] = None + + def __iter__(self) -> Iterator[Tuple[Key, dict]]: + raise NotImplementedError(f"{type(self)} doesn't implement __iter__ yet") + + @property + 
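+    # base iterables only yield Python examples; Arrow-backed subclasses override iter_arrow to yield pa.Table chunks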
def iter_arrow(self) -> Optional[Callable[[], Iterator[Tuple[Key, pa.Table]]]]: + return None + + def shuffle_data_sources(self, generator: np.random.Generator) -> '_BaseExamplesIterable': + raise NotImplementedError(f"{type(self)} doesn't implement shuffle_data_sources yet") + + def shard_data_sources(self, worker_id: int, num_workers: int) -> '_BaseExamplesIterable': + raise NotImplementedError(f"{type(self)} doesn't implement shard_data_sources yet") + + def split_shard_indices_by_worker(self, worker_id: int, num_workers: int) -> List[int]: + return list(range(worker_id, self.n_shards, num_workers)) + + @property + def n_shards(self) -> int: + raise NotImplementedError(f"{type(self)} doesn't implement n_shards yet") + + def _init_state_dict(self) -> dict: + raise NotImplementedError(f"{type(self)} doesn't implement _init_state_dict yet") + + def load_state_dict(self, state_dict: dict) -> dict: + + def _inner_load_state_dict(state, new_state): + if new_state is not None and isinstance(state, dict): + for key in new_state: + state[key] = _inner_load_state_dict(state[key], new_state[key]) + return state + elif new_state is not None and isinstance(state, list): + for i in range(len(state)): + state[i] = _inner_load_state_dict(state[i], new_state[i]) + return state + return new_state + return _inner_load_state_dict(self._state_dict, state_dict) + + def state_dict(self) -> dict: + if self._state_dict: + return copy.deepcopy(self._state_dict) + raise RuntimeError('State dict is not initialized, please call ex_iterable._init_state_dict() first.') + +class ExamplesIterable(_BaseExamplesIterable): + + def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict): + super().__init__() + self.generate_examples_fn = generate_examples_fn + self.kwargs = kwargs + + def _init_state_dict(self) -> dict: + self._state_dict = {'shard_idx': 0, 'shard_example_idx': 0} + return self._state_dict + + def __iter__(self): + shard_idx_start = self._state_dict['shard_idx'] if self._state_dict else 0 + for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards), shard_idx_start, None): + shard_example_idx_start = self._state_dict['shard_example_idx'] if self._state_dict else 0 + for key_example in islice(self.generate_examples_fn(**gen_kwags), shard_example_idx_start, None): + if self._state_dict: + self._state_dict['shard_example_idx'] += 1 + yield key_example + if self._state_dict: + self._state_dict['shard_idx'] += 1 + self._state_dict['shard_example_idx'] = 0 + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'ExamplesIterable': + return ShuffledDataSourcesExamplesIterable(self.generate_examples_fn, self.kwargs, generator) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'ExamplesIterable': + gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards) + shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers) + requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices]) + return ExamplesIterable(self.generate_examples_fn, requested_gen_kwargs) + + @property + def n_shards(self) -> int: + return _number_of_shards_in_gen_kwargs(self.kwargs) + +class ShuffledDataSourcesExamplesIterable(ExamplesIterable): + + def __init__(self, generate_examples_fn: Callable[..., Tuple[Key, dict]], kwargs: dict, generator: np.random.Generator): + super().__init__(generate_examples_fn, kwargs) + self.generator = deepcopy(generator) + + def _init_state_dict(self) -> dict: + 
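+        # resumable iteration state: which shard we are in and how many of its examples were already yielded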
self._state_dict = {'shard_idx': 0, 'shard_example_idx': 0} + return self._state_dict + + def __iter__(self): + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + shard_idx_start = self._state_dict['shard_idx'] if self._state_dict else 0 + for gen_kwags in islice(_split_gen_kwargs(kwargs_with_shuffled_shards, max_num_jobs=self.n_shards), shard_idx_start, None): + shard_example_idx_start = self._state_dict['shard_example_idx'] if self._state_dict else 0 + for key_example in islice(self.generate_examples_fn(**gen_kwags), shard_example_idx_start, None): + if self._state_dict: + self._state_dict['shard_example_idx'] += 1 + yield key_example + if self._state_dict: + self._state_dict['shard_idx'] += 1 + self._state_dict['shard_example_idx'] = 0 + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'ExamplesIterable': + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + return ExamplesIterable(self.generate_examples_fn, kwargs_with_shuffled_shards).shard_data_sources(worker_id, num_workers) + +class ArrowExamplesIterable(_BaseExamplesIterable): + + def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict): + super().__init__() + self.generate_tables_fn = generate_tables_fn + self.kwargs = kwargs + + @property + def iter_arrow(self): + return self._iter_arrow + + def _init_state_dict(self) -> dict: + self._state_dict = {'shard_idx': 0, 'shard_example_idx': 0} + return self._state_dict + + def __iter__(self): + formatter = PythonFormatter() + shard_idx_start = self._state_dict['shard_idx'] if self._state_dict else 0 + for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards), shard_idx_start, None): + shard_example_idx_start = self._state_dict['shard_example_idx'] if self._state_dict else 0 + shard_example_idx = 0 + for (key, pa_table) in self.generate_tables_fn(**gen_kwags): + if shard_example_idx + len(pa_table) <= shard_example_idx_start: + shard_example_idx += len(pa_table) + continue + for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER): + formatted_batch = formatter.format_batch(pa_subtable) + for example in _batch_to_examples(formatted_batch): + if shard_example_idx >= shard_example_idx_start: + if self._state_dict: + self._state_dict['shard_example_idx'] += 1 + yield (key, example) + shard_example_idx += 1 + if self._state_dict: + self._state_dict['shard_idx'] += 1 + self._state_dict['shard_example_idx'] = 0 + + def _iter_arrow(self): + shard_idx_start = self._state_dict['shard_idx'] if self._state_dict else 0 + for gen_kwags in islice(_split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards), shard_idx_start, None): + shard_example_idx_start = self._state_dict['shard_example_idx'] if self._state_dict else 0 + shard_example_idx = 0 + for (key, pa_table) in self.generate_tables_fn(**gen_kwags): + shard_example_idx += len(pa_table) + if shard_example_idx <= shard_example_idx_start: + continue + if self._state_dict: + self._state_dict['shard_example_idx'] += len(pa_table) + yield (key, pa_table) + if self._state_dict: + self._state_dict['shard_idx'] += 1 + self._state_dict['shard_example_idx'] = 0 + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'ArrowExamplesIterable': + return ShuffledDataSourcesArrowExamplesIterable(self.generate_tables_fn, self.kwargs, generator) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 
'ArrowExamplesIterable': + gen_kwargs_list = _split_gen_kwargs(self.kwargs, max_num_jobs=self.n_shards) + shard_indices = self.split_shard_indices_by_worker(worker_id, num_workers) + requested_gen_kwargs = _merge_gen_kwargs([gen_kwargs_list[i] for i in shard_indices]) + return ArrowExamplesIterable(self.generate_tables_fn, requested_gen_kwargs) + + @property + def n_shards(self) -> int: + return _number_of_shards_in_gen_kwargs(self.kwargs) + +class ShuffledDataSourcesArrowExamplesIterable(ArrowExamplesIterable): + + def __init__(self, generate_tables_fn: Callable[..., Tuple[Key, pa.Table]], kwargs: dict, generator: np.random.Generator): + super().__init__(generate_tables_fn, kwargs) + self.generator = deepcopy(generator) + + def _init_state_dict(self) -> dict: + self._state_dict = {'shard_idx': 0, 'shard_example_idx': 0} + return self._state_dict + + def __iter__(self): + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + formatter = PythonFormatter() + shard_idx_start = self._state_dict['shard_idx'] if self._state_dict else 0 + for gen_kwags in islice(_split_gen_kwargs(kwargs_with_shuffled_shards, max_num_jobs=self.n_shards), shard_idx_start, None): + shard_example_idx_start = self._state_dict['shard_example_idx'] if self._state_dict else 0 + shard_example_idx = 0 + for (key, pa_table) in self.generate_tables_fn(**gen_kwags): + if shard_example_idx + len(pa_table) <= shard_example_idx_start: + shard_example_idx += len(pa_table) + continue + for pa_subtable in pa_table.to_reader(max_chunksize=config.ARROW_READER_BATCH_SIZE_IN_DATASET_ITER): + formatted_batch = formatter.format_batch(pa_subtable) + for example in _batch_to_examples(formatted_batch): + if shard_example_idx >= shard_example_idx_start: + if self._state_dict: + self._state_dict['shard_example_idx'] += 1 + yield (key, example) + shard_example_idx += 1 + if self._state_dict: + self._state_dict['shard_idx'] += 1 + self._state_dict['shard_example_idx'] = 0 + + def _iter_arrow(self): + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + shard_idx_start = self._state_dict['shard_idx'] if self._state_dict else 0 + for gen_kwags in islice(_split_gen_kwargs(kwargs_with_shuffled_shards, max_num_jobs=self.n_shards), shard_idx_start, None): + shard_example_idx_start = self._state_dict['shard_example_idx'] if self._state_dict else 0 + shard_example_idx = 0 + for (key, pa_table) in self.generate_tables_fn(**gen_kwags): + shard_example_idx += len(pa_table) + if shard_example_idx <= shard_example_idx_start: + continue + if self._state_dict: + self._state_dict['shard_example_idx'] += len(pa_table) + yield (key, pa_table) + if self._state_dict: + self._state_dict['shard_idx'] += 1 + self._state_dict['shard_example_idx'] = 0 + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'ArrowExamplesIterable': + rng = deepcopy(self.generator) + kwargs_with_shuffled_shards = _shuffle_gen_kwargs(rng, self.kwargs) + return ArrowExamplesIterable(self.generate_tables_fn, kwargs_with_shuffled_shards).shard_data_sources(worker_id, num_workers) + +class RebatchedArrowExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterable: _BaseExamplesIterable, batch_size: Optional[int], drop_last_batch: bool=False): + super().__init__() + self.ex_iterable = ex_iterable + self.batch_size = batch_size + self.drop_last_batch = drop_last_batch + + @property + def iter_arrow(self): + return self._iter_arrow + + def _init_state_dict(self) -> 
dict: + self._state_dict = {'ex_iterable': self.ex_iterable._init_state_dict(), 'previous_state': None, 'batch_idx': 0, 'num_chunks_since_previous_state': 0, 'cropped_chunk_length': 0} + return self._state_dict + + def __iter__(self): + yield from self.ex_iterable + + def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]: + if self._state_dict and self._state_dict['previous_state']: + self.ex_iterable.load_state_dict(self._state_dict['previous_state']) + if self.ex_iterable.iter_arrow: + iterator = self.ex_iterable.iter_arrow() + else: + iterator = _convert_to_arrow(self.ex_iterable, batch_size=1) + if self.batch_size is None or self.batch_size <= 0: + if self._state_dict and self._state_dict['batch_idx'] > 0: + return + all_pa_table = pa.concat_tables([pa_table for (_, pa_table) in iterator]) + if self._state_dict: + self._state_dict['batch_idx'] = 1 + yield ('all', all_pa_table) + return + keys_buffer = [] + chunks_buffer = [] + chunks_buffer_size = 0 + num_chunks_to_skip = self._state_dict['num_chunks_since_previous_state'] if self._state_dict else 0 + chunk_length_to_crop = self._state_dict['cropped_chunk_length'] if self._state_dict else 0 + if self._state_dict: + previous_state = self.ex_iterable.state_dict() + self._state_dict['previous_state'] = previous_state + for (key, pa_table) in iterator: + for (num_chunks_since_previous_state, chunk) in enumerate(pa_table.to_reader(max_chunksize=self.batch_size)): + if num_chunks_to_skip > 1: + num_chunks_to_skip -= 1 + continue + elif num_chunks_to_skip == 1 and chunk_length_to_crop == 0: + num_chunks_to_skip -= 1 + continue + elif num_chunks_to_skip == 1 and chunk_length_to_crop > 0: + chunk = chunk.slice(chunk_length_to_crop, len(chunk) - chunk_length_to_crop) + num_chunks_to_skip = 0 + chunk_length_to_crop = 0 + if len(chunk) == 0: + continue + if chunks_buffer_size + len(chunk) < self.batch_size: + keys_buffer.append(key) + chunks_buffer.append(chunk) + chunks_buffer_size += len(chunk) + continue + elif chunks_buffer_size + len(chunk) == self.batch_size: + keys_buffer.append(key) + chunks_buffer.append(chunk) + new_key = '_'.join((str(_key) for _key in keys_buffer)) + if self._state_dict: + self._state_dict['batch_idx'] += 1 + self._state_dict['num_chunks_since_previous_state'] += len(chunks_buffer) + self._state_dict['cropped_chunk_length'] = 0 + yield (new_key, pa.Table.from_batches(chunks_buffer)) + keys_buffer = [] + chunks_buffer = [] + chunks_buffer_size = 0 + if self._state_dict: + self._state_dict['previous_state'] = previous_state + self._state_dict['num_chunks_since_previous_state'] = num_chunks_since_previous_state + 1 + else: + cropped_chunk_length = self.batch_size - chunks_buffer_size + keys_buffer.append(f'{key}[:{cropped_chunk_length}]') + chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) + new_key = '_'.join((str(_key) for _key in keys_buffer)) + if self._state_dict: + self._state_dict['batch_idx'] += 1 + self._state_dict['num_chunks_since_previous_state'] += len(chunks_buffer) + self._state_dict['cropped_chunk_length'] = cropped_chunk_length + yield (new_key, pa.Table.from_batches(chunks_buffer)) + keys_buffer = [f'{key}[{cropped_chunk_length}:]'] + chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] + chunks_buffer_size = len(chunk) - cropped_chunk_length + if self._state_dict: + self._state_dict['previous_state'] = previous_state + self._state_dict['num_chunks_since_previous_state'] = num_chunks_since_previous_state + if self._state_dict: + previous_state = 
self.ex_iterable.state_dict() + if not self.drop_last_batch and chunks_buffer: + new_key = '_'.join((str(_key) for _key in keys_buffer)) + if self._state_dict: + self._state_dict['previous_state'] = previous_state + self._state_dict['batch_idx'] += 1 + self._state_dict['num_chunks_since_previous_state'] = 0 + self._state_dict['cropped_chunk_length'] = 0 + yield (new_key, pa.Table.from_batches(chunks_buffer)) + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'RebatchedArrowExamplesIterable': + return RebatchedArrowExamplesIterable(self.ex_iterable.shuffle_data_sources(generator), self.batch_size, self.drop_last_batch) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'RebatchedArrowExamplesIterable': + return RebatchedArrowExamplesIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), self.batch_size, self.drop_last_batch) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + +class SelectColumnsIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterable: _BaseExamplesIterable, column_names: List[str]): + super().__init__() + self.ex_iterable = ex_iterable + self.column_names = column_names + + @property + def iter_arrow(self): + if self.ex_iterable.iter_arrow: + return self._iter_arrow + + def _init_state_dict(self) -> dict: + self._state_dict = self.ex_iterable._init_state_dict() + return self._state_dict + + def __iter__(self): + for (idx, row) in self.ex_iterable: + yield (idx, {c: row[c] for c in self.column_names}) + + def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]: + for (idx, pa_table) in self.ex_iterable.iter_arrow(): + if len(pa_table) > 0: + yield (idx, pa_table.select(self.column_names)) + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'SelectColumnsIterable': + return SelectColumnsIterable(self.ex_iterable.shuffle_data_sources(generator), self.column_names) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'SelectColumnsIterable': + return SelectColumnsIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), self.column_names) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + +class StepExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterable: _BaseExamplesIterable, step: int, offset: int): + super().__init__() + self.ex_iterable = ex_iterable + self.step = step + self.offset = offset + + def _init_state_dict(self) -> dict: + self._state_dict = self.ex_iterable._init_state_dict() + return self._state_dict + + def __iter__(self): + ex_iterator = iter(self.ex_iterable) + while True: + batch = list(islice(ex_iterator, self.step)) + if len(batch) > self.offset: + yield batch[self.offset] + else: + break + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'StepExamplesIterable': + return StepExamplesIterable(self.ex_iterable.shuffle_data_sources(generator), step=self.step, offset=self.offset) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'StepExamplesIterable': + return StepExamplesIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), step=self.step, offset=self.offset) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + +class CyclingMultiSourcesExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterables: List[_BaseExamplesIterable], stopping_strategy: Literal['first_exhausted', 'all_exhausted']='first_exhausted'): + super().__init__() + self.ex_iterables = ex_iterables + 
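+ # `stopping_strategy` controls when cycling over the sources stops: with 'first_exhausted' the
+ # iteration breaks as soon as any source runs out of examples, while with 'all_exhausted'
+ # exhausted sources are re-iterated until every source has been fully consumed at least once,
+ # as implemented by `bool_strategy_func` below (np.any vs np.all over the `is_exhausted` flags).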
self.stopping_strategy = stopping_strategy + self.bool_strategy_func = np.all if stopping_strategy == 'all_exhausted' else np.any + + def _get_indices_iterator(self): + ex_iterable_idx = self._state_dict['ex_iterable_idx'] if self._state_dict else 0 + for next_ex_iterable_idx in islice(cycle(range(len(self.ex_iterables))), ex_iterable_idx + 1, None): + if self._state_dict: + self._state_dict['ex_iterable_idx'] = next_ex_iterable_idx + yield ex_iterable_idx + ex_iterable_idx = next_ex_iterable_idx + + def _init_state_dict(self) -> dict: + self._state_dict = {'ex_iterable_idx': 0, 'ex_iterables': [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables], 'previous_states': [None] * len(self.ex_iterables), 'is_exhausted': [False] * len(self.ex_iterables)} + return self._state_dict + + def __iter__(self): + nexts = [None] * len(self.ex_iterables) + if self._state_dict: + for i in range(len(self.ex_iterables)): + if self._state_dict['previous_states'][i] is not None: + self.ex_iterables[i].load_state_dict(self._state_dict['previous_states'][i]) + iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables] + indices_iterator = self._get_indices_iterator() + is_exhausted = np.array(self._state_dict['is_exhausted']) if self._state_dict else np.full(len(self.ex_iterables), False) + for i in indices_iterator: + if self.bool_strategy_func(is_exhausted): + break + if nexts[i] is None: + nexts[i] = next(iterators[i], False) + result = nexts[i] + if self._state_dict: + self._state_dict['previous_states'][i] = deepcopy(self._state_dict['ex_iterables'][i]) + nexts[i] = next(iterators[i], False) + if nexts[i] is False: + is_exhausted[i] = True + if self._state_dict: + self._state_dict['is_exhausted'][i] = True + nexts[i] = None + if self._state_dict: + self._state_dict['ex_iterables'][i] = self.ex_iterables[i]._init_state_dict() + self._state_dict['previous_states'][i] = None + iterators[i] = iter(self.ex_iterables[i]) + if result is not False: + yield result + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'CyclingMultiSourcesExamplesIterable': + ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables] + return CyclingMultiSourcesExamplesIterable(ex_iterables, self.stopping_strategy) + + @property + def n_shards(self) -> int: + return min((ex_iterable.n_shards for ex_iterable in self.ex_iterables)) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'CyclingMultiSourcesExamplesIterable': + return CyclingMultiSourcesExamplesIterable([iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables], stopping_strategy=self.stopping_strategy) + +class VerticallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterables: List[_BaseExamplesIterable]): + super().__init__() + self.ex_iterables = ex_iterables + + @property + def iter_arrow(self): + if all((ex_iterable.iter_arrow is not None for ex_iterable in self.ex_iterables)): + return self._iter_arrow + + def _init_state_dict(self) -> dict: + self._state_dict = {'ex_iterable_idx': 0, 'ex_iterables': [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables]} + return self._state_dict + + def __iter__(self): + ex_iterable_idx_start = self._state_dict['ex_iterable_idx'] if self._state_dict else 0 + for ex_iterable in islice(self.ex_iterables, ex_iterable_idx_start, None): + yield from ex_iterable + if self._state_dict: + self._state_dict['ex_iterable_idx'] += 1 + + def _iter_arrow(self): + 
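+ # Yield Arrow tables from each source iterable in order; when resuming from a saved state,
+ # the first `ex_iterable_idx` sources recorded in the state dict were already fully consumed
+ # and are skipped.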
ex_iterable_idx_start = self._state_dict['ex_iterable_idx'] if self._state_dict else 0 + for ex_iterable in islice(self.ex_iterables, ex_iterable_idx_start, None): + yield from ex_iterable.iter_arrow() + if self._state_dict: + self._state_dict['ex_iterable_idx'] += 1 + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'VerticallyConcatenatedMultiSourcesExamplesIterable': + rng = deepcopy(generator) + ex_iterables = list(self.ex_iterables) + rng.shuffle(ex_iterables) + ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in ex_iterables] + return VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables) + + @property + def n_shards(self) -> int: + return min((ex_iterable.n_shards for ex_iterable in self.ex_iterables)) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'VerticallyConcatenatedMultiSourcesExamplesIterable': + return VerticallyConcatenatedMultiSourcesExamplesIterable([iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]) + +def _check_column_names(column_names: List[str]): + counter = Counter(column_names) + if not all((count == 1 for count in counter.values())): + duplicated_columns = [col for col in counter if counter[col] > 1] + raise ValueError(f"The examples iterables can't have duplicated columns but columns {duplicated_columns} are duplicated.") + +class HorizontallyConcatenatedMultiSourcesExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterables: List[_BaseExamplesIterable]): + super().__init__() + self.ex_iterables = ex_iterables + + def _init_state_dict(self) -> dict: + self._state_dict = {'ex_iterables': [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables]} + return self._state_dict + + def __iter__(self): + ex_iterators = [iter(ex_iterable) for ex_iterable in self.ex_iterables] + for i in itertools.count(): + keys = [] + examples = [] + for ex_iterator in list(ex_iterators): + try: + (key, example) = next(ex_iterator) + keys.append(key) + examples.append(example) + except StopIteration: + ex_iterators.remove(ex_iterator) + if ex_iterators: + if i == 0: + _check_column_names([column_name for example in examples for column_name in example]) + new_example = {} + for example in examples: + new_example.update(example) + new_key = '_'.join((str(key) for key in keys)) + yield (new_key, new_example) + else: + break + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'HorizontallyConcatenatedMultiSourcesExamplesIterable': + return self + + @property + def n_shards(self) -> int: + return 1 + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'HorizontallyConcatenatedMultiSourcesExamplesIterable': + return HorizontallyConcatenatedMultiSourcesExamplesIterable([iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables]) + +class RandomlyCyclingMultiSourcesExamplesIterable(CyclingMultiSourcesExamplesIterable): + + def __init__(self, ex_iterables: List[_BaseExamplesIterable], generator: np.random.Generator, probabilities: Optional[List[float]]=None, stopping_strategy: Literal['first_exhausted', 'all_exhausted']='first_exhausted'): + super().__init__(ex_iterables, stopping_strategy) + self.generator = deepcopy(generator) + self.probabilities = probabilities + + def _get_indices_iterator(self): + rng = deepcopy(self.generator) + num_sources = len(self.ex_iterables) + random_batch_size = 1000 + index_offset = self._state_dict['bit_generator_index_offset'] if self._state_dict else 0 + if 
self._state_dict: + rng.bit_generator.state = self._state_dict['bit_generator_state'] + if self.probabilities is None: + while True: + for i in islice(rng.integers(0, num_sources, size=random_batch_size), index_offset, None): + index_offset = (index_offset + 1) % random_batch_size + if self._state_dict: + self._state_dict['bit_generator_index_offset'] = index_offset + if index_offset == 0: + self._state_dict['bit_generator_state'] = rng.bit_generator.state + yield int(i) + else: + while True: + for i in islice(rng.choice(num_sources, size=random_batch_size, p=self.probabilities), index_offset, None): + index_offset = (index_offset + 1) % random_batch_size + if self._state_dict: + self._state_dict['bit_generator_index_offset'] = index_offset + if index_offset == 0: + self._state_dict['bit_generator_state'] = rng.bit_generator.state + yield int(i) + + def _init_state_dict(self) -> dict: + self._state_dict = {'bit_generator_state': self.generator.bit_generator.state, 'bit_generator_index_offset': 0, 'ex_iterables': [ex_iterable._init_state_dict() for ex_iterable in self.ex_iterables], 'previous_states': [None] * len(self.ex_iterables), 'is_exhausted': [False] * len(self.ex_iterables)} + return self._state_dict + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'RandomlyCyclingMultiSourcesExamplesIterable': + ex_iterables = [ex_iterable.shuffle_data_sources(generator) for ex_iterable in self.ex_iterables] + return RandomlyCyclingMultiSourcesExamplesIterable(ex_iterables, generator=generator, probabilities=self.probabilities, stopping_strategy=self.stopping_strategy) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'RandomlyCyclingMultiSourcesExamplesIterable': + return RandomlyCyclingMultiSourcesExamplesIterable([iterable.shard_data_sources(worker_id, num_workers) for iterable in self.ex_iterables], self.generator, self.probabilities, self.stopping_strategy) + +class MappedExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterable: _BaseExamplesIterable, function: Callable, with_indices: bool=False, input_columns: Optional[List[str]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[List[str]]=None, fn_kwargs: Optional[dict]=None, formatting: Optional['FormattingConfig']=None): + super().__init__() + self.ex_iterable = ex_iterable + self.function = function + self.batched = batched + self.batch_size = batch_size + self.drop_last_batch = drop_last_batch + self.remove_columns = remove_columns + self.with_indices = with_indices + self.input_columns = input_columns + self.fn_kwargs = fn_kwargs or {} + self.formatting = formatting + if formatting and formatting.format_type == 'arrow': + if not isinstance(ex_iterable, RebatchedArrowExamplesIterable): + raise ValueError(f'The Arrow-formatted MappedExamplesIterable has underlying iterablethat is a {type(ex_iterable).__name__} instead of a RebatchedArrowExamplesIterable.') + elif ex_iterable.batch_size != (batch_size if batched else 1): + raise ValueError(f'The Arrow-formatted MappedExamplesIterable has batch_size={(batch_size if batched else 1)} which isdifferent from ex_iterable.batch_size={ex_iterable.batch_size!r} from its underlying iterable.') + + @property + def iter_arrow(self): + if self.formatting and self.formatting.format_type == 'arrow': + return self._iter_arrow + + def _init_state_dict(self) -> dict: + self._state_dict = {'ex_iterable': self.ex_iterable._init_state_dict(), 'previous_state': None, 
'num_examples_since_previous_state': 0, 'previous_state_example_idx': 0} + return self._state_dict + + def __iter__(self): + if self.formatting and self.formatting.format_type == 'arrow': + formatter = PythonFormatter() + for (key, pa_table) in self._iter_arrow(max_chunksize=1): + yield (key, formatter.format_row(pa_table)) + else: + yield from self._iter() + + def _iter(self): + current_idx = self._state_dict['previous_state_example_idx'] if self._state_dict else 0 + if self._state_dict and self._state_dict['previous_state']: + self.ex_iterable.load_state_dict(self._state_dict['previous_state']) + num_examples_to_skip = self._state_dict['num_examples_since_previous_state'] + else: + num_examples_to_skip = 0 + iterator = iter(self.ex_iterable) + if self.formatting: + formatter = get_formatter(self.formatting.format_type) + format_dict = formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects + else: + format_dict = None + if self.batched: + if self._state_dict: + self._state_dict['previous_state'] = self.ex_iterable.state_dict() + self._state_dict['num_examples_since_previous_state'] = 0 + self._state_dict['previous_state_example_idx'] = current_idx + for (key, example) in iterator: + iterator_batch = iterator if self.batch_size is None or self.batch_size <= 0 else islice(iterator, self.batch_size - 1) + key_examples_list = [(key, example)] + list(iterator_batch) + (keys, examples) = zip(*key_examples_list) + if self.drop_last_batch and self.batch_size is not None and (self.batch_size > 0) and (len(examples) < self.batch_size): + return + batch = _examples_to_batch(examples) + batch = format_dict(batch) if format_dict else batch + inputs = batch + function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns] + if self.with_indices: + function_args.append([current_idx + i for i in range(len(key_examples_list))]) + transformed_batch = dict(batch) + transformed_batch.update(self.function(*function_args, **self.fn_kwargs)) + if self.remove_columns: + for c in self.remove_columns: + del transformed_batch[c] + if transformed_batch: + first_col = next(iter(transformed_batch)) + bad_cols = [col for col in transformed_batch if len(transformed_batch[col]) != len(transformed_batch[first_col])] + if bad_cols: + raise ValueError(f'Column lengths mismatch: columns {bad_cols} have length {[len(transformed_batch[col]) for col in bad_cols]} while {first_col} has length {len(transformed_batch[first_col])}.') + new_key = '_'.join((str(key) for key in keys)) + for example in _batch_to_examples(transformed_batch): + current_idx += 1 + if self._state_dict: + self._state_dict['num_examples_since_previous_state'] += 1 + if num_examples_to_skip > 0: + num_examples_to_skip -= 1 + continue + yield (new_key, example) + if self._state_dict: + self._state_dict['previous_state'] = self.ex_iterable.state_dict() + self._state_dict['num_examples_since_previous_state'] = 0 + self._state_dict['previous_state_example_idx'] = current_idx + else: + for (key, example) in iterator: + example = dict(example) + example = format_dict(example) if format_dict else example + inputs = example + function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns] + if self.with_indices: + function_args.append(current_idx) + transformed_example = dict(example) + transformed_example.update(self.function(*function_args, **self.fn_kwargs)) + if self.remove_columns: + for c in self.remove_columns: + del 
transformed_example[c] + current_idx += 1 + if self._state_dict: + self._state_dict['previous_state_example_idx'] += 1 + yield (key, transformed_example) + + def _iter_arrow(self, max_chunksize: Optional[int]=None) -> Iterator[Tuple[Key, pa.Table]]: + if self.ex_iterable.iter_arrow: + iterator = self.ex_iterable.iter_arrow() + else: + iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1, drop_last_batch=self.drop_last_batch) + if self._state_dict and self._state_dict['previous_state']: + self.ex_iterable.load_state_dict(self._state_dict['previous_state']) + num_examples_to_skip = self._state_dict['num_examples_since_previous_state'] + else: + num_examples_to_skip = 0 + if self._state_dict and max_chunksize is not None: + self._state_dict['previous_state'] = self.ex_iterable.state_dict() + self._state_dict['num_examples_since_previous_state'] = 0 + current_idx = self._state_dict['previous_state_example_idx'] if self._state_dict else 0 + for (key, pa_table) in iterator: + if self.batched and self.batch_size is not None and (len(pa_table) < self.batch_size) and self.drop_last_batch: + return + function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns] + if self.with_indices: + if self.batched: + function_args.append([current_idx + i for i in range(len(pa_table))]) + else: + function_args.append(current_idx) + output_table = self.function(*function_args, **self.fn_kwargs) + if not isinstance(output_table, pa.Table): + raise TypeError(f'Provided `function` which is applied to pyarrow tables returns a variable of type {type(output_table)}. Make sure provided `function` returns a a pyarrow table to update the dataset.') + if self.remove_columns: + for column in self.remove_columns: + if column in output_table.column_names: + output_table = output_table.remove_column(output_table.column_names.index(column)) + if max_chunksize is None: + current_idx += len(pa_table) + if self._state_dict: + self._state_dict['previous_state_example_idx'] += len(pa_table) + yield (key, output_table) + else: + for (i, pa_subtable) in enumerate(output_table.to_reader(max_chunksize=max_chunksize)): + current_idx += 1 + if self._state_dict: + self._state_dict['num_examples_since_previous_state'] += 1 + if num_examples_to_skip > 0: + num_examples_to_skip -= 1 + continue + yield (f'{key}_{i}', pa_subtable) + if self._state_dict: + self._state_dict['previous_state'] = self.ex_iterable.state_dict() + self._state_dict['num_examples_since_previous_state'] = 0 + self._state_dict['previous_state_example_idx'] += len(pa_table) + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'MappedExamplesIterable': + return MappedExamplesIterable(self.ex_iterable.shuffle_data_sources(generator), function=self.function, with_indices=self.with_indices, input_columns=self.input_columns, batched=self.batched, batch_size=self.batch_size, drop_last_batch=self.drop_last_batch, remove_columns=self.remove_columns, fn_kwargs=self.fn_kwargs, formatting=self.formatting) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'MappedExamplesIterable': + return MappedExamplesIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), function=self.function, with_indices=self.with_indices, input_columns=self.input_columns, batched=self.batched, batch_size=self.batch_size, drop_last_batch=self.drop_last_batch, remove_columns=self.remove_columns, fn_kwargs=self.fn_kwargs, formatting=self.formatting) + + @property + def 
n_shards(self) -> int: + return self.ex_iterable.n_shards + +class FilteredExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterable: _BaseExamplesIterable, function: Callable, with_indices: bool=False, input_columns: Optional[List[str]]=None, batched: bool=False, batch_size: Optional[int]=1000, fn_kwargs: Optional[dict]=None, formatting: Optional['FormattingConfig']=None): + super().__init__() + self.ex_iterable = ex_iterable + self.function = function + self.batched = batched + self.batch_size = batch_size + self.with_indices = with_indices + self.input_columns = input_columns + self.fn_kwargs = fn_kwargs or {} + self.formatting = formatting + if formatting and formatting.format_type == 'arrow': + if not isinstance(ex_iterable, RebatchedArrowExamplesIterable): + raise ValueError(f'The Arrow-formatted FilteredExamplesIterable has underlying iterablethat is a {type(ex_iterable).__name__} instead of a RebatchedArrowExamplesIterable.') + elif ex_iterable.batch_size != (batch_size if batched else 1): + raise ValueError(f'The Arrow-formatted FilteredExamplesIterable has batch_size={(batch_size if batched else 1)} which isdifferent from ex_iterable.batch_size={ex_iterable.batch_size!r} from its underlying iterable.') + + @property + def iter_arrow(self): + if self.formatting and self.formatting.format_type == 'arrow': + return self._iter_arrow + + def _init_state_dict(self) -> dict: + self._state_dict = {'ex_iterable': self.ex_iterable._init_state_dict(), 'previous_state': None, 'num_examples_since_previous_state': 0, 'previous_state_example_idx': 0} + return self._state_dict + + def __iter__(self): + if self.formatting and self.formatting.format_type == 'arrow': + formatter = PythonFormatter() + for (key, pa_table) in self._iter_arrow(max_chunksize=1): + yield (key, formatter.format_row(pa_table)) + else: + yield from self._iter() + + def _iter(self): + current_idx = self._state_dict['previous_state_example_idx'] if self._state_dict else 0 + if self._state_dict and self._state_dict['previous_state']: + self.ex_iterable.load_state_dict(self._state_dict['previous_state']) + num_examples_to_skip = self._state_dict['num_examples_since_previous_state'] + else: + num_examples_to_skip = 0 + iterator = iter(self.ex_iterable) + if self.formatting: + formatter = get_formatter(self.formatting.format_type) + format_dict = formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects + else: + format_dict = None + if self.batched: + if self._state_dict: + self._state_dict['previous_state'] = self.ex_iterable.state_dict() + self._state_dict['num_examples_since_previous_state'] = 0 + self._state_dict['previous_state_example_idx'] = current_idx + for (key, example) in iterator: + iterator_batch = iterator if self.batch_size is None or self.batch_size <= 0 else islice(iterator, self.batch_size - 1) + key_examples_list = [(key, example)] + list(iterator_batch) + (keys, examples) = zip(*key_examples_list) + batch = _examples_to_batch(examples) + batch = format_dict(batch) if format_dict else batch + inputs = batch + function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns] + if self.with_indices: + function_args.append([current_idx + i for i in range(len(key_examples_list))]) + mask = self.function(*function_args, **self.fn_kwargs) + for (key_example, to_keep) in zip(key_examples_list, mask): + current_idx += 1 + if self._state_dict: + self._state_dict['num_examples_since_previous_state'] += 1 + if 
num_examples_to_skip > 0: + num_examples_to_skip -= 1 + continue + if to_keep: + yield key_example + if self._state_dict: + self._state_dict['previous_state'] = self.ex_iterable.state_dict() + self._state_dict['num_examples_since_previous_state'] = 0 + self._state_dict['previous_state_example_idx'] = current_idx + else: + for (key, example) in iterator: + example = dict(example) + inputs = format_dict(example) if format_dict else example + function_args = [inputs] if self.input_columns is None else [inputs[col] for col in self.input_columns] + if self.with_indices: + function_args.append(current_idx) + to_keep = self.function(*function_args, **self.fn_kwargs) + current_idx += 1 + if self._state_dict: + self._state_dict['previous_state_example_idx'] += 1 + if to_keep: + yield (key, example) + + def _iter_arrow(self, max_chunksize: Optional[int]=None): + if self.ex_iterable.iter_arrow: + iterator = self.ex_iterable.iter_arrow() + else: + iterator = _convert_to_arrow(self.ex_iterable, batch_size=self.batch_size if self.batched else 1) + if self._state_dict and self._state_dict['previous_state']: + self.ex_iterable.load_state_dict(self._state_dict['previous_state']) + num_examples_to_skip = self._state_dict['num_examples_since_previous_state'] + else: + num_examples_to_skip = 0 + if self._state_dict and max_chunksize is not None: + self._state_dict['previous_state'] = self.ex_iterable.state_dict() + self._state_dict['num_examples_since_previous_state'] = 0 + current_idx = self._state_dict['previous_state_example_idx'] if self._state_dict else 0 + for (key, pa_table) in iterator: + if self.batched and self.batch_size is not None and (len(pa_table) < self.batch_size) and self.drop_last_batch: + return + function_args = [pa_table] if self.input_columns is None else [pa_table[col] for col in self.input_columns] + if self.with_indices: + if self.batched: + function_args.append([current_idx + i for i in range(len(pa_table))]) + else: + function_args.append(current_idx) + mask = self.function(*function_args, **self.fn_kwargs) + if self.batched: + output_table = pa_table.filter(mask) + elif mask.as_py() if isinstance(mask, pa.BooleanScalar) else mask: + output_table = pa_table + else: + output_table = pa_table.slice(0, 0) + if max_chunksize is None: + current_idx += len(pa_table) + if self._state_dict: + self._state_dict['previous_state_example_idx'] += len(pa_table) + if len(output_table) > 0: + yield (key, output_table) + else: + for (i, pa_subtable) in enumerate(output_table.to_reader(max_chunksize=max_chunksize)): + current_idx += 1 + if self._state_dict: + self._state_dict['num_examples_since_previous_state'] += 1 + if num_examples_to_skip > 0: + num_examples_to_skip -= 1 + continue + yield (f'{key}_{i}', pa_subtable) + if self._state_dict: + self._state_dict['previous_state'] = self.ex_iterable.state_dict() + self._state_dict['num_examples_since_previous_state'] = 0 + self._state_dict['previous_state_example_idx'] += len(pa_table) + + def shuffle_data_sources(self, seed: Optional[int]) -> 'FilteredExamplesIterable': + return FilteredExamplesIterable(self.ex_iterable.shuffle_data_sources(seed), function=self.function, with_indices=self.with_indices, input_columns=self.input_columns, batched=self.batched, batch_size=self.batch_size) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'FilteredExamplesIterable': + return FilteredExamplesIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), function=self.function, with_indices=self.with_indices, 
input_columns=self.input_columns, batched=self.batched, batch_size=self.batch_size) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + +class BufferShuffledExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterable: _BaseExamplesIterable, buffer_size: int, generator: np.random.Generator): + super().__init__() + self.ex_iterable = ex_iterable + self.buffer_size = buffer_size + self.generator = generator + + def _init_state_dict(self) -> dict: + self._state_dict = self.ex_iterable._init_state_dict() + self._original_state_dict = self.state_dict() + return self._state_dict + + def load_state_dict(self, state_dict: dict) -> dict: + if self._state_dict: + if state_dict != self._original_state_dict: + logger.warning('Loading a state dict of a shuffle buffer of a dataset without the buffer content.The shuffle buffer will be refilled before starting to yield new examples.') + return super().load_state_dict(state_dict) + + @staticmethod + def _iter_random_indices(rng: np.random.Generator, buffer_size: int, random_batch_size=1000) -> Iterator[int]: + while True: + yield from (int(i) for i in rng.integers(0, buffer_size, size=random_batch_size)) + + def __iter__(self): + buffer_size = self.buffer_size + rng = deepcopy(self.generator) + indices_iterator = self._iter_random_indices(rng, buffer_size) + mem_buffer = [] + for x in self.ex_iterable: + if len(mem_buffer) == buffer_size: + i = next(indices_iterator) + yield mem_buffer[i] + mem_buffer[i] = x + else: + mem_buffer.append(x) + rng.shuffle(mem_buffer) + yield from mem_buffer + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'BufferShuffledExamplesIterable': + return BufferShuffledExamplesIterable(self.ex_iterable.shuffle_data_sources(generator), buffer_size=self.buffer_size, generator=generator) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'BufferShuffledExamplesIterable': + return BufferShuffledExamplesIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), buffer_size=self.buffer_size, generator=self.generator) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + +class SkipExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterable: _BaseExamplesIterable, n: int, block_sources_order_when_shuffling: bool=True, split_when_sharding: bool=True): + super().__init__() + self.ex_iterable = ex_iterable + self.n = n + self.block_sources_order_when_shuffling = block_sources_order_when_shuffling + self.split_when_sharding = split_when_sharding + + def _init_state_dict(self) -> dict: + self._state_dict = {'skipped': False, 'ex_iterable': self.ex_iterable._init_state_dict()} + return self._state_dict + + def __iter__(self): + ex_iterable_idx_start = 0 if self._state_dict and self._state_dict['skipped'] else self.n + if self._state_dict: + self._state_dict['skipped'] = True + yield from islice(self.ex_iterable, ex_iterable_idx_start, None) + + @staticmethod + def split_number(num, n): + quotient = num // n + remainder = num % n + result = [quotient] * n + for i in range(remainder): + result[i] += 1 + return result + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'SkipExamplesIterable': + if self.block_sources_order_when_shuffling: + return self + else: + return SkipExamplesIterable(self.ex_iterable.shuffle_data_sources(generator), n=self.n, block_sources_order_when_shuffling=self.block_sources_order_when_shuffling, split_when_sharding=self.split_when_sharding) + + def 
shard_data_sources(self, worker_id: int, num_workers: int) -> 'SkipExamplesIterable': + if self.split_when_sharding: + return SkipExamplesIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), n=self.split_number(self.n, num_workers)[worker_id], block_sources_order_when_shuffling=self.block_sources_order_when_shuffling, split_when_sharding=self.split_when_sharding) + else: + return self + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + +class TakeExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterable: _BaseExamplesIterable, n: int, block_sources_order_when_shuffling: bool=True, split_when_sharding: bool=True): + super().__init__() + self.ex_iterable = ex_iterable + self.n = n + self.block_sources_order_when_shuffling = block_sources_order_when_shuffling + self.split_when_sharding = split_when_sharding + + def _init_state_dict(self) -> dict: + self._state_dict = {'num_taken': 0, 'ex_iterable': self.ex_iterable._init_state_dict()} + return self._state_dict + + def __iter__(self): + ex_iterable_num_taken = self._state_dict['num_taken'] if self._state_dict else 0 + for key_example in islice(self.ex_iterable, self.n - ex_iterable_num_taken): + if self._state_dict: + self._state_dict['num_taken'] += 1 + yield key_example + + @staticmethod + def split_number(num, n): + quotient = num // n + remainder = num % n + result = [quotient] * n + for i in range(remainder): + result[i] += 1 + return result + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'TakeExamplesIterable': + if self.block_sources_order_when_shuffling: + return self + else: + return TakeExamplesIterable(self.ex_iterable.shuffle_data_sources(generator), n=self.n, block_sources_order_when_shuffling=self.block_sources_order_when_shuffling, split_when_sharding=self.split_when_sharding) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'TakeExamplesIterable': + if self.split_when_sharding: + return TakeExamplesIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), n=self.split_number(self.n, num_workers)[worker_id], block_sources_order_when_shuffling=self.block_sources_order_when_shuffling, split_when_sharding=self.split_when_sharding) + else: + return TakeExamplesIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), n=self.n, block_sources_order_when_shuffling=self.block_sources_order_when_shuffling, split_when_sharding=self.split_when_sharding) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + +def _apply_feature_types_on_example(example: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]) -> dict: + example = dict(example) + for column_name in features: + if column_name not in example: + example[column_name] = None + encoded_example = features.encode_example(example) + decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id) + return decoded_example + +def _apply_feature_types_on_batch(batch: dict, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]) -> dict: + batch = dict(batch) + n_examples = len(batch[next(iter(batch))]) + for column_name in features: + if column_name not in batch: + batch[column_name] = [None] * n_examples + encoded_batch = features.encode_batch(batch) + decoded_batch = features.decode_batch(encoded_batch, token_per_repo_id=token_per_repo_id) + return decoded_batch + +class TypedExamplesIterable(_BaseExamplesIterable): + + def __init__(self, ex_iterable: 
_BaseExamplesIterable, features: Features, token_per_repo_id: Dict[str, Union[str, bool, None]]): + super().__init__() + self.ex_iterable = ex_iterable + self.features = features + self.token_per_repo_id = token_per_repo_id + + @property + def iter_arrow(self): + if self.ex_iterable.iter_arrow is not None: + return self._iter_arrow + + def _init_state_dict(self) -> dict: + self._state_dict = self.ex_iterable._init_state_dict() + return self._state_dict + + def __iter__(self): + for (key, example) in self.ex_iterable: + yield (key, _apply_feature_types_on_example(example, self.features, token_per_repo_id=self.token_per_repo_id)) + + def _iter_arrow(self) -> Iterator[Tuple[Key, pa.Table]]: + schema = self.features.arrow_schema + for (key, pa_table) in self.ex_iterable.iter_arrow(): + columns = set(pa_table.column_names) + for column_name in self.features: + if column_name not in columns: + col = pa.NullArray.from_buffers(pa.null(), len(pa_table), [None]) + pa_table = pa_table.append_column(column_name, col) + if pa_table.schema != schema: + pa_table = cast_table_to_features(pa_table, self.features) + yield (key, pa_table) + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'TypedExamplesIterable': + return TypedExamplesIterable(self.ex_iterable.shuffle_data_sources(generator), features=self.features, token_per_repo_id=self.token_per_repo_id) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'TypedExamplesIterable': + return TypedExamplesIterable(self.ex_iterable.shard_data_sources(worker_id, num_workers), features=self.features, token_per_repo_id=self.token_per_repo_id) + + @property + def n_shards(self) -> int: + return self.ex_iterable.n_shards + +@dataclass +class FormattingConfig: + format_type: Optional[str] + + def __post_init__(self): + if self.format_type == 'pandas': + raise NotImplementedError("The 'pandas' formatting is not implemented for iterable datasets. You can use 'numpy' or 'arrow' instead.") + +@dataclass +class ShufflingConfig: + generator: np.random.Generator + _original_seed: Optional[int] = None + +@dataclass +class DistributedConfig: + rank: int + world_size: int + +def _maybe_add_torch_iterable_dataset_parent_class(cls): + if config.TORCH_AVAILABLE: + import torch.utils.data + if torch.utils.data.IterableDataset not in cls.__bases__: + cls.__bases__ += (torch.utils.data.IterableDataset,) + +def _maybe_share_with_torch_persistent_workers(value: Union[int, 'torch.Tensor']) -> Union[int, 'torch.Tensor']: + if config.TORCH_AVAILABLE: + import torch + if isinstance(value, torch.Tensor): + return value.share_memory_() + else: + return torch.tensor(value).share_memory_() + else: + return value + +class IterableDataset(DatasetInfoMixin): + + def __init__(self, ex_iterable: _BaseExamplesIterable, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, formatting: Optional[FormattingConfig]=None, shuffling: Optional[ShufflingConfig]=None, distributed: Optional[DistributedConfig]=None, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]]=None): + if distributed and distributed.world_size > 1 and shuffling and (shuffling._original_seed is None): + raise RuntimeError("The dataset doesn't have a fixed random seed across nodes to shuffle and split the list of dataset shards by node. Please pass e.g. `seed=42` in `.shuffle()` to make all the nodes use the same seed. 
") + info = info.copy() if info is not None else DatasetInfo() + DatasetInfoMixin.__init__(self, info=info, split=split) + self._ex_iterable = copy.copy(ex_iterable) + self._formatting = formatting + self._shuffling = shuffling + self._distributed = distributed + self._token_per_repo_id: Dict[str, Union[str, bool, None]] = token_per_repo_id or {} + self._epoch: Union[int, 'torch.Tensor'] = _maybe_share_with_torch_persistent_workers(0) + self._starting_state_dict: Optional[dict] = None + self._prepared_ex_iterable = self._prepare_ex_iterable_for_iteration() + self._state_dict = self._prepared_ex_iterable._init_state_dict() + _maybe_add_torch_iterable_dataset_parent_class(self.__class__) + + def state_dict(self) -> dict: + return copy.deepcopy(self._state_dict) + + def load_state_dict(self, state_dict: dict) -> None: + self._prepared_ex_iterable.load_state_dict(state_dict) + self._starting_state_dict = state_dict + + def __repr__(self): + return f"IterableDataset({{\n features: {(list(self._info.features.keys()) if self._info.features is not None else 'Unknown')},\n n_shards: {self.n_shards}\n}})" + + def __getstate__(self): + return self.__dict__ + + def __setstate__(self, d): + self.__dict__ = d + self._epoch = _maybe_share_with_torch_persistent_workers(self._epoch) + _maybe_add_torch_iterable_dataset_parent_class(self.__class__) + + def _head(self, n=5): + return _examples_to_batch(list(self.take(n))) + + @property + def epoch(self) -> int: + return int(self._epoch) + + def _effective_generator(self): + if self._shuffling and self.epoch == 0: + return self._shuffling.generator + elif self._shuffling: + effective_seed = deepcopy(self._shuffling.generator).integers(0, 1 << 63) - self.epoch + effective_seed = (1 << 63) + effective_seed if effective_seed < 0 else effective_seed + return np.random.default_rng(effective_seed) + else: + raise ValueError('This dataset is not shuffled') + + @property + def n_shards(self) -> int: + if self._distributed and self._ex_iterable.n_shards % self._distributed.world_size == 0: + return self._ex_iterable.n_shards // self._distributed.world_size + return self._ex_iterable.n_shards + + def _iter_pytorch(self): + ex_iterable = self._prepare_ex_iterable_for_iteration() + fsspec.asyn.reset_lock() + import torch.utils.data + worker_info = torch.utils.data.get_worker_info() + if self._is_main_process() and ex_iterable.n_shards < worker_info.num_workers: + logger.warning(f'Too many dataloader workers: {worker_info.num_workers} (max is dataset.n_shards={ex_iterable.n_shards}). Stopping {worker_info.num_workers - ex_iterable.n_shards} dataloader workers.') + logger.info(f"To parallelize data loading, we give each process some shards (or data sources) to process. Therefore it's unnecessary to have a number of workers greater than dataset.n_shards={ex_iterable.n_shards}. 
To enable more parallelism, please split the dataset in more files than {ex_iterable.n_shards}.") + _log_prefix = f'node#{self._distributed.rank} ' if self._distributed else '' + shards_indices = ex_iterable.split_shard_indices_by_worker(worker_info.id, worker_info.num_workers) + if shards_indices: + logger.debug(f"{_log_prefix}dataloader worker#{worker_info.id}, ': Starting to iterate over {len(shards_indices)}/{ex_iterable.n_shards} shards.") + ex_iterable = ex_iterable.shard_data_sources(worker_id=worker_info.id, num_workers=worker_info.num_workers) + self._state_dict = ex_iterable._init_state_dict() + if self._starting_state_dict: + ex_iterable.load_state_dict(self._starting_state_dict) + if self._formatting: + formatter = get_formatter(self._formatting.format_type, features=self.features) + format_dict = formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects + else: + format_dict = None + if self._formatting and (ex_iterable.iter_arrow or self._formatting == 'arrow'): + if ex_iterable.iter_arrow: + iterator = ex_iterable.iter_arrow() + else: + iterator = _convert_to_arrow(ex_iterable, batch_size=1) + for (key, pa_table) in iterator: + yield formatter.format_row(pa_table) + return + else: + for (key, example) in ex_iterable: + if self.features: + example = _apply_feature_types_on_example(example, self.features, token_per_repo_id=self._token_per_repo_id) + yield (format_dict(example) if format_dict else example) + logger.debug(f"{_log_prefix}dataloader worker#{worker_info.id}, ': Finished iterating over {len(shards_indices)}/{ex_iterable.n_shards} shards.") + else: + logger.debug(f"{_log_prefix}dataloader worker#{worker_info.id}, ': Stopping... Number of dataset shards < num_workers ({ex_iterable.n_shards}<{worker_info.num_workers}).") + + def _is_main_process(self): + if self._distributed and self._distributed.rank > 0: + return False + if 'torch' in sys.modules: + import torch.utils.data + worker_info = torch.utils.data.get_worker_info() + if worker_info is not None and worker_info.id > 0: + return False + return True + + def _prepare_ex_iterable_for_iteration(self, batch_size: int=1, drop_last_batch: bool=False) -> _BaseExamplesIterable: + ex_iterable = self._ex_iterable + if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == 'arrow'): + ex_iterable = RebatchedArrowExamplesIterable(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) + if self._shuffling: + ex_iterable = ex_iterable.shuffle_data_sources(self._effective_generator()) + else: + ex_iterable = ex_iterable + if self._distributed: + rank = self._distributed.rank + world_size = self._distributed.world_size + if ex_iterable.n_shards % world_size == 0: + if self._is_main_process(): + n_shards_per_node = ex_iterable.n_shards // world_size + plural = 's' if n_shards_per_node > 1 else '' + logger.info(f'Assigning {n_shards_per_node} shard{plural} (or data source{plural}) of the dataset to each node.') + ex_iterable = ex_iterable.shard_data_sources(rank, world_size) + else: + if self._is_main_process(): + logger.info(f'Assigning 1 out of {world_size} examples of the dataset to each node. The others are skipped during the iteration.') + logger.info(f'It is more optimized to distribute the dataset shards (or data sources) across nodes. You can do that by using a dataset with number of shards that is a factor of world_size={world_size}. 
The current dataset has {ex_iterable.n_shards} which is not a factor of {world_size}') + ex_iterable = StepExamplesIterable(ex_iterable, step=world_size, offset=rank) + self._state_dict = ex_iterable._init_state_dict() + if self._starting_state_dict: + ex_iterable.load_state_dict(self._starting_state_dict) + return ex_iterable + + def __iter__(self): + if 'torch' in sys.modules: + import torch.utils.data + worker_info = torch.utils.data.get_worker_info() + if isinstance(self, torch.utils.data.IterableDataset) and worker_info is not None: + yield from self._iter_pytorch() + return + ex_iterable = self._prepare_ex_iterable_for_iteration() + if self._formatting: + formatter = get_formatter(self._formatting.format_type, features=self.features) + format_dict = formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects + else: + format_dict = None + if self._formatting and (ex_iterable.iter_arrow or self._formatting.format_type == 'arrow'): + if ex_iterable.iter_arrow: + iterator = ex_iterable.iter_arrow() + else: + iterator = _convert_to_arrow(ex_iterable, batch_size=1) + for (key, pa_table) in iterator: + yield formatter.format_row(pa_table) + return + for (key, example) in ex_iterable: + if self.features: + example = _apply_feature_types_on_example(example, self.features, token_per_repo_id=self._token_per_repo_id) + yield (format_dict(example) if format_dict else example) + + def iter(self, batch_size: int, drop_last_batch: bool=False): + if self._formatting: + formatter = get_formatter(self._formatting.format_type, features=self.features) + format_dict = formatter.recursive_tensorize if isinstance(formatter, TensorFormatter) else cast_to_python_objects + else: + format_dict = None + ex_iterable = self._prepare_ex_iterable_for_iteration(batch_size=batch_size, drop_last_batch=drop_last_batch) + if self._formatting and (ex_iterable.iter_arrow or self._formatting == 'arrow'): + if ex_iterable.iter_arrow: + iterator = ex_iterable.iter_arrow() + else: + iterator = _convert_to_arrow(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) + for (key, pa_table) in iterator: + yield formatter.format_batch(pa_table) + return + iterator = iter(ex_iterable) + for (key, example) in iterator: + examples = [example] + [example for (key, example) in islice(iterator, batch_size - 1)] + if drop_last_batch and len(examples) < batch_size: + return + batch = _examples_to_batch(examples) + if self.features: + batch = _apply_feature_types_on_batch(batch, self.features, token_per_repo_id=self._token_per_repo_id) + yield (format_dict(batch) if format_dict else batch) + + @staticmethod + def from_generator(generator: Callable, features: Optional[Features]=None, gen_kwargs: Optional[dict]=None, split: NamedSplit=Split.TRAIN) -> 'IterableDataset': + from .io.generator import GeneratorDatasetInputStream + return GeneratorDatasetInputStream(generator=generator, features=features, gen_kwargs=gen_kwargs, streaming=True, split=split).read() + + @staticmethod + def from_spark(df: 'pyspark.sql.DataFrame', split: Optional[NamedSplit]=None, features: Optional[Features]=None, **kwargs) -> 'IterableDataset': + from .io.spark import SparkDatasetReader + if sys.platform == 'win32': + raise EnvironmentError('IterableDataset.from_spark is not currently supported on Windows') + return SparkDatasetReader(df, split=split, features=features, streaming=True, **kwargs).read() + + @staticmethod + def from_file(filename: str) -> 'IterableDataset': + pa_table_schema = 
read_schema_from_file(filename) + inferred_features = Features.from_arrow_schema(pa_table_schema) + ex_iterable = ArrowExamplesIterable(Dataset._generate_tables_from_cache_file, kwargs={'filename': filename}) + return IterableDataset(ex_iterable=ex_iterable, info=DatasetInfo(features=inferred_features)) + + def with_format(self, type: Optional[str]=None) -> 'IterableDataset': + type = get_format_type_from_alias(type) + return IterableDataset(ex_iterable=self._ex_iterable, info=self._info.copy(), split=self._split, formatting=FormattingConfig(format_type=type), shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + def map(self, function: Optional[Callable]=None, with_indices: bool=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, drop_last_batch: bool=False, remove_columns: Optional[Union[str, List[str]]]=None, features: Optional[Features]=None, fn_kwargs: Optional[dict]=None) -> 'IterableDataset': + if isinstance(input_columns, str): + input_columns = [input_columns] + if isinstance(remove_columns, str): + remove_columns = [remove_columns] + if function is None: + function = identity_func + if fn_kwargs is None: + fn_kwargs = {} + ex_iterable = TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id) if self._info.features is not None else self._ex_iterable + ex_iterable = RebatchedArrowExamplesIterable(ex_iterable, batch_size=batch_size, drop_last_batch=drop_last_batch) if self._formatting and self._formatting.format_type == 'arrow' else ex_iterable + ex_iterable = MappedExamplesIterable(ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, drop_last_batch=drop_last_batch, remove_columns=remove_columns, fn_kwargs=fn_kwargs, formatting=self._formatting) + info = self.info.copy() + info.features = features + return IterableDataset(ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + def filter(self, function: Optional[Callable]=None, with_indices=False, input_columns: Optional[Union[str, List[str]]]=None, batched: bool=False, batch_size: Optional[int]=1000, fn_kwargs: Optional[dict]=None) -> 'IterableDataset': + if isinstance(input_columns, str): + input_columns = [input_columns] + info = copy.deepcopy(self._info) + info.features = None + ex_iterable = FilteredExamplesIterable(TypedExamplesIterable(self._ex_iterable, self._info.features, token_per_repo_id=self._token_per_repo_id) if self._info.features is not None else self._ex_iterable, function=function, with_indices=with_indices, input_columns=input_columns, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs, formatting=self._formatting) + return IterableDataset(ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + def shuffle(self, seed=None, generator: Optional[np.random.Generator]=None, buffer_size: int=1000) -> 'IterableDataset': + if generator is None: + generator = np.random.default_rng(seed) + else: + generator = deepcopy(generator) + shuffling = ShufflingConfig(generator=generator, _original_seed=seed) + return 
IterableDataset(ex_iterable=BufferShuffledExamplesIterable(self._ex_iterable, buffer_size=buffer_size, generator=generator), info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=shuffling, distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + def set_epoch(self, epoch: int): + self._epoch += epoch - self._epoch + + def skip(self, n: int) -> 'IterableDataset': + ex_iterable = SkipExamplesIterable(self._ex_iterable, n, block_sources_order_when_shuffling=self._shuffling is None, split_when_sharding=self._distributed is None) + return IterableDataset(ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + def take(self, n: int) -> 'IterableDataset': + ex_iterable = TakeExamplesIterable(self._ex_iterable, n, block_sources_order_when_shuffling=self._shuffling is None, split_when_sharding=self._distributed is None) + return IterableDataset(ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + @property + def column_names(self) -> Optional[List[str]]: + return list(self._info.features.keys()) if self._info.features is not None else None + + def add_column(self, name: str, column: Union[list, np.array]) -> 'IterableDataset': + return self.map(partial(add_column_fn, name=name, column=column), with_indices=True) + + def rename_column(self, original_column_name: str, new_column_name: str) -> 'IterableDataset': + return self.rename_columns({original_column_name: new_column_name}) + + def rename_columns(self, column_mapping: Dict[str, str]) -> 'IterableDataset': + original_features = self._info.features.copy() if self._info.features else None + ds_iterable = self.map(partial(_rename_columns_fn, column_mapping=column_mapping), remove_columns=list(column_mapping)) + if original_features is not None: + ds_iterable._info.features = Features({column_mapping[col] if col in column_mapping.keys() else col: feature for (col, feature) in original_features.items()}) + return ds_iterable + + def remove_columns(self, column_names: Union[str, List[str]]) -> 'IterableDataset': + original_features = self._info.features.copy() if self._info.features else None + ds_iterable = self.map(remove_columns=column_names) + if original_features is not None: + ds_iterable._info.features = original_features.copy() + for (col, _) in original_features.items(): + if col in column_names: + del ds_iterable._info.features[col] + return ds_iterable + + def select_columns(self, column_names: Union[str, List[str]]) -> 'IterableDataset': + if isinstance(column_names, str): + column_names = [column_names] + if self._info: + info = copy.deepcopy(self._info) + if self._info.features is not None: + missing_columns = set(column_names) - set(self._info.features.keys()) + if missing_columns: + raise ValueError(f'Column name {list(missing_columns)} not in the dataset. 
Columns in the dataset: {list(self._info.features.keys())}.') + info.features = Features({c: info.features[c] for c in column_names}) + ex_iterable = SelectColumnsIterable(self._ex_iterable, column_names) + return IterableDataset(ex_iterable=ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=self._shuffling, distributed=self._distributed, token_per_repo_id=self._token_per_repo_id) + + def cast_column(self, column: str, feature: FeatureType) -> 'IterableDataset': + info = self._info.copy() + info.features[column] = feature + return IterableDataset(ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + def cast(self, features: Features) -> 'IterableDataset': + info = self._info.copy() + info.features = features + return IterableDataset(ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + def _step(self, step: int, offset: int) -> 'IterableDataset': + ex_iterable = StepExamplesIterable(self._ex_iterable, step=step, offset=offset) + return IterableDataset(ex_iterable=ex_iterable, info=self._info.copy(), split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + def _resolve_features(self): + if self.features is not None: + return self + elif isinstance(self._ex_iterable, TypedExamplesIterable): + features = self._ex_iterable.features + else: + features = _infer_features_from_batch(self.with_format(None)._head()) + info = self.info.copy() + info.features = features + return IterableDataset(ex_iterable=self._ex_iterable, info=info, split=self._split, formatting=self._formatting, shuffling=copy.deepcopy(self._shuffling), distributed=copy.deepcopy(self._distributed), token_per_repo_id=self._token_per_repo_id) + + def batch(self, batch_size: int, drop_last_batch: bool=False) -> 'IterableDataset': + + def batch_fn(unbatched): + return {k: [v] for (k, v) in unbatched.items()} + return self.map(batch_fn, batched=True, batch_size=batch_size, drop_last_batch=drop_last_batch) + +def _concatenate_iterable_datasets(dsets: List[IterableDataset], info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, axis: int=0) -> IterableDataset: + dsets = [d._resolve_features() for d in dsets] + if axis == 0: + _check_if_features_can_be_aligned([dset.features for dset in dsets]) + else: + _check_column_names([col_name for dset in dsets for col_name in dset.features]) + features = Features({k: v for features in _align_features([dset.features for dset in dsets]) for (k, v) in features.items()}) + ex_iterables = [copy.deepcopy(d._ex_iterable) for d in dsets] + if axis == 0: + ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable(ex_iterables) + else: + ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable(ex_iterables) + if info is None: + info = DatasetInfo.from_merge([d.info for d in dsets]) + else: + info = info.copy() + info.features = features + token_per_repo_id = {repo_id: token for dataset in dsets for (repo_id, token) in dataset._token_per_repo_id.items()} + return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id) + +def 
_interleave_iterable_datasets(datasets: List[IterableDataset], probabilities: Optional[List[float]]=None, seed: Optional[int]=None, info: Optional[DatasetInfo]=None, split: Optional[NamedSplit]=None, stopping_strategy: Literal['first_exhausted', 'all_exhausted']='first_exhausted') -> IterableDataset: + datasets = [d._resolve_features() for d in datasets] + _check_if_features_can_be_aligned([dset.features for dset in datasets]) + features = Features({k: v for features in _align_features([dset.features for dset in datasets]) for (k, v) in features.items()}) + ex_iterables = [copy.deepcopy(d._ex_iterable) for d in datasets] + if probabilities is None: + ex_iterable = CyclingMultiSourcesExamplesIterable(ex_iterables, stopping_strategy=stopping_strategy) + else: + generator = np.random.default_rng(seed) + ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable(ex_iterables, generator=generator, probabilities=probabilities, stopping_strategy=stopping_strategy) + if info is None: + info = DatasetInfo.from_merge([d.info for d in datasets]) + else: + info = info.copy() + info.features = features + token_per_repo_id = {repo_id: token for dataset in datasets for (repo_id, token) in dataset._token_per_repo_id.items()} + return IterableDataset(ex_iterable=ex_iterable, info=info, split=split, token_per_repo_id=token_per_repo_id) + +def _split_by_node_iterable_dataset(dataset: IterableDataset, rank: int, world_size: int) -> IterableDataset: + if dataset._distributed: + rank = world_size * dataset._distributed.rank + rank + world_size = world_size * dataset._distributed.world_size + distributed = DistributedConfig(rank=rank, world_size=world_size) + return IterableDataset(ex_iterable=dataset._ex_iterable, info=dataset._info.copy(), split=dataset._split, formatting=dataset._formatting, shuffling=copy.deepcopy(dataset._shuffling), distributed=distributed, token_per_repo_id=dataset._token_per_repo_id) + +# File: datasets-main/src/datasets/keyhash.py +"""""" +from typing import Union +from huggingface_hub.utils import insecure_hashlib + +def _as_bytes(hash_data: Union[str, int, bytes]) -> bytes: + if isinstance(hash_data, bytes): + return hash_data + elif isinstance(hash_data, str): + hash_data = hash_data.replace('\\', '/') + elif isinstance(hash_data, int): + hash_data = str(hash_data) + else: + raise InvalidKeyError(hash_data) + return hash_data.encode('utf-8') + +class InvalidKeyError(Exception): + + def __init__(self, hash_data): + self.prefix = '\nFAILURE TO GENERATE DATASET: Invalid key type detected' + self.err_msg = f'\nFound Key {hash_data} of type {type(hash_data)}' + self.suffix = '\nKeys should be either str, int or bytes type' + super().__init__(f'{self.prefix}{self.err_msg}{self.suffix}') + +class DuplicatedKeysError(Exception): + + def __init__(self, key, duplicate_key_indices, fix_msg=''): + self.key = key + self.duplicate_key_indices = duplicate_key_indices + self.fix_msg = fix_msg + self.prefix = 'Found multiple examples generated with the same key' + if len(duplicate_key_indices) <= 20: + self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices)} have the key {key}" + else: + self.err_msg = f"\nThe examples at index {', '.join(duplicate_key_indices[:20])}... 
({len(duplicate_key_indices) - 20} more) have the key {key}" + self.suffix = '\n' + fix_msg if fix_msg else '' + super().__init__(f'{self.prefix}{self.err_msg}{self.suffix}') + +class KeyHasher: + + def __init__(self, hash_salt: str): + self._split_md5 = insecure_hashlib.md5(_as_bytes(hash_salt)) + + def hash(self, key: Union[str, int, bytes]) -> int: + md5 = self._split_md5.copy() + byte_key = _as_bytes(key) + md5.update(byte_key) + return int(md5.hexdigest(), 16) + +# File: datasets-main/src/datasets/load.py +"""""" +import filecmp +import glob +import importlib +import inspect +import json +import os +import posixpath +import shutil +import signal +import time +import warnings +from collections import Counter +from contextlib import nullcontext +from dataclasses import dataclass, field +from pathlib import Path +from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Type, Union +import fsspec +import requests +import yaml +from fsspec.core import url_to_fs +from huggingface_hub import DatasetCard, DatasetCardData, HfApi, HfFileSystem +from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError, RevisionNotFoundError, get_session +from . import config +from .arrow_dataset import Dataset +from .builder import BuilderConfig, DatasetBuilder +from .data_files import DEFAULT_PATTERNS_ALL, DataFilesDict, DataFilesList, DataFilesPatternsDict, DataFilesPatternsList, EmptyDatasetError, get_data_patterns, get_metadata_patterns, sanitize_patterns +from .dataset_dict import DatasetDict, IterableDatasetDict +from .download.download_config import DownloadConfig +from .download.download_manager import DownloadMode +from .download.streaming_download_manager import StreamingDownloadManager, xbasename, xglob, xjoin +from .exceptions import DataFilesNotFoundError, DatasetNotFoundError +from .features import Features +from .fingerprint import Hasher +from .info import DatasetInfo, DatasetInfosDict +from .iterable_dataset import IterableDataset +from .naming import camelcase_to_snakecase, snakecase_to_camelcase +from .packaged_modules import _EXTENSION_TO_MODULE, _MODULE_SUPPORTS_METADATA, _MODULE_TO_EXTENSIONS, _PACKAGED_DATASETS_MODULES, _hash_python_lines +from .splits import Split +from .utils import _dataset_viewer +from .utils.file_utils import OfflineModeIsEnabled, _raise_if_offline_mode_is_enabled, cached_path, get_datasets_user_agent, init_hf_modules, is_relative_path, relative_to_absolute_path, url_or_path_join +from .utils.hub import check_auth, hf_dataset_url +from .utils.info_utils import VerificationMode, is_small_dataset +from .utils.logging import get_logger +from .utils.metadata import MetadataConfigs +from .utils.py_utils import get_imports, lock_importable_file +from .utils.typing import PathLike +from .utils.version import Version +logger = get_logger(__name__) +ALL_ALLOWED_EXTENSIONS = list(_EXTENSION_TO_MODULE.keys()) + ['.zip'] + +def _raise_timeout_error(signum, frame): + raise ValueError('Loading this dataset requires you to execute custom code contained in the dataset repository on your local machine. 
Please set the option `trust_remote_code=True` to permit loading of this dataset.') + +def resolve_trust_remote_code(trust_remote_code: Optional[bool], repo_id: str) -> bool: + trust_remote_code = trust_remote_code if trust_remote_code is not None else config.HF_DATASETS_TRUST_REMOTE_CODE + if trust_remote_code is None: + if config.TIME_OUT_REMOTE_CODE > 0: + try: + signal.signal(signal.SIGALRM, _raise_timeout_error) + signal.alarm(config.TIME_OUT_REMOTE_CODE) + while trust_remote_code is None: + answer = input(f'The repository for {repo_id} contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\nYou can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\nDo you wish to run the custom code? [y/N] ') + if answer.lower() in ['yes', 'y', '1']: + trust_remote_code = True + elif answer.lower() in ['no', 'n', '0', '']: + trust_remote_code = False + signal.alarm(0) + except Exception: + raise ValueError(f'The repository for {repo_id} contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/{repo_id}.\nPlease pass the argument `trust_remote_code=True` to allow custom code to be run.') + else: + _raise_timeout_error(None, None) + return trust_remote_code + +def init_dynamic_modules(name: str=config.MODULE_NAME_FOR_DYNAMIC_MODULES, hf_modules_cache: Optional[Union[Path, str]]=None): + hf_modules_cache = init_hf_modules(hf_modules_cache) + dynamic_modules_path = os.path.join(hf_modules_cache, name) + os.makedirs(dynamic_modules_path, exist_ok=True) + if not os.path.exists(os.path.join(dynamic_modules_path, '__init__.py')): + with open(os.path.join(dynamic_modules_path, '__init__.py'), 'w'): + pass + return dynamic_modules_path + +def import_main_class(module_path) -> Optional[Type[DatasetBuilder]]: + module = importlib.import_module(module_path) + module_main_cls = None + for (name, obj) in module.__dict__.items(): + if inspect.isclass(obj) and issubclass(obj, DatasetBuilder): + if inspect.isabstract(obj): + continue + module_main_cls = obj + obj_module = inspect.getmodule(obj) + if obj_module is not None and module == obj_module: + break + return module_main_cls + +class _InitializeConfiguredDatasetBuilder: + + def __call__(self, builder_cls, metadata_configs, default_config_name, name): + obj = _InitializeConfiguredDatasetBuilder() + obj.__class__ = configure_builder_class(builder_cls, metadata_configs, default_config_name=default_config_name, dataset_name=name) + return obj + +def configure_builder_class(builder_cls: Type[DatasetBuilder], builder_configs: List[BuilderConfig], default_config_name: Optional[str], dataset_name: str) -> Type[DatasetBuilder]: + + class ConfiguredDatasetBuilder(builder_cls): + BUILDER_CONFIGS = builder_configs + DEFAULT_CONFIG_NAME = default_config_name + __module__ = builder_cls.__module__ + + def __reduce__(self): + parent_builder_cls = self.__class__.__mro__[1] + return (_InitializeConfiguredDatasetBuilder(), (parent_builder_cls, self.BUILDER_CONFIGS, self.DEFAULT_CONFIG_NAME, self.dataset_name), self.__dict__.copy()) + ConfiguredDatasetBuilder.__name__ = f'{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}' + ConfiguredDatasetBuilder.__qualname__ = f'{builder_cls.__name__.lower().capitalize()}{snakecase_to_camelcase(dataset_name)}' + return ConfiguredDatasetBuilder + +def get_dataset_builder_class(dataset_module: 'DatasetModule', 
dataset_name: Optional[str]=None) -> Type[DatasetBuilder]: + with lock_importable_file(dataset_module.importable_file_path) if dataset_module.importable_file_path else nullcontext(): + builder_cls = import_main_class(dataset_module.module_path) + if dataset_module.builder_configs_parameters.builder_configs: + dataset_name = dataset_name or dataset_module.builder_kwargs.get('dataset_name') + if dataset_name is None: + raise ValueError('dataset_name should be specified but got None') + builder_cls = configure_builder_class(builder_cls, builder_configs=dataset_module.builder_configs_parameters.builder_configs, default_config_name=dataset_module.builder_configs_parameters.default_config_name, dataset_name=dataset_name) + return builder_cls + +def files_to_hash(file_paths: List[str]) -> str: + to_use_files: List[Union[Path, str]] = [] + for file_path in file_paths: + if os.path.isdir(file_path): + to_use_files.extend(list(Path(file_path).rglob('*.[pP][yY]'))) + else: + to_use_files.append(file_path) + lines = [] + for file_path in to_use_files: + with open(file_path, encoding='utf-8') as f: + lines.extend(f.readlines()) + return _hash_python_lines(lines) + +def increase_load_count(name: str): + if not config.HF_HUB_OFFLINE and config.HF_UPDATE_DOWNLOAD_COUNTS: + try: + get_session().head('/'.join((config.S3_DATASETS_BUCKET_PREFIX, name, name + '.py')), user_agent=get_datasets_user_agent(), timeout=3) + except Exception: + pass + +def _download_additional_modules(name: str, base_path: str, imports: Tuple[str, str, str, str], download_config: Optional[DownloadConfig]) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]]]: + local_imports = [] + library_imports = [] + download_config = download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = 'Downloading extra modules' + for (import_type, import_name, import_path, sub_directory) in imports: + if import_type == 'library': + library_imports.append((import_name, import_path)) + continue + if import_name == name: + raise ValueError(f"Error in the {name} script, importing relative {import_name} module but {import_name} is the name of the script. 
Please change relative import {import_name} to another name and add a '# From: URL_OR_PATH' comment pointing to the original relative import file path.") + if import_type == 'internal': + url_or_filename = url_or_path_join(base_path, import_path + '.py') + elif import_type == 'external': + url_or_filename = import_path + else: + raise ValueError('Wrong import_type') + local_import_path = cached_path(url_or_filename, download_config=download_config) + if sub_directory is not None: + local_import_path = os.path.join(local_import_path, sub_directory) + local_imports.append((import_name, local_import_path)) + return (local_imports, library_imports) + +def _check_library_imports(name: str, library_imports: List[Tuple[str, str]]) -> None: + needs_to_be_installed = {} + for (library_import_name, library_import_path) in library_imports: + try: + lib = importlib.import_module(library_import_name) + except ImportError: + if library_import_name not in needs_to_be_installed or library_import_path != library_import_name: + needs_to_be_installed[library_import_name] = library_import_path + if needs_to_be_installed: + _dependencies_str = 'dependencies' if len(needs_to_be_installed) > 1 else 'dependency' + _them_str = 'them' if len(needs_to_be_installed) > 1 else 'it' + if 'sklearn' in needs_to_be_installed.keys(): + needs_to_be_installed['sklearn'] = 'scikit-learn' + if 'Bio' in needs_to_be_installed.keys(): + needs_to_be_installed['Bio'] = 'biopython' + raise ImportError(f"To be able to use {name}, you need to install the following {_dependencies_str}: {', '.join(needs_to_be_installed)}.\nPlease install {_them_str} using 'pip install {' '.join(needs_to_be_installed.values())}' for instance.") + +def _copy_script_and_other_resources_in_importable_dir(name: str, importable_directory_path: str, subdirectory_name: str, original_local_path: str, local_imports: List[Tuple[str, str]], additional_files: List[Tuple[str, str]], download_mode: Optional[Union[DownloadMode, str]]) -> str: + importable_subdirectory = os.path.join(importable_directory_path, subdirectory_name) + importable_file = os.path.join(importable_subdirectory, name + '.py') + with lock_importable_file(importable_file): + if download_mode == DownloadMode.FORCE_REDOWNLOAD and os.path.exists(importable_directory_path): + shutil.rmtree(importable_directory_path) + os.makedirs(importable_directory_path, exist_ok=True) + init_file_path = os.path.join(importable_directory_path, '__init__.py') + if not os.path.exists(init_file_path): + with open(init_file_path, 'w'): + pass + os.makedirs(importable_subdirectory, exist_ok=True) + init_file_path = os.path.join(importable_subdirectory, '__init__.py') + if not os.path.exists(init_file_path): + with open(init_file_path, 'w'): + pass + if not os.path.exists(importable_file): + shutil.copyfile(original_local_path, importable_file) + meta_path = os.path.splitext(importable_file)[0] + '.json' + if not os.path.exists(meta_path): + meta = {'original file path': original_local_path, 'local file path': importable_file} + with open(meta_path, 'w', encoding='utf-8') as meta_file: + json.dump(meta, meta_file) + for (import_name, import_path) in local_imports: + if os.path.isfile(import_path): + full_path_local_import = os.path.join(importable_subdirectory, import_name + '.py') + if not os.path.exists(full_path_local_import): + shutil.copyfile(import_path, full_path_local_import) + elif os.path.isdir(import_path): + full_path_local_import = os.path.join(importable_subdirectory, import_name) + if not 
os.path.exists(full_path_local_import): + shutil.copytree(import_path, full_path_local_import) + else: + raise ImportError(f'Error with local import at {import_path}') + for (file_name, original_path) in additional_files: + destination_additional_path = os.path.join(importable_subdirectory, file_name) + if not os.path.exists(destination_additional_path) or not filecmp.cmp(original_path, destination_additional_path): + shutil.copyfile(original_path, destination_additional_path) + return importable_file + +def _get_importable_file_path(dynamic_modules_path: str, module_namespace: str, subdirectory_name: str, name: str) -> str: + importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace('/', '--')) + return os.path.join(importable_directory_path, subdirectory_name, name.split('/')[-1] + '.py') + +def _create_importable_file(local_path: str, local_imports: List[Tuple[str, str]], additional_files: List[Tuple[str, str]], dynamic_modules_path: str, module_namespace: str, subdirectory_name: str, name: str, download_mode: DownloadMode) -> None: + importable_directory_path = os.path.join(dynamic_modules_path, module_namespace, name.replace('/', '--')) + Path(importable_directory_path).mkdir(parents=True, exist_ok=True) + (Path(importable_directory_path).parent / '__init__.py').touch(exist_ok=True) + importable_local_file = _copy_script_and_other_resources_in_importable_dir(name=name.split('/')[-1], importable_directory_path=importable_directory_path, subdirectory_name=subdirectory_name, original_local_path=local_path, local_imports=local_imports, additional_files=additional_files, download_mode=download_mode) + logger.debug(f'Created importable dataset file at {importable_local_file}') + +def _load_importable_file(dynamic_modules_path: str, module_namespace: str, subdirectory_name: str, name: str) -> Tuple[str, str]: + module_path = '.'.join([os.path.basename(dynamic_modules_path), module_namespace, name.replace('/', '--'), subdirectory_name, name.split('/')[-1]]) + return (module_path, subdirectory_name) + +def infer_module_for_data_files_list(data_files_list: DataFilesList, download_config: Optional[DownloadConfig]=None) -> Tuple[Optional[str], dict]: + extensions_counter = Counter((('.' 
+ suffix.lower(), xbasename(filepath) in ('metadata.jsonl', 'metadata.csv')) for filepath in data_files_list[:config.DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE] for suffix in xbasename(filepath).split('.')[1:])) + if extensions_counter: + + def sort_key(ext_count: Tuple[Tuple[str, bool], int]) -> Tuple[int, bool]: + ((ext, is_metadata), count) = ext_count + return (not is_metadata, count, ext == '.parquet', ext) + for ((ext, _), _) in sorted(extensions_counter.items(), key=sort_key, reverse=True): + if ext in _EXTENSION_TO_MODULE: + return _EXTENSION_TO_MODULE[ext] + elif ext == '.zip': + return infer_module_for_data_files_list_in_archives(data_files_list, download_config=download_config) + return (None, {}) + +def infer_module_for_data_files_list_in_archives(data_files_list: DataFilesList, download_config: Optional[DownloadConfig]=None) -> Tuple[Optional[str], dict]: + archived_files = [] + archive_files_counter = 0 + for filepath in data_files_list: + if str(filepath).endswith('.zip'): + archive_files_counter += 1 + if archive_files_counter > config.GLOBBED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE: + break + extracted = xjoin(StreamingDownloadManager().extract(filepath), '**') + archived_files += [f.split('::')[0] for f in xglob(extracted, recursive=True, download_config=download_config)[:config.ARCHIVED_DATA_FILES_MAX_NUMBER_FOR_MODULE_INFERENCE]] + extensions_counter = Counter(('.' + suffix.lower() for filepath in archived_files for suffix in xbasename(filepath).split('.')[1:])) + if extensions_counter: + most_common = extensions_counter.most_common(1)[0][0] + if most_common in _EXTENSION_TO_MODULE: + return _EXTENSION_TO_MODULE[most_common] + return (None, {}) + +def infer_module_for_data_files(data_files: DataFilesDict, path: Optional[str]=None, download_config: Optional[DownloadConfig]=None) -> Tuple[Optional[str], Dict[str, Any]]: + split_modules = {split: infer_module_for_data_files_list(data_files_list, download_config=download_config) for (split, data_files_list) in data_files.items()} + (module_name, default_builder_kwargs) = next(iter(split_modules.values())) + if any(((module_name, default_builder_kwargs) != split_module for split_module in split_modules.values())): + raise ValueError(f"Couldn't infer the same data file format for all splits. 
Got {split_modules}") + if not module_name: + raise DataFilesNotFoundError('No (supported) data files found' + (f' in {path}' if path else '')) + return (module_name, default_builder_kwargs) + +def create_builder_configs_from_metadata_configs(module_path: str, metadata_configs: MetadataConfigs, supports_metadata: bool, base_path: Optional[str]=None, default_builder_kwargs: Dict[str, Any]=None, download_config: Optional[DownloadConfig]=None) -> Tuple[List[BuilderConfig], str]: + builder_cls = import_main_class(module_path) + builder_config_cls = builder_cls.BUILDER_CONFIG_CLASS + default_config_name = metadata_configs.get_default_config_name() + builder_configs = [] + default_builder_kwargs = {} if default_builder_kwargs is None else default_builder_kwargs + base_path = base_path if base_path is not None else '' + for (config_name, config_params) in metadata_configs.items(): + config_data_files = config_params.get('data_files') + config_data_dir = config_params.get('data_dir') + config_base_path = xjoin(base_path, config_data_dir) if config_data_dir else base_path + try: + config_patterns = sanitize_patterns(config_data_files) if config_data_files is not None else get_data_patterns(config_base_path, download_config=download_config) + config_data_files_dict = DataFilesPatternsDict.from_patterns(config_patterns, allowed_extensions=ALL_ALLOWED_EXTENSIONS) + except EmptyDatasetError as e: + raise EmptyDatasetError(f"Dataset at '{base_path}' doesn't contain data files matching the patterns for config '{config_name}', check `data_files` and `data_fir` parameters in the `configs` YAML field in README.md. ") from e + if config_data_files is None and supports_metadata and (config_patterns != DEFAULT_PATTERNS_ALL): + try: + config_metadata_patterns = get_metadata_patterns(base_path, download_config=download_config) + except FileNotFoundError: + config_metadata_patterns = None + if config_metadata_patterns is not None: + config_metadata_data_files_list = DataFilesPatternsList.from_patterns(config_metadata_patterns) + config_data_files_dict = DataFilesPatternsDict({split: data_files_list + config_metadata_data_files_list for (split, data_files_list) in config_data_files_dict.items()}) + ignored_params = [param for param in config_params if not hasattr(builder_config_cls, param) and param != 'default'] + if ignored_params: + logger.warning(f'Some datasets params were ignored: {ignored_params}. 
Make sure to use only valid params for the dataset builder and to have a up-to-date version of the `datasets` library.') + builder_configs.append(builder_config_cls(name=config_name, data_files=config_data_files_dict, data_dir=config_data_dir, **{param: value for (param, value) in {**default_builder_kwargs, **config_params}.items() if hasattr(builder_config_cls, param) and param not in ('default', 'data_files', 'data_dir')})) + return (builder_configs, default_config_name) + +@dataclass +class BuilderConfigsParameters: + metadata_configs: Optional[MetadataConfigs] = None + builder_configs: Optional[List[BuilderConfig]] = None + default_config_name: Optional[str] = None + +@dataclass +class DatasetModule: + module_path: str + hash: str + builder_kwargs: dict + builder_configs_parameters: BuilderConfigsParameters = field(default_factory=BuilderConfigsParameters) + dataset_infos: Optional[DatasetInfosDict] = None + importable_file_path: Optional[str] = None + +class _DatasetModuleFactory: + + def get_module(self) -> DatasetModule: + raise NotImplementedError + +class LocalDatasetModuleFactoryWithScript(_DatasetModuleFactory): + + def __init__(self, path: str, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, dynamic_modules_path: Optional[str]=None, trust_remote_code: Optional[bool]=None): + self.path = path + self.name = Path(path).stem + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + self.trust_remote_code = trust_remote_code + + def get_module(self) -> DatasetModule: + if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None: + warnings.warn(f'The repository for {self.name} contains custom code which must be executed to correctly load the dataset. 
You can inspect the repository content at {self.path}\nYou can avoid this message in future by passing the argument `trust_remote_code=True`.\nPassing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.', FutureWarning) + dataset_infos_path = Path(self.path).parent / config.DATASETDICT_INFOS_FILENAME + dataset_readme_path = Path(self.path).parent / config.REPOCARD_FILENAME + imports = get_imports(self.path) + (local_imports, library_imports) = _download_additional_modules(name=self.name, base_path=str(Path(self.path).parent), imports=imports, download_config=self.download_config) + additional_files = [] + if dataset_infos_path.is_file(): + additional_files.append((config.DATASETDICT_INFOS_FILENAME, str(dataset_infos_path))) + if dataset_readme_path.is_file(): + additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path)) + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + hash = files_to_hash([self.path] + [loc[1] for loc in local_imports]) + importable_file_path = _get_importable_file_path(dynamic_modules_path=dynamic_modules_path, module_namespace='datasets', subdirectory_name=hash, name=self.name) + if not os.path.exists(importable_file_path): + trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name) + if trust_remote_code: + _create_importable_file(local_path=self.path, local_imports=local_imports, additional_files=additional_files, dynamic_modules_path=dynamic_modules_path, module_namespace='datasets', subdirectory_name=hash, name=self.name, download_mode=self.download_mode) + else: + raise ValueError(f'Loading {self.name} requires you to execute the dataset script in that repo on your local machine. 
Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.') + _check_library_imports(name=self.name, library_imports=library_imports) + (module_path, hash) = _load_importable_file(dynamic_modules_path=dynamic_modules_path, module_namespace='datasets', subdirectory_name=hash, name=self.name) + importlib.invalidate_caches() + builder_kwargs = {'base_path': str(Path(self.path).parent)} + return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path) + +class LocalDatasetModuleFactoryWithoutScript(_DatasetModuleFactory): + + def __init__(self, path: str, data_dir: Optional[str]=None, data_files: Optional[Union[str, List, Dict]]=None, download_mode: Optional[Union[DownloadMode, str]]=None): + if data_dir and os.path.isabs(data_dir): + raise ValueError(f"`data_dir` must be relative to a dataset directory's root: {path}") + self.path = Path(path).as_posix() + self.name = Path(path).stem + self.data_files = data_files + self.data_dir = data_dir + self.download_mode = download_mode + + def get_module(self) -> DatasetModule: + readme_path = os.path.join(self.path, config.REPOCARD_FILENAME) + standalone_yaml_path = os.path.join(self.path, config.REPOYAML_FILENAME) + dataset_card_data = DatasetCard.load(readme_path).data if os.path.isfile(readme_path) else DatasetCardData() + if os.path.exists(standalone_yaml_path): + with open(standalone_yaml_path, 'r', encoding='utf-8') as f: + standalone_yaml_data = yaml.safe_load(f.read()) + if standalone_yaml_data: + _dataset_card_data_dict = dataset_card_data.to_dict() + _dataset_card_data_dict.update(standalone_yaml_data) + dataset_card_data = DatasetCardData(**_dataset_card_data_dict) + metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) + dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data) + base_path = Path(self.path, self.data_dir or '').expanduser().resolve().as_posix() + if self.data_files is not None: + patterns = sanitize_patterns(self.data_files) + elif metadata_configs and (not self.data_dir) and ('data_files' in next(iter(metadata_configs.values()))): + patterns = sanitize_patterns(next(iter(metadata_configs.values()))['data_files']) + else: + patterns = get_data_patterns(base_path) + data_files = DataFilesDict.from_patterns(patterns, base_path=base_path, allowed_extensions=ALL_ALLOWED_EXTENSIONS) + (module_name, default_builder_kwargs) = infer_module_for_data_files(data_files=data_files, path=self.path) + data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name]) + supports_metadata = module_name in _MODULE_SUPPORTS_METADATA + if self.data_files is None and supports_metadata: + try: + metadata_patterns = get_metadata_patterns(base_path) + except FileNotFoundError: + metadata_patterns = None + if metadata_patterns is not None: + metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, base_path=base_path) + if metadata_data_files_list: + data_files = DataFilesDict({split: data_files_list + metadata_data_files_list for (split, data_files_list) in data_files.items()}) + (module_path, _) = _PACKAGED_DATASETS_MODULES[module_name] + if metadata_configs: + (builder_configs, default_config_name) = create_builder_configs_from_metadata_configs(module_path, metadata_configs, base_path=base_path, supports_metadata=supports_metadata, default_builder_kwargs=default_builder_kwargs) + else: + builder_configs: List[BuilderConfig] = 
[import_main_class(module_path).BUILDER_CONFIG_CLASS(data_files=data_files, **default_builder_kwargs)] + default_config_name = None + builder_kwargs = {'base_path': self.path, 'dataset_name': camelcase_to_snakecase(Path(self.path).name)} + if self.data_dir: + builder_kwargs['data_files'] = data_files + if os.path.isfile(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME)): + with open(os.path.join(self.path, config.DATASETDICT_INFOS_FILENAME), encoding='utf-8') as f: + legacy_dataset_infos = DatasetInfosDict({config_name: DatasetInfo.from_dict(dataset_info_dict) for (config_name, dataset_info_dict) in json.load(f).items()}) + if len(legacy_dataset_infos) == 1: + legacy_config_name = next(iter(legacy_dataset_infos)) + legacy_dataset_infos['default'] = legacy_dataset_infos.pop(legacy_config_name) + legacy_dataset_infos.update(dataset_infos) + dataset_infos = legacy_dataset_infos + if default_config_name is None and len(dataset_infos) == 1: + default_config_name = next(iter(dataset_infos)) + hash = Hasher.hash({'dataset_infos': dataset_infos, 'builder_configs': builder_configs}) + return DatasetModule(module_path, hash, builder_kwargs, dataset_infos=dataset_infos, builder_configs_parameters=BuilderConfigsParameters(metadata_configs=metadata_configs, builder_configs=builder_configs, default_config_name=default_config_name)) + +class PackagedDatasetModuleFactory(_DatasetModuleFactory): + + def __init__(self, name: str, data_dir: Optional[str]=None, data_files: Optional[Union[str, List, Dict]]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None): + self.name = name + self.data_files = data_files + self.data_dir = data_dir + self.download_config = download_config + self.download_mode = download_mode + increase_load_count(name) + + def get_module(self) -> DatasetModule: + base_path = Path(self.data_dir or '').expanduser().resolve().as_posix() + patterns = sanitize_patterns(self.data_files) if self.data_files is not None else get_data_patterns(base_path, download_config=self.download_config) + data_files = DataFilesDict.from_patterns(patterns, download_config=self.download_config, base_path=base_path) + supports_metadata = self.name in _MODULE_SUPPORTS_METADATA + if self.data_files is None and supports_metadata and (patterns != DEFAULT_PATTERNS_ALL): + try: + metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config) + except FileNotFoundError: + metadata_patterns = None + if metadata_patterns is not None: + metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, download_config=self.download_config, base_path=base_path) + if metadata_data_files_list: + data_files = DataFilesDict({split: data_files_list + metadata_data_files_list for (split, data_files_list) in data_files.items()}) + (module_path, hash) = _PACKAGED_DATASETS_MODULES[self.name] + builder_kwargs = {'data_files': data_files, 'dataset_name': self.name} + return DatasetModule(module_path, hash, builder_kwargs) + +class HubDatasetModuleFactoryWithoutScript(_DatasetModuleFactory): + + def __init__(self, name: str, revision: Optional[Union[str, Version]]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, List, Dict]]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None): + self.name = name + self.revision = revision + self.data_files = data_files + self.data_dir = data_dir + self.download_config = download_config or DownloadConfig() + self.download_mode = 
download_mode + increase_load_count(name) + + def get_module(self) -> DatasetModule: + hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(self.name, revision=self.revision, token=self.download_config.token, timeout=100.0) + revision = hfh_dataset_info.sha + base_path = f"hf://datasets/{self.name}@{revision}/{self.data_dir or ''}".rstrip('/') + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = 'Downloading readme' + try: + dataset_readme_path = cached_path(hf_dataset_url(self.name, config.REPOCARD_FILENAME, revision=revision), download_config=download_config) + dataset_card_data = DatasetCard.load(Path(dataset_readme_path)).data + except FileNotFoundError: + dataset_card_data = DatasetCardData() + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = 'Downloading standalone yaml' + try: + standalone_yaml_path = cached_path(hf_dataset_url(self.name, config.REPOYAML_FILENAME, revision=revision), download_config=download_config) + with open(standalone_yaml_path, 'r', encoding='utf-8') as f: + standalone_yaml_data = yaml.safe_load(f.read()) + if standalone_yaml_data: + _dataset_card_data_dict = dataset_card_data.to_dict() + _dataset_card_data_dict.update(standalone_yaml_data) + dataset_card_data = DatasetCardData(**_dataset_card_data_dict) + except FileNotFoundError: + pass + metadata_configs = MetadataConfigs.from_dataset_card_data(dataset_card_data) + dataset_infos = DatasetInfosDict.from_dataset_card_data(dataset_card_data) + if self.data_dir or self.data_files or (self.revision and self.revision != 'main'): + use_exported_dataset_infos = False + else: + use_exported_dataset_infos = True + if config.USE_PARQUET_EXPORT and use_exported_dataset_infos: + try: + exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(dataset=self.name, revision=self.revision, token=self.download_config.token) + exported_dataset_infos = DatasetInfosDict({config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name]) for config_name in exported_dataset_infos}) + except _dataset_viewer.DatasetViewerError: + exported_dataset_infos = None + else: + exported_dataset_infos = None + if exported_dataset_infos: + exported_dataset_infos.update(dataset_infos) + dataset_infos = exported_dataset_infos + if self.data_files is not None: + patterns = sanitize_patterns(self.data_files) + elif metadata_configs and (not self.data_dir) and ('data_files' in next(iter(metadata_configs.values()))): + patterns = sanitize_patterns(next(iter(metadata_configs.values()))['data_files']) + else: + patterns = get_data_patterns(base_path, download_config=self.download_config) + data_files = DataFilesDict.from_patterns(patterns, base_path=base_path, allowed_extensions=ALL_ALLOWED_EXTENSIONS, download_config=self.download_config) + (module_name, default_builder_kwargs) = infer_module_for_data_files(data_files=data_files, path=self.name, download_config=self.download_config) + data_files = data_files.filter_extensions(_MODULE_TO_EXTENSIONS[module_name]) + supports_metadata = module_name in _MODULE_SUPPORTS_METADATA + if self.data_files is None and supports_metadata: + try: + metadata_patterns = get_metadata_patterns(base_path, download_config=self.download_config) + except FileNotFoundError: + metadata_patterns = None + if metadata_patterns is not None: + metadata_data_files_list = DataFilesList.from_patterns(metadata_patterns, download_config=self.download_config, 
base_path=base_path) + if metadata_data_files_list: + data_files = DataFilesDict({split: data_files_list + metadata_data_files_list for (split, data_files_list) in data_files.items()}) + (module_path, _) = _PACKAGED_DATASETS_MODULES[module_name] + if metadata_configs: + (builder_configs, default_config_name) = create_builder_configs_from_metadata_configs(module_path, metadata_configs, base_path=base_path, supports_metadata=supports_metadata, default_builder_kwargs=default_builder_kwargs, download_config=self.download_config) + else: + builder_configs: List[BuilderConfig] = [import_main_class(module_path).BUILDER_CONFIG_CLASS(data_files=data_files, **default_builder_kwargs)] + default_config_name = None + builder_kwargs = {'base_path': hf_dataset_url(self.name, '', revision=revision).rstrip('/'), 'repo_id': self.name, 'dataset_name': camelcase_to_snakecase(Path(self.name).name)} + if self.data_dir: + builder_kwargs['data_files'] = data_files + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = 'Downloading metadata' + try: + dataset_infos_path = cached_path(hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=revision), download_config=download_config) + with open(dataset_infos_path, encoding='utf-8') as f: + legacy_dataset_infos = DatasetInfosDict({config_name: DatasetInfo.from_dict(dataset_info_dict) for (config_name, dataset_info_dict) in json.load(f).items()}) + if len(legacy_dataset_infos) == 1: + legacy_config_name = next(iter(legacy_dataset_infos)) + legacy_dataset_infos['default'] = legacy_dataset_infos.pop(legacy_config_name) + legacy_dataset_infos.update(dataset_infos) + dataset_infos = legacy_dataset_infos + except FileNotFoundError: + pass + if default_config_name is None and len(dataset_infos) == 1: + default_config_name = next(iter(dataset_infos)) + hash = revision + return DatasetModule(module_path, hash, builder_kwargs, dataset_infos=dataset_infos, builder_configs_parameters=BuilderConfigsParameters(metadata_configs=metadata_configs, builder_configs=builder_configs, default_config_name=default_config_name)) + +class HubDatasetModuleFactoryWithParquetExport(_DatasetModuleFactory): + + def __init__(self, name: str, revision: Optional[str]=None, download_config: Optional[DownloadConfig]=None): + self.name = name + self.revision = revision + self.download_config = download_config or DownloadConfig() + increase_load_count(name) + + def get_module(self) -> DatasetModule: + exported_parquet_files = _dataset_viewer.get_exported_parquet_files(dataset=self.name, revision=self.revision, token=self.download_config.token) + exported_dataset_infos = _dataset_viewer.get_exported_dataset_infos(dataset=self.name, revision=self.revision, token=self.download_config.token) + dataset_infos = DatasetInfosDict({config_name: DatasetInfo.from_dict(exported_dataset_infos[config_name]) for config_name in exported_dataset_infos}) + hfh_dataset_info = HfApi(config.HF_ENDPOINT).dataset_info(self.name, revision='refs/convert/parquet', token=self.download_config.token, timeout=100.0) + revision = hfh_dataset_info.sha + metadata_configs = MetadataConfigs._from_exported_parquet_files_and_dataset_infos(revision=revision, exported_parquet_files=exported_parquet_files, dataset_infos=dataset_infos) + (module_path, _) = _PACKAGED_DATASETS_MODULES['parquet'] + (builder_configs, default_config_name) = create_builder_configs_from_metadata_configs(module_path, metadata_configs, supports_metadata=False, 
download_config=self.download_config) + hash = self.revision + builder_kwargs = {'repo_id': self.name, 'dataset_name': camelcase_to_snakecase(Path(self.name).name)} + return DatasetModule(module_path, hash, builder_kwargs, dataset_infos=dataset_infos, builder_configs_parameters=BuilderConfigsParameters(metadata_configs=metadata_configs, builder_configs=builder_configs, default_config_name=default_config_name)) + +class HubDatasetModuleFactoryWithScript(_DatasetModuleFactory): + + def __init__(self, name: str, revision: Optional[Union[str, Version]]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, dynamic_modules_path: Optional[str]=None, trust_remote_code: Optional[bool]=None): + self.name = name + self.revision = revision + self.download_config = download_config or DownloadConfig() + self.download_mode = download_mode + self.dynamic_modules_path = dynamic_modules_path + self.trust_remote_code = trust_remote_code + increase_load_count(name) + + def download_loading_script(self) -> str: + file_path = hf_dataset_url(self.name, self.name.split('/')[-1] + '.py', revision=self.revision) + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = 'Downloading builder script' + return cached_path(file_path, download_config=download_config) + + def download_dataset_infos_file(self) -> str: + dataset_infos = hf_dataset_url(self.name, config.DATASETDICT_INFOS_FILENAME, revision=self.revision) + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = 'Downloading metadata' + try: + return cached_path(dataset_infos, download_config=download_config) + except (FileNotFoundError, ConnectionError): + return None + + def download_dataset_readme_file(self) -> str: + readme_url = hf_dataset_url(self.name, config.REPOCARD_FILENAME, revision=self.revision) + download_config = self.download_config.copy() + if download_config.download_desc is None: + download_config.download_desc = 'Downloading readme' + try: + return cached_path(readme_url, download_config=download_config) + except (FileNotFoundError, ConnectionError): + return None + + def get_module(self) -> DatasetModule: + if config.HF_DATASETS_TRUST_REMOTE_CODE and self.trust_remote_code is None: + warnings.warn(f'The repository for {self.name} contains custom code which must be executed to correctly load the dataset. 
You can inspect the repository content at https://hf.co/datasets/{self.name}\nYou can avoid this message in future by passing the argument `trust_remote_code=True`.\nPassing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.', FutureWarning) + local_path = self.download_loading_script() + dataset_infos_path = self.download_dataset_infos_file() + dataset_readme_path = self.download_dataset_readme_file() + imports = get_imports(local_path) + (local_imports, library_imports) = _download_additional_modules(name=self.name, base_path=hf_dataset_url(self.name, '', revision=self.revision), imports=imports, download_config=self.download_config) + additional_files = [] + if dataset_infos_path: + additional_files.append((config.DATASETDICT_INFOS_FILENAME, dataset_infos_path)) + if dataset_readme_path: + additional_files.append((config.REPOCARD_FILENAME, dataset_readme_path)) + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + hash = files_to_hash([local_path] + [loc[1] for loc in local_imports]) + importable_file_path = _get_importable_file_path(dynamic_modules_path=dynamic_modules_path, module_namespace='datasets', subdirectory_name=hash, name=self.name) + if not os.path.exists(importable_file_path): + trust_remote_code = resolve_trust_remote_code(self.trust_remote_code, self.name) + if trust_remote_code: + _create_importable_file(local_path=local_path, local_imports=local_imports, additional_files=additional_files, dynamic_modules_path=dynamic_modules_path, module_namespace='datasets', subdirectory_name=hash, name=self.name, download_mode=self.download_mode) + else: + raise ValueError(f'Loading {self.name} requires you to execute the dataset script in that repo on your local machine. 
Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.') + _check_library_imports(name=self.name, library_imports=library_imports) + (module_path, hash) = _load_importable_file(dynamic_modules_path=dynamic_modules_path, module_namespace='datasets', subdirectory_name=hash, name=self.name) + importlib.invalidate_caches() + builder_kwargs = {'base_path': hf_dataset_url(self.name, '', revision=self.revision).rstrip('/'), 'repo_id': self.name} + return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path) + +class CachedDatasetModuleFactory(_DatasetModuleFactory): + + def __init__(self, name: str, cache_dir: Optional[str]=None, dynamic_modules_path: Optional[str]=None): + self.name = name + self.cache_dir = cache_dir + self.dynamic_modules_path = dynamic_modules_path + assert self.name.count('/') <= 1 + + def get_module(self) -> DatasetModule: + dynamic_modules_path = self.dynamic_modules_path if self.dynamic_modules_path else init_dynamic_modules() + importable_directory_path = os.path.join(dynamic_modules_path, 'datasets', self.name.replace('/', '--')) + hashes = [h for h in os.listdir(importable_directory_path) if len(h) == 64] if os.path.isdir(importable_directory_path) else None + if hashes: + + def _get_modification_time(module_hash): + return (Path(importable_directory_path) / module_hash / (self.name.split('/')[-1] + '.py')).stat().st_mtime + hash = sorted(hashes, key=_get_modification_time)[-1] + warning_msg = f"Using the latest cached version of the module from {os.path.join(importable_directory_path, hash)} (last modified on {time.ctime(_get_modification_time(hash))}) since it couldn't be found locally at {self.name}" + if not config.HF_HUB_OFFLINE: + warning_msg += ', or remotely on the Hugging Face Hub.' + logger.warning(warning_msg) + importable_file_path = _get_importable_file_path(dynamic_modules_path=dynamic_modules_path, module_namespace='datasets', subdirectory_name=hash, name=self.name) + (module_path, hash) = _load_importable_file(dynamic_modules_path=dynamic_modules_path, module_namespace='datasets', subdirectory_name=hash, name=self.name) + importlib.invalidate_caches() + builder_kwargs = {'repo_id': self.name} + return DatasetModule(module_path, hash, builder_kwargs, importable_file_path=importable_file_path) + cache_dir = os.path.expanduser(str(self.cache_dir or config.HF_DATASETS_CACHE)) + namespace_and_dataset_name = self.name.split('/') + namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1]) + cached_relative_path = '___'.join(namespace_and_dataset_name) + cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path) + cached_directory_paths = [cached_directory_path for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, '*', '*', '*')) if os.path.isdir(cached_directory_path)] + if cached_directory_paths: + builder_kwargs = {'repo_id': self.name, 'dataset_name': self.name.split('/')[-1]} + warning_msg = f"Using the latest cached version of the dataset since {self.name} couldn't be found on the Hugging Face Hub" + if config.HF_HUB_OFFLINE: + warning_msg += ' (offline mode is enabled).' 
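+ # [editor's note] Illustrative sketch, not part of the upstream source: this fallback lets a previously
+ # prepared dataset keep loading while the Hub is unreachable, assuming offline mode is enabled before import
+ # (e.g. the HF_HUB_OFFLINE=1 environment variable) and the placeholder repo id below is already in the cache:
+ #     from datasets import load_dataset
+ #     ds = load_dataset('username/previously_downloaded_dataset')  # resolved from the local cache, no network access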
+ logger.warning(warning_msg) + return DatasetModule('datasets.packaged_modules.cache.cache', 'auto', {**builder_kwargs, 'version': 'auto'}) + raise FileNotFoundError(f'Dataset {self.name} is not cached in {self.cache_dir}') + +def dataset_module_factory(path: str, revision: Optional[Union[str, Version]]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, dynamic_modules_path: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[Dict, List, str, DataFilesDict]]=None, cache_dir: Optional[str]=None, trust_remote_code: Optional[bool]=None, _require_default_config_name=True, _require_custom_configs=False, **download_kwargs) -> DatasetModule: + if download_config is None: + download_config = DownloadConfig(**download_kwargs) + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + download_config.extract_compressed_file = True + download_config.force_extract = True + download_config.force_download = download_mode == DownloadMode.FORCE_REDOWNLOAD + filename = list(filter(lambda x: x, path.replace(os.sep, '/').split('/')))[-1] + if not filename.endswith('.py'): + filename = filename + '.py' + combined_path = os.path.join(path, filename) + if path in _PACKAGED_DATASETS_MODULES: + return PackagedDatasetModuleFactory(path, data_dir=data_dir, data_files=data_files, download_config=download_config, download_mode=download_mode).get_module() + elif path.endswith(filename): + if os.path.isfile(path): + return LocalDatasetModuleFactoryWithScript(path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path, trust_remote_code=trust_remote_code).get_module() + else: + raise FileNotFoundError(f"Couldn't find a dataset script at {relative_to_absolute_path(path)}") + elif os.path.isfile(combined_path): + return LocalDatasetModuleFactoryWithScript(combined_path, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path, trust_remote_code=trust_remote_code).get_module() + elif os.path.isdir(path): + return LocalDatasetModuleFactoryWithoutScript(path, data_dir=data_dir, data_files=data_files, download_mode=download_mode).get_module() + elif is_relative_path(path) and path.count('/') <= 1: + try: + _raise_if_offline_mode_is_enabled() + hf_api = HfApi(config.HF_ENDPOINT) + try: + dataset_info = hf_api.dataset_info(repo_id=path, revision=revision, token=download_config.token, timeout=100.0) + except (OfflineModeIsEnabled, requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError) as e: + raise ConnectionError(f"Couldn't reach '{path}' on the Hub ({e.__class__.__name__})") from e + except RevisionNotFoundError as e: + raise DatasetNotFoundError(f"Revision '{revision}' doesn't exist for dataset '{path}' on the Hub.") from e + except RepositoryNotFoundError as e: + raise DatasetNotFoundError(f"Dataset '{path}' doesn't exist on the Hub or cannot be accessed.") from e + if dataset_info.gated: + try: + check_auth(hf_api, repo_id=path, token=download_config.token) + except GatedRepoError as e: + message = f"Dataset '{path}' is a gated dataset on the Hub." + if '401 Client Error' in str(e): + message += ' You must be authenticated to access it.' + elif '403 Client Error' in str(e): + message += f' Visit the dataset page at https://huggingface.co/datasets/{path} to ask for access.' 
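+ # [editor's note] Illustrative sketch, not part of the upstream source: gated repos require an authenticated
+ # request, e.g. after `huggingface-cli login` or by passing a token explicitly (repo id below is a placeholder):
+ #     from datasets import load_dataset
+ #     ds = load_dataset('username/my_gated_dataset', token=True)  # token=True reuses the locally saved login token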
+ raise DatasetNotFoundError(message) from e + if filename in [sibling.rfilename for sibling in dataset_info.siblings]: + fs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token) + if _require_custom_configs or (revision and revision != 'main'): + can_load_config_from_parquet_export = False + elif _require_default_config_name: + with fs.open(f'datasets/{path}/{filename}', 'r', encoding='utf-8') as f: + can_load_config_from_parquet_export = 'DEFAULT_CONFIG_NAME' not in f.read() + else: + can_load_config_from_parquet_export = True + if config.USE_PARQUET_EXPORT and can_load_config_from_parquet_export: + try: + return HubDatasetModuleFactoryWithParquetExport(path, download_config=download_config, revision=dataset_info.sha).get_module() + except _dataset_viewer.DatasetViewerError: + pass + return HubDatasetModuleFactoryWithScript(path, revision=revision, download_config=download_config, download_mode=download_mode, dynamic_modules_path=dynamic_modules_path, trust_remote_code=trust_remote_code).get_module() + else: + return HubDatasetModuleFactoryWithoutScript(path, revision=revision, data_dir=data_dir, data_files=data_files, download_config=download_config, download_mode=download_mode).get_module() + except Exception as e1: + try: + return CachedDatasetModuleFactory(path, dynamic_modules_path=dynamic_modules_path, cache_dir=cache_dir).get_module() + except Exception: + if isinstance(e1, OfflineModeIsEnabled): + raise ConnectionError(f"Couldn't reach the Hugging Face Hub for dataset '{path}': {e1}") from None + if isinstance(e1, (DataFilesNotFoundError, DatasetNotFoundError, EmptyDatasetError)): + raise e1 from None + if isinstance(e1, FileNotFoundError): + if trust_remote_code: + raise FileNotFoundError(f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory. Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}") from None + else: + raise FileNotFoundError(f"Couldn't find any data file at {relative_to_absolute_path(path)}. 
Couldn't find '{path}' on the Hugging Face Hub either: {type(e1).__name__}: {e1}") from None + raise e1 from None + elif trust_remote_code: + raise FileNotFoundError(f"Couldn't find a dataset script at {relative_to_absolute_path(combined_path)} or any data file in the same directory.") + else: + raise FileNotFoundError(f"Couldn't find any data file at {relative_to_absolute_path(path)}.") + +def load_dataset_builder(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, revision: Optional[Union[str, Version]]=None, token: Optional[Union[bool, str]]=None, storage_options: Optional[Dict]=None, trust_remote_code: Optional[bool]=None, _require_default_config_name=True, **config_kwargs) -> DatasetBuilder: + download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS) + if token is not None: + download_config = download_config.copy() if download_config else DownloadConfig() + download_config.token = token + if storage_options is not None: + download_config = download_config.copy() if download_config else DownloadConfig() + download_config.storage_options.update(storage_options) + dataset_module = dataset_module_factory(path, revision=revision, download_config=download_config, download_mode=download_mode, data_dir=data_dir, data_files=data_files, cache_dir=cache_dir, trust_remote_code=trust_remote_code, _require_default_config_name=_require_default_config_name, _require_custom_configs=bool(config_kwargs)) + builder_kwargs = dataset_module.builder_kwargs + data_dir = builder_kwargs.pop('data_dir', data_dir) + data_files = builder_kwargs.pop('data_files', data_files) + config_name = builder_kwargs.pop('config_name', name or dataset_module.builder_configs_parameters.default_config_name) + dataset_name = builder_kwargs.pop('dataset_name', None) + info = dataset_module.dataset_infos.get(config_name) if dataset_module.dataset_infos else None + if path in _PACKAGED_DATASETS_MODULES and data_files is None and (dataset_module.builder_configs_parameters.builder_configs[0].data_files is None): + error_msg = f'Please specify the data files or data directory to load for the {path} dataset builder.' 
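+ # [editor's note] Illustrative sketch, not part of the upstream source: packaged builders such as 'csv' or
+ # 'json' ship no data of their own, so callers must point them at files via `data_files` (or `data_dir`), e.g.:
+ #     from datasets import load_dataset
+ #     ds = load_dataset('csv', data_files={'train': 'path/to/train.csv'})  # placeholder path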
+ example_extensions = [extension for extension in _EXTENSION_TO_MODULE if _EXTENSION_TO_MODULE[extension] == path] + if example_extensions: + error_msg += f'\nFor example `data_files={{"train": "path/to/data/train/*.{example_extensions[0]}"}}`' + raise ValueError(error_msg) + builder_cls = get_dataset_builder_class(dataset_module, dataset_name=dataset_name) + builder_instance: DatasetBuilder = builder_cls(cache_dir=cache_dir, dataset_name=dataset_name, config_name=config_name, data_dir=data_dir, data_files=data_files, hash=dataset_module.hash, info=info, features=features, token=token, storage_options=storage_options, **builder_kwargs, **config_kwargs) + builder_instance._use_legacy_cache_dir_if_possible(dataset_module) + return builder_instance + +def load_dataset(path: str, name: Optional[str]=None, data_dir: Optional[str]=None, data_files: Optional[Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]]=None, split: Optional[Union[str, Split]]=None, cache_dir: Optional[str]=None, features: Optional[Features]=None, download_config: Optional[DownloadConfig]=None, download_mode: Optional[Union[DownloadMode, str]]=None, verification_mode: Optional[Union[VerificationMode, str]]=None, keep_in_memory: Optional[bool]=None, save_infos: bool=False, revision: Optional[Union[str, Version]]=None, token: Optional[Union[bool, str]]=None, streaming: bool=False, num_proc: Optional[int]=None, storage_options: Optional[Dict]=None, trust_remote_code: bool=None, **config_kwargs) -> Union[DatasetDict, Dataset, IterableDatasetDict, IterableDataset]: + if data_files is not None and (not data_files): + raise ValueError(f"Empty 'data_files': '{data_files}'. It should be either non-empty or None (default).") + if Path(path, config.DATASET_STATE_JSON_FILENAME).exists(): + raise ValueError('You are trying to load a dataset that was saved using `save_to_disk`. Please use `load_from_disk` instead.') + if streaming and num_proc is not None: + raise NotImplementedError('Loading a streaming dataset in parallel with `num_proc` is not implemented. 
To parallelize streaming, you can wrap the dataset with a PyTorch DataLoader using `num_workers` > 1 instead.')
+    download_mode = DownloadMode(download_mode or DownloadMode.REUSE_DATASET_IF_EXISTS)
+    verification_mode = VerificationMode(verification_mode or VerificationMode.BASIC_CHECKS if not save_infos else VerificationMode.ALL_CHECKS)
+    builder_instance = load_dataset_builder(path=path, name=name, data_dir=data_dir, data_files=data_files, cache_dir=cache_dir, features=features, download_config=download_config, download_mode=download_mode, revision=revision, token=token, storage_options=storage_options, trust_remote_code=trust_remote_code, _require_default_config_name=name is None, **config_kwargs)
+    if streaming:
+        return builder_instance.as_streaming_dataset(split=split)
+    builder_instance.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, num_proc=num_proc, storage_options=storage_options)
+    keep_in_memory = keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
+    ds = builder_instance.as_dataset(split=split, verification_mode=verification_mode, in_memory=keep_in_memory)
+    if save_infos:
+        builder_instance._save_infos()
+    return ds
+
+def load_from_disk(dataset_path: PathLike, keep_in_memory: Optional[bool]=None, storage_options: Optional[dict]=None) -> Union[Dataset, DatasetDict]:
+    fs: fsspec.AbstractFileSystem
+    (fs, *_) = url_to_fs(dataset_path, **storage_options or {})
+    if not fs.exists(dataset_path):
+        raise FileNotFoundError(f'Directory {dataset_path} not found')
+    if fs.isfile(posixpath.join(dataset_path, config.DATASET_INFO_FILENAME)) and fs.isfile(posixpath.join(dataset_path, config.DATASET_STATE_JSON_FILENAME)):
+        return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
+    elif fs.isfile(posixpath.join(dataset_path, config.DATASETDICT_JSON_FILENAME)):
+        return DatasetDict.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options)
+    else:
+        raise FileNotFoundError(f'Directory {dataset_path} is neither a `Dataset` directory nor a `DatasetDict` directory.')
+
+# File: datasets-main/src/datasets/naming.py
+""""""
+import itertools
+import os
+import re
+_uppercase_uppercase_re = re.compile('([A-Z]+)([A-Z][a-z])')
+_lowercase_uppercase_re = re.compile('([a-z\\d])([A-Z])')
+_single_underscore_re = re.compile('(?<!_)_(?!_)')
+_multiple_underscores_re = re.compile('(_{2,})')
+_split_re = '^\\w+(\\.\\w+)*$'
+INVALID_WINDOWS_CHARACTERS_IN_PATH = '<>:/\\|?*'
+
+def camelcase_to_snakecase(name):
+    name = _uppercase_uppercase_re.sub('\\1_\\2', name)
+    name = _lowercase_uppercase_re.sub('\\1_\\2', name)
+    return name.lower()
+
+def snakecase_to_camelcase(name):
+    name = _single_underscore_re.split(name)
+    name = [_multiple_underscores_re.split(n) for n in name]
+    return ''.join((n.capitalize() for n in itertools.chain.from_iterable(name) if n != ''))
+
+def filename_prefix_for_name(name):
+    if os.path.basename(name) != name:
+        raise ValueError(f'Should be a dataset name, not a path: {name}')
+    return camelcase_to_snakecase(name)
+
+def filename_prefix_for_split(name, split):
+    if os.path.basename(name) != name:
+        raise ValueError(f'Should be a dataset name, not a path: {name}')
+    if not re.match(_split_re, split):
+        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
+    return f'{filename_prefix_for_name(name)}-{split}'
+
+def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
+    prefix = filename_prefix_for_split(dataset_name, split)
+    if filetype_suffix:
+        prefix
+= f'.{filetype_suffix}' + filepath = os.path.join(data_dir, prefix) + return f'{filepath}*' + +def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None): + prefix = filename_prefix_for_split(dataset_name, split) + prefix = os.path.join(path, prefix) + if shard_lengths: + num_shards = len(shard_lengths) + filenames = [f'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(num_shards)] + if filetype_suffix: + filenames = [filename + f'.{filetype_suffix}' for filename in filenames] + return filenames + else: + filename = prefix + if filetype_suffix: + filename += f'.{filetype_suffix}' + return [filename] + +# File: datasets-main/src/datasets/packaged_modules/__init__.py +import inspect +import re +from typing import Dict, List, Tuple +from huggingface_hub.utils import insecure_hashlib +from .arrow import arrow +from .audiofolder import audiofolder +from .cache import cache +from .csv import csv +from .imagefolder import imagefolder +from .json import json +from .pandas import pandas +from .parquet import parquet +from .sql import sql +from .text import text +from .webdataset import webdataset + +def _hash_python_lines(lines: List[str]) -> str: + filtered_lines = [] + for line in lines: + line = re.sub('#.*', '', line) + if line: + filtered_lines.append(line) + full_str = '\n'.join(filtered_lines) + full_bytes = full_str.encode('utf-8') + return insecure_hashlib.sha256(full_bytes).hexdigest() +_PACKAGED_DATASETS_MODULES = {'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), 'webdataset': (webdataset.__name__, _hash_python_lines(inspect.getsource(webdataset).splitlines()))} +_PACKAGED_DATASETS_MODULES_2_15_HASHES = {'csv': 'eea64c71ca8b46dd3f537ed218fc9bf495d5707789152eb2764f5c78fa66d59d', 'json': '8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96', 'pandas': '3ac4ffc4563c796122ef66899b9485a3f1a977553e2d2a8a318c72b8cc6f2202', 'parquet': 'ca31c69184d9832faed373922c2acccec0b13a0bb5bbbe19371385c3ff26f1d1', 'arrow': '74f69db2c14c2860059d39860b1f400a03d11bf7fb5a8258ca38c501c878c137', 'text': 'c4a140d10f020282918b5dd1b8a49f0104729c6177f60a6b49ec2a365ec69f34', 'imagefolder': '7b7ce5247a942be131d49ad4f3de5866083399a0f250901bd8dc202f8c5f7ce5', 'audiofolder': 'd3c1655c66c8f72e4efb5c79e952975fa6e2ce538473a6890241ddbddee9071c'} +_EXTENSION_TO_MODULE: Dict[str, Tuple[str, dict]] = {'.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.geoparquet': ('parquet', {}), '.gpq': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), '.tar': ('webdataset', {})} +_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) +_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) 
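+# Illustrative lookups (a sketch based on the mapping above, not part of the upstream module):
+#   _EXTENSION_TO_MODULE['.tsv']   -> ('csv', {'sep': '\t'})
+#   _EXTENSION_TO_MODULE['.jsonl'] -> ('json', {})
+# Audio extensions are registered next, in both lower- and upper-case variants.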
+_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) +_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) +_MODULE_SUPPORTS_METADATA = {'imagefolder', 'audiofolder'} +_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {} +for (_ext, (_module, _)) in _EXTENSION_TO_MODULE.items(): + _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) +for _module in _MODULE_TO_EXTENSIONS: + _MODULE_TO_EXTENSIONS[_module].append('.zip') + +# File: datasets-main/src/datasets/packaged_modules/arrow/arrow.py +import itertools +from dataclasses import dataclass +from typing import Optional +import pyarrow as pa +import datasets +from datasets.table import table_cast +logger = datasets.utils.logging.get_logger(__name__) + +@dataclass +class ArrowConfig(datasets.BuilderConfig): + features: Optional[datasets.Features] = None + + def __post_init__(self): + super().__post_init__() + +class Arrow(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = ArrowConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}') + dl_manager.download_config.extract_on_the_fly = True + data_files = dl_manager.download_and_extract(self.config.data_files) + splits = [] + for (split_name, files) in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + if self.info.features is None: + for file in itertools.chain.from_iterable(files): + with open(file, 'rb') as f: + try: + reader = pa.ipc.open_stream(f) + except pa.lib.ArrowInvalid: + reader = pa.ipc.open_file(f) + self.info.features = datasets.Features.from_arrow_schema(reader.schema) + break + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.info.features is not None: + pa_table = table_cast(pa_table, self.info.features.arrow_schema) + return pa_table + + def _generate_tables(self, files): + for (file_idx, file) in enumerate(itertools.chain.from_iterable(files)): + with open(file, 'rb') as f: + try: + try: + batches = pa.ipc.open_stream(f) + except pa.lib.ArrowInvalid: + reader = pa.ipc.open_file(f) + batches = (reader.get_batch(i) for i in range(reader.num_record_batches)) + for (batch_idx, record_batch) in enumerate(batches): + pa_table = pa.Table.from_batches([record_batch]) + yield (f'{file_idx}_{batch_idx}', self._cast_table(pa_table)) + except ValueError as e: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise + +# File: datasets-main/src/datasets/packaged_modules/audiofolder/audiofolder.py +from typing import List +import datasets +from ..folder_based_builder import folder_based_builder +logger = datasets.utils.logging.get_logger(__name__) + +class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig): + drop_labels: bool = None + drop_metadata: bool = None + + def __post_init__(self): + super().__post_init__() + +class AudioFolder(folder_based_builder.FolderBasedBuilder): + BASE_FEATURE = datasets.Audio + BASE_COLUMN_NAME = 'audio' + BUILDER_CONFIG_CLASS = AudioFolderConfig + EXTENSIONS: List[str] +AUDIO_EXTENSIONS = ['.aiff', '.au', '.avr', '.caf', '.flac', '.htk', '.svx', '.mat4', '.mat5', '.mpc2k', '.ogg', '.paf', '.pvf', 
'.raw', '.rf64', '.sd2', '.sds', '.ircam', '.voc', '.w64', '.wav', '.nist', '.wavex', '.wve', '.xi', '.mp3', '.opus'] +AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS + +# File: datasets-main/src/datasets/packaged_modules/cache/cache.py +import glob +import json +import os +import shutil +import time +from pathlib import Path +from typing import List, Optional, Tuple, Union +import pyarrow as pa +import datasets +import datasets.config +import datasets.data_files +from datasets.naming import camelcase_to_snakecase, filenames_for_dataset_split +logger = datasets.utils.logging.get_logger(__name__) + +def _get_modification_time(cached_directory_path): + return Path(cached_directory_path).stat().st_mtime + +def _find_hash_in_cache(dataset_name: str, config_name: Optional[str], cache_dir: Optional[str], config_kwargs: dict, custom_features: Optional[datasets.Features]) -> Tuple[str, str, str]: + if config_name or config_kwargs or custom_features: + config_id = datasets.BuilderConfig(config_name or 'default').create_config_id(config_kwargs=config_kwargs, custom_features=custom_features) + else: + config_id = None + cache_dir = os.path.expanduser(str(cache_dir or datasets.config.HF_DATASETS_CACHE)) + namespace_and_dataset_name = dataset_name.split('/') + namespace_and_dataset_name[-1] = camelcase_to_snakecase(namespace_and_dataset_name[-1]) + cached_relative_path = '___'.join(namespace_and_dataset_name) + cached_datasets_directory_path_root = os.path.join(cache_dir, cached_relative_path) + cached_directory_paths = [cached_directory_path for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, config_id or '*', '*', '*')) if os.path.isdir(cached_directory_path) and (config_kwargs or custom_features or json.loads(Path(cached_directory_path, 'dataset_info.json').read_text(encoding='utf-8'))['config_name'] == Path(cached_directory_path).parts[-3])] + if not cached_directory_paths: + cached_directory_paths = [cached_directory_path for cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, '*', '*', '*')) if os.path.isdir(cached_directory_path)] + available_configs = sorted({Path(cached_directory_path).parts[-3] for cached_directory_path in cached_directory_paths}) + raise ValueError(f"Couldn't find cache for {dataset_name}" + (f" for config '{config_id}'" if config_id else '') + (f'\nAvailable configs in the cache: {available_configs}' if available_configs else '')) + cached_directory_path = Path(sorted(cached_directory_paths, key=_get_modification_time)[-1]) + (version, hash) = cached_directory_path.parts[-2:] + other_configs = [Path(_cached_directory_path).parts[-3] for _cached_directory_path in glob.glob(os.path.join(cached_datasets_directory_path_root, '*', version, hash)) if os.path.isdir(_cached_directory_path) and (config_kwargs or custom_features or json.loads(Path(_cached_directory_path, 'dataset_info.json').read_text(encoding='utf-8'))['config_name'] == Path(_cached_directory_path).parts[-3])] + if not config_id and len(other_configs) > 1: + raise ValueError(f"There are multiple '{dataset_name}' configurations in the cache: {', '.join(other_configs)}\nPlease specify which configuration to reload from the cache, e.g.\n\tload_dataset('{dataset_name}', '{other_configs[0]}')") + config_name = cached_directory_path.parts[-3] + warning_msg = f"Found the latest cached dataset configuration '{config_name}' at {cached_directory_path} (last modified on {time.ctime(_get_modification_time(cached_directory_path))})." 
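+    # Reusing a cached build is announced with a warning so users can see exactly which cached
+    # configuration, version and hash (and its last-modified time) is being reloaded.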
+ logger.warning(warning_msg) + return (config_name, version, hash) + +class Cache(datasets.ArrowBasedBuilder): + + def __init__(self, cache_dir: Optional[str]=None, dataset_name: Optional[str]=None, config_name: Optional[str]=None, version: Optional[str]='0.0.0', hash: Optional[str]=None, base_path: Optional[str]=None, info: Optional[datasets.DatasetInfo]=None, features: Optional[datasets.Features]=None, token: Optional[Union[bool, str]]=None, repo_id: Optional[str]=None, data_files: Optional[Union[str, list, dict, datasets.data_files.DataFilesDict]]=None, data_dir: Optional[str]=None, storage_options: Optional[dict]=None, writer_batch_size: Optional[int]=None, **config_kwargs): + if repo_id is None and dataset_name is None: + raise ValueError('repo_id or dataset_name is required for the Cache dataset builder') + if data_files is not None: + config_kwargs['data_files'] = data_files + if data_dir is not None: + config_kwargs['data_dir'] = data_dir + if hash == 'auto' and version == 'auto': + (config_name, version, hash) = _find_hash_in_cache(dataset_name=repo_id or dataset_name, config_name=config_name, cache_dir=cache_dir, config_kwargs=config_kwargs, custom_features=features) + elif hash == 'auto' or version == 'auto': + raise NotImplementedError("Pass both hash='auto' and version='auto' instead") + super().__init__(cache_dir=cache_dir, dataset_name=dataset_name, config_name=config_name, version=version, hash=hash, base_path=base_path, info=info, token=token, repo_id=repo_id, storage_options=storage_options, writer_batch_size=writer_batch_size) + + def _info(self) -> datasets.DatasetInfo: + return datasets.DatasetInfo() + + def download_and_prepare(self, output_dir: Optional[str]=None, *args, **kwargs): + if not os.path.exists(self.cache_dir): + raise ValueError(f"Cache directory for {self.dataset_name} doesn't exist at {self.cache_dir}") + if output_dir is not None and output_dir != self.cache_dir: + shutil.copytree(self.cache_dir, output_dir) + + def _split_generators(self, dl_manager): + if isinstance(self.info.splits, datasets.SplitDict): + split_infos: List[datasets.SplitInfo] = list(self.info.splits.values()) + else: + raise ValueError(f'Missing splits info for {self.dataset_name} in cache directory {self.cache_dir}') + return [datasets.SplitGenerator(name=split_info.name, gen_kwargs={'files': filenames_for_dataset_split(self.cache_dir, dataset_name=self.dataset_name, split=split_info.name, filetype_suffix='arrow', shard_lengths=split_info.shard_lengths)}) for split_info in split_infos] + + def _generate_tables(self, files): + for (file_idx, file) in enumerate(files): + with open(file, 'rb') as f: + try: + for (batch_idx, record_batch) in enumerate(pa.ipc.open_stream(f)): + pa_table = pa.Table.from_batches([record_batch]) + yield (f'{file_idx}_{batch_idx}', pa_table) + except ValueError as e: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise + +# File: datasets-main/src/datasets/packaged_modules/csv/csv.py +import itertools +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Union +import pandas as pd +import pyarrow as pa +import datasets +import datasets.config +from datasets.features.features import require_storage_cast +from datasets.table import table_cast +from datasets.utils.py_utils import Literal +logger = datasets.utils.logging.get_logger(__name__) +_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['names', 'prefix'] +_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['warn_bad_lines', 'error_bad_lines', 
'mangle_dupe_cols'] +_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['encoding_errors', 'on_bad_lines'] +_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['date_format'] +_PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS = ['verbose'] + +@dataclass +class CsvConfig(datasets.BuilderConfig): + sep: str = ',' + delimiter: Optional[str] = None + header: Optional[Union[int, List[int], str]] = 'infer' + names: Optional[List[str]] = None + column_names: Optional[List[str]] = None + index_col: Optional[Union[int, str, List[int], List[str]]] = None + usecols: Optional[Union[List[int], List[str]]] = None + prefix: Optional[str] = None + mangle_dupe_cols: bool = True + engine: Optional[Literal['c', 'python', 'pyarrow']] = None + converters: Dict[Union[int, str], Callable[[Any], Any]] = None + true_values: Optional[list] = None + false_values: Optional[list] = None + skipinitialspace: bool = False + skiprows: Optional[Union[int, List[int]]] = None + nrows: Optional[int] = None + na_values: Optional[Union[str, List[str]]] = None + keep_default_na: bool = True + na_filter: bool = True + verbose: bool = False + skip_blank_lines: bool = True + thousands: Optional[str] = None + decimal: str = '.' + lineterminator: Optional[str] = None + quotechar: str = '"' + quoting: int = 0 + escapechar: Optional[str] = None + comment: Optional[str] = None + encoding: Optional[str] = None + dialect: Optional[str] = None + error_bad_lines: bool = True + warn_bad_lines: bool = True + skipfooter: int = 0 + doublequote: bool = True + memory_map: bool = False + float_precision: Optional[str] = None + chunksize: int = 10000 + features: Optional[datasets.Features] = None + encoding_errors: Optional[str] = 'strict' + on_bad_lines: Literal['error', 'warn', 'skip'] = 'error' + date_format: Optional[str] = None + + def __post_init__(self): + super().__post_init__() + if self.delimiter is not None: + self.sep = self.delimiter + if self.column_names is not None: + self.names = self.column_names + + @property + def pd_read_csv_kwargs(self): + pd_read_csv_kwargs = {'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format} + for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: + if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): + del pd_read_csv_kwargs[pd_read_csv_parameter] + if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 
3): + for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: + del pd_read_csv_kwargs[pd_read_csv_parameter] + if not datasets.config.PANDAS_VERSION.major >= 2: + for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: + del pd_read_csv_kwargs[pd_read_csv_parameter] + if datasets.config.PANDAS_VERSION.release >= (2, 2): + for pd_read_csv_parameter in _PANDAS_READ_CSV_DEPRECATED_2_2_0_PARAMETERS: + if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter): + del pd_read_csv_kwargs[pd_read_csv_parameter] + return pd_read_csv_kwargs + +class Csv(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = CsvConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}') + dl_manager.download_config.extract_on_the_fly = True + data_files = dl_manager.download_and_extract(self.config.data_files) + splits = [] + for (split_name, files) in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + schema = self.config.features.arrow_schema + if all((not require_storage_cast(feature) for feature in self.config.features.values())): + pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) + else: + pa_table = table_cast(pa_table, schema) + return pa_table + + def _generate_tables(self, files): + schema = self.config.features.arrow_schema if self.config.features else None + dtype = {name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object for (name, dtype, feature) in zip(schema.names, schema.types, self.config.features.values())} if schema is not None else None + for (file_idx, file) in enumerate(itertools.chain.from_iterable(files)): + csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs) + try: + for (batch_idx, df) in enumerate(csv_file_reader): + pa_table = pa.Table.from_pandas(df) + yield ((file_idx, batch_idx), self._cast_table(pa_table)) + except ValueError as e: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise + +# File: datasets-main/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py +import collections +import itertools +import os +from dataclasses import dataclass +from typing import List, Optional, Tuple, Type +import pandas as pd +import pyarrow as pa +import pyarrow.json as paj +import datasets +from datasets.features.features import FeatureType +logger = datasets.utils.logging.get_logger(__name__) + +def count_path_segments(path): + return path.replace('\\', '/').count('/') + +@dataclass +class FolderBasedBuilderConfig(datasets.BuilderConfig): + features: Optional[datasets.Features] = None + drop_labels: bool = None + drop_metadata: bool = None + + def __post_init__(self): + super().__post_init__() + +class FolderBasedBuilder(datasets.GeneratorBasedBuilder): + BASE_FEATURE: Type[FeatureType] + BASE_COLUMN_NAME: str + BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig + EXTENSIONS: List[str] + METADATA_FILENAMES: List[str] = ['metadata.csv', 'metadata.jsonl'] + + def _info(self): + return 
datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}') + dl_manager.download_config.extract_on_the_fly = True + do_analyze = not self.config.drop_labels or not self.config.drop_metadata + (labels, path_depths) = (set(), set()) + metadata_files = collections.defaultdict(set) + + def analyze(files_or_archives, downloaded_files_or_dirs, split): + if len(downloaded_files_or_dirs) == 0: + return + if os.path.isfile(downloaded_files_or_dirs[0]): + (original_files, downloaded_files) = (files_or_archives, downloaded_files_or_dirs) + for (original_file, downloaded_file) in zip(original_files, downloaded_files): + (original_file, downloaded_file) = (str(original_file), str(downloaded_file)) + (_, original_file_ext) = os.path.splitext(original_file) + if original_file_ext.lower() in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(original_file))) + path_depths.add(count_path_segments(original_file)) + elif os.path.basename(original_file) in self.METADATA_FILENAMES: + metadata_files[split].add((original_file, downloaded_file)) + else: + original_file_name = os.path.basename(original_file) + logger.debug(f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either.") + else: + (archives, downloaded_dirs) = (files_or_archives, downloaded_files_or_dirs) + for (archive, downloaded_dir) in zip(archives, downloaded_dirs): + (archive, downloaded_dir) = (str(archive), str(downloaded_dir)) + for downloaded_dir_file in dl_manager.iter_files(downloaded_dir): + (_, downloaded_dir_file_ext) = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext in self.EXTENSIONS: + if not self.config.drop_labels: + labels.add(os.path.basename(os.path.dirname(downloaded_dir_file))) + path_depths.add(count_path_segments(downloaded_dir_file)) + elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES: + metadata_files[split].add((None, downloaded_dir_file)) + else: + archive_file_name = os.path.basename(archive) + original_file_name = os.path.basename(downloaded_dir_file) + logger.debug(f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either.") + data_files = self.config.data_files + splits = [] + for (split_name, files) in data_files.items(): + if isinstance(files, str): + files = [files] + (files, archives) = self._split_files_and_archives(files) + downloaded_files = dl_manager.download(files) + downloaded_dirs = dl_manager.download_and_extract(archives) + if do_analyze: + logger.info(f'Searching for labels and/or metadata files in {split_name} data files...') + analyze(files, downloaded_files, split_name) + analyze(archives, downloaded_dirs, split_name) + if metadata_files: + add_metadata = not self.config.drop_metadata + add_labels = self.config.drop_labels is False + else: + add_metadata = False + add_labels = len(labels) > 1 and len(path_depths) == 1 if self.config.drop_labels is None else not self.config.drop_labels + if add_labels: + logger.info("Adding the labels inferred from data directories to the dataset's features...") + if add_metadata: + logger.info('Adding metadata to the dataset...') + else: + (add_labels, add_metadata, metadata_files) = (False, False, {}) + 
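+            # Each split generator receives the (original, downloaded) file pairs, any lazily
+            # iterated archive contents, the split's metadata files, and the add_labels /
+            # add_metadata flags consumed by _generate_examples below.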
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': list(zip(files, downloaded_files)) + [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs], 'metadata_files': metadata_files, 'split_name': split_name, 'add_labels': add_labels, 'add_metadata': add_metadata})) + if add_metadata: + features_per_metadata_file: List[Tuple[str, datasets.Features]] = [] + metadata_ext = {os.path.splitext(original_metadata_file)[-1] for (original_metadata_file, _) in itertools.chain.from_iterable(metadata_files.values())} + if len(metadata_ext) > 1: + raise ValueError(f'Found metadata files with different extensions: {list(metadata_ext)}') + metadata_ext = metadata_ext.pop() + for (_, downloaded_metadata_file) in itertools.chain.from_iterable(metadata_files.values()): + pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext) + features_per_metadata_file.append((downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))) + for (downloaded_metadata_file, metadata_features) in features_per_metadata_file: + if metadata_features != features_per_metadata_file[0][1]: + raise ValueError(f'Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}') + metadata_features = features_per_metadata_file[0][1] + if 'file_name' not in metadata_features: + raise ValueError('`file_name` must be present as dictionary key in metadata files') + if metadata_features['file_name'] != datasets.Value('string'): + raise ValueError('`file_name` key must be a string') + del metadata_features['file_name'] + else: + metadata_features = None + if self.config.features is None: + if add_labels: + self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE(), 'label': datasets.ClassLabel(names=sorted(labels))}) + else: + self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()}) + if add_metadata: + duplicated_keys = set(self.info.features) & set(metadata_features) + if duplicated_keys: + logger.warning(f'Ignoring metadata columns {list(duplicated_keys)} as they are already present in the features dictionary.') + self.info.features.update({feature: metadata_features[feature] for feature in metadata_features if feature not in duplicated_keys}) + return splits + + def _split_files_and_archives(self, data_files): + (files, archives) = ([], []) + for data_file in data_files: + (_, data_file_ext) = os.path.splitext(data_file) + if data_file_ext.lower() in self.EXTENSIONS: + files.append(data_file) + elif os.path.basename(data_file) in self.METADATA_FILENAMES: + files.append(data_file) + else: + archives.append(data_file) + return (files, archives) + + def _read_metadata(self, metadata_file, metadata_ext: str=''): + if metadata_ext == '.csv': + return pa.Table.from_pandas(pd.read_csv(metadata_file)) + else: + with open(metadata_file, 'rb') as f: + return paj.read_json(f) + + def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels): + split_metadata_files = metadata_files.get(split_name, []) + sample_empty_metadata = {k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {} + last_checked_dir = None + metadata_dir = None + metadata_dict = None + downloaded_metadata_file = None + metadata_ext = '' + if split_metadata_files: + metadata_ext = {os.path.splitext(original_metadata_file)[-1] for (original_metadata_file, 
_) in split_metadata_files} + metadata_ext = metadata_ext.pop() + file_idx = 0 + for (original_file, downloaded_file_or_dir) in files: + if original_file is not None: + (_, original_file_ext) = os.path.splitext(original_file) + if original_file_ext.lower() in self.EXTENSIONS: + if add_metadata: + current_dir = os.path.dirname(original_file) + if last_checked_dir is None or last_checked_dir != current_dir: + last_checked_dir = current_dir + metadata_file_candidates = [(os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)), metadata_file_candidate, downloaded_metadata_file) for (metadata_file_candidate, downloaded_metadata_file) in split_metadata_files if metadata_file_candidate is not None and (not os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)).startswith('..'))] + if metadata_file_candidates: + (_, metadata_file, downloaded_metadata_file) = min(metadata_file_candidates, key=lambda x: count_path_segments(x[0])) + pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext) + pa_file_name_array = pa_metadata_table['file_name'] + pa_metadata_table = pa_metadata_table.drop(['file_name']) + metadata_dir = os.path.dirname(metadata_file) + metadata_dict = {os.path.normpath(file_name).replace('\\', '/'): sample_metadata for (file_name, sample_metadata) in zip(pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist())} + else: + raise ValueError(f'One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}.') + if metadata_dir is not None and downloaded_metadata_file is not None: + file_relpath = os.path.relpath(original_file, metadata_dir) + file_relpath = file_relpath.replace('\\', '/') + if file_relpath not in metadata_dict: + raise ValueError(f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}.") + sample_metadata = metadata_dict[file_relpath] + else: + raise ValueError(f'One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}.') + else: + sample_metadata = {} + if add_labels: + sample_label = {'label': os.path.basename(os.path.dirname(original_file))} + else: + sample_label = {} + yield (file_idx, {**sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_file_or_dir, **sample_metadata, **sample_label}) + file_idx += 1 + else: + for downloaded_dir_file in downloaded_file_or_dir: + (_, downloaded_dir_file_ext) = os.path.splitext(downloaded_dir_file) + if downloaded_dir_file_ext.lower() in self.EXTENSIONS: + if add_metadata: + current_dir = os.path.dirname(downloaded_dir_file) + if last_checked_dir is None or last_checked_dir != current_dir: + last_checked_dir = current_dir + metadata_file_candidates = [(os.path.relpath(downloaded_dir_file, os.path.dirname(downloaded_metadata_file)), metadata_file_candidate, downloaded_metadata_file) for (metadata_file_candidate, downloaded_metadata_file) in split_metadata_files if metadata_file_candidate is None and (not os.path.relpath(downloaded_dir_file, os.path.dirname(downloaded_metadata_file)).startswith('..'))] + if metadata_file_candidates: + (_, metadata_file, downloaded_metadata_file) = min(metadata_file_candidates, key=lambda x: count_path_segments(x[0])) + pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext) + pa_file_name_array = pa_metadata_table['file_name'] + pa_metadata_table = pa_metadata_table.drop(['file_name']) + metadata_dir = 
os.path.dirname(downloaded_metadata_file) + metadata_dict = {os.path.normpath(file_name).replace('\\', '/'): sample_metadata for (file_name, sample_metadata) in zip(pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist())} + else: + raise ValueError(f'One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}.') + if metadata_dir is not None and downloaded_metadata_file is not None: + downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir) + downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace('\\', '/') + if downloaded_dir_file_relpath not in metadata_dict: + raise ValueError(f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}.") + sample_metadata = metadata_dict[downloaded_dir_file_relpath] + else: + raise ValueError(f'One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}.') + else: + sample_metadata = {} + if add_labels: + sample_label = {'label': os.path.basename(os.path.dirname(downloaded_dir_file))} + else: + sample_label = {} + yield (file_idx, {**sample_empty_metadata, self.BASE_COLUMN_NAME: downloaded_dir_file, **sample_metadata, **sample_label}) + file_idx += 1 + +# File: datasets-main/src/datasets/packaged_modules/generator/generator.py +from dataclasses import dataclass +from typing import Callable, Optional +import datasets + +@dataclass +class GeneratorConfig(datasets.BuilderConfig): + generator: Optional[Callable] = None + gen_kwargs: Optional[dict] = None + features: Optional[datasets.Features] = None + split: datasets.NamedSplit = datasets.Split.TRAIN + + def __post_init__(self): + super().__post_init__() + if self.generator is None: + raise ValueError('generator must be specified') + if self.gen_kwargs is None: + self.gen_kwargs = {} + +class Generator(datasets.GeneratorBasedBuilder): + BUILDER_CONFIG_CLASS = GeneratorConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + return [datasets.SplitGenerator(name=self.config.split, gen_kwargs=self.config.gen_kwargs)] + + def _generate_examples(self, **gen_kwargs): + for (idx, ex) in enumerate(self.config.generator(**gen_kwargs)): + yield (idx, ex) + +# File: datasets-main/src/datasets/packaged_modules/imagefolder/imagefolder.py +from typing import List +import datasets +from ..folder_based_builder import folder_based_builder +logger = datasets.utils.logging.get_logger(__name__) + +class ImageFolderConfig(folder_based_builder.FolderBasedBuilderConfig): + drop_labels: bool = None + drop_metadata: bool = None + + def __post_init__(self): + super().__post_init__() + +class ImageFolder(folder_based_builder.FolderBasedBuilder): + BASE_FEATURE = datasets.Image + BASE_COLUMN_NAME = 'image' + BUILDER_CONFIG_CLASS = ImageFolderConfig + EXTENSIONS: List[str] +IMAGE_EXTENSIONS = ['.blp', '.bmp', '.dib', '.bufr', '.cur', '.pcx', '.dcx', '.dds', '.ps', '.eps', '.fit', '.fits', '.fli', '.flc', '.ftc', '.ftu', '.gbr', '.gif', '.grib', '.h5', '.hdf', '.png', '.apng', '.jp2', '.j2k', '.jpc', '.jpf', '.jpx', '.j2c', '.icns', '.ico', '.im', '.iim', '.tif', '.tiff', '.jfif', '.jpe', '.jpg', '.jpeg', '.mpg', '.mpeg', '.msp', '.pcd', '.pxr', '.pbm', '.pgm', '.ppm', '.pnm', '.psd', '.bw', '.rgb', '.rgba', '.sgi', '.ras', '.tga', '.icb', '.vda', '.vst', '.webp', '.wmf', '.emf', '.xbm', '.xpm'] +ImageFolder.EXTENSIONS = 
IMAGE_EXTENSIONS + +# File: datasets-main/src/datasets/packaged_modules/json/json.py +import io +import itertools +from dataclasses import dataclass +from typing import Optional +import pandas as pd +import pyarrow as pa +import pyarrow.json as paj +import datasets +import datasets.config +from datasets.table import table_cast +from datasets.utils.file_utils import readline +logger = datasets.utils.logging.get_logger(__name__) + +def ujson_dumps(*args, **kwargs): + try: + return pd.io.json.ujson_dumps(*args, **kwargs) + except AttributeError: + return pd.io.json.dumps(*args, **kwargs) + +def ujson_loads(*args, **kwargs): + try: + return pd.io.json.ujson_loads(*args, **kwargs) + except AttributeError: + return pd.io.json.loads(*args, **kwargs) + +def pandas_read_json(path_or_buf, **kwargs): + if datasets.config.PANDAS_VERSION.major >= 2: + kwargs['dtype_backend'] = 'pyarrow' + return pd.read_json(path_or_buf, **kwargs) + +@dataclass +class JsonConfig(datasets.BuilderConfig): + features: Optional[datasets.Features] = None + encoding: str = 'utf-8' + encoding_errors: Optional[str] = None + field: Optional[str] = None + use_threads: bool = True + block_size: Optional[int] = None + chunksize: int = 10 << 20 + newlines_in_values: Optional[bool] = None + + def __post_init__(self): + super().__post_init__() + +class Json(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = JsonConfig + + def _info(self): + if self.config.block_size is not None: + logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead') + self.config.chunksize = self.config.block_size + if self.config.use_threads is not True: + logger.warning("The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore.") + if self.config.newlines_in_values is not None: + raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported') + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}') + dl_manager.download_config.extract_on_the_fly = True + data_files = dl_manager.download_and_extract(self.config.data_files) + splits = [] + for (split_name, files) in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + for column_name in set(self.config.features) - set(pa_table.column_names): + type = self.config.features.arrow_schema.field(column_name).type + pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type)) + pa_table = table_cast(pa_table, self.config.features.arrow_schema) + return pa_table + + def _generate_tables(self, files): + for (file_idx, file) in enumerate(itertools.chain.from_iterable(files)): + if self.config.field is not None: + with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: + dataset = ujson_loads(f.read()) + dataset = dataset[self.config.field] + df = pandas_read_json(io.StringIO(ujson_dumps(dataset))) + if df.columns.tolist() == [0]: + df.columns = list(self.config.features) if self.config.features else ['text'] + pa_table = pa.Table.from_pandas(df, preserve_index=False) + yield (file_idx, 
self._cast_table(pa_table)) + else: + with open(file, 'rb') as f: + batch_idx = 0 + block_size = max(self.config.chunksize // 32, 16 << 10) + encoding_errors = self.config.encoding_errors if self.config.encoding_errors is not None else 'strict' + while True: + batch = f.read(self.config.chunksize) + if not batch: + break + try: + batch += f.readline() + except (AttributeError, io.UnsupportedOperation): + batch += readline(f) + if self.config.encoding != 'utf-8': + batch = batch.decode(self.config.encoding, errors=encoding_errors).encode('utf-8') + try: + while True: + try: + pa_table = paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)) + break + except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: + if isinstance(e, pa.ArrowInvalid) and 'straddling' not in str(e) or block_size > len(batch): + raise + else: + logger.debug(f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.") + block_size *= 2 + except pa.ArrowInvalid as e: + try: + with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: + df = pandas_read_json(f) + except ValueError: + logger.error(f"Failed to load JSON from file '{file}' with error {type(e)}: {e}") + raise e + if df.columns.tolist() == [0]: + df.columns = list(self.config.features) if self.config.features else ['text'] + try: + pa_table = pa.Table.from_pandas(df, preserve_index=False) + except pa.ArrowInvalid as e: + logger.error(f"Failed to convert pandas DataFrame to Arrow Table from file '{file}' with error {type(e)}: {e}") + raise ValueError(f'Failed to convert pandas DataFrame to Arrow Table from file {file}.') from None + yield (file_idx, self._cast_table(pa_table)) + break + yield ((file_idx, batch_idx), self._cast_table(pa_table)) + batch_idx += 1 + +# File: datasets-main/src/datasets/packaged_modules/pandas/pandas.py +import itertools +import warnings +from dataclasses import dataclass +from typing import Optional +import pandas as pd +import pyarrow as pa +import datasets +from datasets.table import table_cast + +@dataclass +class PandasConfig(datasets.BuilderConfig): + features: Optional[datasets.Features] = None + + def __post_init__(self): + super().__post_init__() + +class Pandas(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = PandasConfig + + def _info(self): + warnings.warn('The Pandas builder is deprecated and will be removed in the next major version of datasets.', FutureWarning) + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}') + data_files = dl_manager.download_and_extract(self.config.data_files) + if isinstance(data_files, (str, list, tuple)): + files = data_files + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})] + splits = [] + for (split_name, files) in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + pa_table = table_cast(pa_table, self.config.features.arrow_schema) + return pa_table + + 
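+    # Note: each data file is expected to be a pickled pandas DataFrame; it is read with
+    # pd.read_pickle and converted to a pyarrow Table before the optional feature cast above.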
def _generate_tables(self, files): + for (i, file) in enumerate(itertools.chain.from_iterable(files)): + with open(file, 'rb') as f: + pa_table = pa.Table.from_pandas(pd.read_pickle(f)) + yield (i, self._cast_table(pa_table)) + +# File: datasets-main/src/datasets/packaged_modules/parquet/parquet.py +import itertools +from dataclasses import dataclass +from typing import List, Optional +import pyarrow as pa +import pyarrow.parquet as pq +import datasets +from datasets.table import table_cast +logger = datasets.utils.logging.get_logger(__name__) + +@dataclass +class ParquetConfig(datasets.BuilderConfig): + batch_size: Optional[int] = None + columns: Optional[List[str]] = None + features: Optional[datasets.Features] = None + + def __post_init__(self): + super().__post_init__() + +class Parquet(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = ParquetConfig + + def _info(self): + if self.config.columns is not None and self.config.features is not None and (set(self.config.columns) != set(self.config.features)): + raise ValueError('The columns and features argument must contain the same columns, but got ', f'{self.config.columns} and {self.config.features}') + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}') + dl_manager.download_config.extract_on_the_fly = True + data_files = dl_manager.download_and_extract(self.config.data_files) + splits = [] + for (split_name, files) in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + if self.info.features is None: + for file in itertools.chain.from_iterable(files): + with open(file, 'rb') as f: + self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f)) + break + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files})) + if self.config.columns is not None and set(self.config.columns) != set(self.info.features): + self.info.features = datasets.Features({col: feat for (col, feat) in self.info.features.items() if col in self.config.columns}) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.info.features is not None: + pa_table = table_cast(pa_table, self.info.features.arrow_schema) + return pa_table + + def _generate_tables(self, files): + if self.config.features is not None and self.config.columns is not None: + if sorted((field.name for field in self.info.features.arrow_schema)) != sorted(self.config.columns): + raise ValueError(f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'") + for (file_idx, file) in enumerate(itertools.chain.from_iterable(files)): + with open(file, 'rb') as f: + parquet_file = pq.ParquetFile(f) + if parquet_file.metadata.num_row_groups > 0: + batch_size = self.config.batch_size or parquet_file.metadata.row_group(0).num_rows + try: + for (batch_idx, record_batch) in enumerate(parquet_file.iter_batches(batch_size=batch_size, columns=self.config.columns)): + pa_table = pa.Table.from_batches([record_batch]) + yield (f'{file_idx}_{batch_idx}', self._cast_table(pa_table)) + except ValueError as e: + logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") + raise + +# File: datasets-main/src/datasets/packaged_modules/spark/spark.py +import os +import posixpath +import uuid +from dataclasses import dataclass 
+from itertools import islice +from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union +import numpy as np +import pyarrow as pa +import datasets +from datasets.arrow_writer import ArrowWriter, ParquetWriter +from datasets.config import MAX_SHARD_SIZE +from datasets.filesystems import is_remote_filesystem, rename +from datasets.iterable_dataset import _BaseExamplesIterable +from datasets.utils import experimental +from datasets.utils.py_utils import convert_file_size_to_int +logger = datasets.utils.logging.get_logger(__name__) +if TYPE_CHECKING: + import pyspark + import pyspark.sql + +@dataclass +class SparkConfig(datasets.BuilderConfig): + features: Optional[datasets.Features] = None + + def __post_init__(self): + super().__post_init__() + +def _reorder_dataframe_by_partition(df: 'pyspark.sql.DataFrame', new_partition_order: List[int]): + df_combined = df.select('*').where(f'part_id = {new_partition_order[0]}') + for partition_id in new_partition_order[1:]: + partition_df = df.select('*').where(f'part_id = {partition_id}') + df_combined = df_combined.union(partition_df) + return df_combined + +def _generate_iterable_examples(df: 'pyspark.sql.DataFrame', partition_order: List[int], state_dict: Optional[dict]=None): + import pyspark + df_with_partition_id = df.select('*', pyspark.sql.functions.spark_partition_id().alias('part_id')) + partition_idx_start = state_dict['partition_idx'] if state_dict else 0 + partition_df = _reorder_dataframe_by_partition(df_with_partition_id, partition_order[partition_idx_start:]) + rows = partition_df.toLocalIterator(prefetchPartitions=True) + curr_partition = None + row_id = state_dict['partition_example_idx'] if state_dict else 0 + for row in islice(rows, row_id, None): + row_as_dict = row.asDict() + part_id = row_as_dict['part_id'] + row_as_dict.pop('part_id') + if curr_partition != part_id: + if state_dict and curr_partition is not None: + state_dict['partition_idx'] += 1 + curr_partition = part_id + row_id = 0 + if state_dict: + state_dict['partition_example_idx'] = row_id + 1 + yield (f'{part_id}_{row_id}', row_as_dict) + row_id += 1 + +class SparkExamplesIterable(_BaseExamplesIterable): + + def __init__(self, df: 'pyspark.sql.DataFrame', partition_order=None): + super().__init__() + self.df = df + self.partition_order = partition_order or range(self.df.rdd.getNumPartitions()) + + def _init_state_dict(self) -> dict: + self._state_dict = {'partition_idx': 0, 'partition_example_idx': 0} + return self._state_dict + + @experimental + def load_state_dict(self, state_dict: dict) -> dict: + return super().load_state_dict(state_dict) + + def __iter__(self): + yield from _generate_iterable_examples(self.df, self.partition_order, self._state_dict) + + def shuffle_data_sources(self, generator: np.random.Generator) -> 'SparkExamplesIterable': + partition_order = list(range(self.df.rdd.getNumPartitions())) + generator.shuffle(partition_order) + return SparkExamplesIterable(self.df, partition_order=partition_order) + + def shard_data_sources(self, worker_id: int, num_workers: int) -> 'SparkExamplesIterable': + partition_order = self.split_shard_indices_by_worker(worker_id, num_workers) + return SparkExamplesIterable(self.df, partition_order=partition_order) + + @property + def n_shards(self) -> int: + return len(self.partition_order) + +class Spark(datasets.DatasetBuilder): + BUILDER_CONFIG_CLASS = SparkConfig + + def __init__(self, df: 'pyspark.sql.DataFrame', cache_dir: str=None, working_dir: str=None, **config_kwargs): + import pyspark + 
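+        # Attach to the active SparkSession (or create one); the builder's config name is derived
+        # from the DataFrame's semanticHash(), so identical query plans map to the same config name.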
self._spark = pyspark.sql.SparkSession.builder.getOrCreate() + self.df = df + self._working_dir = working_dir + super().__init__(cache_dir=cache_dir, config_name=str(self.df.semanticHash()), **config_kwargs) + + def _validate_cache_dir(self): + cache_dir = self._cache_dir + + def create_cache_and_write_probe(context): + os.makedirs(cache_dir, exist_ok=True) + probe_file = os.path.join(cache_dir, 'fs_test' + uuid.uuid4().hex) + open(probe_file, 'a') + return [probe_file] + if self._spark.conf.get('spark.master', '').startswith('local'): + return + if self._cache_dir: + probe = self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect() + if os.path.isfile(probe[0]): + return + raise ValueError('When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir') + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager): + return [datasets.SplitGenerator(name=datasets.Split.TRAIN)] + + def _repartition_df_if_needed(self, max_shard_size): + import pyspark + + def get_arrow_batch_size(it): + for batch in it: + yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]}) + df_num_rows = self.df.count() + sample_num_rows = df_num_rows if df_num_rows <= 100 else 100 + approx_bytes_per_row = self.df.limit(sample_num_rows).repartition(1).mapInArrow(get_arrow_batch_size, 'batch_bytes: long').agg(pyspark.sql.functions.sum('batch_bytes').alias('sample_bytes')).collect()[0].sample_bytes / sample_num_rows + approx_total_size = approx_bytes_per_row * df_num_rows + if approx_total_size > max_shard_size: + new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size)) + self.df = self.df.repartition(new_num_partitions) + + def _prepare_split_single(self, fpath: str, file_format: str, max_shard_size: int) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: + import pyspark + writer_class = ParquetWriter if file_format == 'parquet' else ArrowWriter + working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath + embed_local_files = file_format == 'parquet' + features = self.config.features + writer_batch_size = self._writer_batch_size + storage_options = self._fs.storage_options + + def write_arrow(it): + task_id = pyspark.TaskContext().taskAttemptId() + first_batch = next(it, None) + if first_batch is None: + return pa.RecordBatch.from_arrays([[task_id], [0], [0]], names=['task_id', 'num_examples', 'num_bytes']) + shard_id = 0 + writer = writer_class(features=features, path=working_fpath.replace('SSSSS', f'{shard_id:05d}').replace('TTTTT', f'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files) + table = pa.Table.from_batches([first_batch]) + writer.write_table(table) + for batch in it: + if max_shard_size is not None and writer._num_bytes >= max_shard_size: + (num_examples, num_bytes) = writer.finalize() + writer.close() + yield pa.RecordBatch.from_arrays([[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes']) + shard_id += 1 + writer = writer_class(features=writer._features, path=working_fpath.replace('SSSSS', f'{shard_id:05d}').replace('TTTTT', f'{task_id:05d}'), writer_batch_size=writer_batch_size, storage_options=storage_options, embed_local_files=embed_local_files) + table = pa.Table.from_batches([batch]) + 
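+                # Append the current Arrow record batch to the active shard writer; a new shard
+                # was started above if the previous one exceeded max_shard_size.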
writer.write_table(table) + if writer._num_bytes > 0: + (num_examples, num_bytes) = writer.finalize() + writer.close() + yield pa.RecordBatch.from_arrays([[task_id], [num_examples], [num_bytes]], names=['task_id', 'num_examples', 'num_bytes']) + if working_fpath != fpath: + for file in os.listdir(os.path.dirname(working_fpath)): + dest = os.path.join(os.path.dirname(fpath), os.path.basename(file)) + shutil.move(file, dest) + stats = self.df.mapInArrow(write_arrow, 'task_id: long, num_examples: long, num_bytes: long').groupBy('task_id').agg(pyspark.sql.functions.sum('num_examples').alias('total_num_examples'), pyspark.sql.functions.sum('num_bytes').alias('total_num_bytes'), pyspark.sql.functions.count('num_bytes').alias('num_shards'), pyspark.sql.functions.collect_list('num_examples').alias('shard_lengths')).collect() + for row in stats: + yield (row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)) + + def _prepare_split(self, split_generator: 'datasets.SplitGenerator', file_format: str='arrow', max_shard_size: Optional[Union[str, int]]=None, num_proc: Optional[int]=None, **kwargs): + self._validate_cache_dir() + max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE) + self._repartition_df_if_needed(max_shard_size) + is_local = not is_remote_filesystem(self._fs) + path_join = os.path.join if is_local else posixpath.join + SUFFIX = '-TTTTT-SSSSS-of-NNNNN' + fname = f'{self.name}-{split_generator.name}{SUFFIX}.{file_format}' + fpath = path_join(self._output_dir, fname) + total_num_examples = 0 + total_num_bytes = 0 + total_shards = 0 + task_id_and_num_shards = [] + all_shard_lengths = [] + for (task_id, content) in self._prepare_split_single(fpath, file_format, max_shard_size): + (num_examples, num_bytes, num_shards, shard_lengths) = content + if num_bytes > 0: + total_num_examples += num_examples + total_num_bytes += num_bytes + total_shards += num_shards + task_id_and_num_shards.append((task_id, num_shards)) + all_shard_lengths.extend(shard_lengths) + split_generator.split_info.num_examples = total_num_examples + split_generator.split_info.num_bytes = total_num_bytes + logger.debug(f'Renaming {total_shards} shards.') + if total_shards > 1: + split_generator.split_info.shard_lengths = all_shard_lengths + fs = self._fs + + def _rename_shard(task_id: int, shard_id: int, global_shard_id: int): + rename(fs, fpath.replace('SSSSS', f'{shard_id:05d}').replace('TTTTT', f'{task_id:05d}'), fpath.replace('TTTTT-SSSSS', f'{global_shard_id:05d}').replace('NNNNN', f'{total_shards:05d}')) + args = [] + global_shard_id = 0 + for i in range(len(task_id_and_num_shards)): + (task_id, num_shards) = task_id_and_num_shards[i] + for shard_id in range(num_shards): + args.append([task_id, shard_id, global_shard_id]) + global_shard_id += 1 + self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect() + else: + shard_id = 0 + task_id = task_id_and_num_shards[0][0] + self._rename(fpath.replace('SSSSS', f'{shard_id:05d}').replace('TTTTT', f'{task_id:05d}'), fpath.replace(SUFFIX, '')) + + def _get_examples_iterable_for_split(self, split_generator: 'datasets.SplitGenerator') -> SparkExamplesIterable: + return SparkExamplesIterable(self.df) + +# File: datasets-main/src/datasets/packaged_modules/sql/sql.py +import sys +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union +import pandas as pd +import pyarrow as pa +import datasets +import datasets.config +from 
datasets.features.features import require_storage_cast +from datasets.table import table_cast +if TYPE_CHECKING: + import sqlite3 + import sqlalchemy +logger = datasets.utils.logging.get_logger(__name__) + +@dataclass +class SqlConfig(datasets.BuilderConfig): + sql: Union[str, 'sqlalchemy.sql.Selectable'] = None + con: Union[str, 'sqlalchemy.engine.Connection', 'sqlalchemy.engine.Engine', 'sqlite3.Connection'] = None + index_col: Optional[Union[str, List[str]]] = None + coerce_float: bool = True + params: Optional[Union[List, Tuple, Dict]] = None + parse_dates: Optional[Union[List, Dict]] = None + columns: Optional[List[str]] = None + chunksize: Optional[int] = 10000 + features: Optional[datasets.Features] = None + + def __post_init__(self): + super().__post_init__() + if self.sql is None: + raise ValueError('sql must be specified') + if self.con is None: + raise ValueError('con must be specified') + + def create_config_id(self, config_kwargs: dict, custom_features: Optional[datasets.Features]=None) -> str: + config_kwargs = config_kwargs.copy() + sql = config_kwargs['sql'] + if not isinstance(sql, str): + if datasets.config.SQLALCHEMY_AVAILABLE and 'sqlalchemy' in sys.modules: + import sqlalchemy + if isinstance(sql, sqlalchemy.sql.Selectable): + engine = sqlalchemy.create_engine(config_kwargs['con'].split('://')[0] + '://') + sql_str = str(sql.compile(dialect=engine.dialect)) + config_kwargs['sql'] = sql_str + else: + raise TypeError(f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}") + else: + raise TypeError(f"Supported types for 'sql' are string and sqlalchemy.sql.Selectable but got {type(sql)}: {sql}") + con = config_kwargs['con'] + if not isinstance(con, str): + config_kwargs['con'] = id(con) + logger.info(f"SQL connection 'con' of type {type(con)} couldn't be hashed properly. 
To enable hashing, specify 'con' as URI string instead.") + return super().create_config_id(config_kwargs, custom_features=custom_features) + + @property + def pd_read_sql_kwargs(self): + pd_read_sql_kwargs = {'index_col': self.index_col, 'columns': self.columns, 'params': self.params, 'coerce_float': self.coerce_float, 'parse_dates': self.parse_dates} + return pd_read_sql_kwargs + +class Sql(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = SqlConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={})] + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + schema = self.config.features.arrow_schema + if all((not require_storage_cast(feature) for feature in self.config.features.values())): + pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema) + else: + pa_table = table_cast(pa_table, schema) + return pa_table + + def _generate_tables(self): + chunksize = self.config.chunksize + sql_reader = pd.read_sql(self.config.sql, self.config.con, chunksize=chunksize, **self.config.pd_read_sql_kwargs) + sql_reader = [sql_reader] if chunksize is None else sql_reader + for (chunk_idx, df) in enumerate(sql_reader): + pa_table = pa.Table.from_pandas(df) + yield (chunk_idx, self._cast_table(pa_table)) + +# File: datasets-main/src/datasets/packaged_modules/text/text.py +import itertools +from dataclasses import dataclass +from io import StringIO +from typing import Optional +import pyarrow as pa +import datasets +from datasets.features.features import require_storage_cast +from datasets.table import table_cast +logger = datasets.utils.logging.get_logger(__name__) + +@dataclass +class TextConfig(datasets.BuilderConfig): + features: Optional[datasets.Features] = None + encoding: str = 'utf-8' + encoding_errors: Optional[str] = None + chunksize: int = 10 << 20 + keep_linebreaks: bool = False + sample_by: str = 'line' + +class Text(datasets.ArrowBasedBuilder): + BUILDER_CONFIG_CLASS = TextConfig + + def _info(self): + return datasets.DatasetInfo(features=self.config.features) + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}') + dl_manager.download_config.extract_on_the_fly = True + data_files = dl_manager.download_and_extract(self.config.data_files) + splits = [] + for (split_name, files) in data_files.items(): + if isinstance(files, str): + files = [files] + files = [dl_manager.iter_files(file) for file in files] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files})) + return splits + + def _cast_table(self, pa_table: pa.Table) -> pa.Table: + if self.config.features is not None: + schema = self.config.features.arrow_schema + if all((not require_storage_cast(feature) for feature in self.config.features.values())): + pa_table = pa_table.cast(schema) + else: + pa_table = table_cast(pa_table, schema) + return pa_table + else: + return pa_table.cast(pa.schema({'text': pa.string()})) + + def _generate_tables(self, files): + pa_table_names = list(self.config.features) if self.config.features is not None else ['text'] + for (file_idx, file) in enumerate(itertools.chain.from_iterable(files)): + with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f: + if self.config.sample_by == 'line': 
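+                    # 'line' sampling: read ~chunksize bytes at a time, extend the block with
+                    # f.readline() so that no line is split across two batches, split it into
+                    # lines, and optionally strip the trailing '\n' before building the Arrow table.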
+                    batch_idx = 0
+                    while True:
+                        batch = f.read(self.config.chunksize)
+                        if not batch:
+                            break
+                        batch += f.readline()
+                        batch = StringIO(batch).readlines()
+                        if not self.config.keep_linebreaks:
+                            batch = [line.rstrip('\n') for line in batch]
+                        pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)
+                        yield ((file_idx, batch_idx), self._cast_table(pa_table))
+                        batch_idx += 1
+                elif self.config.sample_by == 'paragraph':
+                    batch_idx = 0
+                    batch = ''
+                    while True:
+                        new_batch = f.read(self.config.chunksize)
+                        if not new_batch:
+                            break
+                        batch += new_batch
+                        batch += f.readline()
+                        batch = batch.split('\n\n')
+                        pa_table = pa.Table.from_arrays([pa.array([example for example in batch[:-1] if example])], names=pa_table_names)
+                        yield ((file_idx, batch_idx), self._cast_table(pa_table))
+                        batch_idx += 1
+                        batch = batch[-1]
+                    if batch:
+                        pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names)
+                        yield ((file_idx, batch_idx), self._cast_table(pa_table))
+                elif self.config.sample_by == 'document':
+                    text = f.read()
+                    pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)
+                    yield (file_idx, self._cast_table(pa_table))
+
+# File: datasets-main/src/datasets/packaged_modules/webdataset/_tenbin.py
+""""""
+import struct
+import sys
+import numpy as np
+
+def bytelen(a):
+    if hasattr(a, 'nbytes'):
+        return a.nbytes
+    elif isinstance(a, (bytearray, bytes)):
+        return len(a)
+    else:
+        raise ValueError(a, 'cannot determine nbytes')
+
+def bytedata(a):
+    if isinstance(a, (bytearray, bytes, memoryview)):
+        return a
+    elif hasattr(a, 'data'):
+        return a.data
+    else:
+        raise ValueError(a, 'cannot return bytedata')
+long_to_short = '\nfloat16 f2\nfloat32 f4\nfloat64 f8\nint8 i1\nint16 i2\nint32 i4\nint64 i8\nuint8 u1\nuint16 u2\nuint32 u4\nuint64 u8\n'.strip()
+long_to_short = [x.split() for x in long_to_short.split('\n')]
+long_to_short = {x[0]: x[1] for x in long_to_short}
+short_to_long = {v: k for (k, v) in long_to_short.items()}
+
+def check_acceptable_input_type(data, allow64):
+    for a in data:
+        if a.dtype.name not in long_to_short:
+            raise ValueError('unsupported datatype')
+        # reject 64-bit dtypes only when they have not been explicitly allowed
+        if not allow64 and a.dtype.name in ['float64', 'int64', 'uint64']:
+            raise ValueError('64 bit datatypes not allowed unless explicitly enabled')
+
+def str64(s):
+    s = s + '\x00' * (8 - len(s))
+    s = s.encode('ascii')
+    return struct.unpack('@q', s)[0]
+
+def unstr64(i):
+    b = struct.pack('@q', i)
+    return b.decode('ascii').strip('\x00')
+
+def check_infos(data, infos, required_infos=None):
+    if required_infos is False or required_infos is None:
+        return data
+    if required_infos is True:
+        return (data, infos)
+    if not isinstance(required_infos, (tuple, list)):
+        raise ValueError('required_infos must be tuple or list')
+    for (required, actual) in zip(required_infos, infos):
+        # only fail when an actual info string differs from the required one
+        if required != actual:
+            raise ValueError(f"actual info {actual} doesn't match required info {required}")
+    return data
+
+def encode_header(a, info=''):
+    if a.ndim >= 10:
+        raise ValueError('too many dimensions')
+    if a.nbytes != np.prod(a.shape) * a.itemsize:
+        raise ValueError('mismatch between size and shape')
+    if a.dtype.name not in long_to_short:
+        raise ValueError('unsupported array type')
+    header = [str64(long_to_short[a.dtype.name]), str64(info), len(a.shape)] + list(a.shape)
+    return bytedata(np.array(header, dtype='i8'))
+
+def decode_header(h):
+    h = np.frombuffer(h, dtype='i8')
+    if unstr64(h[0]) not in short_to_long:
+        raise ValueError('unsupported array type')
+    dtype = np.dtype(short_to_long[unstr64(h[0])])
+    info = 
unstr64(h[1]) + rank = int(h[2]) + shape = tuple(h[3:3 + rank]) + return (shape, dtype, info) + +def encode_list(l, infos=None): + if infos is None: + infos = [''] + elif len(l) != len(infos): + raise ValueError(f'length of list {l} must muatch length of infos {infos}') + result = [] + for (i, a) in enumerate(l): + header = encode_header(a, infos[i % len(infos)]) + result += [header, bytedata(a)] + return result + +def decode_list(l, infos=False): + result = [] + infos0 = [] + for (header, data) in zip(l[::2], l[1::2]): + (shape, dtype, info) = decode_header(header) + a = np.frombuffer(data, dtype=dtype, count=np.prod(shape)).reshape(*shape) + result += [a] + infos0 += [info] + return check_infos(result, infos0, infos) +magic_str = '~TenBin~' +magic = str64(magic_str) +magic_bytes = unstr64(magic).encode('ascii') + +def roundup(n, k=64): + return k * ((n + k - 1) // k) + +def encode_chunks(l): + size = sum((16 + roundup(b.nbytes) for b in l)) + result = bytearray(size) + offset = 0 + for b in l: + result[offset:offset + 8] = magic_bytes + offset += 8 + result[offset:offset + 8] = struct.pack('@q', b.nbytes) + offset += 8 + result[offset:offset + bytelen(b)] = b + offset += roundup(bytelen(b)) + return result + +def decode_chunks(buf): + result = [] + offset = 0 + total = bytelen(buf) + while offset < total: + if magic_bytes != buf[offset:offset + 8]: + raise ValueError('magic bytes mismatch') + offset += 8 + nbytes = struct.unpack('@q', buf[offset:offset + 8])[0] + offset += 8 + b = buf[offset:offset + nbytes] + offset += roundup(nbytes) + result.append(b) + return result + +def encode_buffer(l, infos=None): + if not isinstance(l, list): + raise ValueError('requires list') + return encode_chunks(encode_list(l, infos=infos)) + +def decode_buffer(buf, infos=False): + return decode_list(decode_chunks(buf), infos=infos) + +def write_chunk(stream, buf): + nbytes = bytelen(buf) + stream.write(magic_bytes) + stream.write(struct.pack('@q', nbytes)) + stream.write(bytedata(buf)) + padding = roundup(nbytes) - nbytes + if padding > 0: + stream.write(b'\x00' * padding) + +def read_chunk(stream): + magic = stream.read(8) + if magic == b'': + return None + if magic != magic_bytes: + raise ValueError('magic number does not match') + nbytes = stream.read(8) + nbytes = struct.unpack('@q', nbytes)[0] + if nbytes < 0: + raise ValueError('negative nbytes') + data = stream.read(nbytes) + padding = roundup(nbytes) - nbytes + if padding > 0: + stream.read(padding) + return data + +def write(stream, l, infos=None): + for chunk in encode_list(l, infos=infos): + write_chunk(stream, chunk) + +def read(stream, n=sys.maxsize, infos=False): + chunks = [] + for _ in range(n): + header = read_chunk(stream) + if header is None: + break + data = read_chunk(stream) + if data is None: + raise ValueError('premature EOF') + chunks += [header, data] + return decode_list(chunks, infos=infos) + +def save(fname, *args, infos=None, nocheck=False): + if not nocheck and (not fname.endswith('.ten')): + raise ValueError('file name should end in .ten') + with open(fname, 'wb') as stream: + write(stream, args, infos=infos) + +def load(fname, infos=False, nocheck=False): + if not nocheck and (not fname.endswith('.ten')): + raise ValueError('file name should end in .ten') + with open(fname, 'rb') as stream: + return read(stream, infos=infos) + +# File: datasets-main/src/datasets/packaged_modules/webdataset/webdataset.py +import io +import json +from itertools import islice +from typing import Any, Callable, Dict, List +import fsspec 
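+# The WebDataset builder below streams TAR archives in the WebDataset layout: archive members are
+# grouped into examples by the key before the first '.', single-file-compressed members are
+# decompressed through an in-memory filesystem, and known extensions are decoded with the DECODERS
+# table defined at the end of this file. Features are inferred from the first few examples when
+# they are not provided explicitly.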
+import numpy as np +import pyarrow as pa +import datasets +from datasets.features.features import cast_to_python_objects +from datasets.utils.file_utils import SINGLE_FILE_COMPRESSION_EXTENSION_TO_PROTOCOL, xbasename +logger = datasets.utils.logging.get_logger(__name__) + +class WebDataset(datasets.GeneratorBasedBuilder): + DEFAULT_WRITER_BATCH_SIZE = 100 + IMAGE_EXTENSIONS: List[str] + AUDIO_EXTENSIONS: List[str] + DECODERS: Dict[str, Callable[[Any], Any]] + NUM_EXAMPLES_FOR_FEATURES_INFERENCE = 5 + + @classmethod + def _get_pipeline_from_tar(cls, tar_path, tar_iterator): + current_example = {} + fs: fsspec.AbstractFileSystem = fsspec.filesystem('memory') + streaming_download_manager = datasets.StreamingDownloadManager() + for (filename, f) in tar_iterator: + if '.' in filename: + (example_key, field_name) = filename.split('.', 1) + if current_example and current_example['__key__'] != example_key: + yield current_example + current_example = {} + current_example['__key__'] = example_key + current_example['__url__'] = tar_path + current_example[field_name.lower()] = f.read() + if field_name.split('.')[-1] in SINGLE_FILE_COMPRESSION_EXTENSION_TO_PROTOCOL: + fs.write_bytes(filename, current_example[field_name.lower()]) + extracted_file_path = streaming_download_manager.extract(f'memory://{filename}') + with fsspec.open(extracted_file_path) as f: + current_example[field_name.lower()] = f.read() + fs.delete(filename) + data_extension = xbasename(extracted_file_path).split('.')[-1] + else: + data_extension = field_name.split('.')[-1] + if data_extension in cls.DECODERS: + current_example[field_name] = cls.DECODERS[data_extension](current_example[field_name]) + if current_example: + yield current_example + + def _info(self) -> datasets.DatasetInfo: + return datasets.DatasetInfo() + + def _split_generators(self, dl_manager): + if not self.config.data_files: + raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}') + data_files = dl_manager.download(self.config.data_files) + splits = [] + for (split_name, tar_paths) in data_files.items(): + if isinstance(tar_paths, str): + tar_paths = [tar_paths] + tar_iterators = [dl_manager.iter_archive(tar_path) for tar_path in tar_paths] + splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'tar_paths': tar_paths, 'tar_iterators': tar_iterators})) + if not self.info.features: + pipeline = self._get_pipeline_from_tar(tar_paths[0], tar_iterators[0]) + first_examples = list(islice(pipeline, self.NUM_EXAMPLES_FOR_FEATURES_INFERENCE)) + if any((example.keys() != first_examples[0].keys() for example in first_examples)): + raise ValueError("The TAR archives of the dataset should be in WebDataset format, but the files in the archive don't share the same prefix or the same types.") + pa_tables = [pa.Table.from_pylist(cast_to_python_objects([example], only_1d_for_numpy=True)) for example in first_examples] + inferred_arrow_schema = pa.concat_tables(pa_tables, promote_options='default').schema + features = datasets.Features.from_arrow_schema(inferred_arrow_schema) + for field_name in first_examples[0]: + extension = field_name.rsplit('.', 1)[-1] + if extension in self.IMAGE_EXTENSIONS: + features[field_name] = datasets.Image() + for field_name in first_examples[0]: + extension = field_name.rsplit('.', 1)[-1] + if extension in self.AUDIO_EXTENSIONS: + features[field_name] = datasets.Audio() + self.info.features = features + return splits + + def _generate_examples(self, tar_paths, tar_iterators): + 
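+        # For each TAR archive, re-run the decoding pipeline, fill fields that are missing from an
+        # example with None, and wrap raw image/audio bytes as {'path', 'bytes'} dicts so the Image
+        # and Audio features can decode them; example keys are '{tar_idx}_{example_idx}'.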
image_field_names = [field_name for (field_name, feature) in self.info.features.items() if isinstance(feature, datasets.Image)] + audio_field_names = [field_name for (field_name, feature) in self.info.features.items() if isinstance(feature, datasets.Audio)] + all_field_names = list(self.info.features.keys()) + for (tar_idx, (tar_path, tar_iterator)) in enumerate(zip(tar_paths, tar_iterators)): + for (example_idx, example) in enumerate(self._get_pipeline_from_tar(tar_path, tar_iterator)): + for field_name in all_field_names: + if field_name not in example: + example[field_name] = None + for field_name in image_field_names + audio_field_names: + if example[field_name] is not None: + example[field_name] = {'path': example['__key__'] + '.' + field_name, 'bytes': example[field_name]} + yield (f'{tar_idx}_{example_idx}', example) +IMAGE_EXTENSIONS = ['blp', 'bmp', 'dib', 'bufr', 'cur', 'pcx', 'dcx', 'dds', 'ps', 'eps', 'fit', 'fits', 'fli', 'flc', 'ftc', 'ftu', 'gbr', 'gif', 'grib', 'h5', 'hdf', 'png', 'apng', 'jp2', 'j2k', 'jpc', 'jpf', 'jpx', 'j2c', 'icns', 'ico', 'im', 'iim', 'tif', 'tiff', 'jfif', 'jpe', 'jpg', 'jpeg', 'mpg', 'mpeg', 'msp', 'pcd', 'pxr', 'pbm', 'pgm', 'ppm', 'pnm', 'psd', 'bw', 'rgb', 'rgba', 'sgi', 'ras', 'tga', 'icb', 'vda', 'vst', 'webp', 'wmf', 'emf', 'xbm', 'xpm'] +WebDataset.IMAGE_EXTENSIONS = IMAGE_EXTENSIONS +AUDIO_EXTENSIONS = ['aiff', 'au', 'avr', 'caf', 'flac', 'htk', 'svx', 'mat4', 'mat5', 'mpc2k', 'ogg', 'paf', 'pvf', 'raw', 'rf64', 'sd2', 'sds', 'ircam', 'voc', 'w64', 'wav', 'nist', 'wavex', 'wve', 'xi', 'mp3', 'opus'] +WebDataset.AUDIO_EXTENSIONS = AUDIO_EXTENSIONS + +def text_loads(data: bytes): + return data.decode('utf-8') + +def tenbin_loads(data: bytes): + from . import _tenbin + return _tenbin.decode_buffer(data) + +def msgpack_loads(data: bytes): + import msgpack + return msgpack.unpackb(data) + +def npy_loads(data: bytes): + import numpy.lib.format + stream = io.BytesIO(data) + return numpy.lib.format.read_array(stream, allow_pickle=False) + +def npz_loads(data: bytes): + return np.load(io.BytesIO(data), allow_pickle=False) + +def cbor_loads(data: bytes): + import cbor + return cbor.loads(data) + +def torch_loads(data: bytes): + import torch + return torch.load(io.BytesIO(data), weights_only=True) +DECODERS = {'txt': text_loads, 'text': text_loads, 'transcript': text_loads, 'cls': int, 'cls2': int, 'index': int, 'inx': int, 'id': int, 'json': json.loads, 'jsn': json.loads, 'ten': tenbin_loads, 'tb': tenbin_loads, 'mp': msgpack_loads, 'msg': msgpack_loads, 'npy': npy_loads, 'npz': npz_loads, 'cbor': cbor_loads, 'pth': torch_loads} +WebDataset.DECODERS = DECODERS + +# File: datasets-main/src/datasets/parallel/parallel.py +import contextlib +from multiprocessing import Pool, RLock +from tqdm.auto import tqdm +from ..utils import experimental, logging +logger = logging.get_logger(__name__) + +class ParallelBackendConfig: + backend_name = None + +@experimental +def parallel_map(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func): + if ParallelBackendConfig.backend_name is None: + return _map_with_multiprocessing_pool(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func) + return _map_with_joblib(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func) + +def _map_with_multiprocessing_pool(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func): + num_proc = num_proc if 
num_proc <= len(iterable) else len(iterable) + split_kwds = [] + for index in range(num_proc): + div = len(iterable) // num_proc + mod = len(iterable) % num_proc + start = div * index + min(index, mod) + end = start + div + (1 if index < mod else 0) + split_kwds.append((function, iterable[start:end], batched, batch_size, types, index, disable_tqdm, desc)) + if len(iterable) != sum((len(i[1]) for i in split_kwds)): + raise ValueError(f'Error dividing inputs iterable among processes. Total number of objects {len(iterable)}, length: {sum((len(i[1]) for i in split_kwds))}') + logger.info(f'Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}') + (initargs, initializer) = (None, None) + if not disable_tqdm: + (initargs, initializer) = ((RLock(),), tqdm.set_lock) + with Pool(num_proc, initargs=initargs, initializer=initializer) as pool: + mapped = pool.map(single_map_nested_func, split_kwds) + logger.info(f'Finished {num_proc} processes') + mapped = [obj for proc_res in mapped for obj in proc_res] + logger.info(f'Unpacked {len(mapped)} objects') + return mapped + +def _map_with_joblib(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func): + import joblib + with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc): + return joblib.Parallel()((joblib.delayed(single_map_nested_func)((function, obj, batched, batch_size, types, None, True, None)) for obj in iterable)) + +@experimental +@contextlib.contextmanager +def parallel_backend(backend_name: str): + ParallelBackendConfig.backend_name = backend_name + if backend_name == 'spark': + from joblibspark import register_spark + register_spark() + try: + yield + finally: + ParallelBackendConfig.backend_name = None + +# File: datasets-main/src/datasets/search.py +import importlib.util +import os +import tempfile +from pathlib import PurePath +from typing import TYPE_CHECKING, Dict, List, NamedTuple, Optional, Union +import fsspec +import numpy as np +from .features import Sequence +from .utils import logging +from .utils import tqdm as hf_tqdm +if TYPE_CHECKING: + from .arrow_dataset import Dataset + try: + from elasticsearch import Elasticsearch + except ImportError: + pass + try: + import faiss + except ImportError: + pass +_has_elasticsearch = importlib.util.find_spec('elasticsearch') is not None +_has_faiss = importlib.util.find_spec('faiss') is not None +logger = logging.get_logger(__name__) + +class MissingIndex(Exception): + pass + +class SearchResults(NamedTuple): + scores: List[float] + indices: List[int] + +class BatchedSearchResults(NamedTuple): + total_scores: List[List[float]] + total_indices: List[List[int]] + +class NearestExamplesResults(NamedTuple): + scores: List[float] + examples: dict + +class BatchedNearestExamplesResults(NamedTuple): + total_scores: List[List[float]] + total_examples: List[dict] + +class BaseIndex: + + def search(self, query, k: int=10, **kwargs) -> SearchResults: + raise NotImplementedError + + def search_batch(self, queries, k: int=10, **kwargs) -> BatchedSearchResults: + (total_scores, total_indices) = ([], []) + for query in queries: + (scores, indices) = self.search(query, k) + total_scores.append(scores) + total_indices.append(indices) + return BatchedSearchResults(total_scores, total_indices) + + def save(self, file: Union[str, PurePath]): + raise NotImplementedError + + @classmethod + def load(cls, file: Union[str, PurePath]) -> 'BaseIndex': + raise NotImplementedError + +class 
ElasticSearchIndex(BaseIndex): + + def __init__(self, host: Optional[str]=None, port: Optional[int]=None, es_client: Optional['Elasticsearch']=None, es_index_name: Optional[str]=None, es_index_config: Optional[dict]=None): + if not _has_elasticsearch: + raise ImportError('You must install ElasticSearch to use ElasticSearchIndex. To do so you can run `pip install elasticsearch==7.7.1 for example`') + if es_client is not None and (host is not None or port is not None): + raise ValueError('Please specify either `es_client` or `(host, port)`, but not both.') + host = host or 'localhost' + port = port or 9200 + import elasticsearch.helpers + from elasticsearch import Elasticsearch + self.es_client = es_client if es_client is not None else Elasticsearch([{'host': host, 'port': str(port)}]) + self.es_index_name = es_index_name if es_index_name is not None else 'huggingface_datasets_' + os.path.basename(tempfile.NamedTemporaryFile().name) + self.es_index_config = es_index_config if es_index_config is not None else {'settings': {'number_of_shards': 1, 'analysis': {'analyzer': {'stop_standard': {'type': 'standard', ' stopwords': '_english_'}}}}, 'mappings': {'properties': {'text': {'type': 'text', 'analyzer': 'standard', 'similarity': 'BM25'}}}} + + def add_documents(self, documents: Union[List[str], 'Dataset'], column: Optional[str]=None): + index_name = self.es_index_name + index_config = self.es_index_config + self.es_client.indices.create(index=index_name, body=index_config) + number_of_docs = len(documents) + progress = hf_tqdm(unit='docs', total=number_of_docs) + successes = 0 + + def passage_generator(): + if column is not None: + for (i, example) in enumerate(documents): + yield {'text': example[column], '_id': i} + else: + for (i, example) in enumerate(documents): + yield {'text': example, '_id': i} + import elasticsearch as es + for (ok, action) in es.helpers.streaming_bulk(client=self.es_client, index=index_name, actions=passage_generator()): + progress.update(1) + successes += ok + if successes != len(documents): + logger.warning(f'Some documents failed to be added to ElasticSearch. 
Failures: {len(documents) - successes}/{len(documents)}') + logger.info(f'Indexed {successes:d} documents') + + def search(self, query: str, k=10, **kwargs) -> SearchResults: + response = self.es_client.search(index=self.es_index_name, body={'query': {'multi_match': {'query': query, 'fields': ['text'], 'type': 'cross_fields'}}, 'size': k}, **kwargs) + hits = response['hits']['hits'] + return SearchResults([hit['_score'] for hit in hits], [int(hit['_id']) for hit in hits]) + + def search_batch(self, queries, k: int=10, max_workers=10, **kwargs) -> BatchedSearchResults: + import concurrent.futures + (total_scores, total_indices) = ([None] * len(queries), [None] * len(queries)) + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_index = {executor.submit(self.search, query, k, **kwargs): i for (i, query) in enumerate(queries)} + for future in concurrent.futures.as_completed(future_to_index): + index = future_to_index[future] + results: SearchResults = future.result() + total_scores[index] = results.scores + total_indices[index] = results.indices + return BatchedSearchResults(total_indices=total_indices, total_scores=total_scores) + +class FaissIndex(BaseIndex): + + def __init__(self, device: Optional[Union[int, List[int]]]=None, string_factory: Optional[str]=None, metric_type: Optional[int]=None, custom_index: Optional['faiss.Index']=None): + if string_factory is not None and custom_index is not None: + raise ValueError('Please specify either `string_factory` or `custom_index` but not both.') + if device is not None and custom_index is not None: + raise ValueError("Cannot pass both 'custom_index' and 'device'. Pass 'custom_index' already transferred to the target device instead.") + self.device = device + self.string_factory = string_factory + self.metric_type = metric_type + self.faiss_index = custom_index + if not _has_faiss: + raise ImportError('You must install Faiss to use FaissIndex. To do so you can run `conda install -c pytorch faiss-cpu` or `conda install -c pytorch faiss-gpu`. A community supported package is also available on pypi: `pip install faiss-cpu` or `pip install faiss-gpu`. Note that pip may not have the latest version of FAISS, and thus, some of the latest features and bug fixes may not be available.') + + def add_vectors(self, vectors: Union[np.array, 'Dataset'], column: Optional[str]=None, batch_size: int=1000, train_size: Optional[int]=None, faiss_verbose: Optional[bool]=None): + import faiss + if column and (not isinstance(vectors.features[column], Sequence)): + raise ValueError(f"Wrong feature type for column '{column}'. 
Expected 1d array, got {vectors.features[column]}") + if self.faiss_index is None: + size = len(vectors[0]) if column is None else len(vectors[0][column]) + if self.string_factory is not None: + if self.metric_type is None: + index = faiss.index_factory(size, self.string_factory) + else: + index = faiss.index_factory(size, self.string_factory, self.metric_type) + elif self.metric_type is None: + index = faiss.IndexFlat(size) + else: + index = faiss.IndexFlat(size, self.metric_type) + self.faiss_index = self._faiss_index_to_device(index, self.device) + logger.info(f'Created faiss index of type {type(self.faiss_index)}') + if faiss_verbose is not None: + self.faiss_index.verbose = faiss_verbose + if hasattr(self.faiss_index, 'index') and self.faiss_index.index is not None: + self.faiss_index.index.verbose = faiss_verbose + if hasattr(self.faiss_index, 'quantizer') and self.faiss_index.quantizer is not None: + self.faiss_index.quantizer.verbose = faiss_verbose + if hasattr(self.faiss_index, 'clustering_index') and self.faiss_index.clustering_index is not None: + self.faiss_index.clustering_index.verbose = faiss_verbose + if train_size is not None: + train_vecs = vectors[:train_size] if column is None else vectors[:train_size][column] + logger.info(f'Training the index with the first {len(train_vecs)} vectors') + self.faiss_index.train(train_vecs) + else: + logger.info('Ignored the training step of the faiss index as `train_size` is None.') + logger.info(f'Adding {len(vectors)} vectors to the faiss index') + for i in hf_tqdm(range(0, len(vectors), batch_size)): + vecs = vectors[i:i + batch_size] if column is None else vectors[i:i + batch_size][column] + self.faiss_index.add(vecs) + + @staticmethod + def _faiss_index_to_device(index: 'faiss.Index', device: Optional[Union[int, List[int]]]=None) -> 'faiss.Index': + if device is None: + return index + import faiss + if isinstance(device, int): + if device > -1: + faiss_res = faiss.StandardGpuResources() + index = faiss.index_cpu_to_gpu(faiss_res, device, index) + else: + index = faiss.index_cpu_to_all_gpus(index) + elif isinstance(device, (list, tuple)): + index = faiss.index_cpu_to_gpus_list(index, gpus=list(device)) + else: + raise TypeError(f'The argument type: {type(device)} is not expected. 
' + 'Please pass in either nothing, a positive int, a negative int, or a list of positive ints.') + return index + + def search(self, query: np.array, k=10, **kwargs) -> SearchResults: + if len(query.shape) != 1 and (len(query.shape) != 2 or query.shape[0] != 1): + raise ValueError('Shape of query is incorrect, it has to be either a 1D array or 2D (1, N)') + queries = query.reshape(1, -1) + if not queries.flags.c_contiguous: + queries = np.asarray(queries, order='C') + (scores, indices) = self.faiss_index.search(queries, k, **kwargs) + return SearchResults(scores[0], indices[0].astype(int)) + + def search_batch(self, queries: np.array, k=10, **kwargs) -> BatchedSearchResults: + if len(queries.shape) != 2: + raise ValueError('Shape of query must be 2D') + if not queries.flags.c_contiguous: + queries = np.asarray(queries, order='C') + (scores, indices) = self.faiss_index.search(queries, k, **kwargs) + return BatchedSearchResults(scores, indices.astype(int)) + + def save(self, file: Union[str, PurePath], storage_options: Optional[Dict]=None): + import faiss + if self.device is not None and isinstance(self.device, (int, list, tuple)): + index = faiss.index_gpu_to_cpu(self.faiss_index) + else: + index = self.faiss_index + with fsspec.open(str(file), 'wb', **storage_options or {}) as f: + faiss.write_index(index, faiss.BufferedIOWriter(faiss.PyCallbackIOWriter(f.write))) + + @classmethod + def load(cls, file: Union[str, PurePath], device: Optional[Union[int, List[int]]]=None, storage_options: Optional[Dict]=None) -> 'FaissIndex': + import faiss + faiss_index = cls(device=device) + with fsspec.open(str(file), 'rb', **storage_options or {}) as f: + index = faiss.read_index(faiss.BufferedIOReader(faiss.PyCallbackIOReader(f.read))) + faiss_index.faiss_index = faiss_index._faiss_index_to_device(index, faiss_index.device) + return faiss_index + +class IndexableMixin: + + def __init__(self): + self._indexes: Dict[str, BaseIndex] = {} + + def __len__(self): + raise NotImplementedError + + def __getitem__(self, key): + raise NotImplementedError + + def is_index_initialized(self, index_name: str) -> bool: + return index_name in self._indexes + + def _check_index_is_initialized(self, index_name: str): + if not self.is_index_initialized(index_name): + raise MissingIndex(f"Index with index_name '{index_name}' not initialized yet. 
Please make sure that you call `add_faiss_index` or `add_elasticsearch_index` first.") + + def list_indexes(self) -> List[str]: + return list(self._indexes) + + def get_index(self, index_name: str) -> BaseIndex: + self._check_index_is_initialized(index_name) + return self._indexes[index_name] + + def add_faiss_index(self, column: str, index_name: Optional[str]=None, device: Optional[Union[int, List[int]]]=None, string_factory: Optional[str]=None, metric_type: Optional[int]=None, custom_index: Optional['faiss.Index']=None, batch_size: int=1000, train_size: Optional[int]=None, faiss_verbose: bool=False): + index_name = index_name if index_name is not None else column + faiss_index = FaissIndex(device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index) + faiss_index.add_vectors(self, column=column, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose) + self._indexes[index_name] = faiss_index + + def add_faiss_index_from_external_arrays(self, external_arrays: np.array, index_name: str, device: Optional[Union[int, List[int]]]=None, string_factory: Optional[str]=None, metric_type: Optional[int]=None, custom_index: Optional['faiss.Index']=None, batch_size: int=1000, train_size: Optional[int]=None, faiss_verbose: bool=False): + faiss_index = FaissIndex(device=device, string_factory=string_factory, metric_type=metric_type, custom_index=custom_index) + faiss_index.add_vectors(external_arrays, column=None, batch_size=batch_size, train_size=train_size, faiss_verbose=faiss_verbose) + self._indexes[index_name] = faiss_index + + def save_faiss_index(self, index_name: str, file: Union[str, PurePath], storage_options: Optional[Dict]=None): + index = self.get_index(index_name) + if not isinstance(index, FaissIndex): + raise ValueError(f"Index '{index_name}' is not a FaissIndex but a '{type(index)}'") + index.save(file, storage_options=storage_options) + logger.info(f'Saved FaissIndex {index_name} at {file}') + + def load_faiss_index(self, index_name: str, file: Union[str, PurePath], device: Optional[Union[int, List[int]]]=None, storage_options: Optional[Dict]=None): + index = FaissIndex.load(file, device=device, storage_options=storage_options) + if index.faiss_index.ntotal != len(self): + raise ValueError(f"Index size should match Dataset size, but Index '{index_name}' at {file} has {index.faiss_index.ntotal} elements while the dataset has {len(self)} examples.") + self._indexes[index_name] = index + logger.info(f'Loaded FaissIndex {index_name} from {file}') + + def add_elasticsearch_index(self, column: str, index_name: Optional[str]=None, host: Optional[str]=None, port: Optional[int]=None, es_client: Optional['Elasticsearch']=None, es_index_name: Optional[str]=None, es_index_config: Optional[dict]=None): + index_name = index_name if index_name is not None else column + es_index = ElasticSearchIndex(host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config) + es_index.add_documents(self, column=column) + self._indexes[index_name] = es_index + + def load_elasticsearch_index(self, index_name: str, es_index_name: str, host: Optional[str]=None, port: Optional[int]=None, es_client: Optional['Elasticsearch']=None, es_index_config: Optional[dict]=None): + self._indexes[index_name] = ElasticSearchIndex(host=host, port=port, es_client=es_client, es_index_name=es_index_name, es_index_config=es_index_config) + + def drop_index(self, index_name: str): + del self._indexes[index_name] + + def search(self, 
index_name: str, query: Union[str, np.array], k: int=10, **kwargs) -> SearchResults: + self._check_index_is_initialized(index_name) + return self._indexes[index_name].search(query, k, **kwargs) + + def search_batch(self, index_name: str, queries: Union[List[str], np.array], k: int=10, **kwargs) -> BatchedSearchResults: + self._check_index_is_initialized(index_name) + return self._indexes[index_name].search_batch(queries, k, **kwargs) + + def get_nearest_examples(self, index_name: str, query: Union[str, np.array], k: int=10, **kwargs) -> NearestExamplesResults: + self._check_index_is_initialized(index_name) + (scores, indices) = self.search(index_name, query, k, **kwargs) + top_indices = [i for i in indices if i >= 0] + return NearestExamplesResults(scores[:len(top_indices)], self[top_indices]) + + def get_nearest_examples_batch(self, index_name: str, queries: Union[List[str], np.array], k: int=10, **kwargs) -> BatchedNearestExamplesResults: + self._check_index_is_initialized(index_name) + (total_scores, total_indices) = self.search_batch(index_name, queries, k, **kwargs) + total_scores = [scores_i[:len([i for i in indices_i if i >= 0])] for (scores_i, indices_i) in zip(total_scores, total_indices)] + total_samples = [self[[i for i in indices if i >= 0]] for indices in total_indices] + return BatchedNearestExamplesResults(total_scores, total_samples) + +# File: datasets-main/src/datasets/splits.py +"""""" +import abc +import collections +import copy +import dataclasses +import re +from dataclasses import dataclass +from typing import Dict, List, Optional, Union +from .arrow_reader import FileInstructions, make_file_instructions +from .naming import _split_re +from .utils.py_utils import NonMutableDict, asdict + +@dataclass +class SplitInfo: + name: str = dataclasses.field(default='', metadata={'include_in_asdict_even_if_is_default': True}) + num_bytes: int = dataclasses.field(default=0, metadata={'include_in_asdict_even_if_is_default': True}) + num_examples: int = dataclasses.field(default=0, metadata={'include_in_asdict_even_if_is_default': True}) + shard_lengths: Optional[List[int]] = None + dataset_name: Optional[str] = dataclasses.field(default=None, metadata={'include_in_asdict_even_if_is_default': True}) + + @property + def file_instructions(self): + instructions = make_file_instructions(name=self.dataset_name, split_infos=[self], instruction=str(self.name)) + return instructions.file_instructions + +@dataclass +class SubSplitInfo: + instructions: FileInstructions + + @property + def num_examples(self): + return self.instructions.num_examples + + @property + def file_instructions(self): + return self.instructions.file_instructions + +class SplitBase(metaclass=abc.ABCMeta): + + @abc.abstractmethod + def get_read_instruction(self, split_dict): + raise NotImplementedError('Abstract method') + + def __eq__(self, other): + if isinstance(other, (NamedSplit, str)): + return False + raise NotImplementedError('Equality is not implemented between merged/sub splits.') + + def __ne__(self, other): + return not self.__eq__(other) + + def __add__(self, other): + return _SplitMerged(self, other) + + def subsplit(self, arg=None, k=None, percent=None, weighted=None): + if sum((bool(x) for x in (arg, k, percent, weighted))) != 1: + raise ValueError('Only one argument of subsplit should be set.') + if isinstance(arg, int): + k = arg + elif isinstance(arg, slice): + percent = arg + elif isinstance(arg, list): + weighted = arg + if not (k or percent or weighted): + raise ValueError(f'Invalid split 
argument {arg}. Only list, slice and int supported. One of k, weighted or percent should be set to a non empty value.') + + def assert_slices_coverage(slices): + assert sum((list(range(*s.indices(100))) for s in slices), []) == list(range(100)) + if k: + if not 0 < k <= 100: + raise ValueError(f'Subsplit k should be between 0 and 100, got {k}') + shift = 100 // k + slices = [slice(i * shift, (i + 1) * shift) for i in range(k)] + slices[-1] = slice(slices[-1].start, 100) + assert_slices_coverage(slices) + return tuple((_SubSplit(self, s) for s in slices)) + elif percent: + return _SubSplit(self, percent) + elif weighted: + total = sum(weighted) + weighted = [100 * x // total for x in weighted] + start = 0 + stop = 0 + slices = [] + for v in weighted: + stop += v + slices.append(slice(start, stop)) + start = stop + slices[-1] = slice(slices[-1].start, 100) + assert_slices_coverage(slices) + return tuple((_SubSplit(self, s) for s in slices)) + else: + raise ValueError('Could not determine the split') + +class PercentSliceMeta(type): + + def __getitem__(cls, slice_value): + if not isinstance(slice_value, slice): + raise ValueError(f'datasets.percent should only be called with slice, not {slice_value}') + return slice_value + +class PercentSlice(metaclass=PercentSliceMeta): + pass +percent = PercentSlice + +class _SplitMerged(SplitBase): + + def __init__(self, split1, split2): + self._split1 = split1 + self._split2 = split2 + + def get_read_instruction(self, split_dict): + read_instruction1 = self._split1.get_read_instruction(split_dict) + read_instruction2 = self._split2.get_read_instruction(split_dict) + return read_instruction1 + read_instruction2 + + def __repr__(self): + return f'({repr(self._split1)} + {repr(self._split2)})' + +class _SubSplit(SplitBase): + + def __init__(self, split, slice_value): + self._split = split + self._slice_value = slice_value + + def get_read_instruction(self, split_dict): + return self._split.get_read_instruction(split_dict)[self._slice_value] + + def __repr__(self): + slice_str = '{start}:{stop}' + if self._slice_value.step is not None: + slice_str += ':{step}' + slice_str = slice_str.format(start='' if self._slice_value.start is None else self._slice_value.start, stop='' if self._slice_value.stop is None else self._slice_value.stop, step=self._slice_value.step) + return f'{repr(self._split)}(datasets.percent[{slice_str}])' + +class NamedSplit(SplitBase): + + def __init__(self, name): + self._name = name + split_names_from_instruction = [split_instruction.split('[')[0] for split_instruction in name.split('+')] + for split_name in split_names_from_instruction: + if not re.match(_split_re, split_name): + raise ValueError(f"Split name should match '{_split_re}' but got '{split_name}'.") + + def __str__(self): + return self._name + + def __repr__(self): + return f'NamedSplit({self._name!r})' + + def __eq__(self, other): + if isinstance(other, NamedSplit): + return self._name == other._name + elif isinstance(other, SplitBase): + return False + elif isinstance(other, str): + return self._name == other + else: + return False + + def __lt__(self, other): + return self._name < other._name + + def __hash__(self): + return hash(self._name) + + def get_read_instruction(self, split_dict): + return SplitReadInstruction(split_dict[self._name]) + +class NamedSplitAll(NamedSplit): + + def __init__(self): + super().__init__('all') + + def __repr__(self): + return 'NamedSplitAll()' + + def get_read_instruction(self, split_dict): + read_instructions = [SplitReadInstruction(s) 
for s in split_dict.values()] + return sum(read_instructions, SplitReadInstruction()) + +class Split: + TRAIN = NamedSplit('train') + TEST = NamedSplit('test') + VALIDATION = NamedSplit('validation') + ALL = NamedSplitAll() + + def __new__(cls, name): + return NamedSplitAll() if name == 'all' else NamedSplit(name) +SlicedSplitInfo = collections.namedtuple('SlicedSplitInfo', ['split_info', 'slice_value']) + +class SplitReadInstruction: + + def __init__(self, split_info=None): + self._splits = NonMutableDict(error_msg='Overlap between splits. Split {key} has been added with itself.') + if split_info: + self.add(SlicedSplitInfo(split_info=split_info, slice_value=None)) + + def add(self, sliced_split): + self._splits[sliced_split.split_info.name] = sliced_split + + def __add__(self, other): + split_instruction = SplitReadInstruction() + split_instruction._splits.update(self._splits) + split_instruction._splits.update(other._splits) + return split_instruction + + def __getitem__(self, slice_value): + split_instruction = SplitReadInstruction() + for v in self._splits.values(): + if v.slice_value is not None: + raise ValueError(f'Trying to slice Split {v.split_info.name} which has already been sliced') + v = v._asdict() + v['slice_value'] = slice_value + split_instruction.add(SlicedSplitInfo(**v)) + return split_instruction + + def get_list_sliced_split_info(self): + return list(self._splits.values()) + +class SplitDict(dict): + + def __init__(self, *args, dataset_name=None, **kwargs): + super().__init__(*args, **kwargs) + self.dataset_name = dataset_name + + def __getitem__(self, key: Union[SplitBase, str]): + if str(key) in self: + return super().__getitem__(str(key)) + else: + instructions = make_file_instructions(name=self.dataset_name, split_infos=self.values(), instruction=key) + return SubSplitInfo(instructions) + + def __setitem__(self, key: Union[SplitBase, str], value: SplitInfo): + if key != value.name: + raise ValueError(f"Cannot add elem. 
(key mismatch: '{key}' != '{value.name}')") + super().__setitem__(key, value) + + def add(self, split_info: SplitInfo): + if split_info.name in self: + raise ValueError(f'Split {split_info.name} already present') + split_info.dataset_name = self.dataset_name + super().__setitem__(split_info.name, split_info) + + @property + def total_num_examples(self): + return sum((s.num_examples for s in self.values())) + + @classmethod + def from_split_dict(cls, split_infos: Union[List, Dict], dataset_name: Optional[str]=None): + if isinstance(split_infos, dict): + split_infos = list(split_infos.values()) + if dataset_name is None: + dataset_name = split_infos[0].get('dataset_name') if split_infos else None + split_dict = cls(dataset_name=dataset_name) + for split_info in split_infos: + if isinstance(split_info, dict): + split_info = SplitInfo(**split_info) + split_dict.add(split_info) + return split_dict + + def to_split_dict(self): + out = [] + for (split_name, split_info) in self.items(): + split_info = copy.deepcopy(split_info) + split_info.name = split_name + out.append(split_info) + return out + + def copy(self): + return SplitDict.from_split_dict(self.to_split_dict(), self.dataset_name) + + def _to_yaml_list(self) -> list: + out = [asdict(s) for s in self.to_split_dict()] + for split_info_dict in out: + split_info_dict.pop('shard_lengths', None) + for split_info_dict in out: + split_info_dict.pop('dataset_name', None) + return out + + @classmethod + def _from_yaml_list(cls, yaml_data: list) -> 'SplitDict': + return cls.from_split_dict(yaml_data) + +@dataclass +class SplitGenerator: + name: str + gen_kwargs: Dict = dataclasses.field(default_factory=dict) + split_info: SplitInfo = dataclasses.field(init=False) + + def __post_init__(self): + self.name = str(self.name) + NamedSplit(self.name) + self.split_info = SplitInfo(name=self.name) + +# File: datasets-main/src/datasets/streaming.py +import importlib +import inspect +from functools import wraps +from typing import TYPE_CHECKING, Optional +from .download.download_config import DownloadConfig +from .utils.file_utils import xbasename, xdirname, xet_parse, xexists, xgetsize, xglob, xgzip_open, xisdir, xisfile, xjoin, xlistdir, xnumpy_load, xopen, xpandas_read_csv, xpandas_read_excel, xPath, xpyarrow_parquet_read_table, xrelpath, xsio_loadmat, xsplit, xsplitext, xwalk, xxml_dom_minidom_parse +from .utils.logging import get_logger +from .utils.patching import patch_submodule +from .utils.py_utils import get_imports, lock_importable_file +logger = get_logger(__name__) +if TYPE_CHECKING: + from .builder import DatasetBuilder + +def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig]=None): + module = importlib.import_module(module_path) + if hasattr(module, '_patched_for_streaming') and module._patched_for_streaming: + if isinstance(module._patched_for_streaming, DownloadConfig): + module._patched_for_streaming.token = download_config.token + module._patched_for_streaming.storage_options = download_config.storage_options + return + + def wrap_auth(function): + + @wraps(function) + def wrapper(*args, **kwargs): + return function(*args, download_config=download_config, **kwargs) + wrapper._decorator_name_ = 'wrap_auth' + return wrapper + patch_submodule(module, 'open', wrap_auth(xopen)).start() + patch_submodule(module, 'os.listdir', wrap_auth(xlistdir)).start() + patch_submodule(module, 'os.walk', wrap_auth(xwalk)).start() + patch_submodule(module, 'glob.glob', wrap_auth(xglob)).start() + patch_submodule(module, 
'os.path.join', xjoin).start() + patch_submodule(module, 'os.path.dirname', xdirname).start() + patch_submodule(module, 'os.path.basename', xbasename).start() + patch_submodule(module, 'os.path.relpath', xrelpath).start() + patch_submodule(module, 'os.path.split', xsplit).start() + patch_submodule(module, 'os.path.splitext', xsplitext).start() + patch_submodule(module, 'os.path.exists', wrap_auth(xexists)).start() + patch_submodule(module, 'os.path.isdir', wrap_auth(xisdir)).start() + patch_submodule(module, 'os.path.isfile', wrap_auth(xisfile)).start() + patch_submodule(module, 'os.path.getsize', wrap_auth(xgetsize)).start() + patch_submodule(module, 'pathlib.Path', xPath).start() + patch_submodule(module, 'gzip.open', wrap_auth(xgzip_open)).start() + patch_submodule(module, 'numpy.load', wrap_auth(xnumpy_load)).start() + patch_submodule(module, 'pandas.read_csv', wrap_auth(xpandas_read_csv), attrs=['__version__']).start() + patch_submodule(module, 'pandas.read_excel', wrap_auth(xpandas_read_excel), attrs=['__version__']).start() + patch_submodule(module, 'scipy.io.loadmat', wrap_auth(xsio_loadmat), attrs=['__version__']).start() + patch_submodule(module, 'xml.etree.ElementTree.parse', wrap_auth(xet_parse)).start() + patch_submodule(module, 'xml.dom.minidom.parse', wrap_auth(xxml_dom_minidom_parse)).start() + if not module.__name__.startswith('datasets.packaged_modules.'): + patch_submodule(module, 'pyarrow.parquet.read_table', wrap_auth(xpyarrow_parquet_read_table)).start() + module._patched_for_streaming = download_config + +def extend_dataset_builder_for_streaming(builder: 'DatasetBuilder'): + download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token) + extend_module_for_streaming(builder.__module__, download_config=download_config) + if not builder.__module__.startswith('datasets.'): + importable_file = inspect.getfile(builder.__class__) + with lock_importable_file(importable_file): + for imports in get_imports(importable_file): + if imports[0] == 'internal': + internal_import_name = imports[1] + internal_module_name = '.'.join(builder.__module__.split('.')[:-1] + [internal_import_name]) + extend_module_for_streaming(internal_module_name, download_config=download_config) + from .builder import DatasetBuilder + parent_builder_modules = [cls.__module__ for cls in type(builder).__mro__[1:] if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__] + for module in parent_builder_modules: + extend_module_for_streaming(module, download_config=download_config) + +# File: datasets-main/src/datasets/table.py +import copy +import os +from functools import partial +from itertools import groupby +from typing import TYPE_CHECKING, Any, Callable, Iterator, List, Optional, Tuple, TypeVar, Union +import numpy as np +import pyarrow as pa +import pyarrow.compute as pc +import pyarrow.types +from .utils.logging import get_logger +if TYPE_CHECKING: + from .features.features import Features, FeatureType +logger = get_logger(__name__) + +def inject_arrow_table_documentation(arrow_table_method): + + def wrapper(fn): + fn.__doc__ = arrow_table_method.__doc__ + (fn.__doc__ if fn.__doc__ is not None else '') + fn.__doc__ = fn.__doc__.replace('pyarrow.Table', 'Table') + if hasattr(arrow_table_method, '__annotations__'): + fn.__annotations__ = arrow_table_method.__annotations__ + return fn + return wrapper + +def _in_memory_arrow_table_from_file(filename: str) -> pa.Table: + in_memory_stream = pa.input_stream(filename) + opened_stream = 
pa.ipc.open_stream(in_memory_stream) + pa_table = opened_stream.read_all() + return pa_table + +def _in_memory_arrow_table_from_buffer(buffer: pa.Buffer) -> pa.Table: + stream = pa.BufferReader(buffer) + opened_stream = pa.ipc.open_stream(stream) + table = opened_stream.read_all() + return table + +def _memory_mapped_record_batch_reader_from_file(filename: str) -> pa.RecordBatchStreamReader: + memory_mapped_stream = pa.memory_map(filename) + return pa.ipc.open_stream(memory_mapped_stream) + +def read_schema_from_file(filename: str) -> pa.Schema: + with pa.memory_map(filename) as memory_mapped_stream: + schema = pa.ipc.open_stream(memory_mapped_stream).schema + return schema + +def _memory_mapped_arrow_table_from_file(filename: str) -> pa.Table: + opened_stream = _memory_mapped_record_batch_reader_from_file(filename) + pa_table = opened_stream.read_all() + return pa_table + +def _deepcopy(x, memo: dict): + cls = x.__class__ + result = cls.__new__(cls) + memo[id(x)] = result + for (k, v) in x.__dict__.items(): + setattr(result, k, copy.deepcopy(v, memo)) + return result + +def _interpolation_search(arr: List[int], x: int) -> int: + (i, j) = (0, len(arr) - 1) + while i < j and arr[i] <= x < arr[j]: + k = i + (j - i) * (x - arr[i]) // (arr[j] - arr[i]) + if arr[k] <= x < arr[k + 1]: + return k + elif arr[k] < x: + (i, j) = (k + 1, j) + else: + (i, j) = (i, k) + raise IndexError(f"Invalid query '{x}' for size {(arr[-1] if len(arr) else 'none')}.") + +class IndexedTableMixin: + + def __init__(self, table: pa.Table): + self._schema: pa.Schema = table.schema + self._batches: List[pa.RecordBatch] = [recordbatch for recordbatch in table.to_batches() if len(recordbatch) > 0] + self._offsets: np.ndarray = np.cumsum([0] + [len(b) for b in self._batches], dtype=np.int64) + + def fast_gather(self, indices: Union[List[int], np.ndarray]) -> pa.Table: + if not len(indices): + raise ValueError('Indices must be non-empty') + batch_indices = np.searchsorted(self._offsets, indices, side='right') - 1 + return pa.Table.from_batches([self._batches[batch_idx].slice(i - self._offsets[batch_idx], 1) for (batch_idx, i) in zip(batch_indices, indices)], schema=self._schema) + + def fast_slice(self, offset=0, length=None) -> pa.Table: + if offset < 0: + raise IndexError('Offset must be non-negative') + elif offset >= self._offsets[-1] or (length is not None and length <= 0): + return pa.Table.from_batches([], schema=self._schema) + i = _interpolation_search(self._offsets, offset) + if length is None or length + offset >= self._offsets[-1]: + batches = self._batches[i:] + batches[0] = batches[0].slice(offset - self._offsets[i]) + else: + j = _interpolation_search(self._offsets, offset + length - 1) + batches = self._batches[i:j + 1] + batches[-1] = batches[-1].slice(0, offset + length - self._offsets[j]) + batches[0] = batches[0].slice(offset - self._offsets[i]) + return pa.Table.from_batches(batches, schema=self._schema) + +class Table(IndexedTableMixin): + + def __init__(self, table: pa.Table): + super().__init__(table) + self.table = table + + def __deepcopy__(self, memo: dict): + memo[id(self.table)] = self.table + memo[id(self._batches)] = list(self._batches) + return _deepcopy(self, memo) + + def validate(self, *args, **kwargs): + return self.table.validate(*args, **kwargs) + + def equals(self, *args, **kwargs): + args = tuple((arg.table if isinstance(arg, Table) else arg for arg in args)) + kwargs = {k: v.table if isinstance(v, Table) else v for (k, v) in kwargs} + return self.table.equals(*args, **kwargs) + + def 
to_batches(self, *args, **kwargs): + return self.table.to_batches(*args, **kwargs) + + def to_pydict(self, *args, **kwargs): + return self.table.to_pydict(*args, **kwargs) + + def to_pylist(self, *args, **kwargs): + return self.table.to_pylist(*args, **kwargs) + + def to_pandas(self, *args, **kwargs): + return self.table.to_pandas(*args, **kwargs) + + def to_string(self, *args, **kwargs): + return self.table.to_string(*args, **kwargs) + + def to_reader(self, max_chunksize: Optional[int]=None): + return self.table.to_reader(max_chunksize=max_chunksize) + + def field(self, *args, **kwargs): + return self.table.field(*args, **kwargs) + + def column(self, *args, **kwargs): + return self.table.column(*args, **kwargs) + + def itercolumns(self, *args, **kwargs): + return self.table.itercolumns(*args, **kwargs) + + @property + def schema(self): + return self.table.schema + + @property + def columns(self): + return self.table.columns + + @property + def num_columns(self): + return self.table.num_columns + + @property + def num_rows(self): + return self.table.num_rows + + @property + def shape(self): + return self.table.shape + + @property + def nbytes(self): + return self.table.nbytes + + @property + def column_names(self): + return self.table.column_names + + def __eq__(self, other): + return self.equals(other) + + def __getitem__(self, i): + return self.table[i] + + def __len__(self): + return len(self.table) + + def __repr__(self): + return self.table.__repr__().replace('pyarrow.Table', self.__class__.__name__) + + def __str__(self): + return self.table.__str__().replace('pyarrow.Table', self.__class__.__name__) + + def slice(self, *args, **kwargs): + raise NotImplementedError() + + def filter(self, *args, **kwargs): + raise NotImplementedError() + + def flatten(self, *args, **kwargs): + raise NotImplementedError() + + def combine_chunks(self, *args, **kwargs): + raise NotImplementedError() + + def cast(self, *args, **kwargs): + raise NotImplementedError() + + def replace_schema_metadata(self, *args, **kwargs): + raise NotImplementedError() + + def add_column(self, *args, **kwargs): + raise NotImplementedError() + + def append_column(self, *args, **kwargs): + raise NotImplementedError() + + def remove_column(self, *args, **kwargs): + raise NotImplementedError() + + def set_column(self, *args, **kwargs): + raise NotImplementedError() + + def rename_columns(self, *args, **kwargs): + raise NotImplementedError() + + def drop(self, *args, **kwargs): + raise NotImplementedError() + + def select(self, *args, **kwargs): + raise NotImplementedError() + +class TableBlock(Table): + pass + +class InMemoryTable(TableBlock): + + @classmethod + def from_file(cls, filename: str): + table = _in_memory_arrow_table_from_file(filename) + return cls(table) + + @classmethod + def from_buffer(cls, buffer: pa.Buffer): + table = _in_memory_arrow_table_from_buffer(buffer) + return cls(table) + + @classmethod + def from_pandas(cls, *args, **kwargs): + return cls(pa.Table.from_pandas(*args, **kwargs)) + + @classmethod + def from_arrays(cls, *args, **kwargs): + return cls(pa.Table.from_arrays(*args, **kwargs)) + + @classmethod + def from_pydict(cls, *args, **kwargs): + return cls(pa.Table.from_pydict(*args, **kwargs)) + + @classmethod + def from_pylist(cls, mapping, *args, **kwargs): + return cls(pa.Table.from_pylist(mapping, *args, **kwargs)) + + @classmethod + def from_batches(cls, *args, **kwargs): + return cls(pa.Table.from_batches(*args, **kwargs)) + + def slice(self, offset=0, length=None): + return 
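# A short usage sketch (assuming this module is importable as datasets.table).
# InMemoryTable wraps an ordinary pyarrow.Table: the classmethods above mirror the
# pa.Table constructors, and the read-only accessors simply delegate to the wrapped table.
import pyarrow as pa
from datasets.table import InMemoryTable

t = InMemoryTable.from_pydict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
print(t.num_rows, t.column_names)     # 3 ['id', 'text']
print(t.to_pydict()["text"])          # ['a', 'b', 'c'] -- delegated to pa.Table.to_pydict
print(isinstance(t.table, pa.Table))  # True: the raw Arrow table is kept on .table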
InMemoryTable(self.fast_slice(offset=offset, length=length)) + + def filter(self, *args, **kwargs): + return InMemoryTable(self.table.filter(*args, **kwargs)) + + def flatten(self, *args, **kwargs): + return InMemoryTable(table_flatten(self.table, *args, **kwargs)) + + def combine_chunks(self, *args, **kwargs): + return InMemoryTable(self.table.combine_chunks(*args, **kwargs)) + + def cast(self, *args, **kwargs): + return InMemoryTable(table_cast(self.table, *args, **kwargs)) + + def replace_schema_metadata(self, *args, **kwargs): + return InMemoryTable(self.table.replace_schema_metadata(*args, **kwargs)) + + def add_column(self, *args, **kwargs): + return InMemoryTable(self.table.add_column(*args, **kwargs)) + + def append_column(self, *args, **kwargs): + return InMemoryTable(self.table.append_column(*args, **kwargs)) + + def remove_column(self, *args, **kwargs): + return InMemoryTable(self.table.remove_column(*args, **kwargs)) + + def set_column(self, *args, **kwargs): + return InMemoryTable(self.table.set_column(*args, **kwargs)) + + def rename_columns(self, *args, **kwargs): + return InMemoryTable(self.table.rename_columns(*args, **kwargs)) + + def drop(self, *args, **kwargs): + return InMemoryTable(self.table.drop(*args, **kwargs)) + + def select(self, *args, **kwargs): + return InMemoryTable(self.table.select(*args, **kwargs)) +Replay = Tuple[str, tuple, dict] + +class MemoryMappedTable(TableBlock): + + def __init__(self, table: pa.Table, path: str, replays: Optional[List[Replay]]=None): + super().__init__(table) + self.path = os.path.abspath(path) + self.replays: List[Replay] = replays if replays is not None else [] + + @classmethod + def from_file(cls, filename: str, replays=None): + table = _memory_mapped_arrow_table_from_file(filename) + table = cls._apply_replays(table, replays) + return cls(table, filename, replays) + + def __getstate__(self): + return {'path': self.path, 'replays': self.replays} + + def __setstate__(self, state): + path = state['path'] + replays = state['replays'] + table = _memory_mapped_arrow_table_from_file(path) + table = self._apply_replays(table, replays) + MemoryMappedTable.__init__(self, table, path=path, replays=replays) + + @staticmethod + def _apply_replays(table: pa.Table, replays: Optional[List[Replay]]=None) -> pa.Table: + if replays is not None: + for (name, args, kwargs) in replays: + if name == 'cast': + table = table_cast(table, *args, **kwargs) + elif name == 'flatten': + table = table_flatten(table, *args, **kwargs) + else: + table = getattr(table, name)(*args, **kwargs) + return table + + def _append_replay(self, replay: Replay) -> List[Replay]: + replays = copy.deepcopy(self.replays) + replays.append(replay) + return replays + + def slice(self, offset=0, length=None): + replay = ('slice', (offset, length), {}) + replays = self._append_replay(replay) + return MemoryMappedTable(self.fast_slice(offset=offset, length=length), self.path, replays) + + def filter(self, *args, **kwargs): + replay = ('filter', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.filter(*args, **kwargs), self.path, replays) + + def flatten(self, *args, **kwargs): + replay = ('flatten', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(table_flatten(self.table, *args, **kwargs), self.path, replays) + + def combine_chunks(self, *args, **kwargs): + replay = ('combine_chunks', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = 
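# A minimal sketch of the "replay" mechanism above (not part of the library source).
# A MemoryMappedTable never pickles its data: __getstate__ keeps only the file path
# plus the list of (method, args, kwargs) replays, and __setstate__ re-memory-maps the
# file and re-applies them. The file path and column names below are made up.
import os, pickle, tempfile
import pyarrow as pa
from datasets.table import MemoryMappedTable

path = os.path.join(tempfile.mkdtemp(), "data.arrow")
original = pa.table({"id": list(range(10)), "text": [str(i) for i in range(10)]})
with pa.OSFile(path, "wb") as sink:
    with pa.ipc.new_stream(sink, original.schema) as writer:  # Arrow IPC stream format
        writer.write_table(original)

t = MemoryMappedTable.from_file(path)
t2 = t.slice(2, 5).drop(["text"])          # each call records a replay instead of copying data
print(t2.replays)                          # [('slice', (2, 5), {}), ('drop', (['text'],), {})]

restored = pickle.loads(pickle.dumps(t2))  # only (path, replays) cross the pickle boundary
print(restored.path == t2.path, restored.num_rows)   # True 5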
self._append_replay(replay) + return MemoryMappedTable(self.table.combine_chunks(*args, **kwargs), self.path, replays) + + def cast(self, *args, **kwargs): + replay = ('cast', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(table_cast(self.table, *args, **kwargs), self.path, replays) + + def replace_schema_metadata(self, *args, **kwargs): + replay = ('replace_schema_metadata', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.replace_schema_metadata(*args, **kwargs), self.path, replays) + + def add_column(self, *args, **kwargs): + replay = ('add_column', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.add_column(*args, **kwargs), self.path, replays) + + def append_column(self, *args, **kwargs): + replay = ('append_column', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.append_column(*args, **kwargs), self.path, replays) + + def remove_column(self, *args, **kwargs): + replay = ('remove_column', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.remove_column(*args, **kwargs), self.path, replays) + + def set_column(self, *args, **kwargs): + replay = ('set_column', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.set_column(*args, **kwargs), self.path, replays) + + def rename_columns(self, *args, **kwargs): + replay = ('rename_columns', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.rename_columns(*args, **kwargs), self.path, replays) + + def drop(self, *args, **kwargs): + replay = ('drop', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.drop(*args, **kwargs), self.path, replays) + + def select(self, *args, **kwargs): + replay = ('select', copy.deepcopy(args), copy.deepcopy(kwargs)) + replays = self._append_replay(replay) + return MemoryMappedTable(self.table.select(*args, **kwargs), self.path, replays) +TableBlockContainer = TypeVar('TableBlockContainer', TableBlock, List[TableBlock], List[List[TableBlock]]) + +class ConcatenationTable(Table): + + def __init__(self, table: pa.Table, blocks: List[List[TableBlock]]): + super().__init__(table) + self.blocks = blocks + for subtables in blocks: + for subtable in subtables: + if not isinstance(subtable, TableBlock): + raise TypeError(f'The blocks of a ConcatenationTable must be InMemoryTable or MemoryMappedTable objects, but got {_short_str(subtable)}.') + + def __getstate__(self): + return {'blocks': self.blocks, 'schema': self.table.schema} + + def __setstate__(self, state): + blocks = state['blocks'] + schema = state['schema'] + table = self._concat_blocks_horizontally_and_vertically(blocks) + if schema is not None and table.schema != schema: + empty_table = pa.Table.from_batches([], schema=schema) + table = pa.concat_tables([table, empty_table], promote_options='default') + ConcatenationTable.__init__(self, table, blocks=blocks) + + @staticmethod + def _concat_blocks(blocks: List[Union[TableBlock, pa.Table]], axis: int=0) -> pa.Table: + pa_tables = [table.table if hasattr(table, 'table') else table for table in blocks] + if axis == 0: + return pa.concat_tables(pa_tables, 
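# A pure-pyarrow sketch of what _concat_blocks above does (not part of the library
# source). axis=0 stacks tables vertically with pa.concat_tables (promote_options
# "default" unifies schemas and fills missing columns with nulls), while axis=1 glues
# tables side by side by appending each column of the later tables.
import pyarrow as pa

left = pa.table({"id": [1, 2]})
right = pa.table({"text": ["a", "b"]})

# axis=1: same number of rows, columns are appended one by one
wide = left
for name, col in zip(right.column_names, right.columns):
    wide = wide.append_column(name, col)
print(wide.column_names)   # ['id', 'text']

# axis=0: rows are stacked; schemas are unified ("promoted") if they differ
tall = pa.concat_tables([wide, pa.table({"id": [3]})], promote_options="default")
print(tall.num_rows, tall.column("text").null_count)   # 3 1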
promote_options='default') + elif axis == 1: + for (i, table) in enumerate(pa_tables): + if i == 0: + pa_table = table + else: + for (name, col) in zip(table.column_names, table.columns): + pa_table = pa_table.append_column(name, col) + return pa_table + else: + raise ValueError("'axis' must be either 0 or 1") + + @classmethod + def _concat_blocks_horizontally_and_vertically(cls, blocks: List[List[TableBlock]]) -> pa.Table: + pa_tables_to_concat_vertically = [] + for (i, tables) in enumerate(blocks): + if not tables: + continue + pa_table_horizontally_concatenated = cls._concat_blocks(tables, axis=1) + pa_tables_to_concat_vertically.append(pa_table_horizontally_concatenated) + return cls._concat_blocks(pa_tables_to_concat_vertically, axis=0) + + @classmethod + def _merge_blocks(cls, blocks: TableBlockContainer, axis: Optional[int]=None) -> TableBlockContainer: + if axis is not None: + merged_blocks = [] + for (is_in_memory, block_group) in groupby(blocks, key=lambda x: isinstance(x, InMemoryTable)): + if is_in_memory: + block_group = [InMemoryTable(cls._concat_blocks(list(block_group), axis=axis))] + merged_blocks += list(block_group) + else: + merged_blocks = [cls._merge_blocks(row_block, axis=1) for row_block in blocks] + if all((len(row_block) == 1 for row_block in merged_blocks)): + merged_blocks = cls._merge_blocks([block for row_block in merged_blocks for block in row_block], axis=0) + return merged_blocks + + @classmethod + def _consolidate_blocks(cls, blocks: TableBlockContainer) -> TableBlockContainer: + if isinstance(blocks, TableBlock): + return blocks + elif isinstance(blocks[0], TableBlock): + return cls._merge_blocks(blocks, axis=0) + else: + return cls._merge_blocks(blocks) + + @classmethod + def from_blocks(cls, blocks: TableBlockContainer) -> 'ConcatenationTable': + blocks = cls._consolidate_blocks(blocks) + if isinstance(blocks, TableBlock): + table = blocks + return cls(table.table, [[table]]) + elif isinstance(blocks[0], TableBlock): + table = cls._concat_blocks(blocks, axis=0) + blocks = [[t] for t in blocks] + return cls(table, blocks) + else: + table = cls._concat_blocks_horizontally_and_vertically(blocks) + return cls(table, blocks) + + @classmethod + def from_tables(cls, tables: List[Union[pa.Table, Table]], axis: int=0) -> 'ConcatenationTable': + + def to_blocks(table: Union[pa.Table, Table]) -> List[List[TableBlock]]: + if isinstance(table, pa.Table): + return [[InMemoryTable(table)]] + elif isinstance(table, ConcatenationTable): + return copy.deepcopy(table.blocks) + else: + return [[table]] + + def _slice_row_block(row_block: List[TableBlock], length: int) -> Tuple[List[TableBlock], List[TableBlock]]: + sliced = [table.slice(0, length) for table in row_block] + remainder = [table.slice(length, len(row_block[0]) - length) for table in row_block] + return (sliced, remainder) + + def _split_both_like(result: List[List[TableBlock]], blocks: List[List[TableBlock]]) -> Tuple[List[List[TableBlock]], List[List[TableBlock]]]: + (result, blocks) = (list(result), list(blocks)) + (new_result, new_blocks) = ([], []) + while result and blocks: + if len(result[0][0]) > len(blocks[0][0]): + new_blocks.append(blocks[0]) + (sliced, result[0]) = _slice_row_block(result[0], len(blocks.pop(0)[0])) + new_result.append(sliced) + elif len(result[0][0]) < len(blocks[0][0]): + new_result.append(result[0]) + (sliced, blocks[0]) = _slice_row_block(blocks[0], len(result.pop(0)[0])) + new_blocks.append(sliced) + else: + new_result.append(result.pop(0)) + new_blocks.append(blocks.pop(0)) + 
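# A usage sketch for from_tables/from_blocks above (assuming this module is importable
# as datasets.table). `blocks` is a list of row blocks; each row block is a list of
# sub-tables concatenated horizontally, and the row blocks are stacked vertically.
# Adjacent in-memory blocks get merged by _consolidate_blocks, so the printed block
# structure may collapse to a single InMemoryTable here.
from datasets.table import ConcatenationTable, InMemoryTable

t1 = InMemoryTable.from_pydict({"id": [1, 2, 3]})
t2 = InMemoryTable.from_pydict({"id": [4, 5]})
t3 = InMemoryTable.from_pydict({"text": ["a", "b", "c", "d", "e"]})

vertical = ConcatenationTable.from_tables([t1, t2], axis=0)      # rows stacked
combined = ConcatenationTable.from_tables([vertical, t3], axis=1)  # columns appended
print(combined.column_names, combined.num_rows)                  # ['id', 'text'] 5
print([[type(b).__name__ for b in row] for row in combined.blocks])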
if result or blocks: + raise ValueError("Failed to concatenate on axis=1 because tables don't have the same number of rows") + return (new_result, new_blocks) + + def _extend_blocks(result: List[List[TableBlock]], blocks: List[List[TableBlock]], axis: int=0) -> List[List[TableBlock]]: + if axis == 0: + result.extend(blocks) + elif axis == 1: + (result, blocks) = _split_both_like(result, blocks) + for (i, row_block) in enumerate(blocks): + result[i].extend(row_block) + return result + blocks = to_blocks(tables[0]) + for table in tables[1:]: + table_blocks = to_blocks(table) + blocks = _extend_blocks(blocks, table_blocks, axis=axis) + return cls.from_blocks(blocks) + + @property + def _slices(self): + offset = 0 + for tables in self.blocks: + length = len(tables[0]) + yield (offset, length) + offset += length + + def slice(self, offset=0, length=None): + table = self.table.slice(offset, length=length) + length = length if length is not None else self.num_rows - offset + blocks = [] + for tables in self.blocks: + n_rows = len(tables[0]) + if length == 0: + break + elif n_rows <= offset: + offset = offset - n_rows + elif n_rows <= offset + length: + blocks.append([t.slice(offset) for t in tables]) + (length, offset) = (length + offset - n_rows, 0) + else: + blocks.append([t.slice(offset, length) for t in tables]) + (length, offset) = (0, 0) + return ConcatenationTable(table, blocks) + + def filter(self, mask, *args, **kwargs): + table = self.table.filter(mask, *args, **kwargs) + blocks = [] + for ((offset, length), tables) in zip(self._slices, self.blocks): + submask = mask.slice(offset, length) + blocks.append([t.filter(submask, *args, **kwargs) for t in tables]) + return ConcatenationTable(table, blocks) + + def flatten(self, *args, **kwargs): + table = table_flatten(self.table, *args, **kwargs) + blocks = [] + for tables in self.blocks: + blocks.append([t.flatten(*args, **kwargs) for t in tables]) + return ConcatenationTable(table, blocks) + + def combine_chunks(self, *args, **kwargs): + table = self.table.combine_chunks(*args, **kwargs) + blocks = [] + for tables in self.blocks: + blocks.append([t.combine_chunks(*args, **kwargs) for t in tables]) + return ConcatenationTable(table, blocks) + + def cast(self, target_schema, *args, **kwargs): + from .features import Features + table = table_cast(self.table, target_schema, *args, **kwargs) + target_features = Features.from_arrow_schema(target_schema) + blocks = [] + for subtables in self.blocks: + new_tables = [] + fields = list(target_schema) + for subtable in subtables: + subfields = [] + for name in subtable.column_names: + subfields.append(fields.pop(next((i for (i, field) in enumerate(fields) if field.name == name)))) + subfeatures = Features({subfield.name: target_features[subfield.name] for subfield in subfields}) + subschema = subfeatures.arrow_schema + new_tables.append(subtable.cast(subschema, *args, **kwargs)) + blocks.append(new_tables) + return ConcatenationTable(table, blocks) + + def replace_schema_metadata(self, *args, **kwargs): + table = self.table.replace_schema_metadata(*args, **kwargs) + blocks = [] + for tables in self.blocks: + blocks.append([t.replace_schema_metadata(*args, **kwargs) for t in tables]) + return ConcatenationTable(table, self.blocks) + + def add_column(self, *args, **kwargs): + raise NotImplementedError() + + def append_column(self, *args, **kwargs): + raise NotImplementedError() + + def remove_column(self, i, *args, **kwargs): + table = self.table.remove_column(i, *args, **kwargs) + name = 
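# A small plain-Python sketch (not library code) of the offset/length bookkeeping in
# ConcatenationTable.slice above: a window expressed against the concatenated table is
# re-expressed against each row block, using the running offsets that _slices yields.
block_lengths = [4, 3, 5]   # lengths of the first sub-table in each row block

def per_block_slices(offset, length):
    """Yield (block_index, local_offset, local_length) covering rows [offset, offset + length)."""
    out = []
    for i, n_rows in enumerate(block_lengths):
        if length == 0:
            break
        elif n_rows <= offset:            # the window starts after this block
            offset -= n_rows
        elif n_rows <= offset + length:   # the window covers this block up to its end
            out.append((i, offset, n_rows - offset))
            length, offset = length + offset - n_rows, 0
        else:                             # the window ends inside this block
            out.append((i, offset, length))
            length, offset = 0, 0
    return out

print(per_block_slices(2, 7))   # [(0, 2, 2), (1, 0, 3), (2, 0, 2)] -> rows 2..8 of 12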
self.table.column_names[i] + blocks = [] + for tables in self.blocks: + blocks.append([t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables]) + return ConcatenationTable(table, blocks) + + def set_column(self, *args, **kwargs): + raise NotImplementedError() + + def rename_columns(self, names, *args, **kwargs): + table = self.table.rename_columns(names, *args, **kwargs) + names = dict(zip(self.table.column_names, names)) + blocks = [] + for tables in self.blocks: + blocks.append([t.rename_columns([names[name] for name in t.column_names], *args, **kwargs) for t in tables]) + return ConcatenationTable(table, blocks) + + def drop(self, columns, *args, **kwargs): + table = self.table.drop(columns, *args, **kwargs) + blocks = [] + for tables in self.blocks: + blocks.append([t.drop([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) + return ConcatenationTable(table, blocks) + + def select(self, columns, *args, **kwargs): + table = self.table.select(columns, *args, **kwargs) + blocks = [] + for tables in self.blocks: + blocks.append([t.select([c for c in columns if c in t.column_names], *args, **kwargs) for t in tables]) + return ConcatenationTable(table, blocks) + +def concat_tables(tables: List[Table], axis: int=0) -> Table: + tables = list(tables) + if len(tables) == 1: + return tables[0] + return ConcatenationTable.from_tables(tables, axis=axis) + +def list_table_cache_files(table: Table) -> List[str]: + if isinstance(table, ConcatenationTable): + cache_files = [] + for subtables in table.blocks: + for subtable in subtables: + cache_files += list_table_cache_files(subtable) + return cache_files + elif isinstance(table, MemoryMappedTable): + return [table.path] + else: + return [] + +def _wrap_for_chunked_arrays(func): + + def wrapper(array, *args, **kwargs): + if isinstance(array, pa.ChunkedArray): + return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks]) + else: + return func(array, *args, **kwargs) + return wrapper + +def _are_list_values_of_length(array: pa.ListArray, length: int) -> bool: + return pc.all(pc.equal(array.value_lengths(), length)).as_py() or array.null_count == len(array) + +def _combine_list_array_offsets_with_mask(array: pa.ListArray) -> pa.Array: + offsets = array.offsets + if array.null_count > 0: + offsets = pa.concat_arrays([pc.replace_with_mask(offsets[:-1], array.is_null(), pa.nulls(len(array), pa.int32())), offsets[-1:]]) + return offsets + +def _storage_type(type: pa.DataType) -> pa.DataType: + if isinstance(type, pa.ExtensionType): + return _storage_type(type.storage_type) + elif isinstance(type, pa.StructType): + return pa.struct([pa.field(field.name, _storage_type(field.type)) for field in type]) + elif isinstance(type, pa.ListType): + return pa.list_(_storage_type(type.value_type)) + elif isinstance(type, pa.FixedSizeListType): + return pa.list_(_storage_type(type.value_type), type.list_size) + return type + +def _short_str(value: Any) -> str: + out = str(value) + if len(out) > 3000: + out = out[:1500] + '\n...\n' + out[-1500:] + return out + +@_wrap_for_chunked_arrays +def array_cast(array: pa.Array, pa_type: pa.DataType, allow_primitive_to_str: bool=True, allow_decimal_to_str: bool=True) -> Union[pa.Array, pa.FixedSizeListArray, pa.ListArray, pa.StructArray, pa.ExtensionArray]: + _c = partial(array_cast, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str) + if isinstance(array, pa.ExtensionArray): + array = 
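# A self-contained sketch (not library code) of the decorator pattern used by
# _wrap_for_chunked_arrays above: array-level functions are written against pa.Array,
# and the wrapper transparently maps them over the chunks of a pa.ChunkedArray so the
# same function works on whole table columns.
import pyarrow as pa
import pyarrow.compute as pc

def per_chunk(func):
    def wrapper(array, *args, **kwargs):
        if isinstance(array, pa.ChunkedArray):
            return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
        return func(array, *args, **kwargs)
    return wrapper

@per_chunk
def double(array: pa.Array) -> pa.Array:
    return pc.multiply(array, 2)

col = pa.chunked_array([[1, 2], [3]])      # a column made of two chunks
print(double(col).to_pylist())             # [2, 4, 6] -- chunk layout handled by the wrapper
print(double(pa.array([10])).to_pylist())  # [20]      -- plain arrays pass straight through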
array.storage + if isinstance(pa_type, pa.ExtensionType): + return pa_type.wrap_array(_c(array, pa_type.storage_type)) + elif array.type == pa_type: + return array + elif pa.types.is_struct(array.type): + if pa.types.is_struct(pa_type) and {field.name for field in pa_type} == {field.name for field in array.type}: + if array.type.num_fields == 0: + return array + arrays = [_c(array.field(field.name), field.type) for field in pa_type] + return pa.StructArray.from_arrays(arrays, fields=list(pa_type), mask=array.is_null()) + elif pa.types.is_list(array.type) or pa.types.is_large_list(array.type): + if pa.types.is_fixed_size_list(pa_type): + if _are_list_values_of_length(array, pa_type.list_size): + if array.null_count > 0: + array_type = array.type + storage_type = _storage_type(array_type) + if array_type != storage_type: + array = _c(array, storage_type) + array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True) + array = _c(array, array_type) + else: + array = pc.list_slice(array, 0, pa_type.list_size, return_fixed_size_list=True) + array_values = array.values + return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()) + else: + array_values = array.values[array.offset * pa_type.list_size:(array.offset + len(array)) * pa_type.list_size] + return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size) + elif pa.types.is_list(pa_type): + array_offsets = _combine_list_array_offsets_with_mask(array) + return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type)) + elif pa.types.is_large_list(pa_type): + array_offsets = _combine_list_array_offsets_with_mask(array) + return pa.LargeListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type)) + elif pa.types.is_fixed_size_list(array.type): + if pa.types.is_fixed_size_list(pa_type): + if pa_type.list_size == array.type.list_size: + array_values = array.values[array.offset * array.type.list_size:(array.offset + len(array)) * array.type.list_size] + return pa.FixedSizeListArray.from_arrays(_c(array_values, pa_type.value_type), pa_type.list_size, mask=array.is_null()) + elif pa.types.is_list(pa_type): + array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size + return pa.ListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null()) + elif pa.types.is_large_list(pa_type): + array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size + return pa.LargeListArray.from_arrays(array_offsets, _c(array.values, pa_type.value_type), mask=array.is_null()) + else: + if pa.types.is_string(pa_type): + if not allow_primitive_to_str and pa.types.is_primitive(array.type): + raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)} since allow_primitive_to_str is set to {allow_primitive_to_str} ") + if not allow_decimal_to_str and pa.types.is_decimal(array.type): + raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)} and allow_decimal_to_str is set to {allow_decimal_to_str}") + if pa.types.is_null(pa_type) and (not pa.types.is_null(array.type)): + raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)}") + return array.cast(pa_type) + raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)}") + +@_wrap_for_chunked_arrays +def cast_array_to_feature(array: pa.Array, feature: 
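# A usage sketch for array_cast above (assuming this module is importable as
# datasets.table). It extends pa.Array.cast with the nested-list and extension-type
# handling implemented above, plus the allow_primitive_to_str/allow_decimal_to_str guards.
import pyarrow as pa
from datasets.table import array_cast

arr = pa.array([[1, 2], [3, 4]])
fixed = array_cast(arr, pa.list_(pa.int32(), 2))   # variable-size list -> fixed_size_list[2]
print(fixed.type)                                  # fixed_size_list<item: int32>[2]
print(fixed.to_pylist())                           # [[1, 2], [3, 4]]

try:
    array_cast(pa.array([1, 2, 3]), pa.string(), allow_primitive_to_str=False)
except TypeError as e:
    print(type(e).__name__)                        # TypeError: primitive -> str is refused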
'FeatureType', allow_primitive_to_str: bool=True, allow_decimal_to_str: bool=True) -> pa.Array: + from .features.features import LargeList, Sequence, get_nested_type + _c = partial(cast_array_to_feature, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str) + if isinstance(array, pa.ExtensionArray): + array = array.storage + if hasattr(feature, 'cast_storage'): + return feature.cast_storage(array) + elif pa.types.is_struct(array.type): + if isinstance(feature, Sequence) and isinstance(feature.feature, dict): + sequence_kwargs = vars(feature).copy() + feature = sequence_kwargs.pop('feature') + feature = {name: Sequence(subfeature, **sequence_kwargs) for (name, subfeature) in feature.items()} + if isinstance(feature, dict) and {field.name for field in array.type} == set(feature): + if array.type.num_fields == 0: + return array + arrays = [_c(array.field(name), subfeature) for (name, subfeature) in feature.items()] + return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) + elif pa.types.is_list(array.type) or pa.types.is_large_list(array.type): + if isinstance(feature, list): + casted_array_values = _c(array.values, feature[0]) + if pa.types.is_list(array.type) and casted_array_values.type == array.values.type: + return array + else: + array_offsets = _combine_list_array_offsets_with_mask(array) + return pa.ListArray.from_arrays(array_offsets, casted_array_values) + elif isinstance(feature, LargeList): + casted_array_values = _c(array.values, feature.feature) + if pa.types.is_large_list(array.type) and casted_array_values.type == array.values.type: + return array + else: + array_offsets = _combine_list_array_offsets_with_mask(array) + return pa.LargeListArray.from_arrays(array_offsets, casted_array_values) + elif isinstance(feature, Sequence): + if feature.length > -1: + if _are_list_values_of_length(array, feature.length): + if array.null_count > 0: + array_type = array.type + storage_type = _storage_type(array_type) + if array_type != storage_type: + array = array_cast(array, storage_type, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str) + array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True) + array = array_cast(array, array_type, allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str) + else: + array = pc.list_slice(array, 0, feature.length, return_fixed_size_list=True) + array_values = array.values + casted_array_values = _c(array_values, feature.feature) + return pa.FixedSizeListArray.from_arrays(casted_array_values, feature.length, mask=array.is_null()) + else: + array_values = array.values[array.offset * feature.length:(array.offset + len(array)) * feature.length] + return pa.FixedSizeListArray.from_arrays(_c(array_values, feature.feature), feature.length) + else: + casted_array_values = _c(array.values, feature.feature) + if pa.types.is_list(array.type) and casted_array_values.type == array.values.type: + return array + else: + array_offsets = _combine_list_array_offsets_with_mask(array) + return pa.ListArray.from_arrays(array_offsets, casted_array_values) + elif pa.types.is_fixed_size_list(array.type): + if isinstance(feature, list): + array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size + return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature[0]), mask=array.is_null()) + elif isinstance(feature, LargeList): + array_offsets = (np.arange(len(array) + 1) + array.offset) * 
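# A usage sketch for cast_array_to_feature above (assuming datasets and datasets.table
# are importable). Features describe the target type: Sequence(..., length=N) maps to
# an Arrow fixed_size_list, and dict features map to struct columns.
import pyarrow as pa
from datasets import Sequence, Value
from datasets.table import cast_array_to_feature

arr = pa.array([[1, 2], [3, 4]])
out = cast_array_to_feature(arr, Sequence(Value("int32"), length=2))
print(out.type)       # fixed_size_list<item: int32>[2]

structs = pa.array([{"a": 1, "b": "x"}, {"a": 2, "b": "y"}])
out = cast_array_to_feature(structs, {"a": Value("int64"), "b": Value("large_string")})
print(out.type)       # struct<a: int64, b: large_string>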
array.type.list_size + return pa.LargeListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null()) + elif isinstance(feature, Sequence): + if feature.length > -1: + if feature.length == array.type.list_size: + array_values = array.values[array.offset * array.type.list_size:(array.offset + len(array)) * array.type.list_size] + casted_array_values = _c(array_values, feature.feature) + return pa.FixedSizeListArray.from_arrays(casted_array_values, feature.length, mask=array.is_null()) + else: + array_offsets = (np.arange(len(array) + 1) + array.offset) * array.type.list_size + return pa.ListArray.from_arrays(array_offsets, _c(array.values, feature.feature), mask=array.is_null()) + if pa.types.is_null(array.type): + return array_cast(array, get_nested_type(feature), allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str) + elif not isinstance(feature, (Sequence, dict, list, tuple)): + return array_cast(array, feature(), allow_primitive_to_str=allow_primitive_to_str, allow_decimal_to_str=allow_decimal_to_str) + raise TypeError(f"Couldn't cast array of type\n{_short_str(array.type)}\nto\n{_short_str(feature)}") + +@_wrap_for_chunked_arrays +def embed_array_storage(array: pa.Array, feature: 'FeatureType'): + from .features import Sequence + _e = embed_array_storage + if isinstance(array, pa.ExtensionArray): + array = array.storage + if hasattr(feature, 'embed_storage'): + return feature.embed_storage(array) + elif pa.types.is_struct(array.type): + if isinstance(feature, Sequence) and isinstance(feature.feature, dict): + feature = {name: Sequence(subfeature, length=feature.length) for (name, subfeature) in feature.feature.items()} + if isinstance(feature, dict): + arrays = [_e(array.field(name), subfeature) for (name, subfeature) in feature.items()] + return pa.StructArray.from_arrays(arrays, names=list(feature), mask=array.is_null()) + elif pa.types.is_list(array.type): + array_offsets = _combine_list_array_offsets_with_mask(array) + if isinstance(feature, list): + return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature[0])) + if isinstance(feature, Sequence) and feature.length == -1: + return pa.ListArray.from_arrays(array_offsets, _e(array.values, feature.feature)) + elif pa.types.is_large_list(array.type): + array_offsets = _combine_list_array_offsets_with_mask(array) + return pa.LargeListArray.from_arrays(array_offsets, _e(array.values, feature.feature)) + elif pa.types.is_fixed_size_list(array.type): + if isinstance(feature, Sequence) and feature.length > -1: + array_values = array.values[array.offset * array.type.list_size:(array.offset + len(array)) * array.type.list_size] + embedded_array_values = _e(array_values, feature.feature) + return pa.FixedSizeListArray.from_arrays(embedded_array_values, feature.length, mask=array.is_null()) + if not isinstance(feature, (Sequence, dict, list, tuple)): + return array + raise TypeError(f"Couldn't embed array of type\n{_short_str(array.type)}\nwith\n{_short_str(feature)}") + +class CastError(ValueError): + + def __init__(self, *args, table_column_names: List[str], requested_column_names: List[str]) -> None: + super().__init__(*args) + self.table_column_names = table_column_names + self.requested_column_names = requested_column_names + + def __reduce__(self): + return (partial(CastError, table_column_names=self.table_column_names, requested_column_names=self.requested_column_names), ()) + + def details(self): + new_columns = set(self.table_column_names) - 
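# Continuing the sketch above (assumptions as before): for plain, non-nested features
# the function falls back to array_cast via feature(), all-null arrays are cast to the
# feature's nested Arrow type directly, and features that define cast_storage
# (e.g. ClassLabel) take over the cast entirely.
import pyarrow as pa
from datasets import ClassLabel, Value
from datasets.table import cast_array_to_feature

print(cast_array_to_feature(pa.array([1, 2, 3]), Value("string")).to_pylist())  # ['1', '2', '3']
print(cast_array_to_feature(pa.nulls(2), Value("float32")).type)                # float

labels = ClassLabel(names=["neg", "pos"])
print(cast_array_to_feature(pa.array(["pos", "neg"]), labels).to_pylist())      # [1, 0] via cast_storage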
set(self.requested_column_names) + missing_columns = set(self.requested_column_names) - set(self.table_column_names) + if new_columns and missing_columns: + return f'there are {len(new_columns)} new columns ({_short_str(new_columns)}) and {len(missing_columns)} missing columns ({_short_str(missing_columns)}).' + elif new_columns: + return f'there are {len(new_columns)} new columns ({_short_str(new_columns)})' + else: + return f'there are {len(missing_columns)} missing columns ({_short_str(missing_columns)})' + +def cast_table_to_features(table: pa.Table, features: 'Features'): + if sorted(table.column_names) != sorted(features): + raise CastError(f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match", table_column_names=table.column_names, requested_column_names=list(features)) + arrays = [cast_array_to_feature(table[name], feature) for (name, feature) in features.items()] + return pa.Table.from_arrays(arrays, schema=features.arrow_schema) + +def cast_table_to_schema(table: pa.Table, schema: pa.Schema): + from .features import Features + features = Features.from_arrow_schema(schema) + if sorted(table.column_names) != sorted(features): + raise CastError(f"Couldn't cast\n{_short_str(table.schema)}\nto\n{_short_str(features)}\nbecause column names don't match", table_column_names=table.column_names, requested_column_names=list(features)) + arrays = [cast_array_to_feature(table[name], feature) for (name, feature) in features.items()] + return pa.Table.from_arrays(arrays, schema=schema) + +def embed_table_storage(table: pa.Table): + from .features.features import Features, require_storage_embed + features = Features.from_arrow_schema(table.schema) + arrays = [embed_array_storage(table[name], feature) if require_storage_embed(feature) else table[name] for (name, feature) in features.items()] + return pa.Table.from_arrays(arrays, schema=features.arrow_schema) + +def table_cast(table: pa.Table, schema: pa.Schema): + if table.schema != schema: + return cast_table_to_schema(table, schema) + elif table.schema.metadata != schema.metadata: + return table.replace_schema_metadata(schema.metadata) + else: + return table + +def table_flatten(table: pa.Table): + from .features import Features + features = Features.from_arrow_schema(table.schema) + if any((hasattr(subfeature, 'flatten') and subfeature.flatten() == subfeature for subfeature in features.values())): + flat_arrays = [] + flat_column_names = [] + for field in table.schema: + array = table.column(field.name) + subfeature = features[field.name] + if pa.types.is_struct(field.type) and (not hasattr(subfeature, 'flatten') or subfeature.flatten() != subfeature): + flat_arrays.extend(array.flatten()) + flat_column_names.extend([f'{field.name}.{subfield.name}' for subfield in field.type]) + else: + flat_arrays.append(array) + flat_column_names.append(field.name) + flat_table = pa.Table.from_arrays(flat_arrays, names=flat_column_names) + else: + flat_table = table.flatten() + flat_features = features.flatten(max_depth=2) + flat_features = Features({column_name: flat_features[column_name] for column_name in flat_table.column_names}) + return flat_table.replace_schema_metadata(flat_features.arrow_schema.metadata) + +def table_visitor(table: pa.Table, function: Callable[[pa.Array], None]): + from .features import Features, Sequence + features = Features.from_arrow_schema(table.schema) + + def _visit(array, feature): + if isinstance(array, pa.ChunkedArray): + for chunk in array.chunks: + _visit(chunk, 
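# A usage sketch for table_cast / CastError above (assuming datasets.table is
# importable). table_cast is a no-op when the schema already matches, re-casts column
# by column otherwise, and raises CastError when the column names themselves don't line up.
import pyarrow as pa
from datasets import Features, Value
from datasets.table import CastError, table_cast

table = pa.table({"id": [1, 2], "score": [0.5, 0.9]})
schema = Features({"id": Value("int32"), "score": Value("float64")}).arrow_schema
print(table_cast(table, schema).schema.field("id").type)   # int32

try:
    table_cast(table, Features({"id": Value("int32")}).arrow_schema)
except CastError as e:
    print(e.details())   # e.g. "there are 1 new columns ({'score'})"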
feature) + else: + if isinstance(array, pa.ExtensionArray): + array = array.storage + function(array, feature) + if pa.types.is_struct(array.type) and (not hasattr(feature, 'cast_storage')): + if isinstance(feature, Sequence) and isinstance(feature.feature, dict): + feature = {name: Sequence(subfeature, length=feature.length) for (name, subfeature) in feature.feature.items()} + for (name, subfeature) in feature.items(): + _visit(array.field(name), subfeature) + elif pa.types.is_list(array.type): + if isinstance(feature, list): + _visit(array.values, feature[0]) + elif isinstance(feature, Sequence): + _visit(array.values, feature.feature) + for (name, feature) in features.items(): + _visit(table[name], feature) + +def table_iter(table: Table, batch_size: int, drop_last_batch=False) -> Iterator[pa.Table]: + chunks_buffer = [] + chunks_buffer_size = 0 + for chunk in table.to_reader(max_chunksize=batch_size): + if len(chunk) == 0: + continue + elif chunks_buffer_size + len(chunk) < batch_size: + chunks_buffer.append(chunk) + chunks_buffer_size += len(chunk) + continue + elif chunks_buffer_size + len(chunk) == batch_size: + chunks_buffer.append(chunk) + yield pa.Table.from_batches(chunks_buffer) + chunks_buffer = [] + chunks_buffer_size = 0 + else: + cropped_chunk_length = batch_size - chunks_buffer_size + chunks_buffer.append(chunk.slice(0, cropped_chunk_length)) + yield pa.Table.from_batches(chunks_buffer) + chunks_buffer = [chunk.slice(cropped_chunk_length, len(chunk) - cropped_chunk_length)] + chunks_buffer_size = len(chunk) - cropped_chunk_length + if not drop_last_batch and chunks_buffer: + yield pa.Table.from_batches(chunks_buffer) + +# File: datasets-main/templates/new_dataset_script.py +"""""" +import csv +import json +import os +import datasets +_CITATION = '@InProceedings{huggingface:dataset,\ntitle = {A great new dataset},\nauthor={huggingface, Inc.\n},\nyear={2020}\n}\n' +_DESCRIPTION = 'This new dataset is designed to solve this great NLP task and is crafted with a lot of care.\n' +_HOMEPAGE = '' +_LICENSE = '' +_URLS = {'first_domain': 'https://huggingface.co/great-new-dataset-first_domain.zip', 'second_domain': 'https://huggingface.co/great-new-dataset-second_domain.zip'} + +class NewDataset(datasets.GeneratorBasedBuilder): + VERSION = datasets.Version('1.1.0') + BUILDER_CONFIGS = [datasets.BuilderConfig(name='first_domain', version=VERSION, description='This part of my dataset covers a first domain'), datasets.BuilderConfig(name='second_domain', version=VERSION, description='This part of my dataset covers a second domain')] + DEFAULT_CONFIG_NAME = 'first_domain' + + def _info(self): + if self.config.name == 'first_domain': + features = datasets.Features({'sentence': datasets.Value('string'), 'option1': datasets.Value('string'), 'answer': datasets.Value('string')}) + else: + features = datasets.Features({'sentence': datasets.Value('string'), 'option2': datasets.Value('string'), 'second_domain_answer': datasets.Value('string')}) + return datasets.DatasetInfo(description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION) + + def _split_generators(self, dl_manager): + urls = _URLS[self.config.name] + data_dir = dl_manager.download_and_extract(urls) + return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': os.path.join(data_dir, 'train.jsonl'), 'split': 'train'}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepath': os.path.join(data_dir, 'dev.jsonl'), 'split': 'dev'}), 
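# A usage sketch for table_iter above (assuming datasets.table is importable). It
# re-batches an Arrow-backed table into fixed-size pa.Table chunks, the kind of
# re-batching the library relies on when iterating over a dataset in batches.
from datasets.table import InMemoryTable, table_iter

table = InMemoryTable.from_pydict({"id": list(range(10))})
for batch in table_iter(table, batch_size=4):
    print(batch.num_rows, batch["id"].to_pylist())
# 4 [0, 1, 2, 3]
# 4 [4, 5, 6, 7]
# 2 [8, 9]          <- kept because drop_last_batch defaults to False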
datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': os.path.join(data_dir, 'test.jsonl'), 'split': 'test'})] + + def _generate_examples(self, filepath, split): + with open(filepath, encoding='utf-8') as f: + for (key, row) in enumerate(f): + data = json.loads(row) + if self.config.name == 'first_domain': + yield (key, {'sentence': data['sentence'], 'option1': data['option1'], 'answer': '' if split == 'test' else data['answer']}) + else: + yield (key, {'sentence': data['sentence'], 'option2': data['option2'], 'second_domain_answer': '' if split == 'test' else data['second_domain_answer']}) +
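# A usage sketch for the new_dataset_script.py template above. Once the placeholders
# (_URLS, feature names, file names) are filled in, the script can be loaded by path;
# the config name selects one of the BUILDER_CONFIGS. The path below is hypothetical,
# and recent versions of datasets require trust_remote_code=True to run script-based
# datasets.
from datasets import load_dataset

ds = load_dataset(
    "path/to/new_dataset_script.py",   # hypothetical local path to the filled-in script
    "first_domain",                    # one of the BUILDER_CONFIGS names
    trust_remote_code=True,
)
print(ds)              # DatasetDict with the train/validation/test splits from _split_generators
print(ds["train"][0])  # {'sentence': ..., 'option1': ..., 'answer': ...}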