# File: distil-whisper-main/training/create_student_model.py
""""""
import argparse
import copy
import logging

import numpy as np
import torch
from transformers import GenerationConfig, WhisperForConditionalGeneration, WhisperProcessor

logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description='Initialise a student Whisper model from a teacher model, copying the relevant layer weights and adjusting the processor as necessary.'
    )
    parser.add_argument('--teacher_checkpoint', type=str, required=True, help='The HF Hub ID of the teacher checkpoint.')
    parser.add_argument('--subfolder', type=str, default='', help='In case the relevant teacher weights are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.')
    parser.add_argument('--encoder_layers', type=int, default=None, help='Number of encoder layers to use in the student model. Defaults to all layers from the teacher.')
    parser.add_argument('--decoder_layers', type=int, default=2, help='Number of decoder layers to use in the student model. Defaults to 2 layers.')
    parser.add_argument('--decoder_layers_numbers', type=int, nargs='*', help='Layer numbers of the teacher decoder to use in the student model. Defaults to None, equivalent to taking the first and last layer (and equivalent to `--decoder_layers_numbers 0 -1`).')
    parser.add_argument('--save_dir', type=str, required=True, help='Where to save the student weights and processor.')
    parser.add_argument('--push_to_hub', type=bool, required=False, default=False, help='Whether to push the student weights and processor to the Hub.')
    parser.add_argument('--cache_dir', type=str, default=None, help='Where to store the pretrained models downloaded from huggingface.co')
    args = parser.parse_args()
    return args


def init_student_model_from_teacher(teacher_checkpoint, encoder_layers=None, decoder_layers=2, decoder_layers_numbers=None, save_dir=None, push_to_hub=None, cache_dir=None, subfolder=''):
    if decoder_layers_numbers is not None and len(decoder_layers_numbers) != decoder_layers:
        raise ValueError(f'Got {len(decoder_layers_numbers)} layer numbers for {decoder_layers} decoder layers.')

    teacher_model = WhisperForConditionalGeneration.from_pretrained(teacher_checkpoint, cache_dir=cache_dir, subfolder=subfolder, low_cpu_mem_usage=True)
    processor = WhisperProcessor.from_pretrained(teacher_checkpoint)
    generation_config = GenerationConfig.from_pretrained(teacher_checkpoint)
    generation_config.forced_decoder_ids = None

    teacher_config = teacher_model.config
    teacher_encoder_layers = teacher_config.encoder_layers
    teacher_decoder_layers = teacher_config.decoder_layers

    student_config = copy.deepcopy(teacher_config)
    student_config.update({'encoder_layers': encoder_layers if encoder_layers is not None else teacher_encoder_layers, 'decoder_layers': decoder_layers})

    # Map maximally spaced teacher layers onto the student layers, always keeping the last layer.
    encoder_mapping = np.linspace(0, teacher_encoder_layers - 1, student_config.encoder_layers, dtype=int)
    encoder_mapping[-1] = teacher_encoder_layers - 1

    encoder_map = {}
    for student_layer, teacher_layer in enumerate(encoder_mapping):
        encoder_map[teacher_layer] = student_layer

    if decoder_layers_numbers is None:
        decoder_mapping = np.linspace(0, teacher_decoder_layers - 1, student_config.decoder_layers, dtype=int)
        decoder_mapping[-1] = teacher_decoder_layers - 1
    else:
        decoder_mapping = decoder_layers_numbers

    decoder_map = {}
    for student_layer, teacher_layer in enumerate(decoder_mapping):
        decoder_map[teacher_layer] = student_layer

    student_model = WhisperForConditionalGeneration(student_config)
    missing_keys, unexpected_keys = student_model.load_state_dict(teacher_model.state_dict(), strict=False)
    if len(missing_keys) > 0:
        raise RuntimeError(f'Error(s) in loading state_dict for WhisperForConditionalGeneration. \nMissing key(s) in state_dict: {missing_keys}')
    if decoder_layers == teacher_decoder_layers:
        decoder_keys = [key for key in unexpected_keys if 'model.decoder.layers' in key]
        if len(decoder_keys) > 0:
            raise RuntimeError(f'Error(s) in loading state_dict for WhisperForConditionalGeneration. \nUnexpected key(s) in state_dict: {decoder_keys}')
    if encoder_layers == teacher_encoder_layers:
        encoder_keys = [key for key in unexpected_keys if 'model.encoder.layers' in key]
        if len(encoder_keys) > 0:
            raise RuntimeError(f'Error(s) in loading state_dict for WhisperForConditionalGeneration. \nUnexpected key(s) in state_dict: {encoder_keys}')

    # Copy across the selected teacher decoder (and optionally encoder) layer weights.
    for layer in range(teacher_decoder_layers):
        if layer in decoder_map:
            student_model.model.decoder.layers[decoder_map[layer]].load_state_dict(teacher_model.model.decoder.layers[layer].state_dict())

    if encoder_layers is not None:
        for layer in range(teacher_encoder_layers):
            if layer in encoder_map:
                student_model.model.encoder.layers[encoder_map[layer]].load_state_dict(teacher_model.model.encoder.layers[layer].state_dict())

    del teacher_model

    if save_dir is not None:
        student_model.save_pretrained(save_dir)
        processor.save_pretrained(save_dir)
        generation_config.save_pretrained(save_dir)

    logger.info('Checking we can load the saved model...')
    student_model = WhisperForConditionalGeneration.from_pretrained(save_dir, low_cpu_mem_usage=True)
    processor = WhisperProcessor.from_pretrained(save_dir)

    # Sanity-check a forward pass on one second of dummy audio.
    input_features = processor(np.ones(16000), sampling_rate=16000, return_tensors='pt').input_features
    decoder_start_token_id = student_model.config.decoder_start_token_id
    decoder_input_ids = torch.ones((input_features.shape[0], 1), dtype=torch.long) * decoder_start_token_id

    logger.info('Checking we can run the converted model forward...')
    _ = student_model(input_features, decoder_input_ids=decoder_input_ids).logits
    logger.info('Conversion successful!')

    if push_to_hub:
        student_model.push_to_hub(save_dir)
        processor.push_to_hub(save_dir)
        generation_config.push_to_hub(save_dir)


if __name__ == '__main__':
    args = parse_args()
    init_student_model_from_teacher(teacher_checkpoint=args.teacher_checkpoint, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, decoder_layers_numbers=args.decoder_layers_numbers, save_dir=args.save_dir, push_to_hub=args.push_to_hub, cache_dir=args.cache_dir, subfolder=args.subfolder)


# File: distil-whisper-main/training/flax/convert_train_state_to_hf.py
""""""
import logging
import os
import sys
from dataclasses import field
from pathlib import Path
from typing import Callable, Optional

import flax
import jax
import jax.numpy as jnp
import optax
from flax import jax_utils, traverse_util
from flax.serialization import from_bytes
from flax.training import train_state
from flax.training.common_utils import shard_prng_key
from huggingface_hub import Repository, create_repo
from optax._src import linear_algebra
from transformers import AutoConfig, HfArgumentParser, Seq2SeqTrainingArguments
from transformers.file_utils import get_full_repo_name
from transformers.utils import check_min_version
from transformers.utils.versions import require_version

from distil_whisper import FlaxWhisperForConditionalGeneration

jax.distributed.initialize()

check_min_version('4.27.0.dev0')
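# Illustrative sketch, not from the original scripts: create_student_model.py above shrinks the
# teacher by copying a maximally spaced subset of its layers, using np.linspace over the teacher
# layer indices so that the first and last layers are always kept. A minimal, self-contained demo
# of that mapping, assuming a hypothetical 32-layer teacher decoder reduced to a 2-layer student:
import numpy as np

teacher_decoder_layers = 32  # e.g. a whisper-large-v2-sized decoder (assumed for illustration)
student_decoder_layers = 2   # the default --decoder_layers

decoder_mapping = np.linspace(0, teacher_decoder_layers - 1, student_decoder_layers, dtype=int)
decoder_mapping[-1] = teacher_decoder_layers - 1  # guard the last entry against rounding

# teacher layer index -> student layer index
decoder_map = {int(teacher_layer): student_layer for student_layer, teacher_layer in enumerate(decoder_mapping)}
print(decoder_map)  # {0: 0, 31: 1}: teacher layers 0 and 31 initialise student layers 0 and 1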
require_version('datasets>=1.18.0', 'To fix: pip install -r examples/flax/speech-recogintion/requirements.txt') logger = logging.getLogger(__name__) @flax.struct.dataclass class ModelArguments: model_name_or_path: str = field(metadata={'help': 'Path to pretrained student model or model identifier from huggingface.co/models'}) config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'}) dtype: Optional[str] = field(default='float32', metadata={'help': 'Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`.'}) load_with_scan_weights: bool = field(default=False, metadata={'help': 'Whether the pre-trained checkpoint has its weights stored in scan format. Set to True for scanned weights, defaults to False for non-scan (unrolled) weights.'}) use_scan: bool = field(default=True, metadata={'help': 'Whether or not to use `scan_with_axes` over the encoder and decoder blocks.'}) def create_learning_rate_fn(num_train_steps: int, lr_scheduler_type: str, num_warmup_steps: int, learning_rate: float) -> Callable[[int], jnp.array]: lr_scheduler_types = ('linear', 'constant_with_warmup') if lr_scheduler_type not in lr_scheduler_types: raise ValueError(f'lr_scheduler_type of type {lr_scheduler_type} not supported, choose from {lr_scheduler_types}.') warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule(init_value=learning_rate, end_value=0 if lr_scheduler_type == 'linear' else learning_rate, transition_steps=num_train_steps - num_warmup_steps) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn class TrainState(train_state.TrainState): dropout_rng: jnp.ndarray max_grad_norm: float def apply_gradients(self, *, grads, **kwargs): g_norm = linear_algebra.global_norm(grads) g_norm = jnp.maximum(self.max_grad_norm, g_norm) grads = jax.tree_map(lambda t: t / g_norm * self.max_grad_norm, grads) (updates, new_opt_state) = self.tx.update(grads, self.opt_state, self.params) new_params = optax.apply_updates(self.params, updates) return self.replace(step=self.step + 1, params=new_params, opt_state=new_opt_state, **kwargs) def replicate(self): return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng)) def unreplicate(self): return jax_utils.unreplicate(self) def main(): parser = HfArgumentParser((ModelArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): (model_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, training_args) = parser.parse_args_into_dataclasses() if training_args.push_to_hub: if training_args.hub_model_id is None: repo_name = 
get_full_repo_name(Path(training_args.output_dir).absolute().name, token=training_args.hub_token) else: repo_name = training_args.hub_model_id create_repo(repo_name, exist_ok=True, token=training_args.hub_token) repo = Repository(training_args.output_dir, clone_from=repo_name, token=training_args.hub_token) config = AutoConfig.from_pretrained(model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None) (student_model, student_params) = FlaxWhisperForConditionalGeneration.from_pretrained(model_args.model_name_or_path, config=config, dtype=getattr(jnp, model_args.dtype), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, _do_init=False, use_scan=model_args.load_with_scan_weights) if model_args.use_scan: student_model.enable_scan() student_params = student_model.convert_unroll_to_scan(student_params) rng = jax.random.PRNGKey(training_args.seed) (rng, dropout_rng) = jax.random.split(rng) total_train_steps = int(training_args.max_steps) linear_decay_lr_schedule_fn = create_learning_rate_fn(total_train_steps, training_args.lr_scheduler_type, training_args.warmup_steps, training_args.learning_rate) def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) layer_norm_candidates = ['layer_norm', 'self_attn_layer_norm', 'final_layer_norm', 'encoder_attn_layer_norm'] layer_norm_named_params = {layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in ''.join(layer).lower()} flat_mask = {path: path[-1] != 'bias' and path[-2:] not in layer_norm_named_params for path in flat_params} return traverse_util.unflatten_dict(flat_mask) adamw = optax.adamw(learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn) student_state = TrainState.create(apply_fn=student_model.__call__, params=student_params, tx=adamw, dropout_rng=dropout_rng, max_grad_norm=training_args.max_grad_norm) if training_args.resume_from_checkpoint is not None: if os.path.isfile(os.path.join(training_args.resume_from_checkpoint, 'train_state.msgpack')): logger.info(f'Checkpoint detected, resuming training at {training_args.resume_from_checkpoint}. To avoid this behavior, omit the resume_from_checkpoint argument.') with Path(os.path.join(training_args.resume_from_checkpoint, 'train_state.msgpack')).open('rb') as f: student_state = from_bytes(student_state, f.read()) else: logger.warning(f'Checkpoint {training_args.resume_from_checkpoint} not detected, training from scratch. 
Ensure you pass the path to a folder with a valid checkpoint for your model.') cur_step = int(jax.device_get(student_state.step)) if jax.process_index() == 0: student_model.disable_scan() student_state_params = student_model.convert_scan_to_unroll(student_state.params) student_params = jax.device_get(student_state_params) student_model.save_pretrained(os.path.join(training_args.output_dir, f'checkpoint-{cur_step}'), params=student_params) if training_args.push_to_hub: repo.push_to_hub(commit_message=f'Saving weights of step {cur_step}', blocking=False) if __name__ == '__main__': main() # File: distil-whisper-main/training/flax/create_student_model.py """""" import argparse import copy import logging import jax import numpy as np from flax.core import freeze, unfreeze from transformers import GenerationConfig, WhisperFeatureExtractor, WhisperProcessor from distil_whisper import FlaxWhisperForConditionalGeneration logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser(description='Initialise a student Whisper model from a teacher model, copying the relevant layer weights and adjusting the processor as necessary.') parser.add_argument('--teacher_checkpoint', type=str, required=True, help='The HF Hub ID of the teacher checkpoint.') parser.add_argument('--subfolder', type=str, default='', help='In case the relevant teacher weights are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.') parser.add_argument('--encoder_layers', type=int, default=None, help='Number of encoder layers to use in the student model. Defaults to all layers from the teacher.') parser.add_argument('--decoder_layers', type=int, default=2, help='Number of decoder layers to use in the student model. Defaults to 2 layers.') parser.add_argument('--max_source_positions', type=int, default=None, help='The maximum sequence length of log-mel filter-bank features that this model might ever be used with. Can be used to create a student model with a shorter context length than the teacher model. 
Defaults to the number of source positions in the teacher model (1500).') parser.add_argument('--save_dir', type=str, required=True, help='Where to save the student weights and processor.') parser.add_argument('--push_to_hub', type=bool, required=False, default=False, help='Whether to push the student weights and processor to the Hub.') parser.add_argument('--cache_dir', type=str, default=None, help='Where to store the pretrained models downloaded from huggingface.co') args = parser.parse_args() return args def init_student_model_from_teacher(teacher_checkpoint, encoder_layers=None, decoder_layers=2, max_source_positions=None, save_dir=None, push_to_hub=None, cache_dir=None, subfolder=''): (teacher_model, teacher_params) = FlaxWhisperForConditionalGeneration.from_pretrained(teacher_checkpoint, _do_init=False, cache_dir=cache_dir, subfolder=subfolder) processor = WhisperProcessor.from_pretrained(teacher_checkpoint) generation_config = GenerationConfig.from_pretrained(teacher_checkpoint) teacher_config = teacher_model.config teacher_encoder_layers = teacher_config.encoder_layers teacher_decoder_layers = teacher_config.decoder_layers student_config = copy.deepcopy(teacher_config) student_config.update({'encoder_layers': encoder_layers if encoder_layers is not None else teacher_encoder_layers, 'decoder_layers': decoder_layers, 'max_source_positions': max_source_positions if max_source_positions is not None else student_config.max_source_positions}) encoder_mapping = np.linspace(0, teacher_encoder_layers - 1, student_config.encoder_layers, dtype=int) encoder_mapping[-1] = teacher_encoder_layers - 1 encoder_map = {} for (student_layer, teacher_layer) in enumerate(encoder_mapping): encoder_map[str(teacher_layer)] = str(student_layer) decoder_mapping = np.linspace(0, teacher_decoder_layers - 1, student_config.decoder_layers, dtype=int) decoder_mapping[-1] = teacher_decoder_layers - 1 decoder_map = {} for (student_layer, teacher_layer) in enumerate(decoder_mapping): decoder_map[str(teacher_layer)] = str(student_layer) student_params = unfreeze(teacher_params) student_params['model']['decoder']['layers'] = {} for layer in teacher_params['model']['decoder']['layers']: if layer in decoder_map: student_params['model']['decoder']['layers'][decoder_map[layer]] = teacher_params['model']['decoder']['layers'][layer] if encoder_layers is not None: student_params['model']['encoder']['layers'] = {} for layer in teacher_params['model']['encoder']['layers']: if layer in encoder_map: student_params['model']['encoder']['layers'][encoder_map[layer]] = teacher_params['model']['encoder']['layers'][layer] if max_source_positions is not None: student_params['model']['encoder']['embed_positions']['embedding'] = teacher_params['model']['encoder']['embed_positions']['embedding'][:student_config.max_source_positions, :] chunk_length = int(student_config.max_source_positions * 2 / 100) processor.feature_extractor = WhisperFeatureExtractor(chunk_length=chunk_length) del teacher_params, teacher_model student_params = freeze(student_params) student_model = FlaxWhisperForConditionalGeneration(student_config, _do_init=False) if save_dir is not None: student_model.save_pretrained(save_dir, params=student_params) processor.save_pretrained(save_dir) generation_config.save_pretrained(save_dir) logger.info('Checking we can load the saved model...') (student_model, student_params) = FlaxWhisperForConditionalGeneration.from_pretrained(save_dir, _do_init=False) processor = WhisperProcessor.from_pretrained(save_dir) input_features = 
processor(np.ones(16000), sampling_rate=16000, return_tensors='np').input_features decoder_start_token_id = student_model.config.decoder_start_token_id decoder_input_ids = np.ones((input_features.shape[0], 1)) * decoder_start_token_id logger.info('Checking we can run the converted model forward...') _ = student_model(input_features, decoder_input_ids=decoder_input_ids, params=student_params).logits logger.info('Conversion successful!') if push_to_hub: student_model.push_to_hub(save_dir, params=student_params) processor.push_to_hub(save_dir) generation_config.push_to_hub(save_dir) if __name__ == '__main__': args = parse_args() logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) init_student_model_from_teacher(teacher_checkpoint=args.teacher_checkpoint, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, max_source_positions=args.max_source_positions, save_dir=args.save_dir, push_to_hub=args.push_to_hub, cache_dir=args.cache_dir, subfolder=args.subfolder) # File: distil-whisper-main/training/flax/distil_whisper/layers.py """""" import dataclasses import functools import operator from typing import Any, Callable, Iterable, List, Optional, Sequence, Tuple, Union import jax import jax.numpy as jnp import numpy as np from flax import linen as nn from flax.linen import partitioning as nn_partitioning from flax.linen.dtypes import promote_dtype from jax import lax, random param_with_axes = nn_partitioning.param_with_axes with_sharding_constraint = nn_partitioning.with_sharding_constraint Array = jnp.ndarray DType = jnp.dtype PRNGKey = jnp.ndarray Shape = Iterable[int] Activation = Callable[..., Array] PrecisionLike = Union[None, str, lax.Precision, Tuple[str, str], Tuple[lax.Precision, lax.Precision]] DotGeneralT = Callable[..., Array] ConvGeneralDilatedT = Callable[..., Array] PaddingLike = Union[str, int, Sequence[Union[int, Tuple[int, int]]]] LaxPadding = Union[str, Sequence[Tuple[int, int]]] Initializer = Callable[[PRNGKey, Shape, DType], Array] InitializerAxis = Union[int, Tuple[int, ...]] NdInitializer = Callable[[PRNGKey, Shape, DType, InitializerAxis, InitializerAxis], Array] default_embed_init = nn.initializers.variance_scaling(1.0, 'fan_in', 'normal', out_axis=0) def _compute_fans(shape: jax.core.NamedShape, in_axis=-2, out_axis=-1): if isinstance(in_axis, int): in_size = shape[in_axis] else: in_size = int(np.prod([shape[i] for i in in_axis])) if isinstance(out_axis, int): out_size = shape[out_axis] else: out_size = int(np.prod([shape[i] for i in out_axis])) receptive_field_size = shape.total / in_size / out_size fan_in = in_size * receptive_field_size fan_out = out_size * receptive_field_size return (fan_in, fan_out) def variance_scaling(scale, mode, distribution, in_axis=-2, out_axis=-1, dtype=jnp.float_): def init(key, shape, dtype=dtype): return jnp.zeros(shape, dtype=dtype) dtype = jax.dtypes.canonicalize_dtype(dtype) shape = jax.core.as_named_shape(shape) (fan_in, fan_out) = _compute_fans(shape, in_axis, out_axis) if mode == 'fan_in': denominator = fan_in elif mode == 'fan_out': denominator = fan_out elif mode == 'fan_avg': denominator = (fan_in + fan_out) / 2 else: raise ValueError('invalid mode for variance scaling initializer: {}'.format(mode)) variance = jnp.array(scale / denominator, dtype=dtype) if distribution == 'truncated_normal': stddev = jnp.sqrt(variance) / jnp.array(0.8796256610342398, dtype) return random.truncated_normal(key, -2, 2, shape, dtype) * stddev elif distribution == 'normal': return random.normal(key, shape, 
dtype) * jnp.sqrt(variance) elif distribution == 'uniform': return random.uniform(key, shape, dtype, -1) * jnp.sqrt(3 * variance) else: raise ValueError('invalid distribution for variance scaling initializer: {}'.format(distribution)) return init def nd_dense_init(scale, mode, distribution): def init_fn(key, shape, dtype, in_axis, out_axis): fn = variance_scaling(scale, mode, distribution, in_axis, out_axis) return fn(key, shape, dtype) return init_fn def dot_product_attention(query: Array, key: Array, value: Array, bias: Optional[Array]=None, dropout_rng: Optional[PRNGKey]=None, dropout_rate: float=0.0, deterministic: bool=False, dtype: DType=jnp.float32, float32_logits: bool=False): assert key.ndim == query.ndim == value.ndim, 'q, k, v must have same rank.' assert query.shape[:-3] == key.shape[:-3] == value.shape[:-3], 'q, k, v batch dims must match.' assert query.shape[-2] == key.shape[-2] == value.shape[-2], 'q, k, v num_heads must match.' assert key.shape[-3] == value.shape[-3], 'k, v lengths must match.' assert query.shape[-1] == key.shape[-1], 'q, k depths must match.' if float32_logits: query = query.astype(jnp.float32) key = key.astype(jnp.float32) attn_weights = jnp.einsum('bqhd,bkhd->bhqk', query, key) if bias is not None: attn_weights = attn_weights + bias.astype(attn_weights.dtype) attn_weights = jax.nn.softmax(attn_weights).astype(dtype) if not deterministic and dropout_rate > 0.0: keep_prob = 1.0 - dropout_rate dropout_shape = list(attn_weights.shape) dropout_shape[-2] = 1 keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape) keep = jnp.broadcast_to(keep, attn_weights.shape) multiplier = keep.astype(attn_weights.dtype) / jnp.asarray(keep_prob, dtype=dtype) attn_weights = attn_weights * multiplier return jnp.einsum('bhqk,bkhd->bqhd', attn_weights, value) dynamic_vector_slice_in_dim = jax.vmap(lax.dynamic_slice_in_dim, in_axes=(None, 0, None, None)) class MultiHeadDotProductAttention(nn.Module): num_heads: int head_dim: int dtype: DType = jnp.float32 dropout_rate: float = 0.0 kernel_init: NdInitializer = nd_dense_init(1.0, 'fan_in', 'normal') float32_logits: bool = False @nn.compact def __call__(self, inputs_q: Array, inputs_kv: Array, mask: Optional[Array]=None, bias: Optional[Array]=None, *, decode: bool=False, deterministic: bool=False) -> Array: projection = functools.partial(DenseGeneral, axis=-1, features=(self.num_heads, self.head_dim), kernel_axes=('embed', 'heads', 'kv'), dtype=self.dtype) depth_scaling = jnp.sqrt(self.head_dim).astype(self.dtype) def query_init(*args): return self.kernel_init(*args) / depth_scaling query = projection(kernel_init=query_init, name='query')(inputs_q) key = projection(kernel_init=self.kernel_init, name='key')(inputs_kv) value = projection(kernel_init=self.kernel_init, name='value')(inputs_kv) query = with_sharding_constraint(query, ('batch', 'length', 'heads', 'kv')) key = with_sharding_constraint(key, ('batch', 'length', 'heads', 'kv')) value = with_sharding_constraint(value, ('batch', 'length', 'heads', 'kv')) if decode: is_initialized = self.has_variable('cache', 'cached_key') def swap_dims(x): return x[:-3] + tuple((x[i] for i in [-2, -1, -3])) cached_key = self.variable('cache', 'cached_key', jnp.zeros, swap_dims(key.shape), key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, swap_dims(value.shape), value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, dtype=jnp.int32)) if is_initialized: (batch, num_heads, head_dim, length) = cached_key.value.shape expected_shape = 
(batch, 1, num_heads, head_dim) if expected_shape != query.shape: raise ValueError('Autoregressive cache shape error, expected query shape %s instead got %s.' % (expected_shape, query.shape)) cur_index = cache_index.value one_hot_indices = jax.nn.one_hot(cur_index, length, dtype=key.dtype) one_token_key = jnp.moveaxis(key, -3, -1) one_token_value = jnp.moveaxis(value, -3, -1) key = cached_key.value + one_token_key * one_hot_indices value = cached_value.value + one_token_value * one_hot_indices cached_key.value = key cached_value.value = value cache_index.value = cache_index.value + 1 key = jnp.moveaxis(key, -1, -3) value = jnp.moveaxis(value, -1, -3) mask = combine_masks(mask, jnp.broadcast_to(jnp.arange(length) <= cur_index, (batch, 1, 1, length))) if bias is not None: bias = dynamic_vector_slice_in_dim(jnp.squeeze(bias, axis=0), jnp.reshape(cur_index, -1), 1, -2) if mask is not None: attention_bias = lax.select(mask > 0, jnp.full(mask.shape, 0.0).astype(self.dtype), jnp.full(mask.shape, -10000000000.0).astype(self.dtype)) else: attention_bias = None if bias is not None: attention_bias = combine_biases(attention_bias, bias) dropout_rng = None if not deterministic and self.dropout_rate > 0.0: dropout_rng = self.make_rng('dropout') x = dot_product_attention(query, key, value, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout_rate, deterministic=deterministic, dtype=self.dtype, float32_logits=self.float32_logits) out = DenseGeneral(features=inputs_q.shape[-1], axis=(-2, -1), kernel_init=self.kernel_init, kernel_axes=('heads', 'kv', 'embed'), dtype=self.dtype, name='out')(x) return out def _normalize_axes(axes: Iterable[int], ndim: int) -> Tuple[int]: return tuple([ax if ax >= 0 else ndim + ax for ax in axes]) def _canonicalize_tuple(x): if isinstance(x, Iterable): return tuple(x) else: return (x,) class DenseGeneral(nn.Module): features: Union[Iterable[int], int] axis: Union[Iterable[int], int] = -1 dtype: DType = jnp.float32 params_dtype: DType = jnp.float32 kernel_init: NdInitializer = nd_dense_init(1.0, 'fan_in', 'normal') kernel_axes: Tuple[str, ...] 
= () use_bias: bool = True bias_init: Any = nn.initializers.zeros @nn.compact def __call__(self, inputs: Array) -> Array: features = _canonicalize_tuple(self.features) axis = _canonicalize_tuple(self.axis) inputs = jnp.asarray(inputs, self.dtype) axis = _normalize_axes(axis, inputs.ndim) kernel_shape = tuple([inputs.shape[ax] for ax in axis]) + features kernel_in_axis = np.arange(len(axis)) kernel_out_axis = np.arange(len(axis), len(axis) + len(features)) kernel = param_with_axes('kernel', self.kernel_init, kernel_shape, self.params_dtype, kernel_in_axis, kernel_out_axis, axes=self.kernel_axes) if self.use_bias: bias = param_with_axes('bias', self.bias_init, features, self.params_dtype, axes=(self.kernel_axes[-1],)) kernel = jnp.asarray(kernel, self.dtype) contract_ind = tuple(range(0, len(axis))) y = lax.dot_general(inputs, kernel, ((axis, contract_ind), ((), ()))) if self.use_bias: bias = jnp.asarray(bias, self.dtype) y += jnp.reshape(bias, (1,) * (len(features) - y.ndim) + bias.shape[:]) return y def _convert_to_activation_function(fn_or_string: Union[str, Callable]) -> Callable: if fn_or_string == 'linear': return lambda x: x elif isinstance(fn_or_string, str): return getattr(nn, fn_or_string) elif callable(fn_or_string): return fn_or_string else: raise ValueError("don't know how to convert %s to an activation function" % (fn_or_string,)) class MlpBlock(nn.Module): intermediate_dim: int = 2048 activations: Sequence[Union[str, Callable]] = ('relu',) kernel_init: NdInitializer = nd_dense_init(1.0, 'fan_in', 'truncated_normal') intermediate_dropout_rate: float = 0.1 dtype: Any = jnp.float32 @nn.compact def __call__(self, inputs, decode: bool=False, deterministic: bool=False): activations = [] for (idx, act_fn) in enumerate(self.activations): dense_name = 'wi' if len(self.activations) == 1 else f'wi_{idx}' x = DenseGeneral(self.intermediate_dim, dtype=self.dtype, kernel_init=self.kernel_init, kernel_axes=('embed', 'mlp'), name=dense_name)(inputs) x = _convert_to_activation_function(act_fn)(x) activations.append(x) x = functools.reduce(operator.mul, activations) x = nn.Dropout(rate=self.intermediate_dropout_rate, broadcast_dims=(-2,))(x, deterministic=deterministic) x = with_sharding_constraint(x, ('batch', 'length', 'mlp')) output = DenseGeneral(inputs.shape[-1], dtype=self.dtype, kernel_init=self.kernel_init, kernel_axes=('mlp', 'embed'), name='wo')(x) return output class Embed(nn.Module): num_embeddings: int features: int cast_input_dtype: Optional[DType] = None dtype: DType = jnp.float32 params_dtype: DType = jnp.float32 attend_dtype: Optional[DType] = None embedding_init: Initializer = default_embed_init one_hot: bool = True embedding: Array = dataclasses.field(init=False) def setup(self): self.embedding = param_with_axes('embedding', self.embedding_init, (self.num_embeddings, self.features), self.params_dtype, axes=('vocab', 'embed')) def __call__(self, inputs: Array) -> Array: if self.cast_input_dtype: inputs = inputs.astype(self.cast_input_dtype) if not jnp.issubdtype(inputs.dtype, jnp.integer): raise ValueError('Input type must be an integer or unsigned integer.') if self.one_hot: iota = lax.iota(jnp.int32, self.num_embeddings) one_hot = jnp.array(inputs[..., jnp.newaxis] == iota, dtype=self.dtype) output = jnp.dot(one_hot, jnp.asarray(self.embedding, self.dtype)) else: output = jnp.asarray(self.embedding, self.dtype)[inputs] output = with_sharding_constraint(output, ('batch', 'length', 'embed')) return output def attend(self, query: Array) -> Array: dtype = self.attend_dtype if 
self.attend_dtype is not None else self.dtype return jnp.dot(query, jnp.asarray(self.embedding, dtype).T) class RelativePositionBiases(nn.Module): num_buckets: int max_distance: int num_heads: int dtype: Any embedding_init: Callable[..., Array] = nn.linear.default_embed_init @staticmethod def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128): ret = 0 n = -relative_position if bidirectional: num_buckets //= 2 ret += (n < 0).astype(np.int32) * num_buckets n = np.abs(n) else: n = np.maximum(n, 0) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + (np.log(n.astype(np.float32) / max_exact + np.finfo(np.float32).eps) / np.log(max_distance / max_exact) * (num_buckets - max_exact)).astype(np.int32) val_if_large = np.minimum(val_if_large, num_buckets - 1) ret += np.where(is_small, n, val_if_large) return ret @nn.compact def __call__(self, qlen, klen, bidirectional=True): context_position = np.arange(qlen, dtype=jnp.int32)[:, None] memory_position = np.arange(klen, dtype=jnp.int32)[None, :] relative_position = memory_position - context_position rp_bucket = self._relative_position_bucket(relative_position, bidirectional=bidirectional, num_buckets=self.num_buckets, max_distance=self.max_distance) relative_attention_bias = param_with_axes('rel_embedding', self.embedding_init, (self.num_heads, self.num_buckets), jnp.float32, axes=('heads', 'relpos_buckets')) relative_attention_bias = jnp.asarray(relative_attention_bias, self.dtype) bcast_iota = lax.broadcasted_iota(jnp.int32, (self.num_buckets, 1, 1), 0) rp_bucket_one_hot = jnp.array(rp_bucket[jnp.newaxis, ...] == bcast_iota, dtype=self.dtype) values = lax.dot_general(relative_attention_bias, rp_bucket_one_hot, (((1,), (0,)), ((), ()))) return values[jnp.newaxis, ...] 
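# Illustrative sketch, not from the original module: RelativePositionBiases above buckets signed
# key/query offsets T5-style -- one half of the buckets per direction (when bidirectional), exact
# buckets for short distances, and log-spaced buckets out to max_distance. A small numpy demo of
# the static bucketing helper, assuming the RelativePositionBiases class defined above is in scope
# and using num_buckets=32, max_distance=128 purely for illustration:
import numpy as np

offsets = np.array([[-50, -8, -1, 0, 1, 8, 50]])  # memory_position - context_position
buckets = RelativePositionBiases._relative_position_bucket(
    offsets, bidirectional=True, num_buckets=32, max_distance=128
)
# Offsets near zero get their own exact buckets, distant offsets share coarser log-spaced buckets,
# and positive vs. negative offsets land in separate halves of the bucket range.
print(buckets, bool(buckets.min() >= 0 and buckets.max() < 32))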
class LayerNorm(nn.Module): epsilon: float = 1e-06 dtype: Any = jnp.float32 params_dtype: DType = jnp.float32 use_bias: bool = True use_scale: bool = True bias_init: Callable[[PRNGKey, Shape, Any], Array] = nn.initializers.zeros scale_init: Callable[[PRNGKey, Shape, Any], Array] = nn.initializers.ones @nn.compact def __call__(self, x): x = jnp.asarray(x, jnp.float32) features = x.shape[-1] mean = jnp.mean(x, axis=-1, keepdims=True) mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True) var = mean2 - lax.square(mean) mul = lax.rsqrt(var + self.epsilon) if self.use_scale: scale = param_with_axes('scale', self.scale_init, (features,), self.params_dtype, axes=('embed',)) mul = mul * jnp.asarray(scale, self.dtype) y = (x - mean) * mul if self.use_bias: bias = param_with_axes('bias', self.bias_init, (features,), self.params_dtype, axes=('embed',)) y = y + jnp.asarray(bias, self.dtype) return jnp.asarray(y, self.dtype) def make_attention_mask(query_input: Array, key_input: Array, pairwise_fn: Callable=jnp.multiply, extra_batch_dims: int=0, dtype: DType=jnp.float32) -> Array: mask = pairwise_fn(jnp.expand_dims(query_input, axis=-1), jnp.expand_dims(key_input, axis=-2)) mask = jnp.expand_dims(mask, axis=-3) mask = jnp.expand_dims(mask, axis=tuple(range(extra_batch_dims))) return mask.astype(dtype) def make_causal_mask(x: Array, extra_batch_dims: int=0, dtype: DType=jnp.float32) -> Array: idxs = jnp.broadcast_to(jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape) return make_attention_mask(idxs, idxs, jnp.greater_equal, extra_batch_dims=extra_batch_dims, dtype=dtype) def combine_masks(*masks: Optional[Array], dtype: DType=jnp.float32): masks = [m for m in masks if m is not None] if not masks: return None assert all((x.ndim == masks[0].ndim for x in masks)), f'masks must have same rank: {tuple((x.ndim for x in masks))}' (mask, *other_masks) = masks for other_mask in other_masks: mask = jnp.logical_and(mask, other_mask) return mask.astype(dtype) def combine_biases(*masks: Optional[Array]): masks = [m for m in masks if m is not None] if not masks: return None assert all((x.ndim == masks[0].ndim for x in masks)), f'masks must have same rank: {tuple((x.ndim for x in masks))}' (mask, *other_masks) = masks for other_mask in other_masks: mask = mask + other_mask return mask def make_decoder_mask(decoder_target_tokens: Array, dtype: DType, decoder_causal_attention: Optional[Array]=None, decoder_segment_ids: Optional[Array]=None) -> Array: masks = [] causal_mask = make_causal_mask(decoder_target_tokens, dtype=dtype) if decoder_causal_attention is not None: inputs_mask = make_attention_mask(decoder_causal_attention, decoder_causal_attention, jnp.logical_and, dtype=dtype) masks.append(jnp.logical_or(causal_mask, inputs_mask).astype(dtype)) else: masks.append(causal_mask) masks.append(make_attention_mask(decoder_target_tokens > 0, decoder_target_tokens > 0, dtype=dtype)) if decoder_segment_ids is not None: masks.append(make_attention_mask(decoder_segment_ids, decoder_segment_ids, jnp.equal, dtype=dtype)) return combine_masks(*masks, dtype=dtype) def canonicalize_padding(padding: PaddingLike, rank: int) -> LaxPadding: if isinstance(padding, str): return padding if isinstance(padding, int): return [(padding, padding)] * rank if isinstance(padding, Sequence) and len(padding) == rank: new_pad = [] for p in padding: if isinstance(p, int): new_pad.append((p, p)) elif isinstance(p, tuple) and len(p) == 2: new_pad.append(p) else: break if len(new_pad) == rank: return new_pad raise ValueError(f'Invalid padding format: 
{padding}, should be str, int, or a sequence of len {rank} where each element is an int or pair of ints.') def _conv_dimension_numbers(input_shape): ndim = len(input_shape) lhs_spec = (0, ndim - 1) + tuple(range(1, ndim - 1)) rhs_spec = (ndim - 1, ndim - 2) + tuple(range(0, ndim - 2)) out_spec = lhs_spec return lax.ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec) class _Conv(nn.Module): features: int kernel_size: Sequence[int] strides: Union[None, int, Sequence[int]] = 1 padding: PaddingLike = 'SAME' input_dilation: Union[None, int, Sequence[int]] = 1 kernel_dilation: Union[None, int, Sequence[int]] = 1 feature_group_count: int = 1 use_bias: bool = True mask: Optional[Array] = None dtype: Optional[DType] = None params_dtype: DType = jnp.float32 precision: PrecisionLike = None kernel_init: Callable[[PRNGKey, Shape, DType], Array] = nn.initializers.lecun_normal() bias_init: Callable[[PRNGKey, Shape, DType], Array] = nn.initializers.zeros conv_general_dilated: ConvGeneralDilatedT = lax.conv_general_dilated kernel_axes: Tuple[str, ...] = () @property def shared_weights(self) -> bool: ... @nn.compact def __call__(self, inputs: Array) -> Array: if isinstance(self.kernel_size, int): raise TypeError(f'Expected Conv kernel_size to be a tuple/list of integers (eg.: [3, 3]) but got {self.kernel_size}.') else: kernel_size = tuple(self.kernel_size) def maybe_broadcast(x: Optional[Union[int, Sequence[int]]]) -> Tuple[int, ...]: if x is None: x = 1 if isinstance(x, int): return (x,) * len(kernel_size) return tuple(x) num_batch_dimensions = inputs.ndim - (len(kernel_size) + 1) if num_batch_dimensions != 1: input_batch_shape = inputs.shape[:num_batch_dimensions] total_batch_size = int(np.prod(input_batch_shape)) flat_input_shape = (total_batch_size,) + inputs.shape[num_batch_dimensions:] inputs = jnp.reshape(inputs, flat_input_shape) strides = maybe_broadcast(self.strides) input_dilation = maybe_broadcast(self.input_dilation) kernel_dilation = maybe_broadcast(self.kernel_dilation) padding_lax = canonicalize_padding(self.padding, len(kernel_size)) if padding_lax == 'CIRCULAR': kernel_size_dilated = [(k - 1) * d + 1 for (k, d) in zip(kernel_size, kernel_dilation)] zero_pad: List[Tuple[int, int]] = [(0, 0)] pads = zero_pad + [((k - 1) // 2, k // 2) for k in kernel_size_dilated] + [(0, 0)] inputs = jnp.pad(inputs, pads, mode='wrap') padding_lax = 'VALID' elif padding_lax == 'CAUSAL': if len(kernel_size) != 1: raise ValueError('Causal padding is only implemented for 1D convolutions.') left_pad = kernel_dilation[0] * (kernel_size[0] - 1) pads = [(0, 0), (left_pad, 0), (0, 0)] inputs = jnp.pad(inputs, pads) padding_lax = 'VALID' dimension_numbers = _conv_dimension_numbers(inputs.shape) in_features = jnp.shape(inputs)[-1] if self.shared_weights: assert in_features % self.feature_group_count == 0 kernel_shape = kernel_size + (in_features // self.feature_group_count, self.features) else: if self.feature_group_count != 1: raise NotImplementedError(f'`lax.conv_general_dilated_local` does not support `feature_group_count != 1`, got `{self.feature_group_count}`.') conv_output_shape = jax.eval_shape(lambda lhs, rhs: self.conv_general_dilated(lhs=lhs, rhs=rhs, window_strides=strides, padding=padding_lax, dimension_numbers=dimension_numbers, lhs_dilation=input_dilation, rhs_dilation=kernel_dilation), inputs, jax.ShapedArray(kernel_size + (in_features, self.features), inputs.dtype)).shape kernel_shape = conv_output_shape[1:-1] + (np.prod(kernel_size) * in_features, self.features) if self.mask is not None and 
self.mask.shape != kernel_shape: raise ValueError(f'Mask needs to have the same shape as weights. Shapes are: {self.mask.shape}, {kernel_shape}') kernel = param_with_axes('kernel', self.kernel_init, kernel_shape, self.params_dtype, axes=self.kernel_axes) if self.mask is not None: kernel *= self.mask if self.use_bias: if self.shared_weights: bias_shape = (self.features,) else: bias_shape = conv_output_shape[1:] bias = param_with_axes('bias', self.bias_init, bias_shape, self.params_dtype, axes=(self.kernel_axes[-1],)) else: bias = None (inputs, kernel, bias) = promote_dtype(inputs, kernel, bias, dtype=self.dtype) if self.shared_weights: y = self.conv_general_dilated(inputs, kernel, strides, padding_lax, lhs_dilation=input_dilation, rhs_dilation=kernel_dilation, dimension_numbers=dimension_numbers, feature_group_count=self.feature_group_count, precision=self.precision) else: y = lax.conv_general_dilated_local(lhs=inputs, rhs=kernel, window_strides=strides, padding=padding_lax, filter_shape=kernel_size, lhs_dilation=input_dilation, rhs_dilation=kernel_dilation, dimension_numbers=dimension_numbers, precision=self.precision) if self.use_bias: bias = bias.reshape((1,) * (y.ndim - bias.ndim) + bias.shape) y += bias if num_batch_dimensions != 1: output_shape = input_batch_shape + y.shape[1:] y = jnp.reshape(y, output_shape) return y class Conv(_Conv): @property def shared_weights(self) -> bool: return True # File: distil-whisper-main/training/flax/distil_whisper/modeling_flax_whisper.py """""" import random from functools import partial from typing import Dict, Optional, Tuple, Union import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen.attention import dot_product_attention_weights from flax.linen.partitioning import remat, scan_with_axes from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from jax.random import PRNGKey from transformers import WhisperConfig from transformers.generation.flax_logits_process import FlaxLogitsProcessor, FlaxLogitsProcessorList, FlaxWhisperTimeStampLogitsProcessor from transformers.modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxSeq2SeqLMOutput, FlaxSeq2SeqModelOutput from transformers.modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, append_replace_return_docstrings, overwrite_call_docstring from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .layers import Conv, DenseGeneral, Embed, LayerNorm, with_sharding_constraint logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = 'openai/whisper-tiny' _CONFIG_FOR_DOC = 'WhisperConfig' WHISPER_START_DOCSTRING = '\n This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.) This model is also a Flax Linen\n [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. 
Use it as a\n regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.\n Finally, this model supports inherent JAX features such as:\n - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)\n - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)\n - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)\n - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)\n\n Parameters:\n config ([`WhisperConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs). This can be used to enable mixed-precision training or half-precision\n inference on GPUs or TPUs. If specified all the computation will be performed with the given `dtype`.\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.** If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`]\n and [`~FlaxPreTrainedModel.to_bf16`].\n' WHISPER_INPUTS_DOCSTRING = "\n Args:\n input_features (`numpy.ndarray` of shape `(batch_size, feature_size, sequence_length)`):\n Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by\n loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via\n the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the\n [`WhisperFeatureExtractor`] should be used for extracting the features, padding and conversion into a\n tensor of type `numpy.ndarray`. See [`~WhisperFeatureExtractor.__call__`]\n attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Whisper does not support masking of the `input_features`, this argument is preserved for compatibility, but\n is not used. By default the silence in the input log mel spectrogram are ignored.\n decoder_input_ids (`numpy.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using\n [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.\n [What are decoder input IDs?](../glossary#decoder-input-ids) Whisper uses the `decoder_start_token_id` as\n the starting token for `decoder_input_ids` generation.\n decoder_attention_mask (`numpy.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default. If you want to change padding behavior, you should modify to your needs. 
See diagram 1\n in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.\n position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Whisper does not use `position_ids` in the encoder as `input_features` is always the same size and doesn't\n use masking, but this argument is preserved for compatibility. By default the silence in the input log mel\n spectrogram are ignored.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n" WHISPER_ENCODE_INPUTS_DOCSTRING = '\n Args:\n input_features (`numpy.ndarray` of shape `(batch_size, feature_size, sequence_length)`):\n Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by\n loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via\n the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the\n [`WhisperFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a\n tensor of type `numpy.ndarray`. See [`~WhisperFeatureExtractor.__call__`].\n attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Whisper does not support masking of the `input_features`, this argument is preserved for compatibility, but\n is not used. By default the silence in the input log mel spectrogram are ignored.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' WHISPER_DECODE_INPUTS_DOCSTRING = '\n Args:\n decoder_input_ids (`numpy.ndarray` of shape `(batch_size, target_sequence_length)`):\n Indices of decoder input sequence tokens in the vocabulary. Indices can be obtained using\n [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.\n [What are decoder input IDs?](../glossary#decoder-input-ids)\n encoder_outputs (`tuple(tuple(numpy.ndarray)`):\n Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)\n `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of\n hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.\n encoder_attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Whisper does not support masking of the `input_features`, this argument is preserved for compatibility,\n but it is not used. 
By default the silence in the input log mel spectrogram are ignored.\n decoder_attention_mask (`numpy.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):\n Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also\n be used by default. If you want to change padding behavior, you should modify to your needs. See diagram 1\n in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.\n decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the\n range `[0, config.max_position_embeddings - 1]`.\n past_key_values (`Dict[str, numpy.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):\n Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast\n auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' class FlaxStaticForceTokensLogitsProcessor(FlaxLogitsProcessor): def __init__(self, force_token_map): force_token_map = jnp.array(force_token_map) force_token_array = jnp.ones(3, dtype=jnp.int32) * -1 for (index, token) in force_token_map: force_token_array = force_token_array.at[index].set(token) self.force_token_array = jnp.int32(force_token_array) def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: def _force_token(generation_idx): batch_size = scores.shape[0] current_token = self.force_token_array[generation_idx] new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float('inf') updates = jnp.zeros((batch_size, 1), dtype=scores.dtype) new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token)) return new_scores scores = lax.cond(cur_len >= self.force_token_array.shape[0], lambda : scores, lambda : lax.cond(self.force_token_array[cur_len] >= 0, lambda : _force_token(cur_len), lambda : scores)) return scores class FlaxWhisperAttention(nn.Module): config: WhisperConfig embed_dim: int num_heads: int dropout: float = 0.0 causal: bool = False bias: bool = True dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 def setup(self) -> None: self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError(f'embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads}).') dense = partial(DenseGeneral, self.embed_dim, axis=-1, dtype=self.dtype, params_dtype=self.params_dtype, kernel_axes=('embed', 'joined_kv')) self.q_proj = dense(use_bias=self.bias) self.k_proj = dense(use_bias=False) self.v_proj = dense(use_bias=self.bias) self.out_proj = DenseGeneral(self.embed_dim, axis=-1, dtype=self.dtype, params_dtype=self.params_dtype, kernel_axes=('joined_kv', 'embed'), use_bias=self.bias) if self.causal: self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_target_positions), dtype='bool'), 
dtype='bool') def __call__(self, hidden_states: jnp.ndarray, key_value_states: Optional[jnp.ndarray]=None, attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, deterministic: bool=True) -> Tuple[jnp.ndarray]: is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] query_states = self.q_proj(hidden_states) if is_cross_attention: key_states = self.k_proj(key_value_states) value_states = self.v_proj(key_value_states) else: key_states = self.k_proj(hidden_states) value_states = self.v_proj(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) query_states = with_sharding_constraint(query_states, ('batch', 'length', 'heads', 'kv')) key_states = with_sharding_constraint(key_states, ('batch', 'length', 'heads', 'kv')) value_states = with_sharding_constraint(value_states, ('batch', 'length', 'heads', 'kv')) if self.causal: (query_length, key_length) = (query_states.shape[1], key_states.shape[1]) if self.has_variable('cache', 'cached_key'): mask_shift = self.variables['cache']['cache_index'] max_decoder_length = self.variables['cache']['cached_key'].shape[-1] causal_mask = lax.dynamic_slice(self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) if self.causal and (self.has_variable('cache', 'cached_key') or init_cache): (key_states, value_states, attention_mask) = self._concatenate_to_cache(key_states, value_states, query_states, attention_mask) if attention_mask is not None: attention_bias = lax.select(attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype)) else: attention_bias = None dropout_rng = None if not deterministic and self.dropout > 0.0: dropout_rng = self.make_rng('dropout') attn_weights = dot_product_attention_weights(query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.dropout, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None) attn_output = jnp.einsum('...hqk,...khd->...qhd', attn_weights, value_states) attn_output = self._merge_heads(attn_output) attn_output = self.out_proj(attn_output) return (attn_output, attn_weights) def _split_heads(self, hidden_state) -> jnp.ndarray: return hidden_state.reshape(hidden_state.shape[:2] + (self.num_heads, self.head_dim)) def _merge_heads(self, hidden_state) -> jnp.ndarray: return hidden_state.reshape(hidden_state.shape[:2] + (self.embed_dim,)) @nn.compact def _concatenate_to_cache(self, key, value, query, attention_mask): is_initialized = self.has_variable('cache', 'cached_key') def swap_dims(x): return x[:-3] + tuple((x[i] for i in [-2, -1, -3])) cached_key = self.variable('cache', 'cached_key', jnp.zeros, swap_dims(key.shape), key.dtype) cached_value = self.variable('cache', 'cached_value', jnp.zeros, swap_dims(value.shape), value.dtype) cache_index = self.variable('cache', 'cache_index', lambda : jnp.array(0, 
dtype=jnp.int32)) if is_initialized: (batch_size, num_heads, head_dim, seq_length) = cached_key.value.shape num_updated_cache_vectors = query.shape[1] expected_shape = (batch_size, 1, num_heads, head_dim) if num_updated_cache_vectors == 1 and expected_shape != query.shape: raise ValueError(f'Autoregressive cache shape error, expected query shape {expected_shape} instead got {query.shape}') cur_index = cache_index.value one_token_key = jnp.moveaxis(key, -3, -1) one_token_value = jnp.moveaxis(value, -3, -1) if num_updated_cache_vectors > 1: indices = jnp.eye(num_updated_cache_vectors, seq_length)[None, None] key = cached_key.value + jnp.matmul(one_token_key, indices) value = cached_value.value + jnp.matmul(one_token_value, indices) else: one_hot_indices = jax.nn.one_hot(cur_index, seq_length, dtype=key.dtype) key = cached_key.value + one_token_key * one_hot_indices value = cached_value.value + one_token_value * one_hot_indices cached_key.value = key cached_value.value = value cache_index.value = cache_index.value + num_updated_cache_vectors key = jnp.moveaxis(key, -1, -3) value = jnp.moveaxis(value, -1, -3) pad_mask = jnp.broadcast_to(jnp.arange(seq_length) < cur_index + num_updated_cache_vectors, (batch_size,) + (1, num_updated_cache_vectors, seq_length)) attention_mask = combine_masks(pad_mask, attention_mask) return (key, value, attention_mask) class FlaxWhisperEncoderLayer(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 use_scan: bool = False def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxWhisperAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.encoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, params_dtype=self.params_dtype) self.self_attn_layer_norm = LayerNorm(dtype=self.dtype, epsilon=1e-05, params_dtype=self.params_dtype) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.fc1 = DenseGeneral(self.config.encoder_ffn_dim, dtype=self.dtype, params_dtype=self.params_dtype, kernel_axes=('embed', 'mlp')) self.fc2 = DenseGeneral(self.embed_dim, dtype=self.dtype, params_dtype=self.params_dtype, kernel_axes=('mlp', 'embed')) self.final_layer_norm = LayerNorm(dtype=self.dtype, epsilon=1e-05, params_dtype=self.params_dtype) def __call__(self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, output_attentions: bool=True, deterministic: bool=True, all_hidden_states=None) -> Tuple[jnp.ndarray]: if self.use_scan: hidden_states = hidden_states[0] hidden_states = with_sharding_constraint(hidden_states, ('batch', 'length', 'embed')) residual = hidden_states layernorm_output = self.self_attn_layer_norm(hidden_states) layernorm_output = with_sharding_constraint(layernorm_output, ('batch', 'length', 'embed')) (attn_output, attn_weights) = self.self_attn(hidden_states=layernorm_output, attention_mask=attention_mask) attn_output = self.dropout_layer(attn_output, deterministic=deterministic) attn_output = residual + attn_output attn_output = with_sharding_constraint(attn_output, ('batch', 'length', 'embed')) residual = attn_output post_layer_norm = self.final_layer_norm(attn_output) post_layer_norm = with_sharding_constraint(post_layer_norm, ('batch', 'length', 'embed')) fc1_output = self.activation_fn(self.fc1(post_layer_norm)) fc1_output = self.activation_dropout_layer(fc1_output, 
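# --- Added illustration (hedged sketch, toy shapes): the one-hot cache update performed by
# _concatenate_to_cache above. During autoregressive decoding, one new key/value column is
# written into a pre-allocated cache at position cache_index without any dynamic shapes,
# which keeps the per-step decode function compilable with XLA.
import jax
import jax.numpy as jnp
cache = jnp.zeros((1, 2, 4))                    # (heads, head_dim, max_decoder_length)
new_column = jnp.ones((1, 2, 1))                # key/value projection of one new token
cache_index = 2
one_hot = jax.nn.one_hot(cache_index, 4, dtype=cache.dtype)   # [0., 0., 1., 0.]
cache = cache + new_column * one_hot            # writes column 2, leaves the rest untouched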
deterministic=deterministic) fc1_output = with_sharding_constraint(fc1_output, ('batch', 'length', 'mlp')) hidden_states = self.fc2(fc1_output) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = with_sharding_constraint(hidden_states, ('batch', 'length', 'embed')) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) if self.use_scan: if all_hidden_states is not None: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (outputs, all_hidden_states) return outputs class FlaxWhisperEncoderLayerCollection(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 use_scan: bool = False gradient_checkpointing: bool = False @nn.compact def __call__(self, hidden_states, attention_mask, deterministic: bool=True, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None FlaxWhisperEncoderCheckpointLayer = remat(FlaxWhisperEncoderLayer, static_argnums=(2, 3), prevent_cse=not self.use_scan) if self.gradient_checkpointing else FlaxWhisperEncoderLayer if self.use_scan: if output_attentions: raise ValueError('Cannot use `scan` with `output_attentions` set to True') input_hidden_states = hidden_states hidden_states = (hidden_states,) (hidden_states, all_hidden_states) = scan_with_axes(FlaxWhisperEncoderCheckpointLayer, variable_axes={'params': 0, 'cache': 0}, split_rngs={'params': True, 'dropout': True}, in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast), variable_carry='all_hidden_states', length=self.config.encoder_layers)(self.config, dtype=self.dtype, params_dtype=self.params_dtype, use_scan=True, name='FlaxEncoderScanLayers')(hidden_states, attention_mask, output_attentions, deterministic, all_hidden_states) hidden_states = hidden_states[0] if output_hidden_states: all_hidden_states = jnp.vstack([input_hidden_states[None, ...], all_hidden_states[0]]) else: for layer_idx in range(self.config.encoder_layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) dropout_probability = random.uniform(0, 1) if not deterministic and dropout_probability < self.config.encoder_layerdrop: layer_outputs = (None, None) else: layer_outputs = FlaxWhisperEncoderCheckpointLayer(self.config, dtype=self.dtype, params_dtype=self.params_dtype, name=str(layer_idx))(hidden_states, attention_mask, output_attentions, deterministic) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions) if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions) class FlaxWhisperDecoderLayer(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 use_scan: bool = False def setup(self) -> None: self.embed_dim = self.config.d_model self.self_attn = FlaxWhisperAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, causal=True, dtype=self.dtype, params_dtype=self.params_dtype) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.activation_fn = 
ACT2FN[self.config.activation_function] self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout) self.self_attn_layer_norm = LayerNorm(dtype=self.dtype, epsilon=1e-05, params_dtype=self.params_dtype) self.encoder_attn = FlaxWhisperAttention(config=self.config, embed_dim=self.embed_dim, num_heads=self.config.decoder_attention_heads, dropout=self.config.attention_dropout, dtype=self.dtype, params_dtype=self.params_dtype) self.encoder_attn_layer_norm = LayerNorm(dtype=self.dtype, epsilon=1e-05, params_dtype=self.params_dtype) self.fc1 = DenseGeneral(self.config.decoder_ffn_dim, dtype=self.dtype, params_dtype=self.params_dtype, kernel_axes=('embed', 'mlp')) self.fc2 = DenseGeneral(self.embed_dim, dtype=self.dtype, params_dtype=self.params_dtype, kernel_axes=('mlp', 'embed')) self.final_layer_norm = LayerNorm(dtype=self.dtype, epsilon=1e-05, params_dtype=self.params_dtype) def __call__(self, hidden_states: jnp.ndarray, attention_mask: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, init_cache: bool=False, output_attentions: bool=True, deterministic: bool=True, all_hidden_states=None) -> Tuple[jnp.ndarray]: if self.use_scan: hidden_states = hidden_states[0] hidden_states = with_sharding_constraint(hidden_states, ('batch', 'length', 'embed')) residual = hidden_states layer_norm_output = self.self_attn_layer_norm(hidden_states) layer_norm_output = with_sharding_constraint(layer_norm_output, ('batch', 'length', 'embed')) (self_attn_output, self_attn_weights) = self.self_attn(hidden_states=layer_norm_output, attention_mask=attention_mask, init_cache=init_cache) self_attn_output = self.dropout_layer(self_attn_output, deterministic=deterministic) self_attn_output = residual + self_attn_output self_attn_output = with_sharding_constraint(self_attn_output, ('batch', 'length', 'embed')) cross_attn_weights = None if encoder_hidden_states is not None: residual = self_attn_output encoder_layer_norm_output = self.encoder_attn_layer_norm(self_attn_output) encoder_layer_norm_output = with_sharding_constraint(encoder_layer_norm_output, ('batch', 'length', 'embed')) (cross_attn_output, cross_attn_weights) = self.encoder_attn(hidden_states=encoder_layer_norm_output, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask) cross_attn_output = self.dropout_layer(cross_attn_output, deterministic=deterministic) cross_attn_output = residual + cross_attn_output cross_attn_output = with_sharding_constraint(cross_attn_output, ('batch', 'length', 'embed')) residual = cross_attn_output post_layer_norm = self.final_layer_norm(cross_attn_output) post_layer_norm = with_sharding_constraint(post_layer_norm, ('batch', 'length', 'embed')) fc1_output = self.activation_fn(self.fc1(post_layer_norm)) fc1_output = self.activation_dropout_layer(fc1_output, deterministic=deterministic) fc1_output = with_sharding_constraint(fc1_output, ('batch', 'length', 'mlp')) hidden_states = self.fc2(fc1_output) hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) hidden_states = residual + hidden_states hidden_states = with_sharding_constraint(hidden_states, ('batch', 'length', 'embed')) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if self.use_scan: if all_hidden_states is not None: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (outputs, all_hidden_states) return outputs class FlaxWhisperDecoderLayerCollection(nn.Module): config: 
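# Note (added comment): FlaxWhisperDecoderLayer above follows a pre-norm residual pattern:
# LayerNorm -> self-attention (causal, optionally cached) -> residual add, then
# LayerNorm -> cross-attention over the encoder hidden states -> residual add, then
# LayerNorm -> MLP (fc1, activation, fc2) -> residual add, with a sharding constraint
# applied to the activations after each step.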
WhisperConfig dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 use_scan: bool = False gradient_checkpointing: bool = False @nn.compact def __call__(self, hidden_states, attention_mask, encoder_hidden_states: Optional[jnp.ndarray]=None, encoder_attention_mask: Optional[jnp.ndarray]=None, deterministic: bool=True, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True): all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if output_attentions and encoder_hidden_states is not None else None FlaxWhisperDecoderCheckpointLayer = remat(FlaxWhisperDecoderLayer, static_argnums=(4, 5, 6), prevent_cse=not self.use_scan) if self.gradient_checkpointing else FlaxWhisperDecoderLayer if self.use_scan: if output_attentions: raise ValueError('Cannot use `scan` with `output_attentions` set to True') input_hidden_states = hidden_states hidden_states = (hidden_states,) (hidden_states, all_hidden_states) = scan_with_axes(FlaxWhisperDecoderCheckpointLayer, variable_axes={'params': 0, 'cache': 0}, split_rngs={'params': True, 'dropout': True}, in_axes=(nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast, nn.broadcast), variable_carry='all_hidden_states', length=self.config.decoder_layers)(self.config, dtype=self.dtype, params_dtype=self.params_dtype, use_scan=True, name='FlaxDecoderScanLayers')(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, init_cache, output_attentions, deterministic, all_hidden_states) hidden_states = hidden_states[0] if output_hidden_states: all_hidden_states = jnp.vstack([input_hidden_states[None, ...], all_hidden_states[0]]) else: for layer_idx in range(self.config.decoder_layers): if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if not deterministic and dropout_probability < self.config.decoder_layerdrop: layer_outputs = (None, None, None) else: layer_outputs = FlaxWhisperDecoderCheckpointLayer(self.config, dtype=self.dtype, params_dtype=self.params_dtype, name=str(layer_idx))(hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, init_cache, output_attentions, deterministic) hidden_states = layer_outputs[0] if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions] if not return_dict: return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions) class FlaxWhisperEncoder(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 use_scan: bool = False gradient_checkpointing: bool = False def setup(self) -> None: self.conv1 = Conv(self.config.d_model, kernel_size=(3,), padding=1, dtype=self.dtype, params_dtype=self.params_dtype, kernel_axes=('channels', 'num_mel', 'embed')) self.conv2 = Conv(self.config.d_model, kernel_size=(3,), strides=2, padding=1, dtype=self.dtype, params_dtype=self.params_dtype, kernel_axes=('channels', 'embed', 'num_mel')) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.layers = 
FlaxWhisperEncoderLayerCollection(self.config, dtype=self.dtype, params_dtype=self.params_dtype, use_scan=self.use_scan, gradient_checkpointing=self.gradient_checkpointing) self.embed_positions = Embed(self.config.max_source_positions, self.config.d_model, dtype=self.dtype, params_dtype=self.params_dtype) self.layer_norm = LayerNorm(dtype=self.dtype, epsilon=1e-05, params_dtype=self.params_dtype) def __call__(self, input_features: jnp.ndarray, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True) -> Tuple[jnp.ndarray]: if input_features.shape[1:] != (self.config.num_mel_bins, self.config.max_source_positions * 2): raise ValueError(f'input_features.shape[1:], must be equal to (self.config.num_mel_bins, self.config.max_source_positions * 2) (got {input_features.shape[1:]}, but should be ({self.config.num_mel_bins}, {self.config.max_source_positions * 2}))') input_features = input_features.transpose(0, 2, 1) hidden_states = jax.nn.gelu(self.conv1(input_features), approximate=False) hidden_states = with_sharding_constraint(hidden_states, ('batch', 'embed', 'num_mel')) hidden_states = jax.nn.gelu(self.conv2(hidden_states), approximate=False) hidden_states = with_sharding_constraint(hidden_states, ('batch', 'length', 'embed')) embed_positions = self.embed_positions(jnp.arange(self.config.max_source_positions)) embed_positions = jax.lax.stop_gradient(embed_positions) hidden_states = hidden_states + embed_positions hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers(hidden_states, attention_mask=None, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_states = outputs[0] last_hidden_states = self.layer_norm(last_hidden_states) hidden_states = None if output_hidden_states: hidden_states = outputs[1] if self.use_scan: hidden_states = jnp.vstack([hidden_states[:-1], last_hidden_states[None, ...]]) else: hidden_states = hidden_states[:-1] + (last_hidden_states,) if not return_dict: outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutput(last_hidden_state=last_hidden_states, hidden_states=hidden_states, attentions=outputs.attentions) class FlaxWhisperDecoder(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 use_scan: bool = False gradient_checkpointing: bool = False def setup(self) -> None: self.embed_tokens = Embed(self.config.vocab_size, self.config.d_model, dtype=self.dtype, params_dtype=self.params_dtype) self.embed_positions = Embed(self.config.max_target_positions, self.config.d_model, dtype=self.dtype, params_dtype=self.params_dtype) self.layers = FlaxWhisperDecoderLayerCollection(self.config, dtype=self.dtype, params_dtype=self.params_dtype, use_scan=self.use_scan, gradient_checkpointing=self.gradient_checkpointing) self.dropout_layer = nn.Dropout(rate=self.config.dropout) self.layer_norm = LayerNorm(dtype=self.dtype, epsilon=1e-05, params_dtype=self.params_dtype) def __call__(self, input_ids: jnp.ndarray, attention_mask: jnp.ndarray, position_ids: jnp.ndarray, encoder_hidden_states: Optional[jnp.ndarray]=None, init_cache: bool=False, output_attentions: bool=False, output_hidden_states: bool=False, return_dict: bool=True, deterministic: bool=True) -> Tuple[jnp.ndarray]: input_embeds = self.embed_tokens(input_ids) 
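# Note (added comment): the decoder input representation is the sum of the learned token
# embeddings computed on the previous line and the learned position embeddings computed on
# the next line. Unlike the encoder, whose position embeddings are wrapped in
# jax.lax.stop_gradient above, the decoder position embeddings remain trainable here.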
position_embeds = self.embed_positions(position_ids) hidden_states = input_embeds + position_embeds hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) outputs = self.layers(hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, deterministic=deterministic, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict) last_hidden_states = outputs[0] last_hidden_states = self.layer_norm(last_hidden_states) hidden_states = None if output_hidden_states: hidden_states = outputs[1] if self.use_scan: hidden_states = jnp.vstack([hidden_states[:-1], last_hidden_states[None, ...]]) else: hidden_states = hidden_states[:-1] + (last_hidden_states,) if not return_dict: outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:]) return tuple((v for v in outputs if v is not None)) return FlaxBaseModelOutputWithPastAndCrossAttentions(last_hidden_state=last_hidden_states, hidden_states=hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions) class FlaxWhisperModule(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 use_scan: bool = False gradient_checkpointing: bool = False def setup(self) -> None: self.encoder = FlaxWhisperEncoder(self.config, dtype=self.dtype, params_dtype=self.params_dtype, use_scan=self.use_scan, gradient_checkpointing=self.gradient_checkpointing) self.decoder = FlaxWhisperDecoder(self.config, dtype=self.dtype, params_dtype=self.params_dtype, use_scan=self.use_scan, gradient_checkpointing=self.gradient_checkpointing) def __call__(self, input_features: jnp.ndarray, decoder_input_ids: jnp.ndarray, decoder_attention_mask: jnp.ndarray, decoder_position_ids: jnp.ndarray, output_attentions: bool=False, output_hidden_states: bool=False, freeze_encoder: bool=False, return_dict: bool=True, deterministic: bool=True): encoder_outputs = self.encoder(input_features, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) encoder_hidden_states = encoder_outputs[0] if freeze_encoder: encoder_hidden_states = jax.lax.stop_gradient(encoder_hidden_states) decoder_outputs = self.decoder(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=deterministic) if not return_dict: return decoder_outputs + encoder_outputs return FlaxSeq2SeqModelOutput(last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions) def _get_encoder_module(self): return self.encoder def _get_decoder_module(self): return self.decoder class FlaxWhisperPreTrainedModel(FlaxPreTrainedModel): config_class = WhisperConfig base_model_prefix: str = 'model' main_input_name = 'input_features' module_class: nn.Module = None def __init__(self, config: WhisperConfig, input_shape: Tuple[int, int, int]=None, seed: int=0, dtype: jnp.dtype=jnp.float32, params_dtype: jnp.dtype=jnp.float32, _do_init: bool=True, 
use_scan: bool=False, gradient_checkpointing: bool=False, **kwargs): self.use_scan = use_scan self.gradient_checkpointing = gradient_checkpointing module = self.module_class(config=config, dtype=dtype, params_dtype=params_dtype, use_scan=use_scan, gradient_checkpointing=gradient_checkpointing, **kwargs) if input_shape is None: input_shape = (1, config.num_mel_bins, 2 * config.max_source_positions) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict=None) -> FrozenDict: input_features = jnp.zeros(input_shape, dtype='f4') input_features = input_features.at[..., -1].set(self.config.eos_token_id) decoder_input_ids = jnp.zeros((input_shape[0], 1), dtype='i4') decoder_attention_mask = jnp.ones_like(decoder_input_ids) (batch_size, sequence_length) = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) (params_rng, dropout_rng) = jax.random.split(rng) rngs = {'params': params_rng, 'dropout': dropout_rng} random_params = self.module.init(rngs, input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids)['params'] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params def enable_gradient_checkpointing(self): self.gradient_checkpointing = True self._module = self.module_class(config=self.config, dtype=self.dtype, use_scan=self.use_scan, gradient_checkpointing=self.gradient_checkpointing) def enable_scan(self): self.use_scan = True self._module = self.module_class(config=self.config, dtype=self.dtype, use_scan=self.use_scan, gradient_checkpointing=self.gradient_checkpointing) init_fn = partial(self.init_weights, input_shape=self.input_shape) params_shape_tree = jax.eval_shape(init_fn, self.key) self._params_shape_tree = params_shape_tree self._required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys()) if self._is_initialized: self.params = self.convert_unroll_to_scan(self.params) def disable_scan(self): self.use_scan = False self._module = self.module_class(config=self.config, dtype=self.dtype, use_scan=self.use_scan, gradient_checkpointing=self.gradient_checkpointing) init_fn = partial(self.init_weights, input_shape=self.input_shape) params_shape_tree = jax.eval_shape(init_fn, self.key) self._params_shape_tree = params_shape_tree self._required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys()) if self._is_initialized: self.params = self.convert_scan_to_unroll(self.params) def convert_unroll_to_scan(self, params: Union[Dict, FrozenDict]): if isinstance(params, FrozenDict): params = unfreeze(params) params = flatten_dict(params, sep='/') keys = list(params.keys()) for k in keys: if 'layers/0' in k: if 'decoder' in k: block_prefix = 'Decoder' num_hidden_layers = self.config.decoder_layers else: block_prefix = 'Encoder' num_hidden_layers = self.config.encoder_layers scan_key = k.replace('0', f'Flax{block_prefix}ScanLayers') stacked_params = [] for i in range(num_hidden_layers): unrolled_layer = params.pop(k.replace('0', str(i))) stacked_params.append(unrolled_layer) params[scan_key] = jnp.stack(stacked_params) params = 
unflatten_dict(params, sep='/') return params def convert_scan_to_unroll(self, params: Union[Dict, FrozenDict]): if isinstance(params, FrozenDict): params = unfreeze(params) params = flatten_dict(params, sep='/') keys = list(params.keys()) for k in keys: if 'FlaxEncoderScanLayers' in k: scan_layer = params.pop(k) for i in range(self.config.encoder_layers): unrolled_key = k.replace('FlaxEncoderScanLayers', str(i)) (params[unrolled_key], scan_layer) = (scan_layer[0], scan_layer[1:]) elif 'FlaxDecoderScanLayers' in k: scan_layer = params.pop(k) for i in range(self.config.decoder_layers): unrolled_key = k.replace('FlaxDecoderScanLayers', str(i)) (params[unrolled_key], scan_layer) = (scan_layer[0], scan_layer[1:]) params = unflatten_dict(params, sep='/') return params def init_cache(self, batch_size, max_length, encoder_outputs): decoder_input_ids = jnp.ones((batch_size, max_length), dtype='i4') decoder_attention_mask = jnp.ones_like(decoder_input_ids) decoder_position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape) def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs) init_variables = self.module.init(jax.random.PRNGKey(0), decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], init_cache=True, method=_decoder_forward) return unfreeze(init_variables['cache']) @add_start_docstrings(WHISPER_ENCODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=WhisperConfig) def encode(self, input_features: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None, **kwargs): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng def _encoder_forward(module, input_features, **kwargs): encode_module = module._get_encoder_module() return encode_module(input_features, **kwargs) return self.module.apply({'params': params or self.params}, input_features=jnp.array(input_features, dtype='f4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, method=_encoder_forward) @add_start_docstrings(WHISPER_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=WhisperConfig) def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: dict=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else 
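# --- Added illustration (hedged sketch with a hypothetical parameter tree): what
# convert_unroll_to_scan above does to the flattened parameter dict. Per-layer kernels stored
# under '.../layers/0', '.../layers/1', ... are stacked along a new leading axis under a single
# 'Flax...ScanLayers' key so that nn.scan can iterate over them; convert_scan_to_unroll
# reverses the operation by popping slices off that leading axis.
import jax.numpy as jnp
unrolled = {f'model/encoder/layers/{i}/fc1/kernel': jnp.full((2, 2), float(i)) for i in range(4)}
stacked = jnp.stack([unrolled[f'model/encoder/layers/{i}/fc1/kernel'] for i in range(4)])
# stacked.shape == (4, 2, 2): a leading "layers" axis that the scanned module consumes.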
self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] (batch_size, sequence_length) = decoder_input_ids.shape if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') if decoder_attention_mask is not None: decoder_position_ids = decoder_attention_mask.cumsum(-1) * decoder_attention_mask - 1 else: decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length)) rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() return decoder_module(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is not None and return_dict: (outputs, past) = outputs outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): (outputs, past) = outputs outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) def __call__(self, input_features: jnp.ndarray, decoder_input_ids: jnp.ndarray, attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, position_ids: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, output_attentions: Optional[bool]=None, output_hidden_states: Optional[bool]=None, freeze_encoder: Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict if decoder_position_ids is None: if decoder_attention_mask is not None: decoder_position_ids = decoder_attention_mask.cumsum(-1) * decoder_attention_mask - 1 else: (batch_size, sequence_length) = decoder_input_ids.shape decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones_like(decoder_input_ids) rngs = {'dropout': dropout_rng} if dropout_rng is not None else {} return self.module.apply({'params': params or self.params}, input_features=jnp.array(input_features, dtype='f4'), 
decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), output_attentions=output_attentions, output_hidden_states=output_hidden_states, freeze_encoder=freeze_encoder, return_dict=return_dict, deterministic=not train, rngs=rngs) @add_start_docstrings('The bare Whisper Model transformer outputting raw hidden-states without any specific head on top.', WHISPER_START_DOCSTRING) class FlaxWhisperModel(FlaxWhisperPreTrainedModel): config: WhisperConfig dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 module_class = FlaxWhisperModule append_call_sample_docstring(FlaxWhisperModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC) class FlaxWhisperForConditionalGenerationModule(nn.Module): config: WhisperConfig dtype: jnp.dtype = jnp.float32 params_dtype: jnp.dtype = jnp.float32 use_scan: bool = False gradient_checkpointing: bool = False def setup(self) -> None: self.model = FlaxWhisperModule(config=self.config, dtype=self.dtype, params_dtype=self.params_dtype, use_scan=self.use_scan, gradient_checkpointing=self.gradient_checkpointing) self.lm_head = DenseGeneral(self.config.vocab_size, use_bias=False, dtype=self.dtype, params_dtype=self.params_dtype, kernel_axes=('embed', 'vocab')) def _get_encoder_module(self): return self.model.encoder def _get_decoder_module(self): return self.model.decoder def __call__(self, input_features, decoder_input_ids, decoder_attention_mask: jnp.ndarray=None, decoder_position_ids: jnp.ndarray=None, position_ids: jnp.ndarray=None, attention_mask: jnp.ndarray=None, output_attentions: bool=False, output_hidden_states: bool=False, freeze_encoder: bool=False, return_dict: bool=True, deterministic: bool=True): outputs = self.model(input_features=input_features, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, freeze_encoder=freeze_encoder, return_dict=return_dict, deterministic=deterministic) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.model.decoder.embed_tokens.variables['params']['embedding'] lm_logits = self.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: lm_logits = self.lm_head(hidden_states) if not return_dict: output = (lm_logits,) + outputs[1:] return output return FlaxSeq2SeqLMOutput(logits=lm_logits, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions) @add_start_docstrings('The Whisper Model with a language modeling head.', WHISPER_START_DOCSTRING) class FlaxWhisperForConditionalGeneration(FlaxWhisperPreTrainedModel): module_class = FlaxWhisperForConditionalGenerationModule @add_start_docstrings(WHISPER_DECODE_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=WhisperConfig) def decode(self, decoder_input_ids, encoder_outputs, encoder_attention_mask: Optional[jnp.ndarray]=None, decoder_attention_mask: Optional[jnp.ndarray]=None, decoder_position_ids: Optional[jnp.ndarray]=None, past_key_values: dict=None, output_attentions: Optional[bool]=None, output_hidden_states: 
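# --- Added illustration (hedged sketch, toy shapes): the weight tying used by
# FlaxWhisperForConditionalGenerationModule above. When config.tie_word_embeddings is True,
# the LM head applies the decoder token-embedding matrix (transposed) as its kernel instead
# of learning a separate output projection.
import jax.numpy as jnp
hidden_states = jnp.ones((1, 4, 8))       # (batch, seq_len, d_model)
shared_embedding = jnp.ones((16, 8))      # (vocab_size, d_model), the decoder embedding table
lm_logits = hidden_states @ shared_embedding.T
# lm_logits.shape == (1, 4, 16) == (batch, seq_len, vocab_size)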
Optional[bool]=None, return_dict: Optional[bool]=None, train: bool=False, params: dict=None, dropout_rng: PRNGKey=None): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states return_dict = return_dict if return_dict is not None else self.config.return_dict encoder_hidden_states = encoder_outputs[0] (batch_size, sequence_length) = decoder_input_ids.shape if decoder_position_ids is None: if past_key_values is not None: raise ValueError('Make sure to provide `decoder_position_ids` when passing `past_key_values`.') if decoder_attention_mask is not None: decoder_position_ids = decoder_attention_mask.cumsum(-1) * decoder_attention_mask - 1 else: decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) if decoder_attention_mask is None: decoder_attention_mask = jnp.ones((batch_size, sequence_length), dtype='i4') rngs = {} if dropout_rng is not None: rngs['dropout'] = dropout_rng inputs = {'params': params or self.params} if past_key_values: inputs['cache'] = past_key_values mutable = ['cache'] else: mutable = False def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs): decoder_module = module._get_decoder_module() outputs = decoder_module(input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, **kwargs) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = module.model.decoder.embed_tokens.variables['params']['embedding'] lm_logits = module.lm_head.apply({'params': {'kernel': shared_embedding.T}}, hidden_states) else: lm_logits = module.lm_head(hidden_states) return (lm_logits, outputs) outputs = self.module.apply(inputs, decoder_input_ids=jnp.array(decoder_input_ids, dtype='i4'), decoder_attention_mask=jnp.array(decoder_attention_mask, dtype='i4'), decoder_position_ids=jnp.array(decoder_position_ids, dtype='i4'), encoder_hidden_states=encoder_hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, deterministic=not train, rngs=rngs, mutable=mutable, method=_decoder_forward) if past_key_values is None: (lm_logits, decoder_outputs) = outputs else: ((lm_logits, decoder_outputs), past) = outputs if return_dict: outputs = FlaxCausalLMOutputWithCrossAttentions(logits=lm_logits, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions) else: outputs = (lm_logits,) + decoder_outputs[1:] if past_key_values is not None and return_dict: outputs['past_key_values'] = unfreeze(past['cache']) return outputs elif past_key_values is not None and (not return_dict): outputs = outputs[:1] + (unfreeze(past['cache']),) + outputs[1:] return outputs def generate(self, input_features, generation_config=None, logits_processor=None, return_timestamps=None, task=None, language=None, is_multilingual=None, **kwargs): if generation_config is None: generation_config = self.generation_config if return_timestamps is not None: generation_config.return_timestamps = return_timestamps if task is not None: generation_config.task = task if is_multilingual is not None: generation_config.is_multilingual = is_multilingual if language is not None: generation_config.language = language if kwargs is not None and 'decoder_input_ids' in kwargs: 
decoder_input_length = len(kwargs['decoder_input_ids']) else: decoder_input_length = 1 forced_decoder_ids = [] if hasattr(generation_config, 'is_multilingual') and generation_config.is_multilingual: if hasattr(generation_config, 'language'): forced_decoder_ids.append((1, generation_config.lang_to_id[generation_config.language])) else: forced_decoder_ids.append((1, None)) if hasattr(generation_config, 'task'): forced_decoder_ids.append((2, generation_config.task_to_id[generation_config.task])) else: forced_decoder_ids.append((2, generation_config.task_to_id['transcribe'])) if hasattr(generation_config, 'return_timestamps') and generation_config.return_timestamps or return_timestamps: logits_processor = [FlaxWhisperTimeStampLogitsProcessor(generation_config, self.config, decoder_input_length)] elif forced_decoder_ids and forced_decoder_ids[-1][0] != generation_config.no_timestamps_token_id: idx = forced_decoder_ids[-1][0] + 1 if forced_decoder_ids else 1 forced_decoder_ids.append((idx, generation_config.no_timestamps_token_id)) if len(forced_decoder_ids) > 0: generation_config.forced_decoder_ids = forced_decoder_ids return super().generate(input_features, generation_config, logits_processor=logits_processor, **kwargs) def pipeline_generate(self, input_features, forced_decoder_ids, return_timestamps=False, generation_config=None, **kwargs): if generation_config is None: generation_config = self.generation_config generation_config.forced_decoder_ids = None logits_processor = FlaxLogitsProcessorList() logits_processor.append(FlaxStaticForceTokensLogitsProcessor(forced_decoder_ids)) if hasattr(generation_config, 'return_timestamps') and return_timestamps: logits_processor.append(FlaxWhisperTimeStampLogitsProcessor(generation_config, self.config, 1)) return super().generate(input_features, generation_config, logits_processor=logits_processor, **kwargs) def prepare_inputs_for_generation(self, decoder_input_ids, max_length, attention_mask: Optional[jax.Array]=None, decoder_attention_mask: Optional[jax.Array]=None, encoder_outputs=None, **kwargs): (batch_size, seq_length) = decoder_input_ids.shape past_key_values = self.init_cache(batch_size, max_length, encoder_outputs) extended_attention_mask = jnp.ones((batch_size, max_length), dtype='i4') if decoder_attention_mask is not None: position_ids = decoder_attention_mask.cumsum(-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype='i4')[None, :], (batch_size, seq_length)) return {'past_key_values': past_key_values, 'encoder_outputs': encoder_outputs, 'encoder_attention_mask': attention_mask, 'decoder_attention_mask': extended_attention_mask, 'decoder_position_ids': position_ids} def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs['past_key_values'] = model_outputs.past_key_values model_kwargs['decoder_position_ids'] = model_kwargs['decoder_position_ids'][:, -1:] + 1 return model_kwargs FLAX_WHISPER_CONDITIONAL_GENERATION_DOCSTRING = '\n Returns:\n\n Transcription example:\n\n ```python\n >>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration\n >>> from datasets import load_dataset\n\n >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")\n >>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)\n >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")\n >>> 
inputs = processor(ds[0]["audio"]["array"], return_tensors="np")\n >>> input_features = inputs.input_features\n >>> generated_ids = model.generate(input_ids=input_features)\n >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]\n >>> transcription\n \' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.\'\n ```\n' overwrite_call_docstring(FlaxWhisperForConditionalGeneration, WHISPER_INPUTS_DOCSTRING + FLAX_WHISPER_CONDITIONAL_GENERATION_DOCSTRING) append_replace_return_docstrings(FlaxWhisperForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) # File: distil-whisper-main/training/flax/distil_whisper/partitioner.py """""" import abc import collections import dataclasses import typing from typing import Any, Callable, Optional, Sequence, Tuple, Union import cached_property import jax import numpy as np from absl import logging from flax import traverse_util from flax.linen import partitioning as flax_partitioning from jax import numpy as jnp from jax import random from jax.experimental import multihost_utils from jax.experimental.mesh_utils import create_hybrid_device_mesh from jax.experimental.pjit import pjit as jax_pjit from jax.sharding import Mesh, PartitionSpec JaxDevice = Any TpuMesh = Tuple[int, int, int, int] OtherMesh = Tuple[int, int] HardwareMesh = Union[TpuMesh, OtherMesh] PyTreeDef = type(jax.tree_util.tree_structure(None)) TrainState = Any LogicalAxisRules = Sequence[Tuple[str, Optional[str]]] if typing.TYPE_CHECKING: cached_property = property else: cached_property = cached_property.cached_property class AxisNames(tuple): def __new__(cls, *names): return tuple.__new__(AxisNames, names) def __repr__(self): return 'AxisNames%s' % tuple.__repr__(self) def pjit(fun: Callable, in_axis_resources, out_axis_resources, static_argnums: Union[int, Sequence[int]]=(), donate_argnums: Union[int, Sequence[int]]=(), backend: Optional[str]=None): del backend return jax_pjit(fun, in_axis_resources, out_axis_resources, static_argnums=static_argnums, donate_argnums=donate_argnums) def pjit_with_cpu_fallback(fun: Callable, in_axis_resources, out_axis_resources, static_argnums: Union[int, Sequence[int]]=(), donate_argnums: Union[int, Sequence[int]]=(), backend: Optional[str]=None): if jax.devices(backend)[0].platform == 'cpu': return jax.jit(fun, static_argnums=static_argnums, donate_argnums=donate_argnums) else: return jax_pjit(fun, in_axis_resources, out_axis_resources, static_argnums=static_argnums, donate_argnums=donate_argnums) def with_sharding_constraint(x, axis_resources): if jax.devices()[0].platform == 'cpu' or not global_mesh_defined(): return x else: return jax.experimental.pjit.with_sharding_constraint(x, axis_resources) def bounds_from_last_device(last_device: JaxDevice) -> HardwareMesh: if hasattr(last_device, 'coords'): (x, y, z) = last_device.coords return (x + 1, y + 1, z + 1, last_device.core_on_chip + 1) else: return (jax.host_count(), jax.local_device_count()) def get_coords(device: JaxDevice) -> HardwareMesh: if hasattr(device, 'coords'): return (*device.coords, device.core_on_chip) return (device.process_index, device.id % jax.local_device_count()) def global_mesh_defined(): maps_env = jax.experimental.maps.thread_resources.env return maps_env.physical_mesh.devices.shape != () def get_mesh(model_parallel_submesh: HardwareMesh, input_devices: Sequence[JaxDevice]=(), input_local_devices: Sequence[JaxDevice]=(), tile_by_host_if_needed: bool=True, backend: 
Optional[str]=None) -> Mesh: input_devices = input_devices or jax.devices(backend) input_local_devices = input_local_devices or jax.local_devices(0, backend) last_device = sorted(input_devices, key=get_coords)[-1] last_input_local_devices = sorted(input_local_devices, key=get_coords)[-1] logging.info('last device coords : %r\nlast local device coords: %r', get_coords(last_device), get_coords(last_input_local_devices)) global_hardware_mesh = bounds_from_last_device(last_device) mesh_ndim = len(global_hardware_mesh) local_hardware_mesh = bounds_from_last_device(last_input_local_devices) mesh_err = f'each dimension of the model parallel submesh {model_parallel_submesh} must be a factor of the corresponding dimension of the global device mesh {global_hardware_mesh}' assert not any((g % m for (g, m) in zip(global_hardware_mesh, model_parallel_submesh))), mesh_err assert not any((g % l for (g, l) in zip(global_hardware_mesh, local_hardware_mesh))) devices = np.empty(global_hardware_mesh, dtype=object) for device in input_devices: device_coords = get_coords(device) devices[device_coords] = device tile_by_host = tile_by_host_if_needed if len(global_hardware_mesh) == 4: global_hardware_mesh = typing.cast(Tuple[int, int, int, int], global_hardware_mesh) model_parallel_submesh = typing.cast(Tuple[int, int, int, int], model_parallel_submesh) (gx, gy, gz, gc) = global_hardware_mesh (mx, my, mz, mc) = model_parallel_submesh if mx == gx > 1 and my == mz == 1 or (mx == 1 and my == gy > 1 and (mz == gz > 1)): logging.info('ensuring YZ plane has a Z-major device order') assert mc == gc, (mc, gc) global_hardware_mesh = (gx, gz, gy, gc) model_parallel_submesh = (mx, mz, my, mc) devices = devices.swapaxes(1, 2) tile_by_host = False if my == gy > 1 and mx == mz == 1 or (my == 1 and mx == gx > 1 and (mz == gz > 1)): logging.info('ensuring XZ plane has a Z-major device order') assert mc == gc, (mc, gc) global_hardware_mesh = (gz, gy, gx, gc) model_parallel_submesh = (mz, my, mx, mc) devices = devices.swapaxes(0, 2) tile_by_host = False if tile_by_host: logging.warning('Tiling device assignment mesh by hosts, which may lead to reduced XLA collective performance. 
To avoid this, modify the model parallel submesh or run with more tasks per host.') tile_err = 'to tile the mesh by hosts, each dimension of the model parallel submesh must be either a factor or a multiple of the corresponding dimension of the per-host submesh' def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]: d = g // m if m >= l: assert not m % l, tile_err return (d, 1, m // l, l) else: assert not l % m, tile_err return (d // (l // m), l // m, 1, m) dh_dd_mh_md_tups = map(dh_dd_mh_md, global_hardware_mesh, model_parallel_submesh, local_hardware_mesh) devices = devices.reshape(*(s for t in dh_dd_mh_md_tups for s in t)) devices = devices.transpose(*(4 * i for i in range(mesh_ndim)), *(4 * i + 1 for i in range(mesh_ndim)), *(4 * i + 2 for i in range(mesh_ndim)), *(4 * i + 3 for i in range(mesh_ndim))) else: model_data_tups = [(g // m, m) for (g, m) in zip(global_hardware_mesh, model_parallel_submesh)] devices = devices.reshape(*(s for t in model_data_tups for s in t)) devices = devices.transpose(*(2 * i for i in range(mesh_ndim)), *(2 * i + 1 for i in range(mesh_ndim))) devices = devices.reshape(-1, np.prod(model_parallel_submesh)) global_mesh = Mesh(devices, ['data', 'model']) logging.info('global_mesh axis_names: %s', global_mesh.axis_names) logging.info('global_mesh devices: %s', global_mesh.devices) logging.info('global_mesh devices shape: %s', global_mesh.devices.shape) return global_mesh def get_cpu_mesh() -> Mesh: devices = np.empty((jax.host_count(), jax.local_device_count()), dtype=object) for device in jax.devices(): devices[device.process_index, device.id % jax.local_device_count()] = device return Mesh(devices, ['data', 'model']) def get_gpu_mesh(num_partitions: int) -> Mesh: nvlink_size = jax.local_device_count() dcn_size = jax.process_count() nvlink_mp = min(num_partitions, nvlink_size) (nvlink_dp, extra1) = divmod(nvlink_size, nvlink_mp) (dcn_mp, extra2) = divmod(num_partitions, nvlink_mp) assert not (extra1 or extra2), 'number of partitions on GPU must be a factor or multiple of the number of local devices' dcn_dp = dcn_size // dcn_mp devices = create_hybrid_device_mesh(mesh_shape=[nvlink_dp, nvlink_mp], dcn_mesh_shape=[dcn_dp, dcn_mp], process_is_granule=True) global_mesh = Mesh(devices, ['data', 'model']) logging.info('global_mesh axis_names: %s', global_mesh.axis_names) logging.info('global_mesh devices: %s', global_mesh.devices) return global_mesh def default_mesh(num_partitions: int, model_parallel_submesh: Optional[HardwareMesh]=None, backend: Optional[str]=None) -> Mesh: last_device = jax.devices(backend)[-1] platform = last_device.platform device_kind = last_device.device_kind bounds = bounds_from_last_device(last_device) if model_parallel_submesh: return get_mesh(model_parallel_submesh, backend=backend) if platform == 'cpu': return get_cpu_mesh() elif platform == 'gpu': return get_gpu_mesh(num_partitions) mps = None if device_kind in ('TPU v2', 'TPU v3'): if num_partitions == 1: mps = (1, 1, 1, 1) elif num_partitions == 2: mps = (1, 1, 1, 2) elif num_partitions == 4: mps = (2, 1, 1, 2) elif num_partitions == 8: mps = (2, 2, 1, 2) elif num_partitions == 16: mps = (4, 2, 1, 2) elif (device_kind == 'TPU v4' or device_kind == 'TPU v4 lite') and bounds[3] == 1: if num_partitions == 1: mps = (1, 1, 1, 1) elif num_partitions == 2: mps = (1, 2, 1, 1) elif num_partitions == 4: if bounds[0] >= 4: mps = (4, 1, 1, 1) else: mps = (2, 2, 1, 1) elif num_partitions == 8: if bounds[2] >= 8: mps = (1, 1, 8, 1) else: mps = (4, 2, 1, 1) elif num_partitions == 16: if 
bounds[2] >= 16: mps = (1, 1, 16, 1) elif bounds[0] >= 8: mps = (8, 2, 1, 1) elif bounds[0] >= 4: mps = (4, 4, 1, 1) else: mps = (2, 2, 4, 1) if mps is None: raise ValueError('No default mesh for this configuration: specify config.model_parallel_submesh explicitly.') return get_mesh(mps, backend=backend) @dataclasses.dataclass class LocalChunkInfo: slice: Tuple[slice, ...] replica_id: int class LocalChunker: def __init__(self, global_mesh: Mesh): self.global_mesh = global_mesh local_mesh = global_mesh.local_mesh first_local_device = local_mesh.devices.reshape(-1)[0] host_location = collections.OrderedDict(zip(global_mesh.shape.keys(), list(zip(*np.nonzero(global_mesh.devices == first_local_device)))[0])) self.num_chunks = collections.OrderedDict() self.chunk_ids = collections.OrderedDict() self.mesh_axes = list(global_mesh.shape.keys()) for mesh_axis in self.mesh_axes: num_devices_per_chunk = local_mesh.shape[mesh_axis] self.num_chunks[mesh_axis] = global_mesh.shape[mesh_axis] // num_devices_per_chunk self.chunk_ids[mesh_axis] = host_location[mesh_axis] // num_devices_per_chunk def get_local_chunk_info(self, global_shape: Tuple[int, ...], mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo: local_slice = [slice(None) for dim in global_shape] sharded_mesh_axes = set() for (i, (mesh_axis, size)) in enumerate(zip(mesh_axes, global_shape)): if not mesh_axis: continue sharded_mesh_axes.add(mesh_axis) if not isinstance(mesh_axis, str): raise NotImplementedError('TODO(jekbradbury)') chunk_id = self.chunk_ids[mesh_axis] chunk_size = size // self.num_chunks[mesh_axis] local_slice[i] = slice(chunk_id * chunk_size, (chunk_id + 1) * chunk_size) replicated_mesh_axes = [mesh_axis for mesh_axis in self.mesh_axes if mesh_axis not in sharded_mesh_axes] replica_id = 0 for mesh_axis in replicated_mesh_axes: chunk_id = self.chunk_ids[mesh_axis] replica_id = replica_id * self.num_chunks[mesh_axis] + chunk_id return LocalChunkInfo(tuple(local_slice), replica_id) def standard_logical_axis_rules(activation_partitioning_dims: int=1, parameter_partitioning_dims: int=1, additional_rules: Optional[LogicalAxisRules]=None) -> LogicalAxisRules: logging.info('`activation_partitioning_dims` = %d, `parameter_partitioning_dims` = %d', activation_partitioning_dims, parameter_partitioning_dims) if activation_partitioning_dims == 1 and parameter_partitioning_dims == 1: rules = [('batch', 'data'), ('vocab', 'model'), ('embed', None), ('mlp', 'model'), ('heads', 'model'), ('kv', None), ('joined_kv', 'model')] elif activation_partitioning_dims == 2 and parameter_partitioning_dims == 1: rules = [('batch', 'data'), ('vocab', 'model'), ('mlp', 'model'), ('heads', 'model'), ('kv', None), ('joined_kv', 'model'), ('embed', 'model')] elif activation_partitioning_dims == 1 and parameter_partitioning_dims == 2: rules = [('batch', 'data'), ('vocab', 'model'), ('mlp', 'model'), ('heads', 'model'), ('kv', None), ('joined_kv', 'model'), ('embed', 'data')] elif activation_partitioning_dims == 2 and parameter_partitioning_dims == 2: rules = [('batch', 'data'), ('vocab', 'model'), ('mlp', 'model'), ('heads', 'model'), ('kv', None), ('joined_kv', 'model'), ('embed', 'model'), ('embed', 'data')] else: raise ValueError(f'`activation_partitioning_dims` = {activation_partitioning_dims} `parameter_partitioning_dims` = {parameter_partitioning_dims} is not supported.') replicated_rules = [('relpos_buckets', None), ('abspos_buckets', None), ('length', None), ('layers', None), ('stack', None), ('mlp_activations', None)] rules.extend(replicated_rules) if 
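# --- Added illustration (hedged sketch): how the logical-to-physical rules listed above are
# used downstream. flax.linen.partitioning.logical_to_mesh_axes maps the named dimensions of
# an array onto a PartitionSpec over the ('data', 'model') mesh axes built elsewhere in this file.
from flax.linen import partitioning as flax_partitioning
example_rules = [('batch', 'data'), ('embed', None), ('mlp', 'model')]
spec = flax_partitioning.logical_to_mesh_axes(('batch', 'embed', 'mlp'), example_rules)
# spec == PartitionSpec('data', None, 'model'): the batch dim is sharded over the data axis,
# the embed dim is replicated, and the mlp dim is sharded over the model axis.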
additional_rules: rules.extend(additional_rules) return rules def _id_fn(x, ix): y = random.split(random.PRNGKey(jnp.array(ix, dtype=jnp.uint32))) return (x, y) @dataclasses.dataclass class DataLayout: batch_size: int shard_id: int num_shards: int is_first_host_in_replica_set: bool PartitionedCallable = Callable[..., Any] CompiledPartitionedCallable = Callable[..., Any] class BasePartitioner(metaclass=abc.ABCMeta): def __init__(self, num_partitions: Optional[int]=None, model_parallel_submesh: Optional[HardwareMesh]=None, params_on_devices: bool=True, backend: Optional[str]=None): if not num_partitions and (not model_parallel_submesh): raise ValueError('At least one of `num_partitions` or `model_parallel_submesh` must be set.') if model_parallel_submesh is not None and len(model_parallel_submesh) != 4: logging.error('`model_parallel_submesh` must be either None or a 4-tuple. Got `model_parallel_submesh`=%s. A ValueError will be raised beginning March 1, 2022.', model_parallel_submesh) if bool(num_partitions) and bool(model_parallel_submesh): logging.error('At most one of `num_partitions` or `model_parallel_submesh` can be set. Got `num_partitions=%s` and `model_parallel_submesh`=%s. A ValueError will be raised beginning March 21, 2022.', num_partitions, model_parallel_submesh) self._num_partitions = num_partitions self._model_parallel_submesh = model_parallel_submesh self._params_on_devices = params_on_devices self._data_axis = 'data' self._backend = backend @property def mesh(self) -> Mesh: raise NotImplementedError @property def data_partition_spec(self) -> PartitionSpec: return PartitionSpec(self._data_axis) def get_data_layout(self, batch_size: Optional[int]=None, host_index: Optional[int]=None) -> DataLayout: if host_index is not None: raise NotImplementedError('Explicit host_index is not yet implemented.') if self._data_axis is None: return DataLayout(batch_size=batch_size, shard_id=0, num_shards=1, is_first_host_in_replica_set=jax.process_index() == 0) mesh_size = self._local_chunker.global_mesh.shape[self._data_axis] batch_size = batch_size or mesh_size if batch_size % mesh_size: raise ValueError(f'Batch size ({batch_size}) must be divisible by corresponding mesh size ({mesh_size}).') num_shards = self._local_chunker.num_chunks[self._data_axis] if batch_size % num_shards: raise ValueError(f'Batch size ({batch_size}) must be divisible by number of replicas ({num_shards}).') replica_id = self._local_chunker.get_local_chunk_info((batch_size,), [self._data_axis]).replica_id return DataLayout(batch_size=int(batch_size), shard_id=int(self._local_chunker.chunk_ids[self._data_axis]), num_shards=int(num_shards), is_first_host_in_replica_set=replica_id == 0) def get_local_chunk_info(self, global_shape: Tuple[int, ...], mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo: return self._local_chunker.get_local_chunk_info(global_shape, mesh_axes) @property def params_on_devices(self): return self._params_on_devices def move_params_to_devices(self, train_state: TrainState, train_state_axes: TrainState) -> TrainState: p_id_fn = self.partition(_id_fn, in_axis_resources=(train_state_axes, None), out_axis_resources=(train_state_axes, None), donate_argnums=(0,)) if jax.config.jax_array and jax.process_count() > 1: train_state = multihost_utils.host_local_array_to_global_array(train_state, self.mesh, train_state_axes) (train_state, _) = p_id_fn(train_state, jnp.ones((), dtype=jnp.uint32)) return train_state @property @abc.abstractmethod def _local_chunker(self): raise NotImplementedError def 
get_logical_axes(self, train_state: TrainState) -> TrainState: return train_state.restore_state(jax.tree_map(lambda x: None, train_state.state_dict())) def get_mesh_axes(self, train_state: TrainState) -> TrainState: raise NotImplementedError @abc.abstractmethod def partition(self, fn: Callable, in_axis_resources, out_axis_resources, static_argnums: Union[int, Sequence[int]]=(), donate_argnums: Union[int, Sequence[int]]=()) -> PartitionedCallable: raise NotImplementedError @abc.abstractmethod def compile(self, partitioned_fn: PartitionedCallable, *args) -> CompiledPartitionedCallable: raise NotImplementedError class PjittedFnWithContext(PartitionedCallable): def __init__(self, pjitted_fn, partition_mesh: Mesh, logical_axis_rules: flax_partitioning.LogicalRules=()): self._pjitted_fn = pjitted_fn self._mesh = partition_mesh self._logical_axis_rules = logical_axis_rules def __call__(self, *args): with Mesh(self._mesh.devices, self._mesh.axis_names), flax_partitioning.axis_rules(self._logical_axis_rules): return self._pjitted_fn(*args) def lower(self, *args): with Mesh(self._mesh.devices, self._mesh.axis_names), flax_partitioning.axis_rules(self._logical_axis_rules): return self._pjitted_fn.lower(*args) class BasePjitPartitioner(BasePartitioner): @cached_property def _local_chunker(self) -> LocalChunker: return LocalChunker(self.mesh) @cached_property def mesh(self) -> Mesh: return default_mesh(self._num_partitions, self._model_parallel_submesh, self._backend) def partition(self, fn: Callable, in_axis_resources, out_axis_resources, static_argnums: Union[int, Sequence[int]]=(), donate_argnums: Union[int, Sequence[int]]=()) -> PjittedFnWithContext: pjitted = pjit(fn, in_axis_resources=in_axis_resources, out_axis_resources=out_axis_resources, static_argnums=static_argnums, donate_argnums=donate_argnums, backend=self._backend) return PjittedFnWithContext(pjitted, self.mesh) def compile(self, partitioned_fn: PjittedFnWithContext, *args) -> CompiledPartitionedCallable: return partitioned_fn.lower(*args).compile() class PjitPartitioner(BasePjitPartitioner): def __init__(self, num_partitions: Optional[int]=None, model_parallel_submesh: Optional[HardwareMesh]=None, params_on_devices: bool=True, backend: Optional[str]=None, logical_axis_rules: Optional[LogicalAxisRules]=None, use_cpu_pjit: Optional[bool]=False): super().__init__(num_partitions=num_partitions, model_parallel_submesh=model_parallel_submesh, params_on_devices=params_on_devices, backend=backend) if logical_axis_rules is None: logical_axis_rules = standard_logical_axis_rules() self._logical_axis_rules = tuple(logical_axis_rules) (self._data_axis,) = flax_partitioning.logical_to_mesh_axes(['batch'], logical_axis_rules) self._use_cpu_pjit = use_cpu_pjit def partition(self, fn: Callable, in_axis_resources, out_axis_resources, static_argnums: Union[int, Sequence[int]]=(), donate_argnums: Union[int, Sequence[int]]=()) -> PjittedFnWithContext: if self._use_cpu_pjit: pjit_fn = pjit_with_cpu_fallback else: pjit_fn = pjit pjitted = pjit_fn(fn, in_axis_resources=in_axis_resources, out_axis_resources=out_axis_resources, static_argnums=static_argnums, donate_argnums=donate_argnums, backend=self._backend) return PjittedFnWithContext(pjitted, self.mesh, self._logical_axis_rules) @property def logical_axis_rules(self): return self._logical_axis_rules def get_logical_axes(self, train_state: TrainState) -> TrainState: return train_state.as_logical_axes() def get_mesh_axes(self, train_state: TrainState) -> TrainState: logical_axes = 
self.get_logical_axes(train_state) def _logical_to_mesh_axes(param_name, logical_axes): if logical_axes is None: return None elif logical_axes is traverse_util.empty_node: return traverse_util.empty_node try: return flax_partitioning.logical_to_mesh_axes(logical_axes, self._logical_axis_rules) except ValueError as e: raise ValueError(f'Failed to map logical axes for {param_name}') from e flat_logical_axes = traverse_util.flatten_dict(logical_axes.state_dict(), keep_empty_nodes=True, sep='/') flat_mesh_axes = {k: _logical_to_mesh_axes(k, v) for (k, v) in flat_logical_axes.items()} return logical_axes.restore_state(traverse_util.unflatten_dict(flat_mesh_axes, sep='/')) # File: distil-whisper-main/training/flax/distil_whisper/pipeline.py """""" import math import jax import jax.numpy as jnp import numpy as np import requests import torch from flax import jax_utils from flax.core.frozen_dict import freeze from flax.training.common_utils import shard from transformers import WhisperFeatureExtractor, WhisperTokenizerFast from transformers.models.whisper.tokenization_whisper import TO_LANGUAGE_CODE from transformers.pipelines.audio_utils import ffmpeg_read from transformers.utils import logging from .modeling_flax_whisper import FlaxWhisperForConditionalGeneration logger = logging.get_logger(__name__) class FlaxWhisperFeatureExtractor(WhisperFeatureExtractor): def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray: waveform = torch.from_numpy(waveform).type(torch.float32) window = torch.hann_window(self.n_fft) stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) magnitudes = stft[..., :-1].abs() ** 2 mel_filters = torch.from_numpy(self.mel_filters).type(torch.float32) mel_spec = mel_filters.T @ magnitudes log_spec = torch.clamp(mel_spec, min=1e-10).log10() log_spec = torch.maximum(log_spec, log_spec.max() - 8.0) log_spec = (log_spec + 4.0) / 4.0 return log_spec.numpy() class FlaxWhisperPipeline: def __init__(self, checkpoint='openai/whisper-large-v2', dtype=jnp.float32, batch_size=None, max_length=None, **kwargs): self.checkpoint = checkpoint self.dtype = dtype self.feature_extractor = FlaxWhisperFeatureExtractor.from_pretrained(self.checkpoint) self.tokenizer = WhisperTokenizerFast.from_pretrained(self.checkpoint) (self.model, self.params) = FlaxWhisperForConditionalGeneration.from_pretrained(self.checkpoint, _do_init=False, dtype=self.dtype, **kwargs) self.max_length = max_length if max_length is not None else self.model.generation_config.max_length self.min_batch_size = jax.local_device_count() self.batch_size = batch_size if batch_size is not None else self.min_batch_size def generate(params, input_features, forced_decoder_ids, return_timestamps, num_beams, length_penalty, do_sample, top_k, temperature): output_ids = self.model.pipeline_generate(input_features, params=params, forced_decoder_ids=forced_decoder_ids, return_timestamps=return_timestamps, max_length=self.max_length, num_beams=num_beams, length_penalty=length_penalty, do_sample=do_sample, top_k=top_k, temperature=temperature) return output_ids self.params = jax_utils.replicate(self.params) self.p_generate = jax.pmap(generate, 'input_features', in_axes=(0, 0, None, None, None, None, None, None, None), static_broadcasted_argnums=(3, 4, 5, 6, 7, 8)) def generate(self, input_features, language=None, task=None, return_timestamps=False, num_beams=1, length_penalty=1.0, do_sample=False, top_k=50, temperature=1.0): forced_decoder_ids = self.get_forced_decoder_ids(language=language, 
task=task, return_timestamps=return_timestamps) output_ids = self.p_generate(freeze(self.params), shard(input_features), forced_decoder_ids, return_timestamps, num_beams, length_penalty, do_sample, top_k, temperature).sequences output_ids = jax.device_get(output_ids.reshape(-1, self.max_length)) return output_ids def get_forced_decoder_ids(self, generation_config=None, task=None, language=None, return_timestamps=False): if generation_config is None: generation_config = self.model.generation_config if hasattr(generation_config, 'is_multilingual'): is_multilingual = generation_config.is_multilingual else: is_multilingual = None forced_decoder_ids = [] if is_multilingual: if language is not None: language = language.lower() if language in generation_config.lang_to_id.keys(): language_token = language elif language in TO_LANGUAGE_CODE.values(): language_token = f'<|{language}|>' elif language in TO_LANGUAGE_CODE.keys(): language_token = f'<|{TO_LANGUAGE_CODE[language]}|>' else: if len(language) == 2: acceptable_languages = list(TO_LANGUAGE_CODE.values()) elif '<' in language or '|' in language or '>' in language: acceptable_languages = list(generation_config.lang_to_id.keys()) else: acceptable_languages = list(TO_LANGUAGE_CODE.keys()) raise ValueError(f'Unsupported language: {language}. Language should be one of: {acceptable_languages}.') forced_decoder_ids.append((1, generation_config.lang_to_id[language_token])) if task is not None: forced_decoder_ids.append((2, generation_config.task_to_id[task])) else: forced_decoder_ids.append((2, generation_config.task_to_id['transcribe'])) if not return_timestamps: if forced_decoder_ids and forced_decoder_ids[-1][0] != generation_config.no_timestamps_token_id: idx = forced_decoder_ids[-1][0] + 1 if forced_decoder_ids else 1 forced_decoder_ids.append((idx, generation_config.no_timestamps_token_id)) else: forced_decoder_ids.append((1, generation_config.no_timestamps_token_id)) return forced_decoder_ids def chunk_iter_with_batch(self, inputs, chunk_len, stride_left, stride_right, batch_size): inputs_len = inputs.shape[0] step = chunk_len - stride_left - stride_right all_chunk_start_idx = np.arange(0, inputs_len, step) num_samples = len(all_chunk_start_idx) num_batches = math.ceil(num_samples / batch_size) batch_idx = np.array_split(np.arange(num_samples), num_batches) for idx in batch_idx: chunk_start_idx = all_chunk_start_idx[idx] chunk_end_idx = chunk_start_idx + chunk_len chunks = [inputs[chunk_start:chunk_end] for (chunk_start, chunk_end) in zip(chunk_start_idx, chunk_end_idx)] processed = self.feature_extractor(chunks, sampling_rate=self.feature_extractor.sampling_rate, return_tensors='np') _stride_left = np.where(chunk_start_idx == 0, 0, stride_left) is_last = np.where(stride_right > 0, chunk_end_idx > inputs_len, chunk_end_idx >= inputs_len) _stride_right = np.where(is_last, 0, stride_right) chunk_lens = [chunk.shape[0] for chunk in chunks] strides = [(chunk_l, _stride_l, _stride_r) for (chunk_l, _stride_l, _stride_r) in zip(chunk_lens, _stride_left, _stride_right)] yield {'stride': strides, **processed} def preprocess_batch(self, inputs, chunk_length_s=30.0, stride_length_s=None, batch_size=None): if isinstance(inputs, np.ndarray): logger.warning("Numpy array passed as input - no sampling rate checks will be performed. It is strongly recommended to pass the input as a dictionary with an 'array' key containing the numpy array representing the audio, and a 'sampling_rate' key containing the sampling rate associated with the audio array. Failing to do
so can result in silent errors that might be hard to debug.") if isinstance(inputs, str): if inputs.startswith('http://') or inputs.startswith('https://'): inputs = requests.get(inputs).content else: with open(inputs, 'rb') as f: inputs = f.read() if isinstance(inputs, bytes): inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate) stride = None if isinstance(inputs, dict): stride = inputs.get('stride', None) if not ('sampling_rate' in inputs and 'array' in inputs): raise ValueError("When passing a dictionary to FlaxWhisperPipeline, the dict needs to contain an 'array' key containing the numpy array representing the audio, and a 'sampling_rate' key containing the sampling rate associated with the audio array.") in_sampling_rate = inputs.get('sampling_rate') inputs = inputs.get('array', None) if in_sampling_rate != self.feature_extractor.sampling_rate: try: import librosa except ImportError as err: raise ImportError("To support resampling audio files, please install 'librosa' and 'soundfile'.") from err inputs = librosa.resample(inputs, orig_sr=in_sampling_rate, target_sr=self.feature_extractor.sampling_rate) ratio = self.feature_extractor.sampling_rate / in_sampling_rate else: ratio = 1 if not isinstance(inputs, np.ndarray): raise ValueError(f'We expect a numpy ndarray as input, got `{type(inputs)}`') if len(inputs.shape) != 1: raise ValueError('We expect a single channel audio input for AutomaticSpeechRecognitionPipeline') if stride is not None: if stride[0] + stride[1] > inputs.shape[0]: raise ValueError('Stride is too large for input') stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio))) if chunk_length_s: if stride_length_s is None: stride_length_s = chunk_length_s / 6 if isinstance(stride_length_s, (int, float)): stride_length_s = [stride_length_s, stride_length_s] chunk_len = round(chunk_length_s * self.feature_extractor.sampling_rate) stride_left = round(stride_length_s[0] * self.feature_extractor.sampling_rate) stride_right = round(stride_length_s[1] * self.feature_extractor.sampling_rate) if chunk_len < stride_left + stride_right: raise ValueError('Chunk length must be greater than the stride length') for item in self.chunk_iter_with_batch(inputs, chunk_len, stride_left, stride_right, batch_size): yield item else: processed = self.feature_extractor(inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors='np') if stride is not None: processed['stride'] = stride yield processed def postprocess(self, model_outputs, return_timestamps=None, return_language=None): model_outputs = [dict(zip(output, t)) for output in model_outputs for t in zip(*output.values())] time_precision = self.feature_extractor.chunk_length / self.model.config.max_source_positions sampling_rate = self.feature_extractor.sampling_rate for output in model_outputs: if 'stride' in output: (chunk_len, stride_left, stride_right) = output['stride'] chunk_len /= sampling_rate stride_left /= sampling_rate stride_right /= sampling_rate output['stride'] = (chunk_len, stride_left, stride_right) (text, optional) = self.tokenizer._decode_asr(model_outputs, return_timestamps=return_timestamps, return_language=return_language, time_precision=time_precision) return {'text': text, **optional} def forward(self, model_inputs, batch_size=None, language=None, task=None, return_timestamps=False, num_beams=1, length_penalty=1.0, do_sample=False, top_k=50, temperature=1.0): input_features = model_inputs.pop('input_features') input_batch_size = input_features.shape[0] if
input_batch_size != batch_size: padding = np.zeros([batch_size - input_batch_size, *input_features.shape[1:]], input_features.dtype) input_features = np.concatenate([input_features, padding]) pred_ids = self.generate(input_features, language=language, task=task, return_timestamps=return_timestamps, num_beams=num_beams, length_penalty=length_penalty, do_sample=do_sample, top_k=top_k, temperature=temperature)[:input_batch_size] out = {'tokens': pred_ids[:, None, :]} stride = model_inputs.pop('stride', None) if stride is not None: out['stride'] = stride return out def __call__(self, inputs, chunk_length_s=30.0, stride_length_s=None, batch_size=None, language=None, task=None, return_timestamps=None, num_beams=1, length_penalty=1.0, do_sample=False, top_k=50, temperature=1.0): batch_size = batch_size if batch_size is not None else self.batch_size if batch_size % self.min_batch_size != 0: raise ValueError(f'Batch size must be a multiple of the number of JAX devices, but got batch size {batch_size} and num devices {self.min_batch_size}.') dataloader = self.preprocess_batch(inputs, chunk_length_s=chunk_length_s, stride_length_s=stride_length_s, batch_size=batch_size) model_outputs = [] for batch in dataloader: model_outputs.append(self.forward(batch, batch_size=batch_size, language=language, task=task, return_timestamps=return_timestamps, num_beams=num_beams, length_penalty=length_penalty, do_sample=do_sample, top_k=top_k, temperature=temperature)) post_processed = self.postprocess(model_outputs, return_timestamps=return_timestamps) return post_processed # File: distil-whisper-main/training/flax/distil_whisper/train_state.py from typing import Any, Mapping, MutableMapping, Optional, Tuple import flax.core import flax.serialization import flax.struct import jax.numpy as jnp from flax import traverse_util from flax.core import scope as flax_scope from flax.linen import partitioning as flax_partitioning EMPTY_DICT = flax.core.freeze({}) FrozenDict = flax_scope.FrozenDict FrozenVariableDict = flax_scope.FrozenVariableDict MutableVariableDict = flax_scope.MutableVariableDict VariableDict = flax_scope.VariableDict def _validate_params_axes(params_axes, params): axis_names = flax_partitioning.get_axis_names(params_axes) missing_params_axes = set(traverse_util.flatten_dict(params, sep='/')) - set(traverse_util.flatten_dict(axis_names, sep='/')) if missing_params_axes: raise ValueError(f'Missing axis names for parameters: {missing_params_axes}') def _split_variables_and_axes(variables_and_axes: FrozenVariableDict) -> Tuple[FrozenVariableDict, FrozenVariableDict]: variables = {} axes = {} for (k, v) in variables_and_axes.items(): if k.endswith('_axes'): axes[k[:-5]] = v _validate_params_axes(v, variables_and_axes[k[:-5]]) else: variables[k] = v return (flax.core.freeze(variables), flax.core.freeze(axes)) class InferenceState(flax.struct.PyTreeNode): step: jnp.ndarray params: flax_scope.FrozenVariableDict params_axes: Optional[flax_scope.FrozenVariableDict] = None flax_mutables: flax_scope.FrozenDict = EMPTY_DICT flax_mutables_axes: Optional[flax_scope.FrozenVariableDict] = None @classmethod def create(cls, model_variables: FrozenVariableDict) -> 'InferenceState': (other_variables, params) = model_variables.pop('params') if 'params_axes' in other_variables: (other_variables, params_axes) = other_variables.pop('params_axes') _validate_params_axes(params_axes, params) else: params_axes = None (flax_mutables, flax_mutables_axes) = _split_variables_and_axes(other_variables) flax_mutables_axes = 
flax_mutables_axes or None return InferenceState(step=jnp.array(0), params=params, params_axes=params_axes, flax_mutables=flax_mutables, flax_mutables_axes=flax_mutables_axes) @property def param_states(self) -> FrozenVariableDict: raise NotImplementedError('InferenceState has no optimizer states.') def apply_gradient(self, *args, **kwargs) -> 'InferenceState': raise NotImplementedError('InferenceState does not support `apply_gradient`.') def state_dict(self) -> MutableMapping[str, Any]: state_dict = {'target': flax.core.unfreeze(self.params), 'state': {'step': self.step}} if self.flax_mutables: state_dict['flax_mutables'] = flax.core.unfreeze(self.flax_mutables) return state_dict def replace_step(self, step: jnp.ndarray) -> 'InferenceState': return self.replace(step=step) def replace_params(self, params: FrozenVariableDict) -> 'InferenceState': return self.replace(params=params) def replace_flax_mutables(self, flax_mutables: FrozenDict) -> 'InferenceState': return self.replace(flax_mutables=flax_mutables) def restore_state(self, state_dict: Mapping[str, Any]) -> 'InferenceState': return self.replace(params=flax.core.freeze(state_dict['target']), step=state_dict['state']['step'], flax_mutables=flax.core.freeze(state_dict['flax_mutables']) if 'flax_mutables' in state_dict else EMPTY_DICT) def as_logical_axes(self) -> 'InferenceState': flax_mutables_axes = self.flax_mutables_axes or EMPTY_DICT return InferenceState(step=None, params=flax_partitioning.get_axis_names(self.params_axes), flax_mutables=flax_partitioning.get_axis_names(flax_mutables_axes)) # File: distil-whisper-main/training/flax/run_distillation.py """""" import logging import os import re import shutil import string import sys import time from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union import datasets import evaluate import flax import jax import jax.numpy as jnp import numpy as np import optax import torch import transformers from datasets import DatasetDict, IterableDataset, IterableDatasetDict, concatenate_datasets, interleave_datasets, load_dataset from flax import jax_utils, traverse_util from flax.jax_utils import pad_shard_unpad, unreplicate from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import Repository, create_repo from jax.experimental.compilation_cache import compilation_cache as cc from optax._src import linear_algebra from torch.utils.data import DataLoader from torchdata.datapipes.iter import IterableWrapper from tqdm import tqdm from transformers import AddedToken, HfArgumentParser, Seq2SeqTrainingArguments, WhisperConfig, WhisperFeatureExtractor, WhisperProcessor, WhisperTokenizerFast, is_tensorboard_available, is_wandb_available, set_seed from transformers.file_utils import get_full_repo_name from transformers.modeling_flax_outputs import FlaxBaseModelOutput from transformers.models.whisper.english_normalizer import EnglishTextNormalizer from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from distil_whisper import FlaxWhisperForConditionalGeneration check_min_version('4.27.0.dev0') require_version('datasets>=1.18.0', 'To fix: pip install -r examples/flax/speech-recogintion/requirements.txt') logger = logging.getLogger(__name__) @flax.struct.dataclass class ModelArguments: 
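# Descriptive note (added): the fields below cover the student and teacher Whisper checkpoints used in distillation, plus optional config/tokenizer/feature-extractor overrides, revision, dtype and dropout settings.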
model_name_or_path: str = field(metadata={'help': 'Path to pretrained student model or model identifier from huggingface.co/models'}) teacher_model_name_or_path: str = field(metadata={'help': 'Path to pretrained teacher model or model identifier from huggingface.co/models'}) config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}) tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}) feature_extractor_name: Optional[str] = field(default=None, metadata={'help': 'Feature extractor name or path if not the same as model_name'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use a fast tokenizer (backed by the tokenizers library) or not.'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) subfolder: str = field(default='', metadata={'help': 'In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.'}) use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'}) dtype: Optional[str] = field(default='float32', metadata={'help': 'Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`.'}) load_with_scan_weights: bool = field(default=False, metadata={'help': 'Whether the pre-trained checkpoint has its weights stored in scan format. Set to True for scanned weights, defaults to False for non-scan (unrolled) weights.'}) activation_dropout: float = field(default=0.0, metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'}) attention_dropout: float = field(default=0.0, metadata={'help': 'The dropout ratio for the attention probabilities.'}) dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'}) @flax.struct.dataclass class DataTrainingArguments: train_dataset_name: str = field(default=None, metadata={'help': "The name of the training dataset to use (via the datasets library). Load and combine multiple datasets by separating dataset ids by a '+' symbol. For example, to load and combine librispeech and common voice, set `train_dataset_name='librispeech_asr+common_voice'`."}) train_dataset_config_name: Optional[str] = field(default=None, metadata={'help': "The configuration name of the training dataset to use (via the datasets library). Load and combine multiple datasets by separating dataset configs by a '+' symbol."}) train_dataset_samples: str = field(default=None, metadata={'help': "Number of samples in the training data. Load and combine multiple datasets by separating dataset samples by a '+' symbol."}) eval_dataset_name: str = field(default=None, metadata={'help': 'The name of the evaluation dataset to use (via the datasets library). Defaults to the training dataset name if unspecified.'}) eval_dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the evaluation dataset to use (via the datasets library). 
Defaults to the training dataset config name if unspecified'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}) max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'}) max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) train_text_column_name: str = field(default='whisper_transcript', metadata={'help': "The name of the dataset column containing the text data. Defaults to 'whisper_transcript', which is the pseudo-labelled Whisper transcription data."}) eval_text_column_name: str = field(default='text', metadata={'help': "The name of the dataset column containing the text data. Defaults to 'text', which is the original text data"}) max_duration_in_seconds: float = field(default=30.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'}) min_duration_in_seconds: float = field(default=0.0, metadata={'help': 'Filter audio files that are shorter than `min_duration_in_seconds` seconds'}) max_label_length: int = field(default=128, metadata={'help': 'Truncate transcriptions that are longer than `max_label_length` tokens.'}) pad_target_to_multiple_of: Optional[int] = field(default=None, metadata={'help': 'If set, will pad the target sequence to a multiple of the provided value. This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the targets to max length.'}) preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'}) train_split_name: str = field(default='train', metadata={'help': "The name of the training data set split to use (via the datasets library). Defaults to 'train'"}) eval_split_name: str = field(default='validation', metadata={'help': "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"}) wandb_project: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb project.'}) wandb_name: str = field(default=None, metadata={'help': 'The name of the wandb run.'}) wandb_job_type: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb job type.'}) wandb_dir: str = field(default=None, metadata={'help': 'The absolute path to save the wandb logs.'}) save_code_to_wandb: bool = field(default=False, metadata={'help': 'Whether to save the main script to wandb. 
This is valuable for improving experiment reproducibility and to diff code across experiments in the UI.'}) streaming: bool = field(default=True, metadata={'help': "Whether to use Datasets' streaming mode to load and stream the data."}) wer_threshold: float = field(default=None, metadata={'help': 'Filter training data with Whisper transcriptions that have greater than `wer_threshold` WER with the normalised transcriptions.'}) prefetch_size: int = field(default=0, metadata={'help': 'Number of samples to pre-fetch if using an iterable dataset.'}) timestamp_probability: float = field(default=0.5, metadata={'help': 'Probability for training on timestamped tokens if the data contains them.'}) return_timestamps: bool = field(default=False, metadata={'help': 'Whether or not to predict timestamps in the generation step.'}) round_timestamps: bool = field(default=False, metadata={'help': 'Whether or not to round the timestamp tokens to the nearest tenth of a second. By default, Whisper predicts timestamps to the nearest hundredth of a second. Reducing the timestamp precision to one tenth of a second simplifies the timestamp prediction task, at the expense of timestamp granularity.'}) @dataclass class FlaxSeq2SeqTrainingArguments(Seq2SeqTrainingArguments): use_scan: Optional[bool] = field(default=True, metadata={'help': 'Whether or not to use `scan_with_axes` over the encoder and decoder blocks. Using scan results in faster compile times and more efficient memory use during training, since all of the layers in the encoder/decoder are stacked, and we perform a lax.scan over the stacked block to index each layer. However, it results in slower inference time due to the overhead of stacking the layers this way. Thus, we **always** default to disabling scan for the inference step.'}) freeze_encoder: Optional[bool] = field(default=False, metadata={'help': 'Whether to freeze the entire encoder model. Only recommended when the entire encoder has been copied from the teacher model.'}) temperature: Optional[float] = field(default=2.0, metadata={'help': 'Temperature to anneal the logits when computing the softmax.'}) kl_weight: Optional[float] = field(default=1.0, metadata={'help': 'Weighting assigned to the KL divergence loss in the KD formulation. The KL loss is computed between the teacher and student output distributions.'}) mse_weight: Optional[float] = field(default=0.0, metadata={'help': 'Weighting assigned to the MSE loss in the KD formulation. MSE loss is computed between the teacher-student hidden states and attentions.'}) precision: Optional[str] = field(default='half_mixed', metadata={'help': 'Precision with which to run training. Can be one of `full`, `half_mixed` or `full_mixed`, the latter two of which enable *mixed-precision* training. **Note that this only specifies the dtype of the computation and optimizer state. It does not influence the dtype of model parameters.** An explanation of the three settings is provided below: 1. Full precision: forward pass, backward pass and optimiser states all in float32. 2. Half mixed precision: forward pass in bfloat16, backward pass and optimiser states in float32. This corresponds to setting the dtype argument to bfloat16 when instantiating the model. 3. Full mixed precision: forward pass, backward pass and optimiser states all in bfloat16. The dtype argument is set to bfloat16 for the forward pass, and the gradients computed with respect to the bfloat16 parameters in the backward pass (giving bfloat16 gradients). 
The new optimiser states and parameter updates are computed in float32 by upcasting the bfloat16 gradients and optimiser states to float32 prior to the optimiser update step. The optimiser states are returned in float32 (but not saved to memory) and then downcasted to bfloat16 (saved to memory) for the subsequent train step. For further details, refer to https://github.com/deepmind/optax/discussions/336'}) compilation_cache: Optional[bool] = field(default=False, metadata={'help': 'Whether to enable the JAX (experimental) compilation cache. The compilation step is *cached* the first time it is run. Successive compilation steps for the same function utilise the cache to reduce the compilation time.'}) save_train_state: Optional[bool] = field(default=False, metadata={'help': 'Whether or not to save the Flax Train State on each `save_steps` steps. Required if you intend to resume training from partial training runs. If False, only the model weights will be saved. If True, both the model weights and Flax Train state will be saved.'}) def shift_tokens_right(label_ids: np.array, decoder_start_token_id: int) -> np.ndarray: shifted_label_ids = np.zeros_like(label_ids) shifted_label_ids[:, 1:] = label_ids[:, :-1] shifted_label_ids[:, 0] = decoder_start_token_id return shifted_label_ids @flax.struct.dataclass class FlaxDataCollatorSpeechSeq2SeqWithPadding: processor: Any decoder_start_token_id: int decoder_prev_token_id: int input_padding: Union[bool, str] = 'max_length' target_padding: Union[bool, str] = 'max_length' max_target_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: model_input_name = self.processor.model_input_names[0] input_features = {model_input_name: [feature[model_input_name] for feature in features]} label_features = {'input_ids': [feature['labels'] for feature in features]} batch = self.processor.feature_extractor.pad(input_features, padding=self.input_padding, return_tensors='np') labels_batch = self.processor.tokenizer.pad(label_features, max_length=self.max_target_length, padding=self.target_padding, return_tensors='np') labels = labels_batch['input_ids'] if set(np.unique(labels[:, 0])).issubset({self.decoder_start_token_id, self.decoder_prev_token_id}): decoder_input_ids = labels[:, :-1] labels = labels[:, 1:] labels_batch.attention_mask = labels_batch.attention_mask[:, 1:] else: decoder_input_ids = shift_tokens_right(labels, self.decoder_start_token_id) labels = np.ma.array(labels, mask=np.not_equal(labels_batch.attention_mask, 1)) labels = labels.filled(fill_value=-100) bos_index = np.argmax(labels == self.decoder_start_token_id, axis=1) prompt_mask = np.arange(labels.shape[1]) < bos_index[:, None] labels = np.where(prompt_mask, -100, labels) batch['labels'] = labels batch['decoder_input_ids'] = decoder_input_ids return batch def get_data_loader(seed: int, dataset: IterableDataset, batch_size: int, data_collator: FlaxDataCollatorSpeechSeq2SeqWithPadding, shuffle: bool=True, drop_last: bool=True, dataloader_num_workers: int=0, skip_batches: int=0, pin_memory: bool=True, prefetch_size: int=0) -> DataLoader: if shuffle: dataset = dataset.shuffle(seed) if skip_batches > 0: dataset = dataset.skip(skip_batches * batch_size) if prefetch_size > 0: dataset = IterableWrapper(dataset) dataset = dataset.prefetch(prefetch_size) data_loader = DataLoader(dataset, batch_size=batch_size, drop_last=drop_last, pin_memory=pin_memory, collate_fn=data_collator, num_workers=dataloader_num_workers) return data_loader def
sorted_checkpoints(output_dir=None, checkpoint_prefix='checkpoint', use_mtime=False) -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob(f'{checkpoint_prefix}-*') if os.path.isdir(x)] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(f'.*{checkpoint_prefix}-([0-9]+)', path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] return checkpoints_sorted def rotate_checkpoints(save_total_limit=None, use_mtime=False, output_dir=None, checkpoint_prefix='checkpoint') -> None: if save_total_limit is None or save_total_limit <= 0: return checkpoints_sorted = sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir, checkpoint_prefix=checkpoint_prefix) if len(checkpoints_sorted) <= save_total_limit: return number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info(f'Deleting older checkpoint [{checkpoint}] due to args.save_total_limit') shutil.rmtree(checkpoint, ignore_errors=True) def to_fp32(t): return jax.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, t) def to_bf16(t): return jax.tree_map(lambda x: x.astype(jnp.bfloat16) if x.dtype == jnp.float32 else x, t) class TrainState(train_state.TrainState): dropout_rng: jnp.ndarray max_grad_norm: float def apply_gradients(self, *, grads, to_dtype: to_fp32, **kwargs): casted_max_grad_norm = to_dtype(self.max_grad_norm) g_norm = linear_algebra.global_norm(grads) g_norm = jnp.maximum(casted_max_grad_norm, g_norm) grads = jax.tree_map(lambda t: t / g_norm * casted_max_grad_norm, grads) (updates, new_opt_state) = self.tx.update(to_fp32(grads), to_fp32(self.opt_state), self.params) new_params = optax.apply_updates(self.params, updates) return self.replace(step=self.step + 1, params=new_params, opt_state=to_dtype(new_opt_state), **kwargs) @classmethod def create(cls, *, apply_fn, params, tx, to_dtype: to_fp32, **kwargs): opt_state = tx.init(to_dtype(params)) return cls(step=0, apply_fn=apply_fn, params=params, tx=tx, opt_state=opt_state, **kwargs) def replicate(self): return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng)) def unreplicate(self): return jax_utils.unreplicate(self) def save_state(self, output_dir, save_total_limit=None, checkpoint_prefix='checkpoint'): step = int(jax.device_get(unreplicate(self.step))) serialized_state = to_bytes(self.unreplicate()) output_file = Path(os.path.join(output_dir, f'{checkpoint_prefix}-{step}', 'train_state.msgpack')) output_file.parent.mkdir(exist_ok=True, parents=True) with output_file.open('wb') as f: f.write(serialized_state) logger.info(f'Flax train state saved in {output_file}') rotate_checkpoints(save_total_limit=save_total_limit, output_dir=output_dir, checkpoint_prefix=checkpoint_prefix) def save_hf_weights(student_state: TrainState, student_model: FlaxWhisperForConditionalGeneration, processor: WhisperProcessor, output_dir: str, cur_step: int, total_train_steps: int, use_scan: bool=True, checkpoint_prefix: str='checkpoint') -> None: student_state_params = unreplicate(student_state.params) student_state_params = 
student_model.convert_scan_to_unroll(student_state_params) student_params = jax.device_get(student_state_params) student_model.disable_scan() if cur_step != total_train_steps: output_dir = os.path.join(output_dir, f'{checkpoint_prefix}-{cur_step}') os.makedirs(output_dir, exist_ok=True) student_model.save_pretrained(output_dir, params=student_params) processor.save_pretrained(output_dir) if use_scan: student_model.enable_scan() def write_train_metric(summary_writer, train_metrics, train_time, step, logging_steps): summary_writer.scalar('train/time', train_time, step) train_metrics = get_metrics(train_metrics) for (key, vals) in train_metrics.items(): steps_arr = np.arange(0, step, logging_steps)[-len(vals):] tag = f'train/{key}' for (i, val) in enumerate(vals): summary_writer.scalar(tag, val, steps_arr[i]) def write_eval_metric(summary_writer, eval_metrics, step, prefix='eval'): for (metric_name, value) in eval_metrics.items(): summary_writer.scalar(f'{prefix}/{metric_name}', value, step) def write_wandb_metric(wandb_logger, metrics, train_time, step, epoch, prefix='train'): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v log_metrics[f'{prefix}/time'] = train_time log_metrics[f'{prefix}/epoch'] = epoch wandb_logger.log(log_metrics, step) def write_wandb_pred(wandb_logger, pred_str, label_str, norm_pred_str, norm_label_str, cur_step, prefix='eval', num_lines=200000): cur_step_pretty = f'{int(cur_step // 1000)}k' if cur_step > 1000 else cur_step str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))] wandb_logger.log({f"predictions/{prefix.replace('/', '-')}-step-{cur_step_pretty}": wandb_logger.Table(columns=['Target', 'Pred', 'Norm Target', 'Norm Pred'], data=str_data[:num_lines])}, cur_step) str_data = np.asarray(str_data) str_data_incorrect = str_data[str_data[:, -2] != str_data[:, -1]] wandb_logger.log({f"incorrect_predictions/{prefix.replace('/', '-')}-step-{cur_step_pretty}": wandb_logger.Table(columns=['Target', 'Pred', 'Norm Target', 'Norm Pred'], data=str_data_incorrect[:num_lines])}, cur_step) def create_learning_rate_fn(num_train_steps: int, lr_scheduler_type: str, num_warmup_steps: int, learning_rate: float) -> Callable[[int], jnp.array]: lr_scheduler_types = ('linear', 'constant_with_warmup') if lr_scheduler_type not in lr_scheduler_types: raise ValueError(f'lr_scheduler_type of type {lr_scheduler_type} not supported, choose from {lr_scheduler_types}.') warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule(init_value=learning_rate, end_value=0 if lr_scheduler_type == 'linear' else learning_rate, transition_steps=num_train_steps - num_warmup_steps) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def convert_dataset_str_to_list(dataset_names, dataset_config_names, splits=None, text_column_names=None, dataset_samples=None, default_split='train'): if isinstance(dataset_names, str): dataset_names = dataset_names.split('+') for i in range(len(dataset_names)): ds_name = dataset_names[i] dataset_names[i] = f'distil-whisper/{ds_name}' if '/' not in ds_name else ds_name dataset_config_names = dataset_config_names.split('+') splits = splits.split('+') if splits is not None else None text_column_names = text_column_names.split('+') if text_column_names is not None else None dataset_samples = dataset_samples.split('+') if dataset_samples is not None 
else None if len(dataset_names) != len(dataset_config_names): raise ValueError(f'Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_config_names)} configs.') if splits is not None and len(splits) != len(dataset_names): raise ValueError(f'Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits.') if text_column_names is not None and len(text_column_names) != len(dataset_names): raise ValueError(f'Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and {len(text_column_names)} text column names.') if dataset_samples is not None: if len(dataset_samples) != len(dataset_names): raise ValueError(f'Ensure one sample is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_samples)} samples.') dataset_samples = [float(ds_sample) for ds_sample in dataset_samples] else: dataset_samples = [None] * len(dataset_names) text_column_names = text_column_names if text_column_names is not None else ['text' for _ in range(len(dataset_names))] splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))] dataset_names_dict = [] for (i, ds_name) in enumerate(dataset_names): dataset_names_dict.append({'name': ds_name, 'config': dataset_config_names[i], 'split': splits[i], 'text_column_name': text_column_names[i], 'samples': dataset_samples[i]}) return dataset_names_dict def load_multiple_datasets(dataset_names: Union[List, str], dataset_config_names: Union[List, str], splits: Optional[Union[List, str]]=None, text_column_names: Optional[List]=None, sampling_rate: Optional[int]=16000, stopping_strategy: Optional[str]='first_exhausted', dataset_samples: Optional[Union[List, np.array]]=None, streaming: bool=True, seed: int=None, **kwargs) -> IterableDataset: dataset_names_dict = convert_dataset_str_to_list(dataset_names, dataset_config_names, splits, text_column_names, dataset_samples) if dataset_samples is not None: dataset_samples = [ds_dict['samples'] for ds_dict in dataset_names_dict] probabilities = np.array(dataset_samples) / np.sum(dataset_samples) else: probabilities = None if len(dataset_names_dict) == 1: dataset_dict = dataset_names_dict[0] return load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], streaming=streaming, **kwargs) all_datasets = [] for dataset_dict in tqdm(dataset_names_dict, desc='Combining datasets...'): dataset = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], streaming=streaming, **kwargs) dataset = dataset.cast_column('audio', datasets.features.Audio(sampling_rate)) dataset = dataset.remove_columns(set(dataset.features.keys()) - {'audio', dataset_dict['text_column_name'], 'whisper_transcript'}) all_datasets.append(dataset) if streaming: interleaved_dataset = interleave_datasets(all_datasets, stopping_strategy=stopping_strategy, probabilities=probabilities, seed=seed) else: interleaved_dataset = concatenate_datasets(all_datasets) return interleaved_dataset def get_layers_to_supervise(student_layers: int, teacher_layers: int) -> dict: layer_intervals = np.linspace(teacher_layers // student_layers - 1, teacher_layers - 1, student_layers, dtype=int) layer_intervals[-1] = teacher_layers - 1 layer_map = {} for (student_layer, teacher_layer) in enumerate(layer_intervals): layer_map[student_layer] = teacher_layer return layer_map class FlaxWhisperFeatureExtractor(WhisperFeatureExtractor): def _np_extract_fbank_features(self, waveform: 
np.array) -> np.ndarray: waveform = torch.from_numpy(waveform).type(torch.float32) window = torch.hann_window(self.n_fft) stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) magnitudes = stft[..., :-1].abs() ** 2 mel_filters = torch.from_numpy(self.mel_filters).type(torch.float32) mel_spec = mel_filters.T @ magnitudes log_spec = torch.clamp(mel_spec, min=1e-10).log10() log_spec = torch.maximum(log_spec, log_spec.max() - 8.0) log_spec = (log_spec + 4.0) / 4.0 return log_spec.numpy() def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, FlaxSeq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() send_example_telemetry('run_flax_speech_recognition_seq2seq', model_args, data_args, framework='flax') has_tensorboard = is_tensorboard_available() if has_tensorboard: if jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=os.path.join(Path(training_args.output_dir), 'runs')) except ImportError as ie: has_tensorboard = False logger.warning(f'Unable to display metrics through TensorBoard because some package are not installed: {ie}') else: logger.warning('Unable to display metrics through TensorBoard because the package is not installed: Please run `pip install tensorboard` to enable.') has_wandb = is_wandb_available() if has_wandb: import wandb as wandb_logger if jax.process_index() == 0: wandb_logger.init(project=data_args.wandb_project, name=data_args.wandb_name, job_type=data_args.wandb_job_type, dir=data_args.wandb_dir, save_code=data_args.save_code_to_wandb) else: logger.warning('Wandb logging requires wandb to be installed. Run `pip install wandb` to enable.') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() logger.info('Training/evaluation parameters %s', training_args) if os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir): raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. 
Use `--overwrite_output_dir` to overcome.') if training_args.push_to_hub: if training_args.hub_model_id is None: repo_name = get_full_repo_name(Path(training_args.output_dir).absolute().name, token=training_args.hub_token) else: repo_name = training_args.hub_model_id create_repo(repo_name, exist_ok=True, token=training_args.hub_token) repo = Repository(training_args.output_dir, clone_from=repo_name, token=training_args.hub_token) if training_args.compilation_cache: cc.initialize_cache(os.path.join(model_args.cache_dir, 'jax_cache')) raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() set_seed(training_args.seed) if training_args.do_train: raw_datasets['train'] = load_multiple_datasets(data_args.train_dataset_name, data_args.train_dataset_config_name, splits=data_args.train_split_name, streaming=data_args.streaming, dataset_samples=data_args.train_dataset_samples, seed=training_args.seed, cache_dir=data_args.dataset_cache_dir, token=True if model_args.use_auth_token else None) if training_args.do_eval: dataset_names_dict = convert_dataset_str_to_list(data_args.eval_dataset_name if data_args.eval_dataset_name else data_args.train_dataset_name, data_args.eval_dataset_config_name if data_args.eval_dataset_config_name else data_args.train_dataset_config_name, splits=data_args.eval_split_name, text_column_names=data_args.eval_text_column_name) all_eval_splits = [] if len(dataset_names_dict) == 1: dataset_dict = dataset_names_dict[0] all_eval_splits.append('eval') raw_datasets['eval'] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, token=True if model_args.use_auth_token else None, streaming=data_args.streaming) else: for dataset_dict in dataset_names_dict: if dataset_dict['name'] == 'esb/diagnostic-dataset': pretty_name = f"{dataset_dict['config']}-diagnostic/{dataset_dict['split']}" else: pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}" all_eval_splits.append(pretty_name) raw_datasets[pretty_name] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, token=True if model_args.use_auth_token else None, streaming=data_args.streaming) features = raw_datasets[pretty_name].features.keys() if 'text' not in features: raw_datasets[pretty_name] = raw_datasets[pretty_name].rename_column(dataset_dict['text_column_name'], 'text') raw_datasets[pretty_name] = raw_datasets[pretty_name].remove_columns(set(raw_datasets[pretty_name].features.keys()) - {'audio', 'text'}) if not training_args.do_train and (not training_args.do_eval): raise ValueError('Cannot skip both training and evaluation. At least one of training or evaluation has to be performed.') raw_datasets_train_features = list(raw_datasets['train'].features.keys()) if data_args.audio_column_name not in raw_datasets_train_features: raise ValueError(f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.train_dataset_name}'. Make sure to set `--audio_column_name` to the correct audio column - one of {', '.join(raw_datasets_train_features)}.") if data_args.train_text_column_name not in raw_datasets_train_features: raise ValueError(f"--train_text_column_name {data_args.train_text_column_name} not found in dataset '{data_args.train_dataset_name}'. 
Make sure to set `--train_text_column_name` to the correct text column - one of {', '.join(raw_datasets_train_features)}.") config = WhisperConfig.from_pretrained(model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None) feature_extractor = FlaxWhisperFeatureExtractor.from_pretrained(model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None) tokenizer = WhisperTokenizerFast.from_pretrained(model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None) timestamps = [AddedToken('<|%.2f|>' % (i * 0.02), lstrip=False, rstrip=False) for i in range(1500 + 1)] tokenizer.add_tokens(timestamps) config.update({'activation_dropout': model_args.activation_dropout, 'attention_dropout': model_args.attention_dropout, 'dropout': model_args.dropout}) if training_args.precision == 'full_mixed': dtype = jnp.bfloat16 to_dtype = to_bf16 elif training_args.precision == 'half_mixed' or model_args.dtype == 'bfloat16': dtype = jnp.bfloat16 to_dtype = to_fp32 else: if training_args.precision != 'full': raise ValueError(f'`precision` should be one of: `full`, `half_mixed` or `full_mixed`, got {training_args.precision}') dtype = jnp.float32 to_dtype = to_fp32 (student_model, student_params) = FlaxWhisperForConditionalGeneration.from_pretrained(model_args.model_name_or_path, config=config, dtype=dtype, cache_dir=model_args.cache_dir, revision=model_args.model_revision, subfolder=model_args.subfolder, token=True if model_args.use_auth_token else None, _do_init=False, use_scan=model_args.load_with_scan_weights) (teacher_model, teacher_params) = FlaxWhisperForConditionalGeneration.from_pretrained(model_args.teacher_model_name_or_path, dtype=dtype, cache_dir=model_args.cache_dir, token=True if model_args.use_auth_token else None, _do_init=False) if student_model.config.decoder_start_token_id is None or teacher_model.config.decoder_start_token_id is None: raise ValueError(f'Make sure that `config.decoder_start_token_id` is correctly defined for both the student and teacher model. 
Got {student_model.config.decoder_start_token_id} for the student and {teacher_model.config.decoder_start_token_id} for the teacher.') if training_args.use_scan: student_model.enable_scan() student_params = student_model.convert_unroll_to_scan(student_params) teacher_model.enable_scan() teacher_params = teacher_model.convert_unroll_to_scan(teacher_params) if training_args.gradient_checkpointing: student_model.enable_gradient_checkpointing() teacher_model.enable_gradient_checkpointing() if hasattr(teacher_model.generation_config, 'is_multilingual') and teacher_model.generation_config.is_multilingual: tokenizer.set_prefix_tokens(language='English', task='transcribe', predict_timestamps=False) student_model.generation_config.update(**{'language': '<|en|>', 'task': 'transcribe'}) raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)) max_input_length = int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) min_input_length = int(data_args.min_duration_in_seconds * feature_extractor.sampling_rate) max_label_length = data_args.max_label_length if data_args.max_label_length is not None else student_model.config.max_length audio_column_name = data_args.audio_column_name num_workers = data_args.preprocessing_num_workers dataloader_num_workers = training_args.dataloader_num_workers dataloader_prefetch_size = data_args.prefetch_size train_text_column_name = data_args.train_text_column_name eval_text_column_name = 'text' model_input_name = feature_extractor.model_input_names[0] normalizer = EnglishTextNormalizer(tokenizer.english_spelling_normalizer) wer_threshold = data_args.wer_threshold round_timestamps = data_args.round_timestamps if training_args.do_train and data_args.max_train_samples is not None: raw_datasets['train'] = raw_datasets['train'].take(data_args.max_train_samples) if data_args.streaming else raw_datasets['train'].select(range(data_args.max_train_samples)) if training_args.do_eval and data_args.max_eval_samples is not None: for eval_split in all_eval_splits: raw_datasets[eval_split] = raw_datasets[eval_split].take(data_args.max_eval_samples) if data_args.streaming else raw_datasets[eval_split].select(range(data_args.max_eval_samples)) def is_wer_in_range(ground_truth, whisper_transcript): norm_ground_truth = normalizer(ground_truth) if len(norm_ground_truth) > 0 and whisper_transcript is not None: norm_whisper_transcript = normalizer(whisper_transcript) wer = 100 * metric.compute(predictions=[norm_whisper_transcript], references=[norm_ground_truth]) return wer < wer_threshold else: return False filter_by_wer_threshold = partial(raw_datasets['train'].filter, function=is_wer_in_range, input_columns=[eval_text_column_name, train_text_column_name]) if wer_threshold is not None: raw_datasets['train'] = filter_by_wer_threshold(num_proc=num_workers, desc='filtering train dataset by wer') if not data_args.streaming else filter_by_wer_threshold() def has_timestamp_tokens(input_str): return bool(re.search('\\<[^\\>]*\\>', input_str)) def round_timestamp_tokens(input_str: str, ndigits: int=1): timestamps = re.findall('\\<[^\\>]*\\>', input_str, re.DOTALL) for token in timestamps: time_digit = token[2:-2] time_digit = round(float(time_digit), ndigits=ndigits) input_str = input_str.replace(token, '<|{:.2f}|>'.format(time_digit)) return input_str def prepare_train_dataset(batch): sample = batch[audio_column_name] inputs = feature_extractor(sample['array'], sampling_rate=sample['sampling_rate']) 
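# Descriptive note (added): the extracted log-mel features are stored under the model input name, and the raw audio length (in samples) is kept as 'input_length' so examples can later be filtered against the min/max duration bounds.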
batch[model_input_name] = inputs.get(model_input_name)[0] batch['input_length'] = len(sample['array']) input_str = batch[train_text_column_name] if input_str.startswith('<|startoftranscript|>') or input_str.startswith('<|startofprev|>'): batch['labels'] = tokenizer(input_str, add_special_tokens=False).input_ids return batch has_timestamps = has_timestamp_tokens(input_str) if has_timestamps: predict_timestamps = bool(np.random.binomial(1, data_args.timestamp_probability)) if not predict_timestamps: input_str = tokenizer._filter_timestamp_ids(input_str) elif round_timestamps: input_str = round_timestamp_tokens(input_str) else: predict_timestamps = False tokenizer.set_prefix_tokens(language='English', task='transcribe', predict_timestamps=predict_timestamps) input_ids = tokenizer(input_str).input_ids batch['labels'] = input_ids return batch def prepare_eval_dataset(batch): sample = batch[audio_column_name] inputs = feature_extractor(sample['array'], sampling_rate=sample['sampling_rate']) batch[model_input_name] = inputs.get(model_input_name)[0] batch['input_length'] = len(sample['array']) input_str = batch[eval_text_column_name] batch['labels'] = tokenizer(input_str).input_ids return batch vectorized_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() if training_args.do_train: map_fn_train = partial(raw_datasets['train'].map, function=prepare_train_dataset, remove_columns=raw_datasets_train_features) vectorized_datasets['train'] = map_fn_train(num_proc=num_workers, desc='preprocess train dataset') if not data_args.streaming else map_fn_train() if training_args.do_eval: for eval_split in all_eval_splits: raw_datasets_eval_features = list(raw_datasets[eval_split].features.keys()) map_fn_eval = partial(raw_datasets[eval_split].map, function=prepare_eval_dataset, remove_columns=raw_datasets_eval_features) vectorized_datasets[eval_split] = map_fn_eval(num_proc=num_workers, desc='preprocess eval dataset') if not data_args.streaming else map_fn_eval() def is_audio_in_length_range(length): return min_input_length < length < max_input_length filter_by_audio_fn = partial(vectorized_datasets.filter, function=is_audio_in_length_range, input_columns=['input_length']) vectorized_datasets = filter_by_audio_fn(num_proc=num_workers, desc='filtering train dataset by audio length') if not data_args.streaming else filter_by_audio_fn() def is_labels_in_length_range(labels): return 0 < len(labels) < max_label_length filter_by_labels_fn = partial(vectorized_datasets.filter, function=is_labels_in_length_range, input_columns=['labels']) vectorized_datasets = filter_by_labels_fn(num_proc=num_workers, desc='filtering train dataset') if not data_args.streaming else filter_by_labels_fn() if data_args.preprocessing_only: cache = {k: v.cache_files for (k, v) in vectorized_datasets.items()} logger.info(f'Data preprocessing finished. 
Files cached at {cache}.') return metric = evaluate.load('wer') all_punctuation = list(string.punctuation.replace("'", '')) return_timestamps = data_args.return_timestamps if data_args.timestamp_probability > 0 else False def compute_metrics(preds, labels): for idx in range(len(labels)): labels[idx][labels[idx] == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps) label_str = tokenizer.batch_decode(labels, skip_special_tokens=True) spaced_pred_str = [pred_str[i].replace(punctuation, f' {punctuation} ') for punctuation in all_punctuation for i in range(len(pred_str))] spaced_label_str = [label_str[i].replace(punctuation, f' {punctuation} ') for punctuation in all_punctuation for i in range(len(label_str))] wer_ortho = 100 * metric.compute(predictions=spaced_pred_str, references=spaced_label_str) norm_pred_str = [normalizer(pred) for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str) return ({'wer': wer, 'wer_ortho': wer_ortho}, pred_str, label_str, norm_pred_str, norm_label_str) feature_extractor.save_pretrained(training_args.output_dir) tokenizer.save_pretrained(training_args.output_dir) config.save_pretrained(training_args.output_dir) student_model.generation_config.save_pretrained(training_args.output_dir) processor = WhisperProcessor.from_pretrained(training_args.output_dir) data_collator = FlaxDataCollatorSpeechSeq2SeqWithPadding(processor=processor, decoder_start_token_id=student_model.config.decoder_start_token_id, decoder_prev_token_id=tokenizer.all_special_ids[-3], input_padding='longest', target_padding='max_length', max_target_length=max_label_length) rng = jax.random.PRNGKey(training_args.seed) (rng, dropout_rng) = jax.random.split(rng) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() gradient_accumulation_steps = int(training_args.gradient_accumulation_steps) per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() if not data_args.streaming and training_args.max_steps < 0: num_epochs = int(training_args.num_train_epochs) steps_per_epoch = len(vectorized_datasets['train']) // train_batch_size total_train_steps = steps_per_epoch * num_epochs elif training_args.max_steps > 0: logger.info('max_steps is given, it will override any value given in num_train_epochs') total_train_steps = int(training_args.max_steps) num_epochs = sys.maxsize steps_per_epoch = total_train_steps else: raise ValueError('max_steps must be specified when training with a streaming (iterable) dataset') if training_args.eval_steps is None: logger.info(f"eval_steps is not set, evaluating at the end of {('each epoch' if not data_args.streaming else 'training')}") eval_steps = steps_per_epoch else: eval_steps = training_args.eval_steps linear_decay_lr_schedule_fn = create_learning_rate_fn(total_train_steps * gradient_accumulation_steps, training_args.lr_scheduler_type, training_args.warmup_steps * 
gradient_accumulation_steps, training_args.learning_rate) def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) layer_norm_candidates = ['layer_norm', 'self_attn_layer_norm', 'final_layer_norm', 'encoder_attn_layer_norm'] layer_norm_named_params = {layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in ''.join(layer).lower()} flat_mask = {path: path[-1] != 'bias' and path[-2:] not in layer_norm_named_params for path in flat_params} return traverse_util.unflatten_dict(flat_mask) adamw = optax.adamw(learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn) if gradient_accumulation_steps > 1: adamw = optax.MultiSteps(adamw, every_k_schedule=gradient_accumulation_steps) share_hidden_states = training_args.freeze_encoder and student_model.config.d_model == teacher_model.config.d_model encoder_layer_mapping = get_layers_to_supervise(student_model.config.encoder_layers, teacher_model.config.encoder_layers) decoder_layer_mapping = get_layers_to_supervise(student_model.config.decoder_layers, teacher_model.config.decoder_layers) student_state = TrainState.create(apply_fn=student_model.decode if share_hidden_states else student_model.__call__, params=student_params, tx=adamw, to_dtype=to_dtype, dropout_rng=dropout_rng, max_grad_norm=training_args.max_grad_norm) if training_args.resume_from_checkpoint is not None: if os.path.isfile(os.path.join(training_args.resume_from_checkpoint, 'train_state.msgpack')): logger.info(f'Checkpoint detected, resuming training at {training_args.resume_from_checkpoint}. To avoid this behavior, omit the resume_from_checkpoint argument.') with Path(os.path.join(training_args.resume_from_checkpoint, 'train_state.msgpack')).open('rb') as f: student_state = from_bytes(student_state, f.read()) else: logger.warning(f'Checkpoint {training_args.resume_from_checkpoint} not detected, training from scratch. 
Ensure you pass the path to a folder with a valid checkpoint for your model.') def cross_entropy_loss(logits, labels): vocab_size = logits.shape[-1] onehot_targets = to_dtype(onehot(labels, vocab_size)) loss = optax.softmax_cross_entropy(logits, onehot_targets) padding = labels >= 0 loss = loss * padding loss = loss.sum() num_labels = padding.sum() return (loss, num_labels) def kl_divergence(target_distribution, log_predicted_distribution, labels, eps=1e-20): divergence = -target_distribution * (log_predicted_distribution - jnp.log(target_distribution + eps)) padding_mask = labels >= 0 padding_mask = jnp.expand_dims(padding_mask, axis=-1) divergence = (divergence * padding_mask).sum() return to_dtype(divergence) def mean_square_error_loss(student_outputs, teacher_outputs): mse = dtype(0.0) mse += jnp.mean(jnp.square(teacher_outputs.encoder_hidden_states[0] - student_outputs.encoder_hidden_states[0])) for (student_layer_id, teacher_layer_id) in encoder_layer_mapping.items(): student_hidden_state = student_outputs.encoder_hidden_states[student_layer_id + 1] teacher_hidden_state = teacher_outputs.encoder_hidden_states[teacher_layer_id + 1] mse += jnp.mean(jnp.square(teacher_hidden_state - student_hidden_state)) mse += jnp.mean(jnp.square(teacher_outputs.decoder_hidden_states[0] - student_outputs.decoder_hidden_states[0])) for (student_layer_id, teacher_layer_id) in decoder_layer_mapping.items(): student_hidden_state = student_outputs.decoder_hidden_states[student_layer_id + 1] teacher_hidden_state = teacher_outputs.decoder_hidden_states[teacher_layer_id + 1] mse += jnp.mean(jnp.square(teacher_hidden_state - student_hidden_state)) return to_dtype(mse) def train_step(student_state, teacher_params, batch, freeze_encoder, share_hidden_states, temperature=2.0): (dropout_rng, new_dropout_rng) = jax.random.split(student_state.dropout_rng) def compute_loss(student_params): labels = batch.pop('labels') output_hidden_states = not share_hidden_states and training_args.mse_weight > 0.0 teacher_outputs = teacher_model(**batch, params=teacher_params, freeze_encoder=True, output_hidden_states=output_hidden_states, train=False) if share_hidden_states: encoder_hidden_states = jax.lax.stop_gradient(teacher_outputs.encoder_last_hidden_state) encoder_outputs = FlaxBaseModelOutput(last_hidden_state=encoder_hidden_states) student_outputs = student_state.apply_fn(decoder_input_ids=batch['decoder_input_ids'], encoder_outputs=encoder_outputs, params=student_params, dropout_rng=dropout_rng, train=True) else: student_outputs = student_state.apply_fn(**batch, params=student_params, dropout_rng=dropout_rng, freeze_encoder=freeze_encoder, output_hidden_states=output_hidden_states, train=True) (ce_loss, num_labels) = cross_entropy_loss(student_outputs.logits, labels) teacher_distribution = jax.nn.softmax(teacher_outputs.logits / temperature, axis=-1) teacher_distribution = jax.lax.stop_gradient(teacher_distribution) student_distribution = jax.nn.log_softmax(student_outputs.logits / temperature, axis=-1) kl_loss = kl_divergence(teacher_distribution, student_distribution, labels) * temperature ** 2 mse_loss = mean_square_error_loss(student_outputs, teacher_outputs) if output_hidden_states else jnp.zeros_like(kl_loss) ce_weight = 0.8 if training_args.kl_weight > 0 else 1.0 loss = ce_weight * ce_loss + training_args.kl_weight * kl_loss + training_args.mse_weight * mse_loss return (loss, (ce_loss, kl_loss, mse_loss, num_labels)) grad_fn = jax.value_and_grad(compute_loss, has_aux=True) ((loss, (ce_loss, kl_loss, mse_loss, 
num_labels)), grad) = grad_fn(to_dtype(student_state.params)) loss = jax.lax.psum(loss, 'batch') num_labels = jax.lax.psum(num_labels, 'batch') loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) grad = jax.lax.psum(grad, 'batch') grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = student_state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng, to_dtype=to_dtype) ce_loss = jax.lax.psum(ce_loss, 'batch') ce_loss = jax.tree_util.tree_map(lambda x: x / num_labels, ce_loss) kl_loss = jax.lax.psum(kl_loss, 'batch') kl_loss = jax.tree_util.tree_map(lambda x: x / num_labels, kl_loss) mse_loss = jax.lax.psum(mse_loss, 'batch') mse_loss = jax.tree_util.tree_map(lambda x: x / num_labels, mse_loss) metrics = {'loss': loss, 'learning_rate': linear_decay_lr_schedule_fn(student_state.step), 'ce_loss': ce_loss, 'kl_loss': kl_loss, 'mse_loss': mse_loss} return (new_state, metrics) def eval_step(student_params, teacher_params, batch): labels = batch.pop('labels') output_hidden_states = not share_hidden_states and training_args.mse_weight > 0 student_outputs = student_model(**batch, params=student_params, output_hidden_states=output_hidden_states, train=False) student_distribution = jax.nn.log_softmax(student_outputs.logits, axis=-1) (ce_loss, num_labels) = cross_entropy_loss(student_outputs.logits, labels) teacher_outputs = teacher_model(**batch, params=teacher_params, output_hidden_states=output_hidden_states, train=False) teacher_distribution = jax.nn.softmax(teacher_outputs.logits, axis=-1) kl_loss = kl_divergence(teacher_distribution, student_distribution, labels) mse_loss = mean_square_error_loss(student_outputs, teacher_outputs) if output_hidden_states else jnp.zeros_like(kl_loss) ce_weight = 0.8 if training_args.kl_weight > 0 else 1.0 loss = ce_weight * ce_loss + training_args.kl_weight * kl_loss + training_args.mse_weight * mse_loss loss = jax.lax.psum(loss, 'batch') num_labels = jax.lax.psum(num_labels, 'batch') loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) ce_loss = jax.lax.psum(ce_loss, 'batch') ce_loss = jax.tree_util.tree_map(lambda x: x / num_labels, ce_loss) kl_loss = jax.lax.psum(kl_loss, 'batch') kl_loss = jax.tree_util.tree_map(lambda x: x / num_labels, kl_loss) mse_loss = jax.lax.psum(mse_loss, 'batch') mse_loss = jax.tree_util.tree_map(lambda x: x / num_labels, mse_loss) metrics = {'loss': loss, 'ce_loss': ce_loss, 'kl_loss': kl_loss, 'mse_loss': mse_loss} return metrics num_beams = training_args.generation_num_beams if training_args.generation_num_beams is not None else student_model.config.num_beams gen_kwargs = {'max_length': max_label_length, 'num_beams': num_beams, 'language': '<|en|>', 'task': 'transcribe', 'return_timestamps': return_timestamps} def generate_step(student_params, batch): output_ids = student_model.generate(batch[model_input_name], attention_mask=batch.get('attention_mask'), params=student_params, **gen_kwargs) return output_ids.sequences student_state = student_state.replicate() teacher_params = jax_utils.replicate(teacher_params) p_train_step = jax.pmap(train_step, 'batch', in_axes=(0, 0, 0, None, None, None), donate_argnums=(0,), static_broadcasted_argnums=(3, 4)) p_eval_step = jax.pmap(eval_step, 'batch') p_generate_step = jax.pmap(generate_step, 'batch') logger.info('***** Running training *****') logger.info(f' Num examples = {total_train_steps * train_batch_size * gradient_accumulation_steps}') logger.info(f' Instantaneous batch size per device = {training_args.per_device_train_batch_size}') 
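# Clarifying comment (not in the original source): train_batch_size above is per_device_train_batch_size * jax.device_count(),
# so the effective batch per optimizer update is train_batch_size * gradient_accumulation_steps.
# Illustrative arithmetic: 8 devices with a per-device batch of 4 and 2 accumulation steps give 8 * 4 * 2 = 64 examples per update.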
logger.info(f' Gradient accumulation steps = {gradient_accumulation_steps}') logger.info(f' Total train batch size (w. parallel & distributed) = {train_batch_size * gradient_accumulation_steps}') logger.info(f' Total optimization steps = {total_train_steps}') train_time = 0 train_start = time.time() train_metrics = [] batches_to_skip = jax.device_get(unreplicate(student_state.step)) cur_step = int(batches_to_skip) epochs_trained = batches_to_skip // steps_per_epoch steps_trained_progress_bar = tqdm(range(total_train_steps), desc='Train steps ... ', position=0) steps_trained_progress_bar.update(batches_to_skip) continue_training = True minibatch_steps = 0 if batches_to_skip > 0: logger.info(' Continuing training from checkpoint, will skip to saved global_step') logger.info(f' Continuing training from epoch {epochs_trained}') logger.info(f' Continuing training from global step {batches_to_skip}') train_loader = get_data_loader(training_args.seed, vectorized_datasets['train'], batch_size=train_batch_size, data_collator=data_collator, dataloader_num_workers=dataloader_num_workers, skip_batches=batches_to_skip, prefetch_size=dataloader_prefetch_size) for epoch in range(epochs_trained, num_epochs): if hasattr(train_loader, 'dataset') and isinstance(train_loader.dataset, IterableDataset): train_loader.dataset.set_epoch(epoch) for batch in train_loader: minibatch_steps += 1 update_step = minibatch_steps == gradient_accumulation_steps if update_step: steps_trained_progress_bar.update(1) cur_step += 1 minibatch_steps = 0 batch = shard(batch.data) (student_state, train_metric) = p_train_step(student_state, teacher_params, batch, training_args.freeze_encoder, share_hidden_states, training_args.temperature) if cur_step % training_args.logging_steps == 0 and update_step: train_metrics.append(train_metric) train_metric_to_write = unreplicate(train_metric) steps_trained_progress_bar.write(f"Step... 
({cur_step} / {total_train_steps} | Loss: {train_metric_to_write['loss']}, Learning Rate: {train_metric_to_write['learning_rate']})") if has_wandb and jax.process_index() == 0: write_wandb_metric(wandb_logger, train_metric_to_write, train_time + time.time() - train_start, cur_step, epoch, prefix='train') if cur_step % training_args.save_steps == 0 and update_step or cur_step == total_train_steps: if jax.process_index() == 0: save_hf_weights(student_state, student_model, processor, training_args.output_dir, cur_step, total_train_steps, use_scan=training_args.use_scan) if training_args.save_train_state: student_state.save_state(training_args.output_dir, save_total_limit=training_args.save_total_limit) if training_args.push_to_hub: repo.push_to_hub(commit_message=f'Saving train state of step {cur_step}', blocking=False) if training_args.do_eval and (cur_step % eval_steps == 0 and update_step or cur_step == total_train_steps): train_time += time.time() - train_start for eval_split in all_eval_splits: eval_metrics = [] eval_preds = [] eval_labels = [] eval_start = time.time() eval_loader = get_data_loader(training_args.seed, vectorized_datasets[eval_split], batch_size=eval_batch_size, data_collator=data_collator, shuffle=False, drop_last=False, dataloader_num_workers=dataloader_num_workers) for batch in tqdm(eval_loader, desc=f'Evaluating {eval_split}...', position=2): labels = batch['labels'] metrics = pad_shard_unpad(p_eval_step, static_argnums=(0, 1), static_return=True)(student_state.params, teacher_params, batch.data, min_device_batch=per_device_eval_batch_size) eval_metrics.append(metrics) if training_args.predict_with_generate: generated_ids = pad_shard_unpad(p_generate_step)(student_state.params, batch.data, min_device_batch=per_device_eval_batch_size) eval_preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs['max_length']))) eval_labels.extend(labels) eval_time = time.time() - eval_start eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) wer_desc = '' if training_args.predict_with_generate: (wer_metric, pred_str, label_str, norm_pred_str, norm_label_str) = compute_metrics(eval_preds, eval_labels) eval_metrics.update(wer_metric) wer_desc = ' '.join([f'Eval {key}: {value} |' for (key, value) in wer_metric.items()]) steps_trained_progress_bar.write(f"Eval results for step ({cur_step} / {total_train_steps} | Eval Loss: {eval_metrics['loss']} | {wer_desc})") if has_tensorboard and jax.process_index() == 0: write_eval_metric(summary_writer, eval_metrics, cur_step, prefix=eval_split) if has_wandb and jax.process_index() == 0: write_wandb_metric(wandb_logger, eval_metrics, eval_time, cur_step, epoch, prefix=eval_split) if training_args.predict_with_generate: write_wandb_pred(wandb_logger, pred_str, label_str, norm_pred_str, norm_label_str, cur_step, prefix=eval_split) if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, cur_step, training_args.logging_steps) train_start = time.time() train_metrics = [] if cur_step == total_train_steps: continue_training = False break if not continue_training: break if __name__ == '__main__': main() # File: distil-whisper-main/training/flax/run_eval.py """""" import logging import os import string import sys import time from dataclasses import field from functools import partial from pathlib import Path from typing import Any, Dict, List, Optional, Union import datasets import evaluate import flax import jax import jax.numpy as jnp import 
numpy as np import optax import torch import transformers from datasets import Dataset, DatasetDict, IterableDatasetDict, load_dataset from flax import jax_utils from flax.jax_utils import pad_shard_unpad from flax.training.common_utils import get_metrics, onehot from torch.utils.data import DataLoader from tqdm import tqdm from transformers import HfArgumentParser, Seq2SeqTrainingArguments, WhisperConfig, WhisperFeatureExtractor, WhisperProcessor, WhisperTokenizerFast, is_tensorboard_available, is_wandb_available from transformers.models.whisper.english_normalizer import EnglishTextNormalizer from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from distil_whisper import FlaxWhisperForConditionalGeneration check_min_version('4.27.0.dev0') require_version('datasets>=1.18.0', 'To fix: pip install -r examples/flax/speech-recognition/requirements.txt') logger = logging.getLogger(__name__) @flax.struct.dataclass class ModelArguments: model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}) config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}) tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}) feature_extractor_name: Optional[str] = field(default=None, metadata={'help': 'feature extractor name or path if not the same as model_name'}) processor_name: Optional[str] = field(default=None, metadata={'help': 'processor name or path if not the same as model_name'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) subfolder: str = field(default='', metadata={'help': 'In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here.'}) use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'}) dtype: Optional[str] = field(default='float32', metadata={'help': 'Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`.'}) load_with_scan: Optional[bool] = field(default=False, metadata={'help': 'Whether to load the model with scan enabled. Required when the model was saved with scan enabled'}) return_timestamps: bool = field(default=False, metadata={'help': 'Whether or not to predict timestamps in the generation step.'}) @flax.struct.dataclass class DataTrainingArguments: dataset_name: str = field(default=None, metadata={'help': "The name of the dataset to use (via the datasets library). 
Load and combine multiple datasets by separating dataset names by a '+' symbol."}) dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) dataset_split_name: Optional[str] = field(default=None, metadata={'help': 'The split name of the dataset to use (via the datasets library).'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) text_column_name: str = field(default=None, metadata={'help': 'The name of the dataset column containing the text data. Defaults to `text`.'}) max_duration_in_seconds: float = field(default=30.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'}) min_duration_in_seconds: float = field(default=0.0, metadata={'help': 'Filter audio files that are shorter than `min_duration_in_seconds` seconds'}) max_label_length: int = field(default=128, metadata={'help': 'Truncate transcriptions that are longer than `max_label_length` tokens.'}) pad_target_to_multiple_of: Optional[int] = field(default=None, metadata={'help': 'If set will pad the target sequence to a multiple of the provided value. This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the targets to max length.'}) preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'}) wandb_project: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb project.'}) wandb_name: str = field(default=None, metadata={'help': 'The name of the wandb run.'}) wandb_job_type: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb job type.'}) wandb_dir: str = field(default=None, metadata={'help': 'The absolute path to save the wandb logs.'}) save_code_to_wandb: bool = field(default=False, metadata={'help': 'Whether to save main script to wandb. 
This is valuable for improving experiment reproducibility and to diff code across experiments in the UI.'}) streaming: bool = field(default=True, metadata={'help': "Whether to use Datasets' streaming mode to load and pre-process the data."}) max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes, truncate the number of eval examples to this value if set.'}) log_audio: Optional[bool] = field(default=False, metadata={'help': 'For debugging purposes, record the audio samples as well as the ground truths / preds.'}) def shift_tokens_right(label_ids: np.array, decoder_start_token_id: int) -> np.ndarray: shifted_label_ids = np.zeros_like(label_ids) shifted_label_ids[:, 1:] = label_ids[:, :-1] shifted_label_ids[:, 0] = decoder_start_token_id return shifted_label_ids @flax.struct.dataclass class FlaxDataCollatorSpeechSeq2SeqWithPadding: processor: Any decoder_start_token_id: int input_padding: Union[bool, str] = 'max_length' target_padding: Union[bool, str] = 'max_length' max_target_length: Optional[int] = None log_audio: Optional[bool] = False audio_column_name: Optional[str] = 'audio' def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: model_input_name = self.processor.model_input_names[0] input_features = {model_input_name: [feature[model_input_name] for feature in features]} label_features = {'input_ids': [feature['labels'] for feature in features]} batch = self.processor.feature_extractor.pad(input_features, padding=self.input_padding, return_tensors='np') labels_batch = self.processor.tokenizer.pad(label_features, max_length=self.max_target_length, padding=self.target_padding, return_tensors='np') labels = labels_batch['input_ids'] if (labels[:, 0] == self.decoder_start_token_id).all().item(): labels = labels[:, 1:] labels_batch.attention_mask = labels_batch.attention_mask[:, 1:] decoder_input_ids = shift_tokens_right(labels, self.decoder_start_token_id) labels = np.ma.array(labels, mask=np.not_equal(labels_batch.attention_mask, 1)) labels = labels.filled(fill_value=-100) batch['labels'] = labels batch['decoder_input_ids'] = decoder_input_ids if self.log_audio: audio_samples = [feature[self.audio_column_name] for feature in features] batch['audio'] = audio_samples return batch def get_data_loader(dataset: Dataset, batch_size: int, data_collator: FlaxDataCollatorSpeechSeq2SeqWithPadding, dataloader_num_workers: int=0, pin_memory: bool=True) -> DataLoader: data_loader = DataLoader(dataset, batch_size=batch_size, drop_last=False, pin_memory=pin_memory, collate_fn=data_collator, num_workers=dataloader_num_workers) return data_loader def write_metric(summary_writer, eval_metrics, step, prefix='eval'): for (metric_name, value) in eval_metrics.items(): summary_writer.scalar(f'{prefix}/{metric_name}', value, step) def write_wandb_metric(wandb_logger, metrics, train_time, prefix): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v log_metrics[f'{prefix}/time'] = train_time wandb_logger.log(log_metrics) def convert_audio_to_wandb(wandb_logger, audio): return wandb_logger.Audio(audio['array'][:, np.newaxis], sample_rate=audio['sampling_rate']) def write_wandb_pred(wandb_logger, eval_audios, pred_str, label_str, norm_pred_str, norm_label_str, prefix='eval', num_lines=200000): columns = ['Target', 'Pred', 'Norm Target', 'Norm Pred'] str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))] if len(eval_audios) > 0: columns.insert(0, 'Audio') 
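# Clarifying comment (not in the original source): when audio logging is enabled, an 'Audio' column is prepended so each W&B table row
# pairs the waveform with its target/prediction strings. The 'incorrect_predictions' table below keeps only rows whose normalised
# target (column -2) differs from the normalised prediction (column -1).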
str_data = [[convert_audio_to_wandb(wandb_logger, eval_audios[i]), *str_data[i]] for i in range(len(pred_str))] wandb_logger.log({f'{prefix}/all_predictions': wandb_logger.Table(columns=columns, data=str_data[:num_lines])}) str_data = np.asarray(str_data) str_data_incorrect = str_data[str_data[:, -2] != str_data[:, -1]] wandb_logger.log({f'{prefix}/incorrect_predictions': wandb_logger.Table(columns=columns, data=str_data_incorrect[:num_lines])}) def convert_dataset_str_to_list(dataset_names, dataset_config_names, splits=None, text_column_names=None, dataset_hours=None, default_split='train'): if isinstance(dataset_names, str): dataset_names = dataset_names.split('+') for i in range(len(dataset_names)): ds_name = dataset_names[i] dataset_names[i] = f'distil-whisper/{ds_name}' if '/' not in ds_name else ds_name dataset_config_names = dataset_config_names.split('+') splits = splits.split('+') if splits is not None else None text_column_names = text_column_names.split('+') if text_column_names is not None else None dataset_hours = dataset_hours.split('+') if dataset_hours is not None else None if len(dataset_names) != len(dataset_config_names): raise ValueError(f'Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_config_names)} configs.') if splits is not None and len(splits) != len(dataset_names): raise ValueError(f'Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits.') if text_column_names is not None and len(text_column_names) != len(dataset_names): raise ValueError(f'Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and {len(text_column_names)} text column names.') if dataset_hours is not None: if len(dataset_hours) != len(dataset_names): raise ValueError(f'Ensure one probability is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_hours)} hours.') dataset_hours = [float(ds_hours) for ds_hours in dataset_hours] else: dataset_hours = [None] * len(dataset_names) text_column_names = text_column_names if text_column_names is not None else ['text' for _ in range(len(dataset_names))] splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))] dataset_names_dict = [] for (i, ds_name) in enumerate(dataset_names): dataset_names_dict.append({'name': ds_name, 'config': dataset_config_names[i], 'split': splits[i], 'text_column_name': text_column_names[i], 'hours': dataset_hours[i]}) return dataset_names_dict class FlaxWhisperFeatureExtractor(WhisperFeatureExtractor): def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray: waveform = torch.from_numpy(waveform).type(torch.float32) window = torch.hann_window(self.n_fft) stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True) magnitudes = stft[..., :-1].abs() ** 2 mel_filters = torch.from_numpy(self.mel_filters).type(torch.float32) mel_spec = mel_filters.T @ magnitudes log_spec = torch.clamp(mel_spec, min=1e-10).log10() log_spec = torch.maximum(log_spec, log_spec.max() - 8.0) log_spec = (log_spec + 4.0) / 4.0 return log_spec.numpy() def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() 
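# Usage note (illustrative, not in the original source): HfArgumentParser accepts either a single JSON config file or regular CLI flags,
# e.g. (hypothetical paths/values):
#   python run_eval.py eval_config.json
#   python run_eval.py --model_name_or_path distil-whisper/distil-large-v2 --dataset_name ... --output_dir ./eval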
send_example_telemetry('run_flax_speech_recognition_seq2seq', model_args, data_args, framework='flax') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() logger.info('Evaluation parameters %s', training_args) has_tensorboard = is_tensorboard_available() if 'tensorboard' in training_args.report_to: if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning(f'Unable to display metrics through TensorBoard because some packages are not installed: {ie}') else: logger.warning('Unable to display metrics through TensorBoard because the package is not installed: Please run `pip install tensorboard` to enable.') has_wandb = is_wandb_available() if 'wandb' in training_args.report_to: if has_wandb and jax.process_index() == 0: import wandb as wandb_logger wandb_logger.init(project=data_args.wandb_project, name=data_args.wandb_name, job_type=data_args.wandb_job_type, dir=data_args.wandb_dir, save_code=data_args.save_code_to_wandb) else: logger.warning('Wandb logging requires wandb to be installed. Run `pip install wandb` to enable.') raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() dataset_names_dict = convert_dataset_str_to_list(data_args.dataset_name, data_args.dataset_config_name, splits=data_args.dataset_split_name, text_column_names=data_args.text_column_name) if len(dataset_names_dict) == 1: dataset_dict = dataset_names_dict[0] raw_datasets['eval'] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, use_auth_token=True if model_args.use_auth_token else None, streaming=data_args.streaming) if dataset_dict['text_column_name'] not in list(raw_datasets['eval'].features.keys()): raise ValueError(f"`--text_column_name` {dataset_dict['text_column_name']} not found in the evaluation dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column for the target text. Should be one of {' '.join(list(raw_datasets['eval'].features.keys()))}") if dataset_dict['text_column_name'] != 'text': raw_datasets['eval'] = raw_datasets['eval'].rename_column(dataset_dict['text_column_name'], 'text') else: for dataset_dict in tqdm(dataset_names_dict, desc='Loading datasets...'): pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}" raw_datasets[pretty_name] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, use_auth_token=True if model_args.use_auth_token else None, streaming=data_args.streaming) if dataset_dict['text_column_name'] not in list(raw_datasets[pretty_name].features.keys()): raise ValueError(f"`--text_column_name` {dataset_dict['text_column_name']} not found in the evaluation dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column for the target text. 
Should be one of {' '.join(list(raw_datasets[pretty_name].features.keys()))}") if dataset_dict['text_column_name'] != 'text': raw_datasets[pretty_name] = raw_datasets[pretty_name].rename_column(dataset_dict['text_column_name'], 'text') config = WhisperConfig.from_pretrained(model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None) feature_extractor = FlaxWhisperFeatureExtractor.from_pretrained(model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None) tokenizer = WhisperTokenizerFast.from_pretrained(model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None) processor = WhisperProcessor.from_pretrained(model_args.processor_name if model_args.processor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None) (model, params) = FlaxWhisperForConditionalGeneration.from_pretrained(model_args.model_name_or_path, config=config, dtype=getattr(jnp, model_args.dtype), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, _do_init=False, subfolder=model_args.subfolder) if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined') if model_args.load_with_scan: model.disable_scan() params = model.convert_scan_to_unroll(params) raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)) max_label_length = data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length audio_column_name = data_args.audio_column_name num_workers = data_args.preprocessing_num_workers dataloader_num_workers = training_args.dataloader_num_workers model_input_name = feature_extractor.model_input_names[0] normalizer = EnglishTextNormalizer(tokenizer.english_spelling_normalizer) if data_args.max_eval_samples is not None: for split in raw_datasets: raw_datasets[split] = raw_datasets[split].take(data_args.max_eval_samples) if data_args.streaming else raw_datasets[split].select(range(data_args.max_eval_samples)) def prepare_dataset(batch): sample = batch[audio_column_name] inputs = feature_extractor(sample['array'], sampling_rate=sample['sampling_rate']) batch[model_input_name] = inputs.get(model_input_name)[0] input_str = batch['text'] batch['labels'] = tokenizer(input_str, max_length=max_label_length, truncation=True).input_ids return batch vectorized_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() for split in raw_datasets: raw_datasets_features = list(raw_datasets[split].features.keys()) if data_args.log_audio: raw_datasets_features.remove(audio_column_name) map_fn = partial(raw_datasets[split].map, function=prepare_dataset, remove_columns=raw_datasets_features) vectorized_datasets[split] = map_fn(num_proc=num_workers, desc='preprocess eval dataset') if not data_args.streaming else map_fn() if 
data_args.preprocessing_only: cache = {k: v.cache_files for (k, v) in vectorized_datasets.items()} logger.info(f'Data preprocessing finished. Files cached at {cache}.') return metric = evaluate.load('wer') all_punctuation = list(string.punctuation.replace("'", '')) return_timestamps = model_args.return_timestamps def compute_metrics(preds, labels): for idx in range(len(labels)): labels[idx][labels[idx] == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps) label_str = tokenizer.batch_decode(labels, skip_special_tokens=True) spaced_pred_str = [pred_str[i].replace(punctuation, f' {punctuation} ') for punctuation in all_punctuation for i in range(len(pred_str))] spaced_label_str = [label_str[i].replace(punctuation, f' {punctuation} ') for punctuation in all_punctuation for i in range(len(label_str))] wer_ortho = 100 * metric.compute(predictions=spaced_pred_str, references=spaced_label_str) norm_pred_str = [normalizer(pred) for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str) return ({'wer': wer, 'wer_ortho': wer_ortho}, pred_str, label_str, norm_pred_str, norm_label_str) data_collator = FlaxDataCollatorSpeechSeq2SeqWithPadding(processor=processor, decoder_start_token_id=model.config.decoder_start_token_id, input_padding='longest', target_padding='max_length', max_target_length=max_label_length, log_audio=data_args.log_audio) per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() def loss_fn(logits, labels, label_smoothing_factor=0.0): vocab_size = logits.shape[-1] confidence = 1.0 - label_smoothing_factor low_confidence = (1.0 - confidence) / (vocab_size - 1) normalizing_constant = -(confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)) soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence) loss = optax.softmax_cross_entropy(logits, soft_labels) loss = loss - normalizing_constant padding_mask = labels >= 0 loss = loss * padding_mask loss = loss.sum() num_labels = padding_mask.sum() return (loss, num_labels) def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop('labels') logits = model(**batch, params=params, freeze_encoder=True, train=False)[0] (loss, num_labels) = loss_fn(logits, labels, label_smoothing_factor) num_labels = jax.lax.psum(num_labels, 'batch') loss = jax.lax.psum(loss, 'batch') loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) metrics = {'loss': loss} return metrics num_beams = training_args.generation_num_beams if training_args.generation_num_beams is not None else model.config.num_beams gen_kwargs = {'max_length': max_label_length, 'num_beams': num_beams, 'language': '<|en|>', 'task': 'transcribe', 'return_timestamps': return_timestamps} def generate_step(params, batch): output_ids = model.generate(batch[model_input_name], attention_mask=batch.get('attention_mask'), params=params, freeze_encoder=True, 
**gen_kwargs) return output_ids.sequences p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), 'batch') p_generate_step = jax.pmap(generate_step, 'batch') params = jax_utils.replicate(params) def eval_step(split='eval'): eval_metrics = [] eval_preds = [] eval_labels = [] eval_audios = [] eval_start = time.time() eval_loader = get_data_loader(vectorized_datasets[split], batch_size=eval_batch_size, data_collator=data_collator, dataloader_num_workers=dataloader_num_workers) for batch in tqdm(eval_loader, desc=f'Evaluating {split}...'): labels = batch['labels'] if data_args.log_audio: eval_audios.extend(batch.pop('audio')) metrics = pad_shard_unpad(p_eval_step, static_return=True)(params, batch.data, min_device_batch=per_device_eval_batch_size) eval_metrics.append(metrics) if training_args.predict_with_generate: generated_ids = pad_shard_unpad(p_generate_step)(params, batch.data, min_device_batch=per_device_eval_batch_size) eval_preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs['max_length']))) eval_labels.extend(labels) eval_time = time.time() - eval_start eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) wer_desc = '' if training_args.predict_with_generate: (wer_metric, pred_str, label_str, norm_pred_str, norm_label_str) = compute_metrics(eval_preds, eval_labels) eval_metrics.update(wer_metric) wer_desc = ' '.join([f'Eval {key}: {value} |' for (key, value) in wer_metric.items()]) logger.info(f"Eval Loss: {eval_metrics['loss']} | {wer_desc})") if has_tensorboard and jax.process_index() == 0 and ('tensorboard' in training_args.report_to): write_metric(summary_writer, eval_metrics, model_args.step, prefix=split) if has_wandb and jax.process_index() == 0 and ('wandb' in training_args.report_to): write_wandb_metric(wandb_logger, eval_metrics, eval_time, prefix=split) if training_args.predict_with_generate: write_wandb_pred(wandb_logger, eval_audios, pred_str, label_str, norm_pred_str, norm_label_str, prefix=split) logger.info('***** Running Eval *****') logger.info(f' Instantaneous batch size per device = {training_args.per_device_eval_batch_size}') logger.info(f' Total eval batch size (w. 
parallel & distributed) = {eval_batch_size}') for split in vectorized_datasets: eval_step(split=split) if __name__ == '__main__': main() # File: distil-whisper-main/training/flax/run_finetuning.py """""" import logging import os import string import sys import time from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Union import datasets import evaluate import flax import jax import jax.numpy as jnp import numpy as np import optax import transformers from datasets import Dataset, DatasetDict, load_dataset from flax import jax_utils, traverse_util from flax.jax_utils import pad_shard_unpad, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import Repository, create_repo from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, HfArgumentParser, Seq2SeqTrainingArguments, is_tensorboard_available, is_wandb_available from transformers.file_utils import get_full_repo_name from transformers.models.whisper.english_normalizer import EnglishTextNormalizer from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from distil_whisper import FlaxWhisperForConditionalGeneration check_min_version('4.27.0.dev0') require_version('datasets>=1.18.0', 'To fix: pip install -r examples/flax/speech-recogintion/requirements.txt') logger = logging.getLogger(__name__) @flax.struct.dataclass class ModelArguments: model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}) config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}) tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}) feature_extractor_name: Optional[str] = field(default=None, metadata={'help': 'feature extractor name or path if not the same as model_name'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'}) dtype: Optional[str] = field(default='float32', metadata={'help': 'Floating-point format in which the model weights should be initialized and trained. 
Choose one of `[float32, float16, bfloat16]`.'}) activation_dropout: float = field(default=0.0, metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'}) attention_dropout: float = field(default=0.0, metadata={'help': 'The dropout ratio for the attention probabilities.'}) dropout: float = field(default=0.0, metadata={'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'}) @flax.struct.dataclass class DataTrainingArguments: dataset_name: str = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}) dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}) max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'}) max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) text_column_name: str = field(default='whisper_transcript', metadata={'help': "The name of the dataset column containing the text data. Defaults to 'whisper_transcript', which is the pseudo-labelled Whisper transcription data."}) max_duration_in_seconds: float = field(default=30.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'}) min_duration_in_seconds: float = field(default=0.0, metadata={'help': 'Filter audio files that are shorter than `min_duration_in_seconds` seconds'}) max_label_length: int = field(default=128, metadata={'help': 'Truncate transcriptions that are longer than `max_label_length` tokens.'}) pad_target_to_multiple_of: Optional[int] = field(default=None, metadata={'help': 'If set will pad the target sequence to a multiple of the provided value. This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the targets to max length.'}) preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'}) train_split_name: str = field(default='train', metadata={'help': "The name of the training data set split to use (via the datasets library). Defaults to 'train'"}) eval_split_name: str = field(default='validation', metadata={'help': "The name of the evaluation data set split to use (via the datasets library). 
Defaults to 'validation'"}) wandb_project: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb project.'}) wandb_name: str = field(default=None, metadata={'help': 'The name of the wandb run.'}) wandb_job_type: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb job type.'}) wandb_dir: str = field(default=None, metadata={'help': 'The absolute path to save the wandb logs.'}) save_code_to_wandb: bool = field(default=False, metadata={'help': 'Whether to save main script to wandb. This is valuable for improving experiment reproducibility and to diff code across experiments in the UI.'}) @dataclass class FlaxSeq2SeqTrainingArguments(Seq2SeqTrainingArguments): use_scan: Optional[bool] = field(default=True, metadata={'help': 'Whether or not to use `scan_with_axes` over the encoder and decoder blocks. Using scan results in faster compile times and more efficient memory use during training, since all of the layers in the encoder/decoder are stacked, and we perform a lax.scan over the stacked block to index each layer. However, it results in slower inference time due to the overhead of stacking the layers this way. Thus, we always default to disabling scan for the inference step.'}) freeze_encoder: Optional[bool] = field(default=False, metadata={'help': 'Whether to freeze the entire encoder model. Only recommended when the entire encoder has been copied from the teacher model.'}) def shift_tokens_right(label_ids: np.array, decoder_start_token_id: int) -> np.ndarray: shifted_label_ids = np.zeros_like(label_ids) shifted_label_ids[:, 1:] = label_ids[:, :-1] shifted_label_ids[:, 0] = decoder_start_token_id return shifted_label_ids @flax.struct.dataclass class FlaxDataCollatorSpeechSeq2SeqWithPadding: processor: Any decoder_start_token_id: int input_padding: Union[bool, str] = 'max_length' target_padding: Union[bool, str] = 'max_length' max_target_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: model_input_name = self.processor.model_input_names[0] input_features = {model_input_name: [feature[model_input_name] for feature in features]} label_features = {'input_ids': [feature['labels'] for feature in features]} batch = self.processor.feature_extractor.pad(input_features, padding=self.input_padding, return_tensors='np') labels_batch = self.processor.tokenizer.pad(label_features, max_length=self.max_target_length, padding=self.target_padding, return_tensors='np') labels = labels_batch['input_ids'] if (labels[:, 0] == self.decoder_start_token_id).all().item(): labels = labels[:, 1:] labels_batch.attention_mask = labels_batch.attention_mask[:, 1:] decoder_input_ids = shift_tokens_right(labels, self.decoder_start_token_id) labels = np.ma.array(labels, mask=np.not_equal(labels_batch.attention_mask, 1)) labels = labels.filled(fill_value=-100) batch['labels'] = labels batch['decoder_input_ids'] = decoder_input_ids return batch def get_data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, data_collator: FlaxDataCollatorSpeechSeq2SeqWithPadding, shuffle: bool=True, drop_last: bool=True, dataloader_num_workers: int=0, pin_memory: bool=True) -> DataLoader: if shuffle: batch_idx = jax.random.permutation(rng, len(dataset)) batch_idx = np.asarray(batch_idx) dataset = dataset.select(batch_idx) data_loader = DataLoader(dataset, batch_size=batch_size, drop_last=drop_last, pin_memory=pin_memory, collate_fn=data_collator, num_workers=dataloader_num_workers) return data_loader 
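# Worked example (added for illustration, not in the original source): shift_tokens_right builds the decoder inputs by shifting the
# labels one position to the right and prepending the decoder start token, e.g. with a hypothetical start id of 50258:
#   shift_tokens_right(np.array([[10, 11, 12]]), 50258)  ->  array([[50258, 10, 11]])
# The collator above then masks padded label positions to -100 so they are ignored by the loss.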
class TrainState(train_state.TrainState): dropout_rng: jnp.ndarray def replicate(self): return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng)) def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step, logging_steps): summary_writer.scalar('train/time', train_time, step) train_metrics = get_metrics(train_metrics) for (key, vals) in train_metrics.items(): steps_arr = np.arange(0, step, logging_steps)[-len(vals):] tag = f'train/{key}' for (i, val) in enumerate(vals): summary_writer.scalar(tag, val, steps_arr[i]) for (metric_name, value) in eval_metrics.items(): summary_writer.scalar(f'eval/{metric_name}', value, step) def write_wandb_metric(wandb_logger, metrics, train_time, step, prefix): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v log_metrics[f'{prefix}/time'] = train_time wandb_logger.log(log_metrics, step) def write_wandb_pred(wandb_logger, pred_str, label_str, prefix='eval', num_lines=100): if num_lines < len(pred_str): str_data = [[label_str[i], pred_str[i]] for i in range(num_lines)] else: str_data = [[label_str[i], pred_str[i]] for i in range(len(pred_str))] wandb_logger.log({f'{prefix}/predictions': wandb_logger.Table(columns=['label_str', 'pred_str'], data=str_data)}) def create_learning_rate_fn(num_train_steps: int, num_warmup_steps: int, learning_rate: float) -> Callable[[int], jnp.array]: warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule(init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, FlaxSeq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() send_example_telemetry('run_flax_speech_recognition_seq2seq', model_args, data_args, framework='flax') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() logger.info('Training/evaluation parameters %s', training_args) if os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir): raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty.Use `--overwrite_output_dir` to overcome.') if training_args.push_to_hub: if training_args.hub_model_id is None: repo_name = get_full_repo_name(Path(training_args.output_dir).absolute().name, token=training_args.hub_token) else: repo_name = training_args.hub_model_id create_repo(repo_name, exist_ok=True, token=training_args.hub_token) repo = Repository(training_args.output_dir, clone_from=repo_name, token=training_args.hub_token) raw_datasets = DatasetDict() if training_args.do_train: raw_datasets['train'] = load_dataset(data_args.dataset_name, 
data_args.dataset_config_name, split=data_args.train_split_name, cache_dir=data_args.dataset_cache_dir, use_auth_token=True if model_args.use_auth_token else None, num_proc=data_args.preprocessing_num_workers) if training_args.do_eval: raw_datasets['eval'] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, cache_dir=data_args.dataset_cache_dir, use_auth_token=True if model_args.use_auth_token else None, num_proc=data_args.preprocessing_num_workers) if not training_args.do_train and (not training_args.do_eval): raise ValueError('Cannot not train and not do evaluation. At least one of training or evaluation has to be performed.') if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError(f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. Make sure to set `--audio_column_name` to the correct audio column - one of {', '.join(next(iter(raw_datasets.values())).column_names)}.") if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError(f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. Make sure to set `--text_column_name` to the correct text column - one of {', '.join(next(iter(raw_datasets.values())).column_names)}.") config = AutoConfig.from_pretrained(model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None) feature_extractor = AutoFeatureExtractor.from_pretrained(model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None) tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None) config.update({'activation_dropout': model_args.activation_dropout, 'attention_dropout': model_args.attention_dropout, 'dropout': model_args.dropout}) (model, params) = FlaxWhisperForConditionalGeneration.from_pretrained(model_args.model_name_or_path, config=config, dtype=getattr(jnp, model_args.dtype), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, _do_init=False) if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined') if training_args.use_scan: model.enable_scan() params = model.convert_unroll_to_scan(params) if training_args.gradient_checkpointing: model.enable_gradient_checkpointing() if hasattr(model.generation_config, 'is_multilingual') and model.generation_config.is_multilingual: tokenizer.set_prefix_tokens(language='English', task='transcribe', predict_timestamps=False) model.generation_config.forced_decoder_ids = tokenizer.get_decoder_prompt_ids(language='English', task='transcribe', no_timestamps=True) raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)) max_input_length = int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) min_input_length = 
int(data_args.min_duration_in_seconds * feature_extractor.sampling_rate) max_label_length = data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length audio_column_name = data_args.audio_column_name num_workers = data_args.preprocessing_num_workers dataloader_num_workers = training_args.dataloader_num_workers text_column_name = data_args.text_column_name model_input_name = feature_extractor.model_input_names[0] normalizer = EnglishTextNormalizer(tokenizer.english_spelling_normalizer) if training_args.do_train and data_args.max_train_samples is not None: raw_datasets['train'] = raw_datasets['train'].select(range(data_args.max_train_samples)) if training_args.do_eval and data_args.max_eval_samples is not None: raw_datasets['eval'] = raw_datasets['eval'].select(range(data_args.max_eval_samples)) def prepare_dataset(batch): sample = batch[audio_column_name] inputs = feature_extractor(sample['array'], sampling_rate=sample['sampling_rate']) batch[model_input_name] = inputs.get(model_input_name)[0] batch['input_length'] = len(sample['array']) input_str = ' ' + batch[text_column_name].lower() batch['labels'] = tokenizer(input_str).input_ids return batch vectorized_datasets = raw_datasets.map(prepare_dataset, remove_columns=next(iter(raw_datasets.values())).column_names, num_proc=num_workers, desc='preprocess train dataset') def is_audio_in_length_range(length): return min_input_length < length < max_input_length vectorized_datasets = vectorized_datasets.filter(is_audio_in_length_range, num_proc=num_workers, input_columns=['input_length']) def is_labels_in_length_range(labels): return 0 < len(labels) < max_label_length vectorized_datasets = vectorized_datasets.filter(is_labels_in_length_range, num_proc=num_workers, input_columns=['labels']) if data_args.preprocessing_only: cache = {k: v.cache_files for (k, v) in vectorized_datasets.items()} logger.info(f'Data preprocessing finished. 
Files cached at {cache}.') return metric = evaluate.load('wer') all_punctuation = list(string.punctuation.replace("'", '')) def compute_metrics(preds, labels): for idx in range(len(labels)): labels[idx][labels[idx] == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True) label_str = tokenizer.batch_decode(labels, skip_special_tokens=True) spaced_pred_str = [pred_str[i].replace(punctuation, '') for punctuation in all_punctuation for i in range(len(pred_str))] spaced_label_str = [label_str[i].replace(punctuation, '') for punctuation in all_punctuation for i in range(len(label_str))] wer_ortho = 100 * metric.compute(predictions=spaced_pred_str, references=spaced_label_str) norm_pred_str = [normalizer(pred) for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str) return ({'wer': wer, 'wer_ortho': wer_ortho}, pred_str, label_str) feature_extractor.save_pretrained(training_args.output_dir) tokenizer.save_pretrained(training_args.output_dir) config.save_pretrained(training_args.output_dir) model.generation_config.save_pretrained(training_args.output_dir) processor = AutoProcessor.from_pretrained(training_args.output_dir) data_collator = FlaxDataCollatorSpeechSeq2SeqWithPadding(processor=processor, decoder_start_token_id=model.config.decoder_start_token_id, input_padding='longest', target_padding='max_length', max_target_length=max_label_length) has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning(f'Unable to display metrics through TensorBoard because some package are not installed: {ie}') else: logger.warning('Unable to display metrics through TensorBoard because the package is not installed: Please run `pip install tensorboard` to enable.') has_wandb = is_wandb_available() if has_wandb: import wandb as wandb_logger if jax.process_index() == 0: wandb_logger.init(project=data_args.wandb_project, name=data_args.wandb_name, job_type=data_args.wandb_job_type, dir=data_args.wandb_dir, save_code=data_args.save_code_to_wandb) else: logger.warning('Wandb logging requires wandb to be installed. 
Run `pip install wandb` to enable.') rng = jax.random.PRNGKey(training_args.seed) (rng, dropout_rng) = jax.random.split(rng) num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() steps_per_epoch = len(vectorized_datasets['train']) // train_batch_size total_train_steps = steps_per_epoch * num_epochs linear_decay_lr_schedule_fn = create_learning_rate_fn(total_train_steps, training_args.warmup_steps, training_args.learning_rate) def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) layer_norm_candidates = ['layer_norm', 'self_attn_layer_norm', 'final_layer_norm', 'encoder_attn_layer_norm'] layer_norm_named_params = {layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in ''.join(layer).lower()} flat_mask = {path: path[-1] != 'bias' and path[-2:] not in layer_norm_named_params for path in flat_params} return traverse_util.unflatten_dict(flat_mask) adamw = optax.adamw(learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn) state = TrainState.create(apply_fn=model.__call__, params=params, tx=adamw, dropout_rng=dropout_rng) def loss_fn(logits, labels, label_smoothing_factor=0.0): vocab_size = logits.shape[-1] confidence = 1.0 - label_smoothing_factor low_confidence = (1.0 - confidence) / (vocab_size - 1) normalizing_constant = -(confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)) soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence) loss = optax.softmax_cross_entropy(logits, soft_labels) loss = loss - normalizing_constant padding_mask = labels >= 0 loss = loss * padding_mask loss = loss.sum() num_labels = padding_mask.sum() return (loss, num_labels) def train_step(state, batch, freeze_encoder, label_smoothing_factor=0.0): (dropout_rng, new_dropout_rng) = jax.random.split(state.dropout_rng) def compute_loss(params): labels = batch.pop('labels') logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, freeze_encoder=freeze_encoder, train=True)[0] (loss, num_labels) = loss_fn(logits, labels, label_smoothing_factor) return (loss, num_labels) grad_fn = jax.value_and_grad(compute_loss, has_aux=True) ((loss, num_labels), grad) = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, 'batch') loss = jax.lax.psum(loss, 'batch') loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) grad = jax.lax.psum(grad, 'batch') grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {'loss': loss, 'learning_rate': linear_decay_lr_schedule_fn(state.step)} return (new_state, metrics) def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop('labels') logits = model(**batch, params=params, train=False)[0] (loss, num_labels) = loss_fn(logits, labels, label_smoothing_factor) num_labels = jax.lax.psum(num_labels, 'batch') loss = jax.lax.psum(loss, 'batch') loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) metrics = {'loss': loss} return metrics num_beams = training_args.generation_num_beams if training_args.generation_num_beams is not None else 
model.config.num_beams gen_kwargs = {'max_length': max_label_length, 'num_beams': num_beams} def generate_step(params, batch): output_ids = model.generate(batch[model_input_name], attention_mask=batch.get('attention_mask'), params=params, **gen_kwargs) return output_ids.sequences p_train_step = jax.pmap(partial(train_step, label_smoothing_factor=training_args.label_smoothing_factor), 'batch', donate_argnums=(0,), static_broadcasted_argnums=(2,)) p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), 'batch') p_generate_step = jax.pmap(generate_step, 'batch') state = state.replicate() logger.info('***** Running training *****') logger.info(f" Num examples = {len(vectorized_datasets['train'])}") logger.info(f' Num Epochs = {num_epochs}') logger.info(f' Instantaneous batch size per device = {training_args.per_device_train_batch_size}') logger.info(f' Total train batch size (w. parallel & distributed) = {train_batch_size}') logger.info(f' Total optimization steps = {total_train_steps}') train_time = 0 epochs = tqdm(range(num_epochs), desc=f'Epoch ... (1/{num_epochs})', position=0) for epoch in epochs: train_start = time.time() (rng, input_rng) = jax.random.split(rng) train_metrics = [] train_loader = get_data_loader(input_rng, vectorized_datasets['train'], batch_size=train_batch_size, data_collator=data_collator, dataloader_num_workers=dataloader_num_workers) for (step, batch) in enumerate(tqdm(train_loader, desc='Training...', position=1), 1): batch = shard(batch.data) (state, train_metric) = p_train_step(state, batch, training_args.freeze_encoder) cur_step = epoch * steps_per_epoch + step if cur_step % training_args.logging_steps == 0: train_metrics.append(train_metric) train_metric_to_write = unreplicate(train_metric) epochs.write(f"Step... ({cur_step} / {total_train_steps} | Loss: {train_metric_to_write['loss']}, Learning Rate: {train_metric_to_write['learning_rate']})") if has_wandb and jax.process_index() == 0: write_wandb_metric(wandb_logger, train_metric_to_write, train_time + time.time() - train_start, cur_step, 'train') train_time += time.time() - train_start train_metric = unreplicate(train_metric) epochs.write(f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate: {train_metric['learning_rate']})") eval_metrics = [] eval_preds = [] eval_labels = [] eval_start = time.time() eval_loader = get_data_loader(input_rng, vectorized_datasets['eval'], batch_size=eval_batch_size, data_collator=data_collator, shuffle=False, drop_last=False, dataloader_num_workers=dataloader_num_workers) for batch in tqdm(eval_loader, desc='Evaluating...', position=2): labels = batch['labels'] metrics = pad_shard_unpad(p_eval_step, static_return=True)(state.params, batch.data, min_device_batch=per_device_eval_batch_size) eval_metrics.append(metrics) if training_args.predict_with_generate: generated_ids = pad_shard_unpad(p_generate_step)(state.params, batch.data, min_device_batch=per_device_eval_batch_size) eval_preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs['max_length']))) eval_labels.extend(labels) eval_time = time.time() - eval_start eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) wer_desc = '' if training_args.predict_with_generate: (wer_metric, pred_str, label_str) = compute_metrics(eval_preds, eval_labels) eval_metrics.update(wer_metric) wer_desc = ' '.join([f'Eval {key}: {value} |' for (key, value) in wer_metric.items()]) desc = f"Epoch... 
({epoch + 1}/{num_epochs} | Eval Loss: {eval_metrics['loss']} | {wer_desc})" epochs.write(desc) epochs.desc = desc if has_tensorboard and jax.process_index() == 0: write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step, training_args.logging_steps) if has_wandb and jax.process_index() == 0: write_wandb_metric(wandb_logger, eval_metrics, eval_time, cur_step, 'eval') if training_args.predict_with_generate: write_wandb_pred(wandb_logger, pred_str, label_str) if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: repo.push_to_hub(commit_message=f'Saving weights and logs of epoch {epoch + 1}', blocking=False) if __name__ == '__main__': main() # File: distil-whisper-main/training/flax/run_long_form_transcription.py """""" import logging import os import sys import time from dataclasses import field from pathlib import Path from typing import Optional import datasets import flax import jax import jax.numpy as jnp import numpy as np import transformers from datasets import DatasetDict, IterableDatasetDict, load_dataset from jax.experimental.compilation_cache import compilation_cache as cc from jiwer import process_words, wer_default from nltk import ngrams from tqdm import tqdm from transformers import HfArgumentParser, Seq2SeqTrainingArguments, is_tensorboard_available, is_wandb_available from transformers.models.whisper.english_normalizer import EnglishTextNormalizer from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from distil_whisper import FlaxWhisperPipeline check_min_version('4.27.0.dev0') require_version('datasets>=1.18.0', 'To fix: pip install -r examples/flax/speech-recogintion/requirements.txt') logger = logging.getLogger(__name__) @flax.struct.dataclass class ModelArguments: model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) subfolder: str = field(default='', metadata={'help': 'In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you canspecify the folder name here.'}) use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'}) dtype: Optional[str] = field(default='float32', metadata={'help': 'Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`.'}) load_with_scan: Optional[bool] = field(default=False, metadata={'help': 'Whether to load the model with scan enabled. Required when the model was saved with scan enabled'}) return_timestamps: Optional[bool] = field(default=False, metadata={'help': 'Whether to predict timestamps (alongside the text predictions). 
Timestamp predictions are discarded at the end of inference, but may assist the model in reducing hallucinations.'}) length_penalty: Optional[float] = field(default=1.0, metadata={'help': 'Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), length_penalty > 1.0 promotes longer sequences, while length_penalty < 1.0 encourages shorter sequences.'}) do_sample: Optional[bool] = field(default=False, metadata={'help': 'Whether or not to use sampling; use greedy decoding otherwise.'}) top_k: Optional[int] = field(default=50, metadata={'help': 'The number of the highest probability vocabulary tokens to keep for top-k-filtering.'}) temperature: Optional[float] = field(default=1.0, metadata={'help': 'The value used to modulate the next token probabilities if sampling.'}) chunk_length_s: Optional[float] = field(default=30.0, metadata={'help': "The input length for each chunk. By default, the chunk length is set to 30.0s, equal to Whisper's context window."}) compilation_cache: Optional[str] = field(default=None, metadata={'help': 'Whether to enable the JAX (experimental) compilation cache. The compilation step is *cached* the first time it is run. Successive compilation steps for the same function utilise the cache to reduce the compilation time.'}) @flax.struct.dataclass class DataTrainingArguments: dataset_name: str = field(default=None, metadata={'help': "The name of the dataset to use (via the datasets library). Load and combine multiple datasets by separating dataset hours by a '+' symbol."}) dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) dataset_split_name: Optional[str] = field(default=None, metadata={'help': 'The split name of the dataset to use (via the datasets library).'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) text_column_name: str = field(default=None, metadata={'help': "The name of the dataset column containing the text data. Defaults to 'text'."}) max_label_length: int = field(default=256, metadata={'help': 'Truncate transcriptions that are longer `max_label_length` tokens.'}) wandb_project: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb project.'}) wandb_name: str = field(default=None, metadata={'help': 'The name of the wandb run.'}) wandb_job_type: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb job type.'}) wandb_dir: str = field(default=None, metadata={'help': 'The absolute path to save the wandb logs.'}) save_code_to_wandb: bool = field(default=False, metadata={'help': 'Whether to save main script to wandb.
This is valuable for improving experiment reproducibility and to diff code across experiments in the UI.'}) streaming: bool = field(default=True, metadata={'help': "Whether to use Datasets' streaming mode to load and the data."}) max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes, truncate the number of eval examples to this value if set.'}) log_audio: Optional[bool] = field(default=False, metadata={'help': 'For debugging purposes, record the audio samples as well as the ground truths / preds.'}) log_predictions: Optional[bool] = field(default=True, metadata={'help': 'Whether or not to log the ground truths / pred text to the wandb logger.'}) ngram_degree: Optional[int] = field(default=5, metadata={'help': 'Degree of n-grams used when computing duplicate n-grams in the predicted text.'}) def write_metric(summary_writer, eval_metrics, prefix='eval'): for (metric_name, value) in eval_metrics.items(): summary_writer.scalar(f'{prefix}/{metric_name}', value, 0) def write_wandb_metric(wandb_logger, metrics, train_time, prefix): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v log_metrics[f'{prefix}/time'] = train_time wandb_logger.log(log_metrics) def convert_audio_to_wandb(wandb_logger, audio): return wandb_logger.Audio(audio['array'][:, np.newaxis], sample_rate=audio['sampling_rate']) def write_wandb_pred(wandb_logger, eval_audios, pred_str, label_str, norm_pred_str, norm_label_str, prefix='eval'): columns = ['Target', 'Pred', 'Norm Target', 'Norm Pred'] str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))] if len(eval_audios) > 0: columns.insert(0, 'Audio') str_data = [[convert_audio_to_wandb(wandb_logger, eval_audios[i]), *str_data[i]] for i in range(len(pred_str))] wandb_logger.log({f'{prefix}/predictions': wandb_logger.Table(columns=columns, data=str_data)}) def convert_dataset_str_to_list(dataset_names, dataset_config_names, splits=None, text_column_names=None, dataset_hours=None, default_split='train'): if isinstance(dataset_names, str): dataset_names = dataset_names.split('+') for i in range(len(dataset_names)): ds_name = dataset_names[i] dataset_names[i] = f'distil-whisper/{ds_name}' if '/' not in ds_name else ds_name dataset_config_names = dataset_config_names.split('+') splits = splits.split('+') if splits is not None else None text_column_names = text_column_names.split('+') if text_column_names is not None else None dataset_hours = dataset_hours.split('+') if dataset_hours is not None else None if len(dataset_names) != len(dataset_config_names): raise ValueError(f'Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_config_names)} configs.') if splits is not None and len(splits) != len(dataset_names): raise ValueError(f'Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits.') if text_column_names is not None and len(text_column_names) != len(dataset_names): raise ValueError(f'Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and {len(text_column_names)} text column names.') if dataset_hours is not None: if len(dataset_hours) != len(dataset_names): raise ValueError(f'Ensure one probability is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_hours)} hours.') dataset_hours = [float(ds_hours) for ds_hours in dataset_hours] else: dataset_hours = [None] * len(dataset_names) text_column_names = 
text_column_names if text_column_names is not None else ['text' for _ in range(len(dataset_names))] splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))] dataset_names_dict = [] for (i, ds_name) in enumerate(dataset_names): dataset_names_dict.append({'name': ds_name, 'config': dataset_config_names[i], 'split': splits[i], 'text_column_name': text_column_names[i], 'hours': dataset_hours[i]}) return dataset_names_dict def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() send_example_telemetry('run_flax_speech_recognition_seq2seq', model_args, data_args, framework='flax') has_tensorboard = is_tensorboard_available() if 'tensorboard' in training_args.report_to: if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(os.path.join(training_args.output_dir, 'runs'))) except ImportError as ie: has_tensorboard = False logger.warning(f'Unable to display metrics through TensorBoard because some packages are not installed: {ie}') else: logger.warning('Unable to display metrics through TensorBoard because the package is not installed: Please run `pip install tensorboard` to enable.') has_wandb = is_wandb_available() if 'wandb' in training_args.report_to: if has_wandb and jax.process_index() == 0: import wandb as wandb_logger wandb_logger.init(project=data_args.wandb_project, name=data_args.wandb_name, job_type=data_args.wandb_job_type, dir=data_args.wandb_dir, save_code=data_args.save_code_to_wandb) else: logger.warning('Wandb logging requires wandb to be installed. Run `pip install wandb` to enable.') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() logger.info('Evaluation parameters %s', training_args) if model_args.compilation_cache: cc.initialize_cache(os.path.join(model_args.cache_dir, 'jax_cache')) raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() dataset_names_dict = convert_dataset_str_to_list(data_args.dataset_name, data_args.dataset_config_name, splits=data_args.dataset_split_name, text_column_names=data_args.text_column_name) for dataset_dict in dataset_names_dict: pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}" raw_datasets[pretty_name] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, use_auth_token=True if model_args.use_auth_token else None, streaming=data_args.streaming) if dataset_dict['text_column_name'] not in list(raw_datasets[pretty_name].features.keys()): raise ValueError(f"--text column name {dataset_dict['text_column_name']} not found in the evaluation dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column for the target text. 
Should be one of {' '.join(list(raw_datasets[pretty_name].features.keys()))}") if dataset_dict['text_column_name'] != 'text': raw_datasets[pretty_name] = raw_datasets[pretty_name].rename_column(dataset_dict['text_column_name'], 'text') raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys()) audio_column_name = data_args.audio_column_name if audio_column_name not in raw_datasets_features: raise ValueError(f"--audio_column_name '{audio_column_name}' not found in dataset '{data_args.dataset_name}'. Make sure to set `--audio_column_name` to the correct audio column - one of {', '.join(raw_datasets_features)}.") for split in raw_datasets: raw_datasets[split] = raw_datasets[split].remove_columns(set(raw_datasets[split].features.keys()) - {audio_column_name, 'text'}) pipeline = FlaxWhisperPipeline(model_args.model_name_or_path, dtype=getattr(jnp, model_args.dtype), max_length=training_args.generation_max_length, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, subfolder=model_args.subfolder) if pipeline.model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined') if model_args.load_with_scan: pipeline.model.disable_scan() pipeline.params = pipeline.model.convert_scan_to_unroll(pipeline.params) if data_args.max_eval_samples is not None: for split in raw_datasets: raw_datasets[split] = raw_datasets[split].take(data_args.max_eval_samples) if data_args.streaming else raw_datasets[split].select(range(data_args.max_eval_samples)) normalizer = EnglishTextNormalizer(pipeline.tokenizer.english_spelling_normalizer) def compute_metrics(pred_str, label_str, ngram_degree=5): norm_pred_str = [normalizer(pred).replace('.', '') for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] wer_output = process_words(norm_label_str, norm_pred_str, wer_default, wer_default) wer_norm = 100 * wer_output.wer ier_norm = 100 * wer_output.insertions / sum([len(ref) for ref in wer_output.references]) ser_norm = 100 * wer_output.substitutions / sum([len(ref) for ref in wer_output.references]) der_norm = 100 * wer_output.deletions / sum([len(ref) for ref in wer_output.references]) all_ngrams = list(ngrams(' '.join(norm_pred_str).split(), ngram_degree)) repeated_ngrams = len(all_ngrams) - len(set(all_ngrams)) return ({'wer': wer_norm, 'ier': ier_norm, 'ser': ser_norm, 'der': der_norm, 'repeated_ngrams': repeated_ngrams}, pred_str, label_str, norm_pred_str, norm_label_str) per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) eval_batch_size = per_device_eval_batch_size * jax.device_count() num_beams = training_args.generation_num_beams if training_args.generation_num_beams is not None else pipeline.model.config.num_beams generation_config = pipeline.model.generation_config if hasattr(generation_config, 'is_multilingual') and generation_config.is_multilingual: language = 'English' task = 'transcribe' else: language = None task = None logger.info('Pre-compiling the generate call...') random_inputs = {'input_features': 
np.ones((eval_batch_size, 80, 2 * pipeline.model.config.max_source_positions))} pipeline.forward(random_inputs, batch_size=eval_batch_size, language=language, task=task, return_timestamps=model_args.return_timestamps, num_beams=num_beams, length_penalty=model_args.length_penalty, do_sample=model_args.do_sample, top_k=model_args.top_k, temperature=model_args.temperature) def eval_step(split='eval'): eval_preds = [] eval_labels = [] eval_audios = [] eval_start = time.time() for sample in tqdm(raw_datasets[split], desc=f'Evaluating {split}...'): label_str = sample['text'] if data_args.log_audio: eval_audios.append(sample['audio']) pred_str = pipeline(sample['audio'], batch_size=eval_batch_size, language=language, task=task, chunk_length_s=model_args.chunk_length_s, return_timestamps=model_args.return_timestamps, num_beams=num_beams, length_penalty=model_args.length_penalty, do_sample=model_args.do_sample, top_k=model_args.top_k, temperature=model_args.temperature) eval_preds.append(pred_str['text']) eval_labels.append(label_str) eval_time = time.time() - eval_start (wer_metric, pred_str, label_str, norm_pred_str, norm_label_str) = compute_metrics(eval_preds, eval_labels, ngram_degree=data_args.ngram_degree) wer_desc = ' '.join([f'{split} {key}: {value} |' for (key, value) in wer_metric.items()]) logger.info(wer_desc) if has_tensorboard and jax.process_index() == 0 and ('tensorboard' in training_args.report_to): write_metric(summary_writer, wer_metric, prefix=split) if has_wandb and jax.process_index() == 0 and ('wandb' in training_args.report_to): write_wandb_metric(wandb_logger, wer_metric, eval_time, prefix=split) if data_args.log_predictions: write_wandb_pred(wandb_logger, eval_audios, pred_str, label_str, norm_pred_str, norm_label_str, prefix=split) logger.info('***** Running Eval *****') logger.info(f' Instantaneous batch size per device = {training_args.per_device_eval_batch_size}') logger.info(f' Total eval batch size (w. 
parallel & distributed) = {eval_batch_size}') logger.info(f' Beam size = {num_beams}') if num_beams > 1: logger.info(f' Length penalty size = {model_args.length_penalty}') logger.info(f' Do sample = {model_args.do_sample}') if model_args.do_sample: logger.info(f' Top k = {model_args.top_k}') logger.info(f' Temperature = {model_args.temperature}') for split in raw_datasets: eval_step(split=split) if __name__ == '__main__': main() # File: distil-whisper-main/training/flax/run_pseudo_labelling_pt.py """""" import csv import logging import os import string import sys import time from dataclasses import dataclass, field from datetime import timedelta from pathlib import Path from typing import Any, Dict, List, Optional, Union import datasets import evaluate import numpy as np import torch import transformers from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.logging import get_logger from datasets import DatasetDict, IterableDatasetDict, load_dataset from huggingface_hub import HfFolder, Repository, create_repo, get_full_repo_name from torch.utils.data import DataLoader from tqdm import tqdm from transformers import HfArgumentParser, Seq2SeqTrainingArguments, WhisperConfig, WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperProcessor, WhisperTokenizerFast from transformers.models.whisper.english_normalizer import EnglishTextNormalizer from transformers.utils import check_min_version from transformers.utils.versions import require_version check_min_version('4.34.0.dev0') require_version('datasets>=2.14.6', 'To fix: `pip install --upgrade datasets`') logger = get_logger(__name__) @dataclass class ModelArguments: model_name_or_path: str = field(metadata={'help': 'Path to pretrained Whisper model or model identifier from huggingface.co/models'}) config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}) tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}) feature_extractor_name: Optional[str] = field(default=None, metadata={'help': 'feature extractor name or path if not the same as model_name'}) processor_name: Optional[str] = field(default=None, metadata={'help': 'processor name or path if not the same as model_name'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) subfolder: str = field(default='', metadata={'help': 'In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you canspecify the folder name here.'}) token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).'}) dtype: Optional[str] = field(default='float32', metadata={'help': 'The data type (dtype) in which to load the model weights. 
One of `float32` (full-precision), `float16` or `bfloat16` (both half-precision).'}) attn_type: Optional[str] = field(default=None, metadata={'help': 'Which attention implementation to use in the encoder and decoder attention layers. Can be one of:\n1. `None`: default Transformers attention implementation.2. `flash_attn`: Flash Attention through PyTorch SDPA. Requires `torch>=2.0` and `optimum` to be installed. Recommended for hardware where Flash Attention 2 is not supported, e.g. Turing GPUs, (T4, RTX 2080)3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package https://github.com/Dao-AILab/flash-attention. **Always** recommended on supported hardware (Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100)'}) compile_encoder: Optional[bool] = field(default=True, metadata={'help': 'Whether or not to enable torch compile in the encoder module. Requires `torch>=2.0` to be installed.'}) @dataclass class DataTrainingArguments: dataset_name: str = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}) dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) text_column_name: str = field(default='text', metadata={'help': "The name of the dataset column containing the text data. Defaults to 'text'."}) id_column_name: str = field(default='id', metadata={'help': "The name of the dataset column containing the id data. Defaults to 'id'"}) max_label_length: int = field(default=128, metadata={'help': 'Truncate transcriptions that are longer `max_label_length` tokens.'}) preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'}) data_split_name: str = field(default='train+validation+test', metadata={'help': "The name of the data set splits to use (via the datasets library). Defaults to 'train+validation+test'. Multiple splits can be passed by splitting a list through the '+' character, e.g. 'train+validation' will pseudo-label both the 'train' and 'validation' splits sequentially."}) wandb_project: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb project.'}) streaming: bool = field(default=False, metadata={'help': "Whether to use dataset's streaming mode to load and pre-process the data."}) max_samples_per_split: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes, truncate the number of examples per split to this value if set.'}) return_timestamps: bool = field(default=False, metadata={'help': 'Whether to return the timestamps with the text. 
This enables the `FlaxWhisperTimestampsLogitsProcessor`.'}) language: str = field(default=None, metadata={'help': 'Language for multilingual distillation. This argument should be set for multilingual distillation only. For English speech recognition, it should be left as `None`.'}) task: str = field(default='transcribe', metadata={'help': 'Task, either `transcribe` for speech recognition or `translate` for speech translation.This argument should be set for multilingual distillation only. For English speech recognition, it should be left as `None`.'}) decode_token_ids: bool = field(default=True, metadata={'help': 'Whether or not to decode the predicted token ids to text transcriptions.'}) private_dataset: bool = field(default=False, metadata={'help': 'Whether or not to create a private dataset for the pseudo-labelled data.'}) def shift_tokens_right(label_ids: np.array, decoder_start_token_id: int) -> np.ndarray: shifted_label_ids = np.zeros_like(label_ids) shifted_label_ids[:, 1:] = label_ids[:, :-1] shifted_label_ids[:, 0] = decoder_start_token_id return shifted_label_ids @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any decoder_start_token_id: int decoder_prev_token_id: int input_padding: Union[bool, str] = 'max_length' target_padding: Union[bool, str] = 'max_length' max_target_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: model_input_name = self.processor.model_input_names[0] input_features = {model_input_name: [feature[model_input_name] for feature in features]} label_features = {'input_ids': [feature['labels'] for feature in features]} file_ids = {'input_ids': [feature['file_id'] for feature in features]} batch = self.processor.feature_extractor.pad(input_features, padding=self.input_padding, return_tensors='pt') labels_batch = self.processor.tokenizer.pad(label_features, max_length=self.max_target_length, padding=self.target_padding, return_tensors='pt') file_ids_batch = self.processor.tokenizer.pad(file_ids, max_length=self.max_target_length, padding=self.target_padding, return_tensors='pt') labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100) if set(torch.unique(labels[:, 0])).issubset({self.decoder_start_token_id, self.decoder_prev_token_id}): labels = labels[:, 1:] bos_index = torch.argmax((labels == self.decoder_start_token_id).long(), dim=1) prompt_mask = torch.arange(labels.shape[1]) < bos_index[:, None] labels = torch.where(prompt_mask, -100, labels) batch['labels'] = labels batch['file_ids'] = file_ids_batch['input_ids'] return batch def log_metric(accelerator, metrics: Dict, train_time: float, prefix: str='eval'): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v log_metrics[f'{prefix}/time'] = train_time accelerator.log(log_metrics) def log_pred(accelerator, pred_str: List[str], label_str: List[str], norm_pred_str: List[str], norm_label_str: List[str], prefix: str='eval', num_lines: int=200000): if accelerator.is_main_process: wandb_tracker = accelerator.get_tracker('wandb') prefix = prefix.replace('/', '-') str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))] wandb_tracker.log_table(table_name=f'{prefix}/all_predictions', columns=['Target', 'Pred', 'Norm Target', 'Norm Pred'], data=str_data[:num_lines]) str_data = np.asarray(str_data) str_data_incorrect = str_data[str_data[:, -2] != str_data[:, -1]] 
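# (Added comment, not in the original script:) `str_data_incorrect` keeps only the rows whose
# normalised prediction differs from the normalised target, so the table logged below surfaces
# just the mis-transcribed examples for inspection in wandb.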
wandb_tracker.log_table(table_name=f'{prefix}/incorrect_predictions', columns=['Target', 'Pred', 'Norm Target', 'Norm Pred'], data=str_data_incorrect[:num_lines]) def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() if model_args.dtype == 'float16': mixed_precision = 'fp16' torch_dtype = torch.float16 elif model_args.dtype == 'bfloat16': mixed_precision = 'bf16' torch_dtype = torch.bfloat16 else: mixed_precision = 'no' torch_dtype = torch.float32 kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=7200)) accelerator = Accelerator(gradient_accumulation_steps=training_args.gradient_accumulation_steps, mixed_precision=mixed_precision, log_with=training_args.report_to, project_dir=training_args.output_dir, kwargs_handlers=[kwargs]) accelerator.init_trackers(project_name=data_args.wandb_project) logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger.warning(f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}") if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() logger.info('Training/evaluation parameters %s', training_args) raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() token = model_args.token if model_args.token is not None else HfFolder().get_token() data_splits = data_args.data_split_name.split('+') for split in data_splits: if data_args.streaming: raw_datasets[split] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=split, cache_dir=data_args.dataset_cache_dir, token=token, streaming=True) else: raw_datasets[split] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=split, cache_dir=data_args.dataset_cache_dir, token=token, streaming=False, num_proc=data_args.preprocessing_num_workers) if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError(f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. Make sure to set `--audio_column_name` to the correct audio column - one of {', '.join(next(iter(raw_datasets.values())).column_names)}.") if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError(f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. 
Make sure to set `--text_column_name` to the correct text column - one of {', '.join(next(iter(raw_datasets.values())).column_names)}.") config = WhisperConfig.from_pretrained(model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=token) feature_extractor = WhisperFeatureExtractor.from_pretrained(model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=token) tokenizer = WhisperTokenizerFast.from_pretrained(model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=token) processor = WhisperProcessor.from_pretrained(model_args.processor_name if model_args.processor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=token) model = WhisperForConditionalGeneration.from_pretrained(model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, subfolder=model_args.subfolder, token=token, low_cpu_mem_usage=True, torch_dtype=torch_dtype, use_flash_attention_2=model_args.attn_type == 'flash_attn_2') if model_args.attn_type == 'flash_attn': model = model.to_bettertransformer() elif model_args.attn_type not in [None, 'flash_attn', 'flash_attn_2']: raise ValueError(f'Argument `attn_type` is set to {model_args.attn_type}. Should be one of:1. `None`: default Transformers attention implementation.2. `flash_attn`: Flash Attention through PyTorch SDPA. Requires `torch>=2.0` and `optimum` to be installed. Recommended for hardware where Flash Attention 2 is not supported, e.g. Turing GPUs, (T4, RTX 2080).3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package https://github.com/Dao-AILab/flash-attention. **Always** recommended on supported hardware (Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100).') if model_args.compile_encoder: model.model.encoder.forward = torch.compile(model.model.encoder.forward, mode='reduce-overhead', fullgraph=True) model.eval() if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined') return_timestamps = data_args.return_timestamps if hasattr(model.generation_config, 'is_multilingual') and model.generation_config.is_multilingual: tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task, predict_timestamps=return_timestamps) elif data_args.language is not None: raise ValueError('Setting language token for an English-only checkpoint is not permitted. 
The language argument should only be set for multilingual checkpoints.') raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)) max_label_length = data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length audio_column_name = data_args.audio_column_name num_workers = data_args.preprocessing_num_workers dataloader_num_workers = training_args.dataloader_num_workers text_column_name = data_args.text_column_name model_input_name = feature_extractor.model_input_names[0] id_column_name = data_args.id_column_name normalizer = EnglishTextNormalizer(tokenizer.english_spelling_normalizer) if data_args.max_samples_per_split is not None: for split in data_splits: raw_datasets[split] = raw_datasets[split].take(data_args.max_samples_per_split) if data_args.streaming else raw_datasets[split].select(range(data_args.max_samples_per_split)) def prepare_dataset(batch): sample = batch[audio_column_name] inputs = feature_extractor(sample['array'], sampling_rate=sample['sampling_rate']) batch[model_input_name] = inputs.get(model_input_name)[0] input_str = batch[text_column_name] batch['labels'] = tokenizer(input_str, max_length=max_label_length, truncation=True).input_ids batch['file_id'] = tokenizer(batch[id_column_name], add_special_tokens=False).input_ids return batch raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys()) if data_args.streaming: vectorized_datasets = raw_datasets.map(prepare_dataset, remove_columns=raw_datasets_features) else: vectorized_datasets = raw_datasets.map(prepare_dataset, remove_columns=raw_datasets_features, num_proc=num_workers, desc='preprocess dataset') if data_args.preprocessing_only: cache = {k: v.cache_files for (k, v) in vectorized_datasets.items()} logger.info(f'Data preprocessing finished. Files cached at {cache}.') return if data_args.streaming and dataloader_num_workers > 0: logger.warning('Using multiple dataloader num workers with streaming mode will result in different shards of data being transcribed in parallel. 
This is not advised if you want to preserve the order of the audio-text data.') output_dir = training_args.output_dir if training_args.push_to_hub: if training_args.hub_model_id is None: repo_name = get_full_repo_name(Path(output_dir).absolute().name, token=token) else: repo_name = training_args.hub_model_id create_repo(repo_name, exist_ok=True, token=token, repo_type='dataset', private=data_args.private_dataset) repo = Repository(output_dir, clone_from=repo_name, token=token, repo_type='dataset') with open(os.path.join(output_dir, '.gitattributes'), 'r+') as f: git_lfs_extensions = f.read() if '*.csv' not in git_lfs_extensions: f.write('*.csv filter=lfs diff=lfs merge=lfs -text') elif not os.path.exists(output_dir): os.makedirs(output_dir) metric = evaluate.load('wer') all_punctuation = list(string.punctuation.replace("'", '')) def compute_metrics(preds, labels, file_ids): for idx in range(len(labels)): labels[idx][labels[idx] == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps) label_str = tokenizer.batch_decode(labels, skip_special_tokens=True) spaced_pred_str = [pred_str[i].replace(punctuation, f' {punctuation} ') for punctuation in all_punctuation for i in range(len(pred_str))] spaced_label_str = [label_str[i].replace(punctuation, f' {punctuation} ') for punctuation in all_punctuation for i in range(len(label_str))] wer_ortho = 100 * metric.compute(predictions=spaced_pred_str, references=spaced_label_str) norm_pred_str = [normalizer(pred) for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] file_ids = [file_ids[i] for i in range(len(file_ids)) if len(norm_label_str[i]) > 0] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str) return ({'wer': wer, 'wer_ortho': wer_ortho}, pred_str, label_str, norm_pred_str, norm_label_str, file_ids) per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor, decoder_start_token_id=model.config.decoder_start_token_id, decoder_prev_token_id=tokenizer.all_special_ids[-3], input_padding='longest', target_padding='max_length', max_target_length=max_label_length) num_beams = training_args.generation_num_beams if training_args.generation_num_beams is not None else getattr(model.generation_config, 'num_beams', 1) gen_kwargs = {'max_length': max_label_length, 'num_beams': num_beams, 'return_timestamps': return_timestamps} if hasattr(model.generation_config, 'is_multilingual') and model.generation_config.is_multilingual: gen_kwargs.update({'language': data_args.language, 'task': data_args.task}) model = accelerator.prepare(model) def eval_step_with_save(split='eval'): eval_preds = [] eval_labels = [] eval_ids = [] eval_start = time.time() eval_loader = DataLoader(vectorized_datasets[split], batch_size=per_device_eval_batch_size, collate_fn=data_collator, num_workers=dataloader_num_workers, pin_memory=True) eval_loader = accelerator.prepare(eval_loader) batches = tqdm(eval_loader, desc=f'Evaluating {split}...', disable=not 
accelerator.is_local_main_process) split = split.replace('.', '-').split('/')[-1] output_csv = os.path.join(output_dir, f'{split}-transcription.csv') for (step, batch) in enumerate(batches): file_ids = batch.pop('file_ids') generated_ids = model.module.generate(batch['input_features'].to(dtype=torch_dtype), **gen_kwargs) generated_ids = accelerator.pad_across_processes(generated_ids, dim=1, pad_index=tokenizer.pad_token_id) (file_ids, generated_ids, labels) = accelerator.gather_for_metrics((file_ids, generated_ids, batch['labels'])) eval_preds.extend(generated_ids.cpu().numpy()) eval_labels.extend(labels.cpu().numpy()) file_ids = tokenizer.batch_decode(file_ids, skip_special_tokens=True) eval_ids.extend(file_ids) if step % training_args.logging_steps == 0 and step > 0: batches.write(f'Saving transcriptions for split {split} step {step}') accelerator.wait_for_everyone() if data_args.decode_token_ids: eval_preds = tokenizer.batch_decode(eval_preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps) csv_data = [[eval_ids[i], eval_preds[i]] for i in range(len(eval_preds))] with open(output_csv, 'w', encoding='UTF8', newline='') as f: writer = csv.writer(f) writer.writerow(['file_id', 'whisper_transcript']) writer.writerows(csv_data) if training_args.push_to_hub and accelerator.is_main_process: repo.push_to_hub(commit_message=f'Saving transcriptions for split {split} step {step}.', blocking=False) accelerator.wait_for_everyone() eval_time = time.time() - eval_start wer_desc = '' if 'validation' in split or 'test' in split: (wer_metric, pred_str, label_str, norm_pred_str, norm_label_str, eval_ids) = compute_metrics(eval_preds, eval_labels, eval_ids) wer_desc = ' '.join([f'Eval {key}: {value} |' for (key, value) in wer_metric.items()]) log_metric(accelerator, metrics=wer_metric, train_time=eval_time, prefix=split) log_pred(accelerator, pred_str, label_str, norm_pred_str, norm_label_str, prefix=split) if data_args.decode_token_ids: eval_preds = pred_str elif data_args.decode_token_ids: eval_preds = tokenizer.batch_decode(eval_preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps) batches.write(f'Saving final transcriptions for split {split}.') csv_data = [[eval_ids[i], eval_preds[i]] for i in range(len(eval_preds))] with open(output_csv, 'w', encoding='UTF8', newline='') as f: writer = csv.writer(f) writer.writerow(['file_id', 'whisper_transcript']) writer.writerows(csv_data) logger.info(wer_desc) if not data_args.streaming: raw_datasets[split] = raw_datasets[split].add_column('whisper_transcript', eval_preds) logger.info('***** Running Labelling *****') logger.info(f' Instantaneous batch size per device = {training_args.per_device_eval_batch_size}') logger.info(f' Total eval batch size (w. 
parallel & distributed) = {training_args.per_device_eval_batch_size * accelerator.num_processes}') logger.info(f' Predict labels with timestamps = {return_timestamps}') logger.info(f' Decode labels to transcriptions = {data_args.decode_token_ids}') for split in data_splits: eval_step_with_save(split=split) accelerator.wait_for_everyone() if training_args.push_to_hub and accelerator.is_main_process: repo.push_to_hub(commit_message=f"Saving final transcriptions for split {split.replace('.', '-').split('/')[-1]}", blocking=False) if not data_args.streaming and accelerator.is_main_process: raw_datasets.save_to_disk(output_dir, num_proc=num_workers) if training_args.push_to_hub: raw_datasets.push_to_hub(repo_name, config_name=data_args.dataset_config_name) accelerator.end_training() if __name__ == '__main__': main() # File: distil-whisper-main/training/flax/run_pt_long_form_transcription.py """""" import logging import os import sys import time from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import torch import transformers from datasets import DatasetDict, IterableDatasetDict, load_dataset from jiwer import process_words, wer_default from nltk import ngrams from tqdm import tqdm from transformers import HfArgumentParser, Seq2SeqTrainingArguments, WhisperTokenizer, is_tensorboard_available, is_wandb_available, pipeline from transformers.models.whisper.english_normalizer import EnglishTextNormalizer from transformers.utils import check_min_version from transformers.utils.versions import require_version check_min_version('4.27.0.dev0') require_version('datasets>=1.18.0', 'To fix: update `datasets` to the latest version: `pip install --upgrade datasets[audio]`') logger = logging.getLogger(__name__) @dataclass class ModelArguments: model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) subfolder: str = field(default='', metadata={'help': 'In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you canspecify the folder name here.'}) use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'}) dtype: Optional[str] = field(default='float32', metadata={'help': 'Floating-point format in which the model weights should be initialized and evaluated. Choose one of `[float32, float16, bfloat16]`.'}) return_timestamps: Optional[bool] = field(default=False, metadata={'help': 'Whether to predict timestamps (alongside the text predictions). Timestamp predictions are discarded at the end of inference, but may assist in the model in reducing hallucinations.'}) length_penalty: Optional[float] = field(default=1.0, metadata={'help': 'Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. 
negative), length_penalty > 1.0 promotes longer sequences, while length_penalty < 1.0 encourages shorter sequences.'}) do_sample: Optional[bool] = field(default=False, metadata={'help': 'Whether or not to use sampling ; use greedy decoding otherwise.'}) top_k: Optional[int] = field(default=50, metadata={'help': 'The number of the highest probability vocabulary tokens to keep for top-k-filtering.'}) temperature: Optional[float] = field(default=1.0, metadata={'help': 'The value used to modulate the next token probabilities if sampling.'}) chunk_length_s: Optional[float] = field(default=0, metadata={'help': 'The input length for each chunk. By default, the chunk length is set to 0, which means no chunking.'}) @dataclass class DataTrainingArguments: dataset_name: str = field(default=None, metadata={'help': "The name of the dataset to use (via the datasets library). Load and combine multiple datasets by separating dataset hours by a '+' symbol."}) dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) dataset_split_name: Optional[str] = field(default=None, metadata={'help': 'The split name of the dataset to use (via the datasets library).'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) text_column_name: str = field(default=None, metadata={'help': "The name of the dataset column containing the text data. Defaults to 'text'."}) max_label_length: int = field(default=256, metadata={'help': 'Truncate transcriptions that are longer `max_label_length` tokens.'}) wandb_project: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb project.'}) wandb_name: str = field(default=None, metadata={'help': 'The name of the wandb run.'}) wandb_job_type: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb job type.'}) wandb_dir: str = field(default=None, metadata={'help': 'The absolute path to save the wandb logs.'}) save_code_to_wandb: bool = field(default=False, metadata={'help': 'Whether to save main script to wandb. 
This is valuable for improving experiment reproducibility and to diff code across experiments in the UI.'}) streaming: bool = field(default=True, metadata={'help': "Whether to use Datasets' streaming mode to load and the data."}) max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes, truncate the number of eval examples to this value if set.'}) log_audio: Optional[bool] = field(default=False, metadata={'help': 'For debugging purposes, record the audio samples as well as the ground truths / preds.'}) log_predictions: Optional[bool] = field(default=True, metadata={'help': 'Whether or not to log the ground truths / pred text to the wandb logger.'}) ngram_degree: Optional[int] = field(default=5, metadata={'help': 'Degree of n-grams used when computing duplicate n-grams in the predicted text.'}) def write_metric(summary_writer, eval_metrics, prefix='eval'): for (metric_name, value) in eval_metrics.items(): summary_writer.add_scalar(f'{prefix}/{metric_name}', value, 0) def write_wandb_metric(wandb_logger, metrics, train_time, prefix): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v log_metrics[f'{prefix}/time'] = train_time wandb_logger.log(log_metrics) def convert_audio_to_wandb(wandb_logger, audio): return wandb_logger.Audio(audio['array'][:, np.newaxis], sample_rate=audio['sampling_rate']) def write_wandb_pred(wandb_logger, eval_audios, pred_str, label_str, norm_pred_str, norm_label_str, prefix='eval'): columns = ['Target', 'Pred', 'Norm Target', 'Norm Pred'] str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))] if len(eval_audios) > 0: columns.insert(0, 'Audio') str_data = [[convert_audio_to_wandb(wandb_logger, eval_audios[i]), *str_data[i]] for i in range(len(pred_str))] wandb_logger.log({f'{prefix}/predictions': wandb_logger.Table(columns=columns, data=str_data)}) def convert_dataset_str_to_list(dataset_names, dataset_config_names, splits=None, text_column_names=None, dataset_hours=None, default_split='train'): if isinstance(dataset_names, str): dataset_names = dataset_names.split('+') for i in range(len(dataset_names)): ds_name = dataset_names[i] dataset_names[i] = f'distil-whisper/{ds_name}' if '/' not in ds_name else ds_name dataset_config_names = dataset_config_names.split('+') splits = splits.split('+') if splits is not None else None text_column_names = text_column_names.split('+') if text_column_names is not None else None dataset_hours = dataset_hours.split('+') if dataset_hours is not None else None if len(dataset_names) != len(dataset_config_names): raise ValueError(f'Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_config_names)} configs.') if splits is not None and len(splits) != len(dataset_names): raise ValueError(f'Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits.') if text_column_names is not None and len(text_column_names) != len(dataset_names): raise ValueError(f'Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and {len(text_column_names)} text column names.') if dataset_hours is not None: if len(dataset_hours) != len(dataset_names): raise ValueError(f'Ensure one probability is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_hours)} hours.') dataset_hours = [float(ds_hours) for ds_hours in dataset_hours] else: dataset_hours = [None] * len(dataset_names) text_column_names = 
text_column_names if text_column_names is not None else ['text' for _ in range(len(dataset_names))] splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))] dataset_names_dict = [] for (i, ds_name) in enumerate(dataset_names): dataset_names_dict.append({'name': ds_name, 'config': dataset_config_names[i], 'split': splits[i], 'text_column_name': text_column_names[i], 'hours': dataset_hours[i]}) return dataset_names_dict def data(dataset, text_column_name='text', log_audio=False): for item in dataset: yield {**item['audio'], 'reference': item[text_column_name], 'audio': item['audio'] if log_audio else None} def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() has_tensorboard = is_tensorboard_available() if 'tensorboard' in training_args.report_to: if has_tensorboard: try: from torch.utils.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=os.path.join(training_args.output_dir, 'runs')) except ImportError as ie: has_tensorboard = False logger.warning(f'Unable to display metrics through TensorBoard because some package are not installed: {ie}') else: logger.warning('Unable to display metrics through TensorBoard because the package is not installed: Please run `pip install tensorboard` to enable.') has_wandb = is_wandb_available() if 'wandb' in training_args.report_to: if has_wandb: import wandb as wandb_logger wandb_logger.init(project=data_args.wandb_project, name=data_args.wandb_name, job_type=data_args.wandb_job_type, dir=data_args.wandb_dir, save_code=data_args.save_code_to_wandb) else: logger.warning('Wandb logging requires wandb to be installed. Run `pip install wandb` to enable.') logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) logger.setLevel(logging.INFO) datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() logger.info('Evaluation parameters %s', training_args) raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() dataset_names_dict = convert_dataset_str_to_list(data_args.dataset_name, data_args.dataset_config_name, splits=data_args.dataset_split_name, text_column_names=data_args.text_column_name) for dataset_dict in dataset_names_dict: pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}" raw_datasets[pretty_name] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, use_auth_token=True if model_args.use_auth_token else None, streaming=data_args.streaming) if dataset_dict['text_column_name'] not in list(raw_datasets[pretty_name].features.keys()): raise ValueError(f"--text column name {dataset_dict['text_column_name']} not found in the evaluation dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column for the target text. 
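# The `convert_dataset_str_to_list` helper above expands '+'-separated dataset specs into one
# dict per dataset. A minimal sketch of its behaviour (the dataset names below are illustrative
# placeholders, not taken from this repo):
#
#   convert_dataset_str_to_list("librispeech_asr+mozilla-foundation/common_voice_13_0", "clean+en", splits="test+test")
#   # -> [{'name': 'distil-whisper/librispeech_asr', 'config': 'clean', 'split': 'test', 'text_column_name': 'text', 'hours': None},
#   #     {'name': 'mozilla-foundation/common_voice_13_0', 'config': 'en', 'split': 'test', 'text_column_name': 'text', 'hours': None}]
#
# Names without a '/' are prefixed with 'distil-whisper/', and missing splits / text columns fall
# back to the defaults ('train' and 'text' respectively).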
Should be one of {' '.join(list(raw_datasets[pretty_name].features.keys()))}") if dataset_dict['text_column_name'] != 'text': raw_datasets[pretty_name] = raw_datasets[pretty_name].rename_column(dataset_dict['text_column_name'], 'text') raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys()) audio_column_name = data_args.audio_column_name if audio_column_name not in raw_datasets_features: raise ValueError(f"--audio_column_name '{audio_column_name}' not found in dataset '{data_args.dataset_name}'. Make sure to set `--audio_column_name` to the correct audio column - one of {', '.join(raw_datasets_features)}.") for split in raw_datasets: raw_datasets[split] = raw_datasets[split].remove_columns(set(raw_datasets[split].features.keys()) - {audio_column_name, 'text'}) if data_args.max_eval_samples is not None: for split in raw_datasets: raw_datasets[split] = raw_datasets[split].take(data_args.max_eval_samples) if data_args.streaming else raw_datasets[split].select(range(data_args.max_eval_samples)) per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) num_beams = training_args.generation_num_beams if training_args.generation_num_beams is not None else 1 model_kwargs = {'cache_dir': model_args.cache_dir, 'use_auth_token': True if model_args.use_auth_token else None, 'subfolder': model_args.subfolder} pipe = pipeline('automatic-speech-recognition', model_args.model_name_or_path, torch_dtype=getattr(torch, model_args.dtype), model_kwargs=model_kwargs, max_new_tokens=training_args.generation_max_length, batch_size=per_device_eval_batch_size, chunk_length_s=model_args.chunk_length_s, return_timestamps=model_args.return_timestamps, device='cuda:0' if torch.cuda.is_available() else 'cpu') if pipe.model.can_generate(): if pipe.model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined') generate_kwargs = {'num_beams': num_beams, 'length_penalty': model_args.length_penalty, 'do_sample': model_args.do_sample, 'top_k': model_args.top_k, 'temperature': model_args.temperature} if hasattr(pipe.model.generation_config, 'is_multilingual') and pipe.model.generation_config.is_multilingual: generate_kwargs.update({'language': 'English', 'task': 'transcribe'}) else: generate_kwargs = None whisper_tokenizer = WhisperTokenizer.from_pretrained('openai/whisper-tiny.en') normalizer = EnglishTextNormalizer(whisper_tokenizer.english_spelling_normalizer) def compute_metrics(pred_str, label_str, ngram_degree=5): norm_pred_str = [normalizer(pred) for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] wer_output = process_words(norm_label_str, norm_pred_str, wer_default, wer_default) wer_norm = 100 * wer_output.wer ier_norm = 100 * wer_output.insertions / sum([len(ref) for ref in wer_output.references]) ser_norm = 100 * wer_output.substitutions / sum([len(ref) for ref in wer_output.references]) der_norm = 100 * wer_output.deletions / sum([len(ref) for ref in wer_output.references]) all_ngrams = list(ngrams(' '.join(norm_pred_str).split(), ngram_degree)) repeated_ngrams = len(all_ngrams) -
len(set(all_ngrams)) return ({'wer': wer_norm, 'ier': ier_norm, 'ser': ser_norm, 'der': der_norm, 'repeated_ngrams': repeated_ngrams}, pred_str, label_str, norm_pred_str, norm_label_str) def eval_step(split='eval'): eval_preds = [] eval_labels = [] eval_audios = [] eval_start = time.time() for sample in tqdm(pipe(data(raw_datasets[split], log_audio=data_args.log_audio), generate_kwargs=generate_kwargs), desc=f'Evaluating {split}...'): eval_preds.append(sample['text']) eval_labels.append(sample['reference'][0]) if data_args.log_audio: eval_audios.append(sample['audio'][0]) eval_time = time.time() - eval_start (wer_metric, pred_str, label_str, norm_pred_str, norm_label_str) = compute_metrics(eval_preds, eval_labels, data_args.ngram_degree) wer_desc = ' '.join([f'{split} {key}: {value} |' for (key, value) in wer_metric.items()]) logger.info(wer_desc) if has_tensorboard and 'tensorboard' in training_args.report_to: write_metric(summary_writer, wer_metric, prefix=split) if has_wandb and 'wandb' in training_args.report_to: write_wandb_metric(wandb_logger, wer_metric, eval_time, prefix=split) if data_args.log_predictions: write_wandb_pred(wandb_logger, eval_audios, pred_str, label_str, norm_pred_str, norm_label_str, prefix=split) logger.info('***** Running Eval *****') logger.info(f' Instantaneous batch size per device = {training_args.per_device_eval_batch_size}') logger.info(f' Total eval batch size (w. parallel & distributed) = {training_args.per_device_eval_batch_size}') if pipe.model.can_generate(): logger.info(f' Beam size = {num_beams}') if num_beams > 1: logger.info(f' Length penalty size = {model_args.length_penalty}') logger.info(f' Do sample = {model_args.do_sample}') if model_args.do_sample: logger.info(f' Top k = {model_args.top_k}') logger.info(f' Temperature = {model_args.temperature}') for split in raw_datasets: eval_step(split=split) if __name__ == '__main__': main() # File: distil-whisper-main/training/flax/run_speculative_decoding.py import copy import time import torch from datasets import load_dataset from transformers import AutoProcessor, WhisperForConditionalGeneration DEVICE = 'cuda' DTYPE = torch.float16 SAMPLING_RATE = 16000 BATCH_SIZE = 1 USE_FLASH_ATTN_2 = True GAMMAS = [5, 7, 6, 5, 4, 3, 5] COUNT = 0 teacher = WhisperForConditionalGeneration.from_pretrained('/home/patrick/distil_whisper/', torch_dtype=DTYPE, variant='fp16', low_cpu_mem_usage=True, use_flash_attention_2=USE_FLASH_ATTN_2) student = WhisperForConditionalGeneration.from_pretrained('/home/patrick/distil_whisper_student/', torch_dtype=DTYPE, variant='fp16', low_cpu_mem_usage=True, use_flash_attention_2=USE_FLASH_ATTN_2) student.generation_config = copy.deepcopy(teacher.generation_config) student.generation_config.num_assistant_tokens_schedule = 'constant' teacher.to(DEVICE) student.to(DEVICE) processor = AutoProcessor.from_pretrained('sanchit-gandhi/large-32-2-gpu-flat-lr') ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation') total_time_default = 0 total_time_spec = 0 total_time_spec_2 = 0 input_values = ds[0]['audio']['array'] inputs = processor(input_values, return_tensors='pt', sampling_rate=SAMPLING_RATE) input_features = inputs.input_features.to(device=DEVICE, dtype=DTYPE) _ = teacher.generate(input_features, max_length=100) end_idx = ds.shape[0] for audio_idx in range(0, end_idx, BATCH_SIZE): input_values = ds[audio_idx:audio_idx + BATCH_SIZE] input_values = [i['array'] for i in input_values['audio']] inputs = processor(input_values, return_tensors='pt', 
sampling_rate=SAMPLING_RATE) input_features = inputs.input_features.to(device=DEVICE, dtype=DTYPE) start_time = time.time() out = teacher.generate(input_features, max_length=100) run_time = time.time() - start_time print(f'Normal Decoding: {run_time}') total_time_default += run_time default_out = processor.batch_decode(out, skip_special_tokens=True) start_time = time.time() with torch.no_grad(): encoder_outputs = teacher.get_encoder()(input_features) out = teacher.generate(assistant_model=student, assistant_encoder_outputs=encoder_outputs, encoder_outputs=encoder_outputs, max_length=100) run_time = time.time() - start_time spec_out_2 = processor.batch_decode(out, skip_special_tokens=True) print(f'Speculative Decoding 2: {run_time}') total_time_spec_2 += run_time if spec_out_2 != default_out: COUNT += 1 print(f'Audio {audio_idx} does not match. Spec: {spec_out_2}, True: {default_out}') print(20 * '=') print('Total time', total_time_default) print(f'Overall speed-up spec 2 {total_time_default / total_time_spec_2}') # File: distil-whisper-main/training/flax/run_speed_pt.py """""" import json import logging import os import string import subprocess import sys import tempfile import time from dataclasses import dataclass, field from functools import partial from typing import Optional import datasets import evaluate import numpy as np import torch import transformers import whisper from datasets import DatasetDict, IterableDatasetDict, load_dataset from tqdm import tqdm from transformers import HfArgumentParser, WhisperForConditionalGeneration, WhisperProcessor, is_wandb_available, pipeline from transformers.models.whisper.english_normalizer import EnglishTextNormalizer from transformers.models.whisper.modeling_whisper import WhisperForCausalLM from transformers.utils import check_min_version from transformers.utils.versions import require_version check_min_version('4.27.0.dev0') require_version('datasets>=1.18.0', 'To fix: pip install -r examples/flax/speech-recogintion/requirements.txt') logger = logging.getLogger(__name__) PIPELINE_BATCH_SIZE = 16 @dataclass class DataTrainingArguments: dataset_name: str = field(default=None, metadata={'help': "The name of the dataset to use (via the datasets library). Load and combine multiple datasets by separating dataset hours by a '+' symbol."}) model_name_or_path: str = field(default=None, metadata={'help': 'The name of the model to use (via the transformers library). '}) assistant_model_name_or_path: str = field(default=None, metadata={'help': 'The name of the assistant model to use to do speculative decoding. If None, no speculative decoding will be done.'}) use_fp16: bool = field(default=True, metadata={'help': 'Whether to evaluate in fp16'}) use_torch_compile: bool = field(default=False, metadata={'help': 'Whether to compile the model'}) use_orig_whisper: bool = field(default=False, metadata={'help': 'Whether to evaluate with orig whisper'}) use_bf16: bool = field(default=False, metadata={'help': 'Whether to evaluate in bf16'}) use_pipeline: bool = field(default=False, metadata={'help': 'Whether to evaluate with Transformers pipeline'}) chunk_length_s: float = field(default=30.0, metadata={'help': 'Chunk length to use when `use_pipeline` is enabled.'}) return_timestamps: bool = field(default=False, metadata={'help': 'Whether to decode with timestamps. 
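# The speculative-decoding benchmark above relies on assisted generation. A minimal,
# self-contained sketch using the standard `assistant_model` argument of `generate()` in recent
# transformers releases (checkpoints and `audio_array` are illustrative placeholders):
#
#   import torch
#   from transformers import AutoProcessor, WhisperForConditionalGeneration
#
#   processor = AutoProcessor.from_pretrained("openai/whisper-large-v2")
#   teacher = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2", torch_dtype=torch.float16).to("cuda")
#   student = WhisperForConditionalGeneration.from_pretrained("distil-whisper/distil-large-v2", torch_dtype=torch.float16).to("cuda")
#
#   # audio_array: 1-D numpy waveform sampled at 16 kHz
#   inputs = processor(audio_array, sampling_rate=16000, return_tensors="pt")
#   input_features = inputs.input_features.to("cuda", torch.float16)
#   # The student drafts tokens that the teacher verifies in a single forward pass.
#   generated = teacher.generate(input_features, assistant_model=student, max_new_tokens=128)
#   print(processor.batch_decode(generated, skip_special_tokens=True))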
This can help improve the WER for long-form evaluation.'}) attn_type: Optional[str] = field(default=None, metadata={'help': "Which attn type to use: None, 'flash', 'compile', 'flash+compile'"}) batch_size: int = field(default=1, metadata={'help': 'The batch size used for evaluation.'}) num_beams: int = field(default=1, metadata={'help': 'The beam size used for evaluation.'}) samples_per_dataset: Optional[int] = field(default=None, metadata={'help': 'Number of samples per dataset used to measure speed.'}) dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) dataset_split_name: Optional[str] = field(default=None, metadata={'help': 'The split name of the dataset to use (via the datasets library).'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) text_column_name: str = field(default=None, metadata={'help': 'The name of the dataset column containing the text data. Defaults to `text`.'}) max_duration_in_seconds: float = field(default=30.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'}) min_duration_in_seconds: float = field(default=0.0, metadata={'help': 'Filter audio files that are shorter than `min_duration_in_seconds` seconds'}) max_label_length: int = field(default=128, metadata={'help': 'Truncate transcriptions that are longer than `max_label_length` tokens.'}) max_gen_length: int = field(default=128, metadata={'help': 'Generate up until max_gen_length tokens.'}) pad_target_to_multiple_of: Optional[int] = field(default=None, metadata={'help': 'If set will pad the target sequence to a multiple of the provided value. This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the targets to max length.'}) preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'}) wandb_project: str = field(default='distil-whisper-speed-benchmark', metadata={'help': 'The name of the wandb project.'}) wandb_name: str = field(default=None, metadata={'help': 'The name of the wandb run.'}) wandb_job_type: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb job type.'}) wandb_dir: str = field(default=None, metadata={'help': 'The absolute path to save the wandb logs.'}) save_code_to_wandb: bool = field(default=False, metadata={'help': 'Whether to save main script to wandb.
This is valuable for improving experiment reproducibility and to diff code across experiments in the UI.'}) streaming: bool = field(default=True, metadata={'help': "Whether to use Datasets' streaming mode to load and the data."}) max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes, truncate the number of eval examples to this value if set.'}) log_audio: Optional[bool] = field(default=False, metadata={'help': 'For debugging purposes, record the audio samples as well as the ground truths / preds.'}) def write_metric(summary_writer, eval_metrics, step, prefix='eval'): for (metric_name, value) in eval_metrics.items(): summary_writer.scalar(f'{prefix}/{metric_name}', value, step) def write_wandb_metric(wandb_logger, metrics, train_time, prefix): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v log_metrics[f'{prefix}/time'] = train_time wandb_logger.log(log_metrics) def convert_dataset_str_to_list(dataset_names, dataset_config_names, splits=None, text_column_names=None, dataset_hours=None, default_split='train'): if isinstance(dataset_names, str): dataset_names = dataset_names.split('+') for i in range(len(dataset_names)): ds_name = dataset_names[i] dataset_names[i] = f'distil-whisper/{ds_name}' if '/' not in ds_name else ds_name dataset_config_names = dataset_config_names.split('+') splits = splits.split('+') if splits is not None else None text_column_names = text_column_names.split('+') if text_column_names is not None else None dataset_hours = dataset_hours.split('+') if dataset_hours is not None else None if len(dataset_names) != len(dataset_config_names): raise ValueError(f'Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_config_names)} configs.') if splits is not None and len(splits) != len(dataset_names): raise ValueError(f'Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits.') if text_column_names is not None and len(text_column_names) != len(dataset_names): raise ValueError(f'Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and {len(text_column_names)} text column names.') if dataset_hours is not None: if len(dataset_hours) != len(dataset_names): raise ValueError(f'Ensure one probability is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_hours)} hours.') dataset_hours = [float(ds_hours) for ds_hours in dataset_hours] else: dataset_hours = [None] * len(dataset_names) text_column_names = text_column_names if text_column_names is not None else ['text' for _ in range(len(dataset_names))] splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))] dataset_names_dict = [] for (i, ds_name) in enumerate(dataset_names): dataset_names_dict.append({'name': ds_name, 'config': dataset_config_names[i], 'split': splits[i], 'text_column_name': text_column_names[i], 'hours': dataset_hours[i]}) return dataset_names_dict def main(): parser = HfArgumentParser([DataTrainingArguments]) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): data_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0] else: data_args = parser.parse_args_into_dataclasses()[0] logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) if data_args.use_pipeline and data_args.batch_size > 1: raise ValueError('Make sure that `batch_size` is 
set to 1 when `use_pipeline=True`.') has_wandb = is_wandb_available() if has_wandb: import wandb import wandb as wandb_logger wandb_logger.init(project=data_args.wandb_project, name=data_args.wandb_name, job_type=data_args.wandb_job_type, dir=data_args.wandb_dir, save_code=data_args.save_code_to_wandb) wandb_logger.log({'torch_version': str(torch.__version__)}) wandb_logger.log({'transformers_version': str(transformers.__version__)}) wandb_logger.log({'batch_size': data_args.batch_size}) if data_args.use_pipeline: wandb_logger.log({'chunk_length_s': data_args.chunk_length_s}) else: raise ValueError('Wandb logging requires wandb to be installed. Run `pip install wandb` to enable.') raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() dataset_names_dict = convert_dataset_str_to_list(data_args.dataset_name, data_args.dataset_config_name, splits=data_args.dataset_split_name, text_column_names=data_args.text_column_name) if len(dataset_names_dict) == 1: dataset_dict = dataset_names_dict[0] raw_datasets['eval'] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, use_auth_token=True, streaming=data_args.streaming) if dataset_dict['text_column_name'] not in list(raw_datasets['eval'].features.keys()): raise ValueError(f"--text column name {dataset_dict['text_column_name']} not found in the evaluation dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column for the target text. Should be one of {' '.join(list(raw_datasets['eval'].features.keys()))}") if dataset_dict['text_column_name'] != 'text': raw_datasets['eval'] = raw_datasets['eval'].rename_column(dataset_dict['text_column_name'], 'text') else: for dataset_dict in tqdm(dataset_names_dict, desc='Loading datasets...'): pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}" raw_datasets[pretty_name] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, use_auth_token=True, streaming=data_args.streaming) if dataset_dict['text_column_name'] not in list(raw_datasets[pretty_name].features.keys()): raise ValueError(f"`--text_column_name` {dataset_dict['text_column_name']} not found in the evaluation dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column for the target text. 
Should be one of {' '.join(list(raw_datasets[pretty_name].features.keys()))}") if dataset_dict['text_column_name'] != 'text': raw_datasets[pretty_name] = raw_datasets[pretty_name].rename_column(dataset_dict['text_column_name'], 'text') processor = WhisperProcessor.from_pretrained(data_args.model_name_or_path) dtype = torch.float16 if data_args.use_fp16 else torch.float32 if data_args.use_bf16: dtype = torch.bfloat16 use_flash_attention_2 = data_args.attn_type is not None and 'flash2' in data_args.attn_type result = subprocess.run(['nvidia-smi'], capture_output=True, text=True) gpu_type = [x for x in result.stdout.split('=') if len(x) > 1][1].split('0')[1].split() use_sdpa = False if gpu_type[0] == 'Tesla' and use_flash_attention_2: use_flash_attention_2 = False use_sdpa = True use_orig_whisper = False if data_args.use_orig_whisper: use_orig_whisper = True model_name = data_args.model_name_or_path.split('/')[-1].split('whisper-')[-1] model = whisper.load_model(model_name) model.cuda() else: model = WhisperForConditionalGeneration.from_pretrained(data_args.model_name_or_path, torch_dtype=dtype, use_flash_attention_2=use_flash_attention_2) model.cuda() if use_sdpa: logger.info('Use SDPA via BetterTransformers...') model.to_bettertransformer() if data_args.use_torch_compile: logger.info('Enabling torch compile for the encoder.') model.model.encoder.forward = torch.compile(model.model.encoder.forward, mode='reduce-overhead', fullgraph=True) input_values = np.random.randn(data_args.batch_size, 16000) input_features = processor(input_values, return_tensors='pt', sampling_rate=16000).input_features input_features = input_features.to(dtype=dtype, device=model.device) for _ in range(3): _ = model.generate(input_features) model_pipeline = None if data_args.use_pipeline: model_pipeline = pipeline('automatic-speech-recognition', model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, torch_dtype=dtype, device=model.device, chunk_length_s=data_args.chunk_length_s) model_pipeline_forward = model_pipeline._forward assistant_model = None if data_args.assistant_model_name_or_path is not None: logger.info('Loading assistant model...') if data_args.assistant_model_name_or_path.startswith('openai'): assistant_model = WhisperForConditionalGeneration.from_pretrained(data_args.assistant_model_name_or_path, torch_dtype=dtype, use_flash_attention_2=use_flash_attention_2) else: assistant_model = WhisperForCausalLM.from_pretrained(data_args.assistant_model_name_or_path, torch_dtype=dtype, use_flash_attention_2=use_flash_attention_2) assistant_model.cuda() raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=processor.feature_extractor.sampling_rate)) max_label_length = data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length audio_column_name = data_args.audio_column_name num_workers = data_args.preprocessing_num_workers model_input_name = processor.feature_extractor.model_input_names[0] normalizer = EnglishTextNormalizer(processor.tokenizer.english_spelling_normalizer) if data_args.max_eval_samples is not None: for split in raw_datasets: raw_datasets[split] = raw_datasets[split].take(data_args.max_eval_samples) if data_args.streaming else raw_datasets[split].select(range(data_args.max_eval_samples)) def prepare_dataset(batch): sample = batch[audio_column_name] if model_pipeline is None and (not use_orig_whisper): inputs = processor.feature_extractor(sample['array'], 
sampling_rate=sample['sampling_rate'], return_tensors='pt') batch[model_input_name] = inputs.get(model_input_name) else: batch[model_input_name] = sample['array'] batch['length_in_s'] = len(sample['array']) / sample['sampling_rate'] input_str = batch['text'] batch['labels'] = processor.tokenizer(input_str, max_length=max_label_length, truncation=True).input_ids return batch vectorized_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() for split in raw_datasets: raw_datasets_features = list(raw_datasets[split].features.keys()) map_fn = partial(raw_datasets[split].map, function=prepare_dataset, remove_columns=raw_datasets_features) vectorized_datasets[split] = map_fn(num_proc=num_workers, desc='preprocess eval dataset') if not data_args.streaming else map_fn() if data_args.preprocessing_only: cache = {k: v.cache_files for (k, v) in vectorized_datasets.items()} logger.info(f'Data preprocessing finished. Files cached at {cache}.') return metric = evaluate.load('wer') list(string.punctuation.replace("'", '')) def compute_metrics(pred_str, label_str): norm_pred_str = [normalizer(pred) for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] if len(norm_pred_str) == 0 or len(norm_label_str) == 0: return 0.0 wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str) return wer result_datasets = DatasetDict() def benchmark(batch): if model_pipeline is None and (not use_orig_whisper): inputs = torch.cat(batch[model_input_name], dim=0).cuda() if data_args.use_fp16: inputs = inputs.to(torch.float16) if data_args.use_bf16: inputs = inputs.to(torch.bfloat16) inner_batch_size = inputs.shape[0] else: inner_batch_size = 1 inputs = batch[model_input_name] gen_kwargs = {'return_timestamps': data_args.return_timestamps, 'max_length': data_args.max_gen_length} if not data_args.model_name_or_path.endswith('.en') and (not data_args.model_name_or_path.endswith('24-2')): gen_kwargs['language'] = '<|en|>' gen_kwargs['task'] = 'transcribe' gen_kwargs['num_beams'] = data_args.num_beams if use_orig_whisper: raw_audio = inputs[0].astype(np.float32) out_dict = model.transcribe(raw_audio) batch['transcription'] = [out_dict['text']] batch['time'] = [out_dict['all_time']] elif model_pipeline is not None: time_result = [] def _forward_time(*args, **kwargs): start_time = time.time() result = model_pipeline_forward(*args, **kwargs) end_time = time.time() - start_time time_result.append(end_time) return result model_pipeline._forward = _forward_time result = model_pipeline(inputs, batch_size=PIPELINE_BATCH_SIZE, generate_kwargs=gen_kwargs)[0]['text'] batch['transcription'] = [result] batch['time'] = [sum(time_result)] elif assistant_model is not None: gen_kwargs['assistant_model'] = assistant_model start_time = time.time() with torch.no_grad(): encoder_outputs = model.get_encoder()(inputs) gen_kwargs['encoder_outputs'] = encoder_outputs if data_args.assistant_model_name_or_path.startswith('openai'): with torch.no_grad(): assistant_encoder_outputs = assistant_model.get_encoder()(inputs) gen_kwargs['assistant_encoder_outputs'] = assistant_encoder_outputs else: 
gen_kwargs['assistant_encoder_outputs'] = encoder_outputs output_ids = model.generate(**gen_kwargs) batch['time'] = inner_batch_size * [(time.time() - start_time) / inner_batch_size] batch['transcription'] = processor.batch_decode(output_ids, skip_special_tokens=True) else: start_time = time.time() output_ids = model.generate(inputs, **gen_kwargs) batch['time'] = inner_batch_size * [(time.time() - start_time) / inner_batch_size] batch['transcription'] = processor.batch_decode(output_ids, skip_special_tokens=True) batch['length_in_s'] = batch['length_in_s'] batch['reference'] = processor.batch_decode(batch['labels'], skip_special_tokens=True) batch['num_words'] = [len(r.split()) for r in batch['reference']] return batch for split in vectorized_datasets: vectorized_datasets_features = [model_input_name] map_fn = partial(vectorized_datasets[split].map, function=benchmark, remove_columns=vectorized_datasets_features, batch_size=data_args.batch_size, batched=True) result_datasets[split] = map_fn(num_proc=1, desc='benchmark eval dataset') if not data_args.streaming else map_fn() stats_dataset = DatasetDict() all_stats = {'times_audio_total': 0, 'times_transcription_total': 0, 'num_words_total': 0, 'num_samples': 0, 'time_per_sample': 0, 'rtf': 0, 'words_per_s': 0, 'wer': 0} count = 0 for split in result_datasets: transcriptions = [] references = [] stats = {k: 0 for k in all_stats.keys()} print(f'Start benchmarking {split}...') if data_args.streaming: result_iter = iter(result_datasets[split]) for result in result_iter: stats['times_audio_total'] += result['length_in_s'] stats['times_transcription_total'] += result['time'] stats['num_words_total'] += result['num_words'] stats['num_samples'] += 1 transcriptions.append(result['transcription']) references.append(result['reference']) count += 1 print(f'Processed {count} samples...') if data_args.samples_per_dataset is not None and stats['num_samples'] == data_args.samples_per_dataset: break stats['time_per_sample'] = stats['times_transcription_total'] / stats['num_samples'] stats['avg_length_sample'] = stats['times_audio_total'] / stats['num_samples'] stats['wer'] = compute_metrics(transcriptions, references) stats['rtf'] = stats['times_audio_total'] / stats['times_transcription_total'] stats['words_per_s'] = stats['num_words_total'] / stats['times_transcription_total'] stats_dataset[split] = stats log_stats = {f'{split}_{k}': v for (k, v) in stats.items()} wandb_logger.log(log_stats) all_stats['times_audio_total'] += stats['times_audio_total'] all_stats['times_transcription_total'] += stats['times_transcription_total'] all_stats['wer'] += stats['wer'] all_stats['num_samples'] += stats['num_samples'] all_stats['num_words_total'] += stats['num_words_total'] all_stats['time_per_sample'] = all_stats['times_transcription_total'] / all_stats['num_samples'] all_stats['avg_length_sample'] = all_stats['times_audio_total'] / all_stats['num_samples'] all_stats['wer'] = all_stats['wer'] / len(result_datasets) all_stats['rtf'] = all_stats['times_audio_total'] / all_stats['times_transcription_total'] all_stats['words_per_s'] = all_stats['num_words_total'] / all_stats['times_transcription_total'] stats_dataset['all'] = all_stats log_all_stats = {f'all_{k}': v for (k, v) in all_stats.items()} wandb_logger.log(log_all_stats) benchmark_artifact = wandb.Artifact('Benchmark', type='datasets') with tempfile.TemporaryDirectory() as temp_dir: for split in stats_dataset: file_name = os.path.join(temp_dir, f"{'_'.join(split.split('/'))}.json") with open(file_name, 'w') as 
json_file: json.dump(stats_dataset[split], json_file) benchmark_artifact.add_file(file_name, split) wandb_logger.log_artifact(benchmark_artifact) print('Done!') if __name__ == '__main__': main() # File: distil-whisper-main/training/run_distillation.py """""" import logging import os import re import shutil import sys import time from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Any, Dict, List, Optional, Union import datasets import evaluate import numpy as np import torch import torch.nn as nn import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import DatasetDict, IterableDataset, IterableDatasetDict, concatenate_datasets, interleave_datasets, load_dataset from huggingface_hub import create_repo, get_full_repo_name, upload_folder from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AddedToken, HfArgumentParser, Seq2SeqTrainingArguments, WhisperConfig, WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperProcessor, WhisperTokenizerFast, get_scheduler from transformers.modeling_outputs import BaseModelOutput from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer from transformers.utils import check_min_version from transformers.utils.versions import require_version check_min_version('4.34.0.dev0') require_version('datasets>=2.14.6', 'To fix: `pip install --upgrade datasets`') logger = get_logger(__name__) @dataclass class ModelArguments: model_name_or_path: str = field(metadata={'help': 'Path to pretrained Whisper model or model identifier from huggingface.co/models'}) teacher_model_name_or_path: str = field(metadata={'help': 'Path to pretrained teacher model or model identifier from huggingface.co/models'}) config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}) tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}) feature_extractor_name: Optional[str] = field(default=None, metadata={'help': 'feature extractor name or path if not the same as model_name'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) subfolder: str = field(default='', metadata={'help': 'In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you canspecify the folder name here.'}) token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).'}) attn_implementation: Optional[str] = field(default=None, metadata={'help': 'Which attention implementation to use in the encoder and decoder attention layers. Can be one of:\n1. `eager` or `None`: default Transformers attention implementation.\n2. `sdpa`: Flash Attention through PyTorch SDPA. Requires `torch>=2.1`. Recommended for hardware where Flash Attention 2 is not supported, e.g. 
Turing GPUs, (T4, RTX 2080).\n3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package https://github.com/Dao-AILab/flash-attention. **Always** recommended on supported hardware (Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100).'}) def __post_init__(self): if self.attn_implementation not in [None, 'eager', 'sdpa', 'flash_attention_2']: raise ValueError(f'Got `--attn_implementation={self.attn_implementation}`, which is an invalid attention type. Should be one of:\n1. `eager` or `None`: default Transformers attention implementation.\n2. `sdpa`: Flash Attention through PyTorch SDPA. Requires `torch>=2.1`. Recommended for hardware where Flash Attention 2 is not supported, e.g. Turing GPUs, (T4, RTX 2080).\n3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package https://github.com/Dao-AILab/flash-attention. **Always** recommended on supported hardware (Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100).') @dataclass class DataTrainingArguments: train_dataset_name: str = field(default=None, metadata={'help': "The name of the training dataset to use (via the datasets library). Load and combine multiple datasets by separating dataset ids by a '+' symbol. For example, to load LibriSpeech and Common Voice, set `train_dataset_name='librispeech_asr+common_voice'`."}) train_dataset_config_name: Optional[str] = field(default=None, metadata={'help': "The configuration name of the training dataset to use (via the datasets library). Load and combine multiple datasets by separating dataset configs by a '+' symbol. Note that the order of the configs should match the order of the datasets."}) train_dataset_samples: str = field(default=None, metadata={'help': 'Number of samples in each dataset when loading multiple datasets with streaming mode. Not required when using one dataset or non-streaming mode. The sample values provide the sampling probability for each dataset. Setting them equal to the number of sample values ensures that every sample from every dataset is used once per epoch.'}) eval_dataset_name: str = field(default=None, metadata={'help': "The name of the evaluation dataset to use (via the datasets library). Defaults to the training dataset name if unspecified. Load multiple evaluation datasets by separating dataset ids by a '+' symbol."}) eval_dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the evaluation dataset to use (via the datasets library). 
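# The `attn_implementation` argument described above maps onto the `attn_implementation`
# keyword of `from_pretrained` in recent transformers releases. A hedged sketch (model id and
# dtype are illustrative):
#
#   import torch
#   from transformers import WhisperForConditionalGeneration
#
#   model = WhisperForConditionalGeneration.from_pretrained(
#       "openai/whisper-large-v2",
#       torch_dtype=torch.float16,
#       attn_implementation="sdpa",  # or "eager" / "flash_attention_2" on supported hardware
#   )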
Defaults to the training dataset config name if unspecified.'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing if using non-streaming mode.'}) preprocessing_batch_size: Optional[int] = field(default=256, metadata={'help': 'Number of examples per batch provided to the `prepare_dataset` function.'}) max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'}) max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) text_column_name: str = field(default=None, metadata={'help': 'The name of the dataset column containing the text data in the training set.'}) eval_text_column_name: str = field(default='text', metadata={'help': 'The name of the dataset column containing the text data in the evaluation set.'}) max_duration_in_seconds: float = field(default=30.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'}) min_duration_in_seconds: float = field(default=0.0, metadata={'help': 'Filter audio files that are shorter than `min_duration_in_seconds` seconds'}) max_label_length: int = field(default=448, metadata={'help': 'Truncate transcriptions that are longer `max_label_length` tokens.'}) pad_target_to_multiple_of: Optional[int] = field(default=None, metadata={'help': 'If set will pad the target sequence to a multiple of the provided value. This is important to avoid triggering recompilations on TPU. If unspecified, will default to padding the targets to max length.'}) preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'}) train_split_name: str = field(default='train', metadata={'help': "The name of the training data set split to use (via the datasets library). Defaults to 'train'"}) eval_split_name: str = field(default='validation', metadata={'help': "The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"}) streaming: bool = field(default=True, metadata={'help': "Whether to use Datasets' streaming mode to load and pre-process the data."}) wer_threshold: float = field(default=None, metadata={'help': 'Filter training data with Whisper transcriptions that have greater than `wer_threshold` WER with the normalised transcriptions. This only takes effect if training on pseudo-labels targets.If `--use_pseudo_labels=False`, then no WER filtering is performed, since we train directly on the texttranscriptions.'}) use_pseudo_labels: bool = field(default=True, metadata={'help': 'Whether or not to use pseudo-label transcriptions as the targets. 
If True, the pseudo-labels must be in the dataset column `whisper_transcript` from the previous pseudo-labelling step. This is not currently configurable.'}) timestamp_probability: float = field(default=0.2, metadata={'help': 'Probability for training on timestamped tokens if the data contains them.'}) condition_on_prev_probability: float = field(default=0.2, metadata={'help': 'Probability for conditioning on the previous text example.'}) return_timestamps: bool = field(default=False, metadata={'help': 'Whether or not to predict timestamps in the generation step.'}) language: str = field(default=None, metadata={'help': 'Language for multilingual distillation. This argument should be set for multilingual distillation only. For English speech recognition, it should be left as `None`.'}) task: str = field(default='transcribe', metadata={'help': 'Task, either `transcribe` for speech recognition or `translate` for speech translation. This argument should be set for multilingual distillation only. For English speech recognition, it should be left as `None`.'}) wandb_project: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb project.'}) wandb_name: str = field(default=None, metadata={'help': 'The name of the wandb run.'}) wandb_dir: str = field(default='./wandb', metadata={'help': 'The dir where wandb metadata will be stored.'}) @dataclass class DistillationTrainingArguments(Seq2SeqTrainingArguments): freeze_encoder: Optional[bool] = field(default=False, metadata={'help': 'Whether to freeze the entire encoder model. Only recommended when the entire encoder has been copied from the teacher model.'}) freeze_decoder: Optional[bool] = field(default=False, metadata={'help': 'Whether to freeze the entire decoder model. Note that the decoder input embeddings are **not** frozen, since they are tied to the LM head.'}) freeze_embed_positions: Optional[bool] = field(default=False, metadata={'help': 'Whether to freeze the decoder embedding positions.'}) temperature: Optional[float] = field(default=2.0, metadata={'help': 'Temperature to anneal the logits when computing the softmax.'}) kl_weight: Optional[float] = field(default=1.0, metadata={'help': 'Weighting assigned to the KL-divergence loss term in the KD formulation. The KL loss is computed between the teacher and student output distributions.'}) dtype: Optional[str] = field(default='float32', metadata={'help': 'The data type (dtype) in which to run training.
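# The `temperature` and `kl_weight` arguments above control the distillation objective. A
# minimal sketch of a temperature-annealed KL term combined with the cross-entropy loss; the
# exact weighting used by the training loop may differ, this is illustrative only:
#
#   import torch.nn.functional as F
#
#   def kd_loss(student_logits, teacher_logits, ce_loss, temperature=2.0, kl_weight=1.0):
#       # Soften both distributions with the temperature, then take the batch-mean KL divergence.
#       kl = F.kl_div(
#           F.log_softmax(student_logits / temperature, dim=-1),
#           F.softmax(teacher_logits / temperature, dim=-1),
#           reduction="batchmean",
#       ) * temperature**2  # standard temperature-scaling correction
#       return ce_loss + kl_weight * kl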
One of `float32` (full-precision), `float16` or `bfloat16` (both half-precision).'}) save_best_total_limit: Optional[int] = field(default=1, metadata={'help': 'Number of best models to be saved.'}) @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any decoder_start_token_id: int decoder_prev_token_id: int input_padding: Union[bool, str] = 'max_length' target_padding: Union[bool, str] = 'max_length' max_target_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: input_features = {'input_features': [feature['input_features'] for feature in features]} label_features = {'input_ids': [feature['labels'] for feature in features]} batch = self.processor.feature_extractor.pad(input_features, padding=self.input_padding, return_tensors='pt') labels_batch = self.processor.tokenizer.pad(label_features, max_length=self.max_target_length, padding=self.target_padding, return_tensors='pt') labels = labels_batch['input_ids'] decoder_input_ids = labels[:, :-1] labels = labels[:, 1:] labels_mask = labels_batch.attention_mask[:, 1:] labels = labels.masked_fill(labels_mask.ne(1), -100) bos_index = torch.argmax((labels == self.decoder_start_token_id).long(), dim=1) bos_index = torch.where(bos_index > 0, bos_index + 1, bos_index) prompt_mask = torch.arange(labels.shape[1]) < bos_index[:, None] labels = torch.where(prompt_mask, -100, labels) batch['labels'] = labels batch['decoder_input_ids'] = decoder_input_ids return batch def log_metric(accelerator, metrics: Dict, train_time: float, step: int, epoch: int, learning_rate: float=None, prefix: str='train'): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v log_metrics[f'{prefix}/time'] = train_time log_metrics[f'{prefix}/epoch'] = epoch if learning_rate is not None: log_metrics[f'{prefix}/learning_rate'] = learning_rate accelerator.log(log_metrics, step=step) def log_pred(accelerator, pred_str: List[str], label_str: List[str], norm_pred_str: List[str], norm_label_str: List[str], step: int, prefix: str='eval', num_lines: int=200000): if accelerator.is_main_process: wandb_tracker = accelerator.get_tracker('wandb') cur_step_pretty = f'{int(step // 1000)}k' if step > 1000 else step prefix_pretty = prefix.replace('/', '-') str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))] wandb_tracker.log_table(table_name=f'predictions/{prefix_pretty}-step-{cur_step_pretty}', columns=['Target', 'Pred', 'Norm Target', 'Norm Pred'], data=str_data[:num_lines], step=step) str_data = np.asarray(str_data) str_data_incorrect = str_data[str_data[:, -2] != str_data[:, -1]] wandb_tracker.log_table(table_name=f'incorrect_predictions/{prefix_pretty}-step-{cur_step_pretty}', columns=['Target', 'Pred', 'Norm Target', 'Norm Pred'], data=str_data_incorrect[:num_lines], step=step) def convert_dataset_str_to_list(dataset_names, dataset_config_names, splits=None, text_column_names=None, dataset_samples=None, default_split='train') -> List[Dict]: if isinstance(dataset_names, str): dataset_names = dataset_names.split('+') dataset_config_names = dataset_config_names.split('+') if dataset_config_names is not None else None splits = splits.split('+') if splits is not None else None text_column_names = text_column_names.split('+') if text_column_names is not None else None dataset_samples = dataset_samples.split('+') if dataset_samples is not None else None if dataset_config_names is not None and len(dataset_names) != 
len(dataset_config_names): raise ValueError(f'Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_config_names)} configs.') if splits is not None and len(splits) != len(dataset_names): raise ValueError(f'Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits.') if text_column_names is not None and len(text_column_names) != len(dataset_names): raise ValueError(f'Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and {len(text_column_names)} text column names.') if dataset_samples is not None: if len(dataset_samples) != len(dataset_names): raise ValueError(f'Ensure one sample is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_samples)} samples.') dataset_samples = [float(ds_sample) for ds_sample in dataset_samples] else: dataset_samples = [None] * len(dataset_names) dataset_config_names = dataset_config_names if dataset_config_names is not None else ['default' for _ in range(len(dataset_names))] text_column_names = text_column_names if text_column_names is not None else ['text' for _ in range(len(dataset_names))] splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))] dataset_names_dict = [] for (i, ds_name) in enumerate(dataset_names): dataset_names_dict.append({'name': ds_name, 'config': dataset_config_names[i], 'split': splits[i], 'text_column_name': text_column_names[i], 'samples': dataset_samples[i]}) return dataset_names_dict def load_multiple_datasets(dataset_names: Union[List, str], dataset_config_names: Union[List, str], splits: Optional[Union[List, str]]=None, text_column_names: Optional[List]=None, sampling_rate: Optional[int]=16000, stopping_strategy: Optional[str]='first_exhausted', dataset_samples: Optional[Union[List, np.array]]=None, streaming: Optional[bool]=True, seed: Optional[int]=None, accelerator: Optional[Accelerator]=None, use_pseudo_labels: float=None, **kwargs) -> IterableDataset: dataset_names_dict = convert_dataset_str_to_list(dataset_names, dataset_config_names, splits, text_column_names, dataset_samples) if dataset_samples is not None: dataset_samples = [ds_dict['samples'] for ds_dict in dataset_names_dict] probabilities = np.array(dataset_samples) / np.sum(dataset_samples) else: probabilities = None all_datasets = [] for dataset_dict in tqdm(dataset_names_dict, desc='Combining datasets...', disable=not accelerator.is_local_main_process if accelerator is not None else False): dataset = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], streaming=streaming, **kwargs) dataset = dataset.cast_column('audio', datasets.features.Audio(sampling_rate)) dataset_features = dataset.features.keys() columns_to_keep = {'audio', 'text'} if dataset_dict['text_column_name'] not in dataset_features: raise ValueError(f"Text column name {dataset_dict['text_column_name']} not found in dataset '{dataset_dict['name']}'. Make sure to set `--text_column_name` to the correct text column - one of {', '.join(dataset_features)}.") if dataset_dict['text_column_name'] != 'text': dataset = dataset.rename_column(dataset_dict['text_column_name'], 'text') if use_pseudo_labels: if 'whisper_transcript' not in dataset_features: raise ValueError(f"Pseudo-label column `whisper_transcript` not found in dataset {dataset_dict['name']}. 
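# Pseudo-labels are teacher-generated transcripts produced by a previous pseudo-labelling run (see
# run_pseudo_labelling.py) and stored in the `whisper_transcript` column. With `--use_pseudo_labels` the student
# is trained on these instead of the ground-truth `text` column, and the `condition_on_prev` column (if present)
# is kept so the preceding segment can later be used as a decoder prompt.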
Ensure pseudo-labels are present in the dataset under this column name, or train directly on the text labels by setting `--use_pseudo_labels=False` and defining the appropriate `--text_column_name`.") columns_to_keep.add('whisper_transcript') if 'condition_on_prev' in dataset_features: columns_to_keep.add('condition_on_prev') dataset_features = dataset.features.keys() dataset = dataset.remove_columns(set(dataset_features - columns_to_keep)) all_datasets.append(dataset) if len(all_datasets) == 1: return all_datasets[0] if streaming: interleaved_dataset = interleave_datasets(all_datasets, stopping_strategy=stopping_strategy, probabilities=probabilities, seed=seed) else: interleaved_dataset = concatenate_datasets(all_datasets) return interleaved_dataset def sorted_checkpoints(output_dir=None, checkpoint_prefix='checkpoint') -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob(f'{checkpoint_prefix}-*') if os.path.isdir(x)] glob_checkpoints = [path for path in glob_checkpoints if 'val-wer' not in path] for path in glob_checkpoints: regex_match = re.match(f'.*{checkpoint_prefix}-([0-9]+)', path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] return checkpoints_sorted def sorted_best_checkpoints(output_dir=None, checkpoint_prefix='checkpoint'): ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(output_dir).glob(f'{checkpoint_prefix}-*') if os.path.isdir(x)] for path in glob_checkpoints: regex_match = re.search('val-wer-([0-9]+\\.[0-9]+)', path) if regex_match is not None and regex_match.groups() is not None: ordering_and_checkpoint_path.append((float(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path, reverse=True) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] return checkpoints_sorted def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix='checkpoint', sorting_fn=sorted_checkpoints) -> None: if save_total_limit is None or save_total_limit <= 0: return checkpoints_sorted = sorting_fn(output_dir=output_dir, checkpoint_prefix=checkpoint_prefix) if len(checkpoints_sorted) <= save_total_limit: return number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info(f'Deleting older checkpoint [{checkpoint}].') shutil.rmtree(checkpoint, ignore_errors=True) _RE_CHECKPOINT = re.compile('^checkpoint-(\\d+)-epoch-(\\d+)$') def get_last_checkpoint(folder): content = os.listdir(folder) checkpoints = [path for path in content if _RE_CHECKPOINT.search(path) is not None and os.path.isdir(os.path.join(folder, path))] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_RE_CHECKPOINT.search(x).groups()[0]))) def get_parameter_names(model, forbidden_layer_types, forbidden_module=None): result = [] for (name, child) in model.named_children(): result += [f'{name}.{n}' for n in get_parameter_names(child, forbidden_layer_types, forbidden_module) if not (isinstance(child, tuple(forbidden_layer_types)) or (child in tuple(forbidden_module) if forbidden_module is not None else False))] result += list(model._parameters.keys()) return result def 
main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, DistillationTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() if training_args.dtype == 'float16': mixed_precision = 'fp16' teacher_dtype = torch.float16 elif training_args.dtype == 'bfloat16': mixed_precision = 'bf16' teacher_dtype = torch.bfloat16 else: mixed_precision = 'no' teacher_dtype = torch.float32 accelerator = Accelerator(gradient_accumulation_steps=training_args.gradient_accumulation_steps, mixed_precision=mixed_precision, log_with=training_args.report_to, project_dir=training_args.output_dir) accelerator.init_trackers(project_name=data_args.wandb_project, init_kwargs={'wandb': {'name': data_args.wandb_name, 'dir': data_args.wandb_dir}}) logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger.warning(f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}") if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() logger.info('Training/evaluation parameters %s', training_args) last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir): last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.') elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.') if accelerator.is_main_process: if training_args.push_to_hub: if training_args.hub_model_id is None: repo_name = get_full_repo_name(Path(training_args.output_dir).absolute().name, token=training_args.hub_token) else: repo_name = training_args.hub_model_id create_repo(repo_name, exist_ok=True, token=training_args.hub_token) with open(os.path.join(training_args.output_dir, '.gitignore'), 'w+') as gitignore: if 'wandb' not in gitignore: gitignore.write('wandb\n') elif training_args.output_dir is not None: os.makedirs(training_args.output_dir, exist_ok=True) accelerator.wait_for_everyone() raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() set_seed(training_args.seed) if training_args.do_train: raw_datasets['train'] = load_multiple_datasets(data_args.train_dataset_name, data_args.train_dataset_config_name, splits=data_args.train_split_name, text_column_names=data_args.text_column_name, use_pseudo_labels=data_args.use_pseudo_labels, streaming=data_args.streaming, dataset_samples=data_args.train_dataset_samples, seed=training_args.seed, accelerator=accelerator, cache_dir=data_args.dataset_cache_dir, token=model_args.token) raw_datasets_train_features = list(raw_datasets['train'].features.keys()) if training_args.do_eval: dataset_names_dict = convert_dataset_str_to_list(data_args.eval_dataset_name if data_args.eval_dataset_name else data_args.train_dataset_name, data_args.eval_dataset_config_name if data_args.eval_dataset_config_name else data_args.train_dataset_config_name, splits=data_args.eval_split_name, text_column_names=data_args.eval_text_column_name) all_eval_splits = [] if len(dataset_names_dict) == 1: dataset_dict = dataset_names_dict[0] all_eval_splits.append('eval') raw_datasets['eval'] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, token=model_args.token, streaming=data_args.streaming) if data_args.eval_text_column_name != 'text': raw_datasets['eval'] = raw_datasets['eval'].rename_column(data_args.eval_text_column_name, 'text') else: for dataset_dict in dataset_names_dict: if dataset_dict['name'] == 'esb/diagnostic-dataset': pretty_name = f"{dataset_dict['config']}-diagnostic/{dataset_dict['split']}" else: pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}" all_eval_splits.append(pretty_name) raw_datasets[pretty_name] = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, token=model_args.token, streaming=data_args.streaming) if dataset_dict['text_column_name'] != 'text': raw_datasets[pretty_name] = raw_datasets[pretty_name].rename_column(dataset_dict['text_column_name'], 'text') raw_datasets[pretty_name] = raw_datasets[pretty_name].remove_columns(set(raw_datasets[pretty_name].features.keys()) - {'audio', 'text'}) if not training_args.do_train and (not training_args.do_eval): raise ValueError('Cannot not train and not do evaluation. 
At least one of training or evaluation has to be performed.') config = WhisperConfig.from_pretrained(model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token) feature_extractor = WhisperFeatureExtractor.from_pretrained(model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token) tokenizer = WhisperTokenizerFast.from_pretrained(model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token) timestamps = [AddedToken('<|%.2f|>' % (i * 0.02), lstrip=False, rstrip=False) for i in range(1500 + 1)] tokenizer.add_tokens(timestamps) teacher_model = WhisperForConditionalGeneration.from_pretrained(model_args.teacher_model_name_or_path, cache_dir=model_args.cache_dir, token=model_args.token, low_cpu_mem_usage=True, torch_dtype=teacher_dtype, attn_implementation=model_args.attn_implementation) student_model = WhisperForConditionalGeneration.from_pretrained(model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, subfolder=model_args.subfolder, token=model_args.token, low_cpu_mem_usage=True, attn_implementation=model_args.attn_implementation) if student_model.config.decoder_start_token_id is None or teacher_model.config.decoder_start_token_id is None: raise ValueError(f'Make sure that `config.decoder_start_token_id` is correctly defined for both the student and teacher model. Got {student_model.config.decoder_start_token_id} for the student and {teacher_model.config.decoder_start_token_id} for the teacher.') if training_args.gradient_checkpointing: student_model.gradient_checkpointing_enable() def set_trainable_parameters(module, requires_grad=False): for param in module.parameters(): param.requires_grad = requires_grad module._requires_grad = requires_grad if training_args.freeze_encoder: set_trainable_parameters(student_model.model.encoder, requires_grad=False) student_model.model.encoder.gradient_checkpointing = False if training_args.freeze_decoder: set_trainable_parameters(student_model.model.decoder, requires_grad=False) student_model.model.decoder.gradient_checkpointing = False set_trainable_parameters(student_model.proj_out, requires_grad=True) if training_args.freeze_embed_positions: set_trainable_parameters(student_model.model.decoder.embed_positions, requires_grad=False) if student_model.model.decoder.gradient_checkpointing: logger.info("Disabling gradient checkpointing in the decoder since it's incompatible with `freeze_embed_positions`.") logger.info(f'Number of trainable parameters: {sum((p.numel() for p in student_model.parameters() if p.requires_grad)):.3e}') share_hidden_states = training_args.freeze_encoder and student_model.config.d_model == teacher_model.config.d_model if share_hidden_states: teacher_model.model.encoder = student_model.model.encoder if hasattr(teacher_model.generation_config, 'is_multilingual') and teacher_model.generation_config.is_multilingual: is_multilingual = True tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task, predict_timestamps=False) student_model.generation_config.update(**{'language': data_args.language, 'task': data_args.task}) elif data_args.language is not 
None: raise ValueError('Setting language token for an English-only checkpoint is not permitted. The language argument should only be set for multilingual checkpoints.') else: is_multilingual = False if accelerator.is_main_process: feature_extractor.save_pretrained(training_args.output_dir) tokenizer.save_pretrained(training_args.output_dir) config.save_pretrained(training_args.output_dir) student_model.generation_config.save_pretrained(training_args.output_dir) accelerator.wait_for_everyone() processor = WhisperProcessor.from_pretrained(training_args.output_dir) sampling_rate = feature_extractor.sampling_rate raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=sampling_rate)) max_input_length = int(data_args.max_duration_in_seconds * sampling_rate) min_input_length = int(data_args.min_duration_in_seconds * sampling_rate) max_label_length = data_args.max_label_length if data_args.max_label_length is not None else student_model.config.max_length timestamp_probability = data_args.timestamp_probability condition_on_prev_probability = data_args.condition_on_prev_probability return_timestamps = data_args.return_timestamps if timestamp_probability > 0 else False timestamp_ids = tokenizer.timestamp_ids() timestamp_begin = tokenizer.all_special_ids[-1] timestamp_position = 3 if is_multilingual else 1 decoder_start_token_id = student_model.config.decoder_start_token_id decoder_prev_token_id = tokenizer.all_special_ids[-3] prompt_cutoff_length = max_label_length // 2 num_workers = data_args.preprocessing_num_workers dataloader_num_workers = training_args.dataloader_num_workers prefetch_factor = training_args.dataloader_prefetch_factor metric = evaluate.load('wer') normalizer = BasicTextNormalizer() if data_args.language is not None else EnglishTextNormalizer(tokenizer.english_spelling_normalizer) wer_threshold = data_args.wer_threshold use_pseudo_labels = data_args.use_pseudo_labels train_text_column_name = 'whisper_transcript' if use_pseudo_labels else 'text' if training_args.do_train and data_args.max_train_samples is not None: raw_datasets['train'] = raw_datasets['train'].take(data_args.max_train_samples) if data_args.streaming else raw_datasets['train'].select(range(data_args.max_train_samples)) if training_args.do_eval and data_args.max_eval_samples is not None: for eval_split in all_eval_splits: raw_datasets[eval_split] = raw_datasets[eval_split].take(data_args.max_eval_samples) if data_args.streaming else raw_datasets[eval_split].select(range(data_args.max_eval_samples)) def is_wer_in_range(ground_truth, whisper_transcript): norm_ground_truth = normalizer(ground_truth) if whisper_transcript is not None and whisper_transcript.upper() == whisper_transcript: return False elif len(norm_ground_truth) > 0 and whisper_transcript is not None: norm_whisper_transcript = normalizer(whisper_transcript) wer = 100 * metric.compute(predictions=[norm_whisper_transcript], references=[norm_ground_truth]) return wer < wer_threshold else: return False filter_by_wer_threshold = partial(raw_datasets['train'].filter, function=is_wer_in_range, input_columns=['text', 'whisper_transcript']) if wer_threshold is not None and use_pseudo_labels: with accelerator.main_process_first(): raw_datasets['train'] = filter_by_wer_threshold(num_proc=num_workers, desc='filtering train dataset by wer') if not data_args.streaming else filter_by_wer_threshold() def prepare_train_dataset(batch): audio = [sample['array'] for sample in batch['audio']] inputs = feature_extractor(audio, 
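# Just above, pseudo-labelled examples are filtered before training: the Whisper transcript must not be entirely
# upper-case (a simple heuristic for degenerate transcriptions) and its normalised WER against the ground-truth
# text must be below `--wer_threshold`. prepare_train_dataset() then tokenises the (pseudo-)labels with two
# stochastic augmentations: with probability `timestamp_probability` the timestamp tokens are kept (otherwise
# they are stripped and replaced by the no-timestamps special token), and with probability
# `condition_on_prev_probability` the previous transcript is prepended as a prompt after the prompt token,
# truncated so that prompt plus transcript fit within `max_label_length`.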
sampling_rate=sampling_rate) batch['input_features'] = inputs.input_features batch['input_length'] = [len(sample) for sample in audio] input_str_batched = batch[train_text_column_name] condition_on_prev_batched = batch.get('condition_on_prev', len(input_str_batched) * [None]) all_token_ids = [] all_token_ids_unprompted = [] for (prev_ids, input_str) in zip(condition_on_prev_batched, input_str_batched): token_ids = tokenizer(input_str, add_special_tokens=not use_pseudo_labels).input_ids has_timestamps = len(set(token_ids) & set(timestamp_ids)) > 0 if has_timestamps: predict_timestamps = bool(np.random.binomial(1, timestamp_probability)) if not predict_timestamps: token_ids = [token for token in token_ids if token < timestamp_begin] token_ids.insert(timestamp_position, timestamp_begin) all_token_ids_unprompted.append(token_ids) condition_on_prev = bool(np.random.binomial(1, condition_on_prev_probability)) if not condition_on_prev: prev_ids = None elif 'condition_on_prev' not in batch and len(all_token_ids_unprompted) > 1: prev_ids = all_token_ids_unprompted[-2] if prev_ids is not None: if has_timestamps and (not predict_timestamps): prev_ids = [token for token in prev_ids if token < timestamp_begin] if len(prev_ids) > prompt_cutoff_length: prev_ids = prev_ids[-prompt_cutoff_length + 1:] prev_ids = [decoder_prev_token_id] + prev_ids if len(prev_ids + token_ids) > max_label_length: trim_length = len(prev_ids + token_ids) - max_label_length + 1 prev_ids = prev_ids[trim_length:] prev_ids = [decoder_prev_token_id] + prev_ids token_ids = prev_ids + token_ids all_token_ids.append(token_ids) batch['labels'] = all_token_ids return batch def prepare_eval_dataset(batch): sample = batch['audio'] inputs = feature_extractor(sample['array'], sampling_rate=sample['sampling_rate']) batch['input_features'] = inputs.input_features[0] batch['input_length'] = len(sample['array']) input_str = batch['text'] batch['labels'] = tokenizer(input_str).input_ids return batch vectorized_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() if training_args.do_train: map_fn_train = partial(raw_datasets['train'].map, function=prepare_train_dataset, remove_columns=raw_datasets_train_features, batched=True, batch_size=data_args.preprocessing_batch_size) with accelerator.main_process_first(): vectorized_datasets['train'] = map_fn_train(num_proc=num_workers, desc='preprocess train dataset') if not data_args.streaming else map_fn_train() if training_args.do_eval: for eval_split in all_eval_splits: raw_datasets_eval_features = list(raw_datasets[eval_split].features.keys()) map_fn_eval = partial(raw_datasets[eval_split].map, function=prepare_eval_dataset, remove_columns=raw_datasets_eval_features) with accelerator.main_process_first(): vectorized_datasets[eval_split] = map_fn_eval(num_proc=num_workers, desc='preprocess eval dataset') if not data_args.streaming else map_fn_eval() def is_audio_in_length_range(length): return min_input_length < length < max_input_length filter_by_audio_fn = partial(vectorized_datasets.filter, function=is_audio_in_length_range, input_columns=['input_length']) with accelerator.main_process_first(): vectorized_datasets = filter_by_audio_fn(num_proc=num_workers, desc='filtering train dataset by audio length') if not data_args.streaming else filter_by_audio_fn() def is_labels_in_length_range(labels): return 0 < len(labels) <= max_label_length filter_by_labels_fn = partial(vectorized_datasets.filter, function=is_labels_in_length_range, input_columns=['labels']) with 
accelerator.main_process_first(): vectorized_datasets = filter_by_labels_fn(num_proc=num_workers, desc='filtering train dataset') if not data_args.streaming else filter_by_labels_fn() if data_args.preprocessing_only: if data_args.streaming: raise ValueError('When using streaming mode, dataset pre-processing is performed on the fly, hence there is no notionof a cached pre-processed dataset. Remove the argument `--preprocessing_only` to run pre-processing on the fly with streaming mode.') cache = {k: v.cache_files for (k, v) in vectorized_datasets.items()} logger.info(f'Data preprocessing finished. Files cached at {cache}.') return def compute_metrics(preds, labels): for idx in range(len(labels)): labels[idx][labels[idx] == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(preds, skip_special_tokens=True, decode_with_timestamps=return_timestamps) label_str = tokenizer.batch_decode(labels, skip_special_tokens=True) wer_ortho = 100 * metric.compute(predictions=pred_str, references=label_str) norm_pred_str = [normalizer(pred) for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str) return ({'wer': wer, 'wer_ortho': wer_ortho}, pred_str, label_str, norm_pred_str, norm_label_str) per_device_train_batch_size = int(training_args.per_device_train_batch_size) train_batch_size = per_device_train_batch_size * accelerator.num_processes gradient_accumulation_steps = int(training_args.gradient_accumulation_steps) per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) if not data_args.streaming and training_args.max_steps < 0: num_epochs = int(training_args.num_train_epochs) steps_per_epoch = len(vectorized_datasets['train']) // (train_batch_size * gradient_accumulation_steps) total_train_steps = steps_per_epoch * num_epochs elif training_args.max_steps > 0: logger.info('max_steps is given, it will override any value given in num_train_epochs') total_train_steps = int(training_args.max_steps) if not data_args.streaming: steps_per_epoch = len(vectorized_datasets['train']) // (train_batch_size * gradient_accumulation_steps) num_epochs = int(np.ceil(total_train_steps / steps_per_epoch)) else: num_epochs = sys.maxsize steps_per_epoch = total_train_steps else: raise ValueError('max_steps must be specified when training with a streaming (iterable) dataset') if training_args.eval_steps is None: logger.info(f"eval_steps is not set, evaluating at the end of {('each epoch' if not data_args.streaming else 'training')}") eval_steps = steps_per_epoch else: eval_steps = training_args.eval_steps forbidden_module = [module for (module, flag) in [(student_model.model.encoder, training_args.freeze_encoder), (student_model.model.decoder, training_args.freeze_decoder)] if flag] or None decay_parameters = get_parameter_names(student_model, [nn.LayerNorm], forbidden_module=forbidden_module) decay_parameters = [name for name in decay_parameters if 'bias' not in name] optimizer_grouped_parameters = [{'params': [param for (name, param) in student_model.named_parameters() if name in decay_parameters], 
'weight_decay': training_args.weight_decay}, {'params': [param for (name, param) in student_model.named_parameters() if name not in decay_parameters], 'weight_decay': 0.0}] optimizer = torch.optim.AdamW(params=optimizer_grouped_parameters, lr=training_args.learning_rate, betas=(training_args.adam_beta1, training_args.adam_beta2), eps=training_args.adam_epsilon) lr_scheduler = get_scheduler(name=training_args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=training_args.warmup_steps * accelerator.num_processes, num_training_steps=total_train_steps * accelerator.num_processes) data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor, decoder_start_token_id=decoder_start_token_id, decoder_prev_token_id=decoder_prev_token_id, input_padding='longest', target_padding='max_length', max_target_length=max_label_length) num_beams = training_args.generation_num_beams if training_args.generation_num_beams is not None else getattr(student_model.generation_config, 'num_beams', 1) gen_kwargs = {'max_length': max_label_length, 'num_beams': num_beams, 'return_timestamps': return_timestamps} if is_multilingual: gen_kwargs.update({'language': data_args.language, 'task': data_args.task}) (student_model, teacher_model, optimizer, lr_scheduler) = accelerator.prepare(student_model, teacher_model, optimizer, lr_scheduler) def kl_divergence(target_distribution, log_predicted_distribution, labels): kl_loss = nn.KLDivLoss(reduction='none') divergence = kl_loss(log_predicted_distribution, target_distribution) padding_mask = labels >= 0 padding_mask = padding_mask.unsqueeze(-1) divergence = divergence * padding_mask divergence = divergence.sum() / padding_mask.sum() return divergence def train_step(batch, temperature=2.0): student_model.train() teacher_model.eval() student_outputs = student_model(**batch) with torch.no_grad(): if share_hidden_states: encoder_outputs = BaseModelOutput(student_outputs.encoder_last_hidden_state.to(dtype=teacher_dtype)) teacher_outputs = teacher_model(encoder_outputs=encoder_outputs, labels=batch['labels']) else: teacher_outputs = teacher_model(**batch) ce_loss = student_outputs.loss teacher_distribution = nn.functional.softmax(teacher_outputs.logits / temperature, dim=-1) student_distribution = nn.functional.log_softmax(student_outputs.logits / temperature, dim=-1) kl_loss = kl_divergence(teacher_distribution, student_distribution, batch['labels']) * temperature ** 2 loss = 0.8 * ce_loss + training_args.kl_weight * kl_loss metrics = {'loss': loss, 'ce_loss': ce_loss, 'kl_loss': kl_loss} return (loss, metrics) def eval_step(batch): student_model.eval() teacher_model.eval() with torch.no_grad(): student_outputs = student_model(**batch) if share_hidden_states: encoder_outputs = BaseModelOutput(student_outputs.encoder_last_hidden_state.to(dtype=teacher_dtype)) teacher_outputs = teacher_model(encoder_outputs=encoder_outputs, labels=batch['labels']) else: teacher_outputs = teacher_model(**batch) ce_loss = student_outputs.loss student_distribution = nn.functional.log_softmax(student_outputs.logits, dim=-1) teacher_distribution = nn.functional.softmax(teacher_outputs.logits, dim=-1) kl_loss = kl_divergence(teacher_distribution, student_distribution, batch['labels']) loss = 0.8 * ce_loss + training_args.kl_weight * kl_loss metrics = {'loss': loss, 'ce_loss': ce_loss, 'kl_loss': kl_loss} return metrics def generate_step(batch): student_model.eval() output_ids = accelerator.unwrap_model(student_model).generate(batch['input_features'], **gen_kwargs) output_ids = 
accelerator.pad_across_processes(output_ids, dim=1, pad_index=tokenizer.pad_token_id) return output_ids logger.info('***** Running training *****') logger.info(f' Num examples = {total_train_steps * train_batch_size * gradient_accumulation_steps}') if not data_args.streaming: logger.info(f' Num epochs = {num_epochs}') logger.info(f' Instantaneous batch size per device = {training_args.per_device_train_batch_size}') logger.info(f' Gradient accumulation steps = {gradient_accumulation_steps}') logger.info(f' Total train batch size (w. parallel & distributed) = {train_batch_size * gradient_accumulation_steps}') logger.info(f' Total optimization steps = {total_train_steps}') train_time = 0 train_start = time.time() steps_trained_progress_bar = tqdm(range(total_train_steps), desc='Train steps ... ', position=0, disable=not accelerator.is_local_main_process) continue_training = True epochs_trained = 0 cur_step = 0 best_val_wer = np.inf checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint if checkpoint is not None: accelerator.load_state(checkpoint) pattern = 'checkpoint-(\\d+)-epoch-(\\d+)' match = re.search(pattern, checkpoint) cur_step = int(match.group(1)) epochs_trained = int(match.group(2)) logger.info(' Continuing training from checkpoint, will skip to saved global_step') logger.info(f' Continuing training from epoch {epochs_trained}') logger.info(f' Continuing training from global step {cur_step}') steps_trained_progress_bar.update(cur_step) for epoch in range(0, epochs_trained): vectorized_datasets['train'] = vectorized_datasets['train'].shuffle(training_args.seed) if not data_args.streaming and training_args.max_steps < 0: resume_step = (cur_step - epochs_trained * steps_per_epoch) * gradient_accumulation_steps else: resume_step = None vectorized_datasets['train'] = vectorized_datasets['train'].shuffle(training_args.seed) else: resume_step = None for epoch in range(epochs_trained, num_epochs): vectorized_datasets['train'] = vectorized_datasets['train'].shuffle(training_args.seed) train_dataloader = DataLoader(vectorized_datasets['train'], collate_fn=data_collator, batch_size=per_device_train_batch_size, num_workers=dataloader_num_workers, prefetch_factor=prefetch_factor, pin_memory=training_args.dataloader_pin_memory) train_dataloader = accelerator.prepare(train_dataloader) if hasattr(train_dataloader, 'dataset') and isinstance(train_dataloader.dataset, IterableDataset): train_dataloader.dataset.set_epoch(epoch) if resume_step is not None: train_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) resume_step = None for batch in train_dataloader: with accelerator.accumulate(student_model): (loss, train_metric) = train_step(batch, temperature=training_args.temperature) accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(student_model.parameters(), training_args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() if accelerator.sync_gradients: steps_trained_progress_bar.update(1) cur_step += 1 if cur_step % training_args.logging_steps == 0: steps_trained_progress_bar.write(f"Step... 
({cur_step} / {total_train_steps} | Loss: {train_metric['loss']}, Learning Rate: {lr_scheduler.get_last_lr()[0]})") log_metric(accelerator, metrics=train_metric, learning_rate=lr_scheduler.get_last_lr()[0], train_time=train_time + time.time() - train_start, step=cur_step, epoch=epoch, prefix='train') if cur_step % training_args.save_steps == 0 or cur_step == total_train_steps: intermediate_dir = os.path.join(training_args.output_dir, f'checkpoint-{cur_step}-epoch-{epoch}') accelerator.save_state(output_dir=intermediate_dir) feature_extractor.save_pretrained(intermediate_dir) tokenizer.save_pretrained(intermediate_dir) config.save_pretrained(intermediate_dir) student_model.generation_config.save_pretrained(intermediate_dir) accelerator.wait_for_everyone() if accelerator.is_main_process: rotate_checkpoints(training_args.save_total_limit, output_dir=training_args.output_dir) if training_args.push_to_hub: upload_folder(folder_path=training_args.output_dir, repo_id=repo_name, repo_type='model', commit_message=f'Saving train state of step {cur_step}') if training_args.do_eval and (cur_step % eval_steps == 0 or cur_step == total_train_steps): train_time += time.time() - train_start student_model.eval() (wer_l, labels_l) = ([], []) for eval_split in all_eval_splits: eval_metrics = [] eval_preds = [] eval_labels = [] eval_start = time.time() validation_dataloader = DataLoader(vectorized_datasets[eval_split], collate_fn=data_collator, batch_size=per_device_eval_batch_size, drop_last=False, num_workers=dataloader_num_workers, prefetch_factor=prefetch_factor, pin_memory=training_args.dataloader_pin_memory) validation_dataloader = accelerator.prepare(validation_dataloader) for batch in tqdm(validation_dataloader, desc=f'Evaluating {eval_split}...', position=2, disable=not accelerator.is_local_main_process): eval_metric = eval_step(batch) eval_metric = accelerator.gather_for_metrics(eval_metric) eval_metrics.append(eval_metric) if training_args.predict_with_generate: generated_ids = generate_step(batch) (generated_ids, labels) = accelerator.gather_for_metrics((generated_ids, batch['labels'])) eval_preds.extend(generated_ids) eval_labels.extend(labels) eval_time = time.time() - eval_start eval_metrics = {key: torch.mean(torch.stack([d[key] for d in eval_metrics])) for key in eval_metrics[0]} wer_desc = '' if training_args.predict_with_generate: (wer_metric, pred_str, label_str, norm_pred_str, norm_label_str) = compute_metrics(eval_preds, eval_labels) eval_metrics.update(wer_metric) wer_desc = ' '.join([f'Eval {key}: {value} |' for (key, value) in wer_metric.items()]) log_pred(accelerator, pred_str, label_str, norm_pred_str, norm_label_str, step=cur_step, prefix=eval_split) steps_trained_progress_bar.write(f"Eval results for step ({cur_step} / {total_train_steps} | Eval Loss: {eval_metrics['loss']} | {wer_desc})") wer_l.append(wer_metric) labels_l.append(norm_label_str) log_metric(accelerator, metrics=eval_metrics, train_time=eval_time, step=cur_step, epoch=epoch, prefix=eval_split) train_start = time.time() numerators = [wer['wer'] * len(labs) for (wer, labs) in zip(wer_l, labels_l)] val_wer = sum(numerators) / sum((len(labs) for labs in labels_l)) if val_wer < best_val_wer: intermediate_dir = os.path.join(training_args.output_dir, f'checkpoint-{cur_step}-epoch-{epoch}-val-wer-{val_wer:.3f}') logger.info(f'Saving new best model, validation WER: {val_wer:.3f}') accelerator.save_state(output_dir=intermediate_dir) feature_extractor.save_pretrained(intermediate_dir) 
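# val_wer is the sample-weighted average WER over all evaluation splits: sum(wer_i * n_i) / sum(n_i), where n_i
# is the number of non-empty references in split i. Checkpoints that improve on the best WER so far are saved
# under `checkpoint-<step>-epoch-<epoch>-val-wer-<wer>` and rotated with `save_best_total_limit`, while regular
# step checkpoints are rotated separately with `save_total_limit`.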
tokenizer.save_pretrained(intermediate_dir) config.save_pretrained(intermediate_dir) student_model.generation_config.save_pretrained(intermediate_dir) accelerator.wait_for_everyone() if accelerator.is_main_process: rotate_checkpoints(training_args.save_best_total_limit, output_dir=training_args.output_dir, sorting_fn=sorted_best_checkpoints) accelerator.unwrap_model(student_model).save_pretrained(training_args.output_dir) if training_args.push_to_hub: upload_folder(folder_path=training_args.output_dir, repo_id=repo_name, repo_type='model', commit_message=f'Saving best state, step {cur_step}, val wer {val_wer:.3f}') best_val_wer = val_wer if cur_step == total_train_steps: final_weights_dir = os.path.join(training_args.output_dir, 'end-of-training-weights') feature_extractor.save_pretrained(final_weights_dir) tokenizer.save_pretrained(final_weights_dir) config.save_pretrained(final_weights_dir) student_model.generation_config.save_pretrained(final_weights_dir) student_model = accelerator.unwrap_model(student_model) student_model.save_pretrained(final_weights_dir) if training_args.push_to_hub: upload_folder(folder_path=training_args.output_dir, repo_id=repo_name, repo_type='model', commit_message=f'Saving final weights of step {cur_step}') continue_training = False break if not continue_training: break accelerator.end_training() if __name__ == '__main__': main() # File: distil-whisper-main/training/run_eval.py """""" import json import logging import os import sys import tempfile import time from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np import torch import transformers from datasets import DatasetDict, IterableDatasetDict, load_dataset from tqdm import tqdm from transformers import HfArgumentParser, WhisperForConditionalGeneration, WhisperProcessor, is_wandb_available, pipeline, set_seed from transformers.models.whisper.english_normalizer import EnglishTextNormalizer, BasicTextNormalizer from transformers.modeling_outputs import BaseModelOutput from transformers.models.whisper.modeling_whisper import WhisperForCausalLM from transformers.models.whisper.tokenization_whisper import TO_LANGUAGE_CODE from transformers.utils import check_min_version, is_accelerate_available from transformers.utils.versions import require_version check_min_version('4.34.0.dev0') require_version('datasets>=2.14.6', 'To fix: `pip install --upgrade datasets`') logger = logging.getLogger(__name__) PIPELINE_BATCH_SIZE = 16 @dataclass class DataTrainingArguments: dataset_name: str = field(default=None, metadata={'help': "The name of the dataset to use (via the datasets library). Load and combine multiple datasets by separating dataset hours by a '+' symbol."}) model_name_or_path: str = field(default=None, metadata={'help': 'The name of the model to use (via the transformers library). '}) subfolder: str = field(default='', metadata={'help': 'If specified load weights from a subfolder in the model repository'}) model_variant: str = field(default=None, metadata={'help': 'If specified load weights from `variant` filename, *e.g.* pytorch_model..bin. '}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) assistant_model_name_or_path: str = field(default=None, metadata={'help': 'The name of the assistant model to use to do speculative decoding. 
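# When `--assistant_model_name_or_path` is set, generation uses speculative decoding: the smaller assistant
# (draft) model proposes tokens which the main Whisper model then verifies, which can speed up inference while
# leaving greedy outputs unchanged. Checkpoints whose name starts with 'openai' are loaded as a full
# WhisperForConditionalGeneration assistant; all others are loaded as a decoder-only WhisperForCausalLM.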
If None, no speculative decoding will be done.'}) dtype: Optional[str] = field(default='float16', metadata={'help': 'Floating-point format in which the model weights should be initialized and the computations run. Choose one of `[float32, float16, bfloat16]`.'}) use_pipeline: bool = field(default=False, metadata={'help': 'Whether to evaluate with the Transformers pipeline.'}) chunk_length_s: float = field(default=30.0, metadata={'help': 'Chunk length to use when `use_pipeline` is enabled.'}) return_timestamps: bool = field(default=True, metadata={'help': 'Whether to decode with timestamps. This can help improve the WER for long-form evaluation.'}) language: str = field(default=None, metadata={'help': 'Language for multilingual evaluation. This argument should be set for multilingual evaluation only. For English speech recognition, it should be left as `None`.'}) task: str = field(default='transcribe', metadata={'help': 'Task, either `transcribe` for speech recognition or `translate` for speech translation. This argument should be set for multilingual evaluation only. For English speech recognition, it should be left as `None`.'}) attn_implementation: Optional[str] = field(default=None, metadata={'help': "Which attn type to use: ['eager', 'sdpa', 'flash_attention_2']"}) batch_size: int = field(default=1, metadata={'help': 'The batch size to be used for generation.'}) num_beams: int = field(default=1, metadata={'help': 'The beam size to be used for evaluation. Set to 1 for greedy, or >1 for beam search.'}) temperature_fallback: bool = field(default=True, metadata={'help': 'Whether to use temperature fallback for evaluation.'}) logprob_threshold: float = field(default=-1.0, metadata={'help': 'Only relevant for long-form transcription. If the average log-probability of a generated segment falls below this threshold, the segment is treated as failed and temperature fallback is triggered.'}) no_speech_threshold: float = field(default=0.6, metadata={'help': "Only relevant for long-form transcription. If defined, the 'no-speech' token combined with the `logprob_threshold` is used to determine whether a segment contains only silence. In this case, the transcription for this segment is skipped."}) compression_ratio_threshold: float = field(default=1.35, metadata={'help': 'Only relevant for long-form transcription. If defined, the zlib compression rate of each segment will be computed. If the compression rate of a segment is higher than `compression_ratio_threshold`, temperature fallback is activated: the generated segment is discarded and the generation is repeated using a higher temperature. The intuition behind this feature is that segments with very high compression rates suffer from a lot of repetition. The unwanted repetition can be reduced by injecting more randomness by increasing the temperature. If `compression_ratio_threshold` is defined, make sure that `temperature` is a list of values. 
The default value for `compression_ratio_threshold` is 1.35.'}) condition_on_prev_tokens: bool = field(default=False, metadata={'help': 'Whether to condition on previous tokens or not'}) samples_per_dataset: Optional[int] = field(default=None, metadata={'help': 'Number of samples per dataset used to measure speed.'}) dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) dataset_split_name: Optional[str] = field(default=None, metadata={'help': 'The split name of the dataset to use (via the datasets library).'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) text_column_name: str = field(default=None, metadata={'help': 'The name of the dataset column containing the text data. Defaults to `text`.'}) generation_max_length: int = field(default=256, metadata={'help': 'Generate up until `generation_max_length` tokens.'}) log_predictions: Optional[bool] = field(default=True, metadata={'help': 'Whether or not to log the ground truths / pred text to the wandb logger.'}) preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'}) wandb_project: str = field(default='distil-whisper-speed-benchmark', metadata={'help': 'The name of the wandb project.'}) wandb_name: str = field(default=None, metadata={'help': 'The name of the wandb run.'}) wandb_job_type: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb job type.'}) wandb_dir: str = field(default=None, metadata={'help': 'The absolute path to save the wandb logs.'}) save_code_to_wandb: bool = field(default=False, metadata={'help': 'Whether to save main script to wandb. This is valuable for improving experiment reproducibility and to diff code across experiments in the UI.'}) streaming: bool = field(default=True, metadata={'help': "Whether to use Datasets' streaming mode to load and the data."}) max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes, truncate the number of eval examples to this value if set.'}) seed: int = field(default=42, metadata={'help': 'RNG seed for reproducibility.'}) use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}) prompt_text: str = field(default=None, metadata={'help': 'Text prompt to condition the generation on. Useful for controlling the style of transcription and predicting named entities.'}) precise_tok_per_s: bool = field(default=False, metadata={'help': 'If True, compute tok/sec by forcing the number of generated token ids to num_tokens on dummy batches. 
If False, computes tok/sec over the entire dataset with variable number of generated tokens.'}) num_tokens: int = field(default=20, metadata={'help': 'Number of tokens to generate if computing tok/sec with precise_tok_per_s.'}) num_batches: int = field(default=100, metadata={'help': 'Number of batches for the tok/sec calculation with precise_tok_per_s'}) only_short_form: bool = field(default=False, metadata={'help': 'Whether the evaluation should be only short form (filter out samples > 30sec).'}) only_long_form: bool = field(default=False, metadata={'help': 'Whether the evaluation should be only long form (filter out samples <= 30sec).'}) def write_metric(summary_writer, eval_metrics, step, prefix='eval'): for (metric_name, value) in eval_metrics.items(): summary_writer.scalar(f'{prefix}/{metric_name}', value, step) def write_wandb_metric(wandb_logger, metrics, prefix): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v wandb_logger.log(log_metrics) def write_wandb_pred(wandb_logger, pred_str, label_str, norm_pred_str, norm_label_str, wer_per_sample, prefix='eval'): columns = ['WER', 'Target', 'Pred', 'Norm Target', 'Norm Pred'] str_data = [[wer_per_sample[i], label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))] wandb_logger.log({f'{prefix}/predictions': wandb_logger.Table(columns=columns, data=str_data)}) def convert_dataset_str_to_list(dataset_names, dataset_config_names, splits=None, text_column_names=None, dataset_hours=None, default_split='train'): if isinstance(dataset_names, str): dataset_names = dataset_names.split('+') for i in range(len(dataset_names)): ds_name = dataset_names[i] dataset_names[i] = f'distil-whisper/{ds_name}' if '/' not in ds_name else ds_name dataset_config_names = dataset_config_names.split('+') if dataset_config_names is not None else None splits = splits.split('+') if splits is not None else None text_column_names = text_column_names.split('+') if text_column_names is not None else None dataset_hours = dataset_hours.split('+') if dataset_hours is not None else None if dataset_config_names is not None and len(dataset_names) != len(dataset_config_names): raise ValueError(f'Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_config_names)} configs.') if splits is not None and len(splits) != len(dataset_names): raise ValueError(f'Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits.') if text_column_names is not None and len(text_column_names) != len(dataset_names): raise ValueError(f'Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and {len(text_column_names)} text column names.') if dataset_hours is not None: if len(dataset_hours) != len(dataset_names): raise ValueError(f'Ensure one probability is passed for each dataset, got {len(dataset_names)} datasets and {len(dataset_hours)} hours.') dataset_hours = [float(ds_hours) for ds_hours in dataset_hours] else: dataset_hours = [None] * len(dataset_names) dataset_config_names = dataset_config_names if dataset_config_names is not None else ['default' for _ in range(len(dataset_names))] text_column_names = text_column_names if text_column_names is not None else ['text' for _ in range(len(dataset_names))] splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))] dataset_names_dict = [] for (i, ds_name) in enumerate(dataset_names): dataset_names_dict.append({'name': ds_name, 
'config': dataset_config_names[i], 'split': splits[i], 'text_column_name': text_column_names[i], 'hours': dataset_hours[i]}) return dataset_names_dict def language_to_id(language: str, generation_config) -> str: language = language.lower() if language in generation_config.lang_to_id.keys(): language_token = language elif language in TO_LANGUAGE_CODE.keys(): language_token = f'<|{TO_LANGUAGE_CODE[language]}|>' elif language in TO_LANGUAGE_CODE.values(): language_token = f'<|{language}|>' else: is_language_code = len(language) == 2 raise ValueError(f'Unsupported language: {language}. Language should be one of: {(list(TO_LANGUAGE_CODE.values()) if is_language_code else list(TO_LANGUAGE_CODE.keys()))}.') if language_token not in generation_config.lang_to_id: raise ValueError(f'{language_token} is not supported by this specific model as it is not in the `generation_config.lang_to_id`.(You should just add it to the generation config)') return language_token def main(): parser = HfArgumentParser([DataTrainingArguments]) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): data_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0] else: data_args = parser.parse_args_into_dataclasses()[0] logger.setLevel(logging.INFO) logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)]) set_seed(data_args.seed) if data_args.use_pipeline and data_args.batch_size > 1: raise ValueError('Make sure that `batch_size` is set to 1 when `use_pipeline=True`.') has_wandb = is_wandb_available() if has_wandb: import wandb import wandb as wandb_logger generation_arguments = {'torch_version': str(torch.__version__), 'transformers_version': str(transformers.__version__), 'attn_implementation': data_args.attn_implementation, 'model_name_or_path': data_args.model_name_or_path, 'subfolder': data_args.subfolder, 'assistant_model_name_or_path': data_args.assistant_model_name_or_path, 'seed': data_args.seed, 'batch_size': data_args.batch_size, 'num_beams': data_args.num_beams, 'return_timestamps': data_args.return_timestamps, 'condition_on_prev_tokens': data_args.condition_on_prev_tokens, 'temperature_fallback': data_args.temperature_fallback, 'logprob_threshold': data_args.logprob_threshold, 'no_speech_threshold': data_args.no_speech_threshold, 'use_pipeline': data_args.use_pipeline, 'chunk_length_s': data_args.chunk_length_s} wandb_logger.init(project=data_args.wandb_project, name=data_args.wandb_name, job_type=data_args.wandb_job_type, dir=data_args.wandb_dir, save_code=data_args.save_code_to_wandb, config=generation_arguments) else: raise ValueError('Wandb logging requires wandb to be installed. 
Run `pip install wandb` to enable.') raw_datasets = IterableDatasetDict() dataset_names_dict = convert_dataset_str_to_list(data_args.dataset_name, data_args.dataset_config_name, splits=data_args.dataset_split_name, text_column_names=data_args.text_column_name) for dataset_dict in tqdm(dataset_names_dict, desc='Loading datasets...'): sub_dataset = load_dataset(dataset_dict['name'], dataset_dict['config'], split=dataset_dict['split'], cache_dir=data_args.dataset_cache_dir, streaming=data_args.streaming, num_proc=data_args.preprocessing_num_workers) if data_args.only_short_form: sub_dataset = sub_dataset.filter(lambda x: len(x['audio']['array']) / x['audio']['sampling_rate'] <= 30) if data_args.only_long_form: sub_dataset = sub_dataset.filter(lambda x: len(x['audio']['array']) / x['audio']['sampling_rate'] > 30) if dataset_dict['text_column_name'] not in list(sub_dataset.features.keys()): raise ValueError(f"`--text_column_name` {dataset_dict['text_column_name']} not found in the evaluation dataset {dataset_dict['name']}. Ensure `text_column_name` is set to the correct column for the target text. Should be one of {' '.join(list(sub_dataset.features.keys()))}") if dataset_dict['text_column_name'] != 'text': sub_dataset = sub_dataset.rename_column(dataset_dict['text_column_name'], 'text') if not data_args.streaming: sub_dataset = sub_dataset.to_iterable_dataset() pretty_name = f"{dataset_dict['name'].split('/')[-1]}/{dataset_dict['split'].replace('.', '-')}" raw_datasets[pretty_name] = sub_dataset processor = WhisperProcessor.from_pretrained(data_args.model_name_or_path, subfolder=data_args.subfolder, cache_dir=data_args.cache_dir, use_fast=data_args.use_fast_tokenizer) dtype = getattr(torch, data_args.dtype) model = WhisperForConditionalGeneration.from_pretrained(data_args.model_name_or_path, subfolder=data_args.subfolder, torch_dtype=dtype, attn_implementation=data_args.attn_implementation, low_cpu_mem_usage=is_accelerate_available(), cache_dir=data_args.cache_dir, variant=data_args.model_variant) model.to('cuda:0', dtype=dtype) model_pipeline = None if data_args.use_pipeline: model_pipeline = pipeline('automatic-speech-recognition', model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, torch_dtype=dtype, device=model.device, chunk_length_s=data_args.chunk_length_s) model_pipeline_forward = model_pipeline._forward assistant_model = None if data_args.assistant_model_name_or_path is not None: logger.info('Loading assistant model...') if data_args.assistant_model_name_or_path.startswith('openai'): assistant_model = WhisperForConditionalGeneration.from_pretrained(data_args.assistant_model_name_or_path, torch_dtype=dtype, attn_implementation=data_args.attn_implementation, low_cpu_mem_usage=is_accelerate_available(), cache_dir=data_args.cache_dir) else: assistant_model = WhisperForCausalLM.from_pretrained(data_args.assistant_model_name_or_path, torch_dtype=dtype, attn_implementation=data_args.attn_implementation, low_cpu_mem_usage=is_accelerate_available(), cache_dir=data_args.cache_dir) assistant_model.cuda() raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=processor.feature_extractor.sampling_rate)) audio_column_name = data_args.audio_column_name language = language_to_id(data_args.language, model.generation_config) if data_args.language else None if language is None or language == '<|en|>': normalizer = EnglishTextNormalizer(processor.tokenizer.english_spelling_normalizer) else: normalizer = 
BasicTextNormalizer() sampling_rate = processor.feature_extractor.sampling_rate if data_args.samples_per_dataset is not None: for split in raw_datasets: raw_datasets[split] = raw_datasets[split].take(data_args.samples_per_dataset) def prepare_dataset(batch): audio = [sample['array'].astype(np.float32) for sample in batch[audio_column_name]] if model_pipeline is None: inputs = processor.feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='pt', truncation=False, padding='longest', return_attention_mask=True) if inputs.input_features.shape[-1] < 3000: inputs = processor.feature_extractor(audio, sampling_rate=sampling_rate, return_tensors='pt', return_attention_mask=True) batch['input_features'] = inputs.input_features.to(dtype) batch['attention_mask'] = inputs.attention_mask else: batch['input_features'] = audio batch['length_in_s'] = [len(sample) / sampling_rate for sample in audio] batch['reference'] = batch['text'] return batch vectorized_datasets = IterableDatasetDict() for split in raw_datasets: raw_datasets_features = list(raw_datasets[split].features.keys()) vectorized_datasets[split] = raw_datasets[split].map(function=prepare_dataset, remove_columns=raw_datasets_features, batch_size=data_args.batch_size, batched=True) if data_args.preprocessing_only: cache = {k: v.cache_files for (k, v) in vectorized_datasets.items()} logger.info(f'Data preprocessing finished. Files cached at {cache}.') return metric = evaluate.load('wer') def compute_metrics(pred_str, label_str): norm_pred_str = [normalizer(pred) for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str) return wer gen_kwargs = {'max_length': data_args.generation_max_length, 'return_timestamps': data_args.return_timestamps, 'num_beams': data_args.num_beams, 'top_k': 0} if hasattr(model.generation_config, 'is_multilingual') and model.generation_config.is_multilingual: gen_kwargs['language'] = data_args.language gen_kwargs['task'] = data_args.task elif data_args.language is not None: raise ValueError('Setting language token for an English-only checkpoint is not permitted. 
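# benchmark() below transcribes each batch and records the wall-clock generation time and tokens/sec, either
# measured on the real audio or, with `--precise_tok_per_s`, on dummy encoder outputs with a fixed number of
# generated tokens. Per split, the script then reports the WER on normalised text, tokens/sec, and the ratio
# rtf = total audio duration / total transcription time.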
The language argument should only be set for multilingual checkpoints.') if assistant_model is not None: gen_kwargs['assistant_model'] = assistant_model if data_args.prompt_text is not None: gen_kwargs['prompt_ids'] = processor.get_prompt_ids(data_args.prompt_text, return_tensors='pt').to('cuda:0') long_form_gen_kwargs = {'condition_on_prev_tokens': data_args.condition_on_prev_tokens, 'compression_ratio_threshold': data_args.compression_ratio_threshold, 'temperature': (0.0, 0.2, 0.4, 0.6, 0.8, 1.0) if data_args.temperature_fallback else 0, 'logprob_threshold': data_args.logprob_threshold, 'no_speech_threshold': data_args.no_speech_threshold} forced_decoder_ids = processor.get_decoder_prompt_ids(task=data_args.task, language=data_args.language, no_timestamps=not data_args.return_timestamps) def benchmark(batch): if model_pipeline is None: inputs = torch.stack(batch['input_features'], dim=0).cuda() attention_mask = torch.stack(batch['attention_mask'], dim=0).cuda() (inner_batch_size, num_mels, seq_len) = inputs.shape if seq_len == 3000: batch_gen_kwargs = gen_kwargs else: batch_gen_kwargs = {**gen_kwargs, **long_form_gen_kwargs} set_seed(data_args.seed) start_time = time.time() output_ids = model.generate(inputs, attention_mask=attention_mask, **batch_gen_kwargs) gen_time = time.time() - start_time batch['time'] = inner_batch_size * [gen_time / inner_batch_size] if not data_args.precise_tok_per_s: n_generated_tokens = output_ids.numel() - inner_batch_size * len(forced_decoder_ids) batch['tokens_per_sec'] = inner_batch_size * [n_generated_tokens / gen_time / inner_batch_size] batch['transcription'] = processor.batch_decode(output_ids, skip_special_tokens=True, decode_with_timestamps=data_args.return_timestamps) else: inputs = batch['input_features'] time_result = [] n_generated_tokens = [] def _forward_time(*args, **kwargs): start_time = time.time() result = model_pipeline_forward(*args, **kwargs) end_time = time.time() - start_time time_result.append(end_time) for toks in result['tokens']: n_generated_tokens.append(len(toks) - len(forced_decoder_ids)) return result model_pipeline._forward = _forward_time result = model_pipeline(inputs, batch_size=PIPELINE_BATCH_SIZE, generate_kwargs={**gen_kwargs})[0]['text'] if not data_args.precise_tok_per_s: n_generated_tokens = sum(n_generated_tokens) gen_time = time_result[0] batch['tokens_per_sec'] = [n_generated_tokens / gen_time] batch['transcription'] = [result] batch['time'] = [sum(time_result)] batch['num_words'] = [len(r.split()) for r in batch['reference']] return batch result_datasets = DatasetDict() for split in vectorized_datasets: result_datasets[split] = vectorized_datasets[split].map(function=benchmark, remove_columns=['input_features'], batch_size=data_args.batch_size, batched=True) stats_dataset = DatasetDict() all_stats = {'rtf': 0, 'wer': 0, 'tokens_per_sec': 0} rtf_stats = {'times_audio_total': 0, 'times_transcription_total': 0} def benchmark_gen(num_batches): tokens_per_secs = [] for _ in range(num_batches): dummy_encoder_outputs = BaseModelOutput(torch.randn((data_args.batch_size, model.config.max_source_positions, model.config.d_model), dtype=model.dtype, device=model.device)) n_tokens = data_args.num_tokens if model_pipeline is None: start_time = time.time() _ = model.generate(encoder_outputs=dummy_encoder_outputs, min_new_tokens=n_tokens, max_new_tokens=n_tokens, **gen_kwargs) gen_time = time.time() - start_time else: start_time = time.time() _ = model_pipeline.model.generate(encoder_outputs=dummy_encoder_outputs, 
min_new_tokens=n_tokens, max_new_tokens=n_tokens, **gen_kwargs) gen_time = time.time() - start_time n_generated_tokens = n_tokens * data_args.batch_size tokens_per_secs.append(n_generated_tokens / gen_time) return tokens_per_secs logger.info('***** Running Evaluation *****') for key in generation_arguments: logger.info(f' {key}: {generation_arguments[key]}') datasets_evaluated_progress_bar = tqdm(result_datasets, desc='Datasets', position=0) for split in datasets_evaluated_progress_bar: transcriptions = [] references = [] stats = {} times_audio_total = 0 times_transcription_total = 0 tokens_per_secs = [] if data_args.precise_tok_per_s: tokens_per_secs = benchmark_gen(data_args.num_batches) datasets_evaluated_progress_bar.write(f'Start benchmarking {split}...') result_iter = iter(result_datasets[split]) for result in tqdm(result_iter, desc='Samples', position=1): times_audio_total += result['length_in_s'] times_transcription_total += result['time'] if data_args.prompt_text is not None: result['transcription'] = result['transcription'].replace(data_args.prompt_text, '') transcriptions.append(result['transcription']) references.append(result['reference']) if not data_args.precise_tok_per_s: tokens_per_secs.append(result['tokens_per_sec']) norm_transcriptions = [normalizer(pred) for pred in transcriptions] norm_references = [normalizer(label) for label in references] transcriptions = [transcriptions[i] for i in range(len(transcriptions)) if len(norm_references[i]) > 0] references = [references[i] for i in range(len(references)) if len(norm_references[i]) > 0] norm_transcriptions = [norm_transcriptions[i] for i in range(len(norm_transcriptions)) if len(norm_references[i]) > 0] norm_references = [norm_references[i] for i in range(len(norm_references)) if len(norm_references[i]) > 0] stats['wer'] = compute_metrics(norm_transcriptions, norm_references) wer_per_sample = [] for (pred, ref) in zip(norm_transcriptions, norm_references): wer_per_sample.append(compute_metrics([pred], [ref])) stats['rtf'] = times_audio_total / times_transcription_total stats['tokens_per_sec'] = sum(tokens_per_secs) / len(tokens_per_secs) stats_dataset[split] = stats wer_desc = ' '.join([f'Eval {key}: {value} |' for (key, value) in stats.items()]) datasets_evaluated_progress_bar.write(wer_desc) write_wandb_metric(wandb_logger, stats, prefix=split) if data_args.log_predictions: write_wandb_pred(wandb_logger, transcriptions, references, norm_transcriptions, norm_references, wer_per_sample, prefix=split) rtf_stats['times_audio_total'] += times_audio_total rtf_stats['times_transcription_total'] += times_transcription_total all_stats['wer'] += stats['wer'] all_stats['tokens_per_sec'] += stats['tokens_per_sec'] all_stats['wer'] = all_stats['wer'] / len(result_datasets) all_stats['rtf'] = rtf_stats['times_audio_total'] / rtf_stats['times_transcription_total'] all_stats['tokens_per_sec'] = all_stats['tokens_per_sec'] / len(result_datasets) stats_dataset['all'] = all_stats write_wandb_metric(wandb_logger, all_stats, prefix='all') benchmark_artifact = wandb.Artifact('Benchmark', type='datasets') with tempfile.TemporaryDirectory() as temp_dir: for split in stats_dataset: file_name = os.path.join(temp_dir, f"{'_'.join(split.split('/'))}.json") with open(file_name, 'w') as json_file: json.dump(stats_dataset[split], json_file) benchmark_artifact.add_file(file_name, split) wandb_logger.log_artifact(benchmark_artifact) if __name__ == '__main__': main() # File: distil-whisper-main/training/run_pseudo_labelling.py """""" import csv import 
logging import os import sys import time import warnings from dataclasses import dataclass, field from datetime import timedelta from pathlib import Path from typing import Any, Dict, List, Optional, Union import datasets import evaluate import numpy as np import torch import transformers from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.logging import get_logger from datasets import DatasetDict, IterableDatasetDict, load_dataset from huggingface_hub import HfFolder, create_repo, get_full_repo_name, snapshot_download, upload_folder from torch.utils.data import DataLoader from tqdm import tqdm from soundfile import LibsndfileError from datasets.arrow_dataset import table_iter from transformers import HfArgumentParser, Seq2SeqTrainingArguments, WhisperConfig, WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperProcessor, WhisperTokenizerFast from transformers.models.whisper.english_normalizer import BasicTextNormalizer, EnglishTextNormalizer from transformers.utils import check_min_version from transformers.utils.versions import require_version check_min_version('4.34.0.dev0') require_version('datasets>=2.14.6', 'To fix: `pip install --upgrade datasets`') logger = get_logger(__name__) @dataclass class ModelArguments: model_name_or_path: str = field(metadata={'help': 'Path to pretrained Whisper model or model identifier from huggingface.co/models'}) config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}) tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}) feature_extractor_name: Optional[str] = field(default=None, metadata={'help': 'feature extractor name or path if not the same as model_name'}) processor_name: Optional[str] = field(default=None, metadata={'help': 'processor name or path if not the same as model_name'}) cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'}) use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}) model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}) subfolder: str = field(default='', metadata={'help': 'In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you canspecify the folder name here.'}) token: str = field(default=None, metadata={'help': 'The token to use as HTTP bearer authorization for remote files. If not specified, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).'}) dtype: Optional[str] = field(default='float32', metadata={'help': 'The data type (dtype) in which to load the model weights. One of `float32` (full-precision), `float16` or `bfloat16` (both half-precision).'}) attn_implementation: Optional[str] = field(default=None, metadata={'help': 'Which attention implementation to use in the encoder and decoder attention layers. Can be one of:\n1. `eager` or `None`: default Transformers attention implementation.\n2. `sdpa`: Flash Attention through PyTorch SDPA. Requires `torch>=2.1`. Recommended for hardware where Flash Attention 2 is not supported, e.g. Turing GPUs, (T4, RTX 2080).\n3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package https://github.com/Dao-AILab/flash-attention. 
**Always** recommended on supported hardware (Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100).'}) attn_type: Optional[str] = field(default=None, metadata={'help': 'Deprecated. Use `attn_implementation` instead.'}) def __post_init__(self): if self.attn_type is not None and self.attn_implementation is None: if self.attn_type == 'flash_attn': self.attn_implementation = 'sdpa' elif self.attn_type == 'flash_attn_2': self.attn_implementation = 'flash_attention_2' elif self.attn_type in [None, 'eager', 'sdpa', 'flash_attention_2']: self.attn_implementation = self.attn_type else: raise ValueError(f'Argument `--attn_type` is deprecated, and set to an invalid option `{self.attn_type}`. You should omit the argument `--attn_type`, and instead set `--attn_implementation` to one of the following:\n1. `eager` or `None`: default Transformers attention implementation.\n2. `sdpa`: Flash Attention through PyTorch SDPA. Requires `torch>=2.1`. Recommended for hardware where Flash Attention 2 is not supported, e.g. Turing GPUs, (T4, RTX 2080).\n3. `flash_attn_2`: Flash Attention 2 through the Flash Attention package https://github.com/Dao-AILab/flash-attention. **Always** recommended on supported hardware (Ampere, Ada, or Hopper GPUs, e.g., A100, RTX 3090, RTX 4090, H100).') warnings.warn(f'Argument `--attn_type` is deprecated. Use `--attn_implementation` instead. Inferring `--attn_implementation={self.attn_implementation}` from argument `--attn_type={self.attn_type}`.') elif self.attn_type is not None and self.attn_implementation is not None: raise ValueError('`--attn_type` and `--attn_implementation` are both specified. Only the argument `--attn_implementation` should be set.') @dataclass class DataTrainingArguments: dataset_name: str = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'}) dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) dataset_cache_dir: Optional[str] = field(default=None, metadata={'help': 'Path to cache directory for saving and loading datasets'}) overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}) preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}) preprocessing_batch_size: Optional[int] = field(default=500, metadata={'help': 'The batch size to use for the dataset pre-processing.'}) audio_column_name: str = field(default='audio', metadata={'help': "The name of the dataset column containing the audio data. Defaults to 'audio'"}) text_column_name: str = field(default='text', metadata={'help': "The name of the dataset column containing the text data. Defaults to 'text'."}) id_column_name: str = field(default='id', metadata={'help': "The name of the dataset column containing the id data. Defaults to 'id'"}) speaker_id_column_name: str = field(default=None, metadata={'help': 'The name of the dataset column containing the speaker id data. 
Defaults to None.'}) max_duration_in_seconds: float = field(default=30.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'}) max_label_length: int = field(default=256, metadata={'help': 'Truncate transcriptions that are longer than `max_label_length` tokens.'}) concatenate_audio: bool = field(default=True, metadata={'help': 'Whether or not to concatenate the audio samples to `max_duration_in_seconds`.'}) preprocessing_only: bool = field(default=False, metadata={'help': 'Whether to only do data preprocessing and skip training. This is especially useful when data preprocessing errors out in distributed training due to timeout. In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets can consequently be loaded in distributed training'}) dataset_split_name: str = field(default='train+validation+test', metadata={'help': "The name of the data set splits to use (via the datasets library). Defaults to 'train+validation+test'. Multiple splits can be passed by splitting a list through the '+' character, e.g. 'train+validation' will pseudo-label both the 'train' and 'validation' splits sequentially."}) wandb_project: str = field(default='distil-whisper', metadata={'help': 'The name of the wandb project.'}) streaming: bool = field(default=False, metadata={'help': "Whether to use dataset's streaming mode to load and pre-process the data."}) max_samples_per_split: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes, truncate the number of examples per split to this value if set.'}) return_timestamps: bool = field(default=False, metadata={'help': 'Whether to return the timestamps with the text. This enables the `FlaxWhisperTimestampsLogitsProcessor`.'}) language: str = field(default=None, metadata={'help': 'Language for multilingual distillation. This argument should be set for multilingual distillation only. For English speech recognition, it should be left as `None`.'}) task: str = field(default='transcribe', metadata={'help': 'Task, either `transcribe` for speech recognition or `translate` for speech translation. This argument should be set for multilingual distillation only. For English speech recognition, it should be left as `None`.'}) decode_token_ids: bool = field(default=True, metadata={'help': 'Deprecated. The predicted token ids should always be decoded to text transcriptions.'}) private_dataset: bool = field(default=False, metadata={'help': 'Whether or not to create a private dataset for the pseudo-labelled data.'}) def __post_init__(self): if not self.decode_token_ids: raise ValueError('The argument `--decode_token_ids` is deprecated. The token ids are now always decoded to their corresponding text string. This is following a fix to the merges of the Whisper tokenizer on the Hugging Face Hub: https://huggingface.co/openai/whisper-large-v2/discussions/100. 
You should either omit the argument `--decode_token_ids`, or set it to True explicitly.') def shift_tokens_right(label_ids: np.array, decoder_start_token_id: int) -> np.ndarray: shifted_label_ids = np.zeros_like(label_ids) shifted_label_ids[:, 1:] = label_ids[:, :-1] shifted_label_ids[:, 0] = decoder_start_token_id return shifted_label_ids @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any decoder_start_token_id: int input_padding: Union[bool, str] = 'max_length' target_padding: Union[bool, str] = 'max_length' max_target_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: model_input_name = self.processor.model_input_names[0] input_features = {model_input_name: [feature[model_input_name] for feature in features]} label_features = {'input_ids': [feature['labels'] for feature in features]} batch = self.processor.feature_extractor.pad(input_features, padding=self.input_padding, return_tensors='pt') labels_batch = self.processor.tokenizer.pad(label_features, max_length=self.max_target_length, padding=self.target_padding, return_tensors='pt') labels = labels_batch['input_ids'].masked_fill(labels_batch.attention_mask.ne(1), -100) if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item(): labels = labels[:, 1:] batch['labels'] = labels return batch def log_metric(accelerator, metrics: Dict, train_time: float, prefix: str='eval'): log_metrics = {} for (k, v) in metrics.items(): log_metrics[f'{prefix}/{k}'] = v log_metrics[f'{prefix}/time'] = train_time accelerator.log(log_metrics) def log_pred(accelerator, pred_str: List[str], label_str: List[str], norm_pred_str: List[str], norm_label_str: List[str], prefix: str='eval', num_lines: int=200000): if accelerator.is_main_process: wandb_tracker = accelerator.get_tracker('wandb') prefix = prefix.replace('/', '-') str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))] wandb_tracker.log_table(table_name=f'{prefix}/all_predictions', columns=['Target', 'Pred', 'Norm Target', 'Norm Pred'], data=str_data[:num_lines]) str_data = np.asarray(str_data) str_data_incorrect = str_data[str_data[:, -2] != str_data[:, -1]] wandb_tracker.log_table(table_name=f'{prefix}/incorrect_predictions', columns=['Target', 'Pred', 'Norm Target', 'Norm Pred'], data=str_data_incorrect[:num_lines]) def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith('.json'): (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: (model_args, data_args, training_args) = parser.parse_args_into_dataclasses() if model_args.dtype == 'float16': mixed_precision = 'fp16' torch_dtype = torch.float16 elif model_args.dtype == 'bfloat16': mixed_precision = 'bf16' torch_dtype = torch.bfloat16 else: mixed_precision = 'no' torch_dtype = torch.float32 kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=7200)) accelerator = Accelerator(gradient_accumulation_steps=training_args.gradient_accumulation_steps, mixed_precision=mixed_precision, log_with=training_args.report_to, project_dir=training_args.output_dir, kwargs_handlers=[kwargs]) accelerator.init_trackers(project_name=data_args.wandb_project) logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger.warning(f"Process rank: {training_args.local_rank}, device: 
{training_args.device}, n_gpu: {training_args.n_gpu}, distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}") if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() logger.info('Training/evaluation parameters %s', training_args) raw_datasets = IterableDatasetDict() if data_args.streaming else DatasetDict() token = model_args.token if model_args.token is not None else HfFolder().get_token() data_splits = data_args.dataset_split_name.split('+') for split in data_splits: with accelerator.main_process_first(): raw_datasets[split] = load_dataset(data_args.dataset_name, data_args.dataset_config_name, split=split, cache_dir=data_args.dataset_cache_dir, token=token, streaming=data_args.streaming, num_proc=data_args.preprocessing_num_workers if not data_args.streaming else None) if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError(f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. Make sure to set `--audio_column_name` to the correct audio column - one of {', '.join(next(iter(raw_datasets.values())).column_names)}.") if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names: raise ValueError(f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. Make sure to set `--text_column_name` to the correct text column - one of {', '.join(next(iter(raw_datasets.values())).column_names)}.") config = WhisperConfig.from_pretrained(model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=token) feature_extractor = WhisperFeatureExtractor.from_pretrained(model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=token) tokenizer = WhisperTokenizerFast.from_pretrained(model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=token) processor = WhisperProcessor.from_pretrained(model_args.processor_name if model_args.processor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=token) model = WhisperForConditionalGeneration.from_pretrained(model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, subfolder=model_args.subfolder, token=token, low_cpu_mem_usage=True, torch_dtype=torch_dtype, attn_implementation=model_args.attn_implementation) model.eval() if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined') return_timestamps = data_args.return_timestamps if hasattr(model.generation_config, 'is_multilingual') and model.generation_config.is_multilingual: is_multilingual = True tokenizer.set_prefix_tokens(language=data_args.language, task=data_args.task, predict_timestamps=return_timestamps) elif data_args.language is not None: raise ValueError('Setting language token for an English-only checkpoint is not permitted. 
The language argument should only be set for multilingual checkpoints.') else: is_multilingual = False raw_datasets = raw_datasets.cast_column(data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)) max_input_length = int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) max_label_length = data_args.max_label_length if data_args.max_label_length is not None else model.config.max_length audio_column_name = data_args.audio_column_name sampling_rate = feature_extractor.sampling_rate preprocessing_batch_size = data_args.preprocessing_batch_size num_workers = data_args.preprocessing_num_workers dataloader_num_workers = training_args.dataloader_num_workers text_column_name = data_args.text_column_name model_input_name = feature_extractor.model_input_names[0] id_column_name = data_args.id_column_name speaker_id_column_name = data_args.speaker_id_column_name normalizer = BasicTextNormalizer() if data_args.language is not None else EnglishTextNormalizer(tokenizer.english_spelling_normalizer) timestamp_position = 3 if is_multilingual else 1 decoder_prev_token_id = tokenizer.convert_tokens_to_ids('<|startofprev|>') decoder_eot_token_id = tokenizer.eos_token_id if data_args.max_samples_per_split is not None: for split in data_splits: raw_datasets[split] = raw_datasets[split].take(data_args.max_samples_per_split) if data_args.streaming else raw_datasets[split].select(range(data_args.max_samples_per_split)) if speaker_id_column_name is not None: raw_datasets = raw_datasets.sort(speaker_id_column_name) def concatenate_dataset(batch): (audio_arrays, texts, speaker_ids) = ([], [], []) for row in table_iter(batch.pa_table, batch_size=1): row = batch.formatter.format_row(row) try: sample_audio = row[audio_column_name]['array'] sample_text = row[text_column_name] sample_speaker_id = row[speaker_id_column_name] if speaker_id_column_name else None except LibsndfileError: logger.warning(f'{row[id_column_name]} is corrupted! 
Skipping sample.') continue audio_arrays.append(sample_audio) texts.append(sample_text) speaker_ids.append(sample_speaker_id) concat_audio = [audio_arrays[0]] concat_text = [texts[0]] concat_speaker_id = [speaker_ids[0]] condition_on_prev = [0] for (audio_array, text, speaker_id) in zip(audio_arrays[1:], texts[1:], speaker_ids[1:]): is_same_speaker = speaker_id == concat_speaker_id[-1] is_concatenable = len(audio_array) + len(concat_audio[-1]) <= max_input_length if is_same_speaker and is_concatenable: concat_audio[-1] = np.append(concat_audio[-1], audio_array) concat_text[-1] = concat_text[-1] + ' ' + text else: concat_audio.append(audio_array) concat_text.append(text) concat_speaker_id.append(speaker_id) condition_on_prev.append(1 if is_same_speaker else 0) batch[audio_column_name] = [{'array': array, 'sampling_rate': sampling_rate} for array in concat_audio] batch[text_column_name] = concat_text batch[id_column_name] = concat_speaker_id batch['condition_on_prev'] = condition_on_prev return batch raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys()) if data_args.concatenate_audio and (not data_args.streaming): with accelerator.main_process_first(): raw_datasets = raw_datasets.map(concatenate_dataset, batched=True, batch_size=preprocessing_batch_size, num_proc=num_workers, remove_columns=set(raw_datasets_features) - {audio_column_name, text_column_name, id_column_name, 'condition_on_prev'}, desc='Concatenating dataset...') raw_datasets = raw_datasets.cast_column(audio_column_name, datasets.features.Audio(sampling_rate=sampling_rate)) pretty_name = data_args.dataset_name.split('/')[-1] def postprocess_ids(speaker_ids, indices): speaker_ids_formatted = [] for (speaker, idx) in zip(speaker_ids, indices): formatted_idx = f'{pretty_name}-{speaker}-{idx}' if speaker is not None else f'{pretty_name}-{idx}' speaker_ids_formatted.append(formatted_idx) return {id_column_name: speaker_ids_formatted} with accelerator.main_process_first(): raw_datasets = raw_datasets.map(postprocess_ids, input_columns=[id_column_name], with_indices=True, desc='Setting sample idxs...', batched=True, batch_size=preprocessing_batch_size, num_proc=num_workers) elif data_args.concatenate_audio and data_args.streaming: raise ValueError('Streaming mode is not yet compatible with concatenating audios to `max_duration_in_seconds`.Either set `--streaming=False` and download the audios locally, or open an issue on the Distil-Whisper repo to request this feature.') def prepare_dataset(batch): sample = batch[audio_column_name] inputs = feature_extractor(sample['array'], sampling_rate=sample['sampling_rate']) batch[model_input_name] = inputs.get(model_input_name)[0] input_str = batch[text_column_name] batch['labels'] = tokenizer(input_str, max_length=max_label_length, truncation=True).input_ids return batch raw_datasets_features = list(next(iter(raw_datasets.values())).features.keys()) file_ids_dataset = IterableDatasetDict() if data_args.streaming else DatasetDict() for split in raw_datasets: file_ids_dataset[split] = raw_datasets[split][id_column_name] if data_args.streaming: with accelerator.main_process_first(): vectorized_datasets = raw_datasets.map(prepare_dataset, remove_columns=raw_datasets_features) else: with accelerator.main_process_first(): vectorized_datasets = raw_datasets.map(prepare_dataset, remove_columns=raw_datasets_features, num_proc=num_workers, desc='preprocess dataset') if data_args.preprocessing_only: cache = {k: v.cache_files for (k, v) in vectorized_datasets.items()} 
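# With `--preprocessing_only`, the function logs the cache location and returns immediately below, so a
# subsequent (e.g. distributed) run can reuse the cached Arrow files instead of re-processing the audio.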
logger.info(f'Data preprocessing finished. Files cached at {cache}.') return if data_args.streaming and dataloader_num_workers > 0: logger.warning('Using multiple dataloader num workers with streaming mode will result in different shards of data being transcribed in parallel. This is not advised if you want to preserve the order of the audio-text data.') output_dir = training_args.output_dir if accelerator.is_main_process: if training_args.push_to_hub: if training_args.hub_model_id is None: repo_name = get_full_repo_name(Path(output_dir).absolute().name, token=training_args.hub_token) else: repo_name = training_args.hub_model_id create_repo(repo_name, repo_type='dataset', exist_ok=True, token=training_args.hub_token) snapshot_download(repo_id=repo_name, local_dir=output_dir) with open(os.path.join(output_dir, '.gitattributes'), 'r+') as f: git_lfs_extensions = f.read() if '*.csv' not in git_lfs_extensions: f.write('*.csv filter=lfs diff=lfs merge=lfs -text') elif output_dir is not None: os.makedirs(output_dir, exist_ok=True) accelerator.wait_for_everyone() metric = evaluate.load('wer') def compute_metrics(preds, labels, file_ids): for idx in range(len(labels)): labels[idx][labels[idx] == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(preds, skip_special_tokens=False, decode_with_timestamps=return_timestamps) label_str = tokenizer.batch_decode(labels, skip_special_tokens=True) norm_pred_str = [normalizer(pred) for pred in pred_str] norm_label_str = [normalizer(label) for label in label_str] pred_str = [pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] label_str = [label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] file_ids = [file_ids[i] for i in range(len(file_ids)) if len(norm_label_str[i]) > 0] norm_pred_str = [norm_pred_str[i] for i in range(len(norm_pred_str)) if len(norm_label_str[i]) > 0] norm_label_str = [norm_label_str[i] for i in range(len(norm_label_str)) if len(norm_label_str[i]) > 0] wer = 100 * metric.compute(predictions=norm_pred_str, references=norm_label_str) return ({'wer': wer}, pred_str, label_str, norm_pred_str, norm_label_str, file_ids) def filter_eot_tokens(preds): for idx in range(len(preds)): token_ids = [token for token in preds[idx] if token != decoder_eot_token_id] token_ids = token_ids + [decoder_eot_token_id] preds[idx] = token_ids return preds per_device_eval_batch_size = int(training_args.per_device_eval_batch_size) data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor, decoder_start_token_id=model.config.decoder_start_token_id, input_padding='longest', target_padding='max_length', max_target_length=max_label_length) num_beams = training_args.generation_num_beams if training_args.generation_num_beams is not None else getattr(model.generation_config, 'num_beams', 1) gen_kwargs = {'max_length': max_label_length, 'num_beams': num_beams, 'return_timestamps': return_timestamps} if hasattr(model.generation_config, 'is_multilingual') and model.generation_config.is_multilingual: gen_kwargs.update({'language': data_args.language, 'task': data_args.task}) model.generation_config.forced_decoder_ids = None model.config.forced_decoder_ids = None model = accelerator.prepare(model) def eval_step_with_save(split='eval'): eval_preds = [] eval_labels = [] eval_ids = [] pred_str = [] eval_start = time.time() eval_loader = DataLoader(vectorized_datasets[split], batch_size=per_device_eval_batch_size, collate_fn=data_collator, num_workers=dataloader_num_workers, pin_memory=True) 
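# Note: `eval_loader` above is prepared (and hence sharded) by `accelerator`, so each process sees
# `per_device_eval_batch_size` samples per step, while `file_loader` below is left unprepared and is
# iterated at the global batch size (`per_device_eval_batch_size * accelerator.num_processes`). This
# keeps the file ids aligned with the predictions after `accelerator.gather_for_metrics` re-assembles
# the global batch.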
file_loader = DataLoader(file_ids_dataset[split], batch_size=per_device_eval_batch_size * accelerator.num_processes, num_workers=dataloader_num_workers) eval_loader = accelerator.prepare(eval_loader) batches = tqdm(eval_loader, desc=f'Evaluating {split}...', disable=not accelerator.is_local_main_process) split = split.replace('.', '-').split('/')[-1] output_csv = os.path.join(output_dir, f'{split}-transcription.csv') for (step, (batch, file_ids)) in enumerate(zip(batches, file_loader)): generate_fn = model.module.generate if accelerator.num_processes > 1 else model.generate generated_ids = generate_fn(batch['input_features'].to(dtype=torch_dtype), **gen_kwargs) generated_ids = accelerator.pad_across_processes(generated_ids, dim=1, pad_index=tokenizer.pad_token_id) (generated_ids, labels) = accelerator.gather_for_metrics((generated_ids, batch['labels'])) eval_preds.extend(generated_ids.cpu().numpy()) eval_labels.extend(labels.cpu().numpy()) eval_ids.extend(file_ids) if step % training_args.logging_steps == 0 and step > 0: batches.write(f'Saving transcriptions for split {split} step {step}') accelerator.wait_for_everyone() pred_ids = eval_preds[-(len(eval_preds) - len(pred_str)):] pred_ids = filter_eot_tokens(pred_ids) pred_str.extend(tokenizer.batch_decode(pred_ids, skip_special_tokens=False, decode_with_timestamps=return_timestamps)) csv_data = [[eval_ids[i], pred_str[i]] for i in range(len(eval_preds))] with open(output_csv, 'w', encoding='UTF8', newline='') as f: writer = csv.writer(f) writer.writerow(['file_id', 'whisper_transcript']) writer.writerows(csv_data) if training_args.push_to_hub and accelerator.is_main_process: upload_folder(folder_path=output_dir, repo_id=repo_name, repo_type='dataset', commit_message=f'Saving transcriptions for split {split} step {step}.') accelerator.wait_for_everyone() eval_time = time.time() - eval_start wer_desc = '' if 'validation' in split or 'test' in split: eval_preds = filter_eot_tokens(eval_preds) (wer_metric, pred_str, label_str, norm_pred_str, norm_label_str, eval_ids) = compute_metrics(eval_preds, eval_labels, eval_ids) wer_desc = ' '.join([f'Eval {key}: {value} |' for (key, value) in wer_metric.items()]) log_metric(accelerator, metrics=wer_metric, train_time=eval_time, prefix=split) log_pred(accelerator, pred_str, label_str, norm_pred_str, norm_label_str, prefix=split) else: pred_ids = eval_preds[-(len(eval_preds) - len(pred_str)):] pred_ids = filter_eot_tokens(pred_ids) pred_str.extend(tokenizer.batch_decode(pred_ids, skip_special_tokens=False, decode_with_timestamps=return_timestamps)) batches.write(f'Saving final transcriptions for split {split}.') csv_data = [[eval_ids[i], pred_str[i]] for i in range(len(eval_preds))] with open(output_csv, 'w', encoding='UTF8', newline='') as f: writer = csv.writer(f) writer.writerow(['file_id', 'whisper_transcript']) writer.writerows(csv_data) logger.info(wer_desc) if not data_args.streaming: raw_datasets[split] = raw_datasets[split].add_column('whisper_transcript', pred_str) raw_datasets[split] = raw_datasets[split].add_column('eval_preds', eval_preds) def add_concatenated_text(eval_preds, condition_on_prev): concatenated_prev = [None] for (token_ids, condition) in zip(eval_preds[:-1], condition_on_prev[1:]): if condition is False: concatenated_prev.append(None) else: prompt_ids = [token for token in token_ids if token != decoder_eot_token_id] prompt_ids = [decoder_prev_token_id] + prompt_ids[timestamp_position:] concatenated_prev.append(prompt_ids) return {'condition_on_prev': concatenated_prev} if 
data_args.concatenate_audio: with accelerator.main_process_first(): raw_datasets[split] = raw_datasets[split].map(add_concatenated_text, input_columns=['eval_preds', 'condition_on_prev'], remove_columns=['eval_preds'], desc='Setting condition on prev...', batched=True, batch_size=preprocessing_batch_size, num_proc=num_workers) logger.info('***** Running Labelling *****') logger.info(f' Instantaneous batch size per device = {training_args.per_device_eval_batch_size}') logger.info(f' Total eval batch size (w. parallel & distributed) = {training_args.per_device_eval_batch_size * accelerator.num_processes}') logger.info(f' Predict labels with timestamps = {return_timestamps}') for split in data_splits: eval_step_with_save(split=split) accelerator.wait_for_everyone() if training_args.push_to_hub and accelerator.is_main_process: upload_folder(folder_path=output_dir, repo_id=repo_name, repo_type='dataset', commit_message=f"Saving final transcriptions for split {split.replace('.', '-').split('/')[-1]}") if not data_args.streaming and accelerator.is_main_process: raw_datasets.save_to_disk(output_dir, num_proc=num_workers) if training_args.push_to_hub: raw_datasets.push_to_hub(repo_name, config_name=data_args.dataset_config_name) accelerator.end_training() if __name__ == '__main__': main()
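# Illustrative sketch (not part of the original scripts): a minimal, self-contained example of the
# label conventions used by `DataCollatorSpeechSeq2SeqWithPadding` and `filter_eot_tokens` above.
# Padding positions are masked to -100, a leading decoder-start token is stripped (the model re-adds
# it during the forward pass), and predictions are normalised to end in exactly one end-of-text token
# before decoding. The token id values below are assumptions chosen only for illustration.
import torch

decoder_start_token_id = 50258  # assumed id, for illustration only
eot_token_id = 50257            # assumed id, for illustration only

# Labels as padded by the tokenizer: shape (batch, seq), with an attention mask marking real tokens.
input_ids = torch.tensor([[decoder_start_token_id, 11, 12, eot_token_id, 0]])
attention_mask = torch.tensor([[1, 1, 1, 1, 0]])

# Same masking step as DataCollatorSpeechSeq2SeqWithPadding.__call__:
labels = input_ids.masked_fill(attention_mask.ne(1), -100)
if (labels[:, 0] == decoder_start_token_id).all().cpu().item():
    labels = labels[:, 1:]
print(labels.tolist())  # [[11, 12, 50257, -100]]

# Same idea as filter_eot_tokens: drop every end-of-text token, then append exactly one.
preds = [[11, 12, eot_token_id, eot_token_id]]
preds = [[t for t in seq if t != eot_token_id] + [eot_token_id] for seq in preds]
print(preds)  # [[11, 12, 50257]]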