|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import inspect
|
|
import random
|
|
import warnings
|
|
from collections import defaultdict
|
|
from contextlib import contextmanager, nullcontext
|
|
from copy import deepcopy
|
|
from functools import wraps
|
|
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union
|
|
|
|
import numpy as np
|
|
import torch
|
|
import torch.nn as nn
|
|
import torch.nn.functional as F
|
|
from accelerate import PartialState
|
|
from accelerate.utils import is_deepspeed_available, tqdm
|
|
from datasets import Dataset
|
|
from torch.utils.data import DataLoader
|
|
from transformers import (
|
|
AutoModelForCausalLM,
|
|
DataCollator,
|
|
PreTrainedModel,
|
|
PreTrainedTokenizerBase,
|
|
Trainer,
|
|
TrainingArguments,
|
|
)
|
|
from transformers.trainer_callback import TrainerCallback
|
|
from transformers.trainer_utils import EvalLoopOutput
|
|
|
|
from ..import_utils import is_peft_available, is_wandb_available
|
|
from ..models import PreTrainedModelWrapper, create_reference_model
|
|
from .utils import (
|
|
DPODataCollatorWithPadding,
|
|
disable_dropout_in_model,
|
|
pad_to_length,
|
|
peft_module_casting_to_bf16,
|
|
trl_sanitze_kwargs_for_tagging,
|
|
)
|
|
|
|
|
|
if is_peft_available():
|
|
from peft import PeftModel, get_peft_model, prepare_model_for_kbit_training
|
|
|
|
|
|
if is_wandb_available():
|
|
import wandb
|
|
|
|
if is_deepspeed_available():
|
|
import deepspeed
|
|
|
|
from transformers.integrations.deepspeed import is_deepspeed_zero3_enabled
|
|
|
|
|
|
class DPOTrainer(Trainer):
|
|
r"""
|
|
Initialize DPOTrainer.
|
|
|
|
Args:
|
|
model (`transformers.PreTrainedModel`):
|
|
The model to train, preferably an `AutoModelForCausalLM`.
|
|
ref_model (`PreTrainedModelWrapper`):
|
|
Hugging Face transformer model with a causal language modeling head. Used for implicit reward computation and loss. If no
|
|
reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized.
|
|
beta (`float`, defaults to 0.1):
|
|
The beta factor in DPO loss. Higher beta means less divergence from the initial policy. For the IPO loss, beta is the regularization parameter denoted by tau in the paper.
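dpo_alpha (`float`, defaults to `1.0`):
The weighting factor applied to the DPO loss term before it is added to the auxiliary SFT loss.
gamma (`float`, defaults to `0.1`):
The weighting factor applied to the auxiliary SFT (cross-entropy) loss computed on the chosen responses.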
|
|
label_smoothing (`float`, defaults to 0):
|
|
The robust DPO label smoothing parameter from the [cDPO](https://ericmitchell.ai/cdpo.pdf) report that should be between 0 and 0.5.
|
|
loss_type (`str`, defaults to `"sigmoid"`):
|
|
The type of DPO loss to use. Either `"sigmoid"` (the default DPO loss), `"hinge"` loss from the [SLiC](https://arxiv.org/abs/2305.10425) paper, `"ipo"` from the [IPO](https://arxiv.org/abs/2310.12036) paper, or `"kto_pair"` from the HALOs [report](https://github.com/ContextualAI/HALOs/blob/main/assets/report.pdf).
|
|
args (`transformers.TrainingArguments`):
|
|
The arguments to use for training.
|
|
data_collator (`transformers.DataCollator`):
|
|
The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
|
|
which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
|
|
label_pad_token_id (`int`, defaults to `-100`):
|
|
The label pad token id. This argument is required if you want to use the default data collator.
|
|
padding_value (`int`, defaults to `0`):
|
|
The padding value if it is different from the tokenizer's pad_token_id.
|
|
truncation_mode (`str`, defaults to `keep_end`):
|
|
The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator.
|
|
train_dataset (`datasets.Dataset`):
|
|
The dataset to use for training.
|
|
eval_dataset (`datasets.Dataset`):
|
|
The dataset to use for evaluation.
|
|
tokenizer (`transformers.PreTrainedTokenizerBase`):
|
|
The tokenizer to use for training. This argument is required if you want to use the default data collator.
|
|
model_init (`Callable[[], transformers.PreTrainedModel]`):
|
|
The model initializer to use for training. If None is specified, the default model initializer will be used.
|
|
callbacks (`List[transformers.TrainerCallback]`):
|
|
The callbacks to use for training.
|
|
optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
|
|
The optimizer and scheduler to use for training.
|
|
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
|
|
The function to use to preprocess the logits before computing the metrics.
|
|
max_length (`int`, defaults to `None`):
|
|
The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator.
|
|
max_prompt_length (`int`, defaults to `None`):
|
|
The maximum length of the prompt. This argument is required if you want to use the default data collator.
|
|
max_target_length (`int`, defaults to `None`):
|
|
The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder.
|
|
peft_config (`Dict`, defaults to `None`):
|
|
The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
|
|
is_encoder_decoder (`Optional[bool]`, `optional`, defaults to `None`):
|
|
If no model is provided, we need to know if the model_init returns an encoder-decoder.
|
|
disable_dropout (`bool`, defaults to `True`):
|
|
Whether or not to disable dropouts in `model` and `ref_model`.
|
|
generate_during_eval (`bool`, defaults to `False`):
|
|
Whether to sample and log generations during evaluation step.
|
|
compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
|
|
The function to use to compute the metrics. Must take a `EvalPrediction` and return
|
|
a dictionary string to metric values.
|
|
precompute_ref_log_probs (`bool`, defaults to `False`):
|
|
Flag to precompute reference model log probabilities for training and evaluation datasets. This is useful if you want to train
|
|
without the reference model and reduce the total GPU memory needed.
|
|
dataset_num_proc (`Optional[int]`, *optional*):
|
|
The number of workers to use to tokenize the data. Defaults to None.
|
|
model_init_kwargs (`Optional[Dict]`, *optional*):
|
|
Dict of Optional kwargs to pass when instantiating the model from a string
|
|
ref_model_init_kwargs (`Optional[Dict]`, *optional*):
|
|
Dict of Optional kwargs to pass when instantiating the ref model from a string
|
|
model_adapter_name (`str`, defaults to `None`):
|
|
Name of the train target PEFT adapter, when using LoRA with multiple adapters.
|
|
ref_adapter_name (`str`, defaults to `None`):
|
|
Name of the reference PEFT adapter, when using LoRA with multiple adapters.
|
|
reference_free (`bool`):
|
|
If True, we ignore the _provided_ reference model and implicitly use a reference model that assigns equal probability to all responses.
|
|
"""
|
|
|
|
_tag_names = ["trl", "dpo"]
|
|
|
|
def __init__(
|
|
self,
|
|
model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
|
|
ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
|
|
dpo_alpha: float = 1.0,
|
|
beta: float = 0.1,
|
|
gamma: float = 0.1,
|
|
label_smoothing: float = 0,
|
|
loss_type: Literal["sigmoid", "hinge", "ipo", "kto_pair"] = "sigmoid",
|
|
args: Optional[TrainingArguments] = None,
|
|
data_collator: Optional[DataCollator] = None,
|
|
label_pad_token_id: int = -100,
|
|
padding_value: Optional[int] = None,
|
|
truncation_mode: str = "keep_end",
|
|
train_dataset: Optional[Dataset] = None,
|
|
eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
|
|
tokenizer: Optional[PreTrainedTokenizerBase] = None,
|
|
model_init: Optional[Callable[[], PreTrainedModel]] = None,
|
|
callbacks: Optional[List[TrainerCallback]] = None,
|
|
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
|
|
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
|
|
max_length: Optional[int] = None,
|
|
max_prompt_length: Optional[int] = None,
|
|
max_target_length: Optional[int] = None,
|
|
peft_config: Optional[Dict] = None,
|
|
is_encoder_decoder: Optional[bool] = None,
|
|
disable_dropout: bool = True,
|
|
generate_during_eval: bool = False,
|
|
compute_metrics: Optional[Callable[[EvalLoopOutput], Dict]] = None,
|
|
precompute_ref_log_probs: bool = False,
|
|
dataset_num_proc: Optional[int] = None,
|
|
model_init_kwargs: Optional[Dict] = None,
|
|
ref_model_init_kwargs: Optional[Dict] = None,
|
|
model_adapter_name: Optional[str] = None,
|
|
ref_adapter_name: Optional[str] = None,
|
|
reference_free: bool = False,
|
|
):
|
|
|
|
if model_init_kwargs is None:
|
|
model_init_kwargs = {}
|
|
elif not isinstance(model, str):
|
|
raise ValueError("You passed model_kwargs to the DPOTrainer. But your model is already instantiated.")
|
|
|
|
if ref_model_init_kwargs is None:
|
|
ref_model_init_kwargs = {}
|
|
elif not isinstance(ref_model, str):
|
|
raise ValueError("You passed ref_model_kwargs to the DPOTrainer. But your ref_model is already instantiated.")
|
|
|
|
if isinstance(model, str):
|
|
warnings.warn("You passed a model_id to the DPOTrainer. This will automatically create an " "`AutoModelForCausalLM` or a `PeftModel` (if you passed a `peft_config`) for you.")
|
|
model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
|
|
|
|
if isinstance(ref_model, str):
|
|
warnings.warn("You passed a ref model_id to the DPOTrainer. This will automatically create an " "`AutoModelForCausalLM`")
|
|
ref_model = AutoModelForCausalLM.from_pretrained(ref_model, **ref_model_init_kwargs)
|
|
|
|
|
|
|
|
self._peft_has_been_casted_to_bf16 = False
|
|
|
|
if generate_during_eval and not is_wandb_available():
|
|
raise ValueError("`generate_during_eval=True` requires Weights and Biases to be installed." " Please install `wandb` to resolve.")
|
|
|
|
if model is not None:
|
|
self.is_encoder_decoder = model.config.is_encoder_decoder
|
|
elif is_encoder_decoder is None:
|
|
raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
|
|
else:
|
|
self.is_encoder_decoder = is_encoder_decoder
|
|
|
|
self.is_peft_model = is_peft_available() and isinstance(model, PeftModel)
|
|
self.model_adapter_name = model_adapter_name
|
|
self.ref_adapter_name = ref_adapter_name
|
|
self.reference_free = reference_free
|
|
|
|
if ref_model:
|
|
self.ref_model = ref_model
|
|
elif self.is_peft_model or precompute_ref_log_probs:
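# No separate reference model is kept: with PEFT, the policy with adapters disabled serves as
# the implicit reference (see `null_ref_context`), and with precomputed ref log probs the
# reference outputs are cached ahead of time in the dataloaders.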
|
|
|
|
self.ref_model = None
|
|
else:
|
|
if is_deepspeed_zero3_enabled():
|
|
self.ref_model = AutoModelForCausalLM.from_pretrained(model)
|
|
else:
|
|
self.ref_model = create_reference_model(model)
|
|
|
|
if tokenizer is None:
|
|
raise ValueError("tokenizer must be specified to tokenize a DPO dataset.")
|
|
if max_length is None:
|
|
warnings.warn(
"`max_length` is not set in the DPOTrainer's init. It will default to `512`, but you should set it yourself in the future.",
|
|
UserWarning,
|
|
)
|
|
max_length = 512
|
|
if max_prompt_length is None:
|
|
warnings.warn(
"`max_prompt_length` is not set in the DPOTrainer's init. It will default to `128`, but you should set it yourself in the future.",
|
|
UserWarning,
|
|
)
|
|
max_prompt_length = 128
|
|
|
|
if max_target_length is None and self.is_encoder_decoder:
|
|
warnings.warn(
"When using an encoder-decoder architecture, you should set `max_target_length` in the DPOTrainer's init. It will default to `128`, but you should set it yourself in the future.",
|
|
UserWarning,
|
|
)
|
|
max_target_length = 128
|
|
|
|
if data_collator is None:
|
|
data_collator = DPODataCollatorWithPadding(
|
|
pad_token_id=tokenizer.pad_token_id,
|
|
label_pad_token_id=label_pad_token_id,
|
|
is_encoder_decoder=self.is_encoder_decoder,
|
|
)
|
|
|
|
if args.remove_unused_columns:
|
|
args.remove_unused_columns = False
|
|
|
|
warnings.warn(
"When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments. We have set it for you, but you should do it yourself in the future.",
|
|
UserWarning,
|
|
)
|
|
|
|
self.use_dpo_data_collator = True
|
|
else:
|
|
self.use_dpo_data_collator = False
|
|
|
|
if disable_dropout:
|
|
disable_dropout_in_model(model)
|
|
if self.ref_model is not None:
|
|
disable_dropout_in_model(self.ref_model)
|
|
|
|
self.max_length = max_length
|
|
self.generate_during_eval = generate_during_eval
|
|
self.label_pad_token_id = label_pad_token_id
|
|
self.padding_value = padding_value if padding_value is not None else tokenizer.pad_token_id
|
|
self.max_prompt_length = max_prompt_length
|
|
self.truncation_mode = truncation_mode
|
|
self.max_target_length = max_target_length
|
|
self.tokenizer = tokenizer
|
|
self.precompute_ref_log_probs = precompute_ref_log_probs
|
|
|
|
|
|
|
|
self._precomputed_train_ref_log_probs = False
|
|
self._precomputed_eval_ref_log_probs = False
|
|
|
|
if loss_type in ["hinge", "ipo", "kto_pair"] and label_smoothing > 0:
|
|
warnings.warn("You are using a loss type that does not support label smoothing. Ignoring label_smoothing parameter.")
|
|
|
|
self.dpo_alpha = dpo_alpha
|
|
self.beta = beta
|
|
self.gamma = gamma
|
|
self.label_smoothing = label_smoothing
|
|
self.loss_type = loss_type
|
|
|
|
self._stored_metrics = defaultdict(lambda: defaultdict(list))
|
|
|
|
self.dataset_num_proc = dataset_num_proc
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
super().__init__(
|
|
model=model,
|
|
args=args,
|
|
data_collator=data_collator,
|
|
train_dataset=train_dataset,
|
|
eval_dataset=eval_dataset,
|
|
tokenizer=tokenizer,
|
|
model_init=model_init,
|
|
compute_metrics=compute_metrics,
|
|
callbacks=callbacks,
|
|
optimizers=optimizers,
|
|
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
|
|
)
|
|
|
|
if not hasattr(self, "accelerator"):
|
|
raise AttributeError("Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`.")
|
|
|
|
|
|
if self.is_deepspeed_enabled:
|
|
if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs:
|
|
raise ValueError("You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`.")
|
|
|
|
if self.ref_model is None:
|
|
if not (self.is_peft_model or self.precompute_ref_log_probs):
|
|
raise ValueError("No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`")
|
|
else:
|
|
if self.is_deepspeed_enabled:
|
|
self.ref_model = self._prepare_deepspeed(self.ref_model)
|
|
else:
|
|
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
|
|
|
|
def _prepare_deepspeed(self, model: PreTrainedModelWrapper):
|
|
|
|
deepspeed_plugin = self.accelerator.state.deepspeed_plugin
|
|
config_kwargs = deepcopy(deepspeed_plugin.deepspeed_config)
|
|
|
|
if model is not None:
|
|
if hasattr(model, "config"):
|
|
hidden_size = max(model.config.hidden_sizes) if getattr(model.config, "hidden_sizes", None) else getattr(model.config, "hidden_size", None)
|
|
if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3:
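# Under ZeRO-3, scale the DeepSpeed bucket and parameter-persistence sizes with the model's
# hidden size so the frozen reference model does not allocate oversized communication buffers.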
|
|
|
|
|
|
config_kwargs.update(
|
|
{
|
|
"zero_optimization.reduce_bucket_size": hidden_size * hidden_size,
|
|
"zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size,
|
|
"zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size,
|
|
}
|
|
)
|
|
|
|
|
|
|
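# If ZeRO-3 is not used, fall back to stage 0: the reference model is kept un-sharded on each
# device, since it is only ever run in inference mode.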
|
if config_kwargs["zero_optimization"]["stage"] != 3:
|
|
config_kwargs["zero_optimization"]["stage"] = 0
|
|
model, *_ = deepspeed.initialize(model=model, config=config_kwargs)
|
|
model.eval()
|
|
return model
|
|
|
|
def get_train_dataloader(self) -> DataLoader:
|
|
"""
|
|
Returns the training [`~torch.utils.data.DataLoader`].
|
|
|
|
Overrides `transformers.Trainer.get_train_dataloader` to precompute `ref_log_probs`.
|
|
"""
|
|
|
|
if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs:
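# Run a single (unshuffled) pass of the reference model over the training set, gather the
# per-example log probs across processes, and cache them as extra dataset columns so the
# reference model is not needed again during training.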
|
|
dataloader_params = {
|
|
"batch_size": self.args.per_device_train_batch_size,
|
|
"collate_fn": self.data_collator,
|
|
"num_workers": self.args.dataloader_num_workers,
|
|
"pin_memory": self.args.dataloader_pin_memory,
|
|
"shuffle": False,
|
|
}
|
|
|
|
|
|
data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params))
|
|
|
|
reference_chosen_logps = []
|
|
reference_rejected_logps = []
|
|
for padded_batch in tqdm(iterable=data_loader, desc="Train dataset reference log probs"):
|
|
reference_chosen_logp, reference_rejected_logp = self.compute_reference_log_probs(padded_batch)
|
|
reference_chosen_logp, reference_rejected_logp = self.accelerator.gather_for_metrics((reference_chosen_logp, reference_rejected_logp))
|
|
reference_chosen_logps.append(reference_chosen_logp.cpu())
|
|
reference_rejected_logps.append(reference_rejected_logp.cpu())
|
|
|
|
all_reference_chosen_logps = torch.cat(reference_chosen_logps).float().numpy()
|
|
all_reference_rejected_logps = torch.cat(reference_rejected_logps).float().numpy()
|
|
|
|
self.train_dataset = self.train_dataset.add_column(name="reference_chosen_logps", column=all_reference_chosen_logps)
|
|
self.train_dataset = self.train_dataset.add_column(name="reference_rejected_logps", column=all_reference_rejected_logps)
|
|
|
|
self._precomputed_train_ref_log_probs = True
|
|
|
|
return super().get_train_dataloader()
|
|
|
|
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
|
|
"""
|
|
Returns the evaluation [`~torch.utils.data.DataLoader`].
|
|
|
|
Overrides `transformers.Trainer.get_eval_dataloader` to precompute `ref_log_probs`.
|
|
|
|
Args:
|
|
eval_dataset (`torch.utils.data.Dataset`, *optional*):
|
|
If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
|
|
by the `model.forward()` method are automatically removed. It must implement `__len__`.
|
|
"""
|
|
if eval_dataset is None and self.eval_dataset is None:
|
|
raise ValueError("Trainer: evaluation requires an eval_dataset.")
|
|
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
|
|
|
|
if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs:
|
|
dataloader_params = {
|
|
"batch_size": self.args.per_device_eval_batch_size,
|
|
"collate_fn": self.data_collator,
|
|
"num_workers": self.args.dataloader_num_workers,
|
|
"pin_memory": self.args.dataloader_pin_memory,
|
|
"shuffle": False,
|
|
}
|
|
|
|
|
|
data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params))
|
|
|
|
reference_chosen_logps = []
|
|
reference_rejected_logps = []
|
|
for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"):
|
|
reference_chosen_logp, reference_rejected_logp = self.compute_reference_log_probs(padded_batch)
|
|
reference_chosen_logp, reference_rejected_logp = self.accelerator.gather_for_metrics((reference_chosen_logp, reference_rejected_logp))
|
|
reference_chosen_logps.append(reference_chosen_logp.cpu())
|
|
reference_rejected_logps.append(reference_rejected_logp.cpu())
|
|
|
|
all_reference_chosen_logps = torch.cat(reference_chosen_logps).float().numpy()
|
|
all_reference_rejected_logps = torch.cat(reference_rejected_logps).float().numpy()
|
|
|
|
eval_dataset = eval_dataset.add_column(name="reference_chosen_logps", column=all_reference_chosen_logps)
|
|
eval_dataset = eval_dataset.add_column(name="reference_rejected_logps", column=all_reference_rejected_logps)
|
|
|
|
|
|
if self.eval_dataset is not None:
|
|
self.eval_dataset = eval_dataset
|
|
self._precomputed_eval_ref_log_probs = True
|
|
|
|
return super().get_eval_dataloader(eval_dataset=eval_dataset)
|
|
|
|
def build_tokenized_answer(self, prompt, answer):
|
|
"""
|
|
Llama tokenizer does not satisfy `enc(a + b) = enc(a) + enc(b)`.
|
|
It does ensure `enc(a + b) = enc(a) + enc(a + b)[len(enc(a)):]`.
|
|
Reference:
|
|
https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
|
|
"""
|
|
|
|
full_tokenized = self.tokenizer(prompt + answer, add_special_tokens=False)
|
|
prompt_input_ids = self.tokenizer(prompt, add_special_tokens=False)["input_ids"]
|
|
|
|
answer_input_ids = full_tokenized["input_ids"][len(prompt_input_ids) :]
|
|
answer_attention_mask = full_tokenized["attention_mask"][len(prompt_input_ids) :]
|
|
|
|
|
|
full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids])
|
|
|
|
|
|
full_input_ids = np.array(full_tokenized["input_ids"])
|
|
|
|
if len(full_input_ids) != len(full_concat_input_ids):
|
|
raise ValueError("The concatenation of prompt and answer input ids should have the same length as the full tokenized input ids.")
|
|
|
|
|
|
|
|
|
|
|
|
response_token_ids_start_idx = len(prompt_input_ids)
|
|
|
|
|
|
|
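# If the standalone prompt tokenization differs from the prompt slice of the full tokenization,
# the boundary token was merged by the tokenizer; move the split point back by one token.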
|
if prompt_input_ids != full_tokenized["input_ids"][:response_token_ids_start_idx]:
|
|
response_token_ids_start_idx -= 1
|
|
|
|
prompt_input_ids = full_tokenized["input_ids"][:response_token_ids_start_idx]
|
|
prompt_attention_mask = full_tokenized["attention_mask"][:response_token_ids_start_idx]
|
|
|
|
if len(prompt_input_ids) != len(prompt_attention_mask):
|
|
raise ValueError("Prompt input ids and attention mask should have the same length.")
|
|
|
|
answer_input_ids = full_tokenized["input_ids"][response_token_ids_start_idx:]
|
|
answer_attention_mask = full_tokenized["attention_mask"][response_token_ids_start_idx:]
|
|
|
|
return dict(
|
|
prompt_input_ids=prompt_input_ids,
|
|
prompt_attention_mask=prompt_attention_mask,
|
|
input_ids=answer_input_ids,
|
|
attention_mask=answer_attention_mask,
|
|
)
|
|
|
|
def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]] = None) -> Dict:
|
|
"""Tokenize a single row from a DPO specific dataset.
|
|
|
|
At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
|
|
in case the prompt + chosen or prompt + rejected responses is/are too long. First
|
|
we truncate the prompt; if we're still too long, we truncate the chosen/rejected.
|
|
|
|
We also create the labels for the chosen/rejected responses, which are of length equal to
|
|
the sum of the length of the prompt and the chosen/rejected response, with
|
|
label_pad_token_id for the prompt tokens.
|
|
"""
|
|
batch = {}
|
|
prompt = feature["prompt"]
|
|
chosen = feature["chosen"]
|
|
rejected = feature["rejected"]
|
|
|
|
if not self.is_encoder_decoder:
|
|
|
|
|
|
|
|
|
|
|
|
if not isinstance(prompt, str):
|
|
raise ValueError(f"prompt should be a str but got {type(prompt)}")
|
|
prompt_tokens = self.tokenizer(prompt, add_special_tokens=False)
|
|
prompt_tokens = {f"prompt_{k}": v for k, v in prompt_tokens.items()}
|
|
|
|
if not isinstance(chosen, str):
|
|
raise ValueError(f"chosen should be a str but got {type(chosen)}")
|
|
chosen_tokens = self.build_tokenized_answer(prompt, chosen)
|
|
|
|
if not isinstance(rejected, str):
|
|
raise ValueError(f"rejected should be a str but got {type(rejected)}")
|
|
rejected_tokens = self.build_tokenized_answer(prompt, rejected)
|
|
|
|
|
|
|
|
prompt_len_input_ids = len(prompt_tokens["prompt_input_ids"])
|
|
|
|
chosen_prompt_len_input_ids = len(chosen_tokens["prompt_input_ids"])
|
|
rejected_prompt_len_input_ids = len(rejected_tokens["prompt_input_ids"])
|
|
prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids)
|
|
|
|
for k, v in prompt_tokens.items():
|
|
prompt_tokens[k] = v[:prompt_len_input_ids]
|
|
|
|
|
|
|
|
num_diff_tokens = sum([a != b for a, b in zip(chosen_tokens["prompt_input_ids"], rejected_tokens["prompt_input_ids"])])
|
|
num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids)
|
|
if num_diff_tokens > 1 or num_diff_len > 1:
|
|
raise ValueError("Chosen and rejected prompt_input_ids might only differ on the " "last token due to tokenizer merge ops.")
|
|
|
|
|
|
prompt_tokens["prompt_input_ids"] = [self.tokenizer.bos_token_id] + prompt_tokens["prompt_input_ids"]
|
|
chosen_tokens["prompt_input_ids"] = [self.tokenizer.bos_token_id] + chosen_tokens["prompt_input_ids"]
|
|
rejected_tokens["prompt_input_ids"] = [self.tokenizer.bos_token_id] + rejected_tokens["prompt_input_ids"]
|
|
|
|
prompt_tokens["prompt_attention_mask"] = [1] + prompt_tokens["prompt_attention_mask"]
|
|
chosen_tokens["prompt_attention_mask"] = [1] + chosen_tokens["prompt_attention_mask"]
|
|
rejected_tokens["prompt_attention_mask"] = [1] + rejected_tokens["prompt_attention_mask"]
|
|
|
|
|
|
chosen_tokens["input_ids"].append(self.tokenizer.eos_token_id)
|
|
chosen_tokens["attention_mask"].append(1)
|
|
|
|
rejected_tokens["input_ids"].append(self.tokenizer.eos_token_id)
|
|
rejected_tokens["attention_mask"].append(1)
|
|
|
|
longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"]))
|
|
|
|
|
|
for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
|
|
if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
|
|
if self.truncation_mode == "keep_start":
|
|
for k in ["prompt_input_ids", "prompt_attention_mask"]:
|
|
answer_tokens[k] = answer_tokens[k][: self.max_prompt_length]
|
|
elif self.truncation_mode == "keep_end":
|
|
for k in ["prompt_input_ids", "prompt_attention_mask"]:
|
|
answer_tokens[k] = answer_tokens[k][-self.max_prompt_length :]
|
|
else:
|
|
raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")
|
|
|
|
|
|
for answer_tokens in [chosen_tokens, rejected_tokens]:
|
|
if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
|
|
for k in ["input_ids", "attention_mask"]:
|
|
answer_tokens[k] = answer_tokens[k][: self.max_length - self.max_prompt_length]
|
|
|
|
|
|
chosen_sequence_tokens = {k: chosen_tokens[f"prompt_{k}"] + chosen_tokens[k] for k in ["input_ids", "attention_mask"]}
|
|
rejected_sequence_tokens = {k: rejected_tokens[f"prompt_{k}"] + rejected_tokens[k] for k in ["input_ids", "attention_mask"]}
|
|
chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:]
|
|
chosen_sequence_tokens["labels"][: len(chosen_tokens["prompt_input_ids"])] = [self.label_pad_token_id] * len(chosen_tokens["prompt_input_ids"])
|
|
rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:]
|
|
rejected_sequence_tokens["labels"][: len(rejected_tokens["prompt_input_ids"])] = [self.label_pad_token_id] * len(rejected_tokens["prompt_input_ids"])
|
|
|
|
for k, toks in {
|
|
"chosen_": chosen_sequence_tokens,
|
|
"rejected_": rejected_sequence_tokens,
|
|
"": prompt_tokens,
|
|
}.items():
|
|
for type_key, tokens in toks.items():
|
|
if type_key == "token_type_ids":
|
|
continue
|
|
batch[f"{k}{type_key}"] = tokens
|
|
|
|
else:
|
|
chosen_tokens = self.tokenizer(chosen, truncation=True, max_length=self.max_target_length, add_special_tokens=True)
|
|
rejected_tokens = self.tokenizer(rejected, truncation=True, max_length=self.max_target_length, add_special_tokens=True)
|
|
prompt_tokens = self.tokenizer(prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True)
|
|
|
|
batch["chosen_labels"] = chosen_tokens["input_ids"]
|
|
batch["rejected_labels"] = rejected_tokens["input_ids"]
|
|
batch["prompt_input_ids"] = prompt_tokens["input_ids"]
|
|
batch["prompt_attention_mask"] = prompt_tokens["attention_mask"]
|
|
|
|
if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
|
|
batch["rejected_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(labels=batch["rejected_labels"])
|
|
batch["chosen_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(labels=batch["chosen_labels"])
|
|
|
|
return batch
|
|
|
|
@contextmanager
|
|
def null_ref_context(self):
|
|
"""Context manager for handling null reference model (that is, peft adapter manipulation)."""
|
|
with self.accelerator.unwrap_model(self.model).disable_adapter() if self.is_peft_model and not self.ref_adapter_name else nullcontext():
|
|
if self.ref_adapter_name:
|
|
self.model.set_adapter(self.ref_adapter_name)
|
|
yield
|
|
if self.ref_adapter_name:
|
|
self.model.set_adapter(self.model_adapter_name or "default")
|
|
|
|
def compute_reference_log_probs(self, padded_batch: Dict) -> Dict:
|
|
"""Computes log probabilities of the reference model for a single padded batch of a DPO specific dataset."""
|
|
compute_ref_context_manager = torch.cuda.amp.autocast if self._peft_has_been_casted_to_bf16 else nullcontext
|
|
|
|
|
|
with torch.no_grad(), compute_ref_context_manager():
|
|
if self.ref_model is None:
|
|
with self.null_ref_context():
|
|
(
|
|
reference_chosen_logps,
|
|
reference_rejected_logps,
|
|
) = self.concatenated_forward(self.model, padded_batch)[:2]
|
|
else:
|
|
(
|
|
reference_chosen_logps,
|
|
reference_rejected_logps,
|
|
) = self.concatenated_forward(self.ref_model, padded_batch)[:2]
|
|
|
|
return reference_chosen_logps, reference_rejected_logps
|
|
|
|
@staticmethod
|
|
def concatenated_inputs(
|
|
batch: Dict[str, Union[List, torch.LongTensor]],
|
|
is_encoder_decoder: bool = False,
|
|
label_pad_token_id: int = -100,
|
|
padding_value: int = 0,
|
|
device: Optional[torch.device] = None,
|
|
) -> Dict[str, torch.LongTensor]:
|
|
"""Concatenate the chosen and rejected inputs into a single tensor.
|
|
|
|
Args:
|
|
batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).
|
|
is_encoder_decoder: Whether the model is an encoder-decoder model.
|
|
label_pad_token_id: The label pad token id.
|
|
padding_value: The padding value to use for the concatenated input_ids.
|
|
device: The device for the concatenated inputs.
|
|
|
|
Returns:
|
|
A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
|
|
"""
|
|
concatenated_batch = {}
|
|
|
|
if is_encoder_decoder:
|
|
max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1])
|
|
else:
|
|
max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1])
|
|
|
|
for k in batch:
|
|
|
|
if k.startswith("chosen") and isinstance(batch[k], torch.Tensor):
|
|
if "labels" in k or is_encoder_decoder:
|
|
pad_value = label_pad_token_id
|
|
elif k.endswith("_input_ids"):
|
|
pad_value = padding_value
|
|
elif k.endswith("_attention_mask"):
|
|
pad_value = 0
|
|
concatenated_key = k.replace("chosen", "concatenated")
|
|
concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
|
|
for k in batch:
|
|
if k.startswith("rejected") and isinstance(batch[k], torch.Tensor):
|
|
if "labels" in k or is_encoder_decoder:
|
|
pad_value = label_pad_token_id
|
|
elif k.endswith("_input_ids"):
|
|
pad_value = padding_value
|
|
elif k.endswith("_attention_mask"):
|
|
pad_value = 0
|
|
concatenated_key = k.replace("rejected", "concatenated")
|
|
concatenated_batch[concatenated_key] = torch.cat(
|
|
(
|
|
concatenated_batch[concatenated_key],
|
|
pad_to_length(batch[k], max_length, pad_value=pad_value),
|
|
),
|
|
dim=0,
|
|
).to(device=device)
|
|
|
|
if is_encoder_decoder:
|
|
concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1).to(device=device)
|
|
concatenated_batch["concatenated_attention_mask"] = batch["prompt_attention_mask"].repeat(2, 1).to(device=device)
|
|
|
|
|
|
|
|
|
|
|
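# Multimodal inputs: duplicate the visual features so the chosen and rejected halves of the
# concatenated batch are paired with the same images, image sizes and modalities.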
|
concatenated_batch["concatenated_images"] = batch["images"] * 2
|
|
concatenated_batch["image_sizes"] = batch["image_sizes"] * 2
|
|
concatenated_batch["modalities"] = batch["modalities"] * 2
|
|
return concatenated_batch
|
|
|
|
def dpo_loss(
|
|
self,
|
|
policy_chosen_logps: torch.FloatTensor,
|
|
policy_rejected_logps: torch.FloatTensor,
|
|
reference_chosen_logps: torch.FloatTensor,
|
|
reference_rejected_logps: torch.FloatTensor,
|
|
) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
|
|
"""Compute the DPO loss for a batch of policy and reference model log probabilities.
|
|
|
|
Args:
|
|
policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
|
|
policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)
|
|
reference_chosen_logps: Log probabilities of the reference model for the chosen responses. Shape: (batch_size,)
|
|
reference_rejected_logps: Log probabilities of the reference model for the rejected responses. Shape: (batch_size,)
|
|
|
|
Returns:
|
|
A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).
|
|
The losses tensor contains the DPO loss for each example in the batch.
|
|
The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
|
|
"""
|
|
pi_logratios = policy_chosen_logps - policy_rejected_logps
|
|
if self.reference_free:
|
|
ref_logratios = torch.tensor([0], dtype=pi_logratios.dtype, device=pi_logratios.device)
|
|
else:
|
|
ref_logratios = reference_chosen_logps - reference_rejected_logps
|
|
|
|
pi_logratios = pi_logratios.to(self.accelerator.device)
|
|
ref_logratios = ref_logratios.to(self.accelerator.device)
|
|
logits = pi_logratios - ref_logratios
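# `logits` is the margin between the policy and reference log-ratios; beta acts as a temperature
# (typically 0.1-0.5), and as beta -> 0 the reference model is effectively ignored.
# `label_smoothing` encodes uncertainty about the preference labels (conservative / cDPO loss).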
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if self.loss_type == "sigmoid":
|
|
losses = -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * logits) * self.label_smoothing
|
|
elif self.loss_type == "hinge":
|
|
losses = torch.relu(1 - self.beta * logits)
|
|
elif self.loss_type == "ipo":
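# IPO regresses the log-ratio margin towards 1/(2*beta) (eqn. 17 of the IPO paper), where beta
# plays the role of the regularization parameter tau.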
|
|
|
|
losses = (logits - 1 / (2 * self.beta)) ** 2
|
|
elif self.loss_type == "kto_pair":
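# KTO pair loss from the HALOs report: the clamped mean log-ratios serve as batch-level KL
# estimates that anchor the per-example chosen and rejected terms.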
|
|
|
|
chosen_KL = (policy_chosen_logps - reference_chosen_logps).mean().clamp(min=0)
|
|
rejected_KL = (policy_rejected_logps - reference_rejected_logps).mean().clamp(min=0)
|
|
|
|
chosen_logratios = policy_chosen_logps - reference_chosen_logps
|
|
rejected_logratios = policy_rejected_logps - reference_rejected_logps
|
|
|
|
losses = torch.cat(
|
|
(
|
|
1 - F.sigmoid(self.beta * (chosen_logratios - rejected_KL)),
|
|
1 - F.sigmoid(self.beta * (chosen_KL - rejected_logratios)),
|
|
),
|
|
0,
|
|
)
|
|
else:
|
|
raise ValueError(f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'kto_pair']")
|
|
|
|
chosen_rewards = self.beta * (policy_chosen_logps.to(self.accelerator.device) - reference_chosen_logps.to(self.accelerator.device)).detach()
|
|
rejected_rewards = self.beta * (policy_rejected_logps.to(self.accelerator.device) - reference_rejected_logps.to(self.accelerator.device)).detach()
|
|
|
|
return losses, chosen_rewards, rejected_rewards
|
|
|
|
@staticmethod
|
|
def get_batch_logps(
|
|
logits: torch.FloatTensor,
|
|
labels: torch.LongTensor,
|
|
average_log_prob: bool = False,
|
|
label_pad_token_id: int = -100,
|
|
is_encoder_decoder: bool = False,
|
|
) -> torch.FloatTensor:
|
|
"""Compute the log probabilities of the given labels under the given logits.
|
|
|
|
Args:
|
|
logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
|
|
labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
|
|
average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.
|
|
label_pad_token_id: The label pad token id.
|
|
is_encoder_decoder: Whether the model is an encoder-decoder model.
|
|
|
|
Returns:
|
|
A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
|
|
"""
|
|
if logits.shape[:-1] != labels.shape:
|
|
raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")
|
|
|
|
if not is_encoder_decoder:
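# For decoder-only models the logits at position t predict the token at position t + 1, so drop
# the first label and the last logit before computing per-token log probs.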
|
|
labels = labels[:, 1:].clone()
|
|
logits = logits[:, :-1, :]
|
|
loss_mask = labels != label_pad_token_id
|
|
|
|
|
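# Replace padded labels with a dummy index so `torch.gather` stays in range; `loss_mask` removes
# their contribution below.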
|
labels[labels == label_pad_token_id] = 0
|
|
|
|
per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
|
|
|
|
if average_log_prob:
|
|
return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
|
|
else:
|
|
return (per_token_logps * loss_mask).sum(-1)
|
|
|
|
def get_sft_loss(self, logits, labels):
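"""Auxiliary SFT loss: next-token cross-entropy over the chosen sequences.

Prompt positions carry `label_pad_token_id` (-100 by default) and are ignored by
`nn.CrossEntropyLoss`, so only the chosen response tokens contribute.
"""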
|
|
|
|
shift_logits = logits[..., :-1, :].contiguous()
|
|
shift_labels = labels[..., 1:].contiguous()
|
|
|
|
loss_fct = nn.CrossEntropyLoss()
|
|
shift_logits = shift_logits.view(-1, shift_logits.size(-1))
|
|
shift_labels = shift_labels.view(-1)
|
|
|
|
shift_labels = shift_labels.to(shift_logits.device)
|
|
loss = loss_fct(shift_logits, shift_labels)
|
|
return loss
|
|
|
|
def concatenated_forward(self, model: nn.Module, batch: Dict[str, Union[List, torch.LongTensor]]) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.LongTensor, torch.LongTensor]:
|
|
"""Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.
|
|
|
|
We do this to avoid doing two forward passes, because it's faster for FSDP.
|
|
"""
|
|
|
|
concatenated_batch = self.concatenated_inputs(
|
|
batch,
|
|
is_encoder_decoder=self.is_encoder_decoder,
|
|
label_pad_token_id=self.label_pad_token_id,
|
|
padding_value=self.padding_value,
|
|
device=self.accelerator.device,
|
|
)
|
|
len_chosen = batch["chosen_labels"].shape[0]
|
|
|
|
|
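# The underlying multimodal model is expected to support `dpo_forward=True` and to return the
# raw logits together with labels re-aligned after any image-token expansion.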
|
all_logits, new_labels = model(
|
|
concatenated_batch["concatenated_input_ids"],
|
|
attention_mask=concatenated_batch["concatenated_attention_mask"],
|
|
labels=concatenated_batch["concatenated_labels"],
|
|
images=concatenated_batch["concatenated_images"],
|
|
image_sizes=concatenated_batch["image_sizes"],
|
|
modalities=concatenated_batch["modalities"],
|
|
use_cache=False,
|
|
dpo_forward=True,
|
|
)
|
|
all_logits = all_logits.to(torch.float32)
|
|
all_logps = self.get_batch_logps(
|
|
all_logits,
|
|
new_labels,
|
|
average_log_prob=self.loss_type == "ipo",
|
|
is_encoder_decoder=self.is_encoder_decoder,
|
|
label_pad_token_id=self.label_pad_token_id,
|
|
)
|
|
|
|
chosen_logps = all_logps[:len_chosen]
|
|
rejected_logps = all_logps[len_chosen:]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
chosen_logits = all_logits[:len_chosen]
|
|
rejected_logits = all_logits[len_chosen:]
|
|
|
|
chosen_labels = new_labels[:len_chosen]
|
|
rejected_labels = new_labels[len_chosen:]
|
|
|
|
return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, chosen_labels, rejected_labels)
|
|
|
|
def get_batch_loss_metrics(
|
|
self,
|
|
model,
|
|
batch: Dict[str, Union[List, torch.LongTensor]],
|
|
train_eval: Literal["train", "eval"] = "train",
|
|
):
|
|
"""Compute the DPO loss and other metrics for the given batch of inputs for train or test.
|
|
Changes from the upstream TRL implementation:
1. add an auxiliary SFT loss on the chosen responses;
2. all-gather the logged metrics across processes.
|
|
"""
|
|
metrics = {}
|
|
|
|
(
|
|
policy_chosen_logps,
|
|
policy_rejected_logps,
|
|
policy_chosen_logits,
|
|
policy_rejected_logits,
|
|
chosen_labels,
|
|
rejected_labels,
|
|
) = self.concatenated_forward(model, batch)
|
|
|
|
|
|
if "reference_chosen_logps" in batch and "reference_rejected_logps" in batch:
|
|
reference_chosen_logps = batch["reference_chosen_logps"]
|
|
reference_rejected_logps = batch["reference_rejected_logps"]
|
|
else:
|
|
with torch.no_grad():
|
|
if self.ref_model is None:
|
|
with self.null_ref_context():
|
|
(
|
|
reference_chosen_logps,
|
|
reference_rejected_logps,
|
|
) = self.concatenated_forward(
|
|
self.model, batch
|
|
)[:2]
|
|
else:
|
|
(
|
|
reference_chosen_logps,
|
|
reference_rejected_logps,
|
|
) = self.concatenated_forward(
|
|
self.ref_model, batch
|
|
)[:2]
|
|
|
|
unscaled_dpo_losses, chosen_rewards, rejected_rewards = self.dpo_loss(
|
|
policy_chosen_logps,
|
|
policy_rejected_logps,
|
|
reference_chosen_logps,
|
|
reference_rejected_logps,
|
|
)
|
|
unscaled_dpo_losses = unscaled_dpo_losses.mean()
|
|
dpo_losses = unscaled_dpo_losses * self.dpo_alpha
|
|
unscaled_sft_loss = self.get_sft_loss(policy_chosen_logits, chosen_labels)
|
|
sft_loss = unscaled_sft_loss * self.gamma
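# Total objective: dpo_alpha-weighted preference loss plus gamma-weighted SFT loss on the chosen responses.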
|
|
|
|
|
|
losses = dpo_losses + sft_loss
|
|
|
|
|
|
reward_accuracies = (chosen_rewards > rejected_rewards).float()
|
|
|
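# Gather detached tensors from every rank so the logged metrics reflect the global batch; in
# single-process runs the tensor is returned unchanged.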
|
def all_gather_tensor(tensor):
|
|
if torch.distributed.is_available() and torch.distributed.is_initialized():
|
|
tensor = tensor.detach()
|
|
gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())]
|
|
torch.distributed.all_gather(gathered_tensor, tensor)
|
|
tensor = torch.cat(gathered_tensor, dim=0)
|
|
|
|
|
|
return tensor
|
|
|
|
|
|
chosen_rewards = all_gather_tensor(chosen_rewards)
|
|
rejected_rewards = all_gather_tensor(rejected_rewards)
|
|
reward_accuracies = all_gather_tensor(reward_accuracies)
|
|
policy_chosen_logps = all_gather_tensor(policy_chosen_logps)
|
|
policy_rejected_logps = all_gather_tensor(policy_rejected_logps)
|
|
reference_chosen_logps = all_gather_tensor(reference_chosen_logps)
|
|
reference_rejected_logps = all_gather_tensor(reference_rejected_logps)
|
|
|
|
prefix = "eval_" if train_eval == "eval" else ""
|
|
metrics[f"{prefix}losses/dpo"] = unscaled_dpo_losses.cpu()
|
|
metrics[f"{prefix}losses/sft"] = unscaled_sft_loss.cpu()
|
|
metrics[f"{prefix}losses/total"] = losses.cpu()
|
|
metrics[f"{prefix}rewards/chosen"] = chosen_rewards.mean().cpu()
|
|
metrics[f"{prefix}rewards/rejected"] = rejected_rewards.mean().cpu()
|
|
metrics[f"{prefix}rewards/accuracies"] = reward_accuracies.mean().cpu()
|
|
metrics[f"{prefix}rewards/margins"] = (chosen_rewards - rejected_rewards).mean().cpu()
|
|
|
|
metrics[f"{prefix}logps/rejected"] = policy_rejected_logps.detach().mean().cpu()
|
|
metrics[f"{prefix}logps/chosen"] = policy_chosen_logps.detach().mean().cpu()
|
|
|
|
|
|
|
|
|
|
metrics[f"{prefix}ref_logps/rejected"] = reference_rejected_logps.mean().cpu()
|
|
metrics[f"{prefix}ref_logps/chosen"] = reference_chosen_logps.mean().cpu()
|
|
|
|
|
|
|
|
|
|
|
|
return losses, metrics
|
|
|
|
def compute_loss(
|
|
self,
|
|
model: Union[PreTrainedModel, nn.Module],
|
|
inputs: Dict[str, Union[torch.Tensor, Any]],
|
|
return_outputs=False,
|
|
) -> Union[torch.Tensor, Tuple[torch.Tensor, Dict[str, torch.Tensor]]]:
|
|
if not self.use_dpo_data_collator:
|
|
warnings.warn(
|
|
"compute_loss is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than "
|
|
"DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator"
|
|
)
|
|
|
|
compute_loss_context_manager = torch.cuda.amp.autocast if self._peft_has_been_casted_to_bf16 else nullcontext
|
|
|
|
with compute_loss_context_manager():
|
|
loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train")
|
|
|
|
|
|
self.store_metrics(metrics, train_eval="train")
|
|
|
|
if return_outputs:
|
|
return (loss, metrics)
|
|
return loss
|
|
|
|
def get_batch_samples(self, model, batch: Dict[str, torch.LongTensor]) -> Tuple[str, str]:
|
|
"""Generate samples from the model and reference model for the given batch of inputs."""
|
|
|
|
|
|
|
|
generate_context_manager = nullcontext if not self._peft_has_been_casted_to_bf16 else torch.cuda.amp.autocast
|
|
|
|
with generate_context_manager():
|
|
policy_output = model.generate(
|
|
input_ids=batch["prompt_input_ids"],
|
|
attention_mask=batch["prompt_attention_mask"],
|
|
max_length=self.max_length,
|
|
do_sample=True,
|
|
pad_token_id=self.tokenizer.pad_token_id,
|
|
)
|
|
|
|
|
|
if "reference_output" in batch:
|
|
reference_output = batch["reference_output"]
|
|
else:
|
|
if self.ref_model is None:
|
|
with self.null_ref_context():
|
|
reference_output = self.model.generate(
|
|
input_ids=batch["prompt_input_ids"],
|
|
attention_mask=batch["prompt_attention_mask"],
|
|
max_length=self.max_length,
|
|
do_sample=True,
|
|
pad_token_id=self.tokenizer.pad_token_id,
|
|
)
|
|
else:
|
|
reference_output = self.ref_model.generate(
|
|
input_ids=batch["prompt_input_ids"],
|
|
attention_mask=batch["prompt_attention_mask"],
|
|
max_length=self.max_length,
|
|
do_sample=True,
|
|
pad_token_id=self.tokenizer.pad_token_id,
|
|
)
|
|
|
|
policy_output = pad_to_length(policy_output, self.max_length, self.tokenizer.pad_token_id)
|
|
policy_output_decoded = self.tokenizer.batch_decode(policy_output, skip_special_tokens=True)
|
|
|
|
reference_output = pad_to_length(reference_output, self.max_length, self.tokenizer.pad_token_id)
|
|
reference_output_decoded = self.tokenizer.batch_decode(reference_output, skip_special_tokens=True)
|
|
|
|
return policy_output_decoded, reference_output_decoded
|
|
|
|
def prediction_step(
|
|
self,
|
|
model: Union[PreTrainedModel, nn.Module],
|
|
inputs: Dict[str, Union[torch.Tensor, Any]],
|
|
prediction_loss_only: bool,
|
|
ignore_keys: Optional[List[str]] = None,
|
|
):
|
|
if not self.use_dpo_data_collator:
|
|
warnings.warn(
|
|
"prediction_step is only implemented for DPODataCollatorWithPadding, and you passed a datacollator that is different than "
|
|
"DPODataCollatorWithPadding - you might see unexpected behavior. Alternatively, you can implement your own prediction_step method if you are using a custom data collator"
|
|
)
|
|
if ignore_keys is None:
|
|
if hasattr(model, "config"):
|
|
ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
|
|
else:
|
|
ignore_keys = []
|
|
|
|
prediction_context_manager = torch.cuda.amp.autocast if self._peft_has_been_casted_to_bf16 else nullcontext
|
|
|
|
with torch.no_grad(), prediction_context_manager():
|
|
loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval")
|
|
|
|
|
|
self.store_metrics(metrics, train_eval="eval")
|
|
|
|
if prediction_loss_only:
|
|
return (loss.detach(), None, None)
|
|
|
|
|
|
logits_dict = {
|
|
"eval_logits/chosen": metrics["eval_logits/chosen"],
|
|
"eval_logits/rejected": metrics["eval_logits/rejected"],
|
|
}
|
|
logits = tuple(v.unsqueeze(dim=0) for k, v in logits_dict.items() if k not in ignore_keys)
|
|
logits = torch.stack(logits).mean(axis=1).to(self.accelerator.device)
|
|
labels = torch.zeros(logits.shape[0], device=self.accelerator.device)
|
|
|
|
return (loss.detach(), logits, labels)
|
|
|
|
def store_metrics(self, metrics: Dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
|
|
for key, value in metrics.items():
|
|
self._stored_metrics[train_eval][key].append(value)
|
|
|
|
def evaluation_loop(
|
|
self,
|
|
dataloader: DataLoader,
|
|
description: str,
|
|
prediction_loss_only: Optional[bool] = None,
|
|
ignore_keys: Optional[List[str]] = None,
|
|
metric_key_prefix: str = "eval",
|
|
) -> EvalLoopOutput:
|
|
"""
|
|
Overriding built-in evaluation loop to store metrics for each batch.
|
|
Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
|
|
|
|
Works both with or without labels.
|
|
"""
|
|
|
|
|
|
if self.generate_during_eval:
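# Sample a single random eval batch (to keep evaluation cheap), generate from the policy and
# reference models, and log the completions as a W&B table.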
|
|
|
|
num_samples = len(dataloader.dataset)
|
|
random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)
|
|
|
|
|
|
random_batch_dataset = dataloader.dataset.select(random_indices)
|
|
random_batch = self.data_collator(random_batch_dataset)
|
|
random_batch = self._prepare_inputs(random_batch)
|
|
|
|
policy_output_decoded, ref_output_decoded = self.get_batch_samples(self.model, random_batch)
|
|
|
|
self.log(
|
|
{
|
|
"game_log": wandb.Table(
|
|
columns=["Prompt", "Policy", "Ref Model"],
|
|
rows=[[prompt, pol[len(prompt) :], ref[len(prompt) :]] for prompt, pol, ref in zip(random_batch["prompt"], policy_output_decoded, ref_output_decoded)],
|
|
)
|
|
}
|
|
)
|
|
self.state.log_history.pop()
|
|
|
|
|
|
initial_output = super().evaluation_loop(dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix)
|
|
|
|
return initial_output
|
|
|
|
def log(self, logs: Dict[str, float]) -> None:
|
|
"""
|
|
Log `logs` on the various objects watching training, including stored metrics.
|
|
|
|
Args:
|
|
logs (`Dict[str, float]`):
|
|
The values to log.
|
|
"""
|
|
|
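# `Trainer.log` receives "loss" during training and "eval_loss" during evaluation, which tells
# us which bucket of stored metrics to flush.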
|
train_eval = "train" if "loss" in logs else "eval"
|
|
|
|
for key, metrics in self._stored_metrics[train_eval].items():
|
|
logs[key] = torch.tensor(metrics).mean().item()
|
|
del self._stored_metrics[train_eval]
|
|
return super().log(logs)
|
|
|
|
@wraps(Trainer.push_to_hub)
|
|
def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
|
|
"""
|
|
Overwrite the `push_to_hub` method in order to force-add the tag "dpo" when pushing the
|
|
model on the Hub. Please refer to `~transformers.Trainer.push_to_hub` for more details.
|
|
"""
|
|
kwargs = trl_sanitze_kwargs_for_tagging(model=self.model, tag_names=self._tag_names, kwargs=kwargs)
|
|
|
|
return super().push_to_hub(commit_message=commit_message, blocking=blocking, **kwargs)
|
|
|