ValueError: Attempting to unscale FP16 gradients.

#26
by hsuyab - opened

Hi, I'm getting this error when trying to fine-tune gpt-j-6b on custom data. Does anyone have any idea what's causing it?
It happens when I call trainer.train() to fine-tune the model; the full traceback is below, followed by a rough sketch of the setup.
```

ValueError Traceback (most recent call last)
/tmp/ipykernel_7169/3756176484.py in <module>
46
47 # Fine-tune the model
---> 48 trainer.train()

/opt/conda/lib/python3.8/site-packages/transformers/trainer.py in train(self, resume_from_checkpoint, trial, ignore_keys_for_eval, **kwargs)
1631 self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
1632 )
-> 1633 return inner_training_loop(
1634 args=args,
1635 resume_from_checkpoint=resume_from_checkpoint,

/opt/conda/lib/python3.8/site-packages/transformers/trainer.py in inner_training_loop(self, batch_size, args, resume_from_checkpoint, trial, ignore_keys_for_eval)
1933 xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
1934 # AMP: gradients need unscaling
-> 1935 self.scaler.unscale_(self.optimizer)
1936
1937 if is_sagemaker_mp_enabled() and args.fp16:

/opt/conda/lib/python3.8/site-packages/torch/cuda/amp/grad_scaler.py in unscale_(self, optimizer)
282 found_inf = torch.full((1,), 0.0, dtype=torch.float32, device=self._scale.device)
283
--> 284 optimizer_state["found_inf_per_device"] = self._unscale_grads_(optimizer, inv_scale, found_inf, False)
285 optimizer_state["stage"] = OptState.UNSCALED
286

/opt/conda/lib/python3.8/site-packages/torch/cuda/amp/grad_scaler.py in _unscale_grads_(self, optimizer, inv_scale, found_inf, allow_fp16)
210 continue
211 if (not allow_fp16) and param.grad.dtype == torch.float16:
--> 212 raise ValueError("Attempting to unscale FP16 gradients.")
213 if param.grad.is_sparse:
214 # is_coalesced() == False means the sparse grad has values with duplicate indices.

ValueError: Attempting to unscale FP16 gradients.
```
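
For reference, here is a minimal sketch of the kind of setup that runs into this (not my exact notebook; the FP16 model load and the `TrainingArguments` values are my guesses at what triggers it, and `train_dataset` stands in for my tokenized custom data):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments

# Load the GPT-J weights directly in half precision...
model = AutoModelForCausalLM.from_pretrained(
    "EleutherAI/gpt-j-6B",
    torch_dtype=torch.float16,
)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")

training_args = TrainingArguments(
    output_dir="gptj-finetune",        # placeholder output directory
    per_device_train_batch_size=1,
    fp16=True,                         # ...while also enabling AMP mixed precision,
)                                      # so the gradients themselves end up in FP16

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,       # placeholder for my tokenized custom dataset
)

trainer.train()  # -> ValueError: Attempting to unscale FP16 gradients.
```

Is combining a float16 model load with `fp16=True` known to break the GradScaler like this, or is something else going on?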
