|
|
|
|
|
"""Megatron Module""" |
|
|
|
import torch |
|
from torch.autograd import Variable |
|
from torch.nn.parameter import Parameter |
|
|
|
from megatron import get_args |
|
from megatron.core import mpu, tensor_parallel |
|
|
|
|
|
# Tensor-type tuples used by the fp16/bf16 conversion helpers below for
# dtype checks.
_FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
_HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
_BF16_TYPES = (torch.BFloat16Tensor, torch.cuda.BFloat16Tensor)
|
|
|
|
|
|
|
def param_is_not_shared(param): |
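    """Return True unless the parameter is marked as shared (e.g. the word
    embeddings weight duplicated on the last pipeline stage)."""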
|
return not hasattr(param, 'shared') or not param.shared |
|
|
|
|
|
|
|
class MegatronModule(torch.nn.Module): |
|
"""Megatron specific extensions of torch Module with support |
|
for pipelining.""" |
|
|
|
def __init__(self, share_word_embeddings=True): |
|
super(MegatronModule, self).__init__() |
|
self.share_word_embeddings = share_word_embeddings |
|
|
|
|
|
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): |
|
"""Use this function to override the state dict for |
|
saving checkpoints.""" |
|
return self.state_dict(prefix=prefix, keep_vars=keep_vars) |
|
|
|
|
|
def word_embeddings_weight(self): |
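        """Return this pipeline stage's copy of the word-embedding (or
        LM-head) weight."""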
|
if self.pre_process: |
|
if self.language_model.tie_embed_logits: |
|
return self.language_model.embedding.word_embeddings.weight |
|
return self.language_model.lm_head |
|
else: |
|
if not self.language_model.tie_embed_logits: |
|
return self.language_model.lm_head |
|
if not self.share_word_embeddings: |
|
raise Exception('word_embeddings_weight() called for last ' |
|
'stage, but share_word_embeddings is false') |
|
return self.word_embeddings.weight |
|
|
|
def initialize_word_embeddings(self, init_method_normal, args): |
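        """Create the duplicated word embeddings on the last pipeline stage
        and synchronize their initial values (and, for encoder-decoder split
        models, the position embeddings) with the first stage."""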
|
if not self.share_word_embeddings: |
|
raise Exception('initialize_word_embeddings() was called but ' |
|
                            'share_word_embeddings is false')

        # Nothing to do unless pipeline parallelism splits the model: with a
        # single pipeline stage the input embedding and the output layer live
        # on the same rank.
        if args.pipeline_model_parallel_size == 1:
            return

        # The word embeddings on the first stage and the weights used for the
        # output logits on the last stage must stay identical. With pipeline
        # parallelism they live on different ranks, so:
        #   1. Create a second copy of word_embeddings on the last stage,
        #      filled with zeros.
        #   2. All-reduce the two copies (below) so that both stages start
        #      from the same values.
        #   3. During training, the gradients of the two copies are
        #      all-reduced as well (handled by the training loop), so every
        #      weight update is applied identically on both stages.
if mpu.is_pipeline_last_stage() and \ |
|
not self.pre_process: |
|
assert not mpu.is_pipeline_first_stage() |
|
            self._word_embeddings_for_head_key = 'word_embeddings_for_head'
            # Create a zero-filled duplicate of the word embeddings for the
            # output head; the all-reduce below copies the first stage's
            # weights into it.
self.word_embeddings = tensor_parallel.VocabParallelEmbedding( |
|
args.padded_vocab_size, args.hidden_size, |
|
init_method=init_method_normal(args.init_method_std), |
|
params_dtype=args.params_dtype, |
|
use_cpu_initialization=args.use_cpu_initialization, |
|
perform_initialization=args.perform_initialization) |
|
            self.word_embeddings.weight.data.fill_(0)
            # Mark the duplicate as shared so that helpers such as
            # param_is_not_shared() can recognize it.
            self.word_embeddings.weight.shared = True

        # A stage that holds the input embedding but is not the first pipeline
        # stage (e.g. the decoder side of an encoder-decoder split) zeroes its
        # embedding so that the all-reduces below copy in the first stage's
        # values.
if not mpu.is_pipeline_first_stage(ignore_virtual=True) and \ |
|
self.pre_process: |
|
self.language_model.embedding.zero_parameters() |
|
|
|
if not torch.distributed.is_initialized(): |
|
if not getattr(MegatronModule, "embedding_warning_printed", False): |
|
print("WARNING! Distributed processes aren't initialized, so " |
|
"word embeddings in the last layer are not initialized. " |
|
"If you are just manipulating a model this is fine, but " |
|
"this needs to be handled manually. If you are training " |
|
"something is definitely wrong.") |
|
MegatronModule.embedding_warning_printed = True |
|
            return

        # Ensure that the first and last pipeline stages start from identical
        # embedding values: all-reduce the zero-initialized duplicate with the
        # first stage's weights.
if mpu.is_rank_in_embedding_group(): |
|
torch.distributed.all_reduce(self.word_embeddings_weight().data, |
|
                                         group=mpu.get_embedding_group())

        # When the pipeline has an encoder-decoder split
        # (pipeline_model_parallel_split_rank is set), the position embeddings
        # on the two sides of the split must also start from identical values.
if mpu.is_rank_in_position_embedding_group() and \ |
|
args.pipeline_model_parallel_split_rank is not None: |
|
|
|
self.language_model.embedding.cuda() |
|
position_embeddings = self.language_model.embedding.position_embeddings |
|
torch.distributed.all_reduce(position_embeddings.weight.data, |
|
group=mpu.get_position_embedding_group()) |
|
|
|
|
|
def conversion_helper(val, conversion): |
|
"""Apply conversion to val. Recursively apply conversion if `val` |
|
#is a nested tuple/list structure.""" |
|
if not isinstance(val, (tuple, list)): |
|
return conversion(val) |
|
rtn = [conversion_helper(v, conversion) for v in val] |
|
if isinstance(val, tuple): |
|
rtn = tuple(rtn) |
|
return rtn |
|
|
|
|
|
def fp32_to_float16(val, float16_convertor): |
|
"""Convert fp32 `val` to fp16/bf16""" |
|
def half_conversion(val): |
|
val_typecheck = val |
|
if isinstance(val_typecheck, (Parameter, Variable)): |
|
val_typecheck = val.data |
|
if isinstance(val_typecheck, _FLOAT_TYPES): |
|
val = float16_convertor(val) |
|
return val |
|
return conversion_helper(val, half_conversion) |
|
|
|
|
|
def float16_to_fp32(val): |
|
"""Convert fp16/bf16 `val` to fp32""" |
|
def float_conversion(val): |
|
val_typecheck = val |
|
if isinstance(val_typecheck, (Parameter, Variable)): |
|
val_typecheck = val.data |
|
if isinstance(val_typecheck, (_BF16_TYPES, _HALF_TYPES)): |
|
val = val.float() |
|
return val |
|
return conversion_helper(val, float_conversion) |
|
|
|
|
|
|
|
class Float16Module(MegatronModule): |
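    """Wrapper that holds the given module in fp16 or bf16 precision."""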
|
|
|
def __init__(self, module, args): |
|
super(Float16Module, self).__init__() |
|
|
|
if args.fp16: |
|
self.add_module('module', module.half()) |
|
def float16_convertor(val): |
|
return val.half() |
|
elif args.bf16: |
|
self.add_module('module', module.bfloat16()) |
|
def float16_convertor(val): |
|
return val.bfloat16() |
|
        else:
            raise Exception('Float16Module expects either args.fp16 or '
                            'args.bf16 to be set')
|
|
|
self.float16_convertor = float16_convertor |
|
|
|
|
|
def set_input_tensor(self, input_tensor): |
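        """Pass the input activation received from the previous pipeline
        stage on to the wrapped module."""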
|
return self.module.set_input_tensor(input_tensor) |
|
|
|
|
|
def forward(self, *inputs, **kwargs): |
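        """Cast inputs to the configured half precision on the first pipeline
        stage, run the wrapped module, and cast outputs back to fp32 on the
        last stage."""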
|
if mpu.is_pipeline_first_stage(): |
|
inputs = fp32_to_float16(inputs, self.float16_convertor) |
|
outputs = self.module(*inputs, **kwargs) |
|
if mpu.is_pipeline_last_stage(): |
|
outputs = float16_to_fp32(outputs) |
|
return outputs |
|
|
|
|
|
def state_dict(self, prefix='', keep_vars=False): |
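        # Delegate to the wrapped module so checkpoint keys do not carry an
        # extra 'module.' prefix from this wrapper.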
|
return self.module.state_dict(prefix=prefix, keep_vars=keep_vars) |
|
|
|
|
|
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False): |
|
return self.module.state_dict_for_save_checkpoint(prefix=prefix, |
|
keep_vars=keep_vars) |
|
|
|
|
|
def load_state_dict(self, state_dict, strict=True): |
|
self.module.load_state_dict(state_dict, strict=strict) |
|
|