Creates a dataloader that can also use the `SeedableRandomSampler`
def generate_baseline_dataloader(train_set, generator, batch_size, use_seedable_sampler=False): "Creates a dataloader that can also use the `SeedableRandomSampler`" if use_seedable_sampler: # The SeedableRandomSampler is needed during distributed setups # for full reproducibility across processes with the `DataLoader` sampler = SeedableRandomSampler( generator=generator, data_source=train_set, num_samples=len(train_set), ) return DataLoader(train_set, batch_size=batch_size, sampler=sampler) else: return DataLoader(train_set, batch_size=batch_size, shuffle=True, generator=generator)
Returns everything needed to perform basic training
def get_training_setup(accelerator, sched=False): "Returns everything needed to perform basic training" set_seed(42) model = RegressionModel() ddp_model = deepcopy(model) dset = RegressionDataset(length=80) dataloader = DataLoader(dset, batch_size=16) model.to(accelerator.device) if sched: opt = AdamW(params=model.parameters(), lr=1e-3) ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3) sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65) ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65) # Make a copy of `model` if sched: ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader) else: ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader
Creates a set of `DataLoader`s for the `glue` dataset. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. model_name (`str`, *optional*): The name of the model to use.
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"): """ Creates a set of `DataLoader`s for the `glue` dataset. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. model_name (`str`, *optional*): """ tokenizer = AutoTokenizer.from_pretrained(model_name) datasets = load_dataset("glue", "mrpc") def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.XLA: return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader
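A minimal usage sketch for the helper above, assuming it lives in a script that also defines the module-level `EVAL_BATCH_SIZE` constant it references (the value used here is illustrative):

```python
from accelerate import Accelerator

EVAL_BATCH_SIZE = 32  # assumed module-level constant used by `get_dataloaders`

accelerator = Accelerator()
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size=16)
# Both loaders would then typically go through `accelerator.prepare(...)` alongside the model and optimizer.
```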
Returns everything needed to perform basic training
def get_basic_setup(accelerator, num_samples=82, batch_size=16): "Returns everything needed to perform basic training" set_seed(42) model = RegressionModel() ddp_model = deepcopy(model) dset = RegressionDataset(length=num_samples) dataloader = DataLoader(dset, batch_size=batch_size) model.to(accelerator.device) ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader) return model, ddp_model, dataloader
Creates a set of `DataLoader`s for the `glue` dataset. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. model_name (`str`, *optional*): The name of the model to use. n_train (`int`, *optional*): The number of training examples to use. n_val (`int`, *optional*): The number of validation examples to use.
def get_dataloaders( accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160, ): """ Creates a set of `DataLoader`s for the `glue` dataset. Args: accelerator (`Accelerator`): An `Accelerator` object batch_size (`int`, *optional*): The batch size for the train and validation DataLoaders. model_name (`str`, *optional*): The name of the model to use. n_train (`int`, *optional*): The number of training examples to use. n_val (`int`, *optional*): The number of validation examples to use. """ tokenizer = AutoTokenizer.from_pretrained(model_name) datasets = load_dataset( "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"} ) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") def collate_fn(examples): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.XLA: return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt") return tokenizer.pad(examples, padding="longest", return_tensors="pt") # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE ) return train_dataloader, eval_dataloader
This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the model is on the meta device, we will load and dispatch the weights according to the `device_map` passed. If the model is already loaded, we will quantize the model and put it on the GPU. Args: model (`torch.nn.Module`): Input model. The model can be already loaded or on the meta device. bnb_quantization_config (`BnbQuantizationConfig`): The bitsandbytes quantization parameters. weights_location (`str` or `os.PathLike`): The path to the weights to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the same device. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across devices (for instance any layer that has a residual connection). max_memory (`Dict`, *optional*): A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory available if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid running out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Returns: `torch.nn.Module`: The quantized model
def load_and_quantize_model( model: torch.nn.Module, bnb_quantization_config: BnbQuantizationConfig, weights_location: Union[str, os.PathLike] = None, device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, no_split_module_classes: Optional[List[str]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_state_dict: bool = False, ): """ This function will quantize the input model with the associated config passed in `bnb_quantization_config`. If the model is in the meta device, we will load and dispatch the weights according to the `device_map` passed. If the model is already loaded, we will quantize the model and put the model on the GPU, Args: model (`torch.nn.Module`): Input model. The model can be already loaded or on the meta device bnb_quantization_config (`BnbQuantizationConfig`): The bitsandbytes quantization parameters weights_location (`str` or `os.PathLike`): The folder weights_location to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Returns: `torch.nn.Module`: The quantized model """ load_in_4bit = bnb_quantization_config.load_in_4bit load_in_8bit = bnb_quantization_config.load_in_8bit if load_in_8bit and not is_8bit_bnb_available(): raise ImportError( "You have a version of `bitsandbytes` that is not compatible with 8bit quantization," " make sure you have the latest version of `bitsandbytes` installed." ) if load_in_4bit and not is_4bit_bnb_available(): raise ValueError( "You have a version of `bitsandbytes` that is not compatible with 4bit quantization," "make sure you have the latest version of `bitsandbytes` installed." 
) modules_on_cpu = [] # custom device map if isinstance(device_map, dict) and len(device_map.keys()) > 1: modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: bnb_quantization_config.skip_modules = get_keys_to_not_convert(model) # add cpu modules to skip modules only for 4-bit modules if load_in_4bit: bnb_quantization_config.skip_modules.extend(modules_on_cpu) modules_to_not_convert = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fp32_modules is None: bnb_quantization_config.keep_in_fp32_modules = [] keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules modules_to_not_convert.extend(keep_in_fp32_modules) # compatibility with peft model.is_loaded_in_4bit = load_in_4bit model.is_loaded_in_8bit = load_in_8bit model_device = get_parameter_device(model) if model_device.type != "meta": # quantization of an already loaded model logger.warning( "It is not recommended to quantize a loaded model. " "The model should be instantiated under the `init_empty_weights` context manager." ) model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert) # convert param to the right dtype dtype = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules): param.to(torch.float32) if param.dtype != torch.float32: name = name.replace(".weight", "").replace(".bias", "") param = getattr(model, name, None) if param is not None: param.to(torch.float32) elif torch.is_floating_point(param): param.to(dtype) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device()) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device()) else: raise RuntimeError("No GPU found. A GPU is needed for quantization.") logger.info( f"The model device type is {model_device.type}. However, cuda is needed for quantization." "We move the model to cuda." ) return model elif weights_location is None: raise RuntimeError( f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " ) else: with init_empty_weights(): model = replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert ) device_map = get_quantized_model_device_map( model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): offload_state_dict = True offload = any(x in list(device_map.values()) for x in ["cpu", "disk"]) load_checkpoint_in_model( model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, ) return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
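A hedged sketch of the path the docstring recommends (instantiate under `init_empty_weights`, then load, quantize, and dispatch). The model class, checkpoint path, and config values here are placeholders rather than anything prescribed by the source:

```python
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM  # any torch.nn.Module source works

# Build the model skeleton on the meta device so no weights are materialized yet.
config = AutoConfig.from_pretrained("path/to/model")  # placeholder checkpoint identifier
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(config)

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized_model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="path/to/model",  # folder containing the checkpoint shards
    device_map="auto",
)
```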
A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bitLt` modules or by `bnb.nn.Linear4bit` modules from the `bitsandbytes` library. The function is run recursively and replaces `torch.nn.Linear` modules. Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. modules_to_not_convert (`List[str]`): Names of the modules to not convert. In practice we keep the `lm_head` in full precision for numerical stability reasons. current_key_name (`List[str]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (or part of it) is in the list of modules to not convert.
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None): """ A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules or by `bnb.nn.Linear4bit` modules from the `bitsandbytes`library. The function will be run recursively and replace `torch.nn.Linear` modules. Parameters: model (`torch.nn.Module`): Input model or `torch.nn.Module` as the function is run recursively. modules_to_not_convert (`List[str]`): Names of the modules to not quantize convert. In practice we keep the `lm_head` in full precision for numerical stability reasons. current_key_name (`List[str]`, *optional*): An array to track the current key of the recursion. This is used to check whether the current key (part of it) is not in the list of modules to not convert. """ if modules_to_not_convert is None: modules_to_not_convert = [] model, has_been_replaced = _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert, current_key_name ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model
Private method that wraps the recursion for module replacement. Returns the converted model and a boolean that indicates if the conversion has been successful or not.
def _replace_with_bnb_layers( model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None, ): """ Private method that wraps the recursion for module replacement. Returns the converted model and a boolean that indicates if the conversion has been successfull or not. """ # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily import bitsandbytes as bnb has_been_replaced = False for name, module in model.named_children(): if current_key_name is None: current_key_name = [] current_key_name.append(name) if isinstance(module, nn.Linear) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` current_key_name_str = ".".join(current_key_name) proceed = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: proceed = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_8bit: bnb_module = bnb.nn.Linear8bitLt( module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, ) elif bnb_quantization_config.load_in_4bit: bnb_module = bnb.nn.Linear4bit( module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, ) else: raise ValueError("load_in_8bit and load_in_4bit can't be both False") bnb_module.weight.data = module.weight.data if module.bias is not None: bnb_module.bias.data = module.bias.data bnb_module.requires_grad_(False) setattr(model, name, bnb_module) has_been_replaced = True if len(list(module.children())) > 0: _, _has_been_replaced = _replace_with_bnb_layers( module, bnb_quantization_config, modules_to_not_convert, current_key_name ) has_been_replaced = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1) return model, has_been_replaced
A utility function to get the keys of the modules to keep in full precision, if any. For example, for CausalLM modules we may want to keep the `lm_head` in full precision for numerical stability reasons. For other architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in int8. Parameters: model (`torch.nn.Module`): Input model
def get_keys_to_not_convert(model): r""" An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in int8. Parameters: model (`torch.nn.Module`): Input model """ # Create a copy of the model with init_empty_weights(): tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_params = find_tied_parameters(tied_model) # For compatibility with Accelerate < 0.18 if isinstance(tied_params, dict): tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys()) else: tied_keys = sum(tied_params, []) has_tied_params = len(tied_keys) > 0 # Check if it is a base model is_base_model = False if hasattr(model, "base_model_prefix"): is_base_model = not hasattr(model, model.base_model_prefix) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head list_modules = list(model.named_children()) list_last_module = [list_modules[-1][0]] # add last module together with tied weights intersection = set(list_last_module) - set(tied_keys) list_untouched = list(set(tied_keys)) + list(intersection) # remove ".weight" from the keys names_to_remove = [".weight", ".bias"] filtered_module_names = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: name = name.replace(name_to_remove, "") filtered_module_names.append(name) return filtered_module_names
Check if we have `bnb.nn.Linear4bit` layers inside our model
def has_4bit_bnb_layers(model): """Check if we have `bnb.nn.Linear4bit` layers inside our model""" # bitsandbytes will initialize CUDA on import, so it needs to be imported lazily import bitsandbytes as bnb for m in model.modules(): if isinstance(m, bnb.nn.Linear4bit): return True return False
Gets a class from a module by its name. Args: module (`torch.nn.Module`): The module to get the class from. name (`str`): The name of the class.
def get_module_class_from_name(module, name): """ Gets a class from a module by its name. Args: module (`torch.nn.Module`): The module to get the class from. name (`str`): The name of the class. """ modules_children = list(module.children()) if module.__class__.__name__ == name: return module.__class__ elif len(modules_children) == 0: return else: for child_module in modules_children: module_class = get_module_class_from_name(child_module, name) if module_class is not None: return module_class
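As an illustration (using a real `transformers` class name, though any module tree works), this is how a block class can be resolved from a string, e.g. when building an FSDP auto-wrap policy:

```python
from transformers import AutoModel

model = AutoModel.from_pretrained("bert-base-cased")
block_cls = get_module_class_from_name(model, "BertLayer")
print(block_cls)  # <class 'transformers.models.bert.modeling_bert.BertLayer'>
```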
Verifies that all keys and values in `current_env` do not contain illegal keys or values, and returns a list of strings as the result. Example: ```python >>> from accelerate.utils.environment import convert_dict_to_env_variables >>> env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": "<mything", "OTHER_ENV": "2"} >>> valid_env_items = convert_dict_to_env_variables(env) >>> print(valid_env_items) ["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"] ```
def convert_dict_to_env_variables(current_env: dict): """ Verifies that all keys and values in `current_env` do not contain illegal keys or values, and returns a list of strings as the result. Example: ```python >>> from accelerate.utils.environment import convert_dict_to_env_variables >>> env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": "<mything", "OTHER_ENV": "2"} >>> valid_env_items = convert_dict_to_env_variables(env) >>> print(valid_env_items) ["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"] ``` """ forbidden_chars = [";", "\n", "<", ">", " "] valid_env_items = [] for key, value in current_env.items(): if all(char not in (key + value) for char in forbidden_chars) and len(key) >= 1 and len(value) >= 1: valid_env_items.append(f"{key}={value}\n") else: logger.warning(f"WARNING: Skipping {key}={value} as it contains forbidden characters or missing values.") return valid_env_items
Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
def str_to_bool(value) -> int: """ Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`. """ value = value.lower() if value in ("y", "yes", "t", "true", "on", "1"): return 1 elif value in ("n", "no", "f", "false", "off", "0"): return 0 else: raise ValueError(f"invalid truth value {value}")
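A couple of quick usage examples showing the accepted spellings (anything outside these sets raises `ValueError`):

```python
assert str_to_bool("YES") == 1   # case-insensitive truthy spelling
assert str_to_bool("off") == 0   # falsy spelling
assert str_to_bool("1") == 1
```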
Returns the first non-negative integer value found in the environment for the keys in `env_keys`, or the default if none is set.
def get_int_from_env(env_keys, default): """Returns the first non-negative env value found in the `env_keys` list or the default.""" for e in env_keys: val = int(os.environ.get(e, -1)) if val >= 0: return val return default
Returns the truthy value for `key` from the environment if available, else the default.
def parse_flag_from_env(key, default=False): """Returns truthy value for `key` from the env if available else the default.""" value = os.environ.get(key, str(default)) return str_to_bool(value) == 1
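A short sketch of how the two helpers above are typically combined to read launcher-provided settings, assuming both are in scope; the environment values set here are purely illustrative:

```python
import os

os.environ["OMPI_COMM_WORLD_RANK"] = "3"      # illustrative: value an MPI launcher might set
os.environ["ACCELERATE_DEBUG_MODE"] = "true"  # illustrative

rank = get_int_from_env(["RANK", "OMPI_COMM_WORLD_RANK"], 0)
debug = parse_flag_from_env("ACCELERATE_DEBUG_MODE", default=False)
print(rank, debug)  # 3 True
```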
Checks if any of `library_names` are imported in the environment. Will return any names that are.
def are_libraries_initialized(*library_names: str) -> List[str]: """ Checks if any of `library_names` are imported in the environment. Will return any names that are. """ return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]
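For instance, a hedged sketch of warning when certain libraries were already imported before configuration (the library names are just examples):

```python
already_imported = are_libraries_initialized("bitsandbytes", "peft")
if already_imported:
    print(f"Already imported before configuration: {already_imported}")
```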
Returns the right nvidia-smi command based on the system.
def _nvidia_smi(): """ Returns the right nvidia-smi command based on the system. """ if platform.system() == "Windows": # If platform is Windows and nvidia-smi can't be found in path # try from system drive with default installation path command = which("nvidia-smi") if command is None: command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"] else: command = "nvidia-smi" return command
Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA. Largely based on the `gputil` library.
def get_gpu_info(): """ Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA. Largely based on the `gputil` library. """ # Returns as list of `n` GPUs and their names output = subprocess.check_output( [_nvidia_smi(), "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True ) output = output.strip() gpus = output.split(os.linesep) # Get names from output gpu_count = len(gpus) gpu_names = [gpu.split(",")[1].strip() for gpu in gpus] return gpu_names, gpu_count
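Assuming `nvidia-smi` is available on the machine, a quick usage sketch (note the function returns the names first, then the count):

```python
gpu_names, gpu_count = get_gpu_info()
print(f"Found {gpu_count} GPU(s): {gpu_names}")
```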
Returns the driver version. In the case of multiple GPUs, will return the first.
def get_driver_version(): """ Returns the driver version. In the case of multiple GPUs, will return the first. """ output = subprocess.check_output( [_nvidia_smi(), "--query-gpu=driver_version", "--format=csv,noheader"], universal_newlines=True ) output = output.strip() return output.split(os.linesep)[0]
Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after the 3090. Notably uses `nvidia-smi` instead of torch to not initialize CUDA.
def check_cuda_p2p_ib_support(): """ Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after the 3090. Notably uses `nvidia-smi` instead of torch to not initialize CUDA. """ try: device_names, device_count = get_gpu_info() # As new consumer GPUs get released, add them to `unsupported_devices` unsupported_devices = {"RTX 40"} if device_count > 1: if any( unsupported_device in device_name for device_name in device_names for unsupported_device in unsupported_devices ): # Check if they have the right driver version acceptable_driver_version = "550.40.07" current_driver_version = get_driver_version() if parse(current_driver_version) < parse(acceptable_driver_version): return False return True except Exception: pass return True
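One way the result of this check can be acted on, sketched here rather than taken from the source, is to disable the NCCL P2P and InfiniBand transports before initializing the process group:

```python
import os

if not check_cuda_p2p_ib_support():
    # Consumer RTX 40-series cards on older drivers cannot use these transports reliably.
    os.environ["NCCL_P2P_DISABLE"] = "1"
    os.environ["NCCL_IB_DISABLE"] = "1"
```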
Checks if all the current GPUs available support FP8. Notably must initialize `torch.cuda` to check.
def check_fp8_capability(): """ Checks if all the current GPUs available support FP8. Notably must initialize `torch.cuda` to check. """ cuda_device_capacity = torch.cuda.get_device_capability() return cuda_device_capacity >= (8, 9)
Returns various information about the environment in relation to CPU distributed training as a `CPUInformation` dataclass.
def get_cpu_distributed_information() -> CPUInformation: """ Returns various information about the environment in relation to CPU distributed training as a `CPUInformation` dataclass. """ information = {} information["rank"] = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) information["world_size"] = get_int_from_env( ["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1 ) information["local_rank"] = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) information["local_world_size"] = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) return CPUInformation(**information)
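A small sketch reading the resulting dataclass fields (outside a distributed launch the defaults of rank 0 and world size 1 apply):

```python
info = get_cpu_distributed_information()
print(f"rank {info.rank}/{info.world_size}, local rank {info.local_rank}/{info.local_world_size}")
```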
Overrides whatever NUMA affinity is set for the current process. This is very taxing and requires recalculating the affinity to set; ideally you should use `utils.environment.set_numa_affinity` instead. Args: local_process_index (int): The index of the current process on the current server. verbose (bool, *optional*): Whether to log out the assignment of each CPU. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True.
def override_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None: """ Overrides whatever NUMA affinity is set for the current process. This is very taxing and requires recalculating the affinity to set, ideally you should use `utils.environment.set_numa_affinity` instead. Args: local_process_index (int): The index of the current process on the current server. verbose (bool, *optional*): Whether to log out the assignment of each CPU. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True. """ if verbose is None: verbose = parse_flag_from_env("ACCELERATE_DEBUG_MODE", False) if torch.cuda.is_available(): from accelerate.utils import is_pynvml_available if not is_pynvml_available(): raise ImportError( "To set CPU affinity on CUDA GPUs the `pynvml` package must be available. (`pip install pynvml`)" ) import pynvml as nvml # The below code is based on https://github.com/NVIDIA/DeepLearningExamples/blob/master/TensorFlow2/LanguageModeling/BERT/gpu_affinity.py nvml.nvmlInit() num_elements = math.ceil(os.cpu_count() / 64) handle = nvml.nvmlDeviceGetHandleByIndex(local_process_index) affinity_string = "" for j in nvml.nvmlDeviceGetCpuAffinity(handle, num_elements): # assume nvml returns list of 64 bit ints affinity_string = f"{j:064b}{affinity_string}" affinity_list = [int(x) for x in affinity_string] affinity_list.reverse() # so core 0 is the 0th element affinity_to_set = [i for i, e in enumerate(affinity_list) if e != 0] os.sched_setaffinity(0, affinity_to_set) if verbose: cpu_cores = os.sched_getaffinity(0) logger.info(f"Assigning {len(cpu_cores)} cpu cores to process {local_process_index}: {cpu_cores}")
Assigns the current process to a specific NUMA node. Ideally most efficient when having at least 2 CPUs per node. This result is cached between calls. If you want to override it, please use `accelerate.utils.environment.override_numa_affinity`. Args: local_process_index (int): The index of the current process on the current server. verbose (bool, *optional*): Whether to print the new cpu cores assignment for each process. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True.
def set_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None: """ Assigns the current process to a specific NUMA node. Ideally most efficient when having at least 2 CPUs per node. This result is cached between calls. If you want to override it, please use `accelerate.utils.environment.override_numa_affinity`. Args: local_process_index (int): The index of the current process on the current server. verbose (bool, *optional*): Whether to print the new cpu cores assignment for each process. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True. """ override_numa_affinity(local_process_index=local_process_index, verbose=verbose)
Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda uninitialized.
def is_cuda_available(): """ Checks if `cuda` is available via an `nvml-based` check which won't trigger the drivers and leave cuda uninitialized. """ pytorch_nvml_based_cuda_check_previous_value = os.environ.get("PYTORCH_NVML_BASED_CUDA_CHECK") try: os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = str(1) available = torch.cuda.is_available() finally: if pytorch_nvml_based_cuda_check_previous_value: os.environ["PYTORCH_NVML_BASED_CUDA_CHECK"] = pytorch_nvml_based_cuda_check_previous_value else: os.environ.pop("PYTORCH_NVML_BASED_CUDA_CHECK", None) return available
Checks if `torch_xla` is installed and potentially if a TPU is in the environment
def is_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" warnings.warn( "`is_tpu_available` is deprecated and will be removed in v0.27.0. " "Please use the `is_torch_xla_available` instead.", FutureWarning, ) # Due to bugs on the amp series GPUs, we disable torch-xla on them if is_cuda_available(): return False if check_device: if _tpu_available: try: # Will raise a RuntimeError if no XLA configuration is found _ = xm.xla_device() return True except RuntimeError: return False return _tpu_available
Check if `torch_xla` is available. To train a native PyTorch job in an environment with torch_xla installed, set the `USE_TORCH_XLA` environment variable to false.
def is_torch_xla_available(check_is_tpu=False, check_is_gpu=False): """ Check if `torch_xla` is available. To train a native pytorch job in an environment with torch xla installed, set the USE_TORCH_XLA to false. """ assert not (check_is_tpu and check_is_gpu), "The check_is_tpu and check_is_gpu cannot both be true." if not _torch_xla_available: return False elif check_is_gpu: return torch_xla.runtime.device_type() in ["GPU", "CUDA"] elif check_is_tpu: return torch_xla.runtime.device_type() == "TPU" return True
Checks if bf16 is supported, optionally ignoring the TPU
def is_bf16_available(ignore_tpu=False): "Checks if bf16 is supported, optionally ignoring the TPU" if is_torch_xla_available(check_is_tpu=True): return not ignore_tpu if is_cuda_available(): return torch.cuda.is_bf16_supported() if is_mps_available(): return False return True
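A hedged sketch of using the check to pick a mixed-precision mode (assuming fp16 is an acceptable fallback on the target hardware):

```python
from accelerate import Accelerator

mixed_precision = "bf16" if is_bf16_available() else "fp16"
accelerator = Accelerator(mixed_precision=mixed_precision)
```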
Checks if `torch_mlu` is installed and potentially if an MLU is in the environment
def is_mlu_available(check_device=False): "Checks if `torch_mlu` is installed and potentially if a MLU is in the environment" if importlib.util.find_spec("torch_mlu") is None: return False import torch import torch_mlu # noqa: F401 if check_device: try: # Will raise a RuntimeError if no MLU is found _ = torch.mlu.device_count() return torch.mlu.is_available() except RuntimeError: return False return hasattr(torch, "mlu") and torch.mlu.is_available()
Checks if `torch_npu` is installed and potentially if an NPU is in the environment
def is_npu_available(check_device=False): "Checks if `torch_npu` is installed and potentially if a NPU is in the environment" if importlib.util.find_spec("torch") is None or importlib.util.find_spec("torch_npu") is None: return False import torch import torch_npu # noqa: F401 if check_device: try: # Will raise a RuntimeError if no NPU is found _ = torch.npu.device_count() return torch.npu.is_available() except RuntimeError: return False return hasattr(torch, "npu") and torch.npu.is_available()
Checks if an XPU is available (requires `intel_extension_for_pytorch`), unless the user has explicitly disabled it via the `ACCELERATE_USE_XPU` environment variable
def is_xpu_available(check_device=False): "check if user disables it explicitly" if not parse_flag_from_env("ACCELERATE_USE_XPU", default=True): return False "Checks if `intel_extension_for_pytorch` is installed and potentially if a XPU is in the environment" if is_ipex_available(): import torch if is_torch_version("<=", "1.12"): return False else: return False import intel_extension_for_pytorch # noqa: F401 if check_device: try: # Will raise a RuntimeError if no XPU is found _ = torch.xpu.device_count() return torch.xpu.is_available() except RuntimeError: return False return hasattr(torch, "xpu") and torch.xpu.is_available()
Filters out all `accelerate` specific args
def _filter_args(args, parser, default_args=[]): """ Filters out all `accelerate` specific args """ new_args, _ = parser.parse_known_args(default_args) for key, value in vars(args).items(): if key in vars(new_args).keys(): setattr(new_args, key, value) return new_args
Determines the executable and argument names for mpirun, based on the type of install. The supported MPI programs are: OpenMPI, Intel MPI, or MVAPICH. Returns: Program name and arg names for hostfile, num processes, and processes per node
def _get_mpirun_args(): """ Determines the executable and argument names for mpirun, based on the type of install. The supported MPI programs are: OpenMPI, Intel MPI, or MVAPICH. Returns: Program name and arg names for hostfile, num processes, and processes per node """ # Find the MPI program name mpi_apps = [x for x in ["mpirun", "mpiexec"] if which(x)] if len(mpi_apps) == 0: raise OSError("mpirun or mpiexec were not found. Ensure that Intel MPI, Open MPI, or MVAPICH are installed.") # Call the app with the --version flag to determine which MPI app is installed mpi_app = mpi_apps[0] mpirun_version = subprocess.check_output([mpi_app, "--version"]) if b"Open MPI" in mpirun_version: return mpi_app, "--hostfile", "-n", "--npernode" else: # Intel MPI and MVAPICH both use the same arg names return mpi_app, "-f", "-n", "-ppn"
Prepares and returns the command list and an environment with the correct simple launcher environment variables.
def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: """ Prepares and returns the command list and an environment with the correct simple launcher environment variables. """ cmd = [] if args.no_python and args.module: raise ValueError("--module and --no_python cannot be used together") if args.mpirun_hostfile is not None: mpi_app_name, hostfile_arg, num_proc_arg, proc_per_node_arg = _get_mpirun_args() mpirun_ccl = getattr(args, "mpirun_ccl", None) num_machines = args.num_machines num_processes = getattr(args, "num_processes", None) nproc_per_node = str(num_processes // num_machines) if num_processes and num_machines else "1" cmd += [mpi_app_name, hostfile_arg, args.mpirun_hostfile, proc_per_node_arg, nproc_per_node] if num_processes: cmd += [num_proc_arg, str(num_processes)] if not args.no_python: cmd.append(sys.executable) if args.module: cmd.append("-m") cmd.append(args.training_script) cmd.extend(args.training_script_args) current_env = os.environ.copy() current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu) if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" if args.gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = args.gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = args.gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = args.gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids if args.num_machines > 1: current_env["MASTER_ADDR"] = args.main_process_ip current_env["MASTER_PORT"] = str(args.main_process_port) if args.mpirun_hostfile is not None: current_env["CCL_WORKER_COUNT"] = mpirun_ccl elif args.num_processes > 1: current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1" current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500" try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." ) current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) if is_ipex_available(): current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower() current_env["ACCELERATE_USE_XPU"] = str(args.use_xpu).lower() if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" return cmd, current_env
Prepares and returns an environment with the correct multi-GPU environment variables.
def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]: """ Prepares and returns an environment with the correct multi-GPU environment variables. """ num_processes = args.num_processes num_machines = args.num_machines main_process_ip = args.main_process_ip main_process_port = args.main_process_port if num_machines > 1: args.nproc_per_node = str(num_processes // num_machines) args.nnodes = str(num_machines) args.node_rank = int(args.machine_rank) if getattr(args, "same_network", False): args.master_addr = str(main_process_ip) args.master_port = str(main_process_port) else: args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" else: args.nproc_per_node = str(num_processes) if main_process_port is not None: args.master_port = str(main_process_port) if main_process_port is None: main_process_port = 29500 # only need to check port availability in main process, in case we have to start multiple launchers on the same machine # for some reasons like splitting log files. need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 if need_port_check and is_port_in_use(main_process_port): raise ConnectionError( f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." ) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: args.module = True elif args.no_python: args.no_python = True current_env = os.environ.copy() if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" gpu_ids = getattr(args, "gpu_ids", "all") if gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids mixed_precision = args.mixed_precision.lower() try: mixed_precision = PrecisionType(mixed_precision) except ValueError: raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.") current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." 
) current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) if args.use_fsdp: current_env["ACCELERATE_USE_FSDP"] = "true" if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states: raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`") current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy) current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower() current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params) if args.fsdp_auto_wrap_policy is not None: current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy) if args.fsdp_transformer_layer_cls_to_wrap is not None: current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap) if args.fsdp_backward_prefetch_policy is not None: warnings.warn( "`fsdp_backward_prefetch_policy` is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use" " `fsdp_backward_prefetch` instead", FutureWarning, ) args.fsdp_backward_prefetch = args.fsdp_backward_prefetch_policy if args.fsdp_backward_prefetch is not None: current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch) if args.fsdp_state_dict_type is not None: current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type) current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower() current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower() current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower() current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower() if args.use_megatron_lm: prefix = "MEGATRON_LM_" current_env["ACCELERATE_USE_MEGATRON_LM"] = "true" current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree) current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree) current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping) if args.megatron_lm_num_micro_batches is not None: current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches) if args.megatron_lm_sequence_parallelism is not None: current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism) if args.megatron_lm_recompute_activations is not None: current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations) if args.megatron_lm_use_distributed_optimizer is not None: current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer) current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" return current_env
Prepares and returns the command list and an environment with the correct DeepSpeed environment variables.
def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: """ Prepares and returns the command list and an environment with the correct DeepSpeed environment variables. """ num_processes = args.num_processes num_machines = args.num_machines main_process_ip = args.main_process_ip main_process_port = args.main_process_port cmd = None # make sure launcher is not None if args.deepspeed_multinode_launcher is None: # set to default pdsh args.deepspeed_multinode_launcher = DEEPSPEED_MULTINODE_LAUNCHERS[0] if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: cmd = ["deepspeed", "--no_local_rank"] cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)]) if args.deepspeed_exclusion_filter is not None: cmd.extend( [ "--exclude", str(args.deepspeed_exclusion_filter), ] ) elif args.deepspeed_inclusion_filter is not None: cmd.extend( [ "--include", str(args.deepspeed_inclusion_filter), ] ) else: cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)]) if main_process_ip: cmd.extend(["--master_addr", str(main_process_ip)]) cmd.extend(["--master_port", str(main_process_port)]) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: cmd.append("--module") elif args.no_python: cmd.append("--no_python") cmd.append(args.training_script) cmd.extend(args.training_script_args) elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]: args.nproc_per_node = str(num_processes // num_machines) args.nnodes = str(num_machines) args.node_rank = int(args.machine_rank) if getattr(args, "same_network", False): args.master_addr = str(main_process_ip) args.master_port = str(main_process_port) else: args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" else: args.nproc_per_node = str(num_processes) if main_process_port is not None: args.master_port = str(main_process_port) if main_process_port is None: main_process_port = 29500 # only need to check port availability in main process, in case we have to start multiple launchers on the same machine # for some reasons like splitting log files. need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 if need_port_check and is_port_in_use(main_process_port): raise ConnectionError( f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." 
) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: args.module = True elif args.no_python: args.no_python = True current_env = os.environ.copy() if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" gpu_ids = getattr(args, "gpu_ids", "all") if gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath(".")) current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower() current_env["ACCELERATE_USE_DEEPSPEED"] = "true" if args.zero_stage is not None: current_env["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage) if args.gradient_accumulation_steps is not None: current_env["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps) if args.gradient_clipping is not None: current_env["ACCELERATE_GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower() if args.offload_optimizer_device is not None: current_env["ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower() if args.offload_param_device is not None: current_env["ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower() if args.zero3_init_flag is not None: current_env["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower() if args.zero3_save_16bit_model is not None: current_env["ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower() if args.deepspeed_config_file is not None: current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file) if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" if args.deepspeed_moe_layer_cls_names is not None: current_env["ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES"] = str(args.deepspeed_moe_layer_cls_names) return cmd, current_env
Prepares and returns an environment with the correct TPU environment variables.
def prepare_tpu( args: argparse.Namespace, current_env: Dict[str, str], pod: bool = False ) -> Tuple[argparse.Namespace, Dict[str, str]]: """ Prepares and returns an environment with the correct TPU environment variables. """ if args.mixed_precision == "bf16" and is_torch_xla_available(check_is_tpu=True): if args.downcast_bf16: current_env["XLA_DOWNCAST_BF16"] = "1" else: current_env["XLA_USE_BF16"] = "1" if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" if pod: # Take explicit args and set them up for XLA args.vm = args.tpu_vm args.tpu = args.tpu_name return args, current_env
Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the caller to set it in os.environ.
def env_var_path_add(env_var_name, path_to_add): """ Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the caller to set it in os.environ. """ paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0] paths.append(str(path_to_add)) return ":".join(paths)
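A usage sketch mirroring how the DeepSpeed launch path above extends `PYTHONPATH`; note the helper only builds the string, the caller still assigns it:

```python
import os

os.environ["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath("."))
```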
Build the model.
def model_provider_func(pre_process=True, post_process=True, add_encoder=True, add_decoder=True): """Build the model.""" args = get_args() mode = "pre-training" if args.pretraining_flag else "fine-tuning" if args.rank == 0: print(f"Building {args.model_type_name} model in the {mode} mode.") print( "The Megatron LM model weights are initialized at random in `accelerator.prepare`. " "Please use `accelerator.load_checkpoint` to load a pre-trained checkpoint matching the distributed setup." ) if args.model_type_name == "bert": if args.pretraining_flag: num_tokentypes = 2 if args.bert_binary_head else 0 model = BertModel( num_tokentypes=num_tokentypes, add_binary_head=args.bert_binary_head, parallel_output=True, pre_process=pre_process, post_process=post_process, ) else: model = Classification( num_classes=args.num_labels, num_tokentypes=2, pre_process=pre_process, post_process=post_process ) elif args.model_type_name == "gpt": model = GPTModel(num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process) elif args.model_type_name == "t5": model = T5Model( num_tokentypes=0, parallel_output=True, pre_process=pre_process, post_process=post_process, add_encoder=add_encoder, add_decoder=add_decoder, ) else: raise ValueError(f"Unsupported model type: {args.model_type_name}") return model
Average losses across data parallel group. Args: losses (List[Tensor]): List of losses to average across data parallel group.
def avg_losses_across_data_parallel_group(losses): """ Average losses across data parallel group. Args: losses (List[Tensor]): List of losses to average across data parallel group. """ return average_losses_across_data_parallel_group(losses)
Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather across data parallel ranks.
def gather_across_data_parallel_groups(tensor): """ Recursively gather tensor in a nested list/tuple/dictionary of tensors from data parallel ranks. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather across data parallel ranks. """ def _gpu_gather_one(tensor): if tensor.ndim == 0: tensor = tensor.clone()[None] output_tensors = [ torch.empty_like(tensor) for _ in range(torch.distributed.get_world_size(group=mpu.get_data_parallel_group())) ] torch.distributed.all_gather(output_tensors, tensor, group=mpu.get_data_parallel_group()) return torch.cat(output_tensors, dim=0) return recursively_apply(_gpu_gather_one, tensor, error_on_other_type=True)
Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`. Returned objects should be reassigned to the same variables. Args: objects (`Iterable`): An iterable of objects Returns: A list of `None` objects to replace `objects` Example: ```python >>> import torch >>> from accelerate.utils import release_memory >>> a = torch.ones(1000, 1000).cuda() >>> b = torch.ones(1000, 1000).cuda() >>> a, b = release_memory(a, b) ```
def release_memory(*objects): """ Releases memory from `objects` by setting them to `None` and calls `gc.collect()` and `torch.cuda.empty_cache()`. Returned objects should be reassigned to the same variables. Args: objects (`Iterable`): An iterable of objects Returns: A list of `None` objects to replace `objects` Example: ```python >>> import torch >>> from accelerate.utils import release_memory >>> a = torch.ones(1000, 1000).cuda() >>> b = torch.ones(1000, 1000).cuda() >>> a, b = release_memory(a, b) ``` """ if not isinstance(objects, list): objects = list(objects) for i in range(len(objects)): objects[i] = None gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_mlu_available(): torch.mlu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() elif is_mps_available(): torch.mps.empty_cache() else: torch.cuda.empty_cache() return objects
Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory Args: exception (`Exception`): An exception
def should_reduce_batch_size(exception: Exception) -> bool: """ Checks if `exception` relates to CUDA out-of-memory, CUDNN not supported, or CPU out-of-memory Args: exception (`Exception`): An exception """ _statements = [ "CUDA out of memory.", # CUDA OOM "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU "DefaultCPUAllocator: can't allocate memory", # CPU OOM ] if isinstance(exception, RuntimeError) and len(exception.args) == 1: return any(err in exception.args[0] for err in _statements) return False
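A small illustration of the check above, assuming `should_reduce_batch_size` is in scope; the error messages are made up but mimic what PyTorch raises:

# A RuntimeError carrying a CUDA OOM message triggers the batch-size reduction...
oom = RuntimeError("CUDA out of memory. Tried to allocate 20.00 MiB")
assert should_reduce_batch_size(oom)

# ...while unrelated exceptions are left for the caller (e.g. `find_executable_batch_size`) to re-raise.
assert not should_reduce_batch_size(ValueError("bad input"))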
A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function` `function` must take in a `batch_size` parameter as its first argument. Args: function (`callable`, *optional*): A function to wrap starting_batch_size (`int`, *optional*): The batch size to try and fit into memory Example: ```python >>> from accelerate.utils import find_executable_batch_size >>> @find_executable_batch_size(starting_batch_size=128) ... def train(batch_size, model, optimizer): ... ... >>> train(model, optimizer) ```
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128): """ A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or CUDNN, the batch size is cut in half and passed to `function` `function` must take in a `batch_size` parameter as its first argument. Args: function (`callable`, *optional*): A function to wrap starting_batch_size (`int`, *optional*): The batch size to try and fit into memory Example: ```python >>> from accelerate.utils import find_executable_batch_size >>> @find_executable_batch_size(starting_batch_size=128) ... def train(batch_size, model, optimizer): ... ... >>> train(model, optimizer) ``` """ if function is None: return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size) batch_size = starting_batch_size def decorator(*args, **kwargs): nonlocal batch_size gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_mlu_available(): torch.mlu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() params = list(inspect.signature(function).parameters.keys()) # Guard against user error if len(params) < (len(args) + 1): arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])]) raise TypeError( f"Batch size was passed into `{function.__name__}` as the first argument when called." f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`" ) while True: if batch_size == 0: raise RuntimeError("No executable batch size found, reached zero.") try: return function(batch_size, *args, **kwargs) except Exception as e: if should_reduce_batch_size(e): gc.collect() if is_xpu_available(): torch.xpu.empty_cache() elif is_mlu_available(): torch.mlu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() batch_size //= 2 else: raise return decorator
Utility method to check if two `torch` devices are the same. When dealing with CUDA devices, torch returns `False`
for `torch.device("cuda") == torch.device("cuda:0")`, whereas they should be treated as the same device.

Args:
    first_device (`torch.device`):
        First device to check
    second_device (`torch.device`):
        Second device to check
def check_device_same(first_device, second_device): """ Utility method to check if two `torch` devices are similar. When dealing with CUDA devices, torch throws `False` for `torch.device("cuda") == torch.device("cuda:0")` whereas they should be the same Args: first_device (`torch.device`): First device to check second_device (`torch.device`): Second device to check """ if first_device.type != second_device.type: return False if first_device.type == "cuda" and first_device.index is None: # In case the first_device is a cuda device and have # the index attribute set to `None`, default it to `0` first_device = torch.device("cuda", index=0) if second_device.type == "cuda" and second_device.index is None: # In case the second_device is a cuda device and have # the index attribute set to `None`, default it to `0` second_device = torch.device("cuda", index=0) return first_device == second_device
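A quick sketch of the index normalization performed above, assuming `check_device_same` is in scope; constructing `torch.device` objects does not require a GPU, so this runs anywhere:

import torch

# Plain torch comparison treats an unindexed "cuda" and "cuda:0" as different devices...
assert torch.device("cuda") != torch.device("cuda:0")
# ...while the helper fills in the default index 0 before comparing.
assert check_device_same(torch.device("cuda"), torch.device("cuda:0"))
assert not check_device_same(torch.device("cpu"), torch.device("cuda:0"))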
Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).

Args:
    size (`int` or `str`): The size to convert. Will be directly returned if an `int`.

Example:

```py
>>> convert_file_size_to_int("1MiB")
1048576
```
def convert_file_size_to_int(size: Union[int, str]): """ Converts a size expressed as a string with digits an unit (like `"5MB"`) to an integer (in bytes). Args: size (`int` or `str`): The size to convert. Will be directly returned if an `int`. Example: ```py >>> convert_file_size_to_int("1MiB") 1048576 ``` """ mem_size = -1 err_msg = ( f"`size` {size} is not in a valid format. Use an integer for bytes, or a string with an unit (like '5.0GB')." ) try: if isinstance(size, int): mem_size = size elif size.upper().endswith("GIB"): mem_size = int(float(size[:-3]) * (2**30)) elif size.upper().endswith("MIB"): mem_size = int(float(size[:-3]) * (2**20)) elif size.upper().endswith("KIB"): mem_size = int(float(size[:-3]) * (2**10)) elif size.upper().endswith("GB"): int_size = int(float(size[:-2]) * (10**9)) mem_size = int_size // 8 if size.endswith("b") else int_size elif size.upper().endswith("MB"): int_size = int(float(size[:-2]) * (10**6)) mem_size = int_size // 8 if size.endswith("b") else int_size elif size.upper().endswith("KB"): int_size = int(float(size[:-2]) * (10**3)) mem_size = int_size // 8 if size.endswith("b") else int_size except ValueError: raise ValueError(err_msg) if mem_size < 0: raise ValueError(err_msg) return mem_size
Returns the size (in bytes) occupied by one parameter of type `dtype`. Example: ```py >>> dtype_byte_size(torch.float32) 4 ```
def dtype_byte_size(dtype: torch.dtype): """ Returns the size (in bytes) occupied by one parameter of type `dtype`. Example: ```py >>> dtype_byte_size(torch.float32) 4 ``` """ if dtype == torch.bool: return 1 / 8 elif dtype == CustomDtype.INT2: return 1 / 4 elif dtype == CustomDtype.INT4: return 1 / 2 elif dtype == CustomDtype.FP8: return 1 bit_search = re.search(r"[^\d](\d+)$", str(dtype)) if bit_search is None: raise ValueError(f"`dtype` is not a valid dtype: {dtype}.") bit_size = int(bit_search.groups()[0]) return bit_size // 8
Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id.
def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: """ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. """ _SIZE = { torch.int64: 8, torch.float32: 4, torch.int32: 4, torch.bfloat16: 2, torch.float16: 2, torch.int16: 2, torch.uint8: 1, torch.int8: 1, torch.bool: 1, torch.float64: 8, } try: storage_ptr = tensor.untyped_storage().data_ptr() storage_size = tensor.untyped_storage().nbytes() except Exception: # Fallback for torch==1.10 try: storage_ptr = tensor.storage().data_ptr() storage_size = tensor.storage().size() * _SIZE[tensor.dtype] except NotImplementedError: # Fallback for meta storage storage_ptr = 0 # On torch >=2.0 this is the tensor size storage_size = tensor.nelement() * _SIZE[tensor.dtype] return tensor.device, storage_ptr, storage_size
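A small sketch of the intended behaviour, assuming `id_tensor_storage` is in scope: a view shares its storage identifier with the base tensor, while a clone does not:

import torch

base = torch.zeros(4, 4)
view = base[:2]       # a view, shares the underlying storage
clone = base.clone()  # a copy, owns its own storage

assert id_tensor_storage(base) == id_tensor_storage(view)
assert id_tensor_storage(base) != id_tensor_storage(clone)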
Splits a model state dictionary into sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
given size.

The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
[6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].

<Tip warning={true}>

If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will
have a size greater than `max_shard_size`.

</Tip>

Args:
    state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
    max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
        The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
        (like `"5MB"`).
    weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`):
        The name of the model save file.
def shard_checkpoint( state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME ): """ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB]. <Tip warning={true}> If one of the model's weight is bigger that `max_sahrd_size`, it will end up in its own sub-checkpoint which will have a size greater than `max_shard_size`. </Tip> Args: state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`): The name of the model save file. """ max_shard_size = convert_file_size_to_int(max_shard_size) sharded_state_dicts = [{}] last_block_size = 0 total_size = 0 storage_id_to_block = {} for key, weight in state_dict.items(): # when bnb serialization is used the weights in the state dict can be strings # check: https://github.com/huggingface/transformers/pull/24416 for more details if isinstance(weight, str): continue else: storage_id = id_tensor_storage(weight) # If a `weight` shares the same underlying storage as another tensor, we put `weight` in the same `block` if storage_id in storage_id_to_block: block_id = storage_id_to_block[storage_id] sharded_state_dicts[block_id][key] = weight continue weight_size = weight.numel() * dtype_byte_size(weight.dtype) # If this weight is going to tip up over the maximal size, we split. if last_block_size + weight_size > max_shard_size: sharded_state_dicts.append({}) last_block_size = 0 sharded_state_dicts[-1][key] = weight last_block_size += weight_size total_size += weight_size storage_id_to_block[storage_id] = len(sharded_state_dicts) - 1 # If we only have one shard, we return it if len(sharded_state_dicts) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin") shard_file = shard_file.replace( ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors" ) shards[shard_file] = shard for key in shard.keys(): weight_map[key] = shard_file # Add the metadata metadata = {"total_size": total_size} index = {"metadata": metadata, "weight_map": weight_map} return shards, index
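A toy sharding sketch, assuming `shard_checkpoint` and the helpers above are in scope; the layer names and the 10MB limit are arbitrary:

import torch

# Five ~4MB fp32 tensors (1000 * 1000 * 4 bytes each).
state_dict = {f"layer_{i}.weight": torch.zeros(1000, 1000) for i in range(5)}

shards, index = shard_checkpoint(state_dict, max_shard_size="10MB")
# With a 10MB budget the tensors are grouped greedily: 2 + 2 + 1 -> three shard files.
print(list(shards.keys()))
print(index["metadata"]["total_size"])  # 20000000 bytes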
A helper function to set a given tensor (parameter or buffer) of a module on a specific device (note that doing
`param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function).

Args:
    module (`torch.nn.Module`):
        The module in which the tensor we want to move lives.
    tensor_name (`str`):
        The full name of the parameter/buffer.
    device (`int`, `str` or `torch.device`):
        The device on which to set the tensor.
    value (`torch.Tensor`, *optional*):
        The value of the tensor (useful when going from the meta device to any other device).
    dtype (`torch.dtype`, *optional*):
        If passed along, the value of the parameter will be cast to this `dtype`. Otherwise, `value` will be cast to
        the dtype of the existing parameter in the model.
    fp16_statistics (`torch.HalfTensor`, *optional*):
        The list of fp16 statistics to set on the module, used for 8 bit model serialization.
    tied_params_map (Dict[int, Dict[torch.device, torch.Tensor]], *optional*, defaults to `None`):
        A map of current data pointers to dictionaries of devices to already dispatched tied weights. For a given
        execution device, this parameter is useful to reuse the first available pointer of a shared weight on the
        device for all others, instead of duplicating memory.
def set_module_tensor_to_device( module: nn.Module, tensor_name: str, device: Union[int, str, torch.device], value: Optional[torch.Tensor] = None, dtype: Optional[Union[str, torch.dtype]] = None, fp16_statistics: Optional[torch.HalfTensor] = None, tied_params_map: Optional[Dict[int, Dict[torch.device, torch.Tensor]]] = None, ): """ A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). Args: module (`torch.nn.Module`): The module in which the tensor we want to move lives. tensor_name (`str`): The full name of the parameter/buffer. device (`int`, `str` or `torch.device`): The device on which to set the tensor. value (`torch.Tensor`, *optional*): The value of the tensor (useful when going from the meta device to any other device). dtype (`torch.dtype`, *optional*): If passed along the value of the parameter will be cast to this `dtype`. Otherwise, `value` will be cast to the dtype of the existing parameter in the model. fp16_statistics (`torch.HalfTensor`, *optional*): The list of fp16 statistics to set on the module, used for 8 bit model serialization. tied_params_map (Dict[int, Dict[torch.device, torch.Tensor]], *optional*, defaults to `None`): A map of current data pointers to dictionaries of devices to already dispatched tied weights. For a given execution device, this parameter is useful to reuse the first available pointer of a shared weight on the device for all others, instead of duplicating memory. """ # Recurse if needed if "." in tensor_name: splits = tensor_name.split(".") for split in splits[:-1]: new_module = getattr(module, split) if new_module is None: raise ValueError(f"{module} has no attribute {split}.") module = new_module tensor_name = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") is_buffer = tensor_name in module._buffers old_value = getattr(module, tensor_name) # Treat the case where old_value (or a custom `value`, typically offloaded to RAM/disk) belongs to a tied group, and one of the weight # in the tied group has already been dispatched to the device, by avoiding reallocating memory on the device and just copying the pointer. if ( value is not None and tied_params_map is not None and value.data_ptr() in tied_params_map and device in tied_params_map[value.data_ptr()] ): module._parameters[tensor_name] = tied_params_map[value.data_ptr()][device] return elif ( tied_params_map is not None and old_value.data_ptr() in tied_params_map and device in tied_params_map[old_value.data_ptr()] ): module._parameters[tensor_name] = tied_params_map[old_value.data_ptr()][device] return if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") if value is not None: if old_value.shape != value.shape: raise ValueError( f'Trying to set a tensor of shape {value.shape} in "{tensor_name}" (which has shape {old_value.shape}), this look incorrect.' 
) if dtype is None: # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model value = value.to(old_value.dtype) elif not str(value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): value = value.to(dtype) param = module._parameters[tensor_name] if tensor_name in module._parameters else None param_cls = type(param) device_quantization = None with torch.no_grad(): # leave it on cpu first before moving them to cuda # # fix the case where the device is meta, we don't want to put it on cpu because there is no data =0 if ( param is not None and param.device.type != "cuda" and torch.device(device).type == "cuda" and param_cls.__name__ in ["Int8Params", "FP4Params", "Params4bit"] ): device_quantization = device device = "cpu" # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). if isinstance(device, int): if is_npu_available(): device = f"npu:{device}" elif is_mlu_available(): device = f"mlu:{device}" elif is_xpu_available(): device = f"xpu:{device}" if value is None: new_value = old_value.to(device) if dtype is not None and device in ["meta", torch.device("meta")]: if not str(old_value.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): new_value = new_value.to(dtype) if not is_buffer: module._parameters[tensor_name] = param_cls(new_value, requires_grad=old_value.requires_grad) elif isinstance(value, torch.Tensor): new_value = value.to(device) else: new_value = torch.tensor(value, device=device) if device_quantization is not None: device = device_quantization if is_buffer: module._buffers[tensor_name] = new_value elif value is not None or not check_device_same(torch.device(device), module._parameters[tensor_name].device): param_cls = type(module._parameters[tensor_name]) kwargs = module._parameters[tensor_name].__dict__ if param_cls.__name__ in ["Int8Params", "FP4Params"]: if param_cls.__name__ == "Int8Params" and new_value.dtype == torch.float32: # downcast to fp16 if any - needed for 8bit serialization new_value = new_value.to(torch.float16) # quantize module that are going to stay on the cpu so that we offload quantized weights if device == "cpu" and param_cls.__name__ == "Int8Params": new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(0).to("cpu") new_value.CB = new_value.CB.to("cpu") new_value.SCB = new_value.SCB.to("cpu") else: new_value = param_cls(new_value, requires_grad=old_value.requires_grad, **kwargs).to(device) elif param_cls.__name__ in ["QTensor", "QBitsTensor"]: new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad).to(device) else: new_value = param_cls(new_value, requires_grad=old_value.requires_grad).to(device) module._parameters[tensor_name] = new_value if fp16_statistics is not None: module._parameters[tensor_name].SCB = fp16_statistics.to(device) del fp16_statistics # as we put the weight to meta, it doesn't have SCB attr anymore. 
    # make sure that it is not a meta weight
    if (
        module.__class__.__name__ == "Linear8bitLt"
        and getattr(module.weight, "SCB", None) is None
        and str(module.weight.device) != "meta"
    ):
        # quantize only if necessary
        device_index = torch.device(device).index if torch.device(device).type == "cuda" else None
        if not getattr(module.weight, "SCB", None) and device_index is not None:
            if module.bias is not None and module.bias.device.type != "meta":
                # if a bias exists, we need to wait until the bias is set on the correct device
                module = module.cuda(device_index)
            elif module.bias is None:
                # if no bias exists, we can quantize right away
                module = module.cuda(device_index)
    elif module.__class__.__name__ == "Linear4bit" and getattr(module.weight, "quant_state", None) is None:
        # quantize only if necessary
        device_index = torch.device(device).index if torch.device(device).type == "cuda" else None
        if not getattr(module.weight, "quant_state", None) and device_index is not None:
            module.weight = module.weight.cuda(device_index)
    # clean pre and post forward hooks
    if device != "cpu":
        if is_npu_available():
            torch.npu.empty_cache()
        elif is_mlu_available():
            torch.mlu.empty_cache()
        elif is_xpu_available():
            torch.xpu.empty_cache()
        else:
            torch.cuda.empty_cache()

    # When handling tied weights, we update tied_params_map to keep track of the tied weights that have already been
    # allocated on the device in order to avoid duplicating memory, see above.
    if (
        tied_params_map is not None
        and old_value.data_ptr() in tied_params_map
        and device not in tied_params_map[old_value.data_ptr()]
    ):
        tied_params_map[old_value.data_ptr()][device] = new_value
    elif (
        value is not None
        and tied_params_map is not None
        and value.data_ptr() in tied_params_map
        and device not in tied_params_map[value.data_ptr()]
    ):
        tied_params_map[value.data_ptr()][device] = new_value
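A minimal sketch of the common meta-to-CPU use case, assuming `set_module_tensor_to_device` is in scope; `init_empty_weights` is the public Accelerate context manager for allocating a model without data:

import torch
from torch import nn
from accelerate import init_empty_weights

with init_empty_weights():
    model = nn.Linear(4, 4)  # parameters live on the meta device, with no data

# Materialize each parameter on CPU with an explicit value.
set_module_tensor_to_device(model, "weight", "cpu", value=torch.randn(4, 4))
set_module_tensor_to_device(model, "bias", "cpu", value=torch.zeros(4))
print(model.weight.device)  # cpu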
A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True`
it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.

Args:
    module (`torch.nn.Module`):
        The module we want the tensors on.
    include_buffers (`bool`, *optional*, defaults to `True`):
        Whether or not to include the buffers in the result.
    recurse (`bool`, *optional*, defaults to `False`):
        Whether or not to go look in every submodule or just return the direct parameters and buffers.
    remove_non_persistent (`bool`, *optional*, defaults to `False`):
        Whether or not to remove the non-persistent buffers from the buffers. Useful only when
        `include_buffers=True`.
def named_module_tensors(
    module: nn.Module, include_buffers: bool = True, recurse: bool = False, remove_non_persistent: bool = False
):
    """
    A helper function that gathers all the tensors (parameters + buffers) of a given module. If `include_buffers=True`
    it's the same as doing `module.named_parameters(recurse=recurse) + module.named_buffers(recurse=recurse)`.

    Args:
        module (`torch.nn.Module`):
            The module we want the tensors on.
        include_buffers (`bool`, *optional*, defaults to `True`):
            Whether or not to include the buffers in the result.
        recurse (`bool`, *optional*, defaults to `False`):
            Whether or not to go look in every submodule or just return the direct parameters and buffers.
        remove_non_persistent (`bool`, *optional*, defaults to `False`):
            Whether or not to remove the non-persistent buffers from the buffers. Useful only when
            `include_buffers=True`.
    """
    yield from module.named_parameters(recurse=recurse)

    if include_buffers:
        non_persistent_buffers = set()
        if remove_non_persistent:
            non_persistent_buffers = get_non_persistent_buffers(module, recurse=recurse)
        for named_buffer in module.named_buffers(recurse=recurse):
            name, _ = named_buffer
            if name not in non_persistent_buffers:
                yield named_buffer
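A short sketch, assuming `named_module_tensors` is in scope; `nn.BatchNorm1d` is used because it carries both parameters and buffers (its running statistics):

from torch import nn

model = nn.BatchNorm1d(4)

# Parameters only, as exposed by PyTorch itself.
print([name for name, _ in model.named_parameters(recurse=False)])
# Parameters followed by buffers, the view this helper provides.
print([name for name, _ in named_module_tensors(model, include_buffers=True)])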
Gather all the non-persistent buffers of a given module into a set.

Args:
    module (`nn.Module`):
        The module we want the non-persistent buffers on.
    recurse (`bool`, *optional*, defaults to `False`):
        Whether or not to go look in every submodule or just return the direct non-persistent buffers.
def get_non_persistent_buffers(module: nn.Module, recurse: bool = False):
    """
    Gather all the non-persistent buffers of a given module into a set.

    Args:
        module (`nn.Module`):
            The module we want the non-persistent buffers on.
        recurse (`bool`, *optional*, defaults to `False`):
            Whether or not to go look in every submodule or just return the direct non-persistent buffers.
    """
    non_persistent_buffers_set = module._non_persistent_buffers_set
    if recurse:
        for _, m in module.named_modules():
            non_persistent_buffers_set |= m._non_persistent_buffers_set

    return non_persistent_buffers_set
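A small sketch, assuming `get_non_persistent_buffers` is in scope; the module and buffer names are made up:

import torch
from torch import nn


class ToyModule(nn.Module):
    def __init__(self):
        super().__init__()
        self.register_buffer("stats", torch.zeros(4))                      # persistent
        self.register_buffer("scratch", torch.zeros(4), persistent=False)  # non-persistent


print(get_non_persistent_buffers(ToyModule()))  # {'scratch'}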
Check if there is any indication in the given model that some weights should be tied. Args: model (`torch.nn.Module`): The model to inspect Returns: bool: True if the model needs to have tied weights
def check_tied_parameters_in_config(model: nn.Module): """ Check if there is any indication in the given model that some weights should be tied. Args: model (`torch.nn.Module`): The model to inspect Returns: bool: True if the model needs to have tied weights """ # based on model.tie_weights() method has_tied_word_embedding = False has_tied_encoder_decoder = False has_tied_module = False if "PreTrainedModel" in [c.__name__ for c in inspect.getmro(model.__class__)]: has_tied_word_embedding = ( hasattr(model, "config") and getattr(model.config, "tie_word_embeddings", False) and model.get_output_embeddings() ) has_tied_encoder_decoder = ( hasattr(model, "config") and getattr(model.config, "is_encoder_decoder", False) and getattr(model.config, "tie_encoder_decoder", False) ) has_tied_module = any(hasattr(module, "_tie_weights") for module in model.modules()) return any([has_tied_word_embedding, has_tied_encoder_decoder, has_tied_module])
Check if tied parameters are on the same device Args: tied_params (`List[List[str]]`): A list of lists of parameter names being all tied together. device_map (`Dict[str, Union[int, str, torch.device]]`): A map that specifies where each submodule should go.
def check_tied_parameters_on_same_device(tied_params, device_map): """ Check if tied parameters are on the same device Args: tied_params (`List[List[str]]`): A list of lists of parameter names being all tied together. device_map (`Dict[str, Union[int, str, torch.device]]`): A map that specifies where each submodule should go. """ for tie_param in tied_params: tie_param_devices = {} for param in tie_param: tie_param_devices[param] = _get_param_device(param, device_map) if len(set(tie_param_devices.values())) > 1: logger.warn( f"Tied parameters are on different devices: {tie_param_devices}. " "Please modify your custom device map or set `device_map='auto'`. " )
Find the tied parameters in a given model. <Tip warning={true}> The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore them. </Tip> Args: model (`torch.nn.Module`): The model to inspect. Returns: List[List[str]]: A list of lists of parameter names being all tied together. Example: ```py >>> from collections import OrderedDict >>> import torch.nn as nn >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))])) >>> model.linear2.weight = model.linear1.weight >>> find_tied_parameters(model) [['linear1.weight', 'linear2.weight']] ```
def find_tied_parameters(model: nn.Module, **kwargs): """ Find the tied parameters in a given model. <Tip warning={true}> The signature accepts keyword arguments, but they are for the recursive part of this function and you should ignore them. </Tip> Args: model (`torch.nn.Module`): The model to inspect. Returns: List[List[str]]: A list of lists of parameter names being all tied together. Example: ```py >>> from collections import OrderedDict >>> import torch.nn as nn >>> model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))])) >>> model.linear2.weight = model.linear1.weight >>> find_tied_parameters(model) [['linear1.weight', 'linear2.weight']] ``` """ # Initialize result and named_parameters before recursing. named_parameters = kwargs.get("named_parameters", None) prefix = kwargs.get("prefix", "") result = kwargs.get("result", {}) if named_parameters is None: named_parameters = {n: p for n, p in model.named_parameters()} else: # A tied parameter will not be in the full `named_parameters` seen above but will be in the `named_parameters` # of the submodule it belongs to. So while recursing we track the names that are not in the initial # `named_parameters`. for name, parameter in model.named_parameters(): full_name = name if prefix == "" else f"{prefix}.{name}" if full_name not in named_parameters: # When we find one, it has to be one of the existing parameters. for new_name, new_param in named_parameters.items(): if new_param is parameter: if new_name not in result: result[new_name] = [] result[new_name].append(full_name) # Once we have treated direct parameters, we move to the child modules. for name, child in model.named_children(): child_name = name if prefix == "" else f"{prefix}.{name}" find_tied_parameters(child, named_parameters=named_parameters, prefix=child_name, result=result) return FindTiedParametersResult([sorted([weight] + list(set(tied))) for weight, tied in result.items()])
Reties tied parameters in a given model if the link was broken (for instance when adding hooks). Args: model (`torch.nn.Module`): The model in which to retie parameters. tied_params (`List[List[str]]`): A mapping parameter name to tied parameter name as obtained by `find_tied_parameters`.
def retie_parameters(model, tied_params): """ Reties tied parameters in a given model if the link was broken (for instance when adding hooks). Args: model (`torch.nn.Module`): The model in which to retie parameters. tied_params (`List[List[str]]`): A mapping parameter name to tied parameter name as obtained by `find_tied_parameters`. """ for tied_group in tied_params: param_to_tie = None # two loops : the first one to set param_to_tie , the second one to change the values of tied_group for param_name in tied_group: module = model splits = param_name.split(".") for split in splits[:-1]: module = getattr(module, split) param = getattr(module, splits[-1]) if param_to_tie is None and param.device != torch.device("meta"): param_to_tie = param break if param_to_tie is not None: for param_name in tied_group: module = model splits = param_name.split(".") for split in splits[:-1]: module = getattr(module, split) setattr(module, splits[-1], param_to_tie)
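A sketch of the round trip, assuming `find_tied_parameters` and `retie_parameters` are in scope: record the tied groups, deliberately break the link, then restore it:

from collections import OrderedDict
from torch import nn

model = nn.Sequential(OrderedDict([("linear1", nn.Linear(4, 4)), ("linear2", nn.Linear(4, 4))]))
model.linear2.weight = model.linear1.weight
tied = find_tied_parameters(model)

# Simulate a broken tie (as can happen when hooks replace parameters)...
model.linear2.weight = nn.Parameter(model.linear1.weight.detach().clone())
assert model.linear2.weight is not model.linear1.weight

# ...and restore it from the recorded groups.
retie_parameters(model, tied)
assert model.linear2.weight is model.linear1.weight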
Just does torch.dtype(dtype) if necessary.
def _get_proper_dtype(dtype: Union[str, torch.device]) -> torch.dtype: """ Just does torch.dtype(dtype) if necessary. """ if isinstance(dtype, str): # We accept "torch.float16" or just "float16" dtype = dtype.replace("torch.", "") dtype = getattr(torch, dtype) return dtype
Compute the size of each submodule of a given model.
def compute_module_sizes( model: nn.Module, dtype: Optional[Union[str, torch.device]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, buffers_only: bool = False, ): """ Compute the size of each submodule of a given model. """ if dtype is not None: dtype = _get_proper_dtype(dtype) dtype_size = dtype_byte_size(dtype) if special_dtypes is not None: special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()} special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()} module_sizes = defaultdict(int) module_list = [] if not buffers_only: module_list = named_module_tensors(model, recurse=True) else: module_list = model.named_buffers(recurse=True) for name, tensor in module_list: if special_dtypes is not None and name in special_dtypes: size = tensor.numel() * special_dtypes_size[name] elif dtype is None: size = tensor.numel() * dtype_byte_size(tensor.dtype) elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")): # According to the code in set_module_tensor_to_device, these types won't be converted # so use their original size here size = tensor.numel() * dtype_byte_size(tensor.dtype) else: size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype)) name_parts = name.split(".") for idx in range(len(name_parts) + 1): module_sizes[".".join(name_parts[:idx])] += size return module_sizes
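A small sketch, assuming `compute_module_sizes` is in scope; the empty-string key holds the total for the whole model:

from torch import nn

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
sizes = compute_module_sizes(model)

print(sizes[""])          # 120 bytes total: (16 + 4) * 4 + (8 + 2) * 4 in fp32
print(sizes["0"])         # 80 bytes for the first Linear
print(sizes["0.weight"])  # 64 bytes for its weight alone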
Compute the total size of buffers in each submodule of a given model.
def compute_module_total_buffer_size( model: nn.Module, dtype: Optional[Union[str, torch.device]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, ): """ Compute the total size of buffers in each submodule of a given model. """ module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes, buffers_only=True) return module_sizes.get("", 0)
Utility function that will scan a list of named modules and return the maximum size used by one full layer. The definition of a layer being: - a module with no direct children (just parameters and buffers) - a module whose class name is in the list `no_split_module_classes` Args: modules (`List[Tuple[str, torch.nn.Module]]`): The list of named modules where we want to determine the maximum layer size. module_sizes (`Dict[str, int]`): A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`). no_split_module_classes (`List[str]`): A list of class names for layers we don't want to be split. Returns: `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size.
def get_max_layer_size( modules: List[Tuple[str, torch.nn.Module]], module_sizes: Dict[str, int], no_split_module_classes: List[str] ): """ Utility function that will scan a list of named modules and return the maximum size used by one full layer. The definition of a layer being: - a module with no direct children (just parameters and buffers) - a module whose class name is in the list `no_split_module_classes` Args: modules (`List[Tuple[str, torch.nn.Module]]`): The list of named modules where we want to determine the maximum layer size. module_sizes (`Dict[str, int]`): A dictionary mapping each layer name to its size (as generated by `compute_module_sizes`). no_split_module_classes (`List[str]`): A list of class names for layers we don't want to be split. Returns: `Tuple[int, List[str]]`: The maximum size of a layer with the list of layer names realizing that maximum size. """ max_size = 0 layer_names = [] modules_to_treat = modules.copy() while len(modules_to_treat) > 0: module_name, module = modules_to_treat.pop(0) modules_children = list(module.named_children()) if isinstance(module, torch.nn.Module) else [] if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes: # No splitting this one so we compare to the max_size size = module_sizes[module_name] if size > max_size: max_size = size layer_names = [module_name] elif size == max_size: layer_names.append(module_name) else: modules_to_treat = [(f"{module_name}.{n}", v) for n, v in modules_children] + modules_to_treat return max_size, layer_names
Get the maximum memory available if nothing is passed, converts string to int otherwise.
def get_max_memory(max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None): """ Get the maximum memory available if nothing is passed, converts string to int otherwise. """ import psutil if max_memory is None: if not (torch.cuda.is_available() or is_npu_available() or is_mlu_available() or is_xpu_available()): max_memory = {} else: # Make sure CUDA is initialized on each GPU to have the right memory info. if is_npu_available(): for i in range(torch.npu.device_count()): _ = torch.tensor(0, device=torch.device("npu", i)) max_memory = {i: torch.npu.mem_get_info(i)[0] for i in range(torch.npu.device_count())} elif is_mlu_available(): for i in range(torch.mlu.device_count()): _ = torch.tensor(0, device=torch.device("mlu", i)) max_memory = {i: torch.mlu.mem_get_info(i)[0] for i in range(torch.mlu.device_count())} elif is_xpu_available(): for i in range(torch.xpu.device_count()): _ = torch.tensor(0, device=torch.device("xpu", i)) max_memory = {i: torch.xpu.max_memory_allocated(i) for i in range(torch.xpu.device_count())} else: for i in range(torch.cuda.device_count()): _ = torch.tensor([0], device=i) max_memory = {i: torch.cuda.mem_get_info(i)[0] for i in range(torch.cuda.device_count())} # allocate everything in the mps device as the RAM is shared if is_mps_available(): max_memory["mps"] = psutil.virtual_memory().available else: max_memory["cpu"] = psutil.virtual_memory().available return max_memory for key in max_memory: if isinstance(max_memory[key], str): max_memory[key] = convert_file_size_to_int(max_memory[key]) # Need to sort the device by type to make sure that we allocate the gpu first. # As gpu/npu/xpu are represented by int, we need to sort them first. gpu_devices = [k for k in max_memory.keys() if isinstance(k, int)] gpu_devices.sort() # check if gpu/npu/xpu devices are available and if not, throw a warning if is_npu_available(): num_devices = torch.npu.device_count() elif is_mlu_available(): num_devices = torch.mlu.device_count() elif is_xpu_available(): num_devices = torch.xpu.device_count() else: num_devices = torch.cuda.device_count() for device in gpu_devices: if device >= num_devices or device < 0: logger.warning(f"Device {device} is not available, available devices are {list(range(num_devices))}") # Add the other devices in the preset order if they are available all_devices = gpu_devices + [k for k in ["mps", "cpu", "disk"] if k in max_memory.keys()] # Raise an error if a device is not recognized for k in max_memory.keys(): if k not in all_devices: raise ValueError( f"Device {k} is not recognized, available devices are integers(for GPU/XPU), 'mps', 'cpu' and 'disk'" ) max_memory = {k: max_memory[k] for k in all_devices} return max_memory
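A small sketch, assuming `get_max_memory` is in scope; an explicit dictionary is passed so only the string-to-integer conversion and ordering are exercised (on a machine without GPU 0 a warning is logged, but the dictionary is still returned):

max_memory = get_max_memory({0: "10GiB", "cpu": "30GiB"})
print(max_memory)  # {0: 10737418240, 'cpu': 32212254720}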
Cleans a device_map by grouping all submodules that go on the same device together.
def clean_device_map(device_map: Dict[str, Union[int, str, torch.device]], module_name: str = ""): """ Cleans a device_map by grouping all submodules that go on the same device together. """ # Get the value of the current module and if there is only one split across several keys, regroup it. prefix = "" if module_name == "" else f"{module_name}." values = [v for k, v in device_map.items() if k.startswith(prefix)] if len(set(values)) == 1 and len(values) > 1: for k in [k for k in device_map if k.startswith(prefix)]: del device_map[k] device_map[module_name] = values[0] # Recurse over the children children_modules = [k for k in device_map.keys() if k.startswith(prefix) and len(k) > len(module_name)] idx = len(module_name.split(".")) + 1 if len(module_name) > 0 else 1 children_modules = set(".".join(k.split(".")[:idx]) for k in children_modules) for child in children_modules: clean_device_map(device_map, module_name=child) return device_map
Loads the weights from the offload folder into the model. Args: model (`torch.nn.Module`): The model to load the weights into. index (`dict`): A dictionary containing the parameter name and its metadata for each parameter that was offloaded from the model. offload_folder (`str`): The folder where the offloaded weights are stored.
def load_offloaded_weights(model, index, offload_folder): """ Loads the weights from the offload folder into the model. Args: model (`torch.nn.Module`): The model to load the weights into. index (`dict`): A dictionary containing the parameter name and its metadata for each parameter that was offloaded from the model. offload_folder (`str`): The folder where the offloaded weights are stored. """ if index is None or len(index) == 0: # Nothing to do return for param_name, metadata in index.items(): if "SCB" in param_name: continue fp16_statistics = None if "weight" in param_name and param_name.replace("weight", "SCB") in index.keys(): weight_name = param_name.replace("weight", "SCB") fp16_statistics = load_offloaded_weight( os.path.join(offload_folder, f"{weight_name}.dat"), index[weight_name] ) tensor_file = os.path.join(offload_folder, f"{param_name}.dat") weight = load_offloaded_weight(tensor_file, metadata) set_module_tensor_to_device(model, param_name, "cpu", value=weight, fp16_statistics=fp16_statistics)
Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU. <Tip> All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the meta device (as it would if initialized within the `init_empty_weights` context manager). </Tip> Args: model (`torch.nn.Module`): The model to analyze. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. Example: `max_memory={0: "1GB"}`. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*): If provided, special dtypes to consider for some specific weights (will override dtype used as default for all weights). low_zero (`bool`, *optional*): Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the Transformers generate function).
def get_balanced_memory( model: nn.Module, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, dtype: Optional[Union[str, torch.dtype]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None, low_zero: bool = False, ): """ Compute a `max_memory` dictionary for [`infer_auto_device_map`] that will balance the use of each available GPU. <Tip> All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the meta device (as it would if initialized within the `init_empty_weights` context manager). </Tip> Args: model (`torch.nn.Module`): The model to analyze. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. Example: `max_memory={0: "1GB"}`. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*): If provided, special dtypes to consider for some specific weights (will override dtype used as default for all weights). low_zero (`bool`, *optional*): Minimizes the number of weights on GPU 0, which is convenient when it's used for other operations (like the Transformers generate function). """ # Get default / clean up max_memory user_not_set_max_memory = max_memory is None max_memory = get_max_memory(max_memory) if is_npu_available(): num_devices = len([d for d in max_memory if torch.device(d).type == "npu" and max_memory[d] > 0]) elif is_mlu_available(): num_devices = len([d for d in max_memory if torch.device(d).type == "mlu" and max_memory[d] > 0]) elif is_xpu_available(): num_devices = len( [ d for d in max_memory if ( d != "cpu" and (torch.device(d).type == "xpu" or torch.xpu.get_device_properties(d).dev_type == "gpu") ) and max_memory[d] > 0 ] ) else: num_devices = len([d for d in max_memory if torch.device(d).type == "cuda" and max_memory[d] > 0]) if num_devices == 0: return max_memory if num_devices == 1: # We cannot do low_zero on just one GPU, but we will still reserve some memory for the buffer low_zero = False # If user just asked us to handle memory usage, we should avoid OOM if user_not_set_max_memory: for key in max_memory.keys(): if isinstance(key, int): max_memory[key] *= 0.9 # 90% is a good compromise logger.info( f"We will use 90% of the memory on device {key} for storing the model, and 10% for the buffer to avoid OOM. " "You can set `max_memory` in to a higher value to use more memory (at your own risk)." ) break # only one device module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes) per_gpu = module_sizes[""] // (num_devices - 1 if low_zero else num_devices) # We can't just set the memory to model_size // num_devices as it will end being too small: each GPU will get # slightly less layers and some layers will end up offload at the end. 
    # So this function computes a buffer size to add, which is the biggest of:
    # - the size of no split block (if applicable)
    # - the mean of the layer sizes
    if no_split_module_classes is None:
        no_split_module_classes = []
    elif not isinstance(no_split_module_classes, (list, tuple)):
        no_split_module_classes = [no_split_module_classes]

    # Identify the size of the no_split_block modules
    if len(no_split_module_classes) > 0:
        no_split_children = {}
        for name, size in module_sizes.items():
            if name == "":
                continue
            submodule = model
            for submodule_name in name.split("."):
                submodule = getattr(submodule, submodule_name)
            class_name = submodule.__class__.__name__
            if class_name in no_split_module_classes and class_name not in no_split_children:
                no_split_children[class_name] = size

            if set(no_split_children.keys()) == set(no_split_module_classes):
                break
        buffer = max(no_split_children.values()) if len(no_split_children) > 0 else 0
    else:
        buffer = 0

    # Compute mean of final modules. In the first dict of module sizes, leaves are the parameters
    leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0]
    module_sizes = {n: v for n, v in module_sizes.items() if n not in leaves}
    # Once removed, leaves are the final modules.
    leaves = [n for n in module_sizes if len([p for p in module_sizes if n == "" or p.startswith(n + ".")]) == 0]
    mean_leaves = int(sum([module_sizes[n] for n in leaves]) / max(len(leaves), 1))
    buffer = int(1.25 * max(buffer, mean_leaves))
    per_gpu += buffer

    # Sorted list of GPU ids (we may have some gpu ids not included in our max_memory list - let's ignore them)
    gpus_idx_list = list(
        sorted(
            device_id for device_id, device_mem in max_memory.items() if isinstance(device_id, int) and device_mem > 0
        )
    )
    # The last device is left with max_memory just in case the buffer is not enough.
    for idx in gpus_idx_list[:-1]:
        max_memory[idx] = min(max_memory[0] if low_zero and idx == 0 else per_gpu, max_memory[idx])

    if low_zero:
        min_zero = max(0, module_sizes[""] - sum([max_memory[i] for i in range(1, num_devices)]))
        max_memory[0] = min(min_zero, max_memory[0])

    return max_memory
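A sketch on a toy model, assuming `get_balanced_memory` is in scope and the machine exposes CUDA-style (or no) accelerators; the explicit `max_memory` dict keeps the example deterministic:

from torch import nn

model = nn.Sequential(*[nn.Linear(512, 512) for _ in range(8)])  # ~8.4MB in fp32

balanced = get_balanced_memory(
    model,
    max_memory={0: "1GiB", 1: "1GiB", "cpu": "4GiB"},
    no_split_module_classes=["Linear"],
)
# GPU 0 is capped to roughly half the model size plus a safety buffer,
# while the last GPU keeps its full budget.
print(balanced)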
Computes the total size of the model and its largest layer
def calculate_maximum_sizes(model: torch.nn.Module): "Computes the total size of the model and its largest layer" sizes = compute_module_sizes(model) # `transformers` models store this information for us no_split_modules = getattr(model, "_no_split_modules", None) if no_split_modules is None: no_split_modules = [] modules_to_treat = ( list(model.named_parameters(recurse=False)) + list(model.named_children()) + list(model.named_buffers(recurse=False)) ) largest_layer = get_max_layer_size(modules_to_treat, sizes, no_split_modules) total_size = sizes[""] return total_size, largest_layer
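A short sketch, assuming `calculate_maximum_sizes` is in scope; the second return value is the `(size, names)` pair produced by `get_max_layer_size`:

from torch import nn

model = nn.Sequential(nn.Linear(512, 512), nn.Linear(512, 2))
total_size, (largest_layer_size, largest_layer_names) = calculate_maximum_sizes(model)

print(total_size)           # total parameter size of the model, in bytes
print(largest_layer_size)   # size of the single biggest layer
print(largest_layer_names)  # ['0'] -> the first Linear is the largest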
Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk,
such that:
- we don't exceed the memory available on any of the GPUs.
- if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that
  has the largest size.
- if offload to the CPU is needed, we don't exceed the RAM available on the CPU.
- if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk
  that has the largest size.

<Tip>

All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the
meta device (as it would if initialized within the `init_empty_weights` context manager).

</Tip>

Args:
    model (`torch.nn.Module`):
        The model to analyze.
    max_memory (`Dict`, *optional*):
        A dictionary mapping device identifiers to maximum memory. Will default to the maximum memory available if
        unset. Example: `max_memory={0: "1GB"}`.
    no_split_module_classes (`List[str]`, *optional*):
        A list of layer class names that should never be split across devices (for instance any layer that has a
        residual connection).
    dtype (`str` or `torch.dtype`, *optional*):
        If provided, the weights will be converted to that type when loaded.
    special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*):
        If provided, special dtypes to consider for some specific weights (will override dtype used as default for
        all weights).
    verbose (`bool`, *optional*, defaults to `False`):
        Whether or not to provide debugging statements as the function builds the device_map.
    clean_result (`bool`, *optional*, defaults to `True`):
        Clean the resulting device_map by grouping all submodules that go on the same device together.
    offload_buffers (`bool`, *optional*, defaults to `False`):
        In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
        well as the parameters.
def infer_auto_device_map( model: nn.Module, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, dtype: Optional[Union[str, torch.dtype]] = None, special_dtypes: Optional[Dict[str, Union[str, torch.dtype]]] = None, verbose: bool = False, clean_result: bool = True, offload_buffers: bool = False, ): """ Compute a device map for a given model giving priority to GPUs, then offload on CPU and finally offload to disk, such that: - we don't exceed the memory available of any of the GPU. - if offload to the CPU is needed, there is always room left on GPU 0 to put back the layer offloaded on CPU that has the largest size. - if offload to the CPU is needed,we don't exceed the RAM available on the CPU. - if offload to the disk is needed, there is always room left on the CPU to put back the layer offloaded on disk that has the largest size. <Tip> All computation is done analyzing sizes and dtypes of the model parameters. As a result, the model can be on the meta device (as it would if initialized within the `init_empty_weights` context manager). </Tip> Args: model (`torch.nn.Module`): The model to analyze. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available if unset. Example: `max_memory={0: "1GB"}`. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. special_dtypes (`Dict[str, Union[str, torch.device]]`, *optional*): If provided, special dtypes to consider for some specific weights (will override dtype used as default for all weights). verbose (`bool`, *optional*, defaults to `False`): Whether or not to provide debugging statements as the function builds the device_map. clean_result (`bool`, *optional*, defaults to `True`): Clean the resulting device_map by grouping all submodules that go on the same device together. offload_buffers (`bool`, *optional*, defaults to `False`): In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as well as the parameters. """ # Get default / clean up max_memory max_memory = get_max_memory(max_memory) if no_split_module_classes is None: no_split_module_classes = [] elif not isinstance(no_split_module_classes, (list, tuple)): no_split_module_classes = [no_split_module_classes] devices = list(max_memory.keys()) if "disk" not in devices: devices.append("disk") gpus = [device for device in devices if device not in ["cpu", "disk"]] # Devices that need to keep space for a potential offloaded layer. if "mps" in gpus: main_devices = ["mps"] elif len(gpus) > 0: main_devices = [gpus[0], "cpu"] else: main_devices = ["cpu"] module_sizes = compute_module_sizes(model, dtype=dtype, special_dtypes=special_dtypes) tied_parameters = find_tied_parameters(model) if check_tied_parameters_in_config(model) and len(tied_parameters) == 0: logger.warn( "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function." 
) device_map = OrderedDict() current_device = 0 current_memory_used = 0 device_memory_used = {} device_buffer_sizes = {} # Direct submodules and parameters modules_to_treat = ( list(model.named_parameters(recurse=False)) + list(model.named_children()) + list(model.named_buffers(recurse=False)) ) # Initialize maximum largest layer, to know which space to keep in memory max_layer_size, max_layer_names = get_max_layer_size(modules_to_treat, module_sizes, no_split_module_classes) # Ready ? This is going to be a bit messy. while len(modules_to_treat) > 0: name, module = modules_to_treat.pop(0) if verbose: print(f"\nTreating module {name}.") # Max size in the remaining layers may have changed since we took one, so we maybe update it. max_layer_names = [n for n in max_layer_names if n != name and not n.startswith(name + ".")] if len(max_layer_names) == 0: max_layer_size, max_layer_names = get_max_layer_size( [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], module_sizes, no_split_module_classes, ) # Assess size needed module_size = module_sizes[name] # We keep relevant tied parameters only: one of the tied parameters in the group is inside the current module # and the other is not. # Note: If we are currently processing the name `compute.weight`, an other parameter named e.g. `compute.weight_submodule.parameter` # needs to be considered outside the current module, hence the check with additional dots. tied_param_goups = [ tied_group for tied_group in tied_parameters if any(name + "." in k + "." for k in tied_group) and not all(name + "." in k + "." for k in tied_group) ] if verbose and len(tied_param_goups) > 0: print(f" Found the relevant tied param groups {tied_param_goups}") # Then we keep track of all the parameters that are tied to the current module, but not in the current module tied_params = sum( [[p for p in tied_group if name + "." not in p + "."] for tied_group in tied_param_goups], [] ) if verbose and len(tied_params) > 0: print(f" So those parameters need to be taken into account {tied_params}") device = devices[current_device] current_max_size = max_memory[device] if device != "disk" else None current_memory_reserved = 0 # Reduce max size available by the largest layer. if devices[current_device] in main_devices: current_max_size = current_max_size - max_layer_size current_memory_reserved = max_layer_size # Case 1 -> We're too big! if current_max_size is not None and current_memory_used + module_size > current_max_size: # Split or not split? modules_children = ( [] if isinstance(module, nn.Parameter) or isinstance(module, torch.Tensor) else list(module.named_children()) ) if verbose: print( f"Not enough space on {devices[current_device]} to put {name} (space available " f"{current_max_size - current_memory_used}, module size {module_size})." ) if len(modules_children) == 0 or module.__class__.__name__ in no_split_module_classes: # -> no split, we go to the next device if verbose: print("This module cannot be split, going to the next device.") device_memory_used[device] = current_memory_used + current_memory_reserved current_device += 1 modules_to_treat = [(name, module)] + modules_to_treat current_memory_used = 0 else: # -> split, we replace the module studied by its children + parameters if verbose: print(f"Splitting {name}.") modules_children = list(module.named_parameters(recurse=False)) + modules_children modules_to_treat = [(f"{name}.{n}", v) for n, v in modules_children] + modules_to_treat # Update the max layer size. 
max_layer_size, max_layer_names = get_max_layer_size( [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], module_sizes, no_split_module_classes, ) # Case 2, it fits! We're not entirely out of the wood though, because we may have some tied parameters. elif len(tied_params) > 0: # First locate all tied modules tied_module_names = [] tied_modules = [] for tied_param in tied_params: tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n in tied_param][0] tied_module_names.append(modules_to_treat[tied_module_index][0]) tied_modules.append(modules_to_treat[tied_module_index][1]) if verbose: print( f" It looks like {name} is going to fit on {devices[current_device]} but we have tied " f"parameters to account for.\n - Names {tied_params}\n - Module names {tied_module_names}" ) # Let's see if it all fits first module_size_with_ties = module_size for tied_param, tied_module_name in zip(tied_params, tied_module_names): module_size_with_ties += module_sizes[tied_module_name] - module_sizes[tied_param] if current_max_size is None or current_memory_used + module_size_with_ties <= current_max_size: # We really really fit! if verbose: print(f"Putting {name} and {tied_module_names} on {devices[current_device]}.") current_memory_used += module_size_with_ties device_map[name] = devices[current_device] for tied_module_name in tied_module_names: if tied_module_name in [m[0] for m in modules_to_treat]: # The module may have been removed by a previous iteration of this loop. tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][ 0 ] modules_to_treat.pop(tied_module_index) device_map[tied_module_name] = devices[current_device] if not offload_buffers and isinstance(module, nn.Module): current_buffer_size = compute_module_total_buffer_size( module, dtype=dtype, special_dtypes=special_dtypes ) device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size else: # We don't fit with the tied modules. Next question is: can we split one of the tied modules to make it # smaller or do we need to go on the next device? if verbose: print( f"Not enough space on {devices[current_device]} to put {name} and {tied_module_names} (space " f"available {current_max_size - current_memory_used}, needed size {module_size_with_ties})." ) split_happened = False for tied_module_name, tied_module in zip(tied_module_names, tied_modules): tied_module_children = list(tied_module.named_children()) if len(tied_module_children) == 0 or tied_module.__class__.__name__ in no_split_module_classes: # can't break this one. continue if verbose: print(f"Splitting {tied_module_name}.") tied_module_children = list(tied_module.named_parameters(recurse=False)) + tied_module_children tied_module_children = [(f"{tied_module_name}.{n}", v) for n, v in tied_module_children] tied_module_index = [i for i, (n, _) in enumerate(modules_to_treat) if n == tied_module_name][0] modules_to_treat = ( [(name, module)] + modules_to_treat[:tied_module_index] + tied_module_children + modules_to_treat[tied_module_index + 1 :] ) # Update the max layer size. 
max_layer_size, max_layer_names = get_max_layer_size( [(n, m) for n, m in modules_to_treat if isinstance(m, torch.nn.Module)], module_sizes, no_split_module_classes, ) split_happened = True break if not split_happened: # If the tied module is not split, we go to the next device if verbose: print("None of the tied module can be split, going to the next device.") device_memory_used[device] = current_memory_used + current_memory_reserved current_device += 1 modules_to_treat = [(name, module)] + modules_to_treat current_memory_used = 0 else: if verbose: if current_max_size is None: print(f"Putting {name} (size={module_size}) on {devices[current_device]}.") else: print( f"Putting {name} (size={module_size}) on {devices[current_device]} " f"(available={current_max_size - current_memory_used})." ) current_memory_used += module_size device_memory_used[device] = current_memory_used + current_memory_reserved device_map[name] = devices[current_device] if not offload_buffers and isinstance(module, nn.Module): current_buffer_size = compute_module_total_buffer_size( module, dtype=dtype, special_dtypes=special_dtypes ) device_buffer_sizes[device] = device_buffer_sizes.get(device, 0) + current_buffer_size if clean_result: device_map = clean_device_map(device_map) non_gpu_buffer_size = device_buffer_sizes.get("cpu", 0) + device_buffer_sizes.get("disk", 0) if non_gpu_buffer_size > 0 and not offload_buffers: is_buffer_fit_any_gpu = False for gpu_device, gpu_max_memory in max_memory.items(): if gpu_device == "cpu" or gpu_device == "disk": continue if not is_buffer_fit_any_gpu: gpu_memory_used = device_memory_used.get(gpu_device, 0) if gpu_max_memory >= non_gpu_buffer_size + gpu_memory_used: is_buffer_fit_any_gpu = True if len(gpus) > 0 and not is_buffer_fit_any_gpu: warnings.warn( f"Current model requires {non_gpu_buffer_size} bytes of buffer for offloaded layers, which seems does " f"not fit any GPU's remaining memory. If you are experiencing a OOM later, please consider using " f"offload_buffers=True." ) return device_map
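A minimal usage sketch, not part of the library source: it assumes `init_empty_weights` and `infer_auto_device_map` are importable from `accelerate` and that at least one GPU (device 0) is visible; the toy model and memory limits below are placeholders.

# Build a device map for a meta-initialized model under an artificial memory budget.
import torch
from torch import nn

from accelerate import infer_auto_device_map, init_empty_weights

with init_empty_weights():
    # Instantiated on the meta device, so no real memory is allocated.
    model = nn.Sequential(*[nn.Linear(4096, 4096) for _ in range(8)])

device_map = infer_auto_device_map(
    model,
    max_memory={0: "100MB", "cpu": "200MB"},  # anything that doesn't fit spills to disk
    no_split_module_classes=["Linear"],  # illustrative: never split these blocks across devices
    dtype=torch.float16,
)
print(device_map)  # maps each submodule name ('0' ... '7') to 0, 'cpu' or 'disk'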
Checks that a device map covers everything in a given model.

Args:
    model (`torch.nn.Module`): The model to check the device map against.
    device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check.
def check_device_map(model: nn.Module, device_map: Dict[str, Union[int, str, torch.device]]): """ Checks a device map covers everything in a given model. Args: model (`torch.nn.Module`): The model to check the device map against. device_map (`Dict[str, Union[int, str, torch.device]]`): The device map to check. """ all_model_tensors = [name for name, _ in model.state_dict().items()] for module_name in device_map.keys(): if module_name == "": all_model_tensors.clear() break else: all_model_tensors = [ name for name in all_model_tensors if not name == module_name and not name.startswith(module_name + ".") ] if len(all_model_tensors) > 0: non_covered_params = ", ".join(all_model_tensors) raise ValueError( f"The device_map provided does not give any device for the following parameters: {non_covered_params}" )
Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the weights can be fast-loaded directly on the GPU. Args: checkpoint_file (`str`): The path to the checkpoint to load. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device.
def load_state_dict(checkpoint_file, device_map=None): """ Load a checkpoint from a given file. If the checkpoint is in the safetensors format and a device map is passed, the weights can be fast-loaded directly on the GPU. Args: checkpoint_file (`str`): The path to the checkpoint to load. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. """ if checkpoint_file.endswith(".safetensors"): with safe_open(checkpoint_file, framework="pt") as f: metadata = f.metadata() weight_names = f.keys() if metadata is None: logger.warn( f"The safetensors archive passed at {checkpoint_file} does not contain metadata. " "Make sure to save your model with the `save_pretrained` method. Defaulting to 'pt' metadata." ) metadata = {"format": "pt"} if metadata.get("format") not in ["pt", "tf", "flax"]: raise OSError( f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure " "you save your model with the `save_pretrained` method." ) elif metadata["format"] != "pt": raise ValueError(f"The checkpoint passed was saved with {metadata['format']}, we need a the pt format.") if device_map is None: return safe_load_file(checkpoint_file) else: # if we only have one device we can load everything directly if len(set(device_map.values())) == 1: return safe_load_file(checkpoint_file, device=list(device_map.values())[0]) devices = list(set(device_map.values()) - {"disk"}) # cpu device should always exist as fallback option if "cpu" not in devices: devices.append("cpu") # For each device, get the weights that go there device_weights = {device: [] for device in devices} for module_name, device in device_map.items(): if device in devices: device_weights[device].extend( [k for k in weight_names if k == module_name or k.startswith(module_name + ".")] ) # all weights that haven't defined a device should be loaded on CPU device_weights["cpu"].extend([k for k in weight_names if k not in sum(device_weights.values(), [])]) tensors = {} if is_tqdm_available(): progress_bar = tqdm( main_process_only=False, total=sum([len(device_weights[device]) for device in devices]), unit="w", smoothing=0, leave=False, ) else: progress_bar = None for device in devices: target_device = device if is_xpu_available(): current_safetensors_version = packaging.version.parse(importlib.metadata.version("safetensors")) if compare_versions(current_safetensors_version, "<", "0.4.2"): raise ModuleNotFoundError( f"You need at least safetensors 0.4.2 for Intel GPU, while you have {current_safetensors_version}" ) if isinstance(device, int): target_device = f"xpu:{device}" with safe_open(checkpoint_file, framework="pt", device=target_device) as f: for key in device_weights[device]: if progress_bar is not None: progress_bar.set_postfix(dev=device, refresh=False) progress_bar.set_description(key) tensors[key] = f.get_tensor(key) if progress_bar is not None: progress_bar.update() if progress_bar is not None: progress_bar.close() return tensors else: return torch.load(checkpoint_file, map_location=torch.device("cpu"))
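A small round-trip sketch for the safetensors fast path above; the file name and the one-entry device map are placeholders, and `save_file` comes from the `safetensors` package.

# Save a tiny state dict as safetensors, then reload it through `load_state_dict`.
import torch
from safetensors.torch import save_file

model = torch.nn.Linear(4, 4)
save_file(model.state_dict(), "tiny_model.safetensors", metadata={"format": "pt"})

# With a single target device in the map, the whole file is loaded directly on that device.
state_dict = load_state_dict("tiny_model.safetensors", device_map={"": "cpu"})
print(sorted(state_dict))  # ['bias', 'weight']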
Returns the state dictionary for an offloaded model via iterative onloading Args: model (`torch.nn.Module`): The offloaded model we want to save
def get_state_dict_offloaded_model(model: nn.Module): """ Returns the state dictionary for an offloaded model via iterative onloading Args: model (`torch.nn.Module`): The offloaded model we want to save """ from ..hooks import AlignDevicesHook state_dict = {} placeholders = set() for name, module in model.named_modules(): if name == "": continue if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload: original_device = module._hf_hook.execution_device # assign hook execution device to cpu module._hf_hook.execution_device = "cpu" # onload meta tensors to execution device try: module._hf_hook.pre_forward(module) except MemoryError: raise MemoryError("Offloaded module must fit in CPU memory to call save_model!") from None module_state_dict = module.state_dict() # offload meta tensors from cpu module._hf_hook.post_forward(module, torch.tensor([])) # re-assign hook to original execution device module._hf_hook.execution_device = original_device else: module_state_dict = module.state_dict() for key in module_state_dict: # ignore placeholder parameters that are still on the meta device if module_state_dict[key].device == torch.device("meta"): placeholders.add(name + f".{key}") continue params = module_state_dict[key] state_dict[name + f".{key}"] = params for key in placeholders.copy(): if key in state_dict: placeholders.remove(key) if placeholders: logger.warning(f"The following tensors were not saved because they were still on meta device: {placeholders}") return state_dict
Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are loaded. <Tip warning={true}> Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`]. </Tip> Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin or a model.safetensors file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the buffers in the weights offloaded to disk. keep_in_fp32_modules(`List[str]`, *optional*): A list of the modules that we keep in `torch.float32` dtype. offload_8bit_bnb (`bool`, *optional*): Whether or not to enable offload of 8-bit modules on cpu/disk. strict (`bool`, *optional*, defaults to `False`): Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's state_dict.
def load_checkpoint_in_model( model: nn.Module, checkpoint: Union[str, os.PathLike], device_map: Optional[Dict[str, Union[int, str, torch.device]]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, dtype: Optional[Union[str, torch.dtype]] = None, offload_state_dict: bool = False, offload_buffers: bool = False, keep_in_fp32_modules: List[str] = None, offload_8bit_bnb: bool = False, strict: bool = False, ): """ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are loaded. <Tip warning={true}> Once loaded across devices, you still need to call [`dispatch_model`] on your model to make it able to run. To group the checkpoint loading and dispatch in one single call, use [`load_checkpoint_and_dispatch`]. </Tip> Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. - a path to a folder containing a unique pytorch_model.bin or a model.safetensors file. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*, defaults to `False`): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to include the buffers in the weights offloaded to disk. keep_in_fp32_modules(`List[str]`, *optional*): A list of the modules that we keep in `torch.float32` dtype. offload_8bit_bnb (`bool`, *optional*): Whether or not to enable offload of 8-bit modules on cpu/disk. strict (`bool`, *optional*, defaults to `False`): Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's state_dict. """ if offload_8bit_bnb: from .bnb import quantize_and_offload_8bit tied_params = find_tied_parameters(model) if check_tied_parameters_in_config(model) and len(tied_params) == 0: logger.warn( "The model weights are not tied. Please use the `tie_weights` method before using the `infer_auto_device` function." ) if device_map is not None: check_tied_parameters_on_same_device(tied_params, device_map) if offload_folder is None and device_map is not None and "disk" in device_map.values(): raise ValueError( "At least one of the model submodule will be offloaded to disk, please pass along an `offload_folder`." 
) elif offload_folder is not None and device_map is not None and "disk" in device_map.values(): os.makedirs(offload_folder, exist_ok=True) if isinstance(dtype, str): # We accept "torch.float16" or just "float16" dtype = dtype.replace("torch.", "") dtype = getattr(torch, dtype) checkpoint_files = None index_filename = None if os.path.isfile(checkpoint): if str(checkpoint).endswith(".json"): index_filename = checkpoint else: checkpoint_files = [checkpoint] elif os.path.isdir(checkpoint): # check if the whole state dict is present potential_state_bin = [f for f in os.listdir(checkpoint) if f == WEIGHTS_NAME] potential_state_safetensor = [f for f in os.listdir(checkpoint) if f == SAFE_WEIGHTS_NAME] if len(potential_state_bin) == 1: checkpoint_files = [os.path.join(checkpoint, potential_state_bin[0])] elif len(potential_state_safetensor) == 1: checkpoint_files = [os.path.join(checkpoint, potential_state_safetensor[0])] else: # otherwise check for sharded checkpoints potential_index = [f for f in os.listdir(checkpoint) if f.endswith(".index.json")] if len(potential_index) == 0: raise ValueError( f"{checkpoint} is not a folder containing a `.index.json` file or a {WEIGHTS_NAME} or a {SAFE_WEIGHTS_NAME} file" ) elif len(potential_index) == 1: index_filename = os.path.join(checkpoint, potential_index[0]) else: raise ValueError( f"{checkpoint} containing more than one `.index.json` file, delete the irrelevant ones." ) else: raise ValueError( "`checkpoint` should be the path to a file containing a whole state dict, or the index of a sharded " f"checkpoint, or a folder containing a sharded checkpoint or the whole state dict, but got {checkpoint}." ) if index_filename is not None: checkpoint_folder = os.path.split(index_filename)[0] with open(index_filename) as f: index = json.loads(f.read()) if "weight_map" in index: index = index["weight_map"] checkpoint_files = sorted(list(set(index.values()))) checkpoint_files = [os.path.join(checkpoint_folder, f) for f in checkpoint_files] # Logic for missing/unexepected keys goes here. offload_index = {} if offload_state_dict: state_dict_folder = tempfile.mkdtemp() state_dict_index = {} unexpected_keys = set() model_keys = set(model.state_dict().keys()) buffer_names = [name for name, _ in model.named_buffers()] for checkpoint_file in checkpoint_files: loaded_checkpoint = load_state_dict(checkpoint_file, device_map=device_map) if device_map is None: model.load_state_dict(loaded_checkpoint, strict=strict) unexpected_keys.update(set(loaded_checkpoint.keys()) - model_keys) else: for param_name, param in loaded_checkpoint.items(): # skip SCB parameter (for 8-bit serialization) if "SCB" in param_name: continue if param_name not in model_keys: unexpected_keys.add(param_name) if not strict: continue # Skip loading this parameter. module_name = param_name while len(module_name) > 0 and module_name not in device_map: module_name = ".".join(module_name.split(".")[:-1]) if module_name == "" and "" not in device_map: # TODO: group all errors and raise at the end. raise ValueError(f"{param_name} doesn't have any device set.") param_device = device_map[module_name] new_dtype = dtype if dtype is not None and torch.is_floating_point(param): if keep_in_fp32_modules is not None and dtype == torch.float16: proceed = False for key in keep_in_fp32_modules: if ((key in param_name) and (key + "." 
in param_name)) or key == param_name: proceed = True break if proceed: new_dtype = torch.float32 if "weight" in param_name and param_name.replace("weight", "SCB") in loaded_checkpoint.keys(): if param.dtype == torch.int8: fp16_statistics = loaded_checkpoint[param_name.replace("weight", "SCB")] else: fp16_statistics = None if param_device == "disk": if offload_buffers or param_name not in buffer_names: if new_dtype is None: new_dtype = param.dtype if offload_8bit_bnb: quantize_and_offload_8bit( model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics ) continue else: set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) offload_weight(param, param_name, offload_folder, index=offload_index) elif param_device == "cpu" and offload_state_dict: if new_dtype is None: new_dtype = param.dtype if offload_8bit_bnb: quantize_and_offload_8bit( model, param, param_name, new_dtype, state_dict_folder, state_dict_index, fp16_statistics ) else: set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype) offload_weight(param, param_name, state_dict_folder, index=state_dict_index) else: set_module_tensor_to_device( model, param_name, param_device, value=param, dtype=new_dtype, fp16_statistics=fp16_statistics, ) # Force Python to clean up. del loaded_checkpoint gc.collect() if not strict and len(unexpected_keys) > 0: logger.warning( f"Some weights of the model checkpoint at {checkpoint} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}. This may or may not be an issue - make sure that the checkpoint does not have unnecessary parameters, or that the model definition correctly corresponds to the checkpoint." ) save_offload_index(offload_index, offload_folder) # Load back offloaded state dict on CPU if offload_state_dict: load_offloaded_weights(model, state_dict_index, state_dict_folder) shutil.rmtree(state_dict_folder) retie_parameters(model, tied_params)
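A hedged usage sketch for `load_checkpoint_in_model`: the checkpoint file, device map and offload folder below are placeholders, and the model is a toy. As the Tip above notes, modules sent to "disk" still require [`dispatch_model`] before the model can run.

# Load a single-file checkpoint, keeping the first layer on CPU and offloading the second to disk.
import torch

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8))
torch.save(model.state_dict(), "checkpoint.bin")

load_checkpoint_in_model(
    model,
    "checkpoint.bin",
    device_map={"0": "cpu", "1": "disk"},
    offload_folder="offload_dir",  # required because the map contains "disk"
    dtype=torch.float16,
)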
Return a context manager for autocasting mixed precision

Args:
    native_amp (`bool`, *optional*, defaults to `False`):
        Whether mixed precision is actually enabled.
    autocast_kwargs (`AutocastKwargs`, *optional*):
        Additional keyword arguments (such as `cache_enabled`) to forward to `torch.autocast`.
def get_mixed_precision_context_manager(native_amp: bool = False, autocast_kwargs: AutocastKwargs = None):
    """
    Return a context manager for autocasting mixed precision

    Args:
        native_amp (`bool`, *optional*, defaults to `False`):
            Whether mixed precision is actually enabled.
        autocast_kwargs (`AutocastKwargs`, *optional*):
            Additional keyword arguments (such as `cache_enabled`) to forward to `torch.autocast`.
    """
    state = AcceleratorState()
    if autocast_kwargs is None:
        autocast_kwargs = {}
    else:
        autocast_kwargs = autocast_kwargs.to_kwargs()
    if native_amp:
        device_type = (
            "cuda"
            if (state.distributed_type == DistributedType.XLA and is_torch_xla_available(check_is_gpu=True))
            else state.device.type
        )
        if state.mixed_precision == "fp16":
            return torch.autocast(device_type=device_type, dtype=torch.float16, **autocast_kwargs)
        elif state.mixed_precision in ["bf16", "fp8"] and state.distributed_type in [
            DistributedType.NO,
            DistributedType.MULTI_CPU,
            DistributedType.MULTI_GPU,
            DistributedType.MULTI_MLU,
            DistributedType.MULTI_NPU,
            DistributedType.MULTI_XPU,
            DistributedType.FSDP,
            DistributedType.XLA,
        ]:
            return torch.autocast(device_type=device_type, dtype=torch.bfloat16, **autocast_kwargs)
        else:
            return torch.autocast(device_type=device_type, **autocast_kwargs)
    else:
        return contextlib.nullcontext()
Offload a state dict in a given folder. Args: save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict. state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload.
def offload_state_dict(save_dir: Union[str, os.PathLike], state_dict: Dict[str, torch.Tensor]): """ Offload a state dict in a given folder. Args: save_dir (`str` or `os.PathLike`): The directory in which to offload the state dict. state_dict (`Dict[str, torch.Tensor]`): The dictionary of tensors to offload. """ os.makedirs(save_dir, exist_ok=True) index = {} for name, parameter in state_dict.items(): index = offload_weight(parameter, name, save_dir, index=index) # Update index save_offload_index(index, save_dir)
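A minimal sketch of `offload_state_dict`; the temporary directory and the listed file names are illustrative.

# Serialize a state dict to disk in the offload format (one .dat file per tensor plus an index).
import os
import tempfile

import torch

state_dict = torch.nn.Linear(4, 2).state_dict()
save_dir = tempfile.mkdtemp()
offload_state_dict(save_dir, state_dict)
print(sorted(os.listdir(save_dir)))  # e.g. ['bias.dat', 'index.json', 'weight.dat']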
Extract the sub state-dict corresponding to a list of given submodules. Args: state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from. submodule_names (`List[str]`): The list of submodule names we want to extract.
def extract_submodules_state_dict(state_dict: Dict[str, torch.Tensor], submodule_names: List[str]): """ Extract the sub state-dict corresponding to a list of given submodules. Args: state_dict (`Dict[str, torch.Tensor]`): The state dict to extract from. submodule_names (`List[str]`): The list of submodule names we want to extract. """ result = {} for module_name in submodule_names: # We want to catch module_name parameter (module_name.xxx) or potentially module_name, but not any of the # submodules that could being like module_name (transformers.h.1 and transformers.h.10 for instance) result.update( { key: param for key, param in state_dict.items() if key == module_name or key.startswith(module_name + ".") } ) return result
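A quick sketch showing the prefix matching described in the comment above: `h.1` does not capture `h.10`.

# Keep only the weights belonging to the requested submodules.
import torch

state_dict = {
    "h.1.weight": torch.zeros(2),
    "h.10.weight": torch.zeros(2),
    "ln.weight": torch.zeros(2),
}
subset = extract_submodules_state_dict(state_dict, ["h.1"])
print(sorted(subset))  # ['h.1.weight'] -- 'h.10.weight' is correctly left out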
Checks if `data` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a `namedtuple` perfectly.
def is_namedtuple(data): """ Checks if `data` is a `namedtuple` or not. Can have false positives, but only if a user is trying to mimic a `namedtuple` perfectly. """ return isinstance(data, tuple) and hasattr(data, "_asdict") and hasattr(data, "_fields")
Cast a generator to the same type as obj (list, tuple, or namedtuple)
def honor_type(obj, generator): """ Cast a generator to the same type as obj (list, tuple, or namedtuple) """ # Some objects may not be able to instantiate from a generator directly if is_namedtuple(obj): return type(obj)(*list(generator)) else: return type(obj)(generator)
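A small sketch of `honor_type`; the `Batch` namedtuple is a hypothetical container used only for illustration.

# The container type (tuple or namedtuple) is preserved when rebuilding from a generator.
from collections import namedtuple

values = (1, 2, 3)
print(honor_type(values, (v * 2 for v in values)))  # (2, 4, 6), still a tuple

Batch = namedtuple("Batch", ["inputs", "labels"])  # hypothetical container
batch = Batch(inputs=1, labels=0)
print(honor_type(batch, (v + 1 for v in batch)))  # Batch(inputs=2, labels=1)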
Recursively apply a function on a data structure that is a nested list/tuple/dictionary of objects of a given base
type.

Args:
    func (`callable`):
        The function to recursively apply.
    data (nested list/tuple/dictionary of `torch.Tensor` or objects accepted by `test_type`):
        The data on which to apply `func`.
    *args:
        Positional arguments that will be passed to `func` when applied on the unpacked data.
    test_type (`callable`, *optional*, defaults to `is_torch_tensor`):
        A callable returning whether an object should be considered a leaf and have `func` applied to it.
    error_on_other_type (`bool`, *optional*, defaults to `False`):
        Whether to raise an error if, after unpacking `data`, we get an object that does not pass `test_type`. If
        `False`, the function will leave objects that do not pass `test_type` unchanged.
    **kwargs (additional keyword arguments, *optional*):
        Keyword arguments that will be passed to `func` when applied on the unpacked data.

Returns:
    The same data structure as `data` with `func` applied to every object passing `test_type`.
def recursively_apply(func, data, *args, test_type=is_torch_tensor, error_on_other_type=False, **kwargs):
    """
    Recursively apply a function on a data structure that is a nested list/tuple/dictionary of objects of a given
    base type.

    Args:
        func (`callable`):
            The function to recursively apply.
        data (nested list/tuple/dictionary of `torch.Tensor` or objects accepted by `test_type`):
            The data on which to apply `func`.
        *args:
            Positional arguments that will be passed to `func` when applied on the unpacked data.
        test_type (`callable`, *optional*, defaults to `is_torch_tensor`):
            A callable returning whether an object should be considered a leaf and have `func` applied to it.
        error_on_other_type (`bool`, *optional*, defaults to `False`):
            Whether to raise an error if, after unpacking `data`, we get an object that does not pass `test_type`.
            If `False`, the function will leave objects that do not pass `test_type` unchanged.
        **kwargs (additional keyword arguments, *optional*):
            Keyword arguments that will be passed to `func` when applied on the unpacked data.

    Returns:
        The same data structure as `data` with `func` applied to every object passing `test_type`.
    """
    if isinstance(data, (tuple, list)):
        return honor_type(
            data,
            (
                recursively_apply(
                    func, o, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
                )
                for o in data
            ),
        )
    elif isinstance(data, Mapping):
        return type(data)(
            {
                k: recursively_apply(
                    func, v, *args, test_type=test_type, error_on_other_type=error_on_other_type, **kwargs
                )
                for k, v in data.items()
            }
        )
    elif test_type(data):
        return func(data, *args, **kwargs)
    elif error_on_other_type:
        raise TypeError(
            f"Unsupported types ({type(data)}) passed to `{func.__name__}`. Only nested list/tuple/dicts of "
            f"objects that are valid for `{test_type.__name__}` should be passed."
        )
    return data
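A minimal sketch of `recursively_apply` on a nested structure of tensors.

# Apply a function to every tensor leaf while keeping the surrounding structure intact.
import torch

data = {"inputs": [torch.ones(2), torch.ones(3)], "mask": torch.zeros(2)}
doubled = recursively_apply(lambda t: t * 2, data)
print(doubled["inputs"][0])  # tensor([2., 2.])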
Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device.

Args:
    tensor (nested list/tuple/dictionary of `torch.Tensor`):
        The data to send to a given device.
    device (`torch.device`):
        The device to send the data to.
    non_blocking (`bool`, *optional*, defaults to `False`):
        Whether the transfer should be non-blocking when the backend supports it.
    skip_keys (`str` or `List[str]`, *optional*):
        Keys of a mapping to leave untouched instead of sending to `device`.

Returns:
    The same data structure as `tensor` with all tensors sent to the proper device.
def send_to_device(tensor, device, non_blocking=False, skip_keys=None): """ Recursively sends the elements in a nested list/tuple/dictionary of tensors to a given device. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to send to a given device. device (`torch.device`): The device to send the data to. Returns: The same data structure as `tensor` with all tensors sent to the proper device. """ if is_torch_tensor(tensor) or hasattr(tensor, "to"): # `torch.Tensor.to("npu")` could not find context when called for the first time (see this [issue](https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue)). if device == "npu": device = "npu:0" if device == "xpu": device = "xpu:0" # TODO: torch_mlu LongTensor.to(<int num>) has bugs, we will fix this later. if is_torch_tensor(tensor) and tensor.device.type in ["mlu"] and tensor.dtype in [torch.int64]: tensor = tensor.cpu() try: return tensor.to(device, non_blocking=non_blocking) except TypeError: # .to() doesn't accept non_blocking as kwarg return tensor.to(device) except AssertionError as error: # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)). # This call is inside the try-block since is_npu_available is not supported by torch.compile. if is_npu_available(): if isinstance(device, int): device = f"npu:{device}" elif is_xpu_available(): if isinstance(device, int): device = f"xpu:{device}" else: raise error try: return tensor.to(device, non_blocking=non_blocking) except TypeError: # .to() doesn't accept non_blocking as kwarg return tensor.to(device) elif isinstance(tensor, (tuple, list)): return honor_type( tensor, (send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for t in tensor) ) elif isinstance(tensor, Mapping): if isinstance(skip_keys, str): skip_keys = [skip_keys] elif skip_keys is None: skip_keys = [] return type(tensor)( { k: t if k in skip_keys else send_to_device(t, device, non_blocking=non_blocking, skip_keys=skip_keys) for k, t in tensor.items() } ) else: return tensor
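A minimal sketch of `send_to_device`; the device choice is an assumption about the local machine.

# Move a nested batch to a target device in one call.
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
batch = {"input_ids": torch.ones(2, 8, dtype=torch.long), "labels": [torch.zeros(2), torch.zeros(2)]}
batch = send_to_device(batch, device, non_blocking=True)
print(batch["input_ids"].device)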
Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.

Args:
    data (nested list/tuple/dictionary of `torch.Tensor`):
        The data to analyze.

Returns:
    The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
def get_data_structure(data):
    """
    Recursively gathers the information needed to rebuild a nested list/tuple/dictionary of tensors.

    Args:
        data (nested list/tuple/dictionary of `torch.Tensor`):
            The data to analyze.

    Returns:
        The same data structure as `data` with [`~utils.TensorInformation`] instead of tensors.
    """

    def _get_data_structure(tensor):
        return TensorInformation(shape=tensor.shape, dtype=tensor.dtype)

    return recursively_apply(_get_data_structure, data)
Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.

Args:
    data (nested list/tuple/dictionary of `torch.Tensor`):
        The data to analyze.

Returns:
    The same data structure as `data` with lists of tensor shapes instead of tensors.
def get_shape(data):
    """
    Recursively gathers the shape of a nested list/tuple/dictionary of tensors as a list.

    Args:
        data (nested list/tuple/dictionary of `torch.Tensor`):
            The data to analyze.

    Returns:
        The same data structure as `data` with lists of tensor shapes instead of tensors.
    """

    def _get_shape(tensor):
        return list(tensor.shape)

    return recursively_apply(_get_shape, data)
Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`]. Returns: The same data structure as `data` with tensors instead of [`~utils.TensorInformation`].
def initialize_tensors(data_structure): """ Recursively initializes tensors from a nested list/tuple/dictionary of [`~utils.TensorInformation`]. Returns: The same data structure as `data` with tensors instead of [`~utils.TensorInformation`]. """ def _initialize_tensor(tensor_info): return torch.empty(*tensor_info.shape, dtype=tensor_info.dtype) return recursively_apply(_initialize_tensor, data_structure, test_type=is_tensor_information)
Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size. Returns: `int`: The batch size.
def find_batch_size(data): """ Recursively finds the batch size in a nested list/tuple/dictionary of lists of tensors. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to find the batch size. Returns: `int`: The batch size. """ if isinstance(data, (tuple, list, Mapping)) and (len(data) == 0): raise ValueError(f"Cannot find the batch size from empty {type(data)}.") if isinstance(data, (tuple, list)): return find_batch_size(data[0]) elif isinstance(data, Mapping): for k in data.keys(): return find_batch_size(data[k]) elif not isinstance(data, torch.Tensor): raise TypeError(f"Can only find the batch size of tensors but got {type(data)}.") return data.shape[0]
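A one-line sketch: the batch size is read off the first tensor encountered in the structure.

import torch

batch = {"input_ids": torch.ones(16, 128), "attention_mask": torch.ones(16, 128)}
print(find_batch_size(batch))  # 16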
Same as [`utils.operations.find_batch_size`] except it returns `None` instead of raising `ValueError` or `TypeError`.

Args:
    data (nested list/tuple/dictionary of `torch.Tensor`):
        The data from which to find the batch size.

Returns:
    `int` or `None`: The batch size, or `None` if it could not be inferred.
def ignorant_find_batch_size(data):
    """
    Same as [`utils.operations.find_batch_size`] except it returns `None` instead of raising `ValueError` or
    `TypeError`.

    Args:
        data (nested list/tuple/dictionary of `torch.Tensor`):
            The data from which to find the batch size.

    Returns:
        `int` or `None`: The batch size, or `None` if it could not be inferred.
    """
    try:
        return find_batch_size(data)
    except (ValueError, TypeError):
        pass
    return None
Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers. Returns: The same data structure as `data` with lists of numbers instead of `torch.Tensor`.
def listify(data): """ Recursively finds tensors in a nested list/tuple/dictionary and converts them to a list of numbers. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data from which to convert to regular numbers. Returns: The same data structure as `data` with lists of numbers instead of `torch.Tensor`. """ def _convert_to_list(tensor): tensor = tensor.detach().cpu() if tensor.dtype == torch.bfloat16: # As of Numpy 1.21.4, NumPy does not support bfloat16 (see # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ). # Until Numpy adds bfloat16, we must convert float32. tensor = tensor.to(torch.float32) return tensor.tolist() return recursively_apply(_convert_to_list, data)
Verifies that `tensor` is the same shape across all processes. Only run if `PartialState().debug` is `True`.
def verify_operation(function): """ Verifies that `tensor` is the same shape across all processes. Only ran if `PartialState().debug` is `True`. """ @wraps(function) def wrapper(*args, **kwargs): if PartialState().distributed_type == DistributedType.NO or not PartialState().debug: return function(*args, **kwargs) operation = f"{function.__module__}.{function.__name__}" if "tensor" in kwargs: tensor = kwargs["tensor"] else: tensor = args[0] if PartialState().device.type != find_device(tensor).type: raise DistributedOperationException( f"One or more of the tensors passed to {operation} were not on the {tensor.device.type} while the `Accelerator` is configured for {PartialState().device.type}. " f"Please move it to the {PartialState().device.type} before calling {operation}." ) shapes = get_shape(tensor) output = gather_object([shapes]) if output[0] is not None: are_same = output.count(output[0]) == len(output) if not are_same: process_shape_str = "\n - ".join([f"Process {i}: {shape}" for i, shape in enumerate(output)]) raise DistributedOperationException( f"Cannot apply desired operation due to shape mismatches. " "All shapes across devices must be valid." f"\n\nOperation: `{operation}`\nInput shapes:\n - {process_shape_str}" ) return function(*args, **kwargs) return wrapper
Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing `DistributedOperationException`.
def chained_operation(function): """ Checks that `verify_operation` failed and if so reports a more helpful error chaining the existing `DistributedOperationException`. """ @wraps(function) def wrapper(*args, **kwargs): try: return function(*args, **kwargs) except DistributedOperationException as e: operation = f"{function.__module__}.{function.__name__}" raise DistributedOperationException( f"Error found while calling `{operation}`. Please see the earlier error for more details." ) from e return wrapper
Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. Returns: The same data structure as `tensor` with all tensors sent to the proper device.
def gather(tensor): """ Recursively gather tensor in a nested list/tuple/dictionary of tensors from all devices. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. Returns: The same data structure as `tensor` with all tensors sent to the proper device. """ if PartialState().distributed_type == DistributedType.XLA: return _tpu_gather(tensor) elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES: return _gpu_gather(tensor) else: return tensor
Recursively gather object in a nested list/tuple/dictionary of objects from all devices. Args: object (nested list/tuple/dictionary of picklable object): The data to gather. Returns: The same data structure as `object` with all the objects sent to every device.
def gather_object(object: Any): """ Recursively gather object in a nested list/tuple/dictionary of objects from all devices. Args: object (nested list/tuple/dictionary of picklable object): The data to gather. Returns: The same data structure as `object` with all the objects sent to every device. """ if PartialState().distributed_type == DistributedType.XLA: raise NotImplementedError("gather objects in TPU is not supported") elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES: return _gpu_gather_object(object) else: return object
Grabs the shape of a `tensor` that is only defined on one process and returns, on every process, a tensor of its
shape along with an integer code for its dtype.
def gather_tensor_shape(tensor):
    """
    Grabs the shape of a `tensor` that is only defined on one process and returns, on every process, a tensor of its
    shape along with an integer code for its dtype.
    """
    # Allocate a buffer large enough to hold any tensor shape plus a trailing dtype code
    max_tensor_dimension = 2**20
    state = PartialState()
    base_tensor = torch.empty(max_tensor_dimension, dtype=torch.int, device=state.device)

    # Since PyTorch can't just send a tensor to another GPU without
    # knowing its size, we store the size of the tensor (and a dtype
    # code) in this shared allocation
    if tensor is not None:
        shape = tensor.shape
        tensor_dtype = TENSOR_TYPE_TO_INT[tensor.dtype]
        base_tensor[: len(shape) + 1] = torch.tensor(list(shape) + [tensor_dtype], dtype=int)

    # Perform a reduction to copy the size data onto all GPUs
    base_tensor = reduce(base_tensor, reduction="sum")
    base_tensor = base_tensor[base_tensor.nonzero()]
    # The last non-zero entry is the coded dtype of the source tensor
    dtype = int(base_tensor[-1:][0])
    base_tensor = base_tensor[:-1]
    return base_tensor, dtype
Copies a tensor that only exists on a single device and broadcasts it to other devices. Differs from `broadcast` as
each worker doesn't need to know its shape when used (and tensor can be `None`)

Args:
    tensor (`torch.tensor`):
        The tensor that should be sent to all devices. Must be defined on a single device only; on all other
        processes it should be `None`.
def copy_tensor_to_devices(tensor=None) -> torch.Tensor:
    """
    Copies a tensor that only exists on a single device and broadcasts it to other devices. Differs from `broadcast`
    as each worker doesn't need to know its shape when used (and tensor can be `None`)

    Args:
        tensor (`torch.tensor`):
            The tensor that should be sent to all devices. Must be defined on a single device only; on all other
            processes it should be `None`.
    """
    state = PartialState()
    shape, dtype = gather_tensor_shape(tensor)
    if tensor is None:
        tensor = torch.zeros(shape, dtype=TENSOR_INT_TO_DTYPE[dtype]).to(state.device)
    return reduce(tensor, reduction="sum")
Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices.

Args:
    tensor (nested list/tuple/dictionary of `torch.Tensor`):
        The data to broadcast.
    from_process (`int`, *optional*, defaults to 0):
        The process from which to send the data.

Returns:
    The same data structure as `tensor` with all tensors broadcast to the proper device.
def broadcast(tensor, from_process: int = 0): """ Recursively broadcast tensor in a nested list/tuple/dictionary of tensors to all devices. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. from_process (`int`, *optional*, defaults to 0): The process from which to send the data Returns: The same data structure as `tensor` with all tensors broadcasted to the proper device. """ if PartialState().distributed_type == DistributedType.XLA: return _tpu_broadcast(tensor, src=from_process, name="accelerate.utils.broadcast") elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES: return _gpu_broadcast(tensor, src=from_process) else: return tensor
Broadcast a list of picklable objects from one process to the others.

Args:
    object_list (list of picklable objects):
        The list of objects to broadcast. This list will be modified in place.
    from_process (`int`, *optional*, defaults to 0):
        The process from which to send the data.

Returns:
    The same list containing the objects from process 0.
def broadcast_object_list(object_list, from_process: int = 0):
    """
    Broadcast a list of picklable objects from one process to the others.

    Args:
        object_list (list of picklable objects):
            The list of objects to broadcast. This list will be modified in place.
        from_process (`int`, *optional*, defaults to 0):
            The process from which to send the data.

    Returns:
        The same list containing the objects from process 0.
    """
    if PartialState().distributed_type == DistributedType.XLA:
        for i, obj in enumerate(object_list):
            object_list[i] = xm.mesh_reduce("accelerate.utils.broadcast_object_list", obj, lambda x: x[from_process])
    elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES:
        torch.distributed.broadcast_object_list(object_list, src=from_process)
    return object_list
Recursively takes a slice in a nested list/tuple/dictionary of tensors.

Args:
    data (nested list/tuple/dictionary of `torch.Tensor`):
        The data to slice.
    tensor_slice (`slice`):
        The slice to take.

Returns:
    The same data structure as `data` with all the tensors sliced.
def slice_tensors(data, tensor_slice, process_index=None, num_processes=None): """ Recursively takes a slice in a nested list/tuple/dictionary of tensors. Args: data (nested list/tuple/dictionary of `torch.Tensor`): The data to slice. tensor_slice (`slice`): The slice to take. Returns: The same data structure as `data` with all the tensors slices. """ def _slice_tensor(tensor, tensor_slice): return tensor[tensor_slice] return recursively_apply(_slice_tensor, data, tensor_slice)
Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape. Args: data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`): The data to concatenate. dim (`int`, *optional*, defaults to 0): The dimension on which to concatenate. Returns: The same data structure as `data` with all the tensors concatenated.
def concatenate(data, dim=0): """ Recursively concatenate the tensors in a nested list/tuple/dictionary of lists of tensors with the same shape. Args: data (nested list/tuple/dictionary of lists of tensors `torch.Tensor`): The data to concatenate. dim (`int`, *optional*, defaults to 0): The dimension on which to concatenate. Returns: The same data structure as `data` with all the tensors concatenated. """ if isinstance(data[0], (tuple, list)): return honor_type(data[0], (concatenate([d[i] for d in data], dim=dim) for i in range(len(data[0])))) elif isinstance(data[0], Mapping): return type(data[0])({k: concatenate([d[k] for d in data], dim=dim) for k in data[0].keys()}) elif not isinstance(data[0], torch.Tensor): raise TypeError(f"Can only concatenate tensors but got {type(data[0])}") return torch.cat(data, dim=dim)
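A small sketch of `concatenate` merging per-step outputs that share the same structure.

import torch

step_outputs = [
    {"logits": torch.zeros(4, 10), "loss": torch.zeros(1)},
    {"logits": torch.ones(4, 10), "loss": torch.ones(1)},
]
merged = concatenate(step_outputs, dim=0)
print(merged["logits"].shape)  # torch.Size([8, 10])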
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they can safely be gathered. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. dim (`int`, *optional*, defaults to 0): The dimension on which to pad. pad_index (`int`, *optional*, defaults to 0): The value with which to pad. pad_first (`bool`, *optional*, defaults to `False`): Whether to pad at the beginning or the end.
def pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False): """ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so they can safely be gathered. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to gather. dim (`int`, *optional*, defaults to 0): The dimension on which to pad. pad_index (`int`, *optional*, defaults to 0): The value with which to pad. pad_first (`bool`, *optional*, defaults to `False`): Whether to pad at the beginning or the end. """ def _pad_across_processes(tensor, dim=0, pad_index=0, pad_first=False): if getattr(tensor, "is_nested", False): warnings.warn( "Cannot pad nested tensors without more information. Leaving unprocessed.", CannotPadNestedTensorWarning, ) return tensor if dim >= len(tensor.shape): return tensor # Gather all sizes size = torch.tensor(tensor.shape, device=tensor.device)[None] sizes = gather(size).cpu() # Then pad to the maximum size max_size = max(s[dim] for s in sizes) if max_size == tensor.shape[dim]: return tensor old_size = tensor.shape new_size = list(old_size) new_size[dim] = max_size new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index if pad_first: indices = tuple( slice(max_size - old_size[dim], max_size) if i == dim else slice(None) for i in range(len(new_size)) ) else: indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size))) new_tensor[indices] = tensor return new_tensor return recursively_apply( _pad_across_processes, tensor, error_on_other_type=True, dim=dim, pad_index=pad_index, pad_first=pad_first )
Takes a `tensor` of arbitrary size and pads its batch dimension so that the result can be split evenly across
`num_processes`. The rows appended at the end are zero-filled.

E.g.:
    Tensor: ([3,4,4])
    Num processes: 4
    Expected result shape: ([4,4,4])
def pad_input_tensors(tensor, batch_size, num_processes, dim=0): """ Takes a `tensor` of arbitrary size and pads it so that it can work given `num_processes` needed dimensions. New tensors are just the last input repeated. E.g.: Tensor: ([3,4,4]) Num processes: 4 Expected result shape: ([4,4,4]) """ def _pad_input_tensors(tensor, batch_size, num_processes, dim=0): remainder = batch_size // num_processes last_inputs = batch_size - (remainder * num_processes) if batch_size // num_processes == 0: to_pad = num_processes - batch_size else: to_pad = num_processes - (batch_size // num_processes) # In the rare case that `to_pad` is negative, # we need to pad the last inputs - the found `to_pad` if last_inputs > to_pad & to_pad < 1: to_pad = last_inputs - to_pad old_size = tensor.shape new_size = list(old_size) new_size[0] = batch_size + to_pad new_tensor = tensor.new_zeros(tuple(new_size)) indices = tuple(slice(0, old_size[dim]) if i == dim else slice(None) for i in range(len(new_size))) new_tensor[indices] = tensor return new_tensor return recursively_apply( _pad_input_tensors, tensor, error_on_other_type=True, batch_size=batch_size, num_processes=num_processes, dim=dim, )
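A minimal sketch of `pad_input_tensors`, matching the shape example in the docstring above.

import torch

tensor = torch.ones(3, 4, 4)
padded = pad_input_tensors(tensor, batch_size=3, num_processes=4)
print(padded.shape)  # torch.Size([4, 4, 4])
print(padded[-1].abs().sum().item())  # 0.0 -- the appended row is zero-filled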
Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes using a
given operation (mean by default).

Args:
    tensor (nested list/tuple/dictionary of `torch.Tensor`):
        The data to reduce.
    reduction (`str`, *optional*, defaults to `"mean"`):
        A reduction method. Can be one of "mean", "sum", or "none".
    scale (`float`, *optional*, defaults to 1.0):
        A default scaling value to be applied after the reduce, only valid on XLA.

Returns:
    The same data structure as `data` with all the tensors reduced.
def reduce(tensor, reduction="mean", scale=1.0): """ Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes by the mean of a given operation. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to reduce. reduction (`str`, *optional*, defaults to `"mean"`): A reduction method. Can be of "mean", "sum", or "none" scale (`float`, *optional*): A default scaling value to be applied after the reduce, only valied on XLA. Returns: The same data structure as `data` with all the tensors reduced. """ def _reduce_across_processes(tensor, reduction="mean", scale=1.0): state = PartialState() cloned_tensor = tensor.clone() if state.distributed_type == DistributedType.NO: return cloned_tensor if state.distributed_type == DistributedType.XLA: # Some processes may have different HLO graphs than other # processes, for example in the breakpoint API # accelerator.set_trigger(). Use mark_step to make HLOs # the same on all processes. xm.mark_step() xm.all_reduce(xm.REDUCE_SUM, [cloned_tensor], scale) xm.mark_step() elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES: torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM) if reduction == "mean": cloned_tensor /= state.num_processes return cloned_tensor return recursively_apply( _reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction, scale=scale )
Recursively converts the elements of a nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32.

Args:
    tensor (nested list/tuple/dictionary of `torch.Tensor`):
        The data to convert from FP16/BF16 to FP32.

Returns:
    The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32.
def convert_to_fp32(tensor): """ Recursively converts the elements nested list/tuple/dictionary of tensors in FP16/BF16 precision to FP32. Args: tensor (nested list/tuple/dictionary of `torch.Tensor`): The data to convert from FP16/BF16 to FP32. Returns: The same data structure as `tensor` with all tensors that were in FP16/BF16 precision converted to FP32. """ def _convert_to_fp32(tensor): return tensor.float() def _is_fp16_bf16_tensor(tensor): return (is_torch_tensor(tensor) or hasattr(tensor, "dtype")) and tensor.dtype in ( torch.float16, torch.bfloat16, ) return recursively_apply(_convert_to_fp32, tensor, test_type=_is_fp16_bf16_tensor)
Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device). Args: (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of.
def find_device(data): """ Finds the device on which a nested dict/list/tuple of tensors lies (assuming they are all on the same device). Args: (nested list/tuple/dictionary of `torch.Tensor`): The data we want to know the device of. """ if isinstance(data, Mapping): for obj in data.values(): device = find_device(obj) if device is not None: return device elif isinstance(data, (tuple, list)): for obj in data: device = find_device(obj) if device is not None: return device elif isinstance(data, torch.Tensor): return data.device
Check whether the module was compiled with torch.compile()
def is_compiled_module(module): """ Check whether the module was compiled with torch.compile() """ if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"): return False return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
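A quick sketch of `is_compiled_module`, assuming PyTorch >= 2.0 so that `torch.compile` is available.

import torch

model = torch.nn.Linear(4, 4)
print(is_compiled_module(model))  # False
compiled = torch.compile(model)
print(is_compiled_module(compiled))  # True
print(is_compiled_module(compiled._orig_mod))  # False -- the wrapped original module is untouched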