# Code to load a model.

import os
import warnings

import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig


def load_model(repo_id, bnb=None, torch_dtype='auto'):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Try our best to get deterministic results.
    if device.type == 'cuda':
        # For determinism with CUDA >= 10.2, PyTorch says to use one of these.
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
        #os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':16:8'
    torch.use_deterministic_algorithms(True)

    # Ignore a spurious warning from huggingface_hub:
    # https://github.com/huggingface/transformers/issues/30618
    warnings.filterwarnings('ignore', message="`resume_download` is deprecated")

    # Ignore a spurious warning from bitsandbytes.
    warnings.filterwarnings('ignore', message="MatMul8bitLt: inputs will be cast from")

    print(f'Loading model "{repo_id}" (bnb = "{bnb}")...')

    # Ignore a spurious warning "Special tokens have been added..."
    transformers.logging.set_verbosity_error()
    tokenizer = AutoTokenizer.from_pretrained(repo_id, use_fast=True)
    transformers.logging.set_verbosity_warning()

    bnb_config = None
    if bnb == 'nf8':
        bnb_config = BitsAndBytesConfig(load_in_8bit=True)
    if bnb == 'nf4':
        bnb_config = BitsAndBytesConfig(load_in_4bit=True)

    device_map = 'auto'
    if device.type == 'cpu':
        # BFloat16 is not supported on MPS
        device_map = None

    model = AutoModelForCausalLM.from_pretrained(
        repo_id,
        torch_dtype=torch_dtype,
        device_map=device_map,
        quantization_config=bnb_config,
    )

    # Disable gradients to save memory.
    for param in model.parameters():
        param.requires_grad = False

    # Try our best to get deterministic results.
    model.eval()

    print('Done loading model.')
    return model, tokenizer


def load_tokenizer(repo_id):
    # Ignore a spurious warning "Special tokens have been added..."
    transformers.logging.set_verbosity_error()
    tokenizer = AutoTokenizer.from_pretrained(repo_id, use_fast=True)
    transformers.logging.set_verbosity_warning()
    return tokenizer
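

# Minimal usage sketch (not part of the loader itself): shows one way to call
# load_model() and run a short generation. The repo id "gpt2" and the prompt
# below are illustrative placeholders, not values assumed by the code above.
if __name__ == '__main__':
    model, tokenizer = load_model('gpt2', bnb=None)

    # Tokenize a prompt and move the tensors to the same device as the model.
    inputs = tokenizer('Hello, world!', return_tensors='pt').to(model.device)

    # Generate a short continuation; gradients are already disabled above,
    # but no_grad() keeps the example explicit about not building a graph.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=20)

    print(tokenizer.decode(outputs[0], skip_special_tokens=True))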