# Code to load a causal language model and tokenizer, with optional
# bitsandbytes quantization and best-effort determinism.

import os
import warnings
import torch
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

def load_model(repo_id, bnb=None, torch_dtype='auto'):
    """Load a causal LM and its tokenizer from the Hugging Face repo `repo_id`.

    bnb optionally enables bitsandbytes quantization: 'nf8' (8-bit) or 'nf4' (4-bit).
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Try our best to get deterministic results.
    if device.type == 'cuda':
        # For determinism with CUDA >= 10.2, PyTorch says to use one of these.
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
        #os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':16:8'

    torch.use_deterministic_algorithms(True)
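    # Note: deterministic kernels alone do not pin down sampled text; callers
    # that sample during generation should also fix the seed, e.g. with
    # torch.manual_seed().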

    # Ignore a spurious warning from huggingface_hub:
    # https://github.com/huggingface/transformers/issues/30618
    warnings.filterwarnings('ignore', message="`resume_download` is deprecated")

    # Ignore a spurious warning from bitsandbytes.
    warnings.filterwarnings('ignore', message="MatMul8bitLt: inputs will be cast from")

    print(f'Loading model "{repo_id}" (bnb = "{bnb}")...')

    tokenizer = load_tokenizer(repo_id)

    bnb_config = None
    if bnb == 'nf8':
        # 8-bit loading uses bitsandbytes LLM.int8() quantization.
        bnb_config = BitsAndBytesConfig(load_in_8bit=True)
    elif bnb == 'nf4':
        # The default 4-bit quant type is 'fp4'; request NF4 explicitly to
        # match this mode's name.
        bnb_config = BitsAndBytesConfig(load_in_4bit=True,
                                        bnb_4bit_quant_type='nf4')

    device_map = 'auto'
    if device.type == 'cpu':
        # Without CUDA, device_map='auto' could place the model on MPS, which
        # does not support bfloat16, so load on the CPU instead.
        device_map = None

    model = AutoModelForCausalLM.from_pretrained(
        repo_id,
        torch_dtype=torch_dtype,
        device_map=device_map,
        quantization_config=bnb_config,
    )

    # Disable gradients to save memory.
    for param in model.parameters():
        param.requires_grad = False

    # Switch to eval mode so dropout and similar layers behave deterministically.
    model.eval()

    print('Done loading model.')

    return model, tokenizer

def load_tokenizer(repo_id):
    # Ignore a spurious warning "Special tokens have been added..."
    transformers.logging.set_verbosity_error()
    tokenizer = AutoTokenizer.from_pretrained(repo_id, use_fast=True)
    transformers.logging.set_verbosity_warning()
    return tokenizer
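
# A minimal usage sketch, assuming network access to the Hugging Face Hub.
# 'gpt2' is just a small placeholder model; substitute any causal LM repo id.
if __name__ == '__main__':
    model, tokenizer = load_model('gpt2')
    inputs = tokenizer('The quick brown fox', return_tensors='pt').to(model.device)
    with torch.no_grad():
        output_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))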