# This file is adapted from https://github.com/tloen/alpaca-lora (for the merge) and https://gist.github.com/benob/4850a0210b01672175942203aa36d300 (for the sharding).
# It merges the LoRA weights back into the base model and exports the result as PyTorch state dicts (`consolidated.0x.pth`).
# The number of shards is taken from the --num_shards command-line argument (or inferred from the model size).
# The output is intended for users who want to run inference in projects like llama.cpp or alpaca.cpp.
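# Example invocation (the script name and paths are illustrative; they simply mirror the argparse defaults below):
#   python merge_lora.py \
#       --model_path decapoda-research/llama-7b-hf \
#       --lora_path ./lora-Vicuna/checkpoint-3000 \
#       --out_path ./lora-Vicuna/checkpoint-3000-with-lora \
#       --num_shards 2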
import os
import json
import torch
from peft import PeftModel, LoraConfig
import argparse
import transformers
# args
parser = argparse.ArgumentParser()
# The original base model checkpoint dir
parser.add_argument("--model_path", type=str, default='decapoda-research/llama-7b-hf')
# The finetuned LoRA model checkpoint dir
parser.add_argument("--lora_path", type=str, default='./lora-Vicuna/checkpoint-3000')
# The output dir
parser.add_argument("--out_path", type=str, default='./lora-Vicuna/checkpoint-3000-with-lora')
# The number of output shards; if omitted, it is inferred from the model size
parser.add_argument("--num_shards", type=int, default=None)
args = parser.parse_args()
#
assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM
params = {
    '65B': {"dim": 8192, "multiple_of": 256, "n_heads": 64, "n_layers": 80, "norm_eps": 1e-06, "vocab_size": -1},
    '30B': {"dim": 6656, "multiple_of": 256, "n_heads": 52, "n_layers": 60, "norm_eps": 1e-06, "vocab_size": -1},
    '13B': {"dim": 5120, "multiple_of": 256, "n_heads": 40, "n_layers": 40, "norm_eps": 1e-06, "vocab_size": -1},
    '7B': {"dim": 4096, "multiple_of": 256, "n_heads": 32, "n_layers": 32, "norm_eps": 1e-06, "vocab_size": -1},
}
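# These entries mirror the params.json shipped with Meta's original LLaMA checkpoints;
# the matching entry is written next to the exported shards so that downstream converters
# (e.g. llama.cpp's conversion script) can recover the model geometry.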
NUM_SHARDS = {
    "7B": 1,
    "13B": 2,
    "30B": 4,
    "65B": 8,
}
layer_kind = {
    'tok_embeddings': 'ParallelEmbedding',
    'output': 'ColumnParallelLinear',
    'attention.wq': 'ColumnParallelLinear',
    'attention.wk': 'ColumnParallelLinear',
    'attention.wv': 'ColumnParallelLinear',
    'attention.wo': 'RowParallelLinear',
    'feed_forward.w1': 'ColumnParallelLinear',
    'feed_forward.w2': 'RowParallelLinear',
    'feed_forward.w3': 'ColumnParallelLinear',
    'attention_norm': None,
    'ffn_norm': None,
    'norm': None,
    'rope.freqs': None,
}
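# layer_kind determines how each weight is split across shards in the sharding loop
# below: 'ColumnParallelLinear' weights are sliced along dim 0, 'RowParallelLinear'
# and 'ParallelEmbedding' weights along dim 1, and keys mapped to None are copied
# to every shard unchanged.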
print(f">>> load model from {args.model_path} and lora from {args.lora_path}....")
tokenizer = LlamaTokenizer.from_pretrained(args.model_path)
base_model = LlamaForCausalLM.from_pretrained(
    args.model_path,
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)
lora_model = PeftModel.from_pretrained(
    base_model,
    args.lora_path,
    device_map={"": "cpu"},
    torch_dtype=torch.float16,
)
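# Both the base model and the LoRA adapter are loaded in float16 on the CPU, so the
# merge runs without a GPU but needs enough RAM to hold the full model in half precision.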
# merge weights
for layer in lora_model.base_model.model.model.layers:
    layer.self_attn.q_proj.merge_weights = True
    layer.self_attn.v_proj.merge_weights = True
lora_model.train(False)
lora_model_sd = lora_model.state_dict()
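# With merge_weights=True set on q_proj/v_proj (the modules the LoRA adapter targets
# here), switching to eval mode via train(False) folds the LoRA deltas into the base
# weights in the peft version this script was written against, so lora_model_sd above
# already contains the merged weights.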
n_layers = base_model.config.num_hidden_layers
model_size = None
for size in params.keys():
    if n_layers == params[size]["n_layers"]:
        model_size = size
        print(f">>> automatically recognized model_size={size}")
if model_size is None:
    raise Exception('cannot recognize model_size! Please check that your model is a LLaMA-based model.')
n_heads = base_model.config.num_attention_heads
assert n_heads == params[model_size]["n_heads"]
dim = base_model.config.hidden_size
assert dim == params[model_size]["dim"]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
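# Standard RoPE inverse-frequency table. It is computed here but never used below;
# the HF model's rotary_emb.inv_freq buffers are likewise dropped during key translation.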
if args.num_shards is None:
    num_shards = NUM_SHARDS[model_size]
else:
    num_shards = args.num_shards
print(f'>>> will split model checkpoint in {num_shards} parts')
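# The HF conversion script for LLaMA permutes the rows of the wq/wk projections to
# match its rotary-embedding layout; unpermute() below reverses that so the exported
# weights follow Meta's original layout again. permute() is the forward direction and
# is kept only for reference (it is not called in this script).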
def permute(w):
    return (
        w.view(n_heads, dim // n_heads // 2, 2, dim).transpose(1, 2).reshape(dim, dim)
    )
def unpermute(w):
    return (
        w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim)
    )
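# translate_state_dict_key() maps HF module names (e.g. model.layers.0.self_attn.q_proj.weight)
# to the names used in Meta's original consolidated checkpoints (e.g. layers.0.attention.wq.weight).
# LoRA-specific keys and rotary_emb.inv_freq buffers map to None and are skipped.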
def translate_state_dict_key(k):
    k = k.replace("base_model.model.", "")
    if k == "model.embed_tokens.weight":
        return "tok_embeddings.weight"
    elif k == "model.norm.weight":
        return "norm.weight"
    elif k == "lm_head.weight":
        return "output.weight"
    elif k.startswith("model.layers."):
        layer = k.split(".")[2]
        if k.endswith(".self_attn.q_proj.weight"):
            return f"layers.{layer}.attention.wq.weight"
        elif k.endswith(".self_attn.k_proj.weight"):
            return f"layers.{layer}.attention.wk.weight"
        elif k.endswith(".self_attn.v_proj.weight"):
            return f"layers.{layer}.attention.wv.weight"
        elif k.endswith(".self_attn.o_proj.weight"):
            return f"layers.{layer}.attention.wo.weight"
        elif k.endswith(".mlp.gate_proj.weight"):
            return f"layers.{layer}.feed_forward.w1.weight"
        elif k.endswith(".mlp.down_proj.weight"):
            return f"layers.{layer}.feed_forward.w2.weight"
        elif k.endswith(".mlp.up_proj.weight"):
            return f"layers.{layer}.feed_forward.w3.weight"
        elif k.endswith(".input_layernorm.weight"):
            return f"layers.{layer}.attention_norm.weight"
        elif k.endswith(".post_attention_layernorm.weight"):
            return f"layers.{layer}.ffn_norm.weight"
        elif k.endswith("rotary_emb.inv_freq") or "lora" in k:
            return None
        else:
            print(layer, k)
            raise NotImplementedError
    else:
        print(k)
        raise NotImplementedError
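# Rebuild the state dict under the original naming scheme, undoing the permutation
# that the HF conversion applied to the wq/wk weights.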
new_state_dict = {}
for k, v in lora_model_sd.items():
    new_k = translate_state_dict_key(k)
    if new_k is not None:
        if "wq" in new_k or "wk" in new_k:
            new_state_dict[new_k] = unpermute(v)
        else:
            new_state_dict[new_k] = v
os.makedirs(args.out_path, exist_ok=True)
if num_shards == 1:
    torch.save(new_state_dict, f"{args.out_path}/consolidated.00.pth")
    with open(f"{args.out_path}/params.json", "w") as f:
        json.dump(params[model_size], f)
else:
    output = [dict() for x in range(num_shards)]
    print('>>> start converting to shards...')
    # shard the model: each weight is split (or replicated) according to layer_kind
    for key in new_state_dict.keys():
        tensors = [new_state_dict[key]]
        print(key)
        print(' in shapes=', [p.shape for p in tensors])
        for pattern, kind in layer_kind.items():
            if key.replace('.weight', '').endswith(pattern):
                print(' kind=', kind)
                if kind == 'ColumnParallelLinear':
                    with torch.no_grad():
                        merged = torch.cat(tensors, 0)
                        slice_size = merged.shape[0] // num_shards
                        for rank in range(num_shards):
                            output[rank][key] = merged[slice_size * rank: slice_size * (rank + 1), :].clone().detach()
                elif kind in ('ParallelEmbedding', 'RowParallelLinear'):
                    with torch.no_grad():
                        merged = torch.cat(tensors, 1)
                        slice_size = merged.shape[1] // num_shards
                        for rank in range(num_shards):
                            output[rank][key] = merged[:, slice_size * rank: slice_size * (rank + 1)].clone().detach()
                else:
                    # kind is None: replicate the tensor unchanged on every shard
                    for rank in range(num_shards):
                        output[rank][key] = tensors[0]
                print(' out shapes=', [output[rank][key].shape for rank in range(num_shards)])
                print()
                break
    print('saving...')
    # write only this model size's entry, consistent with the single-shard branch above
    with open(os.path.join(args.out_path, 'params.json'), 'w') as fp:
        fp.write(json.dumps(params[model_size]))
    for rank in range(num_shards):
        print(' ', rank)
        torch.save(output[rank], os.path.join(args.out_path, 'consolidated.%02d.pth' % rank))
    print('done.')
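# The resulting consolidated.*.pth files plus params.json follow the layout of Meta's
# original checkpoints, so they can be fed to llama.cpp / alpaca.cpp conversion tools;
# the exact conversion and quantization commands depend on the version of those projects.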