import torch
import torch.nn as nn

from transformers import AutoTokenizer, AutoModelForCausalLM
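
# Source model supplying the transformer weights, target model used only for
# its tokenizer, and the output directory for the transplanted checkpoint.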
source_dir = "/mnt/str/models/qwen2-0.5b-instruct"
target_dir = "/mnt/str/models/llama3-70b-instruct"
output_dir = "/mnt/str/temp/transplant"
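
# Load the source model and both tokenizers. Remember whether the source model
# ties its input embeddings to the output head, and size the new vocabulary
# from the highest token id in the target tokenizer (added tokens can push
# this past vocab_size).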
model = AutoModelForCausalLM.from_pretrained(source_dir, device_map = "auto")
tokenizer_source = AutoTokenizer.from_pretrained(source_dir)
tokenizer_target = AutoTokenizer.from_pretrained(target_dir)
tied = model.config.tie_word_embeddings
target_vocab_size = max(tokenizer_target.vocab.values()) + 1
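
# Allocate a new embedding matrix with one row per target token, matching the
# source embeddings' hidden size, dtype and device.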
old_emb = model.model.embed_tokens.weight
new_emb = torch.empty((target_vocab_size, model.config.hidden_size),
                      dtype = old_emb.dtype, device = old_emb.device)
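
# Allocate a matching matrix for the output projection (lm_head).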
old_head = model.lm_head.weight
new_head = torch.empty((target_vocab_size, model.config.hidden_size),
                       dtype = old_head.dtype, device = old_head.device)
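
# Build each new row by decoding the target token id to its surface string
# (special tokens included), re-encoding that string with the source tokenizer,
# and averaging the source embedding/head rows it maps to. Tokens shared by
# both vocabularies reduce to a direct copy of a single source row.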
for idx in range(target_vocab_size):
    # Note: this assumes every target id decodes to text that re-encodes to at
    # least one source token; an empty encoding would average to a NaN row.
    decode = tokenizer_target.decode([idx], skip_special_tokens = False)
    encode = tokenizer_source.encode(decode, add_special_tokens = False, return_tensors = "pt")
    new_emb[idx] = old_emb[encode.flatten()].mean(dim = 0)
    new_head[idx] = old_head[encode.flatten()].mean(dim = 0)
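
# Swap the new embedding table into the model and update its bookkeeping field.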
model.model.embed_tokens.weight = nn.Parameter(new_emb, requires_grad = False)
model.model.embed_tokens.num_embeddings = target_vocab_size
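
# Swap in the new output head. Its out_features must match target_vocab_size
# (highest target id + 1), not tokenizer_target.vocab_size, which can be
# smaller when added tokens extend the vocabulary.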
model.lm_head.weight = nn.Parameter(new_head, requires_grad = False)
model.lm_head.out_features = target_vocab_size
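
# Propagate the new vocabulary size and the target tokenizer's special token
# ids into the model config.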
model.vocab_size = target_vocab_size
model.config.vocab_size = target_vocab_size
model.config.bos_token_id = tokenizer_target.bos_token_id
model.config.eos_token_id = tokenizer_target.eos_token_id
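
# Save the modified model together with the target tokenizer. The original
# weight-tying flag is written back to the config so a tied source model is
# re-tied when the saved checkpoint is loaded.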
model.config.tie_word_embeddings = tied
model.save_pretrained(output_dir)
tokenizer_target.save_pretrained(output_dir)