import torch
from transformers import AutoTokenizer, LlamaForCausalLM
# Load the vocabulary-extended Llama 2 checkpoint
llama2_checkpoint_path = "/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit"
llama2_tokenizer = AutoTokenizer.from_pretrained(llama2_checkpoint_path)
llama2_model = LlamaForCausalLM.from_pretrained(llama2_checkpoint_path)
# Load the original Llama 2 model
original_llama2_model_name = "/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9"
original_llama2_tokenizer = AutoTokenizer.from_pretrained(original_llama2_model_name)
original_llama2_model = LlamaForCausalLM.from_pretrained(original_llama2_model_name)
# Extract the input embedding matrices for a direct comparison
llama2_embedding_weights = llama2_model.get_input_embeddings().weight
original_llama2_embedding_weights = original_llama2_model.get_input_embeddings().weight
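# A minimal sketch of a direct embedding comparison, assuming the extended
# checkpoint keeps the original vocabulary rows in place and only appends new
# rows at the end of the embedding matrix (so only the leading rows are
# comparable despite the different shapes).
original_vocab_size = original_llama2_embedding_weights.shape[0]
shared_rows_equal = torch.allclose(
    llama2_embedding_weights[:original_vocab_size],
    original_llama2_embedding_weights,
    atol=1e-7,
)
print(f"Embedding rows for the original vocabulary match: {shared_rows_equal}")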
# Compare every parameter tensor between the two checkpoints
for (name1, param1), (name2, param2) in zip(
    llama2_model.named_parameters(), original_llama2_model.named_parameters()
):
    try:
        if not torch.allclose(param1, param2, atol=1e-7):
            print(f"Different weights in {name1} and {name2}")
        else:
            print(f"Same weights in {name1} and {name2}")
    except RuntimeError:
        # Shapes differ (e.g. the resized embedding and output layers),
        # so torch.allclose cannot be applied directly.
        print(f"Couldn't do allclose for layer {name1}")