import torch

from transformers import AutoTokenizer, LlamaForCausalLM

# Llama-2 7B checkpoint with a merged multilingual tokenizer and, per the
# checkpoint name, mean-initialized embeddings for the added tokens.
llama2_checkpoint_path = "/mnt/data/bpop/multilinguality_tower/extended-models/llama-2-7b-hf-merged-multi-32k-meaninit"
llama2_tokenizer = AutoTokenizer.from_pretrained(llama2_checkpoint_path)
llama2_model = LlamaForCausalLM.from_pretrained(llama2_checkpoint_path)

# Original (unextended) Llama-2 7B checkpoint, loaded from a local
# Hugging Face cache snapshot.
original_llama2_model_name = "/mnt/data_2/cache/models--meta-llama--Llama-2-7b-hf/snapshots/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9"
original_llama2_tokenizer = AutoTokenizer.from_pretrained(original_llama2_model_name)
original_llama2_model = LlamaForCausalLM.from_pretrained(original_llama2_model_name)
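
# Quick sanity check, assuming the merged tokenizer strictly extends the original
# one: the extended tokenizer should report the larger vocabulary.
print(f"Extended tokenizer vocab size: {len(llama2_tokenizer)}")
print(f"Original tokenizer vocab size: {len(original_llama2_tokenizer)}")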

# Input embedding matrices of the two models; the extended checkpoint should
# have extra rows for its added vocabulary.
llama2_embedding_weights = llama2_model.get_input_embeddings().weight
original_llama2_embedding_weights = original_llama2_model.get_input_embeddings().weight
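
# Optional sketch, assuming the extension only appended rows after the original
# vocabulary ids: the first rows of the extended embedding matrix should then
# match the original embedding matrix exactly.
shared_vocab_size = original_llama2_embedding_weights.shape[0]
if torch.equal(
    llama2_embedding_weights[:shared_vocab_size], original_llama2_embedding_weights
):
    print("Original embedding rows are preserved in the extended model")
else:
    print("Original embedding rows were modified in the extended model")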

# Compare every parameter tensor between the two models. Tensors whose shapes
# differ (e.g. the embedding and LM-head matrices of the extended model) make
# torch.allclose raise a RuntimeError, which is reported instead of crashing.
for (name1, param1), (name2, param2) in zip(
    llama2_model.named_parameters(), original_llama2_model.named_parameters()
):
    try:
        if not torch.allclose(param1, param2, atol=1e-7):
            print(f"Different weights in {name1} and {name2}")
        else:
            print(f"Same weights in {name1} and {name2}")
    except RuntimeError:
        print(f"Couldn't do allclose for layer {name1}")

a = 1  # no-op; convenient line for setting a debugger breakpoint
|