This is a Mixture-of-Experts (MoE) model based on meta-llama/Llama-3.2-3B, combining four 3B experts into a single checkpoint.
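The expert layout can be checked from the checkpoint's config before downloading the full weights. This is a hypothetical inspection sketch: the field names `num_local_experts` and `num_experts_per_tok` assume a Mixtral-style MoE config, which merged "x4" checkpoints commonly use; `getattr` with a fallback keeps it safe if this checkpoint's custom config names them differently.

```python
# Hypothetical sketch: assumes a Mixtral-style MoE config; the field names may
# differ for this checkpoint, so getattr falls back to "n/a" instead of raising.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("cloudyu/Llama-3.2-3Bx4", trust_remote_code=True)
print("experts per layer:", getattr(config, "num_local_experts", "n/a"))   # the "x4" suggests 4
print("experts per token:", getattr(config, "num_experts_per_tok", "n/a"))
```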
Example code:

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_path = "cloudyu/Llama-3.2-3Bx4"

# trust_remote_code is required because the MoE architecture ships with the checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_path, use_default_system_prompt=False)
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float32, trust_remote_code=True, device_map="cpu"
)

# Inspect the module tree and the output head to confirm the model loaded correctly
print(model)
print(model.lm_head.weight)

# Simple REPL: keep generating until the user enters an empty prompt
prompt = input("please input prompt:")
while len(prompt) > 0:
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cpu")
    generation_output = model.generate(
        input_ids=input_ids, max_new_tokens=500, repetition_penalty=1.2
    )
    print(tokenizer.decode(generation_output[0]))
    prompt = input("please input prompt:")
```
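The example above runs on CPU in float32, which is slow for a 4-expert model. For faster inference, the same model can be loaded in half precision on a GPU; this is a minimal sketch assuming a CUDA device with enough memory is available (only the CPU/float32 path is documented above).

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_path = "cloudyu/Llama-3.2-3Bx4"
tokenizer = AutoTokenizer.from_pretrained(model_path, use_default_system_prompt=False)

# float16 halves memory use; device_map="auto" places the weights on available GPUs
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float16, trust_remote_code=True, device_map="auto"
)

input_ids = tokenizer("Hello, my name is", return_tensors="pt").input_ids.to(model.device)
output = model.generate(input_ids=input_ids, max_new_tokens=100, repetition_penalty=1.2)
print(tokenizer.decode(output[0]))
```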