lrl-modelcloud
committed on
Commit
•
6c8f29b
1
Parent(s):
2aaa79e
Update README.md
Browse files
README.md
CHANGED
@@ -46,7 +46,7 @@ This model has been quantized using [GPTQModel](https://github.com/ModelCloud/GP
|
|
46 |
from transformers import AutoTokenizer
|
47 |
from gptqmodel import GPTQModel
|
48 |
|
49 |
-
model_name = "ModelCloud/Llama-3.2-1B-Instruct-gptqmodel-4bit-
|
50 |
|
51 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
52 |
model = GPTQModel.from_quantized(model_name)
|
|
|
46 |
from transformers import AutoTokenizer
|
47 |
from gptqmodel import GPTQModel
|
48 |
|
49 |
+
model_name = "ModelCloud/Llama-3.2-1B-Instruct-gptqmodel-4bit-vortex-v1"
|
50 |
|
51 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
52 |
model = GPTQModel.from_quantized(model_name)
|