Update README.md
README.md
CHANGED
@@ -35,7 +35,7 @@ quantization_config=BitsAndBytesConfig(
     bnb_4bit_quant_type="nf4",
     bnb_4bit_compute_dtype=torch.bfloat16,
 )
-model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True, torch_dtype=torch.bfloat16
+model = AutoModelForCausalLM.from_pretrained(model_info, trust_remote_code=True, torch_dtype=torch.bfloat16,
     # quantization_config=quantization_config, # Uncomment this line for 4bit quantization
 )
 model.eval()
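For context, a minimal sketch of the README snippet after this change. It assumes `model_info` is a Hugging Face model id defined earlier in the README and that the `BitsAndBytesConfig` also sets `load_in_4bit=True`; neither appears in the visible hunk, so both are placeholders. The trailing comma added by this commit lets the `quantization_config` line be uncommented without causing a syntax error.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

model_info = "org/model-name"  # hypothetical placeholder for the README's model id

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,                # assumption: not shown in the visible hunk
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    model_info,
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,       # trailing comma added by this commit
    # quantization_config=quantization_config,  # Uncomment this line for 4bit quantization
)
model.eval()
```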