Spaces:
Runtime error
Runtime error
Vision-CAIR
committed on
Commit
·
4f58782
1
Parent(s):
5db9e5b
Update minigpt4/models/base_model.py
Browse files
minigpt4/models/base_model.py
CHANGED
@@ -171,7 +171,7 @@ class BaseModel(nn.Module):
|
|
171 |
def init_llm(cls, llama_model_path, low_resource=False, low_res_device=0, lora_r=0,
|
172 |
lora_target_modules=["q_proj","v_proj"], **lora_kargs):
|
173 |
logging.info('Loading LLAMA')
|
174 |
-
llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False)
|
175 |
llama_tokenizer.pad_token = "$$"
|
176 |
|
177 |
if low_resource:
|
@@ -179,12 +179,14 @@ class BaseModel(nn.Module):
|
|
179 |
llama_model_path,
|
180 |
torch_dtype=torch.float16,
|
181 |
load_in_8bit=True,
|
182 |
-
device_map={'': low_res_device}
|
|
|
183 |
)
|
184 |
else:
|
185 |
llama_model = LlamaForCausalLM.from_pretrained(
|
186 |
llama_model_path,
|
187 |
torch_dtype=torch.float16,
|
|
|
188 |
)
|
189 |
|
190 |
if lora_r > 0:
|
|
|
171 |
def init_llm(cls, llama_model_path, low_resource=False, low_res_device=0, lora_r=0,
|
172 |
lora_target_modules=["q_proj","v_proj"], **lora_kargs):
|
173 |
logging.info('Loading LLAMA')
|
174 |
+
llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False, use_auth_token=True)
|
175 |
llama_tokenizer.pad_token = "$$"
|
176 |
|
177 |
if low_resource:
|
|
|
179 |
llama_model_path,
|
180 |
torch_dtype=torch.float16,
|
181 |
load_in_8bit=True,
|
182 |
+
device_map={'': low_res_device},
|
183 |
+
use_auth_token=True,
|
184 |
)
|
185 |
else:
|
186 |
llama_model = LlamaForCausalLM.from_pretrained(
|
187 |
llama_model_path,
|
188 |
torch_dtype=torch.float16,
|
189 |
+
use_auth_token=True,
|
190 |
)
|
191 |
|
192 |
if lora_r > 0:
|