Update utils.py
utils.py (changed)
@@ -83,8 +83,8 @@ def load_tokenizer_and_model(base_model,load_8bit=False):
     if device == "cuda":
         model = AutoModelForCausalLM.from_pretrained(
             base_model,
-            load_in_8bit=load_8bit,
-            torch_dtype=torch.float16,
+            #load_in_8bit=load_8bit,
+            #torch_dtype=torch.float16,
             device_map="auto",
         )
     else:
@@ -92,8 +92,8 @@ def load_tokenizer_and_model(base_model,load_8bit=False):
             base_model, device_map={"": device}, low_cpu_mem_usage=True
         )

-    if not load_8bit:
-        model.half() # seems to fix bugs for some users.
+    #if not load_8bit:
+        #model.half() # seems to fix bugs for some users.

     model.eval()
     return tokenizer,model,device
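For context: the commit comments out the 8-bit quantized load (load_in_8bit) and the float16 cast (torch_dtype=torch.float16 and model.half()), so on both branches the model is loaded with the Transformers defaults, i.e. in full fp32 precision. Below is a minimal sketch of how the whole loader might read after this change; only the lines shown in the hunks above are confirmed by the diff, while the device detection and the tokenizer call are assumptions added for completeness.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def load_tokenizer_and_model(base_model, load_8bit=False):
    # Assumption: device is detected here; in the original file it may be
    # a module-level variable instead.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    # Assumption: the tokenizer comes from the same checkpoint as the model.
    tokenizer = AutoTokenizer.from_pretrained(base_model)

    if device == "cuda":
        model = AutoModelForCausalLM.from_pretrained(
            base_model,
            # load_in_8bit=load_8bit,     # disabled by this commit
            # torch_dtype=torch.float16,  # disabled by this commit
            device_map="auto",
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(
            base_model, device_map={"": device}, low_cpu_mem_usage=True
        )

    # if not load_8bit:
    #     model.half()  # seems to fix bugs for some users.

    model.eval()
    return tokenizer, model, device

Calling load_tokenizer_and_model("path/to/model") (a placeholder checkpoint name) would then return the tokenizer, the model in default fp32 precision, and the device string; note that fp32 weights take roughly four times the memory of the 8-bit form that was disabled here.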