Update app.py
app.py CHANGED
@@ -4,17 +4,19 @@ import torch
 from peft import PeftModel, PeftConfig
 base_model = "TinyPixel/Llama-2-7B-bf16-sharded"
 tuned_adapter = "newronai/llama-2-7b-QLoRA-Trial1"
-bnb_config = BitsAndBytesConfig(
-    load_in_4bit=True,
-    bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.float16,
-)
+# bnb_config = BitsAndBytesConfig(
+#     load_in_4bit=True,
+#     bnb_4bit_quant_type="nf4",
+#     bnb_4bit_compute_dtype=torch.float16,
+# )
+# bnb_config = BitsAndBytesConfig()
 
 
 config = PeftConfig.from_pretrained(tuned_adapter)
 model = AutoModelForCausalLM.from_pretrained(base_model,
                                              use_cache="cache",
+
+                                             # quantization_config=bnb_config
                                              )
 
 model = PeftModel.from_pretrained(model, tuned_adapter)
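For reference, below is a minimal sketch of the QLoRA-style load this file appears to be working toward, with the 4-bit quantization that this commit comments out re-enabled. It is a reconstruction under assumptions, not the Space's actual code: the tokenizer load, device_map="auto", and use_cache=True lines are additions (the committed file passes use_cache="cache", but transformers expects a boolean here), and it assumes transformers, peft, bitsandbytes, and accelerate are installed.

# Hypothetical sketch: load the base model in 4-bit and attach the QLoRA adapter.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

base_model = "TinyPixel/Llama-2-7B-bf16-sharded"
tuned_adapter = "newronai/llama-2-7b-QLoRA-Trial1"

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # quantize weights to 4-bit at load time
    bnb_4bit_quant_type="nf4",             # NormalFloat4, the QLoRA default
    bnb_4bit_compute_dtype=torch.float16,  # run matmuls in fp16
)

tokenizer = AutoTokenizer.from_pretrained(base_model)  # assumption: not shown in the diff
model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=bnb_config,
    device_map="auto",  # requires accelerate; spreads shards across available devices
    use_cache=True,     # boolean, unlike the string "cache" in the committed file
)
model = PeftModel.from_pretrained(model, tuned_adapter)  # overlay the LoRA weights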