Update app.py
app.py
CHANGED
@@ -11,10 +11,18 @@ from pathlib import Path
 from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
 from langdetect import detect
 CONTEXT_WINDOW = 50_000
+from transformers import BitsAndBytesConfig
 
+quantization_config = BitsAndBytesConfig(
+    load_in_4bit=True,
+    bnb_4bit_quant_type="nf4",
+    bnb_4bit_compute_dtype="float16",
+    bnb_4bit_use_double_quant=True
+)
 llm = HuggingFaceEndpoint(
     repo_id="mistralai/Mistral-Nemo-Instruct-2407", #"mistralai/Mistral-7B-Instruct-v0.3",
     task="text-generation",
+    model_kwargs={"quantization_config": quantization_config},
     max_new_tokens=4096,
     temperature=0.5,
     do_sample=False,
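For reference, a BitsAndBytesConfig only takes effect when the model weights are loaded in the local process; a remotely hosted endpoint handles quantization server-side. Below is a minimal sketch (not part of this commit) of how the same 4-bit settings are typically applied when loading the model in-process with transformers, assuming a CUDA GPU with the bitsandbytes and accelerate packages installed:

# Sketch only: local 4-bit loading with the same quantization settings as the diff.
# Assumes a CUDA GPU plus bitsandbytes/accelerate; names below are illustrative.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "mistralai/Mistral-Nemo-Instruct-2407"

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # store weights as 4-bit NF4
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,  # run matmuls in fp16
    bnb_4bit_use_double_quant=True,        # also quantize the quantization constants
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=quantization_config,
    device_map="auto",                     # place layers across available devices
)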