Spaces:
Runtime error
Runtime error
change model to meta-llama/Llama-2-7b-chat-hf
Browse files — backend/query_llm.py (+2 −2)
backend/query_llm.py
CHANGED
@@ -8,7 +8,7 @@ from typing import Any, Dict, Generator, List
|
|
8 |
from huggingface_hub import InferenceClient
|
9 |
from transformers import AutoTokenizer
|
10 |
|
11 |
-
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-…") [removed line truncated in page extraction — full original model id cut off]
|
12 |
|
13 |
temperature = 0.9
|
14 |
top_p = 0.6
|
@@ -18,7 +18,7 @@ OPENAI_KEY = getenv("OPENAI_API_KEY")
|
|
18 |
HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
|
19 |
|
20 |
hf_client = InferenceClient(
|
21 |
-
"meta-llama/Llama-2-…", [removed line truncated in page extraction — full original model id cut off]
|
22 |
token=HF_TOKEN
|
23 |
)
|
24 |
|
|
|
8 |
from huggingface_hub import InferenceClient
|
9 |
from transformers import AutoTokenizer
|
10 |
|
11 |
+
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf") # model name
|
12 |
|
13 |
temperature = 0.9
|
14 |
top_p = 0.6
|
|
|
18 |
HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
|
19 |
|
20 |
hf_client = InferenceClient(
|
21 |
+
"meta-llama/Llama-2-7b-chat-hf",
|
22 |
token=HF_TOKEN
|
23 |
)
|
24 |
|