Spaces: Running

Commit: add_generation_prompt=True

Browse files — gen_api_answer.py (+2 −2)

gen_api_answer.py — CHANGED
@@ -89,7 +89,7 @@ def get_prometheus_response(model_name, prompt, system_prompt=None, max_tokens=5
  89      # Apply chat template
  90      model_id = "prometheus-eval/prometheus-7b-v2.0"
  91      tokenizer = AutoTokenizer.from_pretrained(model_id)
  92  -   formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False)
  93
  94      payload = {
  95          "inputs": formatted_prompt,
@@ -127,7 +127,7 @@ def get_atla_response(model_name, prompt, system_prompt=None, max_tokens=500, te
 127      # Apply chat template
 128      model_id = "AtlaAI/Atla-8B-preview"  # Update this if using a different model
 129      tokenizer = AutoTokenizer.from_pretrained(model_id)
 130  -   formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False)
 131
 132      payload = {
 133          "inputs": formatted_prompt,
  89      # Apply chat template
  90      model_id = "prometheus-eval/prometheus-7b-v2.0"
  91      tokenizer = AutoTokenizer.from_pretrained(model_id)
  92  +   formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
  93
  94      payload = {
  95          "inputs": formatted_prompt,
 127      # Apply chat template
 128      model_id = "AtlaAI/Atla-8B-preview"  # Update this if using a different model
 129      tokenizer = AutoTokenizer.from_pretrained(model_id)
 130  +   formatted_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
 131
 132      payload = {
 133          "inputs": formatted_prompt,