Text Generation
Transformers
Safetensors
PyTorch
English
mistral
finetuned
quantized
4-bit precision
AWQ
conversational
Inference Endpoints
text-generation-inference
awq

Added chat_template to tokenizer_config.json

#1
Files changed (1) hide show
  1. tokenizer_config.json +2 -1
tokenizer_config.json CHANGED
@@ -37,6 +37,7 @@
37
  },
38
  "additional_special_tokens": [],
39
  "bos_token": "<s>",
 
40
  "clean_up_tokenization_spaces": false,
41
  "eos_token": "<|im_end|>",
42
  "legacy": true,
@@ -11952,7 +11953,7 @@
11952
  "": 29272,
11953
  "ž": 30924,
11954
  "Ÿ": 29208,
11955
- " ": 29000,
11956
  "¡": 29028,
11957
  "¢": 29656,
11958
  "£": 28922,
 
37
  },
38
  "additional_special_tokens": [],
39
  "bos_token": "<s>",
40
+ "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|im_start|>user\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'system' %}\n{{ '<|im_start|>system\n' + message['content'] + '<|im_end|>' }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|im_start|>assistant\n' + message['content'] + '<|im_end|>' }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|im_start|>assistant' }}\n{% endif %}\n{% endfor %}",
41
  "clean_up_tokenization_spaces": false,
42
  "eos_token": "<|im_end|>",
43
  "legacy": true,
 
11953
  "": 29272,
11954
  "ž": 30924,
11955
  "Ÿ": 29208,
11956
+ " ": 29000,
11957
  "¡": 29028,
11958
  "¢": 29656,
11959
  "£": 28922,