Xenova (HF staff) committed on
Commit
e6d1e01
1 Parent(s): c748e17

Add default chat template to tokenizer_config.json


[Automated] This PR adds the default chat template to the tokenizer config, allowing the model to be used with the new conversational widget (see [PR](https://github.com/huggingface/huggingface.js/pull/457)).

If this default does not match your model, please set `tokenizer.chat_template` to an appropriate template. See https://huggingface.co/docs/transformers/main/chat_templating for more information.
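For reference, a minimal sketch of overriding the default via the `transformers` API (the repo id, save directory, and the simple "role: content" format below are placeholders, not this model's actual format):

```python
from transformers import AutoTokenizer

# Placeholder repo id; substitute the actual model repository.
tokenizer = AutoTokenizer.from_pretrained("your-org/your-model")

# Example only: a plain "role: content" template; use whatever format your model was trained on.
tokenizer.chat_template = (
    "{% for message in messages %}"
    "{{ message['role'] + ': ' + message['content'] + '\n' }}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ 'assistant: ' }}{% endif %}"
)

# Preview the rendered prompt, then persist the template back to tokenizer_config.json.
messages = [{"role": "user", "content": "Hello!"}]
print(tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
tokenizer.save_pretrained("your-local-dir")
```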

Files changed (1)
  1. tokenizer_config.json +3 -2
tokenizer_config.json CHANGED
@@ -3,5 +3,6 @@
  "clean_up_tokenization_spaces": true,
  "eos_token": "<|endoftext|>",
  "model_max_length": 2048,
- "tokenizer_class": "PreTrainedTokenizerFast"
- }
+ "tokenizer_class": "PreTrainedTokenizerFast",
+ "chat_template": "{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
+ }
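Chat templates are Jinja templates, so the default added above can be previewed without loading the tokenizer. A minimal sketch with a made-up conversation:

```python
from jinja2 import Template

# The ChatML-style default template added in this commit.
chat_template = (
    "{% for message in messages %}"
    "{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}"
    "{% endfor %}"
    "{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
)

messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi there."},
]

# Renders each turn between <|im_start|>/<|im_end|> markers, matching what
# tokenizer.apply_chat_template(..., tokenize=False) would produce.
print(Template(chat_template).render(messages=messages, add_generation_prompt=True))
```

With `add_generation_prompt=True`, the output ends with an open `<|im_start|>assistant` turn for the model to complete.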