Tags: Text Generation · Transformers · PyTorch · English · German · mistral · conversational · Inference Endpoints · text-generation-inference
bjoernp committed
Commit 57e3994
1 Parent(s): c2e582f

Upload tokenizer
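In effect, this commit replaces the ChatML-oriented special-token setup (<|im_start|>/<|im_end|> as BOS/EOS, plus extra <CLS>/<SEP>/<EOD>/<MASK>/<PAD> tokens) with the stock Llama/Mistral tokens <s> and </s>, while keeping the ChatML chat template itself.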
special_tokens_map.json CHANGED
@@ -1,13 +1,10 @@
 {
   "additional_special_tokens": [
-    "<|im_start|>",
-    "<|im_end|>"
+    "<unk>",
+    "<s>",
+    "</s>"
   ],
-  "bos_token": "<|im_start|>",
-  "cls_token": "<CLS>",
-  "eos_token": "<|im_end|>",
-  "mask_token": "<MASK>",
-  "pad_token": "<PAD>",
-  "sep_token": "<SEP>",
+  "bos_token": "<s>",
+  "eos_token": "</s>",
   "unk_token": "<unk>"
 }
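In special_tokens_map.json, the ChatML tokens and the extra <CLS>/<SEP>/<MASK>/<PAD> entries are dropped in favor of the standard Llama/Mistral pair <s>/</s>. A minimal sketch for verifying the resulting mapping after this commit; the repo id "your-org/your-model" is a placeholder, and only the revision hash comes from this page:

# Verify the special-token mapping introduced by this commit.
# NOTE: "your-org/your-model" is a placeholder; substitute the actual
# repository this tokenizer belongs to.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model", revision="57e3994")

print(tok.bos_token)                  # expected: <s>
print(tok.eos_token)                  # expected: </s>
print(tok.unk_token)                  # expected: <unk>
print(tok.additional_special_tokens)  # expected: ['<unk>', '<s>', '</s>']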
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -25,46 +25,6 @@
       "special": true
     },
     "32000": {
-      "content": "<CLS>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32001": {
-      "content": "<SEP>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32002": {
-      "content": "<EOD>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    },
-    "32003": {
-      "content": "<MASK>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32004": {
-      "content": "<PAD>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32005": {
       "content": "<|im_start|>",
       "lstrip": false,
       "normalized": false,
@@ -72,7 +32,7 @@
       "single_word": false,
       "special": true
     },
-    "32006": {
+    "32001": {
       "content": "<|im_end|>",
       "lstrip": false,
       "normalized": false,
@@ -82,22 +42,20 @@
     }
   },
   "additional_special_tokens": [
-    "<|im_start|>",
-    "<|im_end|>"
+    "<unk>",
+    "<s>",
+    "</s>"
   ],
-  "bos_token": "<|im_start|>",
+  "bos_token": "<s>",
   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
   "clean_up_tokenization_spaces": false,
-  "cls_token": "<CLS>",
-  "eos_token": "<|im_end|>",
+  "eos_token": "</s>",
   "legacy": true,
-  "mask_token": "<MASK>",
   "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "<PAD>",
-  "padding_side": "right",
-  "sep_token": "<SEP>",
+  "pad_token": null,
   "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
   "tokenizer_class": "LlamaTokenizer",
   "unk_token": "<unk>",
-  "use_default_system_prompt": false
+  "use_default_system_prompt": true
 }
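Although bos_token and eos_token revert to <s> and </s>, the chat_template still wraps every turn in <|im_start|>/<|im_end|> (now ids 32000 and 32001), so ChatML formatting happens at templating time rather than through the tokenizer's BOS/EOS settings. A minimal sketch of rendering a prompt with this template via apply_chat_template; the repo id is again a placeholder:

# Render a ChatML-style prompt using the chat_template from this config.
# NOTE: "your-org/your-model" is a placeholder repo id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model", revision="57e3994")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Wie geht es dir?"},
]

# tokenize=False returns the rendered string; add_generation_prompt=True
# appends the trailing '<|im_start|>assistant\n' from the template.
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Wie geht es dir?<|im_end|>
# <|im_start|>assistant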