Text Generation
Transformers
PyTorch
English
mixtral
conversational
Inference Endpoints
text-generation-inference
ehartford committed
Commit 2123ea0
1 Parent(s): 3706d99

Delete tokenizer_config.json

Files changed (1)
  1. tokenizer_config.json +0 -60
tokenizer_config.json DELETED
@@ -1,60 +0,0 @@
-{
-  "add_bos_token": true,
-  "add_eos_token": false,
-  "added_tokens_decoder": {
-    "0": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32000": {
-      "content": "<|im_end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32001": {
-      "content": "<|im_start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": false
-    }
-  },
-  "additional_special_tokens": [],
-  "bos_token": "<s>",
-  "clean_up_tokenization_spaces": false,
-  "eos_token": "<|im_end|>",
-  "legacy": true,
-  "model_max_length": 1000000000000000019884624838656,
-  "pad_token": "</s>",
-  "sp_model_kwargs": {},
-  "spaces_between_special_tokens": false,
-  "tokenizer_class": "LlamaTokenizer",
-  "trust_remote_code": true,
-  "unk_token": "<unk>",
-  "use_default_system_prompt": false,
-  "use_fast": true
-}
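For context, the deleted file configured ChatML-style chat tokens on top of the base Llama/Mistral vocabulary: ids 32000 and 32001 decode to `<|im_end|>` and `<|im_start|>`, `eos_token` points at `<|im_end|>` so generation stops at the end of an assistant turn, and the very large `model_max_length` is just transformers' `int(1e30)` sentinel for "no limit set". Below is a minimal sketch of how transformers interprets these fields when loading a checkpoint that still ships this config; the repo id is a placeholder, not this commit.

```python
from transformers import AutoTokenizer

# Placeholder repo id: any checkpoint shipping the tokenizer_config.json above.
tok = AutoTokenizer.from_pretrained("some-org/some-chatml-model")

# "add_bos_token": true / "add_eos_token": false ->
# encoding prepends <s> but does not append an eos token.
ids = tok("Hello")["input_ids"]
assert ids[0] == tok.bos_token_id
assert ids[-1] != tok.eos_token_id

# "added_tokens_decoder" maps ids 32000/32001 to the ChatML markers,
# and "eos_token": "<|im_end|>" makes generate() stop at turn boundaries.
assert tok.convert_ids_to_tokens(32000) == "<|im_end|>"
assert tok.convert_ids_to_tokens(32001) == "<|im_start|>"
assert tok.eos_token == "<|im_end|>"

# The usual ChatML prompt layout these markers imply:
prompt = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\nHello!<|im_end|>\n"
    "<|im_start|>assistant\n"
)
```

Pointing `eos_token` at `<|im_end|>` rather than the base `</s>` is what lets generation terminate at the end of each assistant turn in ChatML-formatted conversations.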