4bit
/

Text Generation
Transformers
PyTorch
code
mpt
instruct
self-instruct
custom_code
text-generation-inference
Inference Endpoints
Replit-v1-CodeInstruct-3B / tokenizer_config.json
camenduru's picture
thanks to teknium ❤
8b543a0
raw
history blame contribute delete
425 Bytes
{
"auto_map": {
"AutoTokenizer": [
"replit/replit-code-v1-3b--replit_lm_tokenizer.ReplitLMTokenizer",
null
]
},
"bos_token": null,
"clean_up_tokenization_spaces": false,
"eos_token": "<|endoftext|>",
"model_max_length": 512,
"pad_token": "<|pad|>",
"padding_side": "right",
"sep_token": null,
"sp_model_kwargs": {},
"tokenizer_class": "ReplitLMTokenizer",
"unk_token": "<|unk|>"
}