Text Generation
Transformers
PyTorch
Safetensors
longllama
text-generation-inference
custom_code
long_llama_3b / tokenizer_config.json
Szymon Tworkowski
colab test
b65129a
raw
history blame
No virus
617 Bytes
{"bos_token": {"__type": "AddedToken", "content": "<s>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}, "eos_token": {"__type": "AddedToken", "content": "</s>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}, "model_max_length": 1000000000000000019884624838656, "tokenizer_class": "LlamaTokenizer", "unk_token": {"__type": "AddedToken", "content": "<unk>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}, "add_bos_token": true, "add_eos_token": false, "pad_token": null, "sp_model_kwargs": {}, "clean_up_tokenization_spaces": false}