danielhanchen committed
Commit: 45764a0
1 Parent(s): eb2c138

Upload tokenizer
Files changed:
- README.md (+0 -1)
- tokenizer_config.json (+1 -2)
README.md CHANGED

@@ -8,7 +8,6 @@ tags:
 - transformers
 - gemma
 - bnb
--
 ---
 
 # Finetune Mistral, Gemma, Llama 2-5x faster with 70% less memory via Unsloth!
tokenizer_config.json CHANGED

@@ -1742,9 +1742,8 @@
   "bos_token": "<bos>",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<eos>",
-  "model_max_length":
+  "model_max_length": 1000000000000000019884624838656,
   "pad_token": "<pad>",
-  "padding_side": "right",
   "sp_model_kwargs": {},
   "spaces_between_special_tokens": false,
   "tokenizer_class": "GemmaTokenizer",
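For reference, a minimal sketch of how the updated tokenizer_config.json behaves when loaded with transformers; the repository id below is an assumption for illustration and is not stated anywhere in this commit.

# Minimal sketch, assuming "unsloth/gemma-7b-bnb-4bit" stands in for this repo's Hub id.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("unsloth/gemma-7b-bnb-4bit")  # assumed repo id

# "model_max_length": 1000000000000000019884624838656 is int(1e30), the
# transformers sentinel for "no explicit limit", so pass max_length explicitly
# when truncating or padding.
print(tok.model_max_length)
enc = tok("Hello Gemma", truncation=True, max_length=8192)

# "padding_side" was removed from tokenizer_config.json, so the tokenizer falls
# back to the class default; set it per instance if a specific side is required.
tok.padding_side = "right"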