Xenova HF staff committed on
Commit
bdf71d7
1 Parent(s): 59bb272

Fix tokenizer_config.json

Browse files

Without this fix, if you do:
```py
from transformers import pipeline
pipeline('text-generation', 'JackFram/llama-160m')
```

you get an error:
```
RecursionError: maximum recursion depth exceeded while calling a Python object
```

See [this issue](https://github.com/huggingface/transformers/issues/25036) for more info.

Files changed (1)
  1. tokenizer_config.json +3 -3
tokenizer_config.json CHANGED
@@ -3,7 +3,7 @@
3
  "add_eos_token": false,
4
  "bos_token": {
5
  "__type": "AddedToken",
6
- "content": "",
7
  "lstrip": false,
8
  "normalized": true,
9
  "rstrip": false,
@@ -12,7 +12,7 @@
12
  "clean_up_tokenization_spaces": false,
13
  "eos_token": {
14
  "__type": "AddedToken",
15
- "content": "",
16
  "lstrip": false,
17
  "normalized": true,
18
  "rstrip": false,
@@ -24,7 +24,7 @@
24
  "tokenizer_class": "LlamaTokenizer",
25
  "unk_token": {
26
  "__type": "AddedToken",
27
- "content": "",
28
  "lstrip": false,
29
  "normalized": true,
30
  "rstrip": false,
 
3
  "add_eos_token": false,
4
  "bos_token": {
5
  "__type": "AddedToken",
6
+ "content": "<s>",
7
  "lstrip": false,
8
  "normalized": true,
9
  "rstrip": false,
 
12
  "clean_up_tokenization_spaces": false,
13
  "eos_token": {
14
  "__type": "AddedToken",
15
+ "content": "</s>",
16
  "lstrip": false,
17
  "normalized": true,
18
  "rstrip": false,
 
24
  "tokenizer_class": "LlamaTokenizer",
25
  "unk_token": {
26
  "__type": "AddedToken",
27
+ "content": "<unk>",
28
  "lstrip": false,
29
  "normalized": true,
30
  "rstrip": false,