Update README.md
Browse files
README.md
CHANGED
@@ -47,6 +47,7 @@ We collect 120G novels as the pretraining data for LongLM.
|
|
47 |
```python
|
48 |
from transformers import T5Tokenizer, T5ForConditionalGeneration
|
49 |
tokenizer = T5Tokenizer.from_pretrained('LongLM-large')
|
|
|
50 |
model = T5ForConditionalGeneration.from_pretrained('LongLM-large')
|
51 |
```
|
52 |
|
|
|
47 |
```python
|
48 |
from transformers import T5Tokenizer, T5ForConditionalGeneration
|
49 |
tokenizer = T5Tokenizer.from_pretrained('LongLM-large')
|
50 |
+
tokenizer.add_special_tokens({"additional_special_tokens": ["<extra_id_%d>"%d for d in range(100)]})
|
51 |
model = T5ForConditionalGeneration.from_pretrained('LongLM-large')
|
52 |
```
|
53 |
|