Update README.md
Browse files
README.md
CHANGED
@@ -24,13 +24,13 @@ The following `bitsandbytes` quantization config was used during training:
 24 | ```python
 25 | from peft import PeftModel, PeftConfig
 26 | from huggingface_hub import login
 27 | -from transformers import AutoModelForCausalLM, AutoTokenizer
 28 |
 29 | login("[YOUR HF TOKEN HERE FOR USING LLAMA]")
 30 | config = PeftConfig.from_pretrained("ChangeIsKey/llama-7b-lexical-substitution")
 31 | base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
 32 |
 33 | -tokenizer = AutoTokenizer.from_pretrained(
 34 | tokenizer.add_special_tokens({ "additional_special_tokens":[AddedToken("<|s|>"), AddedToken("<|answer|>"), AddedToken("<|end|>")]})
 35 | if tokenizer.pad_token is None:
 36 |     tokenizer.add_special_tokens({'pad_token': '[PAD]'})
 24 | ```python
 25 | from peft import PeftModel, PeftConfig
 26 | from huggingface_hub import login
 27 | +from transformers import AutoModelForCausalLM, AutoTokenizer, AddedToken
 28 |
 29 | login("[YOUR HF TOKEN HERE FOR USING LLAMA]")
 30 | config = PeftConfig.from_pretrained("ChangeIsKey/llama-7b-lexical-substitution")
 31 | base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
 32 |
 33 | +tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", use_fast=False, trust_remote_code=True)
 34 | tokenizer.add_special_tokens({ "additional_special_tokens":[AddedToken("<|s|>"), AddedToken("<|answer|>"), AddedToken("<|end|>")]})
 35 | if tokenizer.pad_token is None:
 36 |     tokenizer.add_special_tokens({'pad_token': '[PAD]'})