FrancescoPeriti committed
Commit 53c910a · verified · 1 Parent(s): 79fe4a6

Update README.md

Files changed (1):
  1. README.md +2 -2
README.md CHANGED
@@ -24,13 +24,13 @@ The following `bitsandbytes` quantization config was used during training:
 ```python
 from peft import PeftModel, PeftConfig
 from huggingface_hub import login
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, AddedToken
 
 login("[YOUR HF TOKEN HERE FOR USING LLAMA]")
 config = PeftConfig.from_pretrained("ChangeIsKey/llama-7b-lexical-substitution")
 base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
 
-tokenizer = AutoTokenizer.from_pretrained(base_model_name, use_fast=False, trust_remote_code=True, cache_dir=LLMs_CACHE_DIR)
+tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", use_fast=False, trust_remote_code=True)
 tokenizer.add_special_tokens({ "additional_special_tokens":[AddedToken("<|s|>"), AddedToken("<|answer|>"), AddedToken("<|end|>")]})
 if tokenizer.pad_token is None:
     tokenizer.add_special_tokens({'pad_token': '[PAD]'})
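For reference, the hunk fixes two bugs in the README snippet: the old version referenced the undefined names `base_model_name` and `LLMs_CACHE_DIR`, and used `AddedToken` without importing it. The hunk still ends before `PeftModel` and `PeftConfig` are actually used; below is a minimal, self-contained sketch of the plausible continuation, assuming the standard `peft` API. The `resize_token_embeddings` call and the `model` variable are illustrative additions, not part of the diff.

```python
# Hedged sketch: how the README snippet plausibly continues after the hunk.
from peft import PeftModel, PeftConfig
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer, AddedToken

login("[YOUR HF TOKEN HERE FOR USING LLAMA]")

# Setup exactly as in the updated hunk above.
config = PeftConfig.from_pretrained("ChangeIsKey/llama-7b-lexical-substitution")
base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", use_fast=False, trust_remote_code=True)
tokenizer.add_special_tokens({"additional_special_tokens": [AddedToken("<|s|>"), AddedToken("<|answer|>"), AddedToken("<|end|>")]})
if tokenizer.pad_token is None:
    tokenizer.add_special_tokens({'pad_token': '[PAD]'})

# Assumption, not in the diff: the added special tokens enlarge the vocabulary,
# so the base model's embedding matrix must be resized to match the tokenizer
# before the adapter is attached.
base_model.resize_token_embeddings(len(tokenizer))

# Standard peft call: load the LoRA adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(base_model, "ChangeIsKey/llama-7b-lexical-substitution")
model.eval()
```

`PeftModel.from_pretrained` layers the adapter over the base weights without modifying them, so the embedding resize has to happen first for tensor shapes to stay consistent with the enlarged tokenizer.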