shibing624 committed on
Commit
8b36b07
1 Parent(s): b00f6e2

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -6
README.md CHANGED
@@ -136,7 +136,6 @@ pip install transformers
136
 
137
  ```python
138
  import sys
139
- from peft import PeftModel
140
  from transformers import LlamaForCausalLM, LlamaTokenizer
141
 
142
 
@@ -154,16 +153,13 @@ for s in sents:
154
  inputs = tokenizer(q, return_tensors="pt")
155
  inputs = inputs.to(device=device)
156
 
157
- generate_ids = ref_model.generate(
158
  **inputs,
159
  max_new_tokens=120,
160
  do_sample=True,
161
  top_p=0.85,
162
  temperature=1.0,
163
- repetition_penalty=1.0,
164
- eos_token_id=tokenizer.eos_token_id,
165
- bos_token_id=tokenizer.bos_token_id,
166
- pad_token_id=tokenizer.pad_token_id,
167
  )
168
 
169
  output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True)[0]
 
136
 
137
  ```python
138
  import sys
 
139
  from transformers import LlamaForCausalLM, LlamaTokenizer
140
 
141
 
 
153
  inputs = tokenizer(q, return_tensors="pt")
154
  inputs = inputs.to(device=device)
155
 
156
+ generate_ids = model.generate(
157
  **inputs,
158
  max_new_tokens=120,
159
  do_sample=True,
160
  top_p=0.85,
161
  temperature=1.0,
162
+ repetition_penalty=1.0
 
 
 
163
  )
164
 
165
  output = tokenizer.batch_decode(generate_ids, skip_special_tokens=True)[0]