Update README.md
Browse files
README.md
CHANGED
@@ -31,7 +31,7 @@ tokenizer = AutoTokenizer.from_pretrained('/path/to/tokenizer')
|
|
31 |
model = GPT2LMHeadModel.from_pretrained('/path/to/output').to(device)
|
32 |
input_ids = tokenizer.encode(enzyme_class,return_tensors='pt').to(device)
|
33 |
# adjust max_length or num_return_sequences to suit your requirements
|
34 |
-
output = model.generate(input_ids, top_k=
|
35 |
eos_token_id=1,pad_token_id=0,do_sample=True, num_return_sequences=100)
|
36 |
```
|
37 |
|
|
|
31 |
model = GPT2LMHeadModel.from_pretrained('/path/to/output').to(device)
|
32 |
input_ids = tokenizer.encode(enzyme_class,return_tensors='pt').to(device)
|
33 |
# adjust max_length or num_return_sequences to suit your requirements
|
34 |
+
output = model.generate(input_ids, top_k=9, repetition_penalty=1.2, max_length=1024,
|
35 |
eos_token_id=1,pad_token_id=0,do_sample=True, num_return_sequences=100)
|
36 |
```
|
37 |
|