Update README.md
Browse files
README.md
CHANGED
@@ -6,7 +6,7 @@ language:
|
|
6 |
- en
|
7 |
pipeline_tag: text-generation
|
8 |
tags:
|
9 |
- -
|
10 |
---
|
11 |
|
12 |
This model is a finetuned version of ```gpt2``` using ```HuggingFaceH4/ultrachat_200k```
|
@@ -35,7 +35,7 @@ prompt.
|
|
35 |
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
|
36 |
>>> def generate_text(prompt):
|
37 |
>>> inputs = tokenizer.encode(prompt, return_tensors='pt')
|
38 |
- >>> outputs =
|
39 |
>>> generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
40 |
>>> return generated[:generated.rfind(".")+1]
|
41 |
>>> prompt = """
|
|
|
6 |
- en
|
7 |
pipeline_tag: text-generation
|
8 |
tags:
|
9 |
+ - gpt2
|
10 |
---
|
11 |
|
12 |
This model is a finetuned version of ```gpt2``` using ```HuggingFaceH4/ultrachat_200k```
|
|
|
35 |
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
|
36 |
>>> def generate_text(prompt):
|
37 |
>>> inputs = tokenizer.encode(prompt, return_tensors='pt')
|
38 |
+ >>> outputs = model.generate(inputs, max_length=64, pad_token_id=tokenizer.eos_token_id)
|
39 |
>>> generated = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
40 |
>>> return generated[:generated.rfind(".")+1]
|
41 |
>>> prompt = """
|