Update README.md
README.md (changed):

````diff
@@ -17,14 +17,14 @@ More information about Erya dataset can be found here: [RUCAIBox/Erya-dataset ·
 # Example
 
 ```python
-from transformers import BertTokenizer, CPTForConditionalGeneration
+>>> from transformers import BertTokenizer, CPTForConditionalGeneration
 
-tokenizer = BertTokenizer.from_pretrained("RUCAIBox/Erya")
-model = CPTForConditionalGeneration.from_pretrained("RUCAIBox/Erya")
+>>> tokenizer = BertTokenizer.from_pretrained("RUCAIBox/Erya")
+>>> model = CPTForConditionalGeneration.from_pretrained("RUCAIBox/Erya")
 
-input_ids = tokenizer("安世字子孺,少以父任为郎。", return_tensors='pt')
-input_ids.pop("token_type_ids")
+>>> input_ids = tokenizer("安世字子孺,少以父任为郎。", return_tensors='pt')
+>>> input_ids.pop("token_type_ids")
 
-pred_ids = model.generate(max_new_tokens=256, **input_ids)
-print(tokenizer.batch_decode(pred_ids, skip_special_tokens=True))
+>>> pred_ids = model.generate(max_new_tokens=256, **input_ids)
+>>> print(tokenizer.batch_decode(pred_ids, skip_special_tokens=True))
 ```
````
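For convenience, the same calls can be wrapped in a small helper. The sketch below is illustrative rather than part of the README; the `translate` function name is hypothetical, and it assumes the `RUCAIBox/Erya` checkpoint and the imports shown in the diff above.

```python
from transformers import BertTokenizer, CPTForConditionalGeneration

# Checkpoints as shown in the README example above.
tokenizer = BertTokenizer.from_pretrained("RUCAIBox/Erya")
model = CPTForConditionalGeneration.from_pretrained("RUCAIBox/Erya")

def translate(text: str, max_new_tokens: int = 256) -> str:
    # `translate` is a hypothetical helper, not part of the README.
    inputs = tokenizer(text, return_tensors="pt")
    # The README drops token_type_ids before generation; do the same here.
    inputs.pop("token_type_ids")
    pred_ids = model.generate(max_new_tokens=max_new_tokens, **inputs)
    # Decode and return the single generated sequence.
    return tokenizer.batch_decode(pred_ids, skip_special_tokens=True)[0]

print(translate("安世字子孺,少以父任为郎。"))
```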