samleeasus
committed on
Commit
•
6bc90ef
1
Parent(s):
6c2dd9b
Create README.md
Browse files
README.md
ADDED
@@ -0,0 +1,28 @@
1 |
+
```python
|
2 |
+
import torch
|
3 |
+
from transformers import LlamaTokenizer, LlamaForCausalLM
|
4 |
+
import transformers
|
5 |
+
tokenizer = LlamaTokenizer.from_pretrained(
|
6 |
+
'ocisd4/openllama-zh',
|
7 |
+
add_bos_token=False,
|
8 |
+
add_eos_token=False,
|
9 |
+
use_auth_token=True,
|
10 |
+
use_fast=False)
|
11 |
+
|
12 |
+
model = LlamaForCausalLM.from_pretrained('ocisd4/openllama-zh', device_map='auto',use_auth_token=True)
|
13 |
+
|
14 |
+
|
15 |
+
prompt = '關於華碩的傳說'
|
16 |
+
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
|
17 |
+
|
18 |
+
generation_output = model.generate(
|
19 |
+
input_ids=input_ids, max_new_tokens=256,
|
20 |
+
do_sample=True, top_k=40, top_p=0.95, temperature=0.7, repetition_penalty=1.08,
|
21 |
+
)
|
22 |
+
|
23 |
+
print(tokenizer.decode(generation_output[0]))
|
24 |
+
```

This is a 7B pretrained model, trained from the OpenLLaMA pretrained weights, with a context size of 2048.

**We keep updating with new models.**