Update README.md
README.md (changed)

````diff
@@ -61,20 +61,16 @@ Here provides a code snippet with `apply_chat_template` to show you how to load
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-model_name =
-
-model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    torch_dtype="auto",
-    device_map="auto"
-)
+model_name = 'deeptoken/Qwen2.5-3B-RuoZhiBa-Instruct'
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype='auto', device_map='auto')
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-prompt =
+prompt = '鸡柳是鸡的哪个部位?'
 messages = [
-    {"role": "system", "content": "You are
+    {"role": "system", "content": "You are a helpful assistant."},
     {"role": "user", "content": prompt}
 ]
+
 text = tokenizer.apply_chat_template(
     messages,
     tokenize=False,
@@ -91,6 +87,8 @@ generated_ids = [
 ]
 
 response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+print(response)
 ```
 
 
````
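
In short, the commit points `model_name` at the fine-tuned `deeptoken/Qwen2.5-3B-RuoZhiBa-Instruct` checkpoint, collapses the multi-line `from_pretrained` call into a single line, swaps in a RuoZhiBa-style Chinese test prompt plus a plain system message, and prints the decoded response at the end. For reference, a self-contained version of the updated snippet might look like the sketch below. The diff omits everything between `tokenize=False,` and the `generated_ids` post-processing, so the `add_generation_prompt=True` argument, the `model.generate(...)` call, the `max_new_tokens` value, and the prompt-token slicing are assumptions based on the usual Qwen2.5 chat-template recipe, not a verbatim copy of this README.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = 'deeptoken/Qwen2.5-3B-RuoZhiBa-Instruct'
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype='auto', device_map='auto')
tokenizer = AutoTokenizer.from_pretrained(model_name)

# RuoZhiBa-style trick question: "Which part of the chicken is the chicken fillet?"
prompt = '鸡柳是鸡的哪个部位?'
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": prompt}
]

# Render the chat as a single prompt string using the model's chat template.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True  # assumed: standard Qwen2.5 usage; not visible in the diff
)
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

# Generate a reply; max_new_tokens=512 is an assumed value, not taken from the diff.
generated_ids = model.generate(**model_inputs, max_new_tokens=512)

# Keep only the newly generated tokens by stripping the prompt tokens from each sequence.
generated_ids = [
    output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
]

response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
print(response)
```

As with other Qwen2.5 checkpoints, this assumes a reasonably recent `transformers` release (4.37 or later, when the Qwen2 architecture landed) and enough memory to load a 3B model.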