yentinglin committed
Commit f0463e0 • 1 Parent(s): 5552136
Update README.md
README.md CHANGED
@@ -65,7 +65,7 @@ Here's how you can run the model using the `pipeline()` function from 🤗 Trans
 import torch
 from transformers import pipeline
 
-pipe = pipeline("text-generation", model="
+pipe = pipeline("text-generation", model="yentinglin/Taiwan-LLM-7B-v2.1-chat", torch_dtype=torch.bfloat16, device_map="auto")
 
 # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
 messages = [
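The hunk is cut off at `messages = [`, so for context, here is a minimal runnable sketch of how the new `pipeline(...)` line is typically used together with the chat template mentioned in the comment. Only the `pipeline(...)` call comes from this commit; the example messages and the generation parameters (`max_new_tokens`, `temperature`, `top_p`) are illustrative assumptions, not part of the README diff.

```python
import torch
from transformers import pipeline

# Line added in this commit: load the chat model in bfloat16, spread across available devices.
pipe = pipeline("text-generation", model="yentinglin/Taiwan-LLM-7B-v2.1-chat", torch_dtype=torch.bfloat16, device_map="auto")

# Illustrative conversation (the README hunk is truncated after `messages = [`).
messages = [
    {"role": "system", "content": "You are a helpful assistant from Taiwan."},
    {"role": "user", "content": "What is the capital of Taiwan?"},
]

# Render the conversation with the tokenizer's chat template, then generate a reply.
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.95)
print(outputs[0]["generated_text"])
```

`pipe.tokenizer.apply_chat_template(..., add_generation_prompt=True)` formats the conversation with the model's own chat template (see the linked chat_templating docs) so the prompt matches the format the model was fine-tuned on.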