---
language:
- ko
pipeline_tag: text-generation
tags:
- llama2
---

### Model Generation

Prompts use the `### Instruction:` / `### output:` template shown below.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer from the Hugging Face Hub.
model = AutoModelForCausalLM.from_pretrained("AIdenU/LLAMA-2-13b-ko-Y24-DPO_v0.1", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("AIdenU/LLAMA-2-13b-ko-Y24-DPO_v0.1", use_fast=True)

text = "안녕하세요."  # Korean for "Hello."
outputs = model.generate(
    **tokenizer(
        f"### Instruction: {text}\n\n### output:",
        return_tensors='pt'
    ).to('cuda'),
    max_new_tokens=256,
    temperature=0.2,
    top_p=1,
    do_sample=True
)
print(tokenizer.decode(outputs[0]))
```
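
Note that `tokenizer.decode(outputs[0])` prints the prompt together with the completion. A minimal sketch of decoding only the newly generated tokens, assuming the same `model`, `tokenizer`, and `text` as above (the `inputs` variable and the slicing are illustrative additions, not part of the original card):

```python
# Keep a handle on the tokenized prompt so its length is known.
inputs = tokenizer(
    f"### Instruction: {text}\n\n### output:",
    return_tensors='pt'
).to('cuda')

outputs = model.generate(
    **inputs,
    max_new_tokens=256,
    temperature=0.2,
    top_p=1,
    do_sample=True
)

# Slice off the prompt tokens and decode only the completion.
print(tokenizer.decode(outputs[0][inputs['input_ids'].shape[-1]:], skip_special_tokens=True))
```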