# KDI-Llama-3-Open-Ko-8B-Instruct

KDI-Dataset을 구축하여 sLLM을 빌드하였습니다.
Colab 환경에서 A100을 이용하였습니다.
# Minimal usage example: load the KDI fine-tuned Llama-3 Korean model and run a
# single-turn prompt on a CUDA device (tested on an A100 in Colab).
from transformers import AutoTokenizer
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "prismdata/KDI-Llama-3-Open-Ko-8B-Instruct",
    cache_dir="./",
    device_map="cuda",
)
# Tokenizers run on the CPU; `device_map` is not a tokenizer argument, so the
# original call passed it by mistake — it is dropped here.
tokenizer = AutoTokenizer.from_pretrained(
    "prismdata/KDI-Llama-3-Open-Ko-8B-Instruct",
    cache_dir="./",
)

# Vicuna-style single-turn chat template used during fine-tuning.
prompt_template = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\nHuman: {prompt}\nAssistant:\n"
text = 'PMDU(prime minister’s delivery unit)가 어떤 역할을 하는 조직인가요?'

# Move the tokenized inputs to the same device the model was loaded on.
model_inputs = tokenizer(prompt_template.format(prompt=text), return_tensors='pt').to("cuda:0")
# `generate` already returns tensors on the model's device; the original code
# redundantly chained `.to("cuda:0")` onto its result.
outputs = model.generate(**model_inputs, max_new_tokens=256)
output_text = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
print(output_text)
```yaml
---
license: apache-2.0
tags:
- unsloth
- trl
- sft
datasets:
- prismdata/KDI-DATASET
base_model:
- beomi/Llama-3-Open-Ko-8B-Instruct-preview
---
```