import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = 'yainage90/Llama3-open-Ko-3-8B-Law-Chat-Full-Weights'

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",
    device_map="auto",
)

model.eval()
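
The card lists this checkpoint at 8.03B parameters stored in F32, so the full-precision weights alone take on the order of 32 GB of memory. If that does not fit on the available GPU, loading the weights quantized is one option. A minimal sketch, assuming the optional bitsandbytes integration is installed (not part of the original example):

from transformers import BitsAndBytesConfig

# Alternative load (assumption, not from the original card): quantize the
# weights to 4-bit at load time so the 8B model fits in far less GPU memory.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)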

# Example question (Korean). Roughly: "While drinking, the next table picked a
# quarrel first; it never became a serious fight. Voices rose and, drunk and
# angry, I grabbed the other person's upper arm near the shoulder. There was no
# injury, but they reported me for assault because I laid hands on them, and the
# police questioned me. If they refuse to settle, how large a fine should I expect?"
question = '''
μˆ μ„ λ§ˆμ‹œλ‹€κ°€ μ˜†ν…Œμ΄λΈ”μ—μ„œ λ¨Όμ € μ‹œλΉ„λ₯Ό κ±Έμ—ˆκ³  μ‹¬ν•˜κ²Œ μ‹Έμš°μ§„ μ•Šμ•˜μŠ΅λ‹ˆλ‹€
μ„œλ‘œ 언성이 높아지닀가 μ œκ°€ μˆ κΉ€μ— ν™”κ°€ λ‚˜μ„œ ν™”λ₯Ό 참지 λͺ»ν•˜κ³  μƒλŒ€μ˜ μ–΄κΉ¨μͺ½ νŒ”λΆ€λΆ„μ„ 꽉 μž‘μ•˜μŠ΅λ‹ˆλ‹€
λ‹Ήμ—°νžˆ 뭐 λ‹€μΉ˜κ±°λ‚˜ μƒν•΄μž…μ€ 뢀뢄은 μ—†λŠ”λ° μ œκ°€ 본인 λͺΈμ— 손을 λŒ”λ‹€λŠ” 이유둜 ν­ν–‰μ£„λ‘œ μ‹ κ³ λ₯Ό ν–ˆκ³  κ²½μ°° 쑰사λ₯Ό λ°›μ•˜μŠ΅λ‹ˆλ‹€
μƒλŒ€κ°€ ν•©μ˜ μ˜μ‚¬κ°€ 없을 경우 λ²ŒκΈˆμ€ μ–΄λŠμ •λ„ λ‚˜μ˜¬κΉŒμš”??
'''

# System prompt (Korean): "As a kind chatbot, answer the user's request as
# thoroughly and politely as possible. Reply to everything in Korean."
messages = [
    {"role": "system", "content": "μΉœμ ˆν•œ μ±—λ΄‡μœΌλ‘œμ„œ μƒλŒ€λ°©μ˜ μš”μ²­μ— μ΅œλŒ€ν•œ μžμ„Έν•˜κ³  μΉœμ ˆν•˜κ²Œ λ‹΅ν•˜μž. λͺ¨λ“  λŒ€λ‹΅μ€ ν•œκ΅­μ–΄(Korean)으둜 λŒ€λ‹΅ν•΄μ€˜."},
    {"role": "user", "content": question},
]

input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

# Stop generation at either the tokenizer's EOS token or Llama 3's
# end-of-turn token <|eot_id|>.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

with torch.no_grad():
    outputs = model.generate(
        input_ids,
        max_new_tokens=1024,
        eos_token_id=terminators,
        do_sample=True,
        temperature=0.9,
        top_p=0.9,
    )

# Keep only the newly generated tokens (everything after the prompt) and decode.
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
μƒλŒ€λ°©μ΄ μ²˜λ²Œμ„ μ›ν•˜μ§€ μ•ŠλŠ”λ‹€λŠ” μ˜μ‚¬ν‘œμ‹œλ₯Ό ν•˜λ©΄ μ²˜λ²Œλ°›μ§€ μ•Šμ„ μˆ˜λŠ” μžˆμŠ΅λ‹ˆλ‹€. κ·ΈλŸ¬λ‚˜ λ§Œμ•½ μƒλŒ€λ°©μ΄ μ²˜λ²Œμ„ μ›ν•œλ‹€λ©΄ 100λ§Œμ› μ΄μƒμ˜ λ²ŒκΈˆν˜•μ΄ μ˜ˆμƒλ©λ‹ˆλ‹€.
λ²ŒκΈˆν˜•μ„ λ°›κ²Œ 되면 100λ§Œμ› μ΄μƒμ˜ κΈˆμ „μ  손해와 ν•¨κ»˜, 범죄경λ ₯μ‘°νšŒμ—μ„œ 전과기둝이 λ‚¨κ²Œ λ©λ‹ˆλ‹€. 이 ν›„, κ²½μ°°, κ²€μ°°, λ²•μ›μ˜ 톡보λ₯Ό λ°›κ²Œ 되며, 곡무원, ꡐ원, 곡기업 μ·¨μ—… 등이 μ œν•œλ  수 μžˆμŠ΅λ‹ˆλ‹€.
λ”°λΌμ„œ μƒλŒ€λ°©κ³Όμ˜ μ›λ§Œν•œ ν•©μ˜λ₯Ό 톡해 폭행죄λ₯Ό μΈμ •ν•˜μ§€ μ•Šκ³  무혐의둜 사건을 μ’…κ²°μ§“λŠ” 것이 μ΅œμ„ μ˜ λ°©λ²•μž…λ‹ˆλ‹€. 경찰쑰사 λ‹Ήμ‹œ μƒλŒ€λ°©μ—κ²Œ μ—°λ½μ²˜λ‚˜ 연락할 수 μžˆλŠ” 방법을 λ¬Όμ–΄λ³΄μ‹œκ³ , ν•„μš”μ‹œ λ³€ν˜Έμ‚¬λ₯Ό ν†΅ν•œ μ€‘μž¬λ‚˜ ν•©μ˜λ₯Ό μ§„ν–‰ν•˜μ‹œκΈ° λ°”λžλ‹ˆλ‹€. </μ§ˆλ¬Έμžλ‹˜μ˜ λŒ“κΈ€μ΄ μž‘μ„±λœ 곳에 λŒ“κΈ€λ‘œ 도움 λ˜μ…¨μœΌλ©΄ ν•©λ‹ˆλ‹€. </inish

Model size: 8.03B params (Safetensors, F32)