Update README.md
Browse files
README.md
CHANGED
@@ -48,11 +48,24 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
|
48 |
tokenizer = AutoTokenizer.from_pretrained("kikikara/llama_with_eeve_the_third_04_150M")
|
49 |
model = AutoModelForCausalLM.from_pretrained("kikikara/llama_with_eeve_the_third_04_150M")
|
50 |
|
51 |
-
question = "
|
52 |
|
53 |
prompt = f"### System:\nλΉμ μ λΉλλμ μ΄κ±°λ, μ±μ μ΄κ±°λ, λΆλ²μ μ΄κ±°λ λλ μ¬ν ν΅λ
μ μΌλ‘ νμ©λμ§ μλ λ°μΈμ νμ§ μμ΅λλ€.\nμ¬μ©μμ μ¦κ²κ² λννλ©°, μ¬μ©μμ μλ΅μ κ°λ₯ν μ ννκ³ μΉμ νκ² μλ΅ν¨μΌλ‘μ¨ μ΅λν λμμ£Όλ €κ³ λ
Έλ ₯ν©λλ€.\n\n\n### User:\n {question}"
|
54 |
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400, repetition_penalty=1.12)
|
55 |
result = pipe(prompt)
|
56 |
|
57 |
print(result[0]['generated_text'])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
58 |
```
|
|
|
48 |
tokenizer = AutoTokenizer.from_pretrained("kikikara/llama_with_eeve_the_third_04_150M")
|
49 |
model = AutoModelForCausalLM.from_pretrained("kikikara/llama_with_eeve_the_third_04_150M")
|
50 |
|
51 |
+
question = "κ³ κΈ° λ§μκ² κ΅½λ λ²μ μλ €μ€"
|
52 |
|
53 |
prompt = f"### System:\nλΉμ μ λΉλλμ μ΄κ±°λ, μ±μ μ΄κ±°λ, λΆλ²μ μ΄κ±°λ λλ μ¬ν ν΅λ
μ μΌλ‘ νμ©λμ§ μλ λ°μΈμ νμ§ μμ΅λλ€.\nμ¬μ©μμ μ¦κ²κ² λννλ©°, μ¬μ©μμ μλ΅μ κ°λ₯ν μ ννκ³ μΉμ νκ² μλ΅ν¨μΌλ‘μ¨ μ΅λν λμμ£Όλ €κ³ λ
Έλ ₯ν©λλ€.\n\n\n### User:\n {question}"
|
54 |
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400, repetition_penalty=1.12)
|
55 |
result = pipe(prompt)
|
56 |
|
57 |
print(result[0]['generated_text'])
|
58 |
+
|
59 |
+
### Assistant:
|
60 |
+
# 고기 맛있게 굽는 법은 다음과 같습니다:
|
61 |
+
|
62 |
+
# 1. **고기를 미리 조리합니다.
|
63 |
+
# 2. **소스 재료를 준비합니다.
|
64 |
+
# 3. **소금과 후추를 양념으로 사용합니다.
|
65 |
+
# 4. **간단히 굽습니다.
|
66 |
+
# 5. **간단히 굽습니다.
|
67 |
+
# 6. **소금과 후추로 간을 맞추세요.
|
68 |
+
# 7. **조리 방법을 정해줍니다.
|
69 |
+
# 8. **고기의 맛을 돋웁니다.
|
70 |
+
# 9. **맛있게 드세요!
|
71 |
```
|