Update README.md
Browse files
README.md
CHANGED
@@ -84,6 +84,35 @@ inputs = tokenizer([
 outputs = model.generate(**inputs, max_new_tokens=192)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 ```
+
+Transformers Pipeline:
+```py
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+
+tokenizer = AutoTokenizer.from_pretrained("myzens/llama3-8b-tr-finetuned")
+model = AutoModelForCausalLM.from_pretrained("myzens/llama3-8b-tr-finetuned")
+
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+alpaca_prompt = """
+Instruction:
+{}
+
+Input:
+{}
+
+Response:
+{}"""
+
+input = alpaca_prompt.format(
+    "",
+    "Ankara'da gezilebilecek 3 yeri söyle ve ne olduklarını kısaca açıkla.",
+    "",
+)
+
+pipe(input)
+```
+
 Output:
 ```
 Instruction: