Update README.md
README.md CHANGED
@@ -3,9 +3,26 @@ language:
 - en
 pipeline_tag: text-generation
 model: PY007/TinyLlama-1.1B-intermediate-step-715k-1.5T
+dataset: ArmelR/oasst1_guanaco_english
 ---
 TinyLlama 1.5T checkpoint trained to answer questions.
 ```
 f"{prompt}\n{completion}\n<END>"
 ```
-No input/output, just question, then newline to begin the answer.
+No input/output markers; just the question, then a newline to begin the answer.
+
+
+```
+from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+# Load the model and tokenizer directly
+tokenizer = AutoTokenizer.from_pretrained("Corianas/tiny-llama-miniguanaco-1.5T")
+model = AutoModelForCausalLM.from_pretrained("Corianas/tiny-llama-miniguanaco-1.5T")
+
+# Run a text-generation pipeline with the loaded model and tokenizer
+prompt = "What is a large language model?"
+pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=500)
+result = pipe(f"<s>{prompt}")
+print(result[0]['generated_text'])
+```
+The result will contain the answer, ending with <END> on a new line.
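
For reference, the template above renders to plain text with the question on the first line and the answer beginning on the next. A minimal sketch of that layout (the prompt and completion values here are made-up placeholders, not taken from the dataset):

```
# Sketch of the training-example layout: prompt, newline, completion,
# newline, then the <END> marker. Both values below are hypothetical.
prompt = "What is a large language model?"
completion = "A neural network with many parameters trained to predict text."

example = f"{prompt}\n{completion}\n<END>"
print(example)
```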
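
Because the answer terminates with <END>, callers will usually want to cut the generation there. A minimal sketch, assuming the pipeline setup from the card; splitting on <END> is an assumption about how the output would be consumed, not something the card specifies:

```
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

tokenizer = AutoTokenizer.from_pretrained("Corianas/tiny-llama-miniguanaco-1.5T")
model = AutoModelForCausalLM.from_pretrained("Corianas/tiny-llama-miniguanaco-1.5T")
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=500)

prompt = "What is a large language model?"
generated = pipe(f"<s>{prompt}")[0]["generated_text"]

# The model ends its answer with <END> on a new line,
# so keep only the text before the first <END>.
answer = generated.split("<END>")[0]
print(answer.strip())
```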