Update README.md
Browse files
README.md
CHANGED
@@ -14,4 +14,21 @@ bofenghuang/vigostral-7b-chat
|
|
14 |
|
15 |
base model : jpacifico/French-Alpaca-7B-Instruct-beta
|
16 |
|
17 |
-
This quantized q8_0 GGUF version can be used on a CPU device, compatible with llama.cpp and LM Studio.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
|
15 |
base model : jpacifico/French-Alpaca-7B-Instruct-beta
|
16 |
|
17 |
+
This quantized q8_0 GGUF version can be used on a CPU device, and is compatible with llama.cpp, Ollama, and LM Studio.
|
18 |
+
|
19 |
+
Ollama Modelfile example:
|
20 |
+
|
21 |
+
```bash
|
22 |
+
FROM ./Vigalpaca-French-7B-ties-quantized-q8_0.gguf
|
23 |
+
|
24 |
+
PARAMETER stop "[INST]"
PARAMETER stop "[/INST]"
|
30 |
+
|
31 |
+
TEMPLATE """
|
32 |
+
[INST] {{ .System }} {{ .Prompt }} [/INST]
|
33 |
+
"""
|
34 |
+
```
|