Added code to run this model from GGUF
README.md CHANGED
@@ -69,7 +69,30 @@ response = tokenizer.batch_decode(generated_ids)[0]
 print(response)
 
 ```
+*Directly using this model from GGUF*
 
+```python
+%pip install -U 'webscout[local]'
+
+from webscout.Local.utils import download_model
+from webscout.Local.model import Model
+from webscout.Local.thread import Thread
+from webscout.Local import formats
+from webscout.Local import samplers
+# 1. Download the model
+repo_id = "OEvortex/HelpingAI-9B"
+filename = "helpingai-9b.Q4_0.gguf"
+model_path = download_model(repo_id, filename)
+
+# 2. Load the model
+model = Model(model_path, n_gpu_layers=48)
+
+# 3. Create a Thread for conversation
+thread = Thread(model, formats.chatml, samplers.TikTokenSampling)
+
+# 4. Start interacting with the model
+thread.interact()
+```
 ## Example Dialogue
 > Express joy and excitement about visiting a new place.
 
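The same GGUF file can also be run with the more widely used llama-cpp-python loader. The sketch below is a minimal, illustrative alternative, assuming the `helpingai-9b.Q4_0.gguf` file is downloadable from the `OEvortex/HelpingAI-9B` repo as the snippet above implies, and that the model follows the ChatML format used there.

```python
# Editor's sketch (assumption): running the same GGUF with llama-cpp-python
# instead of webscout.Local; repo_id and filename are taken from the snippet above.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download the quantized weights from the Hub
model_path = hf_hub_download(
    repo_id="OEvortex/HelpingAI-9B",
    filename="helpingai-9b.Q4_0.gguf",
)

# Load the model; n_gpu_layers controls how many layers are offloaded to the GPU
llm = Llama(model_path=model_path, n_gpu_layers=48, n_ctx=4096, chat_format="chatml")

# Single-turn chat completion using the prompt from the Example Dialogue section
response = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Express joy and excitement about visiting a new place."}],
    max_tokens=256,
)
print(response["choices"][0]["message"]["content"])
```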