Upload transformers_inference.py with huggingface_hub
transformers_inference.py +18 -0
transformers_inference.py
ADDED
@@ -0,0 +1,18 @@
+from transformers import AutoTokenizer, AutoModelForCausalLM
+import torch
+
+# Load the tokenizer
+tokenizer = AutoTokenizer.from_pretrained("lucasdozie/aos-qmodel-hermeai")
+
+# Load the quantized model
+model = AutoModelForCausalLM.from_pretrained("lucasdozie/aos-qmodel-hermeai", gguf_file="ggml-model-Q4_K_M.gguf")  # GGUF loading requires the gguf package
+
+# Prepare input text
+input_text = "Hello, how are you?"
+inputs = tokenizer(input_text, return_tensors="pt")
+
+# Run inference
+outputs = model.generate(**inputs)
+generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+print(generated_text)
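
Note (not part of this commit): loading a GGUF checkpoint through transformers dequantizes the weights in memory. A minimal sketch of running the same Q4_K_M file in its quantized form with llama-cpp-python is shown below; the use of llama-cpp-python and the hf_hub_download step are assumptions, while the repo id and filename are taken from the script above.

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download the quantized weights from the Hub (assumed layout: the GGUF file sits at the repo root)
gguf_path = hf_hub_download(
    repo_id="lucasdozie/aos-qmodel-hermeai",
    filename="ggml-model-Q4_K_M.gguf",
)

# Load the model and run a single completion on the same prompt as the script
llm = Llama(model_path=gguf_path)
output = llm("Hello, how are you?", max_tokens=64)
print(output["choices"][0]["text"])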