# Example script: analyzing a coin for minting errors with llama-cpp-python.
from llama_cpp import Llama

# Default location of the GGUF model file; replace with a real path before running.
DEFAULT_MODEL_PATH = "path_to_your_model.gguf"

# Original example prompt. NOTE(review): the text mentions a "provided image",
# but no image is actually passed to the model — only this text prompt.
DEFAULT_PROMPT = "Analyze this coin for potential errors based on the provided image or description."


def analyze_coin(prompt: str = DEFAULT_PROMPT, model_path: str = DEFAULT_MODEL_PATH) -> str:
    """Ask a local GGUF chat model to analyze a coin and return its reply text.

    Args:
        prompt: User message sent to the model (defaults to the coin-analysis prompt).
        model_path: Filesystem path to the GGUF model to load.

    Returns:
        The assistant's reply text from the first completion choice.

    Raises:
        ValueError: If the model file cannot be loaded by llama-cpp-python.
        KeyError / IndexError: If the completion response lacks the expected
            "choices"/"message"/"content" structure.
    """
    # Loading the model is expensive; done here (not at import time) so that
    # importing this module has no side effects.
    model = Llama(model_path=model_path)
    response = model.create_chat_completion(
        messages=[{"role": "user", "content": prompt}]
    )
    # create_chat_completion returns an OpenAI-style dict; the reply text lives
    # in the first choice's message content.
    return response["choices"][0]["message"]["content"]


if __name__ == "__main__":
    # Preserves the original script behavior: run the default prompt and print the reply.
    print(analyze_coin())