from llama_cpp import Llama

# Load the model (ensure model_path points to a valid GGUF file on disk)
model = Llama(model_path="path_to_your_model.gguf")

# Example prompt asking the model to analyze a coin. A text-only GGUF model
# cannot see images, so the coin must be described in the prompt text itself.
response = model.create_chat_completion(
    messages=[
        {
            "role": "user",
            "content": "Analyze this coin for potential errors based on the provided description.",
        }
    ]
)

# create_chat_completion returns an OpenAI-style dict; extract the reply text
print(response["choices"][0]["message"]["content"])
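
# --- Optional: building the coin description programmatically ---
# Since a plain GGUF text model cannot ingest pixels, one practical pattern is
# to serialize the coin's observable attributes into the prompt. This is a
# minimal sketch under that assumption: the `coin` fields and the prompt
# wording below are illustrative, not part of llama-cpp-python's API. (Actual
# image input would require a multimodal model with a matching chat handler.)
coin = {
    "denomination": "Lincoln cent",
    "year": 1955,
    "mint_mark": "none",
    "observations": "lettering on the obverse appears doubled; date shows separation",
}
description = "; ".join(f"{key}: {value}" for key, value in coin.items())

# Reuse the model loaded above; a system message narrows the assistant's role
response = model.create_chat_completion(
    messages=[
        {
            "role": "system",
            "content": "You are a numismatics assistant specializing in mint errors.",
        },
        {
            "role": "user",
            "content": f"Analyze this coin for potential mint errors. {description}",
        },
    ]
)
print(response["choices"][0]["message"]["content"])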