|
import streamlit as st |
|
from llama_cpp import Llama |
|
|
|
|
|
@st.cache_resource
def load_llama_model():
    """Download (once) and return the Lexi Llama-3-8B GGUF model.

    ``st.cache_resource`` keeps a single ``Llama`` instance alive across
    Streamlit reruns, so the multi-gigabyte download/load happens only on
    the first run.
    """
    repo = "Orenguteng/Llama-3-8B-Lexi-Uncensored-GGUF"
    weights = "Lexi-Llama-3-8B-Uncensored_F16.gguf"
    return Llama.from_pretrained(repo_id=repo, filename=weights)
|
|
|
|
|
# --- Page header ------------------------------------------------------------
st.title("AI Coin Error Detector")
st.write("This AI uses the Llama model to analyze coins for potential errors.")

# Load the model (served from the resource cache after the first run)
# before rendering the input widgets.
model = load_llama_model()

# --- User inputs ------------------------------------------------------------
_IMAGE_TYPES = ["jpg", "jpeg", "png"]

uploaded_file = st.file_uploader(
    "Upload a coin image (optional):",
    type=_IMAGE_TYPES,
)
coin_description = st.text_area(
    "Describe the coin (e.g., year, denomination, visible features):"
)
|
|
|
if st.button("Analyze"):
    if not coin_description and not uploaded_file:
        st.error("Please upload an image or provide a description of the coin.")
    else:
        # Build the text prompt. NOTE: the GGUF chat model is text-only —
        # the uploaded image bytes are never sent to it, so the prompt must
        # not imply the model can see the picture (the previous wording
        # asked the model to "account for its visual features", which only
        # invited hallucinated observations).
        prompt = "Analyze the following coin for errors:\n"
        if coin_description:
            prompt += f"Description: {coin_description}\n"
        if uploaded_file:
            st.info(
                "Note: this model is text-only, so the uploaded image "
                "cannot be analyzed directly; the analysis is based on "
                "your written description."
            )
            prompt += (
                "An image was uploaded, but you cannot view it. Base your "
                "analysis only on the description above.\n"
            )

        # Run inference with a progress spinner, and surface failures as a
        # UI error instead of crashing the app with a raw traceback.
        try:
            with st.spinner("Analyzing..."):
                response = model.create_chat_completion(
                    messages=[{"role": "user", "content": prompt}]
                )
            st.write("### AI Response:")
            # Guarded separately: a malformed completion payload would
            # otherwise raise KeyError/IndexError here.
            st.write(response["choices"][0]["message"]["content"])
        except (KeyError, IndexError):
            st.error("The model returned an unexpected response format.")
        except Exception as exc:  # top-level UI boundary: report, don't crash
            st.error(f"Model inference failed: {exc}")
|
|