FROM ghcr.io/ggerganov/llama.cpp:full

ENV DEBIAN_FRONTEND=noninteractive

# Update package lists and install build/runtime dependencies
RUN apt-get update && \
    apt-get install --no-install-recommends -y \
        build-essential \
        python3 \
        python3-pip \
        wget \
        curl \
        git \
        cmake \
        zlib1g-dev \
        libblas-dev && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Download the quantized model weights into the working directory
RUN wget https://huggingface.co/brittlewis12/Snorkel-Mistral-PairRM-DPO-GGUF/resolve/main/snorkel-mistral-pairrm-dpo.Q4_K_M.gguf

# The :full base image already ships the compiled llama.cpp binaries,
# so no extra build step (e.g. `make`) is needed here.

# Expose the server port
EXPOSE 8080

# These arguments are passed to the base image's entrypoint, which
# launches the llama.cpp server with the downloaded model
CMD ["--server", "--model", "snorkel-mistral-pairrm-dpo.Q4_K_M.gguf", "--threads", "8", "--host", "0.0.0.0"]
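
# Usage sketch (not part of the build): the image tag "snorkel-server" is an
# arbitrary choice, and the /completion request below uses llama.cpp's HTTP
# server API with illustrative parameter values.
#
#   docker build -t snorkel-server .
#   docker run --rm -p 8080:8080 snorkel-server
#
#   # Once the server is up, request a completion:
#   curl http://localhost:8080/completion \
#     -H "Content-Type: application/json" \
#     -d '{"prompt": "Building a website can be done in 10 simple steps:", "n_predict": 128}'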