Koboldcpp-KobbleTiny / Dockerfile
FROM nvidia/cuda:11.8.0-devel-ubuntu22.04
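# Build-time arguments: MODEL is the download URL for the model file;
# MODEL_NAME and CONTEXT_SIZE configure the server launched in CMD below.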
ARG MODEL
ARG MODEL_NAME
ARG CONTEXT_SIZE
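# Create the install directory, then install the build toolchain, OpenBLAS, wget and Python.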
RUN mkdir /opt/koboldcpp
RUN apt update && apt install git build-essential libopenblas-dev wget python3-pip -y
RUN git clone https://github.com/lostruins/koboldcpp /opt/koboldcpp
WORKDIR /opt/koboldcpp
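# Build koboldcpp with OpenBLAS and cuBLAS backends; LLAMA_PORTABLE=1 is meant to
# produce a build that is not tied to the build machine's CPU features.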
RUN make LLAMA_OPENBLAS=1 LLAMA_CUBLAS=1 LLAMA_PORTABLE=1
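# Download the model at image build time so it is baked into the image.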
RUN wget -O model.ggml $MODEL
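# Launch the koboldcpp API server on port 7860 with all layers offloaded to the GPU.
# Note: ARG values are not visible to CMD at run time, so $CONTEXT_SIZE and $MODEL_NAME
# must be present as environment variables when the container runs (Hugging Face Spaces
# exposes Space variables this way).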
CMD /bin/python3 ./koboldcpp.py --model model.ggml --usecublas --gpulayers 99 --multiuser --contextsize $CONTEXT_SIZE --port 7860 --hordeconfig $MODEL_NAME 1 1
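# Example build and run outside of Spaces (illustrative values; substitute your own model URL):
#   docker build -t koboldcpp-kobbletiny \
#     --build-arg MODEL=https://example.com/model.gguf \
#     --build-arg MODEL_NAME=KobbleTiny --build-arg CONTEXT_SIZE=4096 .
#   docker run --gpus all -p 7860:7860 -e MODEL_NAME=KobbleTiny -e CONTEXT_SIZE=4096 koboldcpp-kobbletiny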