|
FROM python:3.11-slim

# Build prerequisites for compiling llama.cpp's server target.
# Use apt-get (apt's CLI is not script-stable), skip recommended
# packages, and purge the apt lists in the same layer so the package
# cache never lands in the image (hadolint DL3009/DL3015/DL3027).
# ca-certificates backs the https git clone / wget below.
# NOTE(review): package versions are unpinned (DL3008) -- pin them if
# reproducible builds are required.
RUN apt-get update && apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        cmake \
        git \
        ninja-build \
        wget \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
|
# Fetch llama.cpp and remap the server's OpenAI-compatible chat route
# from /v1/chat to /api/v1/chat (presumably so a reverse proxy can mount
# the API under /api -- verify against the deployment config).
# NOTE(review): an unpinned --depth 1 clone is not reproducible and the
# upstream server sources have moved between directories over time; pin
# a tag or commit once a known-good one is identified.
RUN git clone --depth 1 https://github.com/ggerganov/llama.cpp repo \
    && sed -i 's/v1\/chat/api\/v1\/chat/g' repo/examples/server/server.cpp

# Build only the HTTP server target, using -S/-B instead of `cd` (DL3003)
# and the Ninja generator that was installed above but previously unused.
# CMAKE_BUILD_TYPE is set at configure time because `--config` only
# affects multi-config generators (e.g. MSVC) and is a no-op here.
RUN cmake -S repo -B repo/build -G Ninja -DCMAKE_BUILD_TYPE=Release \
    && cmake --build repo/build --target server \
    && cp repo/build/bin/server /app/server
|
# Bake the quantized Qwen 1.8B model into the image; with WORKDIR /app
# it lands at /app/model.gguf. -c resumes a partial download if a
# cache-busted rebuild interrupted a previous attempt.
# NOTE(review): the download is not checksum-verified -- consider
# checking the file's sha256 against the value published on the
# Hugging Face model page.
RUN wget -c -O model.gguf "https://huggingface.co/zhangtao103239/Qwen-1.8B-GGUF/resolve/main/qwen-1.8b-q5_k_m.gguf"
|
# Drop root for the long-running server process; port 7860 is above
# 1024, so no extra capabilities are needed to bind it as non-root.
RUN useradd --system --uid 10001 --home /app app
USER app

# EXPOSE is documentation only (the port must still be published at run
# time); 7860 is the conventional Hugging Face Spaces port.
EXPOSE 7860

# Exec-form ENTRYPOINT so the server runs as PID 1 and receives SIGTERM
# from `docker stop`.
# BUG FIX: the original array was missing the comma between "-m" and
# "model.gguf". That makes the value invalid JSON, so Docker silently
# demotes the whole line to shell form (`/bin/sh -c '[...]'`), which
# fails at container start. The model path is made absolute so the
# command does not depend on WORKDIR.
ENTRYPOINT ["/app/server", "-m", "/app/model.gguf", "--host", "0.0.0.0", "--port", "7860"]
|
|
|
|
|
|