unclemusclez committed on
Commit
e3d5c47
·
verified ·
1 Parent(s): e86c1e5

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +9 -8
Dockerfile CHANGED
@@ -1,4 +1,4 @@
1
- FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
2
  ENV DEBIAN_FRONTEND=noninteractive
3
 
4
  RUN apt-get update && \
@@ -31,7 +31,10 @@ USER ${USER}
31
  ENV HOME=/home/${USER} \
32
  PATH=${HOME}/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${PATH}
33
  WORKDIR ${HOME}/app
34
-
 
 
 
35
  RUN curl https://pyenv.run | bash
36
  ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
37
  ARG PYTHON_VERSION=3.10.13
@@ -39,11 +42,10 @@ RUN pyenv install ${PYTHON_VERSION} && \
39
  pyenv global ${PYTHON_VERSION} && \
40
  pyenv rehash && \
41
  pip install --no-cache-dir -U pip setuptools wheel && \
42
- pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
43
 
44
  COPY --chown=1000 . ${HOME}/app
45
-
46
- RUN git clone https://github.com/ollama/ollama.git
47
  RUN git clone https://github.com/ggerganov/llama.cpp
48
  RUN pip install -r llama.cpp/requirements.txt
49
  COPY groups_merged.txt ${HOME}/app/llama.cpp/.
@@ -63,6 +65,5 @@ ENV PYTHONPATH=${HOME}/app \
63
  NVIDIA_VISIBLE_DEVICES=all \
64
  OLLAMA_HOST=0.0.0.0
65
 
66
- # EXPOSE 11434
67
- ENTRYPOINT /bin/sh start.sh
68
-
 
1
+ FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04
2
  ENV DEBIAN_FRONTEND=noninteractive
3
 
4
  RUN apt-get update && \
 
31
  ENV HOME=/home/${USER} \
32
  PATH=${HOME}/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${PATH}
33
  WORKDIR ${HOME}/app
34
+
35
+ COPY --from=ollama/ollama:0.1.49-rc13 /go/src/github.com/ollama/ollama/ollama /bin/ollama
36
+
37
+ ENV NVIDIA_VISIBLE_DEVICES=all
38
  RUN curl https://pyenv.run | bash
39
  ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
40
  ARG PYTHON_VERSION=3.10.13
 
42
  pyenv global ${PYTHON_VERSION} && \
43
  pyenv rehash && \
44
  pip install --no-cache-dir -U pip setuptools wheel && \
45
+ pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
46
 
47
  COPY --chown=1000 . ${HOME}/app
48
+
 
49
  RUN git clone https://github.com/ggerganov/llama.cpp
50
  RUN pip install -r llama.cpp/requirements.txt
51
  COPY groups_merged.txt ${HOME}/app/llama.cpp/.
 
65
  NVIDIA_VISIBLE_DEVICES=all \
66
  OLLAMA_HOST=0.0.0.0
67
 
68
+ # EXPOSE 11434/tcp
69
+ ENTRYPOINT /bin/sh start.sh