unclemusclez committed on
Commit
915906a
·
verified ·
1 Parent(s): d98ecdc

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +4 -7
Dockerfile CHANGED
@@ -1,6 +1,6 @@
1
  FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
2
-
3
  ENV DEBIAN_FRONTEND=noninteractive
 
4
  RUN apt-get update && \
5
  apt-get upgrade -y && \
6
  apt-get install -y --no-install-recommends ca-certificates \
@@ -30,9 +30,7 @@ RUN useradd -m -u 1000 ${USER}
30
  USER ${USER}
31
  ENV HOME=/home/${USER} \
32
  PATH=${HOME}/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${PATH}
33
-
34
  WORKDIR ${HOME}/app
35
-
36
 
37
  RUN curl https://pyenv.run | bash
38
  ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
@@ -42,13 +40,13 @@ RUN pyenv install ${PYTHON_VERSION} && \
42
  pyenv rehash && \
43
  pip install --no-cache-dir -U pip setuptools wheel && \
44
  pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
45
-
46
  COPY --chown=1000 . ${HOME}/app
 
47
  RUN git clone https://github.com/ollama/ollama.git \
48
  git clone https://github.com/ggerganov/llama.cpp \
49
- pip install -r llama.cpp/requirements.txt \
50
  COPY groups_merged.txt ${HOME}/app/llama.cpp/.
51
-
52
 
53
  ENV PYTHONPATH=${HOME}/app \
54
  PYTHONUNBUFFERED=1 \
@@ -64,7 +62,6 @@ ENV PYTHONPATH=${HOME}/app \
64
  NVIDIA_DRIVER_CAPABILITIES=compute,utility \
65
  NVIDIA_VISIBLE_DEVICES=all \
66
  OLLAMA_HOST=0.0.0.0
67
-
68
 
69
  # EXPOSE 11434
70
  ENTRYPOINT /bin/sh start.sh
 
1
  FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
 
2
  ENV DEBIAN_FRONTEND=noninteractive
3
+
4
  RUN apt-get update && \
5
  apt-get upgrade -y && \
6
  apt-get install -y --no-install-recommends ca-certificates \
 
30
  USER ${USER}
31
  ENV HOME=/home/${USER} \
32
  PATH=${HOME}/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:${PATH}
 
33
  WORKDIR ${HOME}/app
 
34
 
35
  RUN curl https://pyenv.run | bash
36
  ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
 
40
  pyenv rehash && \
41
  pip install --no-cache-dir -U pip setuptools wheel && \
42
  pip install "huggingface-hub" "hf-transfer" "gradio[oauth]>=4.28.0" "gradio_huggingfacehub_search==0.0.7" "APScheduler"
43
+
44
  COPY --chown=1000 . ${HOME}/app
45
+
46
  RUN git clone https://github.com/ollama/ollama.git \
47
  git clone https://github.com/ggerganov/llama.cpp \
48
+ pip install -r llama.cpp/requirements.txt
49
  COPY groups_merged.txt ${HOME}/app/llama.cpp/.
 
50
 
51
  ENV PYTHONPATH=${HOME}/app \
52
  PYTHONUNBUFFERED=1 \
 
62
  NVIDIA_DRIVER_CAPABILITIES=compute,utility \
63
  NVIDIA_VISIBLE_DEVICES=all \
64
  OLLAMA_HOST=0.0.0.0
 
65
 
66
  # EXPOSE 11434
67
  ENTRYPOINT /bin/sh start.sh