FROM nvidia/cuda:12.6.1-cudnn-runtime-ubuntu24.04
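
# Build arguments: source repository, branch to clone, and GPU backend selector (cpu, cuda, or amd).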
ARG REPO_URL=https://github.com/rmusser01/tldw.git
ARG BRANCH=main
ARG GPU_SUPPORT=cpu
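
# System dependencies: ffmpeg for media handling, PortAudio headers for PyAudio, plus build tools, git, and Python with venv support.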
RUN apt-get update && apt-get install -y \
    ffmpeg \
    libsqlite3-dev \
    build-essential \
    git \
    python3 \
    python3-pyaudio \
    portaudio19-dev \
    python3-pip \
    python3-venv \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /tldw
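
# Clone the selected branch of the tldw repository into the working directory.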
RUN git clone -b ${BRANCH} ${REPO_URL} .
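
# Create a dedicated virtual environment and prepend it to PATH so later pip/python calls use it.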
RUN python3 -m venv ./venv
ENV PATH="/tldw/venv/bin:$PATH"

RUN pip install --upgrade pip wheel
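
# cuBLAS/cuDNN Python wheels; their library directories are exposed via LD_LIBRARY_PATH below.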
RUN pip install nvidia-cublas-cu12 nvidia-cudnn-cu12
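
# `RUN export` does not persist beyond its own layer, so set LD_LIBRARY_PATH with ENV instead.
# The path assumes the venv's interpreter is Python 3.12 (the Ubuntu 24.04 default); adjust it if a different version is used.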
ENV LD_LIBRARY_PATH="/tldw/venv/lib/python3.12/site-packages/nvidia/cublas/lib:/tldw/venv/lib/python3.12/site-packages/nvidia/cudnn/lib:${LD_LIBRARY_PATH}"
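
# Install PyTorch for the selected backend. torch 2.2.2 publishes CUDA 12.1 (cu121) wheels; there is no cu123 wheel index.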
RUN if [ "$GPU_SUPPORT" = "cuda" ]; then \
        pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cu121; \
    elif [ "$GPU_SUPPORT" = "amd" ]; then \
        pip install torch-directml; \
    else \
        pip install torch==2.2.2 torchvision==0.17.2 torchaudio==2.2.2 --index-url https://download.pytorch.org/whl/cpu; \
    fi

RUN pip install -r requirements.txt
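
# When building without GPU support, switch the application config from cuda to cpu.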
RUN if [ "$GPU_SUPPORT" = "cpu" ]; then \
        sed -i 's/cuda/cpu/' ./Config_Files/config.txt; \
    fi

VOLUME /tldw

EXPOSE 7860
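
# Bind Gradio to all interfaces so the UI is reachable from outside the container.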
ENV GRADIO_SERVER_NAME="0.0.0.0"
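
# Exec-form CMD: pass the flag and its value as separate list items so the script receives "-log" and "DEBUG" as distinct arguments.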
CMD ["python", "summarize.py", "-gui", "-log", "DEBUG"]