Upload 7 files
- .env.sample +27 -0
- Dockerfile +124 -0
- README.md +115 -8
- docker-compose.yml +54 -0
- docker-entrypoint.sh +41 -0
- run.sh +36 -0
- runpod-readme.md +60 -0
.env.sample
ADDED
@@ -0,0 +1,27 @@
```bash
## Make a copy of this file named `.env` and fill in the values below.
## Any environment variables supported by InvokeAI can be specified here,
## in addition to the examples below.

## INVOKEAI_ROOT is the path *on the host system* where Invoke will store its data.
## It is mounted into the container and allows both containerized and non-containerized usage of Invoke.
# Usually this is the only variable you need to set. It can be relative or absolute.
# INVOKEAI_ROOT=~/invokeai

## HOST_INVOKEAI_ROOT and CONTAINER_INVOKEAI_ROOT can be used to control the on-host
## and in-container paths separately, if needed.
## HOST_INVOKEAI_ROOT is the path on the docker host's filesystem where Invoke will store data.
## If relative, it will be relative to the docker directory in which the docker-compose.yml file is located.
## CONTAINER_INVOKEAI_ROOT is the path within the container where Invoke will expect to find the runtime directory.
## It MUST be absolute. There is usually no need to change this.
# HOST_INVOKEAI_ROOT=../../invokeai-data
# CONTAINER_INVOKEAI_ROOT=/invokeai

## INVOKEAI_PORT is the port on which the InvokeAI web interface will be available.
# INVOKEAI_PORT=9090

## GPU_DRIVER can be set to either `cuda` or `rocm` to enable GPU support in the container accordingly.
# GPU_DRIVER=cuda # or rocm

## CONTAINER_UID can be set to the UID of the user on the host system that should own the files in the container.
## It is usually not necessary to change this. Use `id -u` on the host system to find the UID.
# CONTAINER_UID=1000
```
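For illustration only, a filled-in `.env` might look like the sketch below; all values are placeholders taken from the defaults documented above, not recommendations:

```bash
INVOKEAI_ROOT=~/invokeai   # where models, outputs, and config live on the host
INVOKEAI_PORT=9090         # web UI port published on the host
GPU_DRIVER=cuda            # or rocm for AMD GPUs
CONTAINER_UID=1000         # match the output of `id -u` on the host
```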
Dockerfile
ADDED
@@ -0,0 +1,124 @@
```dockerfile
# syntax=docker/dockerfile:1.4

## Builder stage

FROM library/ubuntu:23.04 AS builder

ARG DEBIAN_FRONTEND=noninteractive
RUN rm -f /etc/apt/apt.conf.d/docker-clean; echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
    apt update && apt-get install -y \
        git \
        python3-venv \
        python3-pip \
        build-essential

ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai

ENV PATH="$VIRTUAL_ENV/bin:$PATH"
ARG GPU_DRIVER=cuda
ARG TARGETPLATFORM="linux/amd64"
# unused but available
ARG BUILDPLATFORM

WORKDIR ${INVOKEAI_SRC}

COPY invokeai ./invokeai
COPY pyproject.toml ./

# Editable mode helps use the same image for development:
# the local working copy can be bind-mounted into the image
# at path defined by ${INVOKEAI_SRC}
# NOTE: there are no pytorch builds for arm64 + cuda, only cpu
# x86_64/CUDA is default
RUN --mount=type=cache,target=/root/.cache/pip \
    python3 -m venv ${VIRTUAL_ENV} &&\
    if [ "$TARGETPLATFORM" = "linux/arm64" ] || [ "$GPU_DRIVER" = "cpu" ]; then \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cpu"; \
    elif [ "$GPU_DRIVER" = "rocm" ]; then \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/rocm6.1"; \
    else \
        extra_index_url_arg="--extra-index-url https://download.pytorch.org/whl/cu124"; \
    fi &&\
    # xformers + triton fails to install on arm64
    if [ "$GPU_DRIVER" = "cuda" ] && [ "$TARGETPLATFORM" = "linux/amd64" ]; then \
        pip install $extra_index_url_arg -e ".[xformers]"; \
    else \
        pip install $extra_index_url_arg -e "."; \
    fi

#### Build the Web UI ------------------------------------

FROM node:20-slim AS web-builder
ENV PNPM_HOME="/pnpm"
ENV PATH="$PNPM_HOME:$PATH"
RUN corepack use pnpm@8.x
RUN corepack enable

WORKDIR /build
COPY invokeai/frontend/web/ ./
RUN --mount=type=cache,target=/pnpm/store \
    pnpm install --frozen-lockfile
RUN npx vite build

#### Runtime stage ---------------------------------------

FROM library/ubuntu:23.04 AS runtime

ARG DEBIAN_FRONTEND=noninteractive
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1

RUN apt update && apt install -y --no-install-recommends \
    git \
    curl \
    vim \
    tmux \
    ncdu \
    iotop \
    bzip2 \
    gosu \
    magic-wormhole \
    libglib2.0-0 \
    libgl1-mesa-glx \
    python3-venv \
    python3-pip \
    build-essential \
    libopencv-dev \
    libstdc++-10-dev &&\
    apt-get clean && apt-get autoclean

ENV INVOKEAI_SRC=/opt/invokeai
ENV VIRTUAL_ENV=/opt/venv/invokeai
ENV INVOKEAI_ROOT=/invokeai
ENV INVOKEAI_HOST=0.0.0.0
ENV INVOKEAI_PORT=9090
ENV PATH="$VIRTUAL_ENV/bin:$INVOKEAI_SRC:$PATH"
ENV CONTAINER_UID=${CONTAINER_UID:-1000}
ENV CONTAINER_GID=${CONTAINER_GID:-1000}

# --link requires buildkit w/ dockerfile syntax 1.4
COPY --link --from=builder ${INVOKEAI_SRC} ${INVOKEAI_SRC}
COPY --link --from=builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
COPY --link --from=web-builder /build/dist ${INVOKEAI_SRC}/invokeai/frontend/web/dist

# Link amdgpu.ids for ROCm builds
# contributed by https://github.com/Rubonnek
RUN mkdir -p "/opt/amdgpu/share/libdrm" &&\
    ln -s "/usr/share/libdrm/amdgpu.ids" "/opt/amdgpu/share/libdrm/amdgpu.ids"

WORKDIR ${INVOKEAI_SRC}

# build patchmatch
RUN cd /usr/lib/$(uname -p)-linux-gnu/pkgconfig/ && ln -sf opencv4.pc opencv.pc
RUN python3 -c "from patchmatch import patch_match"

RUN mkdir -p ${INVOKEAI_ROOT} && chown -R ${CONTAINER_UID}:${CONTAINER_GID} ${INVOKEAI_ROOT}

COPY docker/docker-entrypoint.sh ./
ENTRYPOINT ["/opt/invokeai/docker-entrypoint.sh"]
CMD ["invokeai-web"]
```
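For reference, the image can also be built without `docker compose`. A sketch, assuming you run it from the repository root (the build context must contain `invokeai/` and `pyproject.toml`, matching the compose file's `context: ..`) with BuildKit enabled:

```bash
# GPU_DRIVER accepts cuda (default), rocm, or cpu; the tag name is a placeholder.
docker build \
  --file docker/Dockerfile \
  --build-arg GPU_DRIVER=rocm \
  --tag invokeai:local-rocm \
  .
```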
README.md
CHANGED
@@ -1,10 +1,117 @@
# Invoke in Docker

First things first:

- Ensure that Docker can use your [NVIDIA][nvidia docker docs] or [AMD][amd docker docs] GPU.
- This document assumes a Linux system, but should work similarly under Windows with WSL2.
- We don't recommend running Invoke in Docker on macOS at this time. It works, but very slowly.

## Quickstart

No `docker compose`, no persistence, a single command, using the official images:

**CUDA (NVIDIA GPU):**

```bash
docker run --runtime=nvidia --gpus=all --publish 9090:9090 ghcr.io/invoke-ai/invokeai
```

**ROCm (AMD GPU):**

```bash
docker run --device /dev/kfd --device /dev/dri --publish 9090:9090 ghcr.io/invoke-ai/invokeai:main-rocm
```

Open `http://localhost:9090` in your browser once the container finishes booting, install some models, and generate away!

### Data persistence

To persist your generated images and downloaded models outside of the container, add a `--volume/-v` flag to the above command, e.g.:

```bash
docker run --volume /some/local/path:/invokeai {...etc...}
```

`/some/local/path/invokeai` will contain all your data.
It can *usually* be reused between different installs of Invoke. Tread with caution and read the release notes!

## Customize the container

The included `run.sh` script is a convenience wrapper around `docker compose`. It can be helpful for passing additional build arguments. Alternatively, the familiar `docker compose` commands work just as well.

```bash
cd docker
cp .env.sample .env
# edit .env to your liking if you need to; it is well commented.
./run.sh
```

It will take a few minutes to build the image the first time. Once the application starts up, open `http://localhost:9090` in your browser to invoke!

> [!TIP]
> When using the `run.sh` script, the container will continue running after Ctrl+C. To shut it down, use the `docker compose down` command.

## Docker setup in detail

#### Linux

1. Ensure buildkit is enabled in the Docker daemon settings (`/etc/docker/daemon.json`).
2. Install the `docker compose` plugin using your package manager, or follow a [tutorial](https://docs.docker.com/compose/install/linux/#install-using-the-repository).
    - The deprecated `docker-compose` (hyphenated) CLI probably won't work. Update to a recent version.
3. Ensure the docker daemon is able to access the GPU.
    - [NVIDIA docs](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)
    - [AMD docs](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html)

#### macOS

> [!TIP]
> You'll be better off installing Invoke directly on your system, because Docker cannot use the GPU on macOS.

If you are still reading:

1. Ensure Docker has at least 16GB RAM.
2. Enable VirtioFS for file sharing.
3. Enable `docker compose` V2 support.

This is done via Docker Desktop preferences.

### Configure the Invoke Environment

1. Make a copy of `.env.sample` and name it `.env` (`cp .env.sample .env` on Mac/Linux, or `copy .env.sample .env` on Windows). Make changes as necessary. Set `INVOKEAI_ROOT` to an absolute path to the desired location of the InvokeAI runtime directory. It may be an existing directory from a previous installation (post 4.0.0).
1. Execute `run.sh`.

The image will be built automatically if needed.

The runtime directory (holding models and outputs) will be created in the location specified by `INVOKEAI_ROOT`. The default location is `~/invokeai`. Navigate to the Model Manager tab and install some models before generating.

### Use a GPU

- Linux is *recommended* for GPU support in Docker.
- WSL2 is *required* for Windows.
- Only the `x86_64` architecture is supported.

The Docker daemon on the system must already be set up to use the GPU. On Linux, this involves installing `nvidia-docker-runtime` and configuring the `nvidia` runtime as default. Steps will be different for AMD. Please see the Docker/NVIDIA/AMD documentation for the most up-to-date instructions on using your GPU with Docker.

To use an AMD GPU, set `GPU_DRIVER=rocm` in your `.env` file before running `./run.sh`.

## Customize

Check the `.env.sample` file. It contains some environment variables for running in Docker. Copy it, name it `.env`, and fill it in with your own values. Next time you run `run.sh`, your custom values will be used.

You can also set these values in `docker-compose.yml` directly, but `.env` will help avoid conflicts when code is updated.

Values are optional, but setting `INVOKEAI_ROOT` is highly recommended. The default is `~/invokeai`. Example:

```bash
INVOKEAI_ROOT=/Volumes/WorkDrive/invokeai
HUGGINGFACE_TOKEN=the_actual_token
CONTAINER_UID=1000
GPU_DRIVER=cuda
```

Any environment variables supported by InvokeAI can be set here. See the [Configuration docs](https://invoke-ai.github.io/InvokeAI/features/CONFIGURATION/) for further detail.

---

[nvidia docker docs]: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
[amd docker docs]: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/how-to/docker.html
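For those who prefer plain `docker compose` over the `run.sh` wrapper, the rough equivalents look like this (a sketch; service and profile names are taken from the `docker-compose.yml` shown next):

```bash
cd docker
docker compose up -d invokeai-cuda                   # NVIDIA (default service)
docker compose --profile rocm up -d invokeai-rocm    # AMD
docker compose --profile cpu up -d invokeai-cpu      # no GPU
docker compose logs -f                               # follow the logs
docker compose down                                  # shut everything down
```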
docker-compose.yml
ADDED
@@ -0,0 +1,54 @@
```yaml
# Copyright (c) 2023 Eugene Brodsky https://github.com/ebr

x-invokeai: &invokeai
  image: "ghcr.io/invoke-ai/invokeai:latest"
  build:
    context: ..
    dockerfile: docker/Dockerfile

  # Create a .env file in the same directory as this docker-compose.yml file
  # and populate it with environment variables. See .env.sample
  env_file:
    - .env

  # variables without a default will automatically inherit from the host environment
  environment:
    # if set, CONTAINER_INVOKEAI_ROOT will override the Invoke runtime directory location *inside* the container
    - INVOKEAI_ROOT=${CONTAINER_INVOKEAI_ROOT:-/invokeai}
    - HF_HOME
  ports:
    - "${INVOKEAI_PORT:-9090}:${INVOKEAI_PORT:-9090}"
  volumes:
    - type: bind
      source: ${HOST_INVOKEAI_ROOT:-${INVOKEAI_ROOT:-~/invokeai}}
      target: ${CONTAINER_INVOKEAI_ROOT:-/invokeai}
      bind:
        create_host_path: true
    - ${HF_HOME:-~/.cache/huggingface}:${HF_HOME:-/invokeai/.cache/huggingface}
  tty: true
  stdin_open: true


services:
  invokeai-cuda:
    <<: *invokeai
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]

  invokeai-cpu:
    <<: *invokeai
    profiles:
      - cpu

  invokeai-rocm:
    <<: *invokeai
    devices:
      - /dev/kfd:/dev/kfd
      - /dev/dri:/dev/dri
    profiles:
      - rocm
```
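To make the volume fallback chain concrete, a sketch with hypothetical `.env` values; the host path resolves in the order `HOST_INVOKEAI_ROOT`, then `INVOKEAI_ROOT`, then `~/invokeai`:

```bash
# Hypothetical .env:
#   HOST_INVOKEAI_ROOT=/mnt/fast-ssd/invokeai-data   # host side of the bind mount
#   CONTAINER_INVOKEAI_ROOT=/invokeai                # container side (the default)
# Resulting bind mount: /mnt/fast-ssd/invokeai-data -> /invokeai
# Inspect what compose actually resolved:
docker compose config | grep -A4 'volumes:'
```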
docker-entrypoint.sh
ADDED
@@ -0,0 +1,41 @@
```bash
#!/bin/bash
set -e -o pipefail

### Container entrypoint
# Runs the CMD as defined by the Dockerfile or passed to `docker run`.
# Can be used to configure the runtime dir.
# Bypass by using ENTRYPOINT or `--entrypoint`.

### Set INVOKEAI_ROOT pointing to a valid runtime directory.
# Otherwise configure the runtime dir first.

### Set the CONTAINER_UID envvar to match your user.
# Ensures files created in the container are owned by you:
#   docker run --rm -it -v /some/path:/invokeai -e CONTAINER_UID=$(id -u) <this image>
# Default UID: 1000 chosen due to popularity on Linux systems. Possibly 501 on MacOS.

USER_ID=${CONTAINER_UID:-1000}
USER=ubuntu
usermod -u ${USER_ID} ${USER} 1>/dev/null

### Set the $PUBLIC_KEY env var to enable SSH access.
# We do not install openssh-server in the image by default to avoid bloat,
# but it is useful to have the full SSH server e.g. on Runpod
# (use SCP to copy files to/from the image, etc).
if [[ -v "PUBLIC_KEY" ]] && [[ ! -d "${HOME}/.ssh" ]]; then
    apt-get update
    apt-get install -y openssh-server
    pushd "$HOME"
    mkdir -p .ssh
    echo "${PUBLIC_KEY}" > .ssh/authorized_keys
    chmod -R 700 .ssh
    popd
    service ssh start
fi

mkdir -p "${INVOKEAI_ROOT}"
chown --recursive ${USER} "${INVOKEAI_ROOT}" || true
cd "${INVOKEAI_ROOT}"

# Run the CMD as the Container User (not root).
exec gosu ${USER} "$@"
```
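Putting the entrypoint's knobs together, a hypothetical `docker run` invocation might look like the following; the volume path, key file, and SSH port mapping are placeholders:

```bash
# Persist the runtime dir, run as your UID, enable the optional SSH server,
# and publish the web UI plus an SSH port. All paths and ports are examples.
docker run --rm -it \
  --volume "$HOME/invokeai:/invokeai" \
  --env CONTAINER_UID="$(id -u)" \
  --env PUBLIC_KEY="$(cat ~/.ssh/id_ed25519.pub)" \
  --publish 9090:9090 \
  --publish 2222:22 \
  ghcr.io/invoke-ai/invokeai
```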
run.sh
ADDED
@@ -0,0 +1,36 @@
```bash
#!/usr/bin/env bash
set -e -o pipefail

run() {
    local scriptdir=$(dirname "${BASH_SOURCE[0]}")
    cd "$scriptdir" || exit 1

    local build_args=""
    local profile=""

    # create .env file if it doesn't exist, otherwise docker compose will fail
    touch .env

    # parse .env file for build args
    build_args=$(awk '$1 ~ /=[^$]/ && $0 !~ /^#/ {print "--build-arg " $0 " "}' .env) &&
        profile="$(awk -F '=' '/GPU_DRIVER/ {print $2}' .env)"

    # default to 'cuda' profile
    [[ -z "$profile" ]] && profile="cuda"

    local service_name="invokeai-$profile"

    if [[ ! -z "$build_args" ]]; then
        printf "%s\n" "docker compose build args:"
        printf "%s\n" "$build_args"
    fi

    docker compose build $build_args $service_name
    unset build_args

    printf "%s\n" "starting service $service_name"
    docker compose --profile "$profile" up -d "$service_name"
    docker compose logs -f
}

run
```
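To trace what the script does: assuming a `.env` containing only `GPU_DRIVER=rocm`, the awk lines yield `--build-arg GPU_DRIVER=rocm` and the `rocm` profile, so the script effectively runs:

```bash
docker compose build --build-arg GPU_DRIVER=rocm invokeai-rocm
docker compose --profile rocm up -d invokeai-rocm
docker compose logs -f
```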
runpod-readme.md
ADDED
@@ -0,0 +1,60 @@
# InvokeAI - A Stable Diffusion Toolkit

Stable Diffusion distribution by InvokeAI: https://github.com/invoke-ai

The Docker image tracks the `main` branch of the InvokeAI project, which means it includes the latest features, but may contain some bugs.

Your working directory is mounted under the `/workspace` path inside the pod. The models are in `/workspace/invokeai/models`, and outputs are in `/workspace/invokeai/outputs`.

> **Only the /workspace directory will persist between pod restarts!**

> **If you _terminate_ (not just _stop_) the pod, the /workspace will be lost.**

## Quickstart

1. Launch a pod from this template. **It will take about 5-10 minutes to run through the initial setup**. Be patient.
1. Wait for the application to load.
    - TIP: you know it's ready when the CPU usage goes idle
    - You can also check the logs for a line that says "_Point your browser at..._"
1. Open the Invoke AI web UI: click the `Connect` => `connect over HTTP` button.
1. Generate some art!

## Other things you can do

At any point you may edit the pod configuration and set an arbitrary Docker command. For example, you could run a command that downloads some models using `curl`, or fetches some images and places them into your outputs to continue a working session.

If you need to run *multiple commands*, define them in the Docker Command field like this:

`bash -c "cd ${INVOKEAI_ROOT}/outputs; wormhole receive 2-foo-bar; invoke.py --web --host 0.0.0.0"`

### Copying your data in and out of the pod

This image includes a couple of handy tools for getting data into the pod (such as your custom models or embeddings) and out of it (such as downloading your outputs). Here are your options:

- **SSH server**:
    1. Make sure to create and set your Public Key in the RunPod settings (follow the official instructions).
    1. Add an exposed port 22 (TCP) in the pod settings!
    1. When your pod restarts, you will see a new entry in the `Connect` dialog. Use this SSH server to `scp` or `sftp` your files as necessary, or SSH into the pod using the fully fledged SSH server.

- [**Magic Wormhole**](https://magic-wormhole.readthedocs.io/en/latest/welcome.html):
    1. On your computer, `pip install magic-wormhole` (see the linked instructions for details).
    1. Connect to the command line **using the "light" SSH client** or the browser-based console. _Currently there's a bug where `wormhole` isn't available when connected to the "full" SSH server, as described above._
    1. `wormhole send /workspace/invokeai/outputs` will send the entire `outputs` directory. You can also send individual files.
    1. Once packaged, you will see a `wormhole receive <123-some-words>` command. Copy it.
    1. Paste this command into the terminal on your local machine to securely download the payload.
    1. It works the same in reverse: you can `wormhole send` some models from your computer to the pod. Again, save your files somewhere in `/workspace` or they will be lost when the pod is stopped.

- **RunPod's Cloud Sync feature** may be used to sync the persistent volume to cloud storage. You could, for example, copy the entire `/workspace` to S3, add some custom models to it, and copy it back from S3 when launching new pod configurations. Follow the Cloud Sync instructions.

### Disable the NSFW checker

The NSFW checker is enabled by default. To disable it, edit the pod configuration and set the following command:

```
invoke --web --host 0.0.0.0 --no-nsfw_checker
```

---

Template ©2023 Eugene Brodsky [ebr](https://github.com/ebr)
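For the SSH route above, a hypothetical `scp` download of your outputs; substitute the port, user, and host shown in your pod's `Connect` dialog:

```bash
scp -P <port> -r root@<pod-host>:/workspace/invokeai/outputs ./outputs
```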