version: "3.3" services: text-generation-webui: build: context: . args: # Requirements file to use: # | GPU | CPU | requirements file to use | # |--------|---------|---------| # | NVIDIA | has AVX2 | `requirements.txt` | # | NVIDIA | no AVX2 | `requirements_noavx2.txt` | # | AMD | has AVX2 | `requirements_amd.txt` | # | AMD | no AVX2 | `requirements_amd_noavx2.txt` | # | CPU only | has AVX2 | `requirements_cpu_only.txt` | # | CPU only | no AVX2 | `requirements_cpu_only_noavx2.txt` | # | Apple | Intel | `requirements_apple_intel.txt` | # | Apple | Apple Silicon | `requirements_apple_silicon.txt` | # Default: requirements.txt` # BUILD_REQUIREMENTS: requirements.txt # Extension requirements to build: # BUILD_EXTENSIONS: # specify which cuda version your card supports: https://developer.nvidia.com/cuda-gpus TORCH_CUDA_ARCH_LIST: ${TORCH_CUDA_ARCH_LIST:-7.5} BUILD_EXTENSIONS: ${BUILD_EXTENSIONS:-} APP_GID: ${APP_GID:-6972} APP_UID: ${APP_UID-6972} env_file: .env user: "${APP_RUNTIME_UID:-6972}:${APP_RUNTIME_GID:-6972}" ports: - "${HOST_PORT:-7860}:${CONTAINER_PORT:-7860}" - "${HOST_API_PORT:-5000}:${CONTAINER_API_PORT:-5000}" stdin_open: true tty: true volumes: - ./cache:/home/app/text-generation-webui/cache - ./characters:/home/app/text-generation-webui/characters - ./extensions:/home/app/text-generation-webui/extensions - ./loras:/home/app/text-generation-webui/loras - ./logs:/home/app/text-generation-webui/logs - ./models:/home/app/text-generation-webui/models - ./presets:/home/app/text-generation-webui/presets - ./prompts:/home/app/text-generation-webui/prompts - ./softprompts:/home/app/text-generation-webui/softprompts - ./training:/home/app/text-generation-webui/training - ./cloudflared:/etc/cloudflared