Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 25 additions & 1 deletion .dockerignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,27 @@
# Allow-list strategy: ignore everything ("*"), then explicitly re-include
# ("!") only what the image build needs. Later rules override earlier ones.
*
!environment*.yml

!backend
!frontend
!scripts
!server
!static
!LICENSE*
!main.py
!setup.py
!docker-build
# re-ignore the local-build Dockerfile; Dockerfile.cloud remains included
# via the !docker-build rule above
docker-build/Dockerfile
!ldm
!installer

# Guard against pulling in any models that might exist in the directory tree
**/*.pt*

# unignore configs, but only ignore the custom models.yaml, in case it exists
!configs
configs/models.yaml

# unignore environment dirs/files, but ignore the environment.yml file or symlink in case it exists
!environment*
environment.yml

**/__pycache__
65 changes: 65 additions & 0 deletions .github/workflows/build-cloud-img.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
name: Build and push cloud image

on:
  workflow_dispatch:
  push:
    branches:
      - main
      - development
      ## temp
      - docker-min
    tags:
      # quoted: a leading "*" elsewhere would be an alias sigil; quoting globs
      # is the safe convention
      - "v*"

permissions:
  contents: read
  packages: write

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}

jobs:
  docker:
    strategy:
      fail-fast: false
      matrix:
        # only x86_64 for now. aarch64+cuda isn't really a thing yet
        arch:
          - x86_64
    runs-on: ubuntu-latest
    name: ${{ matrix.arch }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v4
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=ref,event=branch
            type=ref,event=tag
            type=ref,event=pr
            type=sha

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2

      - if: github.event_name != 'pull_request'
        name: Docker login
        uses: docker/login-action@v2
        with:
          # use the shared env var rather than a hard-coded registry so the
          # login target always matches the image target above
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and push cloud image
        uses: docker/build-push-action@v3
        with:
          context: .
          file: docker-build/Dockerfile.cloud
          # platform strings are lowercase per the OCI image spec
          # ("Linux/..." is not a canonical platform OS component)
          platforms: linux/${{ matrix.arch }}
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
62 changes: 62 additions & 0 deletions docker-build/Dockerfile.cloud
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
# Copyright (c) 2022 Eugene Brodsky (https://github.com/ebr)

#### Builder stage ####

FROM library/ubuntu:22.04 AS builder

ENV DEBIAN_FRONTEND=noninteractive
ENV PYTHONDONTWRITEBYTECODE=1
# unbuffered output, ensures stdout and stderr are printed in the correct order
ENV PYTHONUNBUFFERED=1

# cache apt downloads across builds (BuildKit cache mount)
RUN --mount=type=cache,target=/var/cache/apt \
    apt update && apt install -y \
        libglib2.0-0 \
        libgl1-mesa-glx \
        python3-venv \
        python3-pip

ARG APP_ROOT=/invokeai
WORKDIR ${APP_ROOT}

# activate the venv for every subsequent RUN by putting it first on PATH
ENV VIRTUAL_ENV=${APP_ROOT}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

COPY . .
RUN --mount=type=cache,target=/root/.cache/pip \
    cp installer/py3.10-linux-x86_64-cuda-reqs.txt requirements.txt && \
    python3 -m venv ${VIRTUAL_ENV} && \
    pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
        torch==1.12.0+cu116 \
        torchvision==0.13.0+cu116 && \
    pip install -r requirements.txt && \
    pip install -e .

#### Runtime stage ####

FROM ubuntu:22.04 AS runtime
RUN apt update && apt install -y \
        git \
        curl \
        ncdu \
        iotop \
        bzip2 \
        libglib2.0-0 \
        libgl1-mesa-glx \
        python3-venv \
        python3-pip \
    && apt-get clean

ARG APP_ROOT=/invokeai
WORKDIR ${APP_ROOT}

ENV VIRTUAL_ENV=${APP_ROOT}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

# the app plus its pre-built venv come over from the builder stage
COPY --from=builder ${APP_ROOT} ${APP_ROOT}

# Exec-form entrypoint so CMD / `docker run` arguments are actually forwarded
# to invoke.py. The previous `bash -c "python3 scripts/invoke.py"` silently
# discarded them: extra arguments to `bash -c` become $0/$1 of the inline
# script, not arguments of the script being run.
ENTRYPOINT ["python3", "scripts/invoke.py"]

# one argv element per flag/value; "--host 0.0.0.0" as a single element would
# be delivered as one literal (unparseable) argument
CMD ["--web", "--host", "0.0.0.0"]
58 changes: 58 additions & 0 deletions docker-build/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# Copyright (c) 2022 Eugene Brodsky (https://github.com/ebr)

# Directory in the container where the INVOKEAI_ROOT will be mounted
INVOKEAI_ROOT=/mnt/invokeai
# Host directory to contain the model cache. Will be mounted at INVOKEAI_ROOT path in the container
INVOKEAI_CACHEDIR=${HOME}/invokeai

DOCKER_BUILDKIT=1
IMAGE=local/invokeai:latest

USER=$(shell id -u)
GROUP=$(shell id -g)

# All downloaded models and config will end up in ${INVOKEAI_CACHEDIR}.
# Contents can be moved to a persistent storage and used to rehydrate the cache on another host

# use ${IMAGE} so retagging only needs a change in one place
build:
	docker build -t ${IMAGE} -f Dockerfile.cloud ..

# Copy only the content of config dir first, such that the configuration
# script can run with the expected config dir already populated.
# Then, run the configuration script.
configure:
	docker run --rm -it \
		-v ${INVOKEAI_CACHEDIR}/configs:/mnt/configs \
		--entrypoint bash ${IMAGE} \
		-c "cp -r ./configs/* /mnt/configs/"
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${INVOKEAI_CACHEDIR}:${INVOKEAI_ROOT} \
		-v ${INVOKEAI_CACHEDIR}/.cache:/root/.cache \
		--entrypoint bash ${IMAGE} \
		-c "scripts/configure_invokeai.py --root ${INVOKEAI_ROOT}"
	# files are created by root inside the container; hand them back to the invoking user
	sudo chown -R ${USER}:${GROUP} ${INVOKEAI_CACHEDIR}

# Run the container with the cache mounted and the web server exposed on port 9090
web:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${INVOKEAI_CACHEDIR}:${INVOKEAI_ROOT} \
		-v ${INVOKEAI_CACHEDIR}/.cache:/root/.cache \
		--entrypoint bash -p9090:9090 ${IMAGE} \
		-c "scripts/invoke.py --web --host 0.0.0.0 --root ${INVOKEAI_ROOT}"

cli:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${INVOKEAI_CACHEDIR}:${INVOKEAI_ROOT} \
		-v ${INVOKEAI_CACHEDIR}/.cache:/root/.cache \
		--entrypoint bash ${IMAGE} \
		-c "scripts/invoke.py --root ${INVOKEAI_ROOT}"

# Run the container with the cache mounted and open a bash shell instead of the Invoke CLI or webserver
shell:
	docker run --rm -it --runtime=nvidia --gpus=all \
		-v ${INVOKEAI_CACHEDIR}:${INVOKEAI_ROOT} \
		-v ${INVOKEAI_CACHEDIR}/.cache:/root/.cache \
		-e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
		--entrypoint bash ${IMAGE} --

.PHONY: build configure web cli shell