
Commit a665dba

support the new conda-less install; further optimize docker build
1 parent: cf3d95f

3 files changed: +59 −41 lines

.dockerignore

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@
 !main.py
 !setup.py
 !docker-build
+docker-build/Dockerfile
 !ldm
 !installer
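In a .dockerignore, later rules override earlier ones: !docker-build re-includes the directory, and the new docker-build/Dockerfile line then excludes just that one file again, so edits to the standard Dockerfile no longer invalidate the cached COPY . . layer. A quick way to sanity-check what actually lands in the build context is a throwaway build that lists the copied files (a sketch; the busybox base and the /ctx path are arbitrary choices, not part of this commit):

# List the filtered build context via a stdin Dockerfile; output appears with --progress=plain.
printf 'FROM busybox\nCOPY . /ctx\nRUN find /ctx' \
  | DOCKER_BUILDKIT=1 docker build --no-cache --progress=plain -f - .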

docker-build/Dockerfile.cloud

Lines changed: 41 additions & 30 deletions
@@ -1,13 +1,42 @@
 # Copyright (c) 2022 Eugene Brodsky (https://github.com/ebr)

-FROM nvidia/cuda:11.7.1-runtime-ubuntu22.04 AS base
+#### Builder stage ####
+
+FROM library/ubuntu:22.04 AS builder

 ENV DEBIAN_FRONTEND=noninteractive
-# # no __pycache__ - unclear if there is a benefit
-# ENV PYTHONDONTWRITEBYTECODE=1
+ENV PYTHONDONTWRITEBYTECODE=1
 # unbuffered output, ensures stdout and stderr are printed in the correct order
 ENV PYTHONUNBUFFERED=1

+RUN --mount=type=cache,target=/var/cache/apt \
+    apt update && apt install -y \
+    libglib2.0-0 \
+    libgl1-mesa-glx \
+    python3-venv \
+    python3-pip
+
+
+ARG APP_ROOT=/invokeai
+WORKDIR ${APP_ROOT}
+
+ENV VIRTUAL_ENV=${APP_ROOT}/.venv
+ENV PATH="$VIRTUAL_ENV/bin:$PATH"
+
+COPY . .
+RUN --mount=type=cache,target=/root/.cache/pip \
+    cp installer/py3.10-linux-x86_64-cuda-reqs.txt requirements.txt && \
+    python3 -m venv ${VIRTUAL_ENV} &&\
+    pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
+    torch==1.12.0+cu116 \
+    torchvision==0.13.0+cu116 &&\
+    pip install -r requirements.txt &&\
+    pip install -e .
+
+
+#### Runtime stage ####
+
+FROM ubuntu:22.04 as runtime
 RUN apt update && apt install -y \
     git \
     curl \
@@ -16,36 +45,18 @@ RUN apt update && apt install -y \
     bzip2 \
     libglib2.0-0 \
     libgl1-mesa-glx \
+    python3-venv \
+    python3-pip \
     && apt-get clean

-# Micromamba is a minimal conda implementation
-ENV MAMBA_ROOT_PREFIX=/opt/conda
-RUN curl -Ls https://micro.mamba.pm/api/micromamba/linux-64/latest | tar -xvj bin/micromamba
-
-WORKDIR /invokeai
-
-### Cache the dependencies first
-# Avoid re-downloading the dependencies when unrelated files change in context
-#
-# We could use relative paths in the environment file, but it's not currently set up that way.
-# So we copy it to the working directory to maintain compatibility with other installation methods
-COPY environments-and-requirements/environment-lin-cuda.yml environment.yml
-
-# Patch the env file to remove installation of local package
-RUN sed -i '/-e \./d' environment.yml
-RUN micromamba create -y -f environment.yml &&\
-    micromamba clean --all -f -y &&\
-    rm -rf ${MAMBA_ROOT_PREFIX}/pkgs
-
-### Copy the rest of the context and install local package
-COPY . .
-RUN micromamba -n invokeai run pip install -e .
+ARG APP_ROOT=/invokeai
+WORKDIR ${APP_ROOT}

-### Default model config
-RUN cp configs/models.yaml.example configs/models.yaml
+ENV VIRTUAL_ENV=${APP_ROOT}/.venv
+ENV PATH="$VIRTUAL_ENV/bin:$PATH"

-ENTRYPOINT ["bash"]
+COPY --from=builder ${APP_ROOT} ${APP_ROOT}

-EXPOSE 9090
+ENTRYPOINT ["bash", "-c", "python3 scripts/invoke.py" ]

-CMD [ "-c", "micromamba -r ${MAMBA_ROOT_PREFIX} -n invokeai run python scripts/invoke.py --web --host 0.0.0.0"]
+CMD ["--web", "--host 0.0.0.0"]

docker-build/Makefile

Lines changed: 17 additions & 11 deletions
@@ -15,31 +15,37 @@ GROUP=$(shell id -g)
 # Contents can be moved to a persistent storage and used to rehydrate the cache on another host

 build:
-        docker buildx build -t local/invokeai:latest -f Dockerfile.cloud ..
+        docker build -t local/invokeai:latest -f Dockerfile.cloud ..

-# Populate the cache.
-# First, pre-seed the config dir on the host with the content from the image,
-# such that the model preload step can run with the config mounted and pre-populated.
-# Then, run `load-models` to cache models, VAE, other static data.
-load-models:
+# Copy only the content of config dir first, such that the configuration
+# script can run with the expected config dir already populated.
+# Then, run the configuration script.
+configure:
         docker run --rm -it \
                 -v ${INVOKEAI_CACHEDIR}/configs:/mnt/configs \
                 --entrypoint bash ${IMAGE} \
                 -c "cp -r ./configs/* /mnt/configs/"
         docker run --rm -it --runtime=nvidia --gpus=all \
                 -v ${INVOKEAI_CACHEDIR}:${INVOKEAI_ROOT} \
                 -v ${INVOKEAI_CACHEDIR}/.cache:/root/.cache \
-                --entrypoint bash ${IMAGE} \
-                -c "micromamba -n invokeai run python scripts/load_models.py --root ${INVOKEAI_ROOT}"
+                ${IMAGE} \
+                -c "scripts/configure_invokeai.py --root ${INVOKEAI_ROOT}"
         sudo chown -R ${USER}:${GROUP} ${INVOKEAI_CACHEDIR}

 # Run the container with the cache mounted and the web server exposed on port 9090
-run:
+web:
         docker run --rm -it --runtime=nvidia --gpus=all \
                 -v ${INVOKEAI_CACHEDIR}:${INVOKEAI_ROOT} \
                 -v ${INVOKEAI_CACHEDIR}/.cache:/root/.cache \
                 --entrypoint bash -p9090:9090 ${IMAGE} \
-                -c "micromamba -n invokeai run python scripts/invoke.py --web --host 0.0.0.0 --root ${INVOKEAI_ROOT}"
+                -c "scripts/invoke.py --web --host 0.0.0.0 --root ${INVOKEAI_ROOT}"
+
+cli:
+        docker run --rm -it --runtime=nvidia --gpus=all \
+                -v ${INVOKEAI_CACHEDIR}:${INVOKEAI_ROOT} \
+                -v ${INVOKEAI_CACHEDIR}/.cache:/root/.cache \
+                --entrypoint bash ${IMAGE} \
+                -c "scripts/invoke.py --root ${INVOKEAI_ROOT}"

 # Run the container with the cache mounted and open a bash shell instead of the Invoke CLI or webserver
 shell:
@@ -49,4 +55,4 @@ shell:
                 -e INVOKEAI_ROOT=${INVOKEAI_ROOT} \
                 --entrypoint bash ${IMAGE} --

-.PHONY: build preload run shell
+.PHONY: build configure web cli shell
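Taken together, a first run with the renamed targets might look like the sketch below (assuming INVOKEAI_CACHEDIR and INVOKEAI_ROOT keep their defaults from the top of the Makefile):

make build       # build local/invokeai:latest from Dockerfile.cloud
make configure   # seed configs, then run scripts/configure_invokeai.py against the cache dir
make web         # web UI on port 9090
make cli         # or the interactive CLI instead of the web server

The sudo chown at the end of configure hands the root-owned files written by the container back to the invoking user, so later targets can reuse the cache without permission errors.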
