
Commit 3f6d0fb

update dockerfile (#1551)
* update dockerfile
* remove non-existing file from .dockerignore
* remove bloat and unnecessary step; also use --no-cache-dir for pip install, image is now close to 2GB
* make Dockerfile a variable
* set base image to `ubuntu:22.10`
* add build-essential
* link outputs folder for persistence
* update tag variable
* update docs
* fix non-customizable build args, add reqs output
1 parent 08ef4d6 commit 3f6d0fb

File tree

8 files changed: +77 -145 lines changed

.dockerignore

Lines changed: 11 additions & 2 deletions
@@ -1,3 +1,12 @@
 *
-!environment*.yml
-!docker-build
+!backend
+!configs
+!environments-and-requirements
+!frontend
+!installer
+!ldm
+!main.py
+!scripts
+!server
+!static
+!setup.py
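The new `.dockerignore` is an allow-list: `*` excludes everything from the build context, and the `!` entries re-include only the sources the image actually needs. A quick way to verify what ends up in the context is to build a throwaway image that copies and lists it; a minimal sketch (the `context-check` image name is illustrative):

```bash
# Build a scratch image whose only job is to copy the build context and list it;
# anything not re-included by the allow-list above should be missing from the output.
docker build --tag context-check --file - . <<'EOF'
FROM busybox
COPY . /context
CMD ["find", "/context", "-maxdepth", "1"]
EOF
docker run --rm context-check
```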

.github/workflows/build-container.yml

Lines changed: 7 additions & 12 deletions
@@ -6,6 +6,7 @@ on:
     branches:
       - 'main'
       - 'development'
+      - 'update-dockerfile'
 
 jobs:
   docker:
@@ -15,13 +16,11 @@ jobs:
         arch:
           - x86_64
           - aarch64
-        include:
-          - arch: x86_64
-            conda-env-file: environment-lin-cuda.yml
-          - arch: aarch64
-            conda-env-file: environment-lin-aarch64.yml
+        pip-requirements:
+          - requirements-lin-amd.txt
+          - requirements-lin-cuda.txt
     runs-on: ubuntu-latest
-    name: ${{ matrix.arch }}
+    name: ${{ matrix.pip-requirements }} ${{ matrix.arch }}
     steps:
       - name: prepare docker-tag
         env:
@@ -40,9 +39,5 @@ jobs:
           file: docker-build/Dockerfile
           platforms: Linux/${{ matrix.arch }}
           push: false
-          tags: ${{ env.dockertag }}:${{ matrix.arch }}
-          build-args: |
-            conda_env_file=${{ matrix.conda-env-file }}
-            conda_version=py39_4.12.0-Linux-${{ matrix.arch }}
-            invokeai_git=${{ github.repository }}
-            invokeai_branch=${{ github.ref_name }}
+          tags: ${{ env.dockertag }}:${{ matrix.pip-requirements }}-${{ matrix.arch }}
+          build-args: pip_requirements=${{ matrix.pip-requirements }}
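With the `include:` overrides replaced by a second matrix dimension, the workflow now builds every `arch` × `pip-requirements` combination (four jobs instead of two) and encodes both values in the image tag. A single matrix cell corresponds roughly to the local build below; this is a sketch that uses the `PIP_REQUIREMENTS` build-arg declared in the new Dockerfile and an illustrative tag in place of the workflow's derived `dockertag`:

```bash
# Approximate local equivalent of the x86_64 / requirements-lin-cuda.txt matrix cell
docker buildx build \
  --file docker-build/Dockerfile \
  --platform Linux/x86_64 \
  --tag invokeai:requirements-lin-cuda.txt-x86_64 \
  --build-arg PIP_REQUIREMENTS=requirements-lin-cuda.txt \
  .
```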

docker-build/Dockerfile

Lines changed: 23 additions & 61 deletions
@@ -1,34 +1,13 @@
-FROM ubuntu AS get_miniconda
-
-SHELL ["/bin/bash", "-c"]
-
-# install wget
-RUN apt-get update \
-  && apt-get install -y \
-    wget \
-  && apt-get clean \
-  && rm -rf /var/lib/apt/lists/*
-
-# download and install miniconda
-ARG conda_version=py39_4.12.0-Linux-x86_64
-ARG conda_prefix=/opt/conda
-RUN wget --progress=dot:giga -O /miniconda.sh \
-  https://repo.anaconda.com/miniconda/Miniconda3-${conda_version}.sh \
-  && bash /miniconda.sh -b -p ${conda_prefix} \
-  && rm -f /miniconda.sh
-
-FROM ubuntu AS invokeai
+FROM ubuntu:22.10
 
 # use bash
 SHELL [ "/bin/bash", "-c" ]
 
-# clean bashrc
-RUN echo "" > ~/.bashrc
-
 # Install necesarry packages
 RUN apt-get update \
   && apt-get install -y \
     --no-install-recommends \
+    build-essential \
     gcc \
     git \
     libgl1-mesa-glx \
@@ -39,41 +18,24 @@ RUN apt-get update \
   && apt-get clean \
   && rm -rf /var/lib/apt/lists/*
 
-# clone repository, create models.yaml and create symlinks
-ARG invokeai_git=invoke-ai/InvokeAI
-ARG invokeai_branch=main
-ARG project_name=invokeai
-ARG conda_env_file=environment-lin-cuda.yml
-RUN git clone -b ${invokeai_branch} https://github.com/${invokeai_git}.git "/${project_name}" \
-  && ln -sf \
-    "/${project_name}/environments-and-requirements/${conda_env_file}" \
-    "/${project_name}/environment.yml" \
-  && ln -sf \
-    /data/outputs/ \
-    "/${project_name}/outputs"
-
-# set workdir and copy models.yaml
-WORKDIR "/${project_name}"
-COPY docker-build/models.yaml configs/models.yaml
-
-# install conda env and preload models
-ARG conda_prefix=/opt/conda
-COPY --from=get_miniconda "${conda_prefix}" "${conda_prefix}"
-RUN source "${conda_prefix}/etc/profile.d/conda.sh" \
-  && conda init bash \
-  && source ~/.bashrc \
-  && conda env create \
-    --name "${project_name}" \
-  && rm -Rf ~/.cache \
-  && conda clean -afy \
-  && echo "conda activate ${project_name}" >> ~/.bashrc
-
-RUN source ~/.bashrc \
-  && python scripts/preload_models.py \
-    --no-interactive
-
-# Copy entrypoint and set env
-ENV CONDA_PREFIX="${conda_prefix}"
-ENV PROJECT_NAME="${project_name}"
-COPY docker-build/entrypoint.sh /
-ENTRYPOINT [ "/entrypoint.sh" ]
+ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
+ARG PROJECT_NAME=invokeai
+ARG INVOKEAI_ROOT=/data
+ENV INVOKEAI_ROOT=${INVOKEAI_ROOT}
+
+# set workdir and copy sources
+WORKDIR /${PROJECT_NAME}
+COPY . .
+
+# install requirements and link outputs folder
+RUN cp \
+    ./environments-and-requirements/${PIP_REQUIREMENTS} \
+    ${PIP_REQUIREMENTS} \
+  && pip install \
+    --no-cache-dir \
+    -r ${PIP_REQUIREMENTS} \
+  && ln -sf /data/outputs /${PROJECT_NAME}/outputs
+
+# set Entrypoint and default CMD
+ENTRYPOINT [ "python3" ]
+CMD [ "scripts/invoke.py", "--web", "--host", "0.0.0.0" ]

docker-build/build.sh

Lines changed: 19 additions & 42 deletions
@@ -6,23 +6,17 @@ set -e
 
 source ./docker-build/env.sh || echo "please run from repository root" || exit 1
 
-invokeai_conda_version=${INVOKEAI_CONDA_VERSION:-py39_4.12.0-${platform/\//-}}
-invokeai_conda_prefix=${INVOKEAI_CONDA_PREFIX:-\/opt\/conda}
-invokeai_conda_env_file=${INVOKEAI_CONDA_ENV_FILE:-environment-lin-cuda.yml}
-invokeai_git=${INVOKEAI_GIT:-invoke-ai/InvokeAI}
-invokeai_branch=${INVOKEAI_BRANCH:-main}
-huggingface_token=${HUGGINGFACE_TOKEN?}
+pip_requirements=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
+dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
 
 # print the settings
 echo "You are using these values:"
+echo -e "Dockerfile:\t\t ${dockerfile}"
+echo -e "requirements:\t\t ${pip_requirements}"
 echo -e "project_name:\t\t ${project_name}"
 echo -e "volumename:\t\t ${volumename}"
 echo -e "arch:\t\t\t ${arch}"
 echo -e "platform:\t\t ${platform}"
-echo -e "invokeai_conda_version:\t ${invokeai_conda_version}"
-echo -e "invokeai_conda_prefix:\t ${invokeai_conda_prefix}"
-echo -e "invokeai_conda_env_file: ${invokeai_conda_env_file}"
-echo -e "invokeai_git:\t\t ${invokeai_git}"
 echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"
 
 _runAlpine() {
@@ -35,50 +29,33 @@ _runAlpine() {
     alpine "$@"
 }
 
-_copyCheckpoints() {
-  echo "creating subfolders for models and outputs"
-  _runAlpine mkdir models
-  _runAlpine mkdir outputs
-  echo "downloading v1-5-pruned-emaonly.ckpt"
-  _runAlpine wget \
-    --header="Authorization: Bearer ${huggingface_token}" \
-    -O models/v1-5-pruned-emaonly.ckpt \
-    https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt
-  echo "done"
-}
-
 _checkVolumeContent() {
   _runAlpine ls -lhA /data/models
 }
 
-_getModelMd5s() {
-  _runAlpine \
-    alpine sh -c "md5sum /data/models/*.ckpt"
-}
-
 if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
   echo "Volume already exists"
-  if [[ -z "$(_checkVolumeContent)" ]]; then
-    echo "looks empty, copying checkpoint"
-    _copyCheckpoints
-  fi
-  echo "Models in ${volumename}:"
-  _checkVolumeContent
 else
   echo -n "createing docker volume "
   docker volume create "${volumename}"
-  _copyCheckpoints
fi
 
 # Build Container
 docker build \
   --platform="${platform}" \
-  --tag "${invokeai_tag}" \
-  --build-arg project_name="${project_name}" \
-  --build-arg conda_version="${invokeai_conda_version}" \
-  --build-arg conda_prefix="${invokeai_conda_prefix}" \
-  --build-arg conda_env_file="${invokeai_conda_env_file}" \
-  --build-arg invokeai_git="${invokeai_git}" \
-  --build-arg invokeai_branch="${invokeai_branch}" \
-  --file ./docker-build/Dockerfile \
+  --tag="${invokeai_tag}" \
+  --build-arg="PROJECT_NAME=${project_name}" \
+  --build-arg="PIP_REQUIREMENTS=${pip_requirements}" \
+  --file="${dockerfile}" \
   .
+
+docker run \
+  --rm \
+  --platform="$platform" \
+  --name="$project_name" \
+  --hostname="$project_name" \
+  --mount="source=$volumename,target=/data" \
+  --mount="type=bind,source=$HOME/.huggingface,target=/root/.huggingface" \
+  --env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
+  "${invokeai_tag}" \
+  scripts/configure_invokeai.py --yes
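Besides building the image, the script now runs `scripts/configure_invokeai.py --yes` in a throwaway container, with the data volume mounted at `/data` and the host's `~/.huggingface` directory bind-mounted so model downloads can authenticate. A typical invocation from the repository root, overriding the new defaults via environment variables (values illustrative), might look like:

```bash
# Build with the AMD requirements file and an alternative Dockerfile path,
# then let the script populate the data volume via configure_invokeai.py
PIP_REQUIREMENTS=requirements-lin-amd.txt \
INVOKE_DOCKERFILE=docker-build/Dockerfile \
./docker-build/build.sh
```

Since `huggingface_token=${HUGGINGFACE_TOKEN?}` is gone, the token is no longer enforced up front; credentials are expected to come from the bind-mounted `~/.huggingface` directory or an exported `HUGGINGFACE_TOKEN`.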

docker-build/entrypoint.sh

Lines changed: 0 additions & 8 deletions
This file was deleted.

docker-build/env.sh

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@ project_name=${PROJECT_NAME:-invokeai}
 volumename=${VOLUMENAME:-${project_name}_data}
 arch=${ARCH:-x86_64}
 platform=${PLATFORM:-Linux/${arch}}
-invokeai_tag=${INVOKEAI_TAG:-${project_name}-${arch}}
+invokeai_tag=${INVOKEAI_TAG:-${project_name}:${arch}}
 
 export project_name
 export volumename
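The only change here is the tag default: `${project_name}-${arch}` becomes `${project_name}:${arch}`, i.e. a proper `repository:tag` reference rather than a single flat name. A quick check of how the defaults and overrides resolve (the `myfork` value is illustrative):

```bash
# Resolved default when sourced from the repository root
source docker-build/env.sh
echo "${invokeai_tag}"   # -> invokeai:x86_64

# Overrides flow through the same way
PROJECT_NAME=myfork ARCH=aarch64 bash -c \
  'source docker-build/env.sh && echo "${invokeai_tag}"'   # -> myfork:aarch64
```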

docker-build/models.yaml

Lines changed: 0 additions & 8 deletions
This file was deleted.

docs/installation/INSTALL_DOCKER.md

Lines changed: 16 additions & 11 deletions
@@ -72,14 +72,19 @@ created in the last step.
 
 Some Suggestions of variables you may want to change besides the Token:
 
-| Environment-Variable | Default value | Description |
-| ------------------------- | ----------------------------- | ---------------------------------------------------------------------------- |
-| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without you can't get the checkpoint |
-| `ARCH` | x86_64 | if you are using a ARM based CPU |
-| `INVOKEAI_TAG` | invokeai-x86_64 | the Container Repository / Tag which will be used |
-| `INVOKEAI_CONDA_ENV_FILE` | environment-lin-cuda.yml | since environment.yml wouldn't work with aarch |
-| `INVOKEAI_GIT` | invoke-ai/InvokeAI | the repository to use |
-| `INVOKEAI_BRANCH` | main | the branch to checkout |
+<figure markdown>
+
+| Environment-Variable | Default value | Description |
+| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
+| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable, without it you can't download the huggingface models |
+| `PROJECT_NAME` | `invokeai` | affects the project folder, tag- and volume name |
+| `VOLUMENAME` | `${PROJECT_NAME}_data` | affects the project folder, tag- and volume name |
+| `ARCH` | `x86_64` | can be changed to f.e. aarch64 if you are using a ARM based CPU |
+| `INVOKEAI_TAG` | `${PROJECT_NAME}:${ARCH}` | the Container Repository / Tag which will be used |
+| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
+| `INVOKE_DOCKERFILE` | `docker-build/Dockerfile` | the Dockerfile which should be built, handy for development |
+
+</figure>
 
 #### Build the Image
 
@@ -109,12 +114,12 @@ also do so.
 !!! example ""
 
     ```bash
-    ./docker-build/run.sh --from_file tests/validate_pr_prompt.txt
+    ./docker-build/run.sh scripts/invoke.py
    ```
 
-The output folder is located on the volume which is also used to store the model.
+This would start the CLI instead of the default command that starts the webserver.
 
-Find out more about available CLI-Parameters at [features/CLI.md](../features/CLI.md/#arguments)
+Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments)
 
 ---
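The documented variable set now matches the pip-based build: the conda and git knobs are gone, and `PROJECT_NAME`, `VOLUMENAME`, `PIP_REQUIREMENTS`, and `INVOKE_DOCKERFILE` take their place. Put together, the documented flow looks roughly like the sketch below (token placeholder and values illustrative):

```bash
# Set the variables from the table, then build and run
export HUGGINGFACE_TOKEN="<your token>"
export PIP_REQUIREMENTS=requirements-lin-cuda.txt

./docker-build/build.sh                    # build the image and configure models
./docker-build/run.sh                      # default CMD: the web server
./docker-build/run.sh scripts/invoke.py    # or start the CLI instead
```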
