27 changes: 10 additions & 17 deletions docker-build/Dockerfile
@@ -18,24 +18,17 @@ RUN apt-get update \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
ARG PROJECT_NAME=invokeai
ARG INVOKEAI_ROOT=/data
ENV INVOKEAI_ROOT=${INVOKEAI_ROOT}

# set workdir and copy sources
WORKDIR /${PROJECT_NAME}
COPY . .
WORKDIR /invokeai
ARG PIP_REQUIREMENTS=requirements-lin-cuda.txt
COPY . ./environments-and-requirements/${PIP_REQUIREMENTS} ./

# install requirements and link outputs folder
RUN cp \
./environments-and-requirements/${PIP_REQUIREMENTS} \
${PIP_REQUIREMENTS} \
&& pip install \
--no-cache-dir \
-r ${PIP_REQUIREMENTS} \
&& ln -sf /data/outputs /${PROJECT_NAME}/outputs
RUN pip install \
--no-cache-dir \
-r ${PIP_REQUIREMENTS}

# set Entrypoint and default CMD
ENTRYPOINT [ "python3" ]
CMD [ "scripts/invoke.py", "--web", "--host", "0.0.0.0" ]
# set Environment, Entrypoint and default CMD
ENV INVOKEAI_ROOT /data
ENTRYPOINT [ "python3", "scripts/invoke.py", "--outdir=/data/outputs" ]
CMD [ "--web", "--host=0.0.0.0" ]
28 changes: 8 additions & 20 deletions docker-build/build.sh
@@ -1,10 +1,12 @@
#!/usr/bin/env bash
set -e
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoint!!!
# configure values by using env when executing build.sh
# f.e. env ARCH=aarch64 GITHUB_INVOKE_AI=https://github.com/yourname/yourfork.git ./build.sh

source ./docker-build/env.sh || echo "please run from repository root" || exit 1
# IMPORTANT: You need to have a token on huggingface.co to be able to download the checkpoints!!!
# configure values by using env when executing build.sh, e.g. `env ARCH=aarch64 ./build.sh`

source ./docker-build/env.sh \
|| echo "please execute docker-build/build.sh from repository root" \
|| exit 1

pip_requirements=${PIP_REQUIREMENTS:-requirements-lin-cuda.txt}
dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
@@ -13,28 +15,14 @@ dockerfile=${INVOKE_DOCKERFILE:-docker-build/Dockerfile}
echo "You are using these values:"
echo -e "Dockerfile:\t\t ${dockerfile}"
echo -e "requirements:\t\t ${pip_requirements}"
echo -e "project_name:\t\t ${project_name}"
echo -e "volumename:\t\t ${volumename}"
echo -e "arch:\t\t\t ${arch}"
echo -e "platform:\t\t ${platform}"
echo -e "invokeai_tag:\t\t ${invokeai_tag}\n"

_runAlpine() {
docker run \
--rm \
--interactive \
--tty \
--mount source="$volumename",target=/data \
--workdir /data \
alpine "$@"
}

_checkVolumeContent() {
_runAlpine ls -lhA /data/models
}

if [[ -n "$(docker volume ls -f name="${volumename}" -q)" ]]; then
echo "Volume already exists"
echo
else
echo -n "createing docker volume "
docker volume create "${volumename}"
@@ -44,7 +32,6 @@ fi
docker build \
--platform="${platform}" \
--tag="${invokeai_tag}" \
--build-arg="PROJECT_NAME=${project_name}" \
--build-arg="PIP_REQUIREMENTS=${pip_requirements}" \
--file="${dockerfile}" \
.
@@ -57,5 +44,6 @@ docker run \
--mount="source=$volumename,target=/data" \
--mount="type=bind,source=$HOME/.huggingface,target=/root/.huggingface" \
--env="HUGGINGFACE_TOKEN=${HUGGINGFACE_TOKEN}" \
--entrypoint="python3" \
"${invokeai_tag}" \
scripts/configure_invokeai.py --yes
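Because the image's ENTRYPOINT now includes `scripts/invoke.py`, running anything else inside the container means overriding the entrypoint, which is what the `--entrypoint="python3"` flag above does for the configure step. The same pattern gives you a debug shell; a sketch, assuming the illustrative tag `invokeai:x86_64`:

```bash
# Replace the entrypoint entirely to get an interactive shell in the image
docker run --rm --interactive --tty \
  --entrypoint="/bin/bash" \
  invokeai:x86_64
```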
10 changes: 5 additions & 5 deletions docker-build/run.sh
@@ -7,9 +7,9 @@ docker run \
--interactive \
--tty \
--rm \
--platform "$platform" \
--name "$project_name" \
--hostname "$project_name" \
--mount source="$volumename",target=/data \
--publish 9090:9090 \
--platform="$platform" \
--name="$project_name" \
--hostname="$project_name" \
--mount="source=$volumename,target=/data" \
--publish=9090:9090 \
"$invokeai_tag" ${1:+$@}
8 changes: 4 additions & 4 deletions docs/installation/INSTALL_DOCKER.md
@@ -78,7 +78,7 @@ Some Suggestions of variables you may want to change besides the Token:
| -------------------- | ----------------------------- | -------------------------------------------------------------------------------------------- |
| `HUGGINGFACE_TOKEN` | No default, but **required**! | This is the only **required** variable; without it you can't download the huggingface models |
| `PROJECT_NAME` | `invokeai` | affects the project folder, tag- and volume name |
| `VOLUMENAME` | `${PROJECT_NAME}_data` | affects the project folder, tag- and volume name |
| `VOLUMENAME` | `${PROJECT_NAME}_data` | Name of the Docker Volume where model files will be stored |
| `ARCH` | `x86_64` | can be changed to e.g. aarch64 if you are using an ARM-based CPU |
| `INVOKEAI_TAG` | `${PROJECT_NAME}:${ARCH}` | the Container Repository / Tag which will be used |
| `PIP_REQUIREMENTS` | `requirements-lin-cuda.txt` | the requirements file to use (from `environments-and-requirements`) |
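These variables are read at build time, so they can be set inline when calling the build script. A sketch, where `hf_xxxxxxxx` stands in for a real token and the overrides are optional:

```bash
# HUGGINGFACE_TOKEN is required; ARCH is an optional override.
# "hf_xxxxxxxx" is a placeholder, not a real token.
env HUGGINGFACE_TOKEN="hf_xxxxxxxx" \
    ARCH=aarch64 \
    ./docker-build/build.sh
```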
@@ -111,13 +111,13 @@ When used without arguments, the container will start the webserver and provide
you the link to open it. But if you want to use some other parameters you can
also do so.

!!! example ""
!!! example "run script example"

```bash
./docker-build/run.sh scripts/invoke.py
./docker-build/run.sh "banana sushi" -Ak_lms -S42 -s10
```

This would start the CLI instead of the default command that starts the webserver.
This would generate the legendary "banana sushi" with Seed 42, k_lms Sampler and 10 steps.

Find out more about available CLI-Parameters at [features/CLI.md](../../features/CLI/#arguments)
