# Copyright (c) 2022 Eugene Brodsky (https://github.com/ebr)

#### Builder stage ####

FROM library/ubuntu:22.04 AS builder

# Build-time only: suppress interactive prompts from apt.
ENV DEBIAN_FRONTEND=noninteractive
# no __pycache__ baked into the image
ENV PYTHONDONTWRITEBYTECODE=1
# unbuffered output, ensures stdout and stderr are printed in the correct order
ENV PYTHONUNBUFFERED=1

# apt-get (not apt) for scripted use; skip recommends to keep the stage lean.
RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
    apt-get update && apt-get install -y --no-install-recommends \
        libgl1-mesa-glx \
        libglib2.0-0 \
        python3-pip \
        python3-venv

ARG APP_ROOT=/invokeai
WORKDIR ${APP_ROOT}

# Put the venv first on PATH so bare `pip`/`python3` resolve into it.
ENV VIRTUAL_ENV=${APP_ROOT}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

# Copy only the pinned requirements file first so the (slow) dependency
# layer stays cached when unrelated source files change.
COPY installer/py3.10-linux-x86_64-cuda-reqs.txt requirements.txt
RUN --mount=type=cache,target=/root/.cache/pip \
    python3 -m venv ${VIRTUAL_ENV} && \
    pip install --extra-index-url https://download.pytorch.org/whl/cu116 \
        torch==1.12.0+cu116 \
        torchvision==0.13.0+cu116 && \
    pip install -r requirements.txt

# Now bring in the full source tree and install the local package on top
# of the already-cached dependency layers.
COPY . .
RUN --mount=type=cache,target=/root/.cache/pip \
    pip install -e .
#### Runtime stage ####

# Uppercase AS for consistency with the builder stage (and with linters).
FROM ubuntu:22.04 AS runtime
1140RUN apt update && apt install -y \
1241 git \
1342 curl \
@@ -16,36 +45,18 @@ RUN apt update && apt install -y \
1645 bzip2 \
1746 libglib2.0-0 \
1847 libgl1-mesa-glx \
48+ python3-venv \
49+ python3-pip \
1950 && apt-get clean
ARG APP_ROOT=/invokeai
WORKDIR ${APP_ROOT}

# Keep Python output unbuffered at runtime too, so stdout/stderr ordering
# in container logs is correct (the builder-stage ENV does not carry over).
ENV PYTHONUNBUFFERED=1

# "Activate" the venv built in the builder stage by prepending it to PATH.
ENV VIRTUAL_ENV=${APP_ROOT}/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

# Copy the application together with its ready-made virtualenv; compilers
# and pip caches from the builder stage never reach this image.
COPY --from=builder ${APP_ROOT} ${APP_ROOT}
# Documentation only (does not publish the port): the web server listens
# on 9090 when started with --web.
EXPOSE 9090

# Exec-form entrypoint: python is PID 1 and receives SIGTERM directly, and
# CMD supplies default arguments that `docker run` can override.
# NOTE: the previous `bash -c "python3 scripts/invoke.py"` form silently
# dropped all CMD arguments (they became the shell's $0/positional params),
# and "--host 0.0.0.0" must be two argv entries, not one.
ENTRYPOINT ["python3", "scripts/invoke.py"]
CMD ["--web", "--host", "0.0.0.0"]