3 changes: 1 addition & 2 deletions installer/install.bat
@@ -138,8 +138,6 @@ set err_msg=----- InvokeAI setup failed -----
.venv\Scripts\python -m pip install %no_cache_dir% --no-warn-script-location -e .
if %errorlevel% neq 0 goto err_exit

-echo ***** Installed InvokeAI *****
-
copy installer\invoke.bat .\invoke.bat
echo ***** Installed invoke launcher script ******

@@ -150,6 +148,7 @@ rd /s /q installer installer_files
call .venv\Scripts\python scripts\configure_invokeai.py
set err_msg=----- model download clone failed -----
if %errorlevel% neq 0 goto err_exit
+deactivate

echo ***** Finished downloading models *****

1 change: 1 addition & 0 deletions installer/install.sh
mode change 100755 → 100644
@@ -220,6 +220,7 @@ rm -rf installer/ installer_files/
.venv/bin/python3 scripts/configure_invokeai.py
_err_msg="\n----- model download clone failed -----\n"
_err_exit $? _err_msg
+deactivate

echo -e "\n***** Finished downloading models *****\n"

51 changes: 29 additions & 22 deletions scripts/configure_invokeai.py
@@ -39,6 +39,8 @@
Default_config_file = './configs/models.yaml'
SD_Configs = './configs/stable-diffusion'

+assert os.path.exists(Dataset_path),"The configs directory cannot be found. Please run this script from within the InvokeAI distribution directory, or from within the invokeai runtime directory."
+
Datasets = OmegaConf.load(Dataset_path)
completer = generic_completer(['yes','no'])

@@ -561,14 +563,14 @@ def get_root(root:str=None)->str:
    return root

#-------------------------------------
-def select_root(yes_to_all:bool=False):
-    default = os.path.expanduser('~/invokeai')
+def select_root(root:str, yes_to_all:bool=False):
+    default = root or os.path.expanduser('~/invokeai')
    if (yes_to_all):
        return default
    completer.set_default_dir(default)
    completer.complete_extensions(())
    completer.set_line(default)
-    return input(f"Select a directory in which to install InvokeAI's models and configuration files [{default}]: ")
+    return input(f"Select a directory in which to install InvokeAI's models and configuration files [{default}]: ") or default

#-------------------------------------
def select_outputs(root:str,yes_to_all:bool=False):
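
The revised select_root() and select_outputs() thread the previously chosen value through as the prompt default and fall back to it when the user just presses Enter. A minimal sketch of that prompt-with-default idiom (not part of this diff; prompt_with_default is a hypothetical helper name):

import os

def prompt_with_default(prompt: str, default: str) -> str:
    # input() returns '' when the user just presses Enter;
    # `or default` substitutes the default in that case.
    return input(f'{prompt} [{default}]: ') or default

root = prompt_with_default(
    "Select a directory in which to install InvokeAI's models and configuration files",
    os.path.expanduser('~/invokeai'))
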
@@ -578,23 +580,32 @@ def select_outputs(root:str,yes_to_all:bool=False):
    completer.set_default_dir(os.path.expanduser('~'))
    completer.complete_extensions(())
    completer.set_line(default)
-    return input('Select the default directory for image outputs [{default}]: ')
+    return input(f'Select the default directory for image outputs [{default}]: ') or default

#-------------------------------------
def initialize_rootdir(root:str,yes_to_all:bool=False):
-    assert os.path.exists('./configs'),'Run this script from within the top level of the InvokeAI source code directory, "InvokeAI"'

    print(f'** INITIALIZING INVOKEAI RUNTIME DIRECTORY **')
-    root = root or select_root(yes_to_all)
-    outputs = select_outputs(root,yes_to_all)
-    Globals.root = root

-    print(f'InvokeAI models and configuration files will be placed into {root} and image outputs will be placed into {outputs}.')
-    print(f'\nYou may change these values at any time by editing the --root and --output_dir options in "{Globals.initfile}",')
+    root_selected = False
+    while not root_selected:
+        root = select_root(root,yes_to_all)
+        outputs = select_outputs(root,yes_to_all)
+        Globals.root = os.path.abspath(root)
+        outputs = outputs if os.path.isabs(outputs) else os.path.abspath(os.path.join(Globals.root,outputs))
+
+        print(f'\nInvokeAI models and configuration files will be placed into "{root}" and image outputs will be placed into "{outputs}".')
+        if not yes_to_all:
+            root_selected = yes_or_no('Accept these locations?')
+        else:
+            root_selected = True
+
+    print(f'\nYou may change the chosen directories at any time by editing the --root and --outdir options in "{Globals.initfile}",')
    print(f'You may also change the runtime directory by setting the environment variable INVOKEAI_ROOT.\n')
-    for name in ('models','configs'):
+
+    for name in ['models','configs']:
        os.makedirs(os.path.join(root,name), exist_ok=True)
-    for src in ['configs']:
+    for src in (['configs']):
        dest = os.path.join(root,src)
        if not os.path.samefile(src,dest):
            shutil.copytree(src,dest,dirs_exist_ok=True)
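
The new initialize_rootdir() body keeps re-prompting until the chosen locations are confirmed, and resolves a relative output directory against the selected root. A simplified, self-contained sketch of the same loop (not part of this diff; choose_locations and the yes_or_no stub are illustrative stand-ins for the script's helpers):

import os

def yes_or_no(prompt: str) -> bool:
    # Stand-in for the script's interactive helper.
    return input(f'{prompt} [y/n]: ').strip().lower().startswith('y')

def choose_locations(root: str = '', yes_to_all: bool = False):
    selected = False
    while not selected:
        root = input(f'Root directory [{root or "~/invokeai"}]: ') or root or '~/invokeai'
        outputs = input('Output directory [outputs]: ') or 'outputs'
        root = os.path.abspath(os.path.expanduser(root))
        # A relative output directory is interpreted relative to the chosen root.
        if not os.path.isabs(outputs):
            outputs = os.path.abspath(os.path.join(root, outputs))
        print(f'Models/configs -> "{root}", image outputs -> "{outputs}"')
        selected = True if yes_to_all else yes_or_no('Accept these locations?')
    return root, outputs
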
@@ -610,7 +621,7 @@ def initialize_rootdir(root:str,yes_to_all:bool=False):
# or renaming it and then running configure_invokeai.py again.

# The --root option below points to the folder in which InvokeAI stores its models, configs and outputs.
---root="{root}"
+--root="{Globals.root}"

# the --outdir option controls the default location of image files.
--outdir="{outputs}"
@@ -673,15 +684,10 @@ def main():
    try:
        introduction()

-        # We check for two files to see if the runtime directory is correctly initialized.
-        # 1. a key stable diffusion config file
-        # 2. the web front end static files
+        # We check for to see if the runtime directory is correctly initialized.
        if Globals.root == '' \
-           or not os.path.exists(os.path.join(Globals.root,'configs/stable-diffusion/v1-inference.yaml')) \
-           or not os.path.exists(os.path.join(Globals.root,'frontend/dist')):
-            initialize_rootdir(Globals.root,(not opt.interactive) or opt.yes_to_all)
-
-            print(f'(Initializing with runtime root {Globals.root})\n')
+           or not os.path.exists(os.path.join(Globals.root,'configs/stable-diffusion/v1-inference.yaml')):
+            initialize_rootdir(Globals.root,opt.yes_to_all)

        if opt.interactive:
            print('** DOWNLOADING DIFFUSION WEIGHTS **')
@@ -698,7 +704,8 @@ def main():
    except KeyboardInterrupt:
        print('\nGoodbye! Come back soon.')
    except Exception as e:
-        print(f'\nA problem occurred during download.\nThe error was: "{str(e)}"')
+        print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"')
+        print(traceback.format_exc())

#-------------------------------------
if __name__ == '__main__':
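
The last hunk broadens the failure message and prints the full traceback; print(traceback.format_exc()) assumes the standard-library traceback module is imported at the top of configure_invokeai.py. A minimal sketch of that reporting pattern in isolation (not part of this diff):

import traceback

def main():
    raise RuntimeError('example failure')   # placeholder for the real work

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('\nGoodbye! Come back soon.')
    except Exception as e:
        print(f'\nA problem occurred during initialization.\nThe error was: "{str(e)}"')
        # format_exc() returns the traceback of the exception currently being handled.
        print(traceback.format_exc())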