Commit 0f3c456

Author: Lincoln Stein (committed)
Message: merge with v2.3
Parents: 2a2c868 + c088cf0

File tree: 18 files changed, +290 −249 lines


.gitignore

Lines changed: 0 additions & 2 deletions
@@ -233,5 +233,3 @@ installer/install.sh
 installer/update.bat
 installer/update.sh
 
-# no longer stored in source directory
-models

installer/lib/installer.py

Lines changed: 7 additions & 6 deletions
@@ -132,12 +132,13 @@ def app_venv(self, path: str = None):
 
         # Prefer to copy python executables
         # so that updates to system python don't break InvokeAI
-        try:
-            venv.create(venv_dir, with_pip=True)
-        # If installing over an existing environment previously created with symlinks,
-        # the executables will fail to copy. Keep symlinks in that case
-        except shutil.SameFileError:
-            venv.create(venv_dir, with_pip=True, symlinks=True)
+        if not venv_dir.exists():
+            try:
+                venv.create(venv_dir, with_pip=True)
+            # If installing over an existing environment previously created with symlinks,
+            # the executables will fail to copy. Keep symlinks in that case
+            except shutil.SameFileError:
+                venv.create(venv_dir, with_pip=True, symlinks=True)
 
         # upgrade pip in Python 3.9 environments
         if int(platform.python_version_tuple()[1]) == 9:
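For reference, the guard-then-fallback pattern this hunk introduces can be exercised on its own; a minimal sketch, assuming a scratch location for the environment (the path below is hypothetical, not part of the installer):

    import shutil
    import tempfile
    import venv
    from pathlib import Path

    venv_dir = Path(tempfile.gettempdir()) / "demo-venv"  # hypothetical path

    if not venv_dir.exists():
        try:
            # copy the interpreter so system-python updates don't break the env
            venv.create(venv_dir, with_pip=True)
        except shutil.SameFileError:
            # an older env was created with symlinks; keep symlinking
            venv.create(venv_dir, with_pip=True, symlinks=True)

The `exists()` guard is the actual fix here: re-running the installer no longer tries to recreate, and potentially clobber, a virtual environment that is already in place.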

invokeai/backend/invoke_ai_web_server.py

Lines changed: 1 addition & 2 deletions
@@ -30,7 +30,6 @@
     get_tokens_for_prompt_object,
     get_prompt_structure,
     split_weighted_subprompts,
-    get_tokenizer,
 )
 from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
 from ldm.invoke.generator.inpaint import infill_methods
@@ -1314,7 +1313,7 @@ def image_done(image, seed, first_seed, attention_maps_image=None):
             None
             if type(parsed_prompt) is Blend
             else get_tokens_for_prompt_object(
-                get_tokenizer(self.generate.model), parsed_prompt
+                self.generate.model.tokenizer, parsed_prompt
             )
         )
         attention_maps_image_base64_url = (
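The shim being dropped, `get_tokenizer()`, existed only to paper over legacy ckpt models; on a diffusers-style pipeline the tokenizer is a plain attribute. A sketch of the simplified access, with `pipeline` standing in for `self.generate.model` and `count_prompt_tokens` a hypothetical helper, not part of this commit:

    def count_prompt_tokens(pipeline, prompt: str) -> int:
        tokenizer = pipeline.tokenizer  # replaces get_tokenizer(model)
        # add_special_tokens=False leaves out the BOS/EOS markers
        return len(tokenizer.encode(prompt, add_special_tokens=False))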

ldm/invoke/CLI.py

Lines changed: 0 additions & 64 deletions
@@ -4,7 +4,6 @@
 import sys
 import traceback
 from argparse import Namespace
-from packaging import version
 from pathlib import Path
 from typing import Union
 
@@ -26,7 +25,6 @@
 from .globals import Globals, global_config_dir
 from .image_util import make_grid
 from .log import write_log
-from .model_manager import ModelManager
 from .pngwriter import PngWriter, retrieve_metadata, write_metadata
 from .readline import Completer, get_completer
 from ..util import url_attachment_name
@@ -66,9 +64,6 @@ def main():
     Globals.sequential_guidance = args.sequential_guidance
     Globals.ckpt_convert = True # always true as of 2.3.4 for LoRA support
 
-    # run any post-install patches needed
-    run_patches()
-
     print(f">> Internet connectivity is {Globals.internet_available}")
 
     if not args.conf:
@@ -113,9 +108,6 @@ def main():
     if opt.lora_path:
         Globals.lora_models_dir = opt.lora_path
 
-    # migrate legacy models
-    ModelManager.migrate_models()
-
     # load the infile as a list of lines
     if opt.infile:
         try:
@@ -1299,62 +1291,6 @@ def retrieve_last_used_model()->str:
     with open(model_file_path,'r') as f:
         return f.readline()
 
-# This routine performs any patch-ups needed after installation
-def run_patches():
-    install_missing_config_files()
-    version_file = Path(Globals.root,'.version')
-    if version_file.exists():
-        with open(version_file,'r') as f:
-            root_version = version.parse(f.readline() or 'v2.3.2')
-    else:
-        root_version = version.parse('v2.3.2')
-    app_version = version.parse(ldm.invoke.__version__)
-    if root_version < app_version:
-        try:
-            do_version_update(root_version, ldm.invoke.__version__)
-            with open(version_file,'w') as f:
-                f.write(ldm.invoke.__version__)
-        except:
-            print("** Update failed. Will try again on next launch")
-
-def install_missing_config_files():
-    """
-    install ckpt configuration files that may have been added to the
-    distro after original root directory configuration
-    """
-    import invokeai.configs as conf
-    from shutil import copyfile
-
-    root_configs = Path(global_config_dir(), 'stable-diffusion')
-    repo_configs = Path(conf.__path__[0], 'stable-diffusion')
-    for src in repo_configs.iterdir():
-        dest = root_configs / src.name
-        if not dest.exists():
-            copyfile(src,dest)
-
-def do_version_update(root_version: version.Version, app_version: Union[str, version.Version]):
-    """
-    Make any updates to the launcher .sh and .bat scripts that may be needed
-    from release to release. This is not an elegant solution. Instead, the
-    launcher should be moved into the source tree and installed using pip.
-    """
-    if root_version < version.Version('v2.3.4'):
-        dest = Path(Globals.root,'loras')
-        dest.mkdir(exist_ok=True)
-    if root_version < version.Version('v2.3.3'):
-        if sys.platform == "linux":
-            print('>> Downloading new version of launcher script and its config file')
-            from ldm.util import download_with_progress_bar
-            url_base = f'https://raw.githubusercontent.com/invoke-ai/InvokeAI/v{str(app_version)}/installer/templates/'
-
-            dest = Path(Globals.root,'invoke.sh.in')
-            assert download_with_progress_bar(url_base+'invoke.sh.in',dest)
-            dest.replace(Path(Globals.root,'invoke.sh'))
-            os.chmod(Path(Globals.root,'invoke.sh'), 0o0755)
-
-            dest = Path(Globals.root,'dialogrc')
-            assert download_with_progress_bar(url_base+'dialogrc',dest)
-            dest.replace(Path(Globals.root,'.dialogrc'))
 
 if __name__ == '__main__':
     main()
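The deleted `run_patches()` gated its work on a `packaging.version` comparison between the version recorded in the root directory and the running application. For reference, that idiom in isolation; a sketch, with `needs_update` as a hypothetical name:

    from pathlib import Path
    from packaging import version

    def needs_update(version_file: Path, app_version: str) -> bool:
        # mirror run_patches(): fall back to "v2.3.2" when no .version file exists
        if version_file.exists():
            with open(version_file) as f:
                root_version = version.parse(f.readline() or "v2.3.2")
        else:
            root_version = version.parse("v2.3.2")
        # packaging tolerates a leading "v", so "v2.3.2" < "2.3.4" compares correctly
        return root_version < version.parse(app_version)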

ldm/invoke/conditioning.py

Lines changed: 19 additions & 22 deletions
@@ -15,19 +15,10 @@
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
     Conjunction
 from .devices import torch_dtype
+from .generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
 
-def get_tokenizer(model) -> CLIPTokenizer:
-    # TODO remove legacy ckpt fallback handling
-    return (getattr(model, 'tokenizer', None) # diffusers
-            or model.cond_stage_model.tokenizer) # ldm
-
-def get_text_encoder(model) -> Any:
-    # TODO remove legacy ckpt fallback handling
-    return (getattr(model, 'text_encoder', None) # diffusers
-            or UnsqueezingLDMTransformer(model.cond_stage_model.transformer)) # ldm
-
 class UnsqueezingLDMTransformer:
     def __init__(self, ldm_transformer):
         self.ldm_transformer = ldm_transformer
@@ -41,15 +32,15 @@ def __call__(self, *args, **kwargs):
         return insufficiently_unsqueezed_tensor.unsqueeze(0)
 
 
-def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
+def get_uc_and_c_and_ec(prompt_string,
+                        model: StableDiffusionGeneratorPipeline,
+                        log_tokens=False, skip_normalize_legacy_blend=False):
     # lazy-load any deferred textual inversions.
     # this might take a couple of seconds the first time a textual inversion is used.
     model.textual_inversion_manager.create_deferred_token_ids_for_any_trigger_terms(prompt_string)
 
-    tokenizer = get_tokenizer(model)
-    text_encoder = get_text_encoder(model)
-    compel = Compel(tokenizer=tokenizer,
-                    text_encoder=text_encoder,
+    compel = Compel(tokenizer=model.tokenizer,
+                    text_encoder=model.text_encoder,
                     textual_inversion_manager=model.textual_inversion_manager,
                     dtype_for_device_getter=torch_dtype)
 
@@ -78,14 +69,20 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
 
+    tokens_count = get_max_token_count(model.tokenizer, positive_prompt)
     if log_tokens or getattr(Globals, "log_tokenization", False):
-        log_tokenization(positive_prompt, negative_prompt, tokenizer=tokenizer)
-
-    c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
-    uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
-
-    tokens_count = get_max_token_count(tokenizer, positive_prompt)
-
+        log_tokenization(positive_prompt, negative_prompt, tokenizer=model.tokenizer)
+
+    # some LoRA models also mess with the text encoder, so they must be active while compel builds conditioning tensors
+    lora_conditioning_ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
+                                                                           lora_conditions=lora_conditions)
+    with InvokeAIDiffuserComponent.custom_attention_context(model.unet,
+                                                            extra_conditioning_info=lora_conditioning_ec,
+                                                            step_count=-1):
+        c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
+        uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
+
+    # now build the "real" ec
     ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
                                                          cross_attention_control_args=options.get(
                                                              'cross_attention_control', None),
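The substantive change here is ordering: the conditioning tensors are now built inside `InvokeAIDiffuserComponent.custom_attention_context(...)`, so LoRA layers that patch the text encoder are active while Compel encodes the prompts, and are unpatched afterwards. A self-contained sketch of that patch-then-restore shape, using a generic context manager rather than InvokeAI's API:

    from contextlib import contextmanager

    @contextmanager
    def patched(obj, attr, new_value):
        """Temporarily replace obj.attr, restoring it on exit, even on error."""
        original = getattr(obj, attr)
        setattr(obj, attr, new_value)
        try:
            yield
        finally:
            setattr(obj, attr, original)

    class Encoder:          # stand-in for a text encoder
        scale = 1.0

    enc = Encoder()
    with patched(enc, "scale", 2.0):
        assert enc.scale == 2.0   # "LoRA" active while encoding happens here
    assert enc.scale == 1.0       # restored once the context exits

Encoding outside the context would silently produce conditioning tensors without the LoRA's contribution, which is exactly the bug this hunk avoids.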

ldm/invoke/config/invokeai_configure.py

Lines changed: 13 additions & 3 deletions
@@ -21,7 +21,6 @@
 from shutil import get_terminal_size
 
 import npyscreen
-import torch
 import transformers
 from diffusers import AutoencoderKL
 from huggingface_hub import HfFolder
@@ -664,8 +663,19 @@ def initialize_rootdir(root: str, yes_to_all: bool = False):
     configs_src = Path(configs.__path__[0])
     configs_dest = Path(root) / "configs"
     if not os.path.samefile(configs_src, configs_dest):
-        shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
-
+        shutil.copytree(configs_src,
+                        configs_dest,
+                        dirs_exist_ok=True,
+                        copy_function=shutil.copyfile,
+                        )
+    # Fix up directory permissions so that they are writable.
+    # This is needed when running under a Nix environment, which
+    # makes the runtime directory template immutable.
+    for root, dirs, files in os.walk(configs_dest):
+        for d in dirs:
+            Path(root, d).chmod(0o775)
+        for f in files:
+            Path(root, f).chmod(0o644)
 
 # -------------------------------------
 def run_console_ui(
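The walk exists because `copy_function=shutil.copyfile` copies file contents but not permission bits, so files materialized from an immutable Nix store would otherwise stay read-only. The normalization pass in isolation; a sketch under the same 0o775/0o644 policy, with `make_writable` as a hypothetical name:

    import os
    from pathlib import Path

    def make_writable(tree: Path) -> None:
        # 0o775 keeps directories traversable and writable; 0o644 for plain files
        for root, dirs, files in os.walk(tree):
            for d in dirs:
                Path(root, d).chmod(0o775)
            for f in files:
                Path(root, f).chmod(0o644)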

ldm/invoke/config/invokeai_update.py

Lines changed: 12 additions & 1 deletion
@@ -42,8 +42,18 @@ def invokeai_is_running()->bool:
         except psutil.AccessDenied:
             continue
     return False
+
+def do_post_install():
+    '''
+    Run the postinstallation script.
+    '''
+    print("Looking for postinstallation script to run on this version...")
+    try:
+        from ldm.invoke.config.post_install import post_install
+        post_install()
+    except ImportError:
+        print("Postinstallation script not available for this version of InvokeAI")
 
-
 def welcome(versions: dict):
 
     @group()
@@ -107,6 +117,7 @@ def main():
         print(f':heavy_check_mark: Upgrade successful')
     else:
         print(f':exclamation: [bold red]Upgrade failed[/red bold]')
+    do_post_install()
 
 if __name__ == "__main__":
     try:
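`do_post_install()` is an optional-import: if the running version ships a `post_install` module it is imported and executed; otherwise the ImportError is reported and the updater carries on. The same idiom in isolation, with a hypothetical module name:

    def run_optional_hook() -> None:
        try:
            from extras.hooks import after_update  # hypothetical; may not exist
        except ImportError:
            print("No post-update hook available for this version")
            return
        after_update()

Keeping only the import inside the `try` block, and catching `ImportError` specifically, ensures that a genuine failure inside the hook itself is not misreported as "script not available".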

ldm/invoke/config/model_install_backend.py

Lines changed: 13 additions & 1 deletion
@@ -388,7 +388,19 @@ def update_config_file(successfully_downloaded: dict, config_file: Path):
     if config_file is default_config_file() and not config_file.parent.exists():
         configs_src = Dataset_path.parent
         configs_dest = default_config_file().parent
-        shutil.copytree(configs_src, configs_dest, dirs_exist_ok=True)
+        shutil.copytree(configs_src,
+                        configs_dest,
+                        dirs_exist_ok=True,
+                        copy_function=shutil.copyfile,
+                        )
+        # Fix up directory permissions so that they are writable.
+        # This is needed when running under a Nix environment, which
+        # makes the runtime directory template immutable.
+        for root, dirs, files in os.walk(default_config_file().parent):
+            for d in dirs:
+                Path(root, d).chmod(0o775)
+            for f in files:
+                Path(root, f).chmod(0o644)
 
     yaml = new_config_file_contents(successfully_downloaded, config_file)
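`copy_function=shutil.copyfile` is the other half of the Nix fix: the default `shutil.copy2` would propagate the source's possibly read-only permission bits along with the contents. A sketch of the call shape, assuming hypothetical source and destination trees:

    import shutil

    def copy_configs(src: str, dest: str) -> None:
        shutil.copytree(src, dest,
                        dirs_exist_ok=True,              # merge into an existing tree
                        copy_function=shutil.copyfile)   # contents only, no metadata

Note that `copytree` still mirrors directory metadata via `copystat` regardless of `copy_function`, which is why the chmod walk over directories above remains necessary.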
