
Commit b332432

Merge branch 'v2.3' into lstein/bugfix/improve-update-handling
2 parents: 6365a7c + 96c39b6

File tree: 13 files changed, +200 −196 lines

.github/CODEOWNERS

Lines changed: 17 additions & 17 deletions
@@ -1,13 +1,13 @@
 # continuous integration
-/.github/workflows/ @mauwii @lstein @blessedcoolant
+/.github/workflows/ @lstein @blessedcoolant

 # documentation
-/docs/ @lstein @mauwii @blessedcoolant
-mkdocs.yml @mauwii @lstein
+/docs/ @lstein @blessedcoolant
+mkdocs.yml @lstein @ebr

 # installation and configuration
-/pyproject.toml @mauwii @lstein @ebr
-/docker/ @mauwii
+/pyproject.toml @lstein @ebr
+/docker/ @lstein
 /scripts/ @ebr @lstein @blessedcoolant
 /installer/ @ebr @lstein
 ldm/invoke/config @lstein @ebr
@@ -21,13 +21,13 @@ invokeai/configs @lstein @ebr @blessedcoolant

 # generation and model management
 /ldm/*.py @lstein @blessedcoolant
-/ldm/generate.py @lstein @keturn
+/ldm/generate.py @lstein @gregghelt2
 /ldm/invoke/args.py @lstein @blessedcoolant
 /ldm/invoke/ckpt* @lstein @blessedcoolant
 /ldm/invoke/ckpt_generator @lstein @blessedcoolant
 /ldm/invoke/CLI.py @lstein @blessedcoolant
-/ldm/invoke/config @lstein @ebr @mauwii @blessedcoolant
-/ldm/invoke/generator @keturn @damian0815
+/ldm/invoke/config @lstein @ebr @blessedcoolant
+/ldm/invoke/generator @gregghelt2 @damian0815
 /ldm/invoke/globals.py @lstein @blessedcoolant
 /ldm/invoke/merge_diffusers.py @lstein @blessedcoolant
 /ldm/invoke/model_manager.py @lstein @blessedcoolant
@@ -36,17 +36,17 @@ invokeai/configs @lstein @ebr @blessedcoolant
 /ldm/invoke/restoration @lstein @blessedcoolant

 # attention, textual inversion, model configuration
-/ldm/models @damian0815 @keturn @blessedcoolant
+/ldm/models @damian0815 @gregghelt2 @blessedcoolant
 /ldm/modules/textual_inversion_manager.py @lstein @blessedcoolant
-/ldm/modules/attention.py @damian0815 @keturn
-/ldm/modules/diffusionmodules @damian0815 @keturn
-/ldm/modules/distributions @damian0815 @keturn
-/ldm/modules/ema.py @damian0815 @keturn
+/ldm/modules/attention.py @damian0815 @gregghelt2
+/ldm/modules/diffusionmodules @damian0815 @gregghelt2
+/ldm/modules/distributions @damian0815 @gregghelt2
+/ldm/modules/ema.py @damian0815 @gregghelt2
 /ldm/modules/embedding_manager.py @lstein
-/ldm/modules/encoders @damian0815 @keturn
-/ldm/modules/image_degradation @damian0815 @keturn
-/ldm/modules/losses @damian0815 @keturn
-/ldm/modules/x_transformer.py @damian0815 @keturn
+/ldm/modules/encoders @damian0815 @gregghelt2
+/ldm/modules/image_degradation @damian0815 @gregghelt2
+/ldm/modules/losses @damian0815 @gregghelt2
+/ldm/modules/x_transformer.py @damian0815 @gregghelt2

 # Nodes
 apps/ @Kyle0654 @jpphoto

invokeai/backend/invoke_ai_web_server.py

Lines changed: 1 addition & 2 deletions
@@ -30,7 +30,6 @@
     get_tokens_for_prompt_object,
     get_prompt_structure,
     split_weighted_subprompts,
-    get_tokenizer,
 )
 from ldm.invoke.generator.diffusers_pipeline import PipelineIntermediateState
 from ldm.invoke.generator.inpaint import infill_methods
@@ -1314,7 +1313,7 @@ def image_done(image, seed, first_seed, attention_maps_image=None):
                 None
                 if type(parsed_prompt) is Blend
                 else get_tokens_for_prompt_object(
-                    get_tokenizer(self.generate.model), parsed_prompt
+                    self.generate.model.tokenizer, parsed_prompt
                 )
             )
             attention_maps_image_base64_url = (
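
The simplification above relies on the fact that, on the diffusers code path, the loaded model is a pipeline whose tokenizer is a plain attribute, so the old get_tokenizer() fallback helper is no longer needed. A minimal sketch of that assumption using the public diffusers API (the model id is only an example and is not referenced by this commit):

# Minimal sketch, not InvokeAI code: a diffusers pipeline exposes its tokenizer and
# text encoder as attributes, which is what self.generate.model.tokenizer relies on.
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
print(type(pipe.tokenizer).__name__)     # CLIPTokenizer
print(type(pipe.text_encoder).__name__)  # CLIPTextModel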

ldm/invoke/conditioning.py

Lines changed: 19 additions & 22 deletions
@@ -15,19 +15,10 @@
 from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
     Conjunction
 from .devices import torch_dtype
+from .generator.diffusers_pipeline import StableDiffusionGeneratorPipeline
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals

-def get_tokenizer(model) -> CLIPTokenizer:
-    # TODO remove legacy ckpt fallback handling
-    return (getattr(model, 'tokenizer', None) # diffusers
-            or model.cond_stage_model.tokenizer) # ldm
-
-def get_text_encoder(model) -> Any:
-    # TODO remove legacy ckpt fallback handling
-    return (getattr(model, 'text_encoder', None) # diffusers
-            or UnsqueezingLDMTransformer(model.cond_stage_model.transformer)) # ldm
-
 class UnsqueezingLDMTransformer:
     def __init__(self, ldm_transformer):
         self.ldm_transformer = ldm_transformer
@@ -41,15 +32,15 @@ def __call__(self, *args, **kwargs):
         return insufficiently_unsqueezed_tensor.unsqueeze(0)


-def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_legacy_blend=False):
+def get_uc_and_c_and_ec(prompt_string,
+                        model: StableDiffusionGeneratorPipeline,
+                        log_tokens=False, skip_normalize_legacy_blend=False):
     # lazy-load any deferred textual inversions.
     # this might take a couple of seconds the first time a textual inversion is used.
     model.textual_inversion_manager.create_deferred_token_ids_for_any_trigger_terms(prompt_string)

-    tokenizer = get_tokenizer(model)
-    text_encoder = get_text_encoder(model)
-    compel = Compel(tokenizer=tokenizer,
-                    text_encoder=text_encoder,
+    compel = Compel(tokenizer=model.tokenizer,
+                    text_encoder=model.text_encoder,
                     textual_inversion_manager=model.textual_inversion_manager,
                     dtype_for_device_getter=torch_dtype)

@@ -78,14 +69,20 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]

+    tokens_count = get_max_token_count(model.tokenizer, positive_prompt)
     if log_tokens or getattr(Globals, "log_tokenization", False):
-        log_tokenization(positive_prompt, negative_prompt, tokenizer=tokenizer)
-
-    c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
-    uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
-
-    tokens_count = get_max_token_count(tokenizer, positive_prompt)
-
+        log_tokenization(positive_prompt, negative_prompt, tokenizer=model.tokenizer)
+
+    # some LoRA models also mess with the text encoder, so they must be active while compel builds conditioning tensors
+    lora_conditioning_ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
+                                                                           lora_conditions=lora_conditions)
+    with InvokeAIDiffuserComponent.custom_attention_context(model.unet,
+                                                            extra_conditioning_info=lora_conditioning_ec,
+                                                            step_count=-1):
+        c, options = compel.build_conditioning_tensor_for_prompt_object(positive_prompt)
+        uc, _ = compel.build_conditioning_tensor_for_prompt_object(negative_prompt)
+
+    # now build the "real" ec
     ec = InvokeAIDiffuserComponent.ExtraConditioningInfo(tokens_count_including_eos_bos=tokens_count,
                                                          cross_attention_control_args=options.get(
                                                              'cross_attention_control', None),
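
Taken together, these changes mean get_uc_and_c_and_ec now requires a diffusers-style pipeline (with .tokenizer, .text_encoder and .unet attributes) and temporarily installs the LoRA/attention conditioning on the UNet while compel builds the conditioning tensors. A hedged usage sketch: `pipeline` stands in for an already-loaded StableDiffusionGeneratorPipeline and is not defined in this diff, and the three-tuple return is assumed to match the existing v2.3 call sites.

# Hedged usage sketch; `pipeline` is an assumed, already-loaded StableDiffusionGeneratorPipeline.
from ldm.invoke.conditioning import get_uc_and_c_and_ec

uc, c, extra_conditioning_info = get_uc_and_c_and_ec(
    "a photograph of an astronaut riding a horse",  # example prompt
    model=pipeline,   # must expose .tokenizer, .text_encoder, .unet and .textual_inversion_manager
    log_tokens=True,
)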

ldm/invoke/config/model_install.py

Lines changed: 0 additions & 24 deletions
@@ -196,16 +196,6 @@ def create(self):
             scroll_exit=True,
         )
         self.nextrely += 1
-        self.convert_models = self.add_widget_intelligent(
-            npyscreen.TitleSelectOne,
-            name="== CONVERT IMPORTED MODELS INTO DIFFUSERS==",
-            values=["Keep original format", "Convert to diffusers"],
-            value=0,
-            begin_entry_at=4,
-            max_height=4,
-            hidden=True,  # will appear when imported models box is edited
-            scroll_exit=True,
-        )
         self.cancel = self.add_widget_intelligent(
             npyscreen.ButtonPress,
             name="CANCEL",
@@ -240,8 +230,6 @@ def create(self):
             self.show_directory_fields.addVisibleWhenSelected(i)

         self.show_directory_fields.when_value_edited = self._clear_scan_directory
-        self.import_model_paths.when_value_edited = self._show_hide_convert
-        self.autoload_directory.when_value_edited = self._show_hide_convert

     def resize(self):
         super().resize()
@@ -252,13 +240,6 @@ def _clear_scan_directory(self):
         if not self.show_directory_fields.value:
             self.autoload_directory.value = ""

-    def _show_hide_convert(self):
-        model_paths = self.import_model_paths.value or ""
-        autoload_directory = self.autoload_directory.value or ""
-        self.convert_models.hidden = (
-            len(model_paths) == 0 and len(autoload_directory) == 0
-        )
-
     def _get_starter_model_labels(self) -> List[str]:
         window_width, window_height = get_terminal_size()
         label_width = 25
@@ -318,7 +299,6 @@ def marshall_arguments(self):
         .scan_directory: Path to a directory of models to scan and import
         .autoscan_on_startup: True if invokeai should scan and import at startup time
         .import_model_paths: list of URLs, repo_ids and file paths to import
-        .convert_to_diffusers: if True, convert legacy checkpoints into diffusers
         """
         # we're using a global here rather than storing the result in the parentapp
         # due to some bug in npyscreen that is causing attributes to be lost
@@ -354,7 +334,6 @@ def marshall_arguments(self):

         # URLs and the like
         selections.import_model_paths = self.import_model_paths.value.split()
-        selections.convert_to_diffusers = self.convert_models.value[0] == 1


 class AddModelApplication(npyscreen.NPSAppManaged):
@@ -367,7 +346,6 @@ def __init__(self):
             scan_directory=None,
             autoscan_on_startup=None,
             import_model_paths=None,
-            convert_to_diffusers=None,
         )

     def onStart(self):
@@ -387,15 +365,13 @@ def process_and_execute(opt: Namespace, selections: Namespace):
     directory_to_scan = selections.scan_directory
     scan_at_startup = selections.autoscan_on_startup
     potential_models_to_install = selections.import_model_paths
-    convert_to_diffusers = selections.convert_to_diffusers

     install_requested_models(
         install_initial_models=models_to_install,
         remove_models=models_to_remove,
         scan_directory=Path(directory_to_scan) if directory_to_scan else None,
         external_models=potential_models_to_install,
         scan_at_startup=scan_at_startup,
-        convert_to_diffusers=convert_to_diffusers,
         precision="float32"
         if opt.full_precision
         else choose_precision(torch.device(choose_torch_device())),

ldm/invoke/config/model_install_backend.py

Lines changed: 4 additions & 5 deletions
@@ -68,7 +68,6 @@ def install_requested_models(
     scan_directory: Path = None,
     external_models: List[str] = None,
     scan_at_startup: bool = False,
-    convert_to_diffusers: bool = False,
     precision: str = "float16",
     purge_deleted: bool = False,
     config_file_path: Path = None,
@@ -111,20 +110,20 @@ def install_requested_models(
     if len(external_models)>0:
         print("== INSTALLING EXTERNAL MODELS ==")
         for path_url_or_repo in external_models:
+            print(f'DEBUG: path_url_or_repo = {path_url_or_repo}')
             try:
                 model_manager.heuristic_import(
                     path_url_or_repo,
-                    convert=convert_to_diffusers,
                     config_file_callback=_pick_configuration_file,
                     commit_to_conf=config_file_path
                 )
             except KeyboardInterrupt:
                 sys.exit(-1)
-            except Exception:
-                pass
+            except Exception as e:
+                print(f'An exception has occurred: {str(e)}')

     if scan_at_startup and scan_directory.is_dir():
-        argument = '--autoconvert' if convert_to_diffusers else '--autoimport'
+        argument = '--autoconvert'
         initfile = Path(Globals.root, Globals.initfile)
         replacement = Path(Globals.root, f'{Globals.initfile}.new')
         directory = str(scan_directory).replace('\\','/')
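
The change to the exception handler is the substantive part of this hunk: failures during heuristic_import are now reported and the loop moves on to the next model, while Ctrl-C still aborts the whole install. Reduced to its essentials (names as in the diff above, not a verbatim copy of the file):

# Pattern sketch: keep going after a bad model, but tell the user what failed.
for path_url_or_repo in external_models:
    try:
        model_manager.heuristic_import(
            path_url_or_repo,
            config_file_callback=_pick_configuration_file,
            commit_to_conf=config_file_path,
        )
    except KeyboardInterrupt:
        sys.exit(-1)                              # user interrupt: stop the whole install
    except Exception as e:
        print(f"An exception has occurred: {e}")  # report and continue with the next model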

ldm/invoke/generator/diffusers_pipeline.py

Lines changed: 3 additions & 2 deletions
@@ -467,8 +467,9 @@ def generate_latents_from_embeddings(self, latents: torch.Tensor, timesteps,
         if additional_guidance is None:
             additional_guidance = []
         extra_conditioning_info = conditioning_data.extra
-        with self.invokeai_diffuser.custom_attention_context(extra_conditioning_info=extra_conditioning_info,
-                                                             step_count=len(self.scheduler.timesteps)
+        with InvokeAIDiffuserComponent.custom_attention_context(self.invokeai_diffuser.model,
+                                                                extra_conditioning_info=extra_conditioning_info,
+                                                                step_count=len(self.scheduler.timesteps)
         ):

             yield PipelineIntermediateState(run_id=run_id, step=-1, timestep=self.scheduler.num_train_timesteps,
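
custom_attention_context is now entered through the class, with the UNet (self.invokeai_diffuser.model) passed explicitly; that is what lets conditioning.py above wrap compel's tensor building in the same context. A minimal sketch of the pattern such a class-level context manager typically follows, as an illustration only, not the actual InvokeAIDiffuserComponent implementation:

# Illustration of the pattern only: swap a UNet's attention processors for the duration
# of a block and restore the originals afterwards, even if the block raises.
from contextlib import contextmanager

class DiffuserComponentSketch:
    @classmethod
    @contextmanager
    def custom_attention_context(cls, unet, extra_conditioning_info=None, step_count=-1):
        old_attn_processors = unet.attn_processors          # diffusers UNet2DConditionModel property
        try:
            if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
                ...  # install the swap/sliced attention processors here
            yield
        finally:
            unet.set_attn_processor(old_attn_processors)    # always restore the originals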

ldm/models/diffusion/cross_attention_control.py

Lines changed: 9 additions & 25 deletions
@@ -288,16 +288,7 @@ def get_invokeai_attention_mem_efficient(self, q, k, v):
         return self.einsum_op_tensor_mem(q, k, v, 32)


-
-def restore_default_cross_attention(model, is_running_diffusers: bool, processors_to_restore: Optional[AttnProcessor]=None):
-    if is_running_diffusers:
-        unet = model
-        unet.set_attn_processor(processors_to_restore or CrossAttnProcessor())
-    else:
-        remove_attention_function(model)
-
-
-def override_cross_attention(model, context: Context, is_running_diffusers = False):
+def setup_cross_attention_control_attention_processors(unet: UNet2DConditionModel, context: Context):
     """
     Inject attention parameters and functions into the passed in model to enable cross attention editing.

@@ -323,22 +314,15 @@ def override_cross_attention(model, context: Context, is_running_diffusers = Fal

     context.cross_attention_mask = mask.to(device)
     context.cross_attention_index_map = indices.to(device)
-    if is_running_diffusers:
-        unet = model
-        old_attn_processors = unet.attn_processors
-        if torch.backends.mps.is_available():
-            # see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS
-            unet.set_attn_processor(SwapCrossAttnProcessor())
-        else:
-            # try to re-use an existing slice size
-            default_slice_size = 4
-            slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size)
-            unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size))
+    old_attn_processors = unet.attn_processors
+    if torch.backends.mps.is_available():
+        # see note in StableDiffusionGeneratorPipeline.__init__ about borked slicing on MPS
+        unet.set_attn_processor(SwapCrossAttnProcessor())
     else:
-        context.register_cross_attention_modules(model)
-        inject_attention_function(model, context)
-
-
+        # try to re-use an existing slice size
+        default_slice_size = 4
+        slice_size = next((p.slice_size for p in old_attn_processors.values() if type(p) is SlicedAttnProcessor), default_slice_size)
+        unet.set_attn_processor(SlicedSwapCrossAttnProcesser(slice_size=slice_size))


 def get_cross_attention_modules(model, which: CrossAttentionType) -> list[tuple[str, InvokeAICrossAttentionMixin]]:
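
With override_cross_attention and restore_default_cross_attention gone, the renamed setup_cross_attention_control_attention_processors only installs the swap processors on a diffusers UNet; putting the originals back is left to the surrounding context manager (see diffusers_pipeline.py above). A hedged sketch of a caller under that assumption:

# Hedged sketch: the save/restore bracket is an assumption about the caller, not code from this commit.
old_attn_processors = unet.attn_processors       # remember whatever processors are installed now
try:
    setup_cross_attention_control_attention_processors(unet, context)
    ...  # run the denoising steps that need prompt-to-prompt style attention editing
finally:
    unet.set_attn_processor(old_attn_processors)  # restore the original processors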

ldm/models/diffusion/ddim.py

Lines changed: 0 additions & 11 deletions
@@ -12,17 +12,6 @@ def __init__(self, model, schedule='linear', device=None, **kwargs):
         self.invokeai_diffuser = InvokeAIDiffuserComponent(self.model,
                                                            model_forward_callback = lambda x, sigma, cond: self.model.apply_model(x, sigma, cond))

-    def prepare_to_sample(self, t_enc, **kwargs):
-        super().prepare_to_sample(t_enc, **kwargs)
-
-        extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
-        all_timesteps_count = kwargs.get('all_timesteps_count', t_enc)
-
-        if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
-            self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = all_timesteps_count)
-        else:
-            self.invokeai_diffuser.restore_default_cross_attention()
-

     # This is the central routine
     @torch.no_grad()

ldm/models/diffusion/ksampler.py

Lines changed: 0 additions & 9 deletions
@@ -38,15 +38,6 @@ def __init__(self, model, threshold = 0, warmup = 0):
                                                            model_forward_callback=lambda x, sigma, cond: self.inner_model(x, sigma, cond=cond))


-    def prepare_to_sample(self, t_enc, **kwargs):
-
-        extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
-
-        if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
-            self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = t_enc)
-        else:
-            self.invokeai_diffuser.restore_default_cross_attention()
-

     def forward(self, x, sigma, uncond, cond, cond_scale):
         next_x = self.invokeai_diffuser.do_diffusion_step(x, sigma, uncond, cond, cond_scale)

ldm/models/diffusion/plms.py

Lines changed: 0 additions & 11 deletions
@@ -14,17 +14,6 @@ class PLMSSampler(Sampler):
     def __init__(self, model, schedule='linear', device=None, **kwargs):
         super().__init__(model,schedule,model.num_timesteps, device)

-    def prepare_to_sample(self, t_enc, **kwargs):
-        super().prepare_to_sample(t_enc, **kwargs)
-
-        extra_conditioning_info = kwargs.get('extra_conditioning_info', None)
-        all_timesteps_count = kwargs.get('all_timesteps_count', t_enc)
-
-        if extra_conditioning_info is not None and extra_conditioning_info.wants_cross_attention_control:
-            self.invokeai_diffuser.override_attention_processors(extra_conditioning_info, step_count = all_timesteps_count)
-        else:
-            self.invokeai_diffuser.restore_default_cross_attention()
-

     # this is the essential routine
     @torch.no_grad()
