 from transformers import CLIPTokenizer
 
 from compel import Compel
-from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser
+from compel.prompt_parser import FlattenedPrompt, Blend, Fragment, CrossAttentionControlSubstitute, PromptParser, \
+    Conjunction
 from .devices import torch_dtype
 from ..models.diffusion.shared_invokeai_diffusion import InvokeAIDiffuserComponent
 from ldm.invoke.globals import Globals
@@ -55,22 +56,27 @@ def get_uc_and_c_and_ec(prompt_string, model, log_tokens=False, skip_normalize_l
     # get rid of any newline characters
     prompt_string = prompt_string.replace("\n", " ")
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
+
     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt | Blend
-    lora_conditions = None
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
-        should_use_lora_manager = True
-        lora_weights = positive_conjunction.lora_weights
-        if model.peft_manager:
-            should_use_lora_manager = model.peft_manager.should_use(lora_weights)
-            if not should_use_lora_manager:
-                model.peft_manager.set_loras(lora_weights)
-        if model.lora_manager and should_use_lora_manager:
-            lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)
+    positive_prompt = positive_conjunction.prompts[0]
+
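+    # If a PEFT manager is present, it decides whether the legacy LoRA manager
+    # should apply the weights; when it declines, the PEFT manager loads them itself.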
+    should_use_lora_manager = True
+    lora_weights = positive_conjunction.lora_weights
+    lora_conditions = None
+    if model.peft_manager:
+        should_use_lora_manager = model.peft_manager.should_use(lora_weights)
+        if not should_use_lora_manager:
+            model.peft_manager.set_loras(lora_weights)
+    if model.lora_manager and should_use_lora_manager:
+        lora_conditions = model.lora_manager.set_loras_conditions(lora_weights)
+
     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
 
@@ -93,12 +97,12 @@ def get_prompt_structure(prompt_string, skip_normalize_legacy_blend: bool = Fals
         Union[FlattenedPrompt, Blend], FlattenedPrompt):
     positive_prompt_string, negative_prompt_string = split_prompt_to_positive_and_negative(prompt_string)
     legacy_blend = try_parse_legacy_blend(positive_prompt_string, skip_normalize_legacy_blend)
-    positive_prompt: FlattenedPrompt | Blend
+    positive_conjunction: Conjunction
     if legacy_blend is not None:
-        positive_prompt = legacy_blend
+        positive_conjunction = legacy_blend
     else:
         positive_conjunction = Compel.parse_prompt_string(positive_prompt_string)
-        positive_prompt = positive_conjunction.prompts[0]
+    positive_prompt = positive_conjunction.prompts[0]
     negative_conjunction = Compel.parse_prompt_string(negative_prompt_string)
     negative_prompt: FlattenedPrompt | Blend = negative_conjunction.prompts[0]
 
@@ -217,18 +221,30 @@ def log_tokenization_for_text(text, tokenizer, display_label=None):
         print(f'{discarded}\x1b[0m')
 
 
-def try_parse_legacy_blend(text: str, skip_normalize: bool = False) -> Optional[Blend]:
+def try_parse_legacy_blend(text: str, skip_normalize: bool = False) -> Optional[Conjunction]:
     weighted_subprompts = split_weighted_subprompts(text, skip_normalize=skip_normalize)
     if len(weighted_subprompts) <= 1:
         return None
     strings = [x[0] for x in weighted_subprompts]
-    weights = [x[1] for x in weighted_subprompts]
 
     pp = PromptParser()
     parsed_conjunctions = [pp.parse_conjunction(x) for x in strings]
-    flattened_prompts = [x.prompts[0] for x in parsed_conjunctions]
-
-    return Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)
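+    # Keep only sub-prompts that parsed to at least one prompt, pairing each
+    # with its original blend weight, and gather any LoRA weights they carry.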
+    flattened_prompts = []
+    weights = []
+    loras = []
+    for i, x in enumerate(parsed_conjunctions):
+        if len(x.prompts) > 0:
+            flattened_prompts.append(x.prompts[0])
+            weights.append(weighted_subprompts[i][1])
+        if len(x.lora_weights) > 0:
+            loras.extend(x.lora_weights)
+
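+    # Return a single-prompt Conjunction so callers get the blended prompts
+    # and the collected LoRA weights in one uniform container.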
+    return Conjunction([Blend(prompts=flattened_prompts, weights=weights, normalize_weights=not skip_normalize)],
+                       lora_weights=loras)
 
 
 def split_weighted_subprompts(text, skip_normalize=False) -> list: