You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
This is useful for training and when model weights are offloaded.
12
+
"""
13
+
14
+
def __init__(self):
    """Create the node with no LoRA file cached yet.

    # NOTE(review): `loaded_lora` presumably acts as a cache so the same
    # LoRA file is not re-read from disk on every execution — confirm
    # against the load_lora implementation (not visible in this chunk).
    """
    self.loaded_lora = None
16
+
17
+
@classmethod
def INPUT_TYPES(s):
    """Describe the node's required input sockets and their UI metadata.

    Returns the ComfyUI-style schema dict: a "required" mapping from
    input-socket name to a ``(type, options)`` tuple. ``folder_paths`` is
    a project module not visible in this chunk; it supplies the list of
    available LoRA filenames for the dropdown.
    """
    # Both strength inputs share the same numeric-widget configuration.
    strength_widget = {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}
    return {
        "required": {
            "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
            "clip": ("CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}),
            "lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}),
            "strength_model": ("FLOAT", {**strength_widget, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
            "strength_clip": ("FLOAT", {**strength_widget, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}),
        }
    }
28
+
29
+
# Node outputs: the patched diffusion model and the patched CLIP model.
RETURN_TYPES = ("MODEL", "CLIP")
OUTPUT_TOOLTIPS = ("The modified diffusion model.", "The modified CLIP model.")

# Name of the method the node executor invokes on this class.
FUNCTION = "load_lora"

# UI placement and user-facing description of the node.
CATEGORY = "loaders"
DESCRIPTION = "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. Useful for training scenarios."
This is useful for training and when model weights are offloaded.
731
-
"""
732
-
733
-
def __init__(self):
    """Create the node with no LoRA file cached yet.

    # NOTE(review): `loaded_lora` presumably acts as a cache so the same
    # LoRA file is not re-read from disk on every execution — confirm
    # against the load_lora implementation (not visible in this chunk).
    """
    self.loaded_lora = None
735
-
736
-
@classmethod
def INPUT_TYPES(s):
    """Describe the node's required input sockets and their UI metadata.

    Returns the ComfyUI-style schema dict: a "required" mapping from
    input-socket name to a ``(type, options)`` tuple. ``folder_paths`` is
    a project module not visible in this chunk; it supplies the list of
    available LoRA filenames for the dropdown.
    """
    # Both strength inputs share the same numeric-widget configuration.
    strength_widget = {"default": 1.0, "min": -100.0, "max": 100.0, "step": 0.01}
    return {
        "required": {
            "model": ("MODEL", {"tooltip": "The diffusion model the LoRA will be applied to."}),
            "clip": ("CLIP", {"tooltip": "The CLIP model the LoRA will be applied to."}),
            "lora_name": (folder_paths.get_filename_list("loras"), {"tooltip": "The name of the LoRA."}),
            "strength_model": ("FLOAT", {**strength_widget, "tooltip": "How strongly to modify the diffusion model. This value can be negative."}),
            "strength_clip": ("FLOAT", {**strength_widget, "tooltip": "How strongly to modify the CLIP model. This value can be negative."}),
        }
    }
747
-
748
-
# Node outputs: the patched diffusion model and the patched CLIP model.
RETURN_TYPES = ("MODEL", "CLIP")
OUTPUT_TOOLTIPS = ("The modified diffusion model.", "The modified CLIP model.")

# Name of the method the node executor invokes on this class.
FUNCTION = "load_lora"

# UI placement and user-facing description of the node.
CATEGORY = "loaders"
DESCRIPTION = "Apply LoRA in bypass mode. Unlike regular LoRA, this doesn't modify model weights - instead it injects the LoRA computation during forward pass. Useful for training scenarios."
0 commit comments