69 | 69 | extra_images_max = 4 |
70 | 70 |
71 | 71 | # global vars |
72 | | -KcppVersion = "1.99.4" |
| 72 | +KcppVersion = "1.100" |
73 | 73 | showdebug = True |
74 | 74 | kcpp_instance = None #global running instance |
75 | 75 | global_memory = {"tunnel_url": "", "restart_target":"", "input_to_exit":False, "load_complete":False, "restart_model": "", "currentConfig": None, "modelOverride": None, "currentModel": None} |
@@ -1731,7 +1731,7 @@ def sd_load_model(model_filename,vae_filename,lora_filename,t5xxl_filename,clipl |
1731 | 1731 | inputs.flash_attention = args.sdflashattention |
1732 | 1732 | inputs.offload_cpu = args.sdoffloadcpu |
1733 | 1733 | inputs.vae_cpu = args.sdvaecpu |
1734 | | - inputs.clip_cpu = args.sdclipcpu |
| 1734 | + inputs.clip_cpu = not args.sdclipgpu |
1735 | 1735 | sdconvdirect = sd_convdirect_option(args.sdconvdirect) |
1736 | 1736 | inputs.diffusion_conv_direct = sdconvdirect == 'full' |
1737 | 1737 | inputs.vae_conv_direct = sdconvdirect in ['vaeonly', 'full'] |
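The subtle part of this hunk is the polarity flip: the removed `--sdclipcpu` flag opted into CPU placement, while the new `--sdclipgpu` flag opts into GPU placement, so the value assigned to `inputs.clip_cpu` must be the flag's negation. A quick truth-table sketch, using a `SimpleNamespace` stand-in for the real parsed args:

```python
from types import SimpleNamespace

# Stand-in for the parsed CLI args; only the flag under discussion is modeled.
for flag in (False, True):
    args = SimpleNamespace(sdclipgpu=flag)
    clip_cpu = not args.sdclipgpu  # CLIP stays on CPU unless --sdclipgpu is passed
    print(f"sdclipgpu={flag} -> clip_cpu={clip_cpu}")
# sdclipgpu=False -> clip_cpu=True   (default: CLIP on CPU)
# sdclipgpu=True  -> clip_cpu=False  (opt-in: CLIP on GPU)
```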
@@ -5718,7 +5718,7 @@ def hide_tooltip(event): |
5718 | 5718 | sd_flash_attention_var = ctk.IntVar(value=0) |
5719 | 5719 | sd_offload_cpu_var = ctk.IntVar(value=0) |
5720 | 5720 | sd_vae_cpu_var = ctk.IntVar(value=0) |
5721 | | - sd_clip_cpu_var = ctk.IntVar(value=0) |
| 5721 | + sd_clip_gpu_var = ctk.IntVar(value=0) |
5722 | 5722 | sd_vaeauto_var = ctk.IntVar(value=0) |
5723 | 5723 | sd_tiled_vae_var = ctk.StringVar(value=str(default_vae_tile_threshold)) |
5724 | 5724 | sd_convdirect_var = ctk.StringVar(value=str(sd_convdirect_choices[0])) |
@@ -6503,7 +6503,7 @@ def toggletaesd(a,b,c): |
6503 | 6503 | makecheckbox(images_tab, "SD Flash Attention", sd_flash_attention_var, 44,padx=230, tooltiptxt="Enable Flash Attention for image diffusion. May save memory or improve performance.") |
6504 | 6504 | makecheckbox(images_tab, "Model CPU Offload", sd_offload_cpu_var, 50,padx=8, tooltiptxt="Offload image weights in RAM to save VRAM, swap into VRAM when needed.") |
6505 | 6505 | makecheckbox(images_tab, "VAE on CPU", sd_vae_cpu_var, 50,padx=160, tooltiptxt="Force VAE to CPU only for image generation.") |
6506 | | - makecheckbox(images_tab, "CLIP on CPU", sd_clip_cpu_var, 50,padx=280, tooltiptxt="Force CLIP to CPU only for image generation.") |
| 6506 | + makecheckbox(images_tab, "CLIP on GPU", sd_clip_gpu_var, 50,padx=280, tooltiptxt="Load CLIP and T5 on the GPU for image generation. Otherwise, CLIP runs on the CPU.") |
6507 | 6507 |
6508 | 6508 | # audio tab |
6509 | 6509 | audio_tab = tabcontent["Audio"] |
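On the GUI side, the renamed `ctk.IntVar` is bound to the relabeled checkbox through koboldcpp's `makecheckbox` helper, whose body is not shown in this diff. A hedged sketch of the wiring it presumably performs, using plain `customtkinter` calls (the placement values are taken from the call above; everything else is assumed):

```python
import customtkinter as ctk

# Approximation of what the makecheckbox helper likely does: bind an IntVar
# to a CTkCheckBox and place it on the requested row with the given padding.
root = ctk.CTk()
sd_clip_gpu_var = ctk.IntVar(value=0)  # unchecked by default: CLIP stays on CPU
box = ctk.CTkCheckBox(root, text="CLIP on GPU", variable=sd_clip_gpu_var,
                      onvalue=1, offvalue=0)
box.grid(row=50, column=0, padx=280, sticky="w")
# root.mainloop()  # uncomment to display the window
```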
@@ -6751,8 +6751,8 @@ def export_vars(): |
6751 | 6751 | args.sdoffloadcpu = True |
6752 | 6752 | if sd_vae_cpu_var.get()==1: |
6753 | 6753 | args.sdvaecpu = True |
6754 | | - if sd_clip_cpu_var.get()==1: |
6755 | | - args.sdclipcpu = True |
| 6754 | + if sd_clip_gpu_var.get()==1: |
| 6755 | + args.sdclipgpu = True |
6756 | 6756 | args.sdthreads = (0 if sd_threads_var.get()=="" else int(sd_threads_var.get())) |
6757 | 6757 | args.sdclamped = (0 if int(sd_clamped_var.get())<=0 else int(sd_clamped_var.get())) |
6758 | 6758 | args.sdclampedsoft = (0 if int(sd_clamped_soft_var.get())<=0 else int(sd_clamped_soft_var.get())) |
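In `export_vars`, the conditional assignment works because argparse's `store_true` action has already seeded `args.sdclipgpu = False`; the `if` only ever flips it to `True`. A self-contained sketch of that round trip, with a `tk.IntVar` standing in for the GUI variable:

```python
import argparse
import tkinter as tk

# argparse's store_true action seeds args.sdclipgpu = False,
# so export_vars only has to set it when the box is checked.
parser = argparse.ArgumentParser()
parser.add_argument("--sdclipgpu", action="store_true")
args = parser.parse_args([])           # args.sdclipgpu == False by default

root = tk.Tk(); root.withdraw()        # an IntVar needs a Tk root to exist
sd_clip_gpu_var = tk.IntVar(value=1)   # pretend the user ticked the box
if sd_clip_gpu_var.get() == 1:
    args.sdclipgpu = True
print(args.sdclipgpu)                  # True
```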
@@ -6997,7 +6997,7 @@ def import_vars(dict): |
6997 | 6997 | sd_flash_attention_var.set(1 if ("sdflashattention" in dict and dict["sdflashattention"]) else 0) |
6998 | 6998 | sd_offload_cpu_var.set(1 if ("sdoffloadcpu" in dict and dict["sdoffloadcpu"]) else 0) |
6999 | 6999 | sd_vae_cpu_var.set(1 if ("sdvaecpu" in dict and dict["sdvaecpu"]) else 0) |
7000 | | - sd_clip_cpu_var.set(1 if ("sdclipcpu" in dict and dict["sdclipcpu"]) else 0) |
| 7000 | + sd_clip_gpu_var.set(1 if ("sdclipgpu" in dict and dict["sdclipgpu"]) else 0) |
7001 | 7001 | sd_convdirect_var.set(sd_convdirect_option(dict.get("sdconvdirect"))) |
7002 | 7002 | sd_vae_var.set(dict["sdvae"] if ("sdvae" in dict and dict["sdvae"]) else "") |
7003 | 7003 | sd_t5xxl_var.set(dict["sdt5xxl"] if ("sdt5xxl" in dict and dict["sdt5xxl"]) else "") |
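The `1 if ("sdclipgpu" in dict and dict["sdclipgpu"]) else 0` pattern in `import_vars` can be expressed more compactly with `dict.get`. The sketch below (with `settings` as a hypothetical loaded config) also shows why dropping the old key is safe: a saved config that still contains `sdclipcpu` is ignored, and the resulting default of CLIP on CPU matches what that old flag requested.

```python
# `settings` stands in for the loaded config dict passed to import_vars.
# A retired "sdclipcpu": True entry from an old config is simply ignored,
# which is safe: CLIP-on-CPU is the new default behavior anyway.
settings = {"sdclipcpu": True}                       # old-style saved config
sd_clip_gpu = 1 if settings.get("sdclipgpu") else 0  # dict.get shorthand
print(sd_clip_gpu)  # 0 -> CLIP stays on the CPU
```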
@@ -8868,7 +8868,7 @@ def range_checker(arg: str): |
8868 | 8868 | sdparsergroup.add_argument("--sdflashattention", help="Enables Flash Attention for image generation.", action='store_true') |
8869 | 8869 | sdparsergroup.add_argument("--sdoffloadcpu", help="Offload image weights in RAM to save VRAM, swap into VRAM when needed.", action='store_true') |
8870 | 8870 | sdparsergroup.add_argument("--sdvaecpu", help="Force VAE to CPU only for image generation.", action='store_true') |
8871 | | - sdparsergroup.add_argument("--sdclipcpu", help="Force CLIP to CPU only for image generation.", action='store_true') |
| 8871 | + sdparsergroup.add_argument("--sdclipgpu", help="Load CLIP and T5 on the GPU for image generation. Otherwise, CLIP runs on the CPU.", action='store_true') |
8872 | 8872 | sdparsergroup.add_argument("--sdconvdirect", help="Enables Conv2D Direct. May improve performance or reduce memory usage. Might crash if not supported by the backend. Can be 'off' (default) to disable, 'full' to turn it on for all operations, or 'vaeonly' to enable only for the VAE.", type=sd_convdirect_option, choices=sd_convdirect_choices, default=sd_convdirect_choices[0]) |
8873 | 8873 | sdparsergroupvae = sdparsergroup.add_mutually_exclusive_group() |
8874 | 8874 | sdparsergroupvae.add_argument("--sdvae", metavar=('[filename]'), help="Specify an image generation safetensors VAE which replaces the one in the model.", default="") |
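Finally, the CLI surface: because the new flag is a `store_true` action, omitting it preserves the old default behavior of keeping CLIP on the CPU. A minimal sketch of the parser fragment, modeling only this one flag:

```python
import argparse

# Only the flag under discussion is modeled here.
parser = argparse.ArgumentParser()
parser.add_argument("--sdclipgpu",
                    help="Load CLIP and T5 on the GPU for image generation. "
                         "Otherwise, CLIP runs on the CPU.",
                    action="store_true")
print(parser.parse_args([]).sdclipgpu)               # False -> CLIP on CPU
print(parser.parse_args(["--sdclipgpu"]).sdclipgpu)  # True  -> CLIP on GPU
```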