⚠ Don't upgrade to PyTorch 2 if you have a 1080/Ti GPU! (working now). #8709
ZeroCool22 started this conversation in General
Replies: 2 comments
-
Correct, it's not supported for the 1xxx series yet.
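For anyone who wants to check their own install, here is a minimal sketch (torch.cuda.get_device_capability() and torch.cuda.get_arch_list() are standard PyTorch calls; the 1080/1080 Ti is compute capability 6.1, i.e. sm_61):

```python
import torch

# Compute capability of the active GPU; a GTX 1080/1080 Ti reports (6, 1), i.e. sm_61
major, minor = torch.cuda.get_device_capability(0)
print(f"{torch.cuda.get_device_name(0)}: sm_{major}{minor}")

# Architectures this PyTorch build was compiled for; if sm_61 is absent,
# CUDA ops fail with "no kernel image is available for execution on the device"
print("compiled for:", torch.cuda.get_arch_list())
```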
-
Just posting again to say that after the latest updates Torch 2 got, it's now working with the 1080 Ti. A small speed improvement, as expected for this old card.
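If you want a quick smoke test after updating, something like this works (a minimal sketch; note that the traceback below fails inside xformers' memory_efficient_attention, so it's worth testing with and without --xformers as well):

```python
import torch

print(torch.__version__, "CUDA", torch.version.cuda)

# A tiny CUDA op as a smoke test: a build without kernels for your GPU
# raises "no kernel image is available for execution on the device" here
x = torch.randn(1024, 1024, device="cuda")
print((x @ x.t()).sum().item())
```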
-
I did it just to see if it would work, following this guide (https://github.com/d8ahazard/sd_dreambooth_extension/releases/tag/1.0.13), and my GUI got wrecked and couldn't create any images:
```
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect. For debugging consider passing CUDA_LAUNCH_BLOCKING=1. Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
  0%|          | 0/20 [00:00<?, ?it/s]
Error completing request
Arguments: ('task(gkmg3awt4ativg6)', 'Ripped viking fighting a dragon, photorealistic, ((detailed face)), amazing natural skin tone, 4k textures, soft cinematic light, photoshop, epic scene, art by artgerm and greg rutkowski', '(anime:1.2), (manga:1.2), pigtail, paint, cartoon, render, (areola nipples:1.1), 3d, asian, deformities, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry', [], 20, 0, False, False, 1, 1, 7, -1.0, -1.0, 0, 0, 0, False, 704, 512, True, 0.7, 2, 'None', 0, 0, 0, [], 0, <scripts.external_code.ControlNetUnit object at 0x0000029294E50640>, <scripts.external_code.ControlNetUnit object at 0x0000029294E50100>, False, False, 'positive', 'comma', 0, False, False, '', 1, '', 0, '', 0, '', True, False, False, False, 0, None, None, 50) {}
Traceback (most recent call last):
File "C:\Users\ZeroCool22\Desktop\Auto\modules\call_queue.py", line 56, in f
res = list(func(*args, **kwargs))
File "C:\Users\ZeroCool22\Desktop\Auto\modules\call_queue.py", line 37, in f
res = func(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\modules\txt2img.py", line 56, in txt2img
processed = process_images(p)
File "C:\Users\ZeroCool22\Desktop\Auto\modules\processing.py", line 486, in process_images
res = process_images_inner(p)
File "C:\Users\ZeroCool22\Desktop\Auto\modules\processing.py", line 636, in process_images_inner
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, prompts=prompts)
File "C:\Users\ZeroCool22\Desktop\Auto\modules\processing.py", line 836, in sample
samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
File "C:\Users\ZeroCool22\Desktop\Auto\modules\sd_samplers_kdiffusion.py", line 351, in sample
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "C:\Users\ZeroCool22\Desktop\Auto\modules\sd_samplers_kdiffusion.py", line 227, in launch_sampling
return func()
File "C:\Users\ZeroCool22\Desktop\Auto\modules\sd_samplers_kdiffusion.py", line 351, in
samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args={
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\utils_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\k-diffusion\k_diffusion\sampling.py", line 145, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\modules\sd_samplers_kdiffusion.py", line 119, in forward
x_out = self.inner_model(x_in, sigma_in, cond={"c_crossattn": [cond_in], "c_concat": [image_cond_in]})
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
return self.inner_model.apply_model(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\modules\sd_hijack_utils.py", line 17, in
setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
File "C:\Users\ZeroCool22\Desktop\Auto\modules\sd_hijack_utils.py", line 28, in call
return self.__orig_func(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 858, in apply_model
x_recon = self.model(x_noisy, t, **cond)
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 1329, in forward
out = self.diffusion_model(x, t, context=cc)
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 776, in forward
h = module(h, emb, context)
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\openaimodel.py", line 84, in forward
x = layer(x, context)
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 324, in forward
x = block(x, context=context[i])
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 259, in forward
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 114, in checkpoint
return CheckpointFunction.apply(func, len(inputs), *args)
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\autograd\function.py", line 506, in apply
return super().apply(*args, **kwargs) # type: ignore[misc]
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\stable-diffusion-stability-ai\ldm\modules\diffusionmodules\util.py", line 129, in forward
output_tensors = ctx.run_function(*ctx.input_tensors)
File "C:\Users\ZeroCool22\Desktop\Auto\repositories\stable-diffusion-stability-ai\ldm\modules\attention.py", line 262, in _forward
x = self.attn1(self.norm1(x), context=context if self.disable_self_attn else None) + x
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in call_impl
return forward_call(*args, **kwargs)
File "C:\Users\ZeroCool22\Desktop\Auto\modules\sd_hijack_optimizations.py", line 342, in xformers_attention_forward
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=get_xformers_flash_attention_op(q, k, v))
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\xformers\ops\fmha_init.py", line 196, in memory_efficient_attention
return memory_efficient_attention(
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\xformers\ops\fmha_init.py", line 292, in _memory_efficient_attention
return memory_efficient_attention_forward(
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\xformers\ops\fmha_init.py", line 312, in memory_efficient_attention_forward
out, * = op.apply(inp, needs_gradient=False)
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\xformers\ops\fmha\cutlass.py", line 175, in apply
out, lse, rng_seed, rng_offset = cls.OPERATOR(
File "C:\Users\ZeroCool22\Desktop\Auto\venv\lib\site-packages\torch_ops.py", line 502, in call
return self._op(*args, **kwargs or {})
RuntimeError: CUDA error: no kernel image is available for execution on the device
CUDA kernel errors might be asynchronously reported at some other API call, so the stacktrace below might be incorrect.
For debugging consider passing CUDA_LAUNCH_BLOCKING=1.
Compile with `TORCH_USE_CUDA_DSA` to enable device-side assertions.
```

You will need to delete your VENV folder and download it again to get it generating images again.
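In practice that means removing the venv folder and letting the launcher rebuild it. A minimal sketch of the same steps as a script (the path is taken from the traceback above; adjust it to your own install, and note webui-user.bat recreates the venv on the next launch):

```python
import shutil
import subprocess

WEBUI_DIR = r"C:\Users\ZeroCool22\Desktop\Auto"  # path from the traceback above

# Delete the broken virtual environment
shutil.rmtree(rf"{WEBUI_DIR}\venv", ignore_errors=True)

# webui-user.bat recreates the venv and reinstalls dependencies on the next launch
subprocess.run(rf"{WEBUI_DIR}\webui-user.bat", shell=True, cwd=WEBUI_DIR)
```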