RuntimeError: "LayerNormKernelImpl" not implemented for 'Half' #14427
Replies: 1 comment
-
Same issue |
Beta Was this translation helpful? Give feedback.
0 replies
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Uh oh!
There was an error while loading. Please reload this page.
Uh oh!
There was an error while loading. Please reload this page.
-
Adding "--no-half" to "set COMMANDLINE_ARGS=" fixes this; however, generations are then done on the CPU rather than the GPU (which is not ideal). It originally worked on the GPU just fine, but the last time I closed and reopened it, it stopped working. I followed the same AMD Windows installation guide.
venv "C:\stable diffusion\stable-diffusion-webui-directml\venv\Scripts\Python.exe"
fatal: No names found, cannot describe anything.
Python 3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)]
Version: 1.7.0
Commit hash: 668ee14
Launching Web UI with arguments: --skip-torch-cuda-test
no module 'xformers'. Processing without...
no module 'xformers'. Processing without...
No module 'xformers'. Proceeding without it.
Style database not found: C:\stable diffusion\stable-diffusion-webui-directml\styles.csv
Warning: caught exception 'Found no NVIDIA driver on your system. Please check that you have an NVIDIA GPU and installed a driver from http://www.nvidia.com/Download/index.aspx', memory monitor disabled
Loading weights [6ce0161689] from C:\stable diffusion\stable-diffusion-webui-directml\models\Stable-diffusion\v1-5-pruned-emaonly.safetensors
Creating model from config: C:\stable diffusion\stable-diffusion-webui-directml\configs\v1-inference.yaml
Startup time: 7.0s (prepare environment: 0.2s, import torch: 2.4s, import gradio: 0.8s, setup paths: 0.7s, initialize shared: 0.4s, other imports: 0.6s, load scripts: 1.0s, create ui: 0.3s, gradio launch: 0.4s).
Applying attention optimization: InvokeAI... done.
loading stable diffusion model: RuntimeError
Traceback (most recent call last):
File "C:\Users\MrHob\AppData\Local\Programs\Python\Python310\lib\threading.py", line 973, in _bootstrap
self._bootstrap_inner()
File "C:\Users\MrHob\AppData\Local\Programs\Python\Python310\lib\threading.py", line 1016, in _bootstrap_inner
self.run()
File "C:\Users\MrHob\AppData\Local\Programs\Python\Python310\lib\threading.py", line 953, in run
self._target(*self._args, **self._kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\initialize.py", line 147, in load_model
shared.sd_model # noqa: B018
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\shared_items.py", line 128, in sd_model
return modules.sd_models.model_data.get_sd_model()
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\sd_models.py", line 576, in get_sd_model
load_model()
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\sd_models.py", line 746, in load_model
sd_model.cond_stage_model_empty_prompt = get_empty_cond(sd_model)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\sd_models.py", line 628, in get_empty_cond
return sd_model.cond_stage_model([""])
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_clip.py", line 234, in forward
z = self.process_tokens(tokens, multipliers)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_clip.py", line 273, in process_tokens
z = self.encode_with_transformers(tokens)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_clip.py", line 326, in encode_with_transformers
outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 822, in forward
return self.text_model(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 740, in forward
encoder_outputs = self.encoder(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 654, in forward
layer_outputs = encoder_layer(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 382, in forward
hidden_states = self.layer_norm1(hidden_states)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\extensions-builtin\Lora\networks.py", line 531, in network_LayerNorm_forward
return originals.LayerNorm_forward(self, input)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\normalization.py", line 190, in forward
return F.layer_norm(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\functional.py", line 2515, in layer_norm
return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)
RuntimeError: "LayerNormKernelImpl" not implemented for 'Half'
Stable diffusion model failed to load
Traceback (most recent call last):
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\gradio\routes.py", line 488, in run_predict
output = await app.get_blocks().process_api(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\gradio\blocks.py", line 1431, in process_api
result = await self.call_function(
Exception in thread Thread-16 (load_model):
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\gradio\blocks.py", line 1103, in call_function
prediction = await anyio.to_thread.run_sync(
Traceback (most recent call last):
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\anyio\to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
File "C:\Users\MrHob\AppData\Local\Programs\Python\Python310\lib\threading.py", line 1016, in _bootstrap_inner
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\anyio_backends_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
self.run()
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\anyio_backends_asyncio.py", line 807, in run
result = context.run(func, *args)
File "C:\Users\MrHob\AppData\Local\Programs\Python\Python310\lib\threading.py", line 953, in run
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\gradio\utils.py", line 707, in wrapper
response = f(*args, **kwargs)
self._target(*self._args, **self._kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\ui_extra_networks.py", line 419, in pages_html
return refresh()
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\initialize.py", line 153, in load_model
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\ui_extra_networks.py", line 425, in refresh
pg.refresh()
devices.first_time_calculation()
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\ui_extra_networks_textual_inversion.py", line 15, in refresh
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\devices.py", line 177, in first_time_calculation
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\textual_inversion\textual_inversion.py", line 222, in load_textual_inversion_embeddings
self.expected_shape = self.get_expected_shape()
linear(x)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\textual_inversion\textual_inversion.py", line 154, in get_expected_shape
vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in call_impl
AttributeError: 'NoneType' object has no attribute 'cond_stage_model'
return forward_call(*args, **kwargs)
Traceback (most recent call last):
File "C:\stable diffusion\stable-diffusion-webui-directml\extensions-builtin\Lora\networks.py", line 486, in network_Linear_forward
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\gradio\routes.py", line 488, in run_predict
output = await app.get_blocks().process_api(
return originals.Linear_forward(self, input)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\gradio\blocks.py", line 1431, in process_api
result = await self.call_function(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\linear.py", line 114, in forward
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\gradio\blocks.py", line 1103, in call_function
prediction = await anyio.to_thread.run_sync(
return F.linear(input, self.weight, self.bias)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\anyio\to_thread.py", line 33, in run_sync
return await get_asynclib().run_sync_in_worker_thread(
RuntimeError: "addmm_impl_cpu" not implemented for 'Half'
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\anyio_backends_asyncio.py", line 877, in run_sync_in_worker_thread
return await future
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\anyio_backends_asyncio.py", line 807, in run
result = context.run(func, *args)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\gradio\utils.py", line 707, in wrapper
response = f(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\ui_extra_networks.py", line 419, in pages_html
return refresh()
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\ui_extra_networks.py", line 425, in refresh
pg.refresh()
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\ui_extra_networks_textual_inversion.py", line 15, in refresh
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\textual_inversion\textual_inversion.py", line 222, in load_textual_inversion_embeddings
self.expected_shape = self.get_expected_shape()
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\textual_inversion\textual_inversion.py", line 154, in get_expected_shape
vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1)
AttributeError: 'NoneType' object has no attribute 'cond_stage_model'
Using already loaded model v1-5-pruned-emaonly.safetensors [6ce0161689]: done in 0.0s
*** Error completing request
*** Arguments: ('task(2d2y3m0da9cc81p)', 'bgvfd', '', [], 20, 'DPM++ 2M Karras', 1, 1, 7, 512, 512, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], <gradio.routes.Request object at 0x000001DE4D23EB00>, 0, False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, 'positive', 'comma', 0, False, False, 'start', '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False) {}
Traceback (most recent call last):
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\call_queue.py", line 57, in f
res = list(func(*args, **kwargs))
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\call_queue.py", line 36, in f
res = func(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\txt2img.py", line 64, in txt2img
processed = processing.process_images(p)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 735, in process_images
res = process_images_inner(p)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 861, in process_images_inner
p.setup_conds()
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 1312, in setup_conds
super().setup_conds()
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 469, in setup_conds
self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\processing.py", line 455, in get_conds_with_caching
cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\prompt_parser.py", line 188, in get_learned_conditioning
conds = model.get_learned_conditioning(texts)
File "C:\stable diffusion\stable-diffusion-webui-directml\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 669, in get_learned_conditioning
c = self.cond_stage_model(c)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_clip.py", line 234, in forward
z = self.process_tokens(tokens, multipliers)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_clip.py", line 273, in process_tokens
z = self.encode_with_transformers(tokens)
File "C:\stable diffusion\stable-diffusion-webui-directml\modules\sd_hijack_clip.py", line 326, in encode_with_transformers
outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 822, in forward
return self.text_model(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 740, in forward
encoder_outputs = self.encoder(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 654, in forward
layer_outputs = encoder_layer(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 382, in forward
hidden_states = self.layer_norm1(hidden_states)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "C:\stable diffusion\stable-diffusion-webui-directml\extensions-builtin\Lora\networks.py", line 531, in network_LayerNorm_forward
return originals.LayerNorm_forward(self, input)
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\modules\normalization.py", line 190, in forward
return F.layer_norm(
File "C:\stable diffusion\stable-diffusion-webui-directml\venv\lib\site-packages\torch\nn\functional.py", line 2515, in layer_norm
return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)
RuntimeError: "LayerNormKernelImpl" not implemented for 'Half'
Beta Was this translation helpful? Give feedback.
All reactions