Crash on start
Python 3.10.6 (tags/v3.10.6:9c7b4bd, Aug 1 2022, 21:53:49) [MSC v.1932 64 bit (AMD64)]
Version: f1.0.2v1.10.1-previous-98-g0b129b71
Commit hash: 0b129b7
Launching Web UI with arguments:
Total VRAM 4096 MB, total RAM 8126 MB
pytorch version: 2.1.2+cu121
Set vram state to: NORMAL_VRAM
Device: cuda:0 NVIDIA GeForce GTX 1650 : native
VAE dtype preferences: [torch.float32] -> torch.float32
CUDA Stream Activated: False
Using pytorch cross attention
Using pytorch attention for VAE
ControlNet preprocessor location: C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\models\ControlNetPreprocessor
Loading weights [15012c538f] from C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\models\Stable-diffusion\realisticVisionV51_v51VAE.safetensors
2024-08-04 00:56:06,770 - ControlNet - INFO - ControlNet UI callback registered.
Running on local URL: http://127.0.0.1:7860
To create a public link, set share=True in launch().
Startup time: 17.7s (prepare environment: 3.2s, launcher: 2.5s, import torch: 4.0s, setup paths: 1.1s, initialize shared: 0.2s, other imports: 0.8s, load scripts: 2.0s, create ui: 2.7s, gradio launch: 1.2s).
Working with z of shape (1, 4, 32, 32) = 4096 dimensions.
To load target model JointCLIPTextEncoder
Begin to load 1 model
Moving model(s) has taken 0.00 seconds
loading stable diffusion model: RuntimeError
Traceback (most recent call last):
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\launch.py", line 51, in
main()
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\launch.py", line 47, in main
start()
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules\launch_utils.py", line 552, in start
main_thread.loop()
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules_forge\main_thread.py", line 37, in loop
task.work()
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules_forge\main_thread.py", line 26, in work
self.result = self.func(*self.args, **self.kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules\sd_models.py", line 567, in get_sd_model
load_model()
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules\sd_models.py", line 698, in load_model
sd_model.cond_stage_model_empty_prompt = get_empty_cond(sd_model)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules\sd_models.py", line 594, in get_empty_cond
d = sd_model.get_learned_conditioning([""])
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 669, in get_learned_conditioning
c = self.cond_stage_model(c)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules\sd_hijack_clip.py", line 313, in forward
return super().forward(texts)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules\sd_hijack_clip.py", line 227, in forward
z = self.process_tokens(tokens, multipliers)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules\sd_hijack_clip.py", line 269, in process_tokens
z = self.encode_with_transformers(tokens)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\modules_forge\forge_clip.py", line 24, in encode_with_transformers
outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 822, in forward
return self.text_model(
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 740, in forward
encoder_outputs = self.encoder(
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 654, in forward
layer_outputs = encoder_layer(
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\transformers\models\clip\modeling_clip.py", line 382, in forward
hidden_states = self.layer_norm1(hidden_states)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1518, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\module.py", line 1527, in _call_impl
return forward_call(*args, **kwargs)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\webui\backend\operations.py", line 132, in forward
return super().forward(x)
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\modules\normalization.py", line 196, in forward
return F.layer_norm(
File "C:\Users\sumsa\Downloads\webui_forge_cu121_torch21\system\python\lib\site-packages\torch\nn\functional.py", line 2543, in layer_norm
return torch.layer_norm(input, normalized_shape, weight, bias, eps, torch.backends.cudnn.enabled)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking argument for argument weight in method wrapper_CUDA__native_layer_norm)
Stable diffusion model failed to load
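For context, the final RuntimeError is a generic PyTorch device mismatch rather than anything Forge-specific: the CLIP text encoder's LayerNorm weight is still on the CPU while the input hidden states are already on cuda:0. A minimal sketch (plain PyTorch, not Forge's actual code, assuming a CUDA-capable machine) that reproduces and then clears the same error:

```python
import torch
import torch.nn as nn

ln = nn.LayerNorm(768)                       # module parameters are created on the CPU
x = torch.randn(1, 77, 768, device="cuda")   # activations on cuda:0, like the CLIP hidden states

try:
    ln(x)
except RuntimeError as e:
    # Expected all tensors to be on the same device, but found at least
    # two devices, cuda:0 and cpu! (... argument weight ...)
    print(e)

ln.to("cuda")         # moving the module's weight/bias to the GPU resolves the mismatch
print(ln(x).shape)    # torch.Size([1, 77, 768])
```

The sketch only illustrates what the message means; in the report above the mismatch occurs inside Forge's own model loading, where the text encoder's weights apparently never made it onto the GPU ("Moving model(s) has taken 0.00 seconds").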
How do I fix this?