Skip to content
2 changes: 1 addition & 1 deletion src/interfaces/vae_model_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ class SeedVR2LoadVAEModel(io.ComfyNode):
def define_schema(cls) -> io.Schema:
devices = get_device_list()
vae_models = get_available_vae_models()

return io.Schema(
node_id="SeedVR2LoadVAEModel",
display_name="SeedVR2 (Down)Load VAE Model",
Expand Down
10 changes: 6 additions & 4 deletions src/optimization/compatibility.py
Original file line number Diff line number Diff line change
Expand Up @@ -683,16 +683,18 @@ def _check_conv3d_memory_bug():
# Bfloat16 CUBLAS support
def _probe_bfloat16_support() -> bool:
if not torch.cuda.is_available():
return True
return False
try:
torch.cuda.device('cuda:0')
except (RuntimeError, AssertionError):
return False
try:
a = torch.randn(8, 8, dtype=torch.bfloat16, device='cuda:0')
_ = torch.matmul(a, a)
del a
return True
except RuntimeError as e:
if "CUBLAS_STATUS_NOT_SUPPORTED" in str(e):
return False
raise
return False

# Probed once at import time; downstream code picks the widest compute
# dtype the current CUDA/CUBLAS stack actually supports, falling back to
# float16 when bfloat16 GEMMs are unavailable.
BFLOAT16_SUPPORTED = _probe_bfloat16_support()
COMPUTE_DTYPE = torch.bfloat16 if BFLOAT16_SUPPORTED else torch.float16
Expand Down
12 changes: 10 additions & 2 deletions src/optimization/memory_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ def get_gpu_backend() -> str:
return 'cpu'


def get_device_list(include_none: bool = False, include_cpu: bool = False) -> List[str]:
def get_device_list(include_none: bool = False, include_cpu: bool = True) -> List[str]:
"""
Get list of available compute devices for SeedVR2

Expand Down Expand Up @@ -119,7 +119,15 @@ def get_basic_vram_info(device: Optional[torch.device] = None) -> Dict[str, Any]
free_memory = mem.total - mem.used
total_memory = mem.total
else:
return {"error": "No GPU backend available (CUDA/MPS)"}
from loguru import logger
logger.info("SeedVR2_VideoUpscaler: use fake memory info")
free_memory, total_memory = (24893718528*2, 25438126080*2)
free_gb = free_memory / (1024**3)
total_gb = total_memory / (1024**3)
return {
"free_gb": free_gb,
"total_gb": total_gb
}

return {
"free_gb": free_memory / (1024**3),
Expand Down