| 1 | +"""Device detection utilities for kernel tests.""" |
| 2 | + |
| 3 | +from typing import List |
| 4 | + |
| 5 | +import pytest |
| 6 | +import torch |
| 7 | + |
| 8 | + |
| 9 | +def get_device() -> torch.device: |
| 10 | + """Return the best available compute device (MPS > CUDA > XPU > CPU).""" |
| 11 | + if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): |
| 12 | + return torch.device("mps") |
| 13 | + if torch.cuda.is_available(): |
| 14 | + return torch.device("cuda") |
| 15 | + if hasattr(torch, "xpu") and torch.xpu.is_available(): |
| 16 | + return torch.device("xpu") |
| 17 | + return torch.device("cpu") |
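
# Usage sketch (illustrative only; ``x`` is a hypothetical tensor, not part
# of this module):
#
#     x = torch.ones(4, device=get_device())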


def get_available_devices() -> List[str]:
    """Return device strings suitable for pytest parametrization.

    On MPS: ``["mps"]``
    On CUDA: ``["cuda:0", "cuda:1", ...]`` for each visible GPU.
    On XPU: ``["xpu:0", "xpu:1", ...]`` for each visible accelerator.
    Fallback: ``["cpu"]``
    """
    if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
        return ["mps"]
    if torch.cuda.is_available():
        # device_count() is >= 1 whenever is_available() is True; the max()
        # guard is purely defensive.
        return [f"cuda:{i}" for i in range(max(1, torch.cuda.device_count()))]
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return [f"xpu:{i}" for i in range(max(1, torch.xpu.device_count()))]
    return ["cpu"]
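
# Parametrization sketch (hedged; ``test_elementwise_add`` is a hypothetical
# test, not part of this module):
#
#     @pytest.mark.parametrize("device", get_available_devices())
#     def test_elementwise_add(device):
#         x = torch.ones(8, device=device)
#         assert torch.equal(x + x, torch.full((8,), 2.0, device=device))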


def skip_if_no_gpu() -> None:
    """Call inside a test to skip when no accelerator is available.

    MPS, CUDA, and XPU all count as "GPU" here; only a CPU-only
    environment triggers the skip.
    """
    dev = get_device()
    if dev.type == "cpu":
        pytest.skip("No GPU device available")
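
# Usage sketch (hedged; ``test_matmul_on_gpu`` is a hypothetical test):
#
#     def test_matmul_on_gpu():
#         skip_if_no_gpu()
#         dev = get_device()
#         a = torch.randn(4, 4, device=dev)
#         assert torch.allclose(a @ torch.eye(4, device=dev), a)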