1 parent 4c8261a commit f922be5
tests/_test_utils/deploy_utils.py
@@ -58,6 +58,9 @@ def __init__(
 
     def run(self):
         """Run the deployment based on the specified backend."""
+        if not torch.cuda.is_available() or torch.cuda.device_count() == 0:
+            pytest.skip("CUDA is not available")
+            return
         if torch.cuda.get_device_capability() < (
             self.mini_sm // 10,
             self.mini_sm % 10,
@@ -68,7 +71,6 @@ def run(self):
         if torch.cuda.device_count() < self.tensor_parallel_size:
             pytest.skip(reason=f"Requires at least {self.tensor_parallel_size} GPUs")
             return
-
         if self.backend == "vllm":
             self._deploy_vllm()
         elif self.backend == "trtllm":
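
The guard added in this commit is the common pytest pattern for skipping GPU-dependent tests on hosts without CUDA. A minimal standalone sketch of the same pattern (the test name and final assertion are illustrative, not part of this commit):

import pytest
import torch

def test_gpu_deploy_smoke():
    """Illustrative test using the same skip guard as the diff above."""
    # Skip early when no CUDA device is present; device_count() can be 0
    # even when the installed torch build was compiled with CUDA support.
    if not torch.cuda.is_available() or torch.cuda.device_count() == 0:
        pytest.skip("CUDA is not available")
    # GPU-dependent work would run here; this assertion only holds
    # once the guard above has passed.
    assert torch.cuda.device_count() >= 1

Note that pytest.skip() raises an exception, so the explicit return after it in the diff is never reached and serves only as a defensive marker.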