1 parent 140b137 commit 7da1676
tests/gpu/examples/test_paged_attention.py
@@ -2,6 +2,7 @@
 import random
 from typing import List, Optional, Tuple
 import intel_extension_for_pytorch as ipex # noqa
+import pytest
 from torch.testing._internal.common_utils import (
     TestCase,
 )
@@ -310,6 +311,10 @@ def test_fp16(self):
         for version in ["v1", "v2"]:
             self.paged_attention(version, torch.float16)
 
+    @pytest.mark.skipif(
+        not torch.xpu.has_xmx(),
+        reason="Paged_attention: No bf16 support for current gpu arch.",
+    )
     def test_bf16(self):
         for version in ["v1", "v2"]:
             self.paged_attention(version, torch.bfloat16)
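
For reference, a minimal standalone sketch of the skip pattern this commit introduces: a pytest.mark.skipif decorator gated on torch.xpu.has_xmx(), the IPEX capability query used in the diff above. The helper _has_xmx and the test body below are illustrative additions, not part of the commit; the guard simply keeps the skip condition from raising at collection time on builds where torch.xpu or has_xmx is unavailable.

import pytest
import torch
import intel_extension_for_pytorch as ipex  # noqa: F401  (registers XPU extensions)


def _has_xmx() -> bool:
    # Hypothetical guard: return False instead of raising when torch.xpu
    # (or its has_xmx attribute) is missing, e.g. on a CPU-only build.
    xpu = getattr(torch, "xpu", None)
    return bool(xpu is not None and getattr(xpu, "has_xmx", lambda: False)())


@pytest.mark.skipif(
    not _has_xmx(),
    reason="Paged_attention: No bf16 support for current gpu arch.",
)
def test_bf16_requires_xmx():
    # Illustrative body: bf16 work that needs XMX engines would go here.
    # skipif is evaluated at collection time, so on hardware without XMX
    # this test is reported as SKIPPED rather than FAILED.
    x = torch.randn(4, 4, dtype=torch.bfloat16, device="xpu")
    assert x.dtype == torch.bfloat16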