We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent ff389db · commit 1171d00 (Copy full SHA for 1171d00)
bitsandbytes/backends/xpu/ops.py
@@ -1,14 +1,16 @@
 from collections.abc import Sequence
 import warnings

+from packaging import version
 import torch

 from ..._ops import register_kernel
 from ..utils import ipex_xpu, triton_available

-# _int_mm is available in torch starting from 2.7 version,
-# but currently it's don't have xpu implementation.
-if ipex_xpu and torch.__version__ >= (2, 7):
+# _int_mm is available in torch starting from 2.9 version, or ipex 2.7
+if version.parse(torch.__version__).release >= version.parse("2.9").release or (
+    ipex_xpu and torch.__version__ >= (2, 7)
+):

     @register_kernel("bitsandbytes::int8_linear_matmul", "xpu")
     def _(A: torch.Tensor, B: torch.Tensor):
0 commit comments