This repository was archived by the owner on May 21, 2025. It is now read-only.

Commit 4b06538

Update for autoloading
1 parent: 640a438

3 files changed: +5 -4 lines changed

setup.py

Lines changed: 1 addition & 1 deletion
@@ -6,5 +6,5 @@
     package_dir={"": "src"},
     packages=find_packages(where="src"),
     install_requires=["bitsandbytes"],
-    entry_points={"torch.backends": ["bitsandbytes_intel = bitsandbytes_intel:_autoload"]},
+    entry_points={"bitsandbytes.backends": ["bitsandbytes_intel = bitsandbytes_intel:_autoload"]},
 )
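
The renamed group, "bitsandbytes.backends", is the entry-point namespace that bitsandbytes scans when autoloading backend plugins. As a minimal sketch of how such discovery typically works via importlib.metadata (the load_backends name and loop body are illustrative assumptions, not the library's actual code):

from importlib.metadata import entry_points

def load_backends():
    # Keyword-based entry point selection requires Python 3.10+.
    for ep in entry_points(group="bitsandbytes.backends"):
        autoload = ep.load()  # resolves "bitsandbytes_intel:_autoload"
        autoload()            # lets the plugin register its kernels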

src/bitsandbytes_intel/__main__.py

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@ def main():
 
     try:
         A = torch.randint(-128, 127, (32, 64), dtype=torch.int8).to("xpu")
-        B = torch.randint(-128, 127, (64, 128), dtype=torch.int8).to("xpu")
+        B = torch.randint(-128, 127, (128, 64), dtype=torch.int8).to("xpu")
 
         result = torch.ops.bitsandbytes.int8_linear_matmul(A, B)
         # Simple output verification
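
The new B shape follows the kernel's layout contract: as the cpu_xpu_common.py hunk below shows, the implementation computes torch._int_mm(A_reshaped, B.T), so B is expected as (n, k) and its inner dimension must match A's k = 64. A quick self-contained check of that contract on CPU (an illustrative sketch; per the comment in cpu_xpu_common.py, torch._int_mm needs torch >= 2.4 on CPU):

import torch

A = torch.randint(-128, 127, (32, 64), dtype=torch.int8)   # (m, k)
B = torch.randint(-128, 127, (128, 64), dtype=torch.int8)  # (n, k)
C = torch._int_mm(A, B.T)                                  # int32 result, (m, n)
assert C.shape == (32, 128)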

src/bitsandbytes_intel/cpu_xpu_common.py

Lines changed: 3 additions & 2 deletions
@@ -203,8 +203,9 @@ def int8_linear_matmul_impl(
     A_reshaped = A.reshape(m, k)
 
     # torch._int_mm is available on CPU since torch 2.4, XPU since torch 2.6
-    if (A.device.type == "cpu" and _torch_version_prereq(2, 4)) or (
-        A.device.type == "xpu" and _torch_version_prereq(2, 6)
+    if (
+        A.device.type == "cpu" and _torch_version_prereq(2, 4)
+        # or (A.device.type == "xpu" and _torch_version_prereq(2, 6)
     ):
         C = torch._int_mm(A_reshaped, B.T).to(dtype)
     else:
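
This hunk comments out the XPU condition, so XPU inputs now take the else fallback instead of torch._int_mm. The _torch_version_prereq helper is referenced but not defined in this hunk; a plausible sketch of such a check, assuming it compares a (major, minor) floor against torch.__version__ (an assumption, not necessarily the repository's code):

import torch

def _torch_version_prereq(major: int, minor: int) -> bool:
    # Compare the running torch version against a (major, minor) floor,
    # tolerating suffixes like "2.6.0+cu121" on the micro component.
    ver_major = int(torch.__version__.split(".")[0])
    ver_minor = int(torch.__version__.split(".")[1])
    return (ver_major, ver_minor) >= (major, minor)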
