1 | | -from typing import Tuple |
2 | | - |
3 | 1 | import numpy as np |
4 | 2 | import pytest |
5 | 3 | from scipy.stats import norm |
6 | 4 | import torch |
7 | 5 |
8 | | -import bitsandbytes as bnb
9 | 6 | from bitsandbytes import functional as F |
10 | | -from tests.helpers import ( |
11 | | - BOOLEAN_TUPLES, |
12 | | - describe_dtype, |
13 | | - get_test_dims, |
14 | | - id_formatter, |
15 | | -) |
16 | | - |
17 | | - |
18 | | -@pytest.mark.parametrize("dim1", get_test_dims(16, 64, n=1), ids=id_formatter("dim1")) |
19 | | -@pytest.mark.parametrize("dim2", get_test_dims(32, 96, n=1), ids=id_formatter("dim2")) |
20 | | -@pytest.mark.parametrize("dim3", get_test_dims(32, 96, n=1), ids=id_formatter("dim3")) |
21 | | -@pytest.mark.parametrize("dim4", get_test_dims(32, 96, n=1), ids=id_formatter("dim4")) |
22 | | -@pytest.mark.parametrize( |
23 | | - "funcs", |
24 | | - [(torch.bmm, bnb.bmm_cublas), (torch.matmul, bnb.matmul_cublas)], |
25 | | - ids=["func=bmm", "func=matmul"], |
26 | | -) |
27 | | -@pytest.mark.parametrize("dtype", [torch.float32, torch.float16], ids=describe_dtype) |
28 | | -@pytest.mark.parametrize("req_grad", BOOLEAN_TUPLES, ids=id_formatter("req_grad")) |
29 | | -@pytest.mark.parametrize("transpose", BOOLEAN_TUPLES, ids=id_formatter("transpose")) |
30 | | -@pytest.mark.deprecated |
31 | | -def test_matmul(dim1, dim2, dim3, dim4, funcs, dtype, req_grad: Tuple[bool, bool], transpose: Tuple[bool, bool]): |
32 | | - if dim2 > 0: |
33 | | - dim2 = dim2 - (dim2 % 16) |
34 | | - dim3 = dim3 - (dim3 % 16) |
35 | | - dim4 = dim4 - (dim4 % 16) |
36 | | - for i in range(25): |
37 | | - # normal multiply |
38 | | - if funcs[0] in [torch.mm, torch.matmul]: |
39 | | - dimA = (dim2, dim3) if not transpose[0] else (dim3, dim2) |
40 | | - dimB = (dim3, dim4) if not transpose[1] else (dim4, dim3) |
41 | | - A = torch.randn(size=dimA, device="cuda", requires_grad=req_grad[0]) |
42 | | - B = torch.randn(size=dimB, device="cuda", requires_grad=req_grad[1]) |
43 | | - target = torch.randn(size=(dim2, dim4), device="cuda", requires_grad=req_grad[1]) |
44 | | - torch.nn.init.xavier_uniform_(B) |
45 | | - |
46 | | - if not transpose[0] and not transpose[1]: |
47 | | - out_torch = funcs[0](A, B) |
48 | | - out_bnb = funcs[1](A, B) |
49 | | - elif not transpose[0] and transpose[1]: |
50 | | - out_torch = funcs[0](A, B.t()) |
51 | | - out_bnb = funcs[1](A, B.t()) |
52 | | - elif transpose[0] and not transpose[1]: |
53 | | - out_torch = funcs[0](A.t(), B) |
54 | | - out_bnb = funcs[1](A.t(), B) |
55 | | - elif transpose[0] and transpose[1]: |
56 | | - out_torch = funcs[0](A.t(), B.t()) |
57 | | - out_bnb = funcs[1](A.t(), B.t()) |
58 | | - |
59 | | - n = out_bnb.numel() |
60 | | - idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1) |
61 | | - assert (idx == 0).sum().item() < n * 0.0175 |
62 | | - idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2) |
63 | | - assert (idx == 0).sum().item() < n * 0.001 |
64 | | - |
65 | | - if any(req_grad): |
66 | | - out_bnb.data.copy_(out_torch) |
67 | | - torch.cuda.synchronize() |
68 | | - loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean() |
69 | | - loss_bnb.backward() |
70 | | - gradA1 = A.grad |
71 | | - gradB1 = B.grad |
72 | | - A.grad = None |
73 | | - B.grad = None |
74 | | - |
75 | | - loss_torch = torch.nn.functional.mse_loss(out_torch, target).mean() |
76 | | - loss_torch.backward() |
77 | | - gradA2 = A.grad |
78 | | - gradB2 = B.grad |
79 | | - A.grad = None |
80 | | - B.grad = None |
81 | | - |
82 | | - if req_grad[0]: |
83 | | - torch.testing.assert_close(gradA1, gradA2, atol=0.015, rtol=0.1) |
84 | | - if req_grad[1]: |
85 | | - n = gradB1.numel() |
86 | | - idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3) |
87 | | - assert (idx == 0).sum().item() < n * 0.1 |
88 | | - idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3) |
89 | | - assert (idx == 0).sum().item() < n * 0.02 |
90 | | - torch.testing.assert_close(gradB1, gradB2, atol=0.18, rtol=0.3) |
91 | | - |
92 | | - # batched matrix multiply |
93 | | - if funcs[0] in [torch.bmm, torch.matmul]: |
94 | | - A = torch.randn( |
95 | | - size=(dim1, dim2, dim3), |
96 | | - device="cuda", |
97 | | - requires_grad=req_grad[0], |
98 | | - ) |
99 | | - B = torch.randn( |
100 | | - size=(dim1, dim3, dim4), |
101 | | - device="cuda", |
102 | | - requires_grad=req_grad[1], |
103 | | - ) |
104 | | - target = torch.randn( |
105 | | - size=(dim1, dim2, dim4), |
106 | | - device="cuda", |
107 | | - requires_grad=req_grad[1], |
108 | | - ) |
109 | | - torch.nn.init.xavier_uniform_(B) |
110 | | - |
111 | | - out_torch = funcs[0](A, B) |
112 | | - out_bnb = funcs[1](A, B) |
113 | | - |
114 | | - n = out_bnb.numel() |
115 | | - idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1) |
116 | | - assert (idx == 0).sum().item() < n * 0.01 |
117 | | - torch.testing.assert_close(out_bnb, out_torch, atol=0.027, rtol=0.2) |
118 | | - |
119 | | - if any(req_grad): |
120 | | - out_bnb.data.copy_(out_torch) |
121 | | - torch.cuda.synchronize() |
122 | | - loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean() |
123 | | - loss_bnb.backward() |
124 | | - gradA1 = A.grad |
125 | | - gradB1 = B.grad |
126 | | - A.grad = None |
127 | | - B.grad = None |
128 | | - |
129 | | - loss_torch = torch.nn.functional.mse_loss(out_torch, target).mean() |
130 | | - loss_torch.backward() |
131 | | - gradA2 = A.grad |
132 | | - gradB2 = B.grad |
133 | | - A.grad = None |
134 | | - B.grad = None |
135 | | - |
136 | | - if req_grad[0]: |
137 | | - torch.testing.assert_close(gradA1, gradA2, atol=0.015, rtol=0.1) |
138 | | - if req_grad[1]: |
139 | | - n = gradB1.numel() |
140 | | - idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3) |
141 | | - assert (idx == 0).sum().item() < n * 0.1 |
142 | | - idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3) |
143 | | - assert (idx == 0).sum().item() < n * 0.02 |
144 | | - |
145 | | - if funcs[0] in [torch.matmul]: |
146 | | - dim1 = dim1 - (dim1 % 16) |
147 | | - A = torch.randn( |
148 | | - size=(dim1, dim2, dim3), |
149 | | - device="cuda", |
150 | | - requires_grad=req_grad[0], |
151 | | - ) |
152 | | - dimB = (dim4, dim3) if transpose[1] else (dim3, dim4) |
153 | | - B = torch.randn(size=dimB, device="cuda", requires_grad=req_grad[1]) |
154 | | - target = torch.randn( |
155 | | - size=(dim1, dim2, dim4), |
156 | | - device="cuda", |
157 | | - requires_grad=req_grad[1], |
158 | | - ) |
159 | | - torch.nn.init.xavier_uniform_(B) |
160 | | - |
161 | | - if transpose[1]: |
162 | | - out_torch = funcs[0](A, B.t()) |
163 | | - out_bnb = funcs[1](A, B.t()) |
164 | | - else: |
165 | | - out_torch = funcs[0](A, B) |
166 | | - out_bnb = funcs[1](A, B) |
167 | | - |
168 | | - n = out_bnb.numel() |
169 | | - idx = torch.isclose(out_bnb, out_torch, atol=0.01, rtol=0.1) |
170 | | - assert (idx == 0).sum().item() < n * 0.0175 |
171 | | - idx = torch.isclose(out_bnb, out_torch, atol=0.035, rtol=0.2) |
172 | | - assert (idx == 0).sum().item() < n * 0.001 |
173 | | - |
174 | | - if any(req_grad): |
175 | | - out_bnb.data.copy_(out_torch) |
176 | | - torch.cuda.synchronize() |
177 | | - loss_bnb = torch.nn.functional.mse_loss(out_bnb, target).mean() |
178 | | - loss_bnb.backward() |
179 | | - gradA1 = A.grad |
180 | | - gradB1 = B.grad |
181 | | - A.grad = None |
182 | | - B.grad = None |
183 | | - |
184 | | - loss_torch = torch.nn.functional.mse_loss(out_torch, target).mean() |
185 | | - loss_torch.backward() |
186 | | - gradA2 = A.grad |
187 | | - gradB2 = B.grad |
188 | | - A.grad = None |
189 | | - B.grad = None |
190 | | - |
191 | | - if req_grad[0]: |
192 | | - torch.testing.assert_close(gradA1, gradA2, atol=0.015, rtol=0.1) |
193 | | - if req_grad[1]: |
194 | | - n = gradB1.numel() |
195 | | - idx = torch.isclose(gradB1, gradB2, atol=0.06, rtol=0.3) |
196 | | - assert (idx == 0).sum().item() < n * 0.1 |
197 | | - idx = torch.isclose(gradB1, gradB2, atol=0.10, rtol=0.3) |
198 | | - assert (idx == 0).sum().item() < n * 0.02 |
199 | 7 |
200 | 8 |
201 | 9 | @pytest.mark.deprecated