Skip to content

Commit 4dd04db

Browse files
Revert "[Inductor][ROCm][CK] Enable lowering conv2d instances in CK Inductor backend (pytorch#138643)"
This reverts commit 4d92d6e. Reverted pytorch#138643 on behalf of https://github.com/wdvr due to a large number of internal failures, see below ([comment](pytorch#138643 (comment)))
1 parent d90717e commit 4dd04db

File tree

2 files changed

+1
-51
lines changed

2 files changed

+1
-51
lines changed

test/inductor/test_ck_backend.py

Lines changed: 0 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -364,44 +364,6 @@ def linear(x_fp8, x_inverse_scale, w_t_fp8, w_inverse_scale, bias):
364364

365365
torch.testing.assert_close(y_eager, y_compiled, rtol=1e-2, atol=0.05)
366366

367-
@unittest.skipIf(not torch.version.hip, "ROCM only")
368-
@unittest.mock.patch.dict(
369-
os.environ,
370-
{"PATH": _get_path_without_sccache(), "PYTORCH_MIOPEN_SUGGEST_NHWC": "1"},
371-
)
372-
@parametrize("max_autotune_conv_backends", ("CK", "ATEN,CK,TRITON"))
373-
def test_max_autotune_conv2d(self, max_autotune_conv_backends):
374-
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
375-
376-
tensor_options = {"device": "cuda", "dtype": torch.float32}
377-
378-
x = torch.randn(1, 8, 224, 224, **tensor_options)
379-
w = torch.randn(64, 8, 7, 7, **tensor_options)
380-
x_cl = x.to(memory_format=torch.channels_last)
381-
w_cl = w.to(memory_format=torch.channels_last)
382-
383-
assert "rocm" in dir(config)
384-
385-
with config.patch(
386-
{
387-
"max_autotune": True,
388-
"autotune_in_subproc": False,
389-
"max_autotune_conv_backends": max_autotune_conv_backends,
390-
"compile_threads": 4,
391-
"rocm.ck_dir": self.ck_dir,
392-
"rocm.n_max_profiling_configs": 4,
393-
}
394-
):
395-
396-
@torch.compile(dynamic=False)
397-
def conv2d(x, w):
398-
return torch.conv2d(x, w)
399-
400-
Y_eager = torch.conv2d(x_cl, w_cl)
401-
Y_compiled = conv2d(x_cl, w_cl)
402-
403-
torch.testing.assert_close(Y_compiled, Y_eager, atol=2e-4, rtol=2e-4)
404-
405367

406368
if __name__ == "__main__":
407369
from torch._inductor.utils import is_big_gpu

torch/_inductor/kernel/conv.py

Lines changed: 1 addition & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
from typing import cast, List, Optional, Sequence, Tuple, TYPE_CHECKING, TypedDict
77

88
import torch
9-
from torch._inductor.codegen.rocm.ck_conv_template import CKGroupedConvFwdTemplate
109

1110
from .. import config, ir
1211
from ..lowering import (
@@ -26,7 +25,6 @@
2625
is_zeros,
2726
pad_listlike,
2827
sympy_product,
29-
use_ck_conv_template,
3028
use_triton_template,
3129
)
3230
from ..virtualized import V
@@ -661,17 +659,7 @@ def channels_last_conv():
661659
num_warps=cfg.num_warps,
662660
**cfg.kwargs,
663661
)
664-
if use_ck_conv_template(layout):
665-
CKGroupedConvFwdTemplate.add_ck_conv_choices(
666-
choices,
667-
layout,
668-
input_nodes=(x, weight) + ((bias,) if bias is not None else tuple()),
669-
stride=stride,
670-
padding=padding,
671-
dilation=dilation,
672-
groups=groups,
673-
n_spatial_dimensions=ndim,
674-
)
662+
675663
return autotune_select_algorithm("convolution", choices, args, layout)
676664

677665

0 commit comments

Comments (0)