Skip to content

Commit 9663f2d

Browse files
[AUTOGENERATED] [release/2.6] skip 3D NCHW FP16 batchnorm test due to Native accuracy issue (#2391)
Cherry-pick of #2370 Co-authored-by: Dmitry Nikolaev <[email protected]>
1 parent 74d7ddf commit 9663f2d

File tree

1 file changed

+2
-4
lines changed

1 file changed

+2
-4
lines changed

test/test_nn.py

Lines changed: 2 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -5126,6 +5126,8 @@ def test_batchnorm_nhwc_cuda(self):
51265126
name_fn=lambda f, b, m, t: f"{f}_vs_{b}{'_mixed' if m else ''}_{dtype_name(t)}"
51275127
)
51285128
def test_batchnorm(self, dims, mode, memory_format, ref_backend, mixed, dtype):
5129+
if self._testMethodName == "test_batchnorm_3D_train_NCHW_vs_native_mixed_float16":
5130+
self.skipTest("3D float16 NCHW train failed on CUDA and ROCm due to Native batchnorm accuracy issue SWDEV-541024")
51295131
if torch.version.hip:
51305132
if self._testMethodName in ("test_batchnorm_2D_train_NHWC_vs_NCHW_mixed_bfloat16",
51315133
"test_batchnorm_2D_train_NCHW_vs_cpu_mixed_bfloat16",
@@ -5141,10 +5143,6 @@ def test_batchnorm(self, dims, mode, memory_format, ref_backend, mixed, dtype):
51415143
) and _get_torch_rocm_version() >= (6, 4):
51425144
self.skipTest("bfloat16 NCHW train failed due to native tolerance issue SWDEV-507600")
51435145

5144-
if self._testMethodName == "test_batchnorm_3D_train_NCHW_vs_native_mixed_float16" \
5145-
and _get_torch_rocm_version() < (6, 4):
5146-
self.skipTest("3D float16 NCHW train failed on ROCm<=6.3 ")
5147-
51485146
if dims == 3 and memory_format in ("NHWC", "NCHW"):
51495147
memory_format = memory_format + "3D"
51505148

0 commit comments

Comments (0)