diff --git a/test/test_nn.py b/test/test_nn.py
index 358633ae435ee..2cc0b0122a1cf 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -5126,6 +5126,8 @@ def test_batchnorm_nhwc_cuda(self):
         name_fn=lambda f, b, m, t: f"{f}_vs_{b}{'_mixed' if m else ''}_{dtype_name(t)}"
     )
     def test_batchnorm(self, dims, mode, memory_format, ref_backend, mixed, dtype):
+        if self._testMethodName == "test_batchnorm_3D_train_NCHW_vs_native_mixed_float16":
+            self.skipTest("3D float16 NCHW train failed on CUDA and ROCm due to native batchnorm accuracy issue SWDEV-541024")
         if torch.version.hip:
             if self._testMethodName in ("test_batchnorm_2D_train_NHWC_vs_NCHW_mixed_bfloat16",
                                         "test_batchnorm_2D_train_NCHW_vs_cpu_mixed_bfloat16",
@@ -5141,10 +5143,6 @@ def test_batchnorm(self, dims, mode, memory_format, ref_backend, mixed, dtype):
                                         ) and _get_torch_rocm_version() >= (6, 4):
                 self.skipTest("bfloat16 NCHW train failed due to native tolerance issue SWDEV-507600")
 
-            if self._testMethodName == "test_batchnorm_3D_train_NCHW_vs_native_mixed_float16" \
-                    and _get_torch_rocm_version() < (6, 4):
-                self.skipTest("3D float16 NCHW train failed on ROCm<=6.3 ")
-
         if dims == 3 and memory_format in ("NHWC", "NCHW"):
             memory_format = memory_format + "3D"
 
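
Note on the mechanism: the added skip works because unittest records the fully expanded (parametrized) test name in self._testMethodName, so one known-bad parametrization can be skipped inside the shared test body without touching the decorator that generates every variant. Below is a minimal, self-contained sketch of that pattern; the class name, test name, and skip reason are illustrative stand-ins, not taken from test_nn.py.

import unittest

class BatchNormSkipSketch(unittest.TestCase):
    # Hypothetical stand-in for the parametrized test_batchnorm above.
    # unittest.TestCase stores the name of the currently running test
    # method in self._testMethodName, which lets the shared body bail out
    # for exactly one generated variant.
    def test_batchnorm_3D_train_NCHW_vs_native_mixed_float16(self):
        if self._testMethodName == "test_batchnorm_3D_train_NCHW_vs_native_mixed_float16":
            # skipTest raises unittest.SkipTest, so nothing after it runs
            self.skipTest("known accuracy issue in this configuration")
        self.fail("unreachable for the skipped parametrization")

if __name__ == "__main__":
    unittest.main()

Running this with "python -m unittest" reports the test as skipped with the given reason, which is the same behavior the PR relies on for the real parametrized test.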