Commit d5542b8

[AUTOGENERATED] [release/2.7] skip 3D NCHW FP16 batchnorm test due to Native accuracy issue (#2390)
Cherry-pick of #2370
Co-authored-by: Dmitry Nikolaev <[email protected]>

1 parent 35daec9 · commit d5542b8

1 file changed: +2 -4 lines changed

test/test_nn.py

Lines changed: 2 additions & 4 deletions
@@ -5177,6 +5177,8 @@ def test_batchnorm_nhwc_cuda(self):
         name_fn=lambda f, b, m, t: f"{f}_vs_{b}{'_mixed' if m else ''}_{dtype_name(t)}"
     )
     def test_batchnorm(self, dims, mode, memory_format, ref_backend, mixed, dtype):
+        if self._testMethodName == "test_batchnorm_3D_train_NCHW_vs_native_mixed_float16":
+            self.skipTest("3D float16 NCHW train failed on CUDA and ROCm due to Native batchnorm accuracy issue SWDEV-541024")
         if torch.version.hip:
             if self._testMethodName in ("test_batchnorm_2D_train_NHWC_vs_NCHW_mixed_bfloat16",
                                         "test_batchnorm_2D_train_NCHW_vs_cpu_mixed_bfloat16",
@@ -5192,10 +5194,6 @@ def test_batchnorm(self, dims, mode, memory_format, ref_backend, mixed, dtype):
             ) and _get_torch_rocm_version() >= (6, 4):
                 self.skipTest("bfloat16 NCHW train failed due to native tolerance issue SWDEV-507600")

-            if self._testMethodName == "test_batchnorm_3D_train_NCHW_vs_native_mixed_float16" \
-                    and _get_torch_rocm_version() < (6, 4):
-                self.skipTest("3D float16 NCHW train failed on ROCm<=6.3 ")
-
         if dims == 3 and memory_format in ("NHWC", "NCHW"):
             memory_format = memory_format + "3D"

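For context, here is a minimal standalone sketch (not part of this commit) of how the parametrization's name_fn shown in the diff could produce the name of the test being skipped. The example argument values and the dtype_name stub below are assumptions for illustration only.

# Minimal sketch, not from the commit: shows how name_fn (copied from the
# diff above) maps parametrization arguments to the suffix of the skipped test.
def dtype_name(t):
    # Hypothetical stand-in for the test suite's dtype_name helper.
    return str(t).replace("torch.", "")

name_fn = lambda f, b, m, t: f"{f}_vs_{b}{'_mixed' if m else ''}_{dtype_name(t)}"

# Assumed argument values; only the resulting name matters here.
suffix = name_fn("3D_train_NCHW", "native", True, "torch.float16")
print("test_batchnorm_" + suffix)
# -> test_batchnorm_3D_train_NCHW_vs_native_mixed_float16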