@@ -13477,35 +13477,6 @@ def pad_same(x, k, s, d=(1, 1), value=0):
         ref = pad_same(x, (5, 5), (2, 2))
         self.assertEqual(res, ref, atol=0, rtol=0)
 
-    @skip_if_halide  # only 32-bit indexing
-    @largeTensorTest("16GB", inductor=True)
-    def test_split_reduction_with_int64_size(self):
-        if torch._inductor.config.cpu_backend == "triton":
-            raise unittest.SkipTest(
-                "Fails for triton cpu backend with error: https://gist.github.com/shunting314/a873fb32b6b7b5a437f44280ae86839f"
-            )
-
-        if self.device == "cpu":
-            raise unittest.SkipTest(
-                "The test sometimes fails on CI: "
-                "https://github.com/pytorch/pytorch/actions/runs/15333913377/job/43153170162. "
-                "Skip for now."
-            )
-
-        size = (30000, 100000)
-
-        # rand rather than randn, since the mean of the latter is close to 0,
-        # which happens to be close to the value generated by the bug.
-        t = torch.rand(size, dtype=torch.float, device=self.device)
-        op = torch.mean
-        expected = op(t)
-        actual = torch.compile(op)(t)
-        # self.common takes more GPU memory; do the check directly.
-        self.assertTrue(
-            torch.allclose(expected, actual, atol=1e-2, rtol=1e-2),
-            f"{expected=} {actual=}",
-        )
-
     def test_remove_noop_view_default(self):
         def f(x):
             batch_size = x.shape[0]
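
For context, a minimal standalone sketch of the check the removed test performed: comparing eager torch.mean against its torch.compile'd counterpart on a tensor whose element count (3e9) exceeds the 2^31 - 1 limit of 32-bit indexing, which is what exercises the int64 split-reduction path. The device string and memory budget below are assumptions (a CUDA device with roughly 16 GB free, matching the test's largeTensorTest marker), not part of the diff.

    import torch

    # 30000 * 100000 = 3e9 elements (~12 GB of float32), beyond 32-bit indexing
    size = (30000, 100000)
    t = torch.rand(size, dtype=torch.float, device="cuda")  # assumed device

    expected = torch.mean(t)               # eager baseline
    actual = torch.compile(torch.mean)(t)  # inductor-compiled reduction

    # Loose tolerances, as in the removed test, since accumulation order differs
    assert torch.allclose(expected, actual, atol=1e-2, rtol=1e-2), f"{expected=} {actual=}"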