@@ -66,7 +66,7 @@ def test_forward_x1_neq_x2(self, use_keops=True, ard=False, **kwargs):
             # The patch makes sure that we're actually using KeOps
             k1 = kern1(x1, x2).to_dense()
             k2 = kern2(x1, x2).to_dense()
-            self.assertLess(torch.norm(k1 - k2), 1e-4)
+            self.assertLess(torch.norm(k1 - k2), 1e-3)

             if use_keops:
                 self.assertTrue(keops_mock.called)
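
For context, a minimal sketch of the forward-comparison pattern this hunk tests, assuming gpytorch and pykeops are installed; the kernel choice, input sizes, and the state_dict copy are illustrative, not taken from the test file:

import torch
from gpytorch.kernels import RBFKernel
from gpytorch.kernels.keops import RBFKernel as KeOpsRBFKernel

x1, x2 = torch.randn(20, 3), torch.randn(15, 3)
kern1, kern2 = KeOpsRBFKernel(), RBFKernel()
kern2.load_state_dict(kern1.state_dict())  # identical hyperparameters

# KeOps reduces in a different order than dense PyTorch, so the two
# dense kernel matrices agree only up to small floating-point error,
# hence the relaxed 1e-3 tolerance in this commit.
k1 = kern1(x1, x2).to_dense()
k2 = kern2(x1, x2).to_dense()
assert torch.norm(k1 - k2) < 1e-3
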
@@ -86,7 +86,7 @@ def test_batch_matmul(self, use_keops=True, **kwargs):
             # The patch makes sure that we're actually using KeOps
             res1 = kern1(x1, x1).matmul(rhs)
             res2 = kern2(x1, x1).matmul(rhs)
-            self.assertLess(torch.norm(res1 - res2), 1e-4)
+            self.assertLess(torch.norm(res1 - res2), 1e-3)

             if use_keops:
                 self.assertTrue(keops_mock.called)
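
The matmul hunk exercises the lazy path: kern(x1, x1) returns a linear operator, and matmul on the KeOps side can be evaluated as a KeOps reduction without materializing the full kernel matrix. A hedged sketch under the same assumptions as above (batch sizes are illustrative):

import torch
from gpytorch.kernels import RBFKernel
from gpytorch.kernels.keops import RBFKernel as KeOpsRBFKernel

x1 = torch.randn(3, 20, 3)   # batch of 3 input sets
rhs = torch.randn(3, 20, 2)  # right-hand sides for the matmul
kern1, kern2 = KeOpsRBFKernel(), RBFKernel()
kern2.load_state_dict(kern1.state_dict())

# Both calls return lazy operators; matmul evaluates them against rhs.
res1 = kern1(x1, x1).matmul(rhs)
res2 = kern2(x1, x1).matmul(rhs)
assert torch.norm(res1 - res2) < 1e-3  # the relaxed tolerance
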
@@ -115,7 +115,7 @@ def test_gradient(self, use_keops=True, ard=False, **kwargs):
             # stack all gradients into a tensor
             grad_s1 = torch.vstack(torch.autograd.grad(s1, [*kern1.hyperparameters()]))
             grad_s2 = torch.vstack(torch.autograd.grad(s2, [*kern2.hyperparameters()]))
-            self.assertAllClose(grad_s1, grad_s2, rtol=1e-4, atol=1e-5)
+            self.assertAllClose(grad_s1, grad_s2, rtol=1e-3, atol=1e-3)

             if use_keops:
                 self.assertTrue(keops_mock.called)
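
Finally, a sketch of the gradient check: each kernel matrix is reduced to a scalar, differentiated with respect to the kernel's raw hyperparameters, and the stacked gradients are compared. The scalar reduction used here (a plain sum) is an assumption; the test file may use a different summary:

import torch
from gpytorch.kernels import RBFKernel
from gpytorch.kernels.keops import RBFKernel as KeOpsRBFKernel

x1 = torch.randn(20, 3)
kern1, kern2 = KeOpsRBFKernel(), RBFKernel()
kern2.load_state_dict(kern1.state_dict())

# Scalar summaries of each kernel matrix (assumed to be a sum).
s1 = kern1(x1, x1).to_dense().sum()
s2 = kern2(x1, x1).to_dense().sum()

# Stack the per-hyperparameter gradients and compare at the relaxed
# rtol/atol from this commit.
grad_s1 = torch.vstack(torch.autograd.grad(s1, [*kern1.hyperparameters()]))
grad_s2 = torch.vstack(torch.autograd.grad(s2, [*kern2.hyperparameters()]))
assert torch.allclose(grad_s1, grad_s2, rtol=1e-3, atol=1e-3)
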