@@ -591,8 +591,6 @@ def vjp_of_vjp(*args_and_cotangents):
         xfail('eig'),  # calls aten::item
         xfail('linalg.eig'),  # Uses aten::allclose
         xfail('linalg.householder_product'),  # needs select_scatter
-        xfail('linalg.slogdet'),  # calls .item()
-        xfail('logdet'),  # calls .item()
         xfail('matrix_exp'),  # would benefit from narrow_scatter
         xfail('nanquantile'),  # checks q via a .item() call
         xfail('nn.functional.gaussian_nll_loss'),  # checks var for if any value < 0
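The two removals above presumably track the reworked _linalg_slogdet kernel no longer calling .item(); the same ops reappear in the batch-rule list further down. As a minimal sketch of the failure class the remaining "calls .item()" entries mark (hypothetical function, assuming functorch's vmap is importable), a data-dependent .item() cannot produce one Python scalar per batch element, so the transform raises:

    import torch
    from functorch import vmap

    def calls_item(x):
        # .item() extracts a single Python scalar; under vmap there is no
        # one scalar per call, so the transform raises a RuntimeError.
        return x * x.sum().item()

    try:
        vmap(calls_item)(torch.randn(3, 2))
    except RuntimeError as err:
        print("vmap rejects .item():", err)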
@@ -762,7 +760,7 @@ def test_vmapjvpall(self, device, dtype, op):
         xfail('nn.functional.feature_alpha_dropout', 'without_train'),
         xfail('linalg.lu_factor', ''),
         xfail('nn.functional.dropout2d', ''),
-        xfail('nn.functional.kl_div', ''),
+        skip('nn.functional.kl_div', ''),  # will pass when linux cpu binaries update
         xfail('pca_lowrank', ''),
         xfail('svd_lowrank', ''),
         xfail('linalg.lu_factor_ex', ''),
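The xfail-to-skip switch matters because an xfail entry asserts the test still fails: once updated linux cpu binaries make kl_div pass, the xfail itself would start erroring as an unexpected pass, while a skip stays quiet either way. A rough analogy in plain unittest terms (this suite's real xfail/skip helpers build DecorateInfo entries, so the decorators below are only an illustration):

    import unittest

    class KlDivAnalogy(unittest.TestCase):
        @unittest.expectedFailure  # like xfail: an unexpected pass is reported
        def test_xfail_style(self):
            self.assertEqual(1, 2)

        @unittest.skip("will pass when linux cpu binaries update")  # never runs
        def test_skip_style(self):
            self.assertEqual(1, 1)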
@@ -783,6 +781,8 @@ def test_vmapjvpall(self, device, dtype, op):
         xfail('nn.functional.bilinear'),  # trilinear doesn't have batching rule
         xfail('linalg.eigh'),  # _linalg_eigh doesn't have batching rule
         xfail('linalg.eigvalsh'),  # _linalg_eigh doesn't have batching rule
+        xfail('logdet'),  # _linalg_slogdet doesn't have batching rule
+        xfail('linalg.slogdet'),  # _linalg_slogdet doesn't have batching rule
     }))
     @toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
     def test_vmapjvpall_has_batch_rule(self, device, dtype, op):
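Moved here from the .item() list, logdet and linalg.slogdet now fail only the has-batch-rule check: without a batching rule, vmap still produces correct results through its slow per-sample fallback, which this test flags. A small sketch of that observable behavior (shapes are hypothetical; positive-definite inputs keep logdet finite):

    import torch
    from functorch import vmap

    a = torch.randn(4, 3, 3)
    xs = a @ a.mT + 0.1 * torch.eye(3)   # batch of positive-definite matrices

    looped = torch.stack([torch.logdet(x) for x in xs])
    batched = vmap(torch.logdet)(xs)     # correct, but may hit the vmap fallback
    assert torch.allclose(looped, batched, atol=1e-5)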
@@ -881,7 +881,7 @@ def test():
         xfail('linalg.tensorsolve'),
         xfail('linalg.lu_factor', ''),
         xfail('nn.functional.feature_alpha_dropout', 'with_train'),
-        xfail('nn.functional.kl_div', ''),
+        skip('nn.functional.kl_div', ''),  # will pass when linux cpu binaries update
         xfail('pca_lowrank', ''),
         xfail('nn.functional.dropout2d', ''),
         xfail('nn.functional.feature_alpha_dropout', 'without_train'),
@@ -1043,9 +1043,7 @@ def get_vjp(cotangents, *primals):
         xfail('cdist', ''),
         xfail('cholesky', ''),
         xfail('eig', ''),
-        xfail('linalg.slogdet', ''),
         xfail('logcumsumexp', ''),
-        xfail('logdet', ''),
         xfail('nn.functional.embedding_bag', ''),
         xfail('nn.functional.grid_sample', ''),
         xfail('nn.functional.hardsigmoid', ''),
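As in the first hunk, dropping these two entries suggests the reworked _linalg_slogdet backward no longer calls .item(), so vjp composes with logdet again. A hedged sketch under that assumption, using the identity d logdet(X)/dX = X^(-T):

    import torch
    from functorch import vjp

    a = torch.randn(3, 3)
    x = a @ a.mT + 0.1 * torch.eye(3)   # positive-definite, logdet finite

    out, vjp_fn = vjp(torch.logdet, x)
    (grad_x,) = vjp_fn(torch.ones_like(out))
    # The analytic gradient of logdet is the inverse transpose.
    assert torch.allclose(grad_x, torch.inverse(x).mT, atol=1e-4)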
@@ -1057,7 +1055,7 @@ def get_vjp(cotangents, *primals):
         xfail('renorm', ''),
         xfail('symeig', ''),
         xfail('nn.functional.feature_alpha_dropout', 'with_train'),
-        xfail('nn.functional.kl_div', ''),
+        skip('nn.functional.kl_div', ''),  # will pass when linux cpu binaries update
         xfail('pca_lowrank', ''),
         xfail('nn.functional.dropout2d', ''),
         xfail('nn.functional.feature_alpha_dropout', 'without_train'),