Commit 8a5465a

Author: Samantha Andow
Fix CI (#931)
* fix ci
* add batch rule xfail, skip kl_div until tomorrow
1 parent: 42757c2
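
The commit message separates two markers: an xfail entry still runs the test and turns CI red if the op unexpectedly starts passing, while a skip entry never runs the test at all. That is why kl_div moves from xfail to skip here: once the lagging linux cpu binaries update, kl_div will pass, and a leftover xfail would then fail CI as an unexpected success. A minimal sketch of that distinction, assuming helper shapes like the tuples functorch's test files feed to skipOps (the signatures and tuple layout below are illustrative, not the repo's actual definitions):

# Hypothetical sketch of the xfail/skip markers used in the diff below.
def xfail(op_name, variant_name=''):
    # Expected failure: the test runs; an unexpected pass fails CI,
    # prompting removal of the entry once e.g. a batching rule lands.
    return (op_name, variant_name, 'xfail')

def skip(op_name, variant_name=''):
    # Skip: the test never runs, so CI stays green whether the op
    # passes or fails -- safe across the pending binary update.
    return (op_name, variant_name, 'skip')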

2 files changed: 5 additions, 8 deletions

test/test_ops.py

Lines changed: 5 additions & 7 deletions
@@ -591,8 +591,6 @@ def vjp_of_vjp(*args_and_cotangents):
         xfail('eig'), # calls aten::item
         xfail('linalg.eig'), # Uses aten::allclose
         xfail('linalg.householder_product'), # needs select_scatter
-        xfail('linalg.slogdet'), # calls .item()
-        xfail('logdet'), # calls .item()
         xfail('matrix_exp'), # would benefit from narrow_scatter
         xfail('nanquantile'), # checks q via a .item() call
         xfail('nn.functional.gaussian_nll_loss'), # checks var for if any value < 0
@@ -762,7 +760,7 @@ def test_vmapjvpall(self, device, dtype, op):
         xfail('nn.functional.feature_alpha_dropout', 'without_train'),
         xfail('linalg.lu_factor', ''),
         xfail('nn.functional.dropout2d', ''),
-        xfail('nn.functional.kl_div', ''),
+        skip('nn.functional.kl_div', ''), # will pass when linux cpu binaries update
         xfail('pca_lowrank', ''),
         xfail('svd_lowrank', ''),
         xfail('linalg.lu_factor_ex', ''),
@@ -783,6 +781,8 @@ def test_vmapjvpall(self, device, dtype, op):
         xfail('nn.functional.bilinear'), # trilinear doesn't have batching rule
         xfail('linalg.eigh'), # _linalg_eigh doesn't have batching rule
         xfail('linalg.eigvalsh'), # _linalg_eigh doesn't have batching rule
+        xfail('logdet'), # _linalg_slogdet doesn't have batching rule
+        xfail('linalg.slogdet'), # _linalg_slogdet doesn't have batching rule
     }))
     @toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
     def test_vmapjvpall_has_batch_rule(self, device, dtype, op):
@@ -881,7 +881,7 @@ def test():
         xfail('linalg.tensorsolve'),
         xfail('linalg.lu_factor', ''),
         xfail('nn.functional.feature_alpha_dropout', 'with_train'),
-        xfail('nn.functional.kl_div', ''),
+        skip('nn.functional.kl_div', ''), # will pass when linux cpu binaries update
         xfail('pca_lowrank', ''),
         xfail('nn.functional.dropout2d', ''),
         xfail('nn.functional.feature_alpha_dropout', 'without_train'),
@@ -1043,9 +1043,7 @@ def get_vjp(cotangents, *primals):
         xfail('cdist', ''),
         xfail('cholesky', ''),
         xfail('eig', ''),
-        xfail('linalg.slogdet', ''),
         xfail('logcumsumexp', ''),
-        xfail('logdet', ''),
         xfail('nn.functional.embedding_bag', ''),
         xfail('nn.functional.grid_sample', ''),
         xfail('nn.functional.hardsigmoid', ''),
@@ -1057,7 +1055,7 @@ def get_vjp(cotangents, *primals):
         xfail('renorm', ''),
         xfail('symeig', ''),
         xfail('nn.functional.feature_alpha_dropout', 'with_train'),
-        xfail('nn.functional.kl_div', ''),
+        skip('nn.functional.kl_div', ''), # will pass when linux cpu binaries update
         xfail('pca_lowrank', ''),
         xfail('nn.functional.dropout2d', ''),
         xfail('nn.functional.feature_alpha_dropout', 'without_train'),
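
Several of the new xfail entries cite a missing batching rule for _linalg_slogdet. A batching rule is what lets vmap execute an op as a single vectorized kernel; without one, functorch falls back to a per-example loop and raises a performance warning, which is roughly what test_vmapjvpall_has_batch_rule checks for. A small sketch of observing that fallback, assuming the functorch-era import path and that the fallback reports itself via a warning (both are assumptions about behavior at this commit, not the test's exact mechanics):

import warnings
import torch
from functorch import vmap  # functorch-era import; newer PyTorch exposes torch.vmap

x = torch.randn(8, 3, 3)  # a batch of eight 3x3 matrices

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    out = vmap(torch.logdet)(x)  # per-sample logdet, result shape (8,)

# With no batching rule for _linalg_slogdet, the values are still correct,
# but they come from the slow per-example fallback, which warns; the xfail
# entries added above record exactly that gap.
print(out.shape, len(caught) > 0)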

test/test_pythonkey.py

Lines changed: 0 additions & 1 deletion
@@ -322,7 +322,6 @@ class TestEagerFusionOpInfo(TestCase):
         xfail('diag_embed'),
         xfail('linalg.householder_product'),
         xfail('logit'),
-        xfail('logdet'),
         xfail('matrix_exp'),
         xfail('trapezoid'),
         xfail('trapz'),
