
Commit b33262d

Samantha Andow authored and zou3519 committed
[functorch] fix CI (pytorch/functorch#816)
1 parent c865b35 commit b33262d

File tree

3 files changed: +8 −109 lines
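
For context (illustrative, not part of this commit): the tests touched below, such as test_grad, test_vmapjvp, and test_jvpvjp, exercise compositions of functorch transforms over PyTorch's OpInfo database. A minimal runnable sketch of the two most common transforms, grad and vmap:

import torch
from functorch import grad, vmap

def f(x):
    return (x ** 2).sum()

x = torch.randn(3)
print(grad(f)(x))          # gradient of a scalar-valued function: 2 * x

xs = torch.randn(5, 3)
print(vmap(grad(f))(xs))   # per-sample gradients over the batch dimension: 2 * xs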


functorch/test/test_eager_transforms.py

Lines changed: 0 additions & 2 deletions
@@ -19,7 +19,6 @@
 from torch.testing._internal.common_utils import IS_WINDOWS
 from functools import partial
 from functorch.experimental import replace_all_batch_norm_modules_
-from contextlib import nullcontext
 
 import functorch
 from functorch import (
@@ -815,7 +814,6 @@ def f2(value):
         self.assertEqual(result, (x <= 0).type_as(x))
 
     def test_tensor_ctor_inside_grad(self, device):
-        self.skipTest("Only fails on CUDA but I can't figure out how to test that")
         def foo(x):
             return x * torch.tensor(2., device=device)
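
The deleted self.skipTest(...) above skipped test_tensor_ctor_inside_grad on every backend even though the failure was CUDA-only. As a hedged sketch (plain unittest, not the PyTorch device-generic harness the functorch tests actually use; TensorCtorInsideGradTest and DEVICE are illustrative names), a device-conditional skip can look like this:

import unittest
import torch

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

class TensorCtorInsideGradTest(unittest.TestCase):
    def test_tensor_ctor_inside_grad(self):
        # Skip only when the test would actually run on CUDA,
        # instead of skipping it unconditionally everywhere.
        if torch.device(DEVICE).type == 'cuda':
            self.skipTest('known failure on CUDA only')
        x = torch.tensor(3., device=DEVICE)
        self.assertEqual((x * torch.tensor(2., device=DEVICE)).item(), 6.0)

if __name__ == '__main__':
    unittest.main()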

functorch/test/test_ops.py

Lines changed: 8 additions & 106 deletions
@@ -309,7 +309,6 @@ def is_inplace(op, variant):
     skip('pca_lowrank', ''),  # fails on cuda, runs okay on cpu
     skip('svd_lowrank', ''),  # fails on cuda, runs okay on cpu
     skip('nn.functional.dropout2d', ''),  # fails on cuda, runs okay on cpu
-    xfail('__getitem__', device_type='cuda'),
 }
 
 
@@ -318,18 +317,6 @@ class TestOperators(TestCase):
     @skipOps('TestOperators', 'test_grad', vjp_fail.union({
         skip('nn.functional.fractional_max_pool2d'),  # fails on cuda, runs okay on cpu
         skip('nn.functional.fractional_max_pool3d'),  # fails on cuda, runs okay on cpu
-        xfail('__getitem__', 'functorch', device_type='cuda'),
-        xfail('_masked.amax', device_type='cuda'),
-        xfail('_masked.amin', device_type='cuda'),
-        xfail('_masked.log_softmax', device_type='cuda'),
-        xfail('_masked.mean', device_type='cuda'),
-        xfail('_masked.norm', device_type='cuda'),
-        xfail('_masked.prod', device_type='cuda'),
-        xfail('_masked.softmax', device_type='cuda'),
-        xfail('_masked.softmin', device_type='cuda'),
-        xfail('_masked.std', device_type='cuda'),
-        xfail('_masked.sum', device_type='cuda'),
-        xfail('_masked.var', device_type='cuda'),
     }))
     @opsToleranceOverride('TestOperators', 'test_grad', (
         tol1('nn.functional.binary_cross_entropy_with_logits',
@@ -409,16 +396,6 @@ def wrapped_fn(*args, **kwargs):
         skip('nn.functional.max_unpool1d'),  # fails everywhere except on mac
         skip('nn.functional.max_unpool2d'),  # fails everywhere except on windows
         xfail('nn.functional.max_unpool3d'),
-        xfail('__getitem__', device_type='cuda'),
-        xfail('_masked.log_softmax', device_type='cuda'),
-        xfail('_masked.mean', device_type='cuda'),
-        xfail('_masked.norm', device_type='cuda'),
-        xfail('_masked.prod', device_type='cuda'),
-        xfail('_masked.softmax', device_type='cuda'),
-        xfail('_masked.softmin', device_type='cuda'),
-        xfail('_masked.std', device_type='cuda'),
-        xfail('_masked.sum', device_type='cuda'),
-        xfail('_masked.var', device_type='cuda'),
     }))
     @opsToleranceOverride('TestOperators', 'test_jvp', (
         tol1('nn.functional.conv_transpose3d',
@@ -466,19 +443,6 @@ def test_jvp(self, device, dtype, op):
         xfail('nn.functional.dropout2d', ''),
         xfail('nn.functional.feature_alpha_dropout', 'without_train'),
         xfail('svd_lowrank', ''),
-
-        xfail('__getitem__', 'functorch', device_type='cuda'),
-        xfail('_masked.amax', device_type='cuda'),
-        xfail('_masked.amin', device_type='cuda'),
-        xfail('_masked.log_softmax', device_type='cuda'),
-        xfail('_masked.mean', device_type='cuda'),
-        xfail('_masked.norm', device_type='cuda'),
-        xfail('_masked.prod', device_type='cuda'),
-        xfail('_masked.softmax', device_type='cuda'),
-        xfail('_masked.softmin', device_type='cuda'),
-        xfail('_masked.std', device_type='cuda'),
-        xfail('_masked.sum', device_type='cuda'),
-        xfail('_masked.var', device_type='cuda'),
     }))
     @opsToleranceOverride('TestOperators', 'test_vjp', (
         tol1('nn.functional.conv_transpose3d',
@@ -524,19 +488,6 @@ def _test(_op):
         skip('nn.functional.fractional_max_pool2d'),  # randomness
         skip('nn.functional.fractional_max_pool3d'),  # randomness
         xfail('nn.functional.binary_cross_entropy'),  # testing problem
-
-        xfail('__getitem__', 'functorch', device_type='cuda'),
-        xfail('_masked.amax', device_type='cuda'),
-        xfail('_masked.amin', device_type='cuda'),
-        xfail('_masked.log_softmax', device_type='cuda'),
-        xfail('_masked.mean', device_type='cuda'),
-        xfail('_masked.norm', device_type='cuda'),
-        xfail('_masked.prod', device_type='cuda'),
-        xfail('_masked.softmax', device_type='cuda'),
-        xfail('_masked.softmin', device_type='cuda'),
-        xfail('_masked.std', device_type='cuda'),
-        xfail('_masked.sum', device_type='cuda'),
-        xfail('_masked.var', device_type='cuda'),
     }))
     @opsToleranceOverride('TestOperators', 'test_vjpvjp', (
         tol1('nn.functional.conv_transpose3d',
@@ -672,19 +623,6 @@ def vjp_of_vjp(*args_and_cotangents):
         # NYI: querying is_contiguous inside of vmap for memory_format other than torch.contiguous_format
         xfail('nn.functional.max_unpool2d'),
         xfail('nn.functional.max_unpool2d', 'grad'),
-
-        xfail('__getitem__', 'functorch', device_type='cuda'),
-        xfail('_masked.amax', device_type='cuda'),
-        xfail('_masked.amin', device_type='cuda'),
-        xfail('_masked.log_softmax', device_type='cuda'),
-        xfail('_masked.mean', device_type='cuda'),
-        xfail('_masked.norm', device_type='cuda'),
-        xfail('_masked.prod', device_type='cuda'),
-        xfail('_masked.softmax', device_type='cuda'),
-        xfail('_masked.softmin', device_type='cuda'),
-        xfail('_masked.std', device_type='cuda'),
-        xfail('_masked.sum', device_type='cuda'),
-        xfail('_masked.var', device_type='cuda'),
     })
 
     @ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
@@ -774,18 +712,8 @@ def test_vmapvjp(self, device, dtype, op):
         xfail('nn.functional.max_unpool2d'),
         xfail('nn.functional.max_unpool3d'),
 
-        xfail('__getitem__', device_type='cuda'),
-        xfail('_masked.amax', device_type='cuda'),
-        xfail('_masked.amin', device_type='cuda'),
-        xfail('_masked.log_softmax', device_type='cuda'),
-        xfail('_masked.mean', device_type='cuda'),
-        xfail('_masked.norm', device_type='cuda'),
-        xfail('_masked.prod', device_type='cuda'),
-        xfail('_masked.softmax', device_type='cuda'),
-        xfail('_masked.softmin', device_type='cuda'),
-        xfail('_masked.std', device_type='cuda'),
-        xfail('_masked.sum', device_type='cuda'),
-        xfail('_masked.var', device_type='cuda'),
+        xfail('nn.functional.embedding'),  # embedding_renorm_ does not support fwd AD
+        xfail('put'),  # calls put_ during vmap with only vmaps over other, not self
     })
     def test_vmapjvp(self, device, dtype, op):
         if is_inplace(op, op.get_op()):
@@ -820,15 +748,13 @@ def test_vmapjvp(self, device, dtype, op):
 
         # The following are bugs that we should fix
         skip('nn.functional.max_pool1d'),  # fails on cpu, runs on cuda
-        xfail('_masked.mean', device_type='cuda'),
-        xfail('_masked.prod', device_type='cuda'),
         xfail('nn.functional.batch_norm', device_type='cuda'),
         xfail('nn.functional.batch_norm', 'without_cudnn', device_type='cuda'),
         xfail('nn.functional.hinge_embedding_loss', device_type='cuda'),
+        xfail('_masked.mean'),
+        xfail('_masked.prod'),
 
         # Causing issues with multiple cpu levels of forward mode AD
-        xfail('_masked.mean', device_type='cpu'),
-        xfail('_masked.prod', device_type='cpu'),
         xfail('nn.functional.batch_norm', device_type='cpu'),
         xfail('nn.functional.hinge_embedding_loss', device_type='cpu'),
 
@@ -863,18 +789,9 @@ def test_vmapjvp(self, device, dtype, op):
         xfail('nn.functional.max_unpool2d'),
         xfail('nn.functional.max_unpool3d'),
 
-        xfail('__getitem__', device_type='cuda'),
-        xfail('_masked.amax', device_type='cuda'),
-        xfail('_masked.amin', device_type='cuda'),
-        xfail('_masked.log_softmax', device_type='cuda'),
-        xfail('_masked.mean', device_type='cuda'),
-        xfail('_masked.norm', device_type='cuda'),
-        xfail('_masked.prod', device_type='cuda'),
-        xfail('_masked.softmax', device_type='cuda'),
-        xfail('_masked.softmin', device_type='cuda'),
-        xfail('_masked.std', device_type='cuda'),
-        xfail('_masked.sum', device_type='cuda'),
-        xfail('_masked.var', device_type='cuda'),
+        xfail('nn.functional.embedding'),  # embedding_renorm_ does not support fwd AD
+        xfail('put'),  # calls put_ during vmap with only vmaps over other, not self
+        xfail('nn.functional.prelu'),  # Call Tensor.as_strided
     }
 
     @ops(functorch_lagging_op_db, allowed_dtypes=(torch.float,))
@@ -962,6 +879,7 @@ def test_vmapjvpall(self, device, dtype, op):
         xfail('nn.functional.max_unpool1d', 'grad'),
         xfail('lu_unpack'),
         xfail('nn.functional.glu'),
+        xfail('nn.functional.bilinear'),  # trilinear doesn't have batching rule
     }))
     @toleranceOverride({torch.float32: tol(atol=1e-04, rtol=1e-04)})
     def test_vmapjvpall_has_batch_rule(self, device, dtype, op):
@@ -1222,11 +1140,9 @@ def test_vjpvmap(self, device, dtype, op):
         xfail('nansum', ''),
         xfail('nn.functional.batch_norm', ''),
         xfail('nn.functional.batch_norm', 'without_cudnn', device_type='cuda'),
-        xfail('nn.functional.bilinear', ''),
         xfail('nn.functional.embedding', ''),
         xfail('nn.functional.embedding', 'functorch'),
         xfail('nn.functional.embedding_bag', ''),
-        xfail('nn.functional.glu', ''),
         xfail('nn.functional.grid_sample', ''),
         xfail('nn.functional.hardsigmoid', ''),
         xfail('nn.functional.hardswish', ''),
@@ -1239,11 +1155,9 @@ def test_vjpvmap(self, device, dtype, op):
         xfail('nn.functional.softmin', ''),
         xfail('nn.functional.softmin', 'with_dtype'),
         xfail('nn.functional.softplus', ''),
-        xfail('put', ''),
         xfail('renorm', ''),
         xfail('std_mean', ''),
         xfail('symeig', ''),
-        xfail('take', ''),
         xfail('var_mean', ''),
         xfail('nn.functional.feature_alpha_dropout', 'with_train'),
         xfail('nn.functional.kl_div', ''),
@@ -1264,18 +1178,6 @@ def test_vjpvmap(self, device, dtype, op):
         xfail('scatter_reduce', 'prod'),
         skip('linalg.householder_product', '', device_type='cuda'),  # flaky, I'm not sure why
         xfail('nn.functional.binary_cross_entropy_with_logits'),
-        xfail('__getitem__', 'functorch', device_type='cuda'),
-        xfail('_masked.amax', device_type='cuda'),
-        xfail('_masked.amin', device_type='cuda'),
-        xfail('_masked.log_softmax', device_type='cuda'),
-        xfail('_masked.mean', device_type='cuda'),
-        xfail('_masked.norm', device_type='cuda'),
-        xfail('_masked.prod', device_type='cuda'),
-        xfail('_masked.softmax', device_type='cuda'),
-        xfail('_masked.softmin', device_type='cuda'),
-        xfail('_masked.std', device_type='cuda'),
-        xfail('_masked.sum', device_type='cuda'),
-        xfail('_masked.var', device_type='cuda'),
     }))
     def test_jvpvjp(self, device, dtype, op):
         if not op.supports_autograd:
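
Most of this file's change removes xfail(...) and skip(...) entries consumed by the @skipOps(...) decorators visible in the context lines. As a rough illustrative sketch of how expected-failure metadata with an optional device_type filter can work (DecorateMeta, xfail, skip, and applies_to are assumptions for illustration, not functorch's actual helpers):

import unittest
from collections import namedtuple

# Illustrative stand-in for an "expected failure / skip" record keyed by op,
# variant, and an optional device backend.
DecorateMeta = namedtuple('DecorateMeta', ['op_name', 'variant', 'device_type', 'decorator'])

def xfail(op_name, variant='', *, device_type=None):
    # Mark an (op, variant) pair as an expected failure, optionally on one backend only.
    return DecorateMeta(op_name, variant, device_type, unittest.expectedFailure)

def skip(op_name, variant='', *, device_type=None):
    # Skip the (op, variant) pair entirely, optionally on one backend only.
    return DecorateMeta(op_name, variant, device_type, unittest.skip('skipped op'))

def applies_to(meta, op_name, variant, device):
    # device_type=None means "every device"; otherwise only the matching backend.
    return (meta.op_name == op_name
            and meta.variant == variant
            and (meta.device_type is None or device.startswith(meta.device_type)))

# Example: a cuda-only __getitem__ xfail like the ones removed in this commit
# would not apply to a cpu run, so cpu CI could pass while cuda stayed broken.
meta = xfail('__getitem__', device_type='cuda')
print(applies_to(meta, '__getitem__', '', 'cpu'))     # False
print(applies_to(meta, '__getitem__', '', 'cuda:0'))  # True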

functorch/test/test_pythonkey.py

Lines changed: 0 additions & 1 deletion
@@ -376,7 +376,6 @@ class TestEagerFusionOpInfo(TestCase):
         xfail('linalg.householder_product'),
         xfail('logit'),
         xfail('matrix_exp'),
-        xfail('trace'),
         xfail('trapezoid'),
         xfail('trapz'),
         skip('nn.functional.binary_cross_entropy_with_logits'),  # seems to fail sometimes?
