@@ -299,27 +299,17 @@ def is_inplace(op, variant):
 
 
 vjp_fail = {
-    skip('nn.functional.dropout'),  # randomness testing artifact
-    skip('nn.functional.rrelu'),  # randomness testing artifact
-    skip('bernoulli'),  # randomness testing artifact
-    skip('normal', ''),  # randomness testing artifact
-    skip('normal', 'number_mean'),  # randomness testing artifact
     xfail('tensor_split'),
     xfail('to_sparse'),
     xfail('nn.functional.ctc_loss'),
-    skip('nn.functional.feature_alpha_dropout', 'with_train'),  # fails on cuda, runs okay on cpu
-    skip('nn.functional.feature_alpha_dropout', 'without_train'),  # fails on cuda, runs okay on cpu
     skip('pca_lowrank', ''),  # fails on cuda, runs okay on cpu
     skip('svd_lowrank', ''),  # fails on cuda, runs okay on cpu
-    skip('nn.functional.dropout2d', ''),  # fails on cuda, runs okay on cpu
 }
 
 
 class TestOperators(TestCase):
     @ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
     @skipOps('TestOperators', 'test_grad', vjp_fail.union({
-        skip('nn.functional.fractional_max_pool2d'),  # fails on cuda, runs okay on cpu
-        skip('nn.functional.fractional_max_pool3d'),  # fails on cuda, runs okay on cpu
         xfail('linalg.eig'),  # diagonal_scatter does not support complex
     }))
     @opsToleranceOverride('TestOperators', 'test_grad', (
@@ -368,16 +358,9 @@ def wrapped_fn(*args, **kwargs):
 
     @ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
     @skipOps('TestOperators', 'test_jvp', set({
-        skip('nn.functional.dropout'),  # randomness testing artifact; not actually a problem
-        skip('nn.functional.rrelu'),  # randomness testing artifact; not actually a problem
-        skip('nn.functional.fractional_max_pool2d'),  # fails on cuda, runs okay on cpu
-        skip('nn.functional.fractional_max_pool3d'),  # fails on cuda, runs okay on cpu
         skip('nn.functional.max_pool1d'),  # fails on cpu, runs okay on cuda
-        skip('nn.functional.feature_alpha_dropout', 'with_train'),  # fails on cuda, runs okay on cpu
-        skip('nn.functional.feature_alpha_dropout', 'without_train'),  # fails on cuda, runs okay on cpu
         skip('pca_lowrank', ''),  # fails on cuda, runs okay on cpu
         skip('svd_lowrank', ''),  # fails on cuda, runs okay on cpu
-        skip('nn.functional.dropout2d', ''),  # fails on cuda, runs okay on cpu
 
         # =============================================
         # NB: The above failures also fail using PyTorch core's
@@ -389,8 +372,6 @@ def wrapped_fn(*args, **kwargs):
         # RuntimeError: Cannot access data pointer of Tensor that doesn't have storage
         xfail('tensor_split'),
 
-        skip('bernoulli'),  # cuda set seed randomness issues
-
         # BUG: runs and produces numerical differences
         skip('nn.functional.max_unpool1d'),  # fails everywhere except on mac
         skip('nn.functional.max_unpool2d'),  # fails everywhere except on windows
@@ -435,12 +416,7 @@ def test_jvp(self, device, dtype, op):
 
     @ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
     @skipOps('TestOperators', 'test_vjp', vjp_fail.union({
-        skip('nn.functional.fractional_max_pool2d'),  # fails on cpu, runs okay on cuda
-        skip('nn.functional.fractional_max_pool3d'),  # fails on cpu, runs okay on cuda
-        xfail('nn.functional.feature_alpha_dropout', 'with_train'),
         xfail('pca_lowrank', ''),
-        xfail('nn.functional.dropout2d', ''),
-        xfail('nn.functional.feature_alpha_dropout', 'without_train'),
         xfail('svd_lowrank', ''),
     }))
     @opsToleranceOverride('TestOperators', 'test_vjp', (
@@ -484,8 +460,6 @@ def _test(_op):
     @skipOps('TestOperators', 'test_vjpvjp', vjp_fail.union({
         skip('nn.functional.max_unpool1d'),  # Flaky
         skip('nn.functional.max_unpool2d'),  # Flaky
-        skip('nn.functional.fractional_max_pool2d'),  # randomness
-        skip('nn.functional.fractional_max_pool3d'),  # randomness
     }))
     @opsToleranceOverride('TestOperators', 'test_vjpvjp', (
         tol1('nn.functional.conv_transpose3d',
@@ -576,7 +550,11 @@ def vjp_of_vjp(*args_and_cotangents):
         skip('bernoulli'),  # randomness
         skip('normal', ''),  # randomness
         skip('normal', 'number_mean'),  # randomness
-        xfail('nn.functional.dropout'),  # randomness
+        skip('nn.functional.rrelu'),  # randomness
+        skip('nn.functional.feature_alpha_dropout', 'with_train'),  # randomness
+        skip('nn.functional.feature_alpha_dropout', 'without_train'),  # randomness
+        skip('nn.functional.dropout'),  # randomness
+        skip('nn.functional.dropout2d'),  # randomness
         xfail('as_strided'),  # as_strided is too wild for us to support, wontfix
         xfail('index_put', ''),  # not possible due to dynamic shapes; we support a subset
         xfail('masked_scatter'),  # dynamic
@@ -934,6 +912,9 @@ def test():
         skip('bernoulli', ''),  # vjpvmap testing can't handle randomness
         skip('normal', ''),  # vjpvmap testing can't handle randomness
         skip('normal', 'number_mean'),  # vjpvmap testing can't handle randomness
+        skip('nn.functional.rrelu'),  # randomness
+        skip('nn.functional.feature_alpha_dropout', 'with_train'),  # randomness
+        skip('nn.functional.feature_alpha_dropout', 'without_train'),  # randomness
 
         # fallback path doesn't work
         # All of the following are bugs and need to be fixed
@@ -951,8 +932,6 @@ def test():
         xfail('nn.functional.dropout2d', ''),
         xfail('svd_lowrank', ''),
         xfail('pca_lowrank', ''),
-        xfail('nn.functional.feature_alpha_dropout', 'without_train'),
-        xfail('nn.functional.feature_alpha_dropout', 'with_train'),
         xfail('clamp'),
         # something weird happening with channels_last
         xfail('bfloat16'),
@@ -1025,10 +1004,6 @@ def get_vjp(cotangents, *primals):
 
     @ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
     @skipOps('TestOperators', 'test_jvpvjp', vjp_fail.union({
-        # These are weirdly non-deterministic
-        skip('nn.functional.fractional_max_pool2d'),  # Random
-        skip('nn.functional.fractional_max_pool3d'),  # Random
-
         # RuntimeError: Trying to set a forward gradient that has a different size than that of the original Tensor,
         # this is not supported. Tensor is of size [5, 2, 3] while the given forward gradient is of size [1, 2, 3].
         xfail('normal', ''),
@@ -1049,11 +1024,8 @@ def get_vjp(cotangents, *primals):
         xfail('nn.functional.softmin', 'with_dtype'),
         xfail('renorm', ''),
         xfail('symeig', ''),
-        xfail('nn.functional.feature_alpha_dropout', 'with_train'),
         skip('nn.functional.kl_div', ''),  # will pass when linux cpu binaries update
         xfail('pca_lowrank', ''),
-        xfail('nn.functional.dropout2d', ''),
-        xfail('nn.functional.feature_alpha_dropout', 'without_train'),
         xfail('svd_lowrank', ''),
         xfail('nn.functional.multilabel_margin_loss', ''),
         xfail('nn.functional.multilabel_soft_margin_loss', ''),
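For readers following the change: the `skip(...)` / `xfail(...)` entries touched above are collected into sets and fed to OpInfo-driven tests through the `skipOps` decorator from functorch's test utilities. The real helpers and their exact signatures are not shown in this diff; the snippet below is only a minimal, self-contained sketch of the pattern, with simplified stand-in definitions (not the actual functorch implementation):

from collections import namedtuple

# Hypothetical, simplified stand-ins for functorch's skip/xfail/skipOps test helpers.
Rule = namedtuple('Rule', ['op_name', 'variant', 'expected_failure'])

def skip(op_name, variant=''):
    # Skip the (op, variant) outright, e.g. because randomness makes it untestable.
    return Rule(op_name, variant, False)

def xfail(op_name, variant=''):
    # Run the (op, variant) but expect failure, i.e. a known bug being tracked.
    return Rule(op_name, variant, True)

def skipOps(cls_name, test_name, rules):
    # Decorator factory: attach the rules so an OpInfo test runner can look up
    # each (op, variant) before running the test and skip or expect-fail it.
    def decorator(test_fn):
        test_fn._skip_rules = {(r.op_name, r.variant): r for r in rules}
        return test_fn
    return decorator

vjp_fail = {
    xfail('tensor_split'),
    skip('pca_lowrank', ''),  # fails on cuda, runs okay on cpu
}

@skipOps('TestOperators', 'test_vjp', vjp_fail.union({
    xfail('svd_lowrank', ''),
}))
def test_vjp(device, dtype, op):
    ...  # the real test compares the functorch vjp transform against autograd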