@@ -462,12 +462,18 @@ def vjp_of_vjp(*args_and_cotangents):
         xfail('_masked.prod'),  # calls aten::item
         xfail('stft'),
         xfail('nn.functional.glu'),
-        xfail('nn.functional.conv_transpose1d', device_type='cuda'),
-        skip('nn.functional.conv_transpose2d', device_type='cuda'),
-        xfail('nn.functional.conv_transpose3d', device_type='cuda'),
+
         xfail('nn.functional.fractional_max_pool3d'),
         xfail('as_strided'),
         xfail('nn.functional.fractional_max_pool2d'),
+
+        # PyTorch changed its convolution recently.
+        # Maybe it is responsible for all of the following changes.
+        xfail('nn.functional.conv1d'),
+        xfail('nn.functional.conv_transpose1d'),
+        xfail('nn.functional.conv_transpose2d'),
+        xfail('nn.functional.conv_transpose3d'),
+
     })
     @ops(functorch_lagging_op_db + additional_op_db, allowed_dtypes=(torch.float,))
     @skipOps('TestOperators', 'test_vmapvjp', vmapvjp_fail)
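The hunk above drops the CUDA-only `conv_transpose*` entries and re-adds them (plus `nn.functional.conv1d`) as unconditional `xfail`s in the `vmapvjp_fail` set consumed by `@skipOps`. As a rough illustration of how a registry like this can gate parametrized op tests, here is a minimal sketch; the helper bodies below are assumptions for illustration, not functorch's actual implementation.

```python
# Minimal sketch of an xfail/skip registry for parametrized op tests.
# The names mirror the decorators in the diff above, but these bodies
# are assumed, not functorch's real code.
import functools
import unittest
from dataclasses import dataclass
from typing import Optional

@dataclass(frozen=True)
class Decoration:
    op_name: str
    variant: Optional[str] = None
    device_type: Optional[str] = None
    expected_failure: bool = True

def xfail(op_name, variant=None, *, device_type=None):
    # The test still runs; an unexpected pass would be flagged.
    return Decoration(op_name, variant, device_type, expected_failure=True)

def skip(op_name, variant=None, *, device_type=None):
    # The test is not run at all (useful when it crashes or hangs).
    return Decoration(op_name, variant, device_type, expected_failure=False)

def skipOps(test_case_name, test_name, decorations):
    # Wrap a `(self, device, dtype, op)` test method so matching ops
    # are skipped or treated as expected failures.
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(self, device, dtype, op):
            for d in decorations:
                if d.op_name != op.name:
                    continue
                # `device` is a string like 'cuda:0'; match by prefix.
                if d.device_type is not None and d.device_type not in device:
                    continue
                if not d.expected_failure:
                    raise unittest.SkipTest(f'skipped {op.name} on {device}')
                with self.assertRaises(Exception):  # expected failure
                    fn(self, device, dtype, op)
                return
            return fn(self, device, dtype, op)
        return wrapper
    return decorator
```

The `xfail`/`skip` distinction matters here: an `xfail` keeps exercising the op on every run (so a fix is noticed immediately), while a `skip` is reserved for cases that cannot safely run at all.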
@@ -542,6 +548,16 @@ def test_vmapvjp(self, device, dtype, op):
         # See https://github.com/pytorch/pytorch/issues/66357
         xfail('nn.functional.pad', 'circular'),
 
+        # RuntimeError: expand: the number of sizes provided (1) must be greater or equal to the number of dimensions in the tensor (2)
+        xfail('nanquantile'),
+        xfail('quantile'),
+
+        # RuntimeError: vmap: inplace arithmetic(self, *extra_args)
+        xfail('nn.functional.gelu'),
+
+        # Not implemented
+        xfail('scatter'),
+
         # =============================================
         # NB: The above failures also fail in PyTorch core.
         # The failures below only fail in functorch
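For context on what these new entries exempt: `test_vmapvjp` composes functorch's `vmap` over a `vjp` computation for each op in the database, and the listed ops (`quantile`, `nn.functional.gelu`, `scatter`, ...) currently error under that composition. Below is a hedged sketch of the composition for a single op; the real harness iterates over every OpInfo sample and checks against a vmap fallback, and `torch.sin` here is just a stand-in, not one of the failing ops.

```python
# Rough sketch of the vmap-over-vjp composition that test_vmapvjp
# exercises per op. `torch.sin` is a stand-in example; the actual test
# loops over OpInfo samples and compares against a non-vmapped reference.
import torch
from functorch import vmap, vjp

def f(x):
    return torch.sin(x)

batched_x = torch.randn(3, 5)          # batch of 3 inputs
batched_cotangent = torch.randn(3, 5)  # one cotangent per input

def vjp_of_f(x, cotangent):
    out, vjp_fn = vjp(f, x)            # forward pass + pullback
    return vjp_fn(cotangent)[0]        # gradient w.r.t. x

# vmap the entire vjp computation across the batch dimension.
batched_grads = vmap(vjp_of_f)(batched_x, batched_cotangent)

# Reference: a plain Python loop over the batch should agree.
expected = torch.stack([vjp_of_f(x, ct)
                        for x, ct in zip(batched_x, batched_cotangent)])
assert torch.allclose(batched_grads, expected)
```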