@@ -4108,6 +4108,57 @@ def backward(ctx, gO):
         ):
             fn()
 
+    def test_higher_order_gradients(self):
+        def f(x):
+            return x**3
+
+        def fn(fwd_compiler, ca_compiler):
+            torch.manual_seed(123)
+            x = torch.tensor(2.0, requires_grad=True)
+            first, second, third, fourth = None, None, None, None
+            try:
+                with compiled_autograd._enable(ca_compiler):
+                    first = torch.autograd.grad(
+                        fwd_compiler(f)(x), x, create_graph=True
+                    )[0]
+                    second = torch.autograd.grad(first, x, create_graph=True)[0]
+                    third = torch.autograd.grad(second, x, create_graph=True)[0]
+                    fourth = torch.autograd.grad(third, x, create_graph=True)[0]
+            except RuntimeError as e:
+                assert "does not currently support higher order gradients" in str(e)
+                return (first, second, third, fourth)
+
+            return (first, second, third, fourth)
+
+        def eager():
+            return torch.compile(backend="eager")
+
+        def aot_eager():
+            return torch.compile(backend="aot_eager")
+
+        # Without AOTAutograd, no problem
+        first, second, third, fourth = fn(eager(), eager())
+        self.assertEqual(counters["compiled_autograd"]["captures"], 4)
+        self.assertEqual(first, 12)  # 3x^2
+        self.assertEqual(second, 12)  # 6x
+        self.assertEqual(third, 6)  # 6
+        self.assertEqual(fourth, 0)
+        # and should cache hit
+        counters.clear()
+        _ = fn(eager(), eager())
+        self.assertEqual(counters["compiled_autograd"]["captures"], 0)
+        torch._dynamo.reset()
+
+        # With AOTAutograd, can't create_graph
+        first, second, third, fourth = fn(aot_eager(), aot_eager())
+        self.assertIsNone(second)
+
+        first, second, third, fourth = fn(aot_eager(), eager())
+        self.assertIsNone(second)
+
+        first, second, third, fourth = fn(eager(), aot_eager())
+        self.assertIsNone(third)
+
 
 def load_test_module(name):
     testdir = Path(__file__).absolute().parent.parent
@@ -4227,6 +4278,10 @@ def wrap_test_class(orig_cls):
42274278 "test_prehook_ordering" , # retains_grad_hooks
42284279 "test_will_engine_execute_node" , # retains_grad_hooks
42294280 "test_backward_to_node" , # retains_grad_hooks
4281+ "test_backward_with_nonleaf_inputs" , # retains_grad_hook on non-leaf input
4282+ "test_create_graph_and_full_backward_hook_cycle" , # _pack_with_none
4283+ "test_full_backward_hook_double_backward" , # _pack_with_none
4284+ "test_grad_mode_restored_reentrant" , # assertTrue
42304285}
42314286
42324287test_contexts = {
@@ -4246,42 +4301,20 @@ def wrap_test_class(orig_cls):
 
 known_failing_tests = {
     # Category: Compiled autograd
-    "test_grad_mode_restored_reentrant",  # create_graph
     "test_reentrant_with_callbacks_both_depths",  # queue_callback
     "test_reentrant_with_callbacks_depth_0",  # queue_callback
     "test_reentrant_with_callbacks_depth_1",  # queue_callback
     "test_current_graph_task_execution_order",  # nodes are already freed by the time dynamo traces the lifted hook
     "test_autograd_inplace_views_cross_dtype",  # view_fn not supported by compiled autograd
     "test_current_node",  # TorchDispatchMode not yet implemented for compiled autograd
     "test_post_accumulate_grad_hook_ordering",  # accuracy error
-    "test_accumulate_grad",  # create_graph
-    "test_anomaly_assign_parent_cleanup",  # create_graph
-    "test_backward_create_graph_warns",  # create_graph
-    "test_backward_with_nonleaf_inputs",  # create_graph
-    "test_create_graph_and_full_backward_hook_cycle",  # create_graph
     "test_current_graph_task_id",  # autograd state already cleared once dynamo is called
-    "test_custom_autograd_repeated_grad_grad",  # create_graph
     "test_custom_function_forward_mode_forward_is_no_op",  # forward AD
     "test_custom_function_forward_mode_inplace_checks",  # forward AD
     "test_custom_function_forward_mode_view_checks",  # forward AD
     "test_custom_function_forward_mode_wrong_formula",  # forward AD
-    "test_default_saved_tensors_hooks_double_backward",  # create_graph
     "test_node_post_hook_registered_during_unpack_hook",  # 'NoneType' object has no attribute 'register_hook'
-    "test_full_backward_hook_double_backward",  # create_graph
-    "test_function",  # create_graph
-    "test_grad",  # create_graph
-    "test_grad_materialize_grads",  # create_graph
-    "test_grad_nonleaf",  # create_graph
-    "test_grad_nonleaf_many_outputs",  # create_graph
-    "test_hessian_vector",  # create_graph
-    "test_inplace_on_view_backward",  # create_graph
     "test_multi_grad_any_hooks",  # register_multi_grad_hook
-    "test_nested_anomaly_detect_nan",  # create_graph
-    "test_nested_anomaly_printstack_cleanup",  # create_graph
-    "test_once_differentiable",  # create_graph
-    "test_saved_variable_packing_unpacking_saved_original_with_hooks",  # create_graph
-    "test_select_sum",  # create_graph, also needs graph breaks
-    "test_custom_autograd_no_early_free",  # create_graph
     "test_custom_function_error",  # vjp
     "test_custom_function_save_for_forward",  # vjp
     "test_dont_materialize_grads",  # undefined grad
@@ -4290,10 +4323,16 @@ def wrap_test_class(orig_cls):
42904323 "test_node_ordering_when_none_returned" , # torch._dynamo.exc.Unsupported: TypeError <built-in method clone
42914324 "test_save_output_nr" , # output_nr grad passed as None
42924325 "test_setup_context_when_forward_has_default_args" , # autograd.Function with class methods
4293- "test_lobpcg" , # create_graph
42944326 # IndexError: list index out of range (NB: x.grad = y where both x and y are input tensors)
42954327 "test_grad_nonleaf_register_hook" ,
42964328 "test_backward_twice_without_saved_values" , # https://github.com/pytorch/pytorch/issues/129938
4329+ # Category: Higher Order Gradients
4330+ "test_default_saved_tensors_hooks_double_backward" , # wrong when pack hook returns non-leaf
4331+ "test_saved_variable_packing_unpacking_saved_original_with_hooks" , # wrong when pack hook returns non-leaf
4332+ "test_nested_anomaly_detect_nan" , # nested anomaly
4333+ "test_select_sum" , # batched gradients
4334+ "test_custom_autograd_no_early_free" , # batched gradients
4335+ "test_lobpcg" , # NaNs
42974336 # Category: Dynamo (pass when directly running CA graph)
42984337 "test_accumulate_grad_tensor_reference" , # Out of bounds: frame_state_entry.stride[i] is None
42994338 "test_custom_function_exception" , # torch.no_grad(), torch._dynamo.exc.Unsupported: missing: WITH_EXCEPT_START
@@ -4339,8 +4378,14 @@ def wrap_test_class(orig_cls):
43394378 "test_anomaly_mode_no_check_nan" , # different error messages
43404379 "test_anomaly_grad_warnings" , # different error messages
43414380 "test_anomaly_detect_nan" , # fake tensor errors on NaN
4381+ "test_once_differentiable" , # different node name: CompiledFunctionBackward
4382+ "test_function" , # different node name: CompiledFunctionBackward
4383+ "test_inplace_on_view_backward" , # different node name: CompiledFunctionBackward
4384+ "test_nested_anomaly_printstack_cleanup" , # anomaly NaN error message different
43424385 # Uncategorized
43434386 "test_not_implemented_grad" , # Dynamo changes the types of exceptions
4387+ "test_grad" , # AOT backward higher order gradients
4388+ "test_grad_materialize_grads" , # AOT backward higher order gradients
43444389}
43454390
43464391if not HAS_CUDA :