
Commit a15b67f

[Nonlinear.ReverseAD] fix NLPBlock and bridges (#2524)
1 parent 2ae5939 commit a15b67f

2 files changed, +35 −0 lines changed


src/Nonlinear/ReverseAD/reverse_mode.jl

Lines changed: 21 additions & 0 deletions
@@ -54,6 +54,27 @@ function _reverse_mode(d::NLPEvaluator, x)
     for con in d.constraints
         _reverse_eval(con)
     end
+    # If a JuMP model uses the legacy nonlinear interface, then JuMP constructs
+    # a NLPEvaluator at the start of a call to `JuMP.optimize!` and it passes in
+    # the list of variables in the JuMP model to `.ordered_variables`.
+    #
+    # During `MOI.initialize`, `.last_x` gets filled with `NaN` to match the
+    # length of `ordered_variables`, that is, the number of variables in the
+    # JuMP model.
+    #
+    # However, if the model includes a bridge that adds new decision variables
+    # then the total number of variables in the optimizer (in `x`) will be
+    # larger than the cache in `last_x`.
+    #
+    # It is safe to resize `last_x` because only the variables in
+    # `ordered_variables` can appear in the NLPBlock.
+    #
+    # I don't think we need any other fixes because callers to things like
+    # `eval_objective` can pass in a longer input `x` vector without fear
+    # because the excess elements won't be used.
+    if length(d.last_x) < length(x)
+        resize!(d.last_x, length(x))
+    end
     copyto!(d.last_x, x)
     return
 end
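
To make the failure mode concrete, here is a small stand-alone sketch in plain Julia (illustrative names only, not MOI internals): `copyto!` requires the destination to be at least as long as the source, so a `last_x` cache sized to the NLPBlock's variables fails once a bridge-extended point arrives, and the `resize!` guard above is what prevents that.

    # Illustrative only: mimic the `last_x` cache from the hunk above.
    last_x = fill(NaN, 1)           # cache sized for the single NLPBlock variable
    x = [1.0, 2.0]                  # optimizer point with one bridge-added variable

    # copyto!(last_x, x)            # without the fix: BoundsError, destination too short

    if length(last_x) < length(x)   # the guard added in this commit
        resize!(last_x, length(x))  # grow the cache; the new tail is overwritten next
    end
    copyto!(last_x, x)
    @assert last_x == [1.0, 2.0]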

test/Nonlinear/ReverseAD.jl

Lines changed: 14 additions & 0 deletions
@@ -1121,6 +1121,20 @@ function test_timers()
     return
 end
 
+function test_varying_length_x()
+    model = MOI.Nonlinear.Model()
+    x = MOI.VariableIndex(1)
+    MOI.Nonlinear.set_objective(model, :(sin($x)))
+    evaluator =
+        MOI.Nonlinear.Evaluator(model, MOI.Nonlinear.SparseReverseMode(), [x])
+    MOI.initialize(evaluator, Symbol[:Jac])
+    ∇f = [NaN]
+    MOI.eval_objective_gradient(evaluator, ∇f, [1.0, 2.0])
+    @test length(∇f) == 1
+    @test ∇f[1] ≈ cos(1.0)
+    return
+end
+
 end # module
 
 TestReverseAD.runtests()
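
As a follow-up to the comment in reverse_mode.jl about `eval_objective`, the sketch below (assumed usage, not part of this commit) mirrors the new test but evaluates the objective value instead of the gradient, to illustrate the claim that the trailing excess element of `x` is simply ignored.

    # Assumed usage, mirroring the comment in reverse_mode.jl; not part of the commit.
    import MathOptInterface as MOI

    model = MOI.Nonlinear.Model()
    x = MOI.VariableIndex(1)
    MOI.Nonlinear.set_objective(model, :(sin($x)))
    evaluator =
        MOI.Nonlinear.Evaluator(model, MOI.Nonlinear.SparseReverseMode(), [x])
    MOI.initialize(evaluator, Symbol[:Grad])
    # The evaluator tracks one variable but receives a two-element point; only
    # the first entry is read, so the value matches sin(1.0).
    @assert MOI.eval_objective(evaluator, [1.0, 2.0]) ≈ sin(1.0)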
