@@ -531,8 +531,8 @@ This method is really intricate and I'm not proud of it. That's because of 3 ele
   and as efficient as possible.
 - The `JuMP` NLP syntax forces splatting for the decision variable, which implies use
   of `Vararg{T,N}` (see the [performance tip](https://docs.julialang.org/en/v1/manual/performance-tips/#Be-aware-of-when-Julia-avoids-specializing))
-  and memoization to avoid redundant computations. This is already complex,
-  but it's even worse knowing that AD tools for gradients do not support splatting.
+  and memoization to avoid redundant computations. This is already complex, but it's even
+  worse knowing that most automatic differentiation tools do not support splatting.
 - The signature of gradient and Hessian functions is not the same for univariate (`nZ̃ == 1`)
   and multivariate (`nZ̃ > 1`) operators in `JuMP`. Both must be defined.
 
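Reviewer's note on the two bullets above: a minimal, self-contained sketch of both constraints, with toy functions `f` and `h` that are purely illustrative (not the package's code). `JuMP` splats the decision variables into the operator callbacks, and the gradient callback fills a vector in-place for multivariate operators but returns a scalar for univariate ones:

```julia
using JuMP

model = Model()
@variable(model, x[1:2])

# Multivariate operator (dimension > 1): the callback takes splatted scalars and
# the gradient callback must fill its first argument in-place.
f(x...) = sum(xi^2 for xi in x)
∇f!(grad, x...) = (grad .= 2 .* x; nothing)
@operator(model, op_f, 2, f, ∇f!)
@objective(model, Min, op_f(x...))

# Univariate operator (dimension == 1): the gradient returns a scalar instead.
h(x) = x^2
∇h(x) = 2x
@operator(model, op_h, 1, h, ∇h)
```

The memoization mentioned in the docstring can follow the pattern from the `JuMP` manual: cache the last splatted argument tuple and rerun the expensive simulations only when it changes, which appears to be the role of `update_simulations!` in the hunks below.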
@@ -546,6 +546,7 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
     nΔŨ, nUe, nŶe = nu*Hc + nϵ, nU + nu, nŶ + ny
     Ncache = nZ̃ + 3
     myNaN = convert(JNT, NaN) # fill Z̃ with NaNs to force update_simulations! at 1st call:
+    # ---------------------- differentiation cache ---------------------------------------
     Z̃_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(fill(myNaN, nZ̃), Ncache)
     ΔŨ_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nΔŨ), Ncache)
     x̂0end_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Ncache)
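Side note for readers: `DiffCache` comes from PreallocationTools.jl. It pairs a `Float64` buffer with a larger one for `ForwardDiff.Dual` numbers, so the same preallocation serves both the plain evaluation path and the derivative path. A standalone sketch under that assumption (the toy function and sizes are illustrative):

```julia
using PreallocationTools, ForwardDiff

# One buffer reusable with Float64 and ForwardDiff.Dual element types; the second
# argument bounds the dual chunk size (cf. Ncache = nZ̃ + 3 above).
cache = DiffCache(zeros(3), 8)

function sumsq(x)
    tmp = get_tmp(cache, eltype(x))  # view of the buffer matching eltype(x)
    tmp .= x .^ 2
    return sum(tmp)
end

sumsq([1.0, 2.0, 3.0])                        # Float64 path
ForwardDiff.gradient(sumsq, [1.0, 2.0, 3.0])  # Dual path reuses the same cache
```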
@@ -586,14 +587,14 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
         end
         return nothing
     end
-    # --------------------- cache for the AD functions ------------------------------------
+    # --------------------- normal cache for the AD functions ----------------------------
     Z̃arg_vec = Vector{JNT}(undef, nZ̃)
-    ∇J = Vector{JNT}(undef, nZ̃)          # gradient of J
+    ∇J = Vector{JNT}(undef, nZ̃)          # gradient of objective J
     g_vec = Vector{JNT}(undef, ng)
-    ∇g = Matrix{JNT}(undef, ng, nZ̃)      # Jacobian of g
+    ∇g = Matrix{JNT}(undef, ng, nZ̃)      # Jacobian of inequality constraints g
     geq_vec = Vector{JNT}(undef, neq)
-    ∇geq = Matrix{JNT}(undef, neq, nZ̃)   # Jacobian of geq
-    # --------------------- objective function --------------------------------------------
+    ∇geq = Matrix{JNT}(undef, neq, nZ̃)   # Jacobian of equality constraints geq
+    # --------------------- objective functions -------------------------------------------
     function Jfunc(Z̃arg::Vararg{T, N}) where {N, T<:Real}
         update_simulations!(Z̃arg, get_tmp(Z̃_cache, T))
         ΔŨ = get_tmp(ΔŨ_cache, T)
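The `∇J`, `∇g` and `∇geq` buffers above are overwritten at every solver iteration instead of being reallocated. A hedged sketch of that in-place idiom with ForwardDiff, using a toy objective (the package's actual AD calls may differ):

```julia
using ForwardDiff

J(Z̃) = sum(abs2, Z̃)                     # toy objective, for illustration only
Z̃   = rand(4)
∇J  = similar(Z̃)                        # preallocated output, like ∇J above
cfg = ForwardDiff.GradientConfig(J, Z̃)  # preallocated dual workspace
ForwardDiff.gradient!(∇J, J, Z̃, cfg)    # overwrites ∇J, no fresh allocations
```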
@@ -674,7 +675,7 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
     ∇geqfuncs! = Vector{Function}(undef, neq)
     for i in eachindex(∇geqfuncs!)
         # only multivariate syntax, univariate is impossible since nonlinear equality
-        # constraints imply MultipleShooting thus input AND state in Z̃:
+        # constraints imply MultipleShooting, thus input increment ΔU and state X̂0 in Z̃:
         ∇geqfuncs![i] =
             function (∇geq_i, Z̃arg::Vararg{T, N}) where {N, T<:Real}
                 Z̃arg_vec .= Z̃arg
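Since `JuMP` registers one scalar operator per nonlinear equality constraint, each `∇geqfuncs![i]` callback only hands back the `i`-th row of the Jacobian. A toy illustration of that row-wise pattern, recomputing the full Jacobian naively with ForwardDiff where the real code relies on memoization (all names below are hypothetical):

```julia
using ForwardDiff

geq(Z̃) = [Z̃[1]^2 + Z̃[2] - 1, Z̃[2]^3 - Z̃[3]]   # toy equality constraints
neq, nZ̃ = 2, 3
∇geq_demo = Matrix{Float64}(undef, neq, nZ̃)
∇geqfuncs_demo = map(1:neq) do i
    function (∇geq_i, Z̃arg::Vararg{T, N}) where {N, T<:Real}
        # naive: recompute the whole Jacobian, then extract row i
        ForwardDiff.jacobian!(∇geq_demo, geq, collect(Z̃arg))
        ∇geq_i .= view(∇geq_demo, i, :)
        return nothing
    end
end

row1 = zeros(nZ̃)
∇geqfuncs_demo[1](row1, 1.0, 2.0, 3.0)   # fills row1 with the first Jacobian row
```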