diff --git a/Project.toml b/Project.toml
index 1b9922b7c..cd78878f3 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "ModelPredictiveControl"
 uuid = "61f9bdb8-6ae4-484a-811f-bbf86720c31c"
 authors = ["Francis Gagnon"]
-version = "1.1.4"
+version = "1.2.0"
 
 [deps]
 ControlSystemsBase = "aaaaaaaa-a6ca-5380-bf3e-84a91bcd477e"
diff --git a/src/controller/execute.jl b/src/controller/execute.jl
index 1b339da5e..c70058e83 100644
--- a/src/controller/execute.jl
+++ b/src/controller/execute.jl
@@ -121,12 +121,11 @@ function getinfo(mpc::PredictiveController{NT}) where NT<:Real
     Ȳ, Ū = similar(mpc.Yop), similar(mpc.Uop)
     Ŷe, Ue = Vector{NT}(undef, nŶe), Vector{NT}(undef, nUe)
     Ŷ0, x̂0end = predict!(Ŷ0, x̂0, x̂0next, u0, û0, mpc, model, mpc.ΔŨ)
-    Ue, Ŷe = extended_predictions!(Ue, Ŷe, Ū, mpc, model, Ŷ0, mpc.ΔŨ)
+    Ŷe, Ue = extended_predictions!(Ŷe, Ue, Ū, mpc, model, Ŷ0, mpc.ΔŨ)
     J = obj_nonlinprog!(Ȳ, Ū, mpc, model, Ue, Ŷe, mpc.ΔŨ)
-    U = Ū
-    U .= @views Ue[1:end-model.nu]
-    Ŷ = Ȳ
-    Ŷ .= @views Ŷe[model.ny+1:end]
+    U, Ŷ = Ū, Ȳ
+    U .= mul!(U, mpc.S̃, mpc.ΔŨ) .+ mpc.T_lastu
+    Ŷ .= Ŷ0 .+ mpc.Yop
     oldF = copy(mpc.F)
     F = predictstoch!(mpc, mpc.estim)
     Ŷs .= F # predictstoch! init mpc.F with Ŷs value if estim is an InternalModel
@@ -376,26 +375,41 @@ function predict!(Ŷ0, x̂0, x̂0next, u0, û0, mpc::PredictiveController, mod
 end
 
 """
-    extended_predictions!(Ue, Ŷe, Ū, mpc, model, Ŷ0, ΔŨ) -> Ŷe, Ue
+    extended_predictions!(Ŷe, Ue, Ū, mpc, model, Ŷ0, ΔŨ) -> Ŷe, Ue
 
-Compute the extended vectors `Ue` and `Ŷe` and for the nonlinear optimization.
+Compute the extended vectors `Ŷe` and `Ue` for the nonlinear optimization.
 
-The function mutates `Ue`, `Ŷe` and `Ū` in arguments, without assuming any initial values.
+The function mutates `Ŷe`, `Ue` and `Ū` in arguments, without assuming any initial values
+for them. Using `nocustomfcts = mpc.weights.iszero_E && iszero_nc(mpc)`, there are two
+special cases in which `Ŷe`, `Ue` and `Ū` are not mutated:
+
+- If `mpc.weights.iszero_M_Hp[] && nocustomfcts`, the `Ŷe` vector is not computed to reduce
+  the burden in the optimization problem.
+- If `mpc.weights.iszero_L_Hp[] && nocustomfcts`, the `Ue` vector is not computed for the
+  same reason as above.
 """
-function extended_predictions!(Ue, Ŷe, Ū, mpc, model, Ŷ0, ΔŨ)
+function extended_predictions!(Ŷe, Ue, Ū, mpc, model, Ŷ0, ΔŨ)
     ny, nu = model.ny, model.nu
-    # --- extended manipulated inputs Ue = [U; u(k+Hp-1)] ---
-    U = Ū
-    U .= mul!(U, mpc.S̃, ΔŨ) .+ mpc.T_lastu
-    Ue[1:end-nu] .= U
-    # u(k + Hp) = u(k + Hp - 1) since Δu(k+Hp) = 0 (because Hc ≤ Hp):
-    Ue[end-nu+1:end] .= @views U[end-nu+1:end]
+    nocustomfcts = (mpc.weights.iszero_E && iszero_nc(mpc))
     # --- extended output predictions Ŷe = [ŷ(k); Ŷ] ---
-    Ŷe[1:ny] .= mpc.ŷ
-    Ŷe[ny+1:end] .= Ŷ0 .+ mpc.Yop
-    return Ue, Ŷe
+    if !(mpc.weights.iszero_M_Hp[] && nocustomfcts)
+        Ŷe[1:ny] .= mpc.ŷ
+        Ŷe[ny+1:end] .= Ŷ0 .+ mpc.Yop
+    end
+    # --- extended manipulated inputs Ue = [U; u(k+Hp-1)] ---
+    if !(mpc.weights.iszero_L_Hp[] && nocustomfcts)
+        U = Ū
+        U .= mul!(U, mpc.S̃, ΔŨ) .+ mpc.T_lastu
+        Ue[1:end-nu] .= U
+        # u(k + Hp) = u(k + Hp - 1) since Δu(k+Hp) = 0 (because Hc ≤ Hp):
+        Ue[end-nu+1:end] .= @views U[end-nu+1:end]
+    end
+    return Ŷe, Ue
 end
 
+"Verify if the custom nonlinear constraints have zero elements."
+iszero_nc(mpc::PredictiveController) = (mpc.con.nc == 0)
+
 """
     obj_nonlinprog!( _ , _ , mpc::PredictiveController, model::LinModel, Ue, Ŷe, ΔŨ)
diff --git a/src/controller/explicitmpc.jl b/src/controller/explicitmpc.jl
index 14909b0c4..108a33b50 100644
--- a/src/controller/explicitmpc.jl
+++ b/src/controller/explicitmpc.jl
@@ -195,6 +195,8 @@ function predict!(Ŷ, x̂, _ , _ , _ , mpc::ExplicitMPC, ::LinModel, ΔŨ)
     return Ŷ, x̂
 end
 
+"`ExplicitMPC` does not support custom nonlinear constraints, so return `true`."
+iszero_nc(mpc::ExplicitMPC) = true
 
 """
     addinfo!(info, mpc::ExplicitMPC) -> info
diff --git a/src/controller/nonlinmpc.jl b/src/controller/nonlinmpc.jl
index 8e0c96655..a73df83bc 100644
--- a/src/controller/nonlinmpc.jl
+++ b/src/controller/nonlinmpc.jl
@@ -481,18 +481,19 @@ function init_optimization!(mpc::NonLinMPC, model::SimModel, optim)
             JuMP.set_attribute(optim, "nlp_scaling_max_gradient", 10.0/C)
         end
     end
-    Jfunc, gfunc = get_optim_functions(mpc, mpc.optim)
+    Jfunc, gfuncs = get_optim_functions(mpc, mpc.optim)
     @operator(optim, J, nΔŨ, Jfunc)
     @objective(optim, Min, J(ΔŨvar...))
-    init_nonlincon!(mpc, model, gfunc)
+    init_nonlincon!(mpc, model, gfuncs)
     set_nonlincon!(mpc, model, mpc.optim)
     return nothing
 end
 
 """
-    get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel) -> Jfunc, gfunc
+    get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel) -> Jfunc, gfuncs
 
-Get the objective `Jfunc` and constraints `gfunc` functions for [`NonLinMPC`](@ref).
+Get the objective `Jfunc` function and constraint `gfuncs` function vector for
+[`NonLinMPC`](@ref).
 
 Inspired from: [User-defined operators with vector outputs](https://jump.dev/JuMP.jl/stable/tutorials/nonlinear/tips_and_tricks/#User-defined-operators-with-vector-outputs)
 """
@@ -513,28 +514,9 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
     û0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nu), Ncache)
     g_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, ng), Ncache)
     gc_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nc), Ncache)
-    function Jfunc(ΔŨtup::T...) where T<:Real
-        ΔŨ1 = ΔŨtup[begin]
-        ΔŨ, g = get_tmp(ΔŨ_cache, ΔŨ1), get_tmp(g_cache, ΔŨ1)
-        for i in eachindex(ΔŨtup)
-            ΔŨ[i] = ΔŨtup[i] # ΔŨ .= ΔŨtup seems to produce a type instability
-        end
-        Ŷe, Ue = get_tmp(Ŷe_cache, ΔŨ1), get_tmp(Ue_cache, ΔŨ1)
-        Ȳ, Ū = get_tmp(Ȳ_cache, ΔŨ1), get_tmp(Ū_cache, ΔŨ1)
-        x̂0, x̂0next = get_tmp(x̂0_cache, ΔŨ1), get_tmp(x̂0next_cache, ΔŨ1)
-        u0, û0 = get_tmp(u0_cache, ΔŨ1), get_tmp(û0_cache, ΔŨ1)
-        gc = get_tmp(gc_cache, ΔŨ1)
-        Ŷ0, x̂0end = predict!(Ȳ, x̂0, x̂0next, u0, û0, mpc, model, ΔŨ)
-        Ue, Ŷe = extended_predictions!(Ue, Ŷe, Ū, mpc, model, Ŷ0, ΔŨ)
-        ϵ = (nϵ ≠ 0) ? ΔŨ[end] : zero(T) # ϵ = 0 if nϵ == 0 (meaning no relaxation)
-        gc = con_custom!(gc, mpc, Ue, Ŷe, ϵ)
-        g = con_nonlinprog!(g, mpc, model, x̂0end, Ŷ0, gc, ϵ)
-        return obj_nonlinprog!(Ȳ, Ū, mpc, model, Ue, Ŷe, ΔŨ)::T
-    end
-    function gfunc_i(i, ΔŨtup::NTuple{N, T}) where {N, T<:Real}
-        ΔŨ1 = ΔŨtup[begin]
-        ΔŨ, g = get_tmp(ΔŨ_cache, ΔŨ1), get_tmp(g_cache, ΔŨ1)
+    function update_simulations!(ΔŨ, ΔŨtup::NTuple{N, T}) where {N, T<:Real}
         if any(new !== old for (new, old) in zip(ΔŨtup, ΔŨ)) # new ΔŨtup, update predictions:
+            ΔŨ1 = ΔŨtup[begin]
             for i in eachindex(ΔŨtup)
                 ΔŨ[i] = ΔŨtup[i] # ΔŨ .= ΔŨtup seems to produce a type instability
             end
@@ -542,60 +524,81 @@ function get_optim_functions(mpc::NonLinMPC, ::JuMP.GenericModel{JNT}) where JNT
             Ȳ, Ū = get_tmp(Ȳ_cache, ΔŨ1), get_tmp(Ū_cache, ΔŨ1)
             x̂0, x̂0next = get_tmp(x̂0_cache, ΔŨ1), get_tmp(x̂0next_cache, ΔŨ1)
             u0, û0 = get_tmp(u0_cache, ΔŨ1), get_tmp(û0_cache, ΔŨ1)
-            gc = get_tmp(gc_cache, ΔŨ1)
+            gc, g = get_tmp(gc_cache, ΔŨ1), get_tmp(g_cache, ΔŨ1)
             Ŷ0, x̂0end = predict!(Ȳ, x̂0, x̂0next, u0, û0, mpc, model, ΔŨ)
-            Ue, Ŷe = extended_predictions!(Ue, Ŷe, Ū, mpc, model, Ŷ0, ΔŨ)
+            Ŷe, Ue = extended_predictions!(Ŷe, Ue, Ū, mpc, model, Ŷ0, ΔŨ)
             ϵ = (nϵ ≠ 0) ? ΔŨ[end] : zero(T) # ϵ = 0 if nϵ == 0 (meaning no relaxation)
             gc = con_custom!(gc, mpc, Ue, Ŷe, ϵ)
             g = con_nonlinprog!(g, mpc, model, x̂0end, Ŷ0, gc, ϵ)
         end
+        return nothing
+    end
+    function Jfunc(ΔŨtup::Vararg{T, N}) where {N, T<:Real}
+        ΔŨ1 = ΔŨtup[begin]
+        ΔŨ = get_tmp(ΔŨ_cache, ΔŨ1)
+        update_simulations!(ΔŨ, ΔŨtup)
+        Ȳ, Ū = get_tmp(Ȳ_cache, ΔŨ1), get_tmp(Ū_cache, ΔŨ1)
+        Ŷe, Ue = get_tmp(Ŷe_cache, ΔŨ1), get_tmp(Ue_cache, ΔŨ1)
+        return obj_nonlinprog!(Ȳ, Ū, mpc, model, Ue, Ŷe, ΔŨ)::T
+    end
+    function gfunc_i(i, ΔŨtup::NTuple{N, T}) where {N, T<:Real}
+        ΔŨ1 = ΔŨtup[begin]
+        ΔŨ = get_tmp(ΔŨ_cache, ΔŨ1)
+        update_simulations!(ΔŨ, ΔŨtup)
+        g = get_tmp(g_cache, ΔŨ1)
         return g[i]::T
     end
-    gfunc = [(ΔŨ...) -> gfunc_i(i, ΔŨ) for i in 1:ng]
-    return Jfunc, gfunc
+    gfuncs = Vector{Function}(undef, ng)
+    for i in 1:ng
+        # this is another syntax for anonymous function, allowing parameters T and N:
+        gfuncs[i] = function (ΔŨtup::Vararg{T, N}) where {N, T<:Real}
+            return gfunc_i(i, ΔŨtup)
+        end
+    end
+    return Jfunc, gfuncs
 end
 
-function init_nonlincon!(mpc::NonLinMPC, ::LinModel, gfunc::Vector{<:Function})
+function init_nonlincon!(mpc::NonLinMPC, ::LinModel, gfuncs::Vector{<:Function})
     optim, con = mpc.optim, mpc.con
     nΔŨ = length(mpc.ΔŨ)
     if length(con.i_g) ≠ 0
         i_base = 0
         for i in 1:con.nc
             name = Symbol("g_c_$i")
-            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name)
+            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfuncs[i_base+i]; name)
         end
     end
     return nothing
 end
 
-function init_nonlincon!(mpc::NonLinMPC, ::NonLinModel, gfunc::Vector{<:Function})
+function init_nonlincon!(mpc::NonLinMPC, ::NonLinModel, gfuncs::Vector{<:Function})
     optim, con = mpc.optim, mpc.con
     ny, nx̂, Hp, nΔŨ = mpc.estim.model.ny, mpc.estim.nx̂, mpc.Hp, length(mpc.ΔŨ)
     if length(con.i_g) ≠ 0
         i_base = 0
         for i in eachindex(con.Y0min)
             name = Symbol("g_Y0min_$i")
-            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name)
+            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfuncs[i_base+i]; name)
         end
         i_base = 1Hp*ny
         for i in eachindex(con.Y0max)
             name = Symbol("g_Y0max_$i")
-            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name)
+            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfuncs[i_base+i]; name)
         end
         i_base = 2Hp*ny
         for i in eachindex(con.x̂0min)
             name = Symbol("g_x̂0min_$i")
-            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name)
+            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfuncs[i_base+i]; name)
        end
         i_base = 2Hp*ny + nx̂
         for i in eachindex(con.x̂0max)
             name = Symbol("g_x̂0max_$i")
-            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name)
+            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfuncs[i_base+i]; name)
         end
         i_base = 2Hp*ny + 2nx̂
         for i in 1:con.nc
             name = Symbol("g_c_$i")
-            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfunc[i_base+i]; name)
+            optim[name] = JuMP.add_nonlinear_operator(optim, nΔŨ, gfuncs[i_base+i]; name)
         end
     end
     return nothing
diff --git a/src/estimator/mhe/construct.jl b/src/estimator/mhe/construct.jl
index 8129bae4c..ca86c59ba 100644
--- a/src/estimator/mhe/construct.jl
+++ b/src/estimator/mhe/construct.jl
@@ -1251,7 +1251,7 @@ function init_optimization!(
             JuMP.set_attribute(optim, "nlp_scaling_max_gradient", 10.0/C)
         end
     end
-    Jfunc, gfunc = get_optim_functions(estim, optim)
+    Jfunc, gfuncs = get_optim_functions(estim, optim)
     @operator(optim, J, nZ̃, Jfunc)
     @objective(optim, Min, J(Z̃var...))
     nV̂, nX̂ = estim.He*estim.nym, estim.He*estim.nx̂
@@ -1259,28 +1259,28 @@
         for i in eachindex(con.X̂0min)
             name = Symbol("g_X̂0min_$i")
             optim[name] = JuMP.add_nonlinear_operator(
-                optim, nZ̃, gfunc[i]; name
+                optim, nZ̃, gfuncs[i]; name
             )
         end
         i_end_X̂min = nX̂
         for i in eachindex(con.X̂0max)
             name = Symbol("g_X̂0max_$i")
             optim[name] = JuMP.add_nonlinear_operator(
-                optim, nZ̃, gfunc[i_end_X̂min + i]; name
+                optim, nZ̃, gfuncs[i_end_X̂min + i]; name
             )
         end
         i_end_X̂max = 2*nX̂
         for i in eachindex(con.V̂min)
             name = Symbol("g_V̂min_$i")
             optim[name] = JuMP.add_nonlinear_operator(
-                optim, nZ̃, gfunc[i_end_X̂max + i]; name
+                optim, nZ̃, gfuncs[i_end_X̂max + i]; name
             )
         end
         i_end_V̂min = 2*nX̂ + nV̂
         for i in eachindex(con.V̂max)
             name = Symbol("g_V̂max_$i")
             optim[name] = JuMP.add_nonlinear_operator(
-                optim, nZ̃, gfunc[i_end_V̂min + i]; name
+                optim, nZ̃, gfuncs[i_end_V̂min + i]; name
             )
         end
     end
@@ -1289,9 +1289,10 @@ end
 
 """
-    get_optim_functions(estim::MovingHorizonEstimator, ::JuMP.GenericModel) -> Jfunc, gfunc
+    get_optim_functions(estim::MovingHorizonEstimator, ::JuMP.GenericModel) -> Jfunc, gfuncs
 
-Get the objective `Jfunc` and constraints `gfunc` functions for [`MovingHorizonEstimator`](@ref).
+Get the objective `Jfunc` function and constraint `gfuncs` function vector for
+[`MovingHorizonEstimator`](@ref).
 
 Inspired from: [User-defined operators with vector outputs](https://jump.dev/JuMP.jl/stable/tutorials/nonlinear/tips_and_tricks/#User-defined-operators-with-vector-outputs)
 """
@@ -1309,35 +1310,41 @@ function get_optim_functions(
     x̄_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nx̂), Nc)
     û0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nu), Nc)
     ŷ0_cache::DiffCache{Vector{JNT}, Vector{JNT}} = DiffCache(zeros(JNT, nŷ), Nc)
-    function Jfunc(Z̃tup::T...)::T where {T <: Real}
-        Z̃1 = Z̃tup[begin]
-        Z̃, g = get_tmp(Z̃_cache, Z̃1), get_tmp(g_cache, Z̃1)
-        for i in eachindex(Z̃tup)
-            Z̃[i] = Z̃tup[i] # Z̃ .= Z̃tup seems to produce a type instability
-        end
-        V̂, X̂0 = get_tmp(V̂_cache, Z̃1), get_tmp(X̂0_cache, Z̃1)
-        û0, ŷ0 = get_tmp(û0_cache, Z̃1), get_tmp(ŷ0_cache, Z̃1)
-        V̂, X̂0 = predict!(V̂, X̂0, û0, ŷ0, estim, model, Z̃)
-        ϵ = (nϵ ≠ 0) ? Z̃[begin] : zero(T) # ϵ = 0 if Cwt=Inf (meaning: no relaxation)
-        g = con_nonlinprog!(g, estim, model, X̂0, V̂, ϵ)
-        x̄ = get_tmp(x̄_cache, Z̃1)
-        return obj_nonlinprog!(x̄, estim, model, V̂, Z̃)::T
-    end
-    function gfunc_i(i, Z̃tup::NTuple{N, T})::T where {N, T <:Real}
-        Z̃1 = Z̃tup[begin]
-        Z̃, g = get_tmp(Z̃_cache, Z̃1), get_tmp(g_cache, Z̃1)
+    function update_simulations!(Z̃, Z̃tup::NTuple{N, T}) where {N, T <:Real}
         if any(new !== old for (new, old) in zip(Z̃tup, Z̃)) # new Z̃tup, update predictions:
-            V̂, X̂0 = get_tmp(V̂_cache, Z̃1), get_tmp(X̂0_cache, Z̃1)
-            û0, ŷ0 = get_tmp(û0_cache, Z̃1), get_tmp(ŷ0_cache, Z̃1)
+            Z̃1 = Z̃tup[begin]
             for i in eachindex(Z̃tup)
                 Z̃[i] = Z̃tup[i] # Z̃ .= Z̃tup seems to produce a type instability
             end
-            V̂, X̂0 = predict!(V̂, X̂0, û0, ŷ0, estim, model, Z̃)
+            V̂, X̂0 = get_tmp(V̂_cache, Z̃1), get_tmp(X̂0_cache, Z̃1)
+            û0, ŷ0 = get_tmp(û0_cache, Z̃1), get_tmp(ŷ0_cache, Z̃1)
+            g = get_tmp(g_cache, Z̃1)
+            V̂, X̂0 = predict!(V̂, X̂0, û0, ŷ0, estim, model, Z̃)
             ϵ = (nϵ ≠ 0) ? Z̃[begin] : zero(T) # ϵ = 0 if Cwt=Inf (meaning: no relaxation)
             g = con_nonlinprog!(g, estim, model, X̂0, V̂, ϵ)
         end
+        return nothing
+    end
+    function Jfunc(Z̃tup::Vararg{T, N}) where {N, T<:Real}
+        Z̃1 = Z̃tup[begin]
+        Z̃ = get_tmp(Z̃_cache, Z̃1)
+        update_simulations!(Z̃, Z̃tup)
+        x̄, V̂ = get_tmp(x̄_cache, Z̃1), get_tmp(V̂_cache, Z̃1)
+        return obj_nonlinprog!(x̄, estim, model, V̂, Z̃)::T
+    end
+    function gfunc_i(i, Z̃tup::NTuple{N, T})::T where {N, T <:Real}
+        Z̃1 = Z̃tup[begin]
+        Z̃ = get_tmp(Z̃_cache, Z̃1)
+        update_simulations!(Z̃, Z̃tup)
+        g = get_tmp(g_cache, Z̃1)
         return g[i]
     end
-    gfunc = [(Z̃...) -> gfunc_i(i, Z̃) for i in 1:ng]
-    return Jfunc, gfunc
+    gfuncs = Vector{Function}(undef, ng)
+    for i in 1:ng
+        # this is another syntax for anonymous function, allowing parameters T and N:
+        gfuncs[i] = function (Z̃tup::Vararg{T, N}) where {N, T<:Real}
+            return gfunc_i(i, Z̃tup)
+        end
+    end
+    return Jfunc, gfuncs
 end
\ No newline at end of file
diff --git a/src/model/solver.jl b/src/model/solver.jl
index fb5b353b2..4051abf41 100644
--- a/src/model/solver.jl
+++ b/src/model/solver.jl
@@ -39,54 +39,70 @@ RungeKutta(order::Int=4; supersample::Int=1) = RungeKutta(order, supersample)
 
 "Get the `f!` and `h!` functions for the explicit Runge-Kutta solvers."
 function get_solver_functions(NT::DataType, solver::RungeKutta, fc!, hc!, Ts, _ , nx, _ , _ )
-    order = solver.order
-    Ts_inner = Ts/solver.supersample
     Nc = nx + 2
+    f! = if solver.order==4
+        get_rk4_function(NT, solver, fc!, Ts, nx, Nc)
+    elseif solver.order==1
+        get_euler_function(NT, solver, fc!, Ts, nx, Nc)
+    else
+        throw(ArgumentError("only 1st and 4th order Runge-Kutta is supported."))
+    end
+    h! = hc!
+    return f!, h!
+end
+
+"Get the f! function for the 4th order explicit Runge-Kutta solver."
+function get_rk4_function(NT, solver, fc!, Ts, nx, Nc)
+    Ts_inner = Ts/solver.supersample
     xcur_cache::DiffCache{Vector{NT}, Vector{NT}} = DiffCache(zeros(NT, nx), Nc)
     k1_cache::DiffCache{Vector{NT}, Vector{NT}} = DiffCache(zeros(NT, nx), Nc)
     k2_cache::DiffCache{Vector{NT}, Vector{NT}} = DiffCache(zeros(NT, nx), Nc)
     k3_cache::DiffCache{Vector{NT}, Vector{NT}} = DiffCache(zeros(NT, nx), Nc)
     k4_cache::DiffCache{Vector{NT}, Vector{NT}} = DiffCache(zeros(NT, nx), Nc)
-    if order==1
-        f! = function euler_solver!(xnext, x, u, d, p)
-            CT = promote_type(eltype(x), eltype(u), eltype(d))
-            xcur = get_tmp(xcur_cache, CT)
-            k1 = get_tmp(k1_cache, CT)
-            xterm = xnext
-            @. xcur = x
-            for i=1:solver.supersample
-                fc!(k1, xcur, u, d, p)
-                @. xcur = xcur + k1 * Ts_inner
-            end
-            @. xnext = xcur
-            return nothing
+    f! = function rk4_solver!(xnext, x, u, d, p)
+        CT = promote_type(eltype(x), eltype(u), eltype(d))
+        xcur = get_tmp(xcur_cache, CT)
+        k1 = get_tmp(k1_cache, CT)
+        k2 = get_tmp(k2_cache, CT)
+        k3 = get_tmp(k3_cache, CT)
+        k4 = get_tmp(k4_cache, CT)
+        xterm = xnext
+        @. xcur = x
+        for i=1:solver.supersample
+            fc!(k1, xcur, u, d, p)
+            @. xterm = xcur + k1 * Ts_inner/2
+            fc!(k2, xterm, u, d, p)
+            @. xterm = xcur + k2 * Ts_inner/2
+            fc!(k3, xterm, u, d, p)
+            @. xterm = xcur + k3 * Ts_inner
+            fc!(k4, xterm, u, d, p)
+            @. xcur = xcur + (k1 + 2k2 + 2k3 + k4)*Ts_inner/6
         end
-    elseif order==4
-        f! = function rk4_solver!(xnext, x, u, d, p)
-            CT = promote_type(eltype(x), eltype(u), eltype(d))
-            xcur = get_tmp(xcur_cache, CT)
-            k1 = get_tmp(k1_cache, CT)
-            k2 = get_tmp(k2_cache, CT)
-            k3 = get_tmp(k3_cache, CT)
-            k4 = get_tmp(k4_cache, CT)
-            xterm = xnext
-            @. xcur = x
-            for i=1:solver.supersample
-                fc!(k1, xcur, u, d, p)
-                @. xterm = xcur + k1 * Ts_inner/2
-                fc!(k2, xterm, u, d, p)
-                @. xterm = xcur + k2 * Ts_inner/2
-                fc!(k3, xterm, u, d, p)
-                @. xterm = xcur + k3 * Ts_inner
-                fc!(k4, xterm, u, d, p)
-                @. xcur = xcur + (k1 + 2k2 + 2k3 + k4)*Ts_inner/6
-            end
-            @. xnext = xcur
-            return nothing
+        @. xnext = xcur
+        return nothing
+    end
+    return f!
+end
+
+"Get the f! function for the explicit Euler solver."
+function get_euler_function(NT, solver, fc!, Ts, nx, Nc)
+    Ts_inner = Ts/solver.supersample
+    xcur_cache::DiffCache{Vector{NT}, Vector{NT}} = DiffCache(zeros(NT, nx), Nc)
+    k_cache::DiffCache{Vector{NT}, Vector{NT}} = DiffCache(zeros(NT, nx), Nc)
+    f! = function euler_solver!(xnext, x, u, d, p)
+        CT = promote_type(eltype(x), eltype(u), eltype(d))
+        xcur = get_tmp(xcur_cache, CT)
+        k = get_tmp(k_cache, CT)
+        xterm = xnext
+        @. xcur = x
+        for i=1:solver.supersample
+            fc!(k, xcur, u, d, p)
+            @. xcur = xcur + k * Ts_inner
        end
+        @. xnext = xcur
+        return nothing
     end
-    h! = hc!
-    return f!, h!
+    return f!
 end
 
 """
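The `getinfo` hunk rebuilds `U` and `Ŷ` directly from the decision vector instead of slicing the extended vectors `Ue` and `Ŷe`, since those may now be left uncomputed. A minimal sketch of the relation it relies on, with toy sizes and hand-written matrices (none of these values come from the package):

```julia
using LinearAlgebra

# Over the horizon, U = S̃*ΔŨ + T*u(k-1): the input increments are accumulated on
# top of the last applied input, and Ŷ is the deviation prediction plus the output
# operating point. Sizes and matrices below are illustrative only.
nu, Hp = 1, 3
S̃ = [1.0 0.0 0.0;               # lower-triangular: accumulates ΔU over the horizon
     1.0 1.0 0.0;
     1.0 1.0 1.0]
T_lastu = fill(2.0, Hp*nu)       # T*u(k-1), with u(k-1) = 2.0
ΔŨ = [0.5, -0.25, 0.0]           # decision vector (no slack variable here)
U = similar(ΔŨ)
U .= mul!(U, S̃, ΔŨ) .+ T_lastu   # same pattern as the new getinfo line
# U == [2.5, 2.25, 2.25]: each input is the previous one plus its increment
Ŷ0  = [0.1, 0.2, 0.3]            # predictions in deviation form
Yop = [10.0, 10.0, 10.0]         # output operating points
Ŷ = Ŷ0 .+ Yop                    # predictions back in the original units
```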
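Both `get_optim_functions` methods now route the objective and every constraint closure through a single `update_simulations!` that recomputes the predictions only when the decision variables actually changed. A stripped-down sketch of that memoization pattern, with placeholder names and a toy model in place of the package's `predict!`/`con_nonlinprog!` machinery (the real code also wraps its buffers in `PreallocationTools.DiffCache` so they work with ForwardDiff dual numbers, which is omitted here):

```julia
# Shared cache: the last decision vector seen and the constraint values at it.
struct SharedCache{T}
    x::Vector{T}
    g::Vector{T}
end

function update!(cache::SharedCache, xtup::NTuple{N, T}) where {N, T<:Real}
    if any(new !== old for (new, old) in zip(xtup, cache.x))  # new point: recompute
        for i in eachindex(xtup)
            cache.x[i] = xtup[i]
        end
        cache.g[1] = cache.x[1]^2 + cache.x[2]^2 - 1   # placeholder constraint
    end
    return nothing
end

function make_functions(cache::SharedCache)
    Jfunc = function (xtup::Vararg{T, N}) where {N, T<:Real}
        update!(cache, xtup)
        return sum(abs2, cache.x)                      # placeholder objective
    end
    gfuncs = Vector{Function}(undef, length(cache.g))
    for i in eachindex(cache.g)
        # same "named anonymous function" syntax as in the diff, keeping T and N:
        gfuncs[i] = function (xtup::Vararg{T, N}) where {N, T<:Real}
            update!(cache, xtup)
            return cache.g[i]
        end
    end
    return Jfunc, gfuncs
end

cache = SharedCache(zeros(2), zeros(1))
Jfunc, gfuncs = make_functions(cache)
Jfunc(0.5, 0.5)      # triggers the simulation update
gfuncs[1](0.5, 0.5)  # same point: reuses the cached constraint value
```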
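`init_nonlincon!` and the MHE `init_optimization!` register each element of `gfuncs` as its own scalar JuMP operator. A self-contained sketch of that registration on a toy model, assuming `JuMP` and `Ipopt` are installed (the variables, constraint functions and operator names below are made up, not the package's):

```julia
using JuMP, Ipopt

nZ = 2
gfuncs = Vector{Function}(undef, nZ)
for i in 1:nZ
    # splatted closure, one scalar constraint gᵢ(Z) = Zᵢ - 1 ≤ 0 per operator:
    gfuncs[i] = function (Ztup::Vararg{T, N}) where {N, T<:Real}
        return Ztup[i] - 1
    end
end

model = Model(Ipopt.Optimizer)
@variable(model, Z[1:nZ])
for i in 1:nZ
    name = Symbol("g_toy_$i")
    op = JuMP.add_nonlinear_operator(model, nZ, gfuncs[i]; name)
    @constraint(model, op(Z...) <= 0)
end
@objective(model, Min, sum(Z .^ 2))
optimize!(model)
value.(Z)   # both constraints are inactive at the unconstrained minimum near 0
```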
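`get_solver_functions` now only dispatches on `solver.order`, and the two discretizations live in their own constructors. A plain sketch of the supersampled steps they build, without the `DiffCache` buffers (the function names and the toy dynamics are illustrative):

```julia
# One discrete step = `supersample` explicit sub-steps of length Ts/supersample.
function rk4_step!(xnext, fc!, x, u, d, p, Ts, supersample)
    Ts_inner = Ts / supersample
    k1, k2, k3, k4, xterm, xcur = ntuple(_ -> similar(x), 6)
    @. xcur = x
    for _ in 1:supersample
        fc!(k1, xcur, u, d, p)
        @. xterm = xcur + k1 * Ts_inner/2
        fc!(k2, xterm, u, d, p)
        @. xterm = xcur + k2 * Ts_inner/2
        fc!(k3, xterm, u, d, p)
        @. xterm = xcur + k3 * Ts_inner
        fc!(k4, xterm, u, d, p)
        @. xcur = xcur + (k1 + 2k2 + 2k3 + k4) * Ts_inner/6
    end
    @. xnext = xcur
    return nothing
end

# Forward Euler with the same supersampling convention:
function euler_step!(xnext, fc!, x, u, d, p, Ts, supersample)
    Ts_inner = Ts / supersample
    k, xcur = similar(x), copy(x)
    for _ in 1:supersample
        fc!(k, xcur, u, d, p)
        @. xcur = xcur + k * Ts_inner
    end
    @. xnext = xcur
    return nothing
end

# Toy continuous dynamics ẋ = -x + u (illustrative only):
function fc_toy!(ẋ, x, u, d, p)
    @. ẋ = -x + u
    return nothing
end

x, u, d, p = [1.0], [0.0], Float64[], nothing
xnext = similar(x)
rk4_step!(xnext, fc_toy!, x, u, d, p, 0.1, 2)    # two RK4 sub-steps of 0.05 s
euler_step!(xnext, fc_toy!, x, u, d, p, 0.1, 2)  # cheaper, but first-order accurate
```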