diff --git a/src/ExtendedStateSpace.jl b/src/ExtendedStateSpace.jl
index e1bf13ae..8222224b 100644
--- a/src/ExtendedStateSpace.jl
+++ b/src/ExtendedStateSpace.jl
@@ -654,7 +654,15 @@ Partition `P` into an [`ExtendedStateSpace`](@ref).
 """
 function partition(P::AbstractStateSpace; u=nothing, y=nothing, w = nothing,
-    z = nothing
+    z = nothing,
+    B1 = nothing,
+    B2 = nothing,
+    C1 = nothing,
+    C2 = nothing,
+    D11 = nothing,
+    D12 = nothing,
+    D21 = nothing,
+    D22 = nothing,
 )
     if w === nothing
         w = setdiff(1:P.nu, u)
@@ -672,8 +680,16 @@ function partition(P::AbstractStateSpace; u=nothing, y=nothing,
     y = vcat(y)
     z = vcat(z)
     w = vcat(w)
-    ss(P.A, P.B[:, w], P.B[:, u], P.C[z, :], P.C[y, :],
-        P.D[z, w], P.D[z, u], P.D[y, w], P.D[y, u], P.timeevol)
+    ss(P.A,
+        B1 === nothing ? P.B[:, w] : B1,
+        B2 === nothing ? P.B[:, u] : B2,
+        C1 === nothing ? P.C[z, :] : C1,
+        C2 === nothing ? P.C[y, :] : C2,
+        D11 === nothing ? P.D[z, w] : D11,
+        D12 === nothing ? P.D[z, u] : D12,
+        D21 === nothing ? P.D[y, w] : D21,
+        D22 === nothing ? P.D[y, u] : D22,
+        P.timeevol)
 end
 
 """
diff --git a/src/RobustAndOptimalControl.jl b/src/RobustAndOptimalControl.jl
index f06c6da1..92ad34ff 100644
--- a/src/RobustAndOptimalControl.jl
+++ b/src/RobustAndOptimalControl.jl
@@ -28,9 +28,6 @@ using DescriptorSystems: dss
 
 using ChainRulesCore
 
-export show_construction, vec2sys
-include("utils.jl")
-
 export dss, hinfnorm2, linfnorm2, h2norm, hankelnorm, nugap, νgap, baltrunc2, stab_unstab, baltrunc_unstab, baltrunc_coprime
 include("descriptor.jl")
 
@@ -83,4 +80,7 @@ include("mimo_diskmargin.jl")
 export nu_reduction, nu_reduction_recursive
 include("mcm_nugap.jl")
 
+export show_construction, vec2sys
+include("utils.jl")
+
 end
diff --git a/src/lqg.jl b/src/lqg.jl
index af60697b..8a5baba9 100644
--- a/src/lqg.jl
+++ b/src/lqg.jl
@@ -68,6 +68,11 @@ Several functions are defined for instances of `LQGProblem`
 - [`observer_controller`](@ref)
 
 A video tutorial on how to use the LQG interface is available [here](https://youtu.be/NuAxN1mGCPs)
+
+## Introduction of references
+The most principled way of introducing references is to add references as measured inputs to the extended statespace model, and to let the performance output `z` be the differences between the references and the outputs for which references are provided.
+
+A less cumbersome way is to not consider references when constructing the `LQGProblem`, and instead pass the `z` keyword argument to [`extended_controller`](@ref) in order to obtain a closed-loop system from state references to controlled outputs, and use some form of inverse of the DC gain of this system (or one of its subsystems) to pre-compensate the reference input.
 """
 struct LQGProblem
     sys::ExtendedStateSpace
@@ -266,15 +271,15 @@ function extended_controller(K::AbstractStateSpace)
 end
 
 """
-    extended_controller(l::LQGProblem, L = lqr(l), K = kalman(l))
+    extended_controller(l::LQGProblem, L = lqr(l), K = kalman(l); z = nothing)
 
-Returns an expression for the controller that is obtained when state-feedback `u = -L(xᵣ-x̂)` is combined with a Kalman filter with gain `K` that produces state estimates x̂. The controller is an instance of `ExtendedStateSpace` where `C2 = -L, D21 = L` and `B2 = K`.
+Returns a statespace system representing the controller that is obtained when state-feedback `u = L(xᵣ-x̂)` is combined with a Kalman filter with gain `K` that produces state estimates x̂. The controller is an instance of `ExtendedStateSpace` where `C2 = -L, D21 = L` and `B2 = K`.
 
-The returned system has *inputs* `[xᵣ; y]` and outputs the control signal `u`. If a reference model `R` is used to generate state references `xᵣ`, the controller from `e = ry - y -> u` is given by
+The returned system has *inputs* `[xᵣ; y]` and outputs the control signal `u`. If a reference model `R` is used to generate state references `xᵣ`, the controller from `(ry, y) -> u` where `ry - y = e` is given by
 ```julia
 Ce = extended_controller(l)
-Ce = named_ss(Ce; x = :xC, y = :u, u = [R.y; :y^l.ny]) # Name the inputs of Ce the same as the outputs of `R`.
-connect([R, Ce]; u1 = R.y, y1 = R.y, w1 = [:ry^l.ny, :y^l.ny], z1=[:u])
+Ce = named_ss(ss(Ce); x = :xC, y = :u, u = [R.y; :y^l.ny]) # Name the inputs of Ce the same as the outputs of `R`.
+connect([R, Ce]; u1 = R.y, y1 = R.y, w1 = [R.u; :y^l.ny], z1=[:u])
 ```
 
 Since the negative part of the feedback is built into the returned system, we have
@@ -283,20 +288,31 @@ C = observer_controller(l)
 Ce = extended_controller(l)
 system_mapping(Ce) == -C
 ```
+
+Please note that without the reference pre-filter, the DC gain from references to controlled outputs may not be identity. If a vector of output indices is provided through the keyword argument `z`, the closed-loop system from state reference `xᵣ` to outputs `z` is returned as a second return argument. The inverse of the DC-gain of this closed-loop system may be useful to compensate for the DC-gain of the controller.
 """
-function extended_controller(l::LQGProblem, L = lqr(l), K = kalman(l))
-    A,B,C,D = ssdata(system_mapping(l))
+function extended_controller(l::LQGProblem, L::AbstractMatrix = lqr(l), K::AbstractMatrix = kalman(l); z::Union{Nothing, AbstractVector} = nothing)
+    P = system_mapping(l)
+    A,B,C,D = ssdata(P)
     Ac = A - B*L - K*C + K*D*L # 8.26b
-    nx = l.nx
+    (; nx, nu, ny) = P
     B1 = zeros(nx, nx) # dynamics not affected by r
     # l.D21 does not appear here, see comment in kalman
    B2 = K # input y
     D21 = L # L*xᵣ # should be D21?
     C2 = -L # - L*x̂
     C1 = zeros(0, nx)
-    ss(Ac, B1, B2, C1, C2; D21, Ts = l.timeevol)
+    Ce0 = ss(Ac, B1, B2, C1, C2; D21, Ts = l.timeevol)
+    if z === nothing
+        return Ce0
+    end
+    r = 1:nx
+    Ce = ss(Ce0)
+    cl = feedback(P, Ce, Z1 = z, Z2=[], U2=(1:ny) .+ nx, Y1 = :, W2=r, W1=[])
+    Ce0, cl
 end
 
+
 """
     observer_controller(l::LQGProblem, L = lqr(l), K = kalman(l))
@@ -339,15 +355,31 @@ end
 
 Return the feedforward controller ``C_{ff}`` that maps references to plant inputs: ``u = C_{fb}y + C_{ff}r``
 
+The following should hold
+```
+Cff = RobustAndOptimalControl.ff_controller(l)
+Cfb = observer_controller(l)
+Gcl = feedback(system_mapping(l), Cfb) * Cff # Note the comma in feedback, P/(I + PC) * Cff
+dcgain(Gcl) ≈ I # Or some I-like non-square matrix
+```
+
+Note, if [`extended_controller`](@ref) is used, the DC-gain compensation above cannot be used. The [`extended_controller`](@ref) assumes that the references enter like `u = L(xᵣ - x̂)`.
+
 See also [`observer_controller`](@ref).
 """
-function ff_controller(l::LQGProblem, L = lqr(l), K = kalman(l))
+function ff_controller(l::LQGProblem, L = lqr(l), K = kalman(l); comp_dc = true)
     Ae,Be,Ce,De = ssdata(system_mapping(l, identity))
     Ac = Ae - Be*L - K*Ce + K*De*L # 8.26c
-    Bc = Be*static_gain_compensation(l, L)
     Cc = L
     Dc = 0
-    return 1 - ss(Ac, Bc, Cc, Dc, l.timeevol)
+    if comp_dc
+        Lr = static_gain_compensation(l, L)
+        Bc = Be*Lr
+        return Lr - ss(Ac, Bc, Cc, Dc, l.timeevol)
+    else
+        Bc = Be
+        return I(size(Cc, 1)) - ss(Ac, Bc, Cc, Dc, l.timeevol)
+    end
 end
 
 """
@@ -355,7 +387,7 @@ end
 
 Closed-loop system as defined in Glad and Ljung eq. 8.28. Note, this definition of closed loop is not the same as lft(P, K), which has B1 instead of B2 as input matrix. Use `lft(l)` to get the system from disturbances to controlled variables `w -> z`.
 
-The return value will be the closed loop from reference only, other disturbance signals (B1) are ignored. See [`feedback`](@ref) for a more advanced option.
+The return value will be the closed loop from filtered reference only, other disturbance signals (B1) are ignored. See [`feedback`](@ref) for a more advanced option.
 
 This function assumes that the control signal is computed as `u = r̃ - Lx̂` (not `u = L(xᵣ - x̂)`), i.e., the feedforward signal `r̃` is added directly to the plant input. `r̃` must thus be produced by an inverse-like model that takes state references and outputs the feedforward signal. Use `static_gain_compensation` to adjust the gain from references acting on the input B2, `dcgain(closedloop(l))*static_gain_compensation(l) ≈ I`
 """
diff --git a/src/named_systems2.jl b/src/named_systems2.jl
index bef58fb8..174e7019 100644
--- a/src/named_systems2.jl
+++ b/src/named_systems2.jl
@@ -401,7 +401,7 @@ function ControlSystemsBase.feedback(s1::NamedStateSpace{T}, s2::NamedStateSpace
     @assert sys.nu == length(W1) + length(W2)
     @assert sys.ny == length(Z1) + length(Z2)
     @assert sys.nx == length(x1)
-    nsys = NamedStateSpace{T,typeof(sys)}(sys, x1, s1.u[[W1; W2]], s1.y[[Z1; Z2]], "")
+    nsys = NamedStateSpace{T,typeof(sys)}(sys, x1, [s1.u[W1]; s2.u[W2]], [s1.y[Z1]; s2.y[Z2]], "")
     sminreal(nsys)
 end
 
@@ -697,30 +697,51 @@ end
 
 function partition(P::NamedStateSpace; u=nothing, y=nothing, w = nothing,
-    z = nothing
+    z = nothing,
+    B1 = nothing,
+    B2 = nothing,
+    C1 = nothing,
+    C2 = nothing,
+    D11 = nothing,
+    D12 = nothing,
+    D21 = nothing,
+    D22 = nothing,
 )
     if w === nothing
-        w = names2indices(setdiff(P.u, u), P.u)
-        u = names2indices(u, P.u)
+        inds = names2indices(u, P.u)
+        w = setdiff(1:P.nu, inds)
+        u = inds
     end
     if z === nothing
-        z = names2indices(setdiff(P.y, y), P.y)
-        y = names2indices(y, P.y)
+        inds = names2indices(y, P.y)
+        z = setdiff(1:P.ny, inds)
+        y = inds
     end
     if u === nothing
-        u = names2indices(setdiff(P.u, w), P.u)
-        w = names2indices(w, P.u)
+        inds = names2indices(w, P.u)
+        u = setdiff(1:P.nu, inds)
+        w = inds
     end
     if y === nothing
-        y = names2indices(setdiff(P.y, z), P.y)
-        z = names2indices(z, P.y)
+        inds = names2indices(z, P.y)
+        y = setdiff(1:P.ny, inds)
+        z = inds
     end
     u = vcat(u)
     y = vcat(y)
     z = vcat(z)
     w = vcat(w)
-    ss(P.A, P.B[:, w], P.B[:, u], P.C[z, :], P.C[y, :],
-        P.D[z, w], P.D[z, u], P.D[y, w], P.D[y, u], P.timeevol)
+    ss(P.A,
+        B1 === nothing ? P.B[:, w] : B1,
+        B2 === nothing ? P.B[:, u] : B2,
+        C1 === nothing ? P.C[z, :] : C1,
+        C2 === nothing ? P.C[y, :] : C2,
+        D11 === nothing ? P.D[z, w] : D11,
+        D12 === nothing ? P.D[z, u] : D12,
+        D21 === nothing ? P.D[y, w] : D21,
+        D22 === nothing ? P.D[y, u] : D22,
+        P.timeevol
+    )
 end
 
 function CS.c2d(s::NamedStateSpace{Continuous}, Ts::Real, method::Symbol = :zoh, args...;
diff --git a/src/utils.jl b/src/utils.jl
index fe7bd267..4569e8d3 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -48,6 +48,24 @@ function show_construction(io::IO, sys::LTISystem; name = "temp", letb = true)
     nothing
 end
 
+function show_construction(io::IO, sys::NamedStateSpace; name = "temp", letb = true)
+    # sys = StateSpace(sys)
+    letb && println(io, "$name = let")
+    prestr = letb ? "    " : ""
+    println(io, prestr*"$(name)A = ", sys.A)
+    println(io, prestr*"$(name)B = ", sys.B)
+    println(io, prestr*"$(name)C = ", sys.C)
+    println(io, prestr*"$(name)D = ", sys.D)
+    letb || print(io, "$name = ")
+    if isdiscrete(sys)
+        println(io, prestr*"named_ss(ss($(name)A, $(name)B, $(name)C, $(name)D), $(sys.Ts), x=$(sys.x), u=$(sys.u), y=$(sys.y))")
+    else
+        println(io, prestr*"named_ss(ss($(name)A, $(name)B, $(name)C, $(name)D), x=$(sys.x), u=$(sys.u), y=$(sys.y))")
+    end
+    letb && println(io, "end")
+    nothing
+end
+
 function Base.vec(sys::LTISystem)
     [vec(sys.A); vec(sys.B); vec(sys.C); vec(sys.D)]
 end
diff --git a/test/test_extendedstatespace.jl b/test/test_extendedstatespace.jl
index 95142768..509ec170 100644
--- a/test/test_extendedstatespace.jl
+++ b/test/test_extendedstatespace.jl
@@ -1,6 +1,7 @@
 using RobustAndOptimalControl
 using ControlSystemsBase
 using ControlSystemsBase: balance_statespace
+using Test
 
 G = ssrand(3,4,5)
 
diff --git a/test/test_lqg.jl b/test/test_lqg.jl
index a5d7975c..9d5903cf 100644
--- a/test/test_lqg.jl
+++ b/test/test_lqg.jl
@@ -1,4 +1,4 @@
-using RobustAndOptimalControl, ControlSystemsBase, LinearAlgebra
+using RobustAndOptimalControl, ControlSystemsBase, LinearAlgebra, Test
 
 # NOTE: Glad, Ljung chap 9 contains several numerical examples that can be used as test cases
 
@@ -402,4 +402,136 @@ predicted_costf = dot([x0; zeros(nu)], QNf, [x0; zeros(nu)])
 @test predicted_costf ≈ predicted_cost
 
 # @show (actual_cost - predicted_cost) / actual_cost
-@test actual_cost ≈ predicted_cost rtol=1e-10
\ No newline at end of file
+@test actual_cost ≈ predicted_cost rtol=1e-10
+
+## Double mass model with output penalty
+
+# One input, two z outputs
+P = DemoSystems.double_mass_model(outputs = 1:4)
+# Pe = partition(P, y = 1:2, u = 1)
+Pe = ExtendedStateSpace(P, C1 = P.C[3:4, :], C2 = P.C[1:1, :], B1 = P.C[[2,4], :]')
+
+Q1 = 1.0I(Pe.nz)
+Q2 = 0.1I(Pe.nu)
+R1 = 1.0I(Pe.nw)
+R2 = 0.1I(Pe.ny)
+
+G = LQGProblem(Pe, Q1, Q2, R1, R2)
+Cfb = observer_controller(G)
+Ce = extended_controller(G)
+
+@test Cfb.nu == Pe.ny
+@test Cfb.ny == Pe.nu
+
+
+Cff = RobustAndOptimalControl.ff_controller(G) # TODO: need to add argument that determines whether to use state references or references of z
+@test Cff.nu == Pe.nz
+@test Cff.ny == Pe.nu
+
+Gcl2 = feedback(system_mapping(Pe), Cfb) * Cff
+@test size(Gcl2) == (1, 2)
+@test isstable(Gcl2)
+@test dcgain(Gcl2) ≈ [1.0 0] atol=1e-6
+
+
+
+## One input, one z output
+P = DemoSystems.double_mass_model(outputs = 1:4)
+# Pe = partition(P, y = 1:2, u = 1)
+Pe = ExtendedStateSpace(P, C1 = P.C[3:3, :], C2 = P.C[1:1, :], B1 = P.C[[2,4], :]')
+
+Q1 = 1.0I(Pe.nz)
+Q2 = 0.1I(Pe.nu)
+R1 = 1.0I(Pe.nw)
+R2 = 0.1I(Pe.ny)
+
+G = LQGProblem(Pe, Q1, Q2, R1, R2)
+Cfb = observer_controller(G)
+Ce = extended_controller(G)
+
+@test Cfb.nu == Pe.ny
+@test Cfb.ny == Pe.nu
+
+
+Cff = RobustAndOptimalControl.ff_controller(G)
+@test Cff.nu == Pe.nz
+@test Cff.ny == Pe.nu
+
+Gcl = feedback(system_mapping(Pe) * Cfb)
+@test size(Gcl) == (1, 1)
+@test isstable(Gcl)
+@test dcgain(Gcl)[] ≈ 1.0
+
+Gcl2 = feedback(system_mapping(Pe), Cfb) * Cff
+@test size(Gcl2) == (1, 1)
+@test isstable(Gcl2)
+@test dcgain(Gcl2)[] ≈ 1.0
+
+## Multibody cartpole tests
+
+lsys = let
+    lsysA = [0.0 0.0 0.0 1.0; 0.0 0.0 1.0 0.0; 4.474102070258828 0.0 0.0 0.0; 39.683507165892394 0.0 0.0 0.0]
+    lsysB = [0.0; 0.0; 0.8415814526118872; 2.3385955754360115;;]
+    lsysC = [0.0 1.0 0.0 0.0; 1.0 0.0 0.0 0.0; 0.0 0.0 1.0 0.0; 0.0 0.0 0.0 1.0]
+    lsysD = [0.0; 0.0; 0.0; 0.0;;]
+    named_ss(ss(lsysA, lsysB, lsysC, lsysD), x=[:revolute₊phi, :prismatic₊s, :prismatic₊v, :revolute₊w], u=[Symbol("u(t)")], y=[Symbol("x(t)"), Symbol("phi(t)"), Symbol("v(t)"), Symbol("w(t)")])
+end
+
+C = lsys.C
+Q1 = Diagonal([10, 10, 10, 1])
+Q2 = Diagonal([0.1])
+
+R1 = lsys.B*Diagonal([1])*lsys.B'
+R2 = Diagonal([0.01, 0.01])
+Pn = lsys[[:x, :phi], :]
+P = ss(Pn)
+
+lqg = LQGProblem(P, Q1, Q2, R1, R2)
+
+# Below we test that the closed-loop DC gain from references to cart position is 1
+Ce = extended_controller(lqg)
+
+
+# Method 1: compute DC gain compensation using ff_controller; this is used as a reference for the others. I feel uneasy about the (undocumented) comp_dc = false argument here; ideally, a more generally applicable ff_controller function would be implemented that can handle both state and output references etc.
+dc_gain_compensation = dcgain(RobustAndOptimalControl.ff_controller(lqg, comp_dc = false))[]
+
+# Method 2, using the observer controller and static_gain_compensation
+Cfb = observer_controller(lqg)
+@test inv(dcgain((feedback(P, Cfb)*static_gain_compensation(lqg))[1,2]))[] ≈ dc_gain_compensation rtol=1e-8
+
+# Method 3, using the observer controller and the ff_controller
+cl = feedback(system_mapping(lqg), observer_controller(lqg))*RobustAndOptimalControl.ff_controller(lqg, comp_dc = true)
+@test dcgain(cl)[1,2] ≈ 1 rtol=1e-8
+
+# Method 4: Build compensation into R and compute the closed-loop DC gain, should be 1
+R = named_ss(ss(dc_gain_compensation*I(4)), "R") # Reference filter
+Ce = named_ss(ss(Ce); x = :xC, y = :u, u = [R.y; :y^lqg.ny])
+
+Cry = RobustAndOptimalControl.connect([R, Ce]; u1 = R.y, y1 = R.y, w1 = [R.u; :y^lqg.ny], z1=[:u])
+
+connections = [
+    :u => :u
+    [:x, :phi] .=> [:y1, :y2]
+]
+cl = RobustAndOptimalControl.connect([lsys, Cry], connections; w1 = R.u, z1 = [:x, :phi])
+@test inv(dcgain(cl)[1,2]) ≈ 1 rtol=1e-8
+
+
+# Method 5: close the loop manually with reference as input and position as output
+
+R = named_ss(ss(I(4)), "R") # Reference filter, used for signal names only
+Ce = named_ss(ss(extended_controller(lqg)); x = :xC, y = :u, u = [:Ry^4; :y^lqg.ny])
+cl = feedback(lsys, Ce, z1 = [:x], z2=[], u2=:y^2, y1 = [:x, :phi], w2=[:Ry2], w1=[])
+@test inv(dcgain(cl)[]) ≈ dc_gain_compensation rtol=1e-8
+
+cl = feedback(lsys, Ce, z1 = [:x, :phi], z2=[], u2=:y^2, y1 = [:x, :phi], w2=[:Ry2], w1=[])
+@test pinv(dcgain(cl)) ≈ [dc_gain_compensation 0] atol=1e-8
+
+# Method 6: use the z argument to extended_controller to compute the closed-loop TF
+
+Ce, cl = extended_controller(lqg, z=[1, 2])
+@test pinv(dcgain(cl)[1,2]) ≈ dc_gain_compensation atol=1e-8
+
+Ce, cl = extended_controller(lqg, z=[1])
+@test pinv(dcgain(cl)[1,2]) ≈ dc_gain_compensation atol=1e-8
+
diff --git a/test/test_named_systems2.jl b/test/test_named_systems2.jl
index 3ae01138..e786fa7e 100644
--- a/test/test_named_systems2.jl
+++ b/test/test_named_systems2.jl
@@ -16,6 +16,8 @@ s_autoname = named_ss(G1, :G)
 @test s_autoname.y == [:Gy]
 @test s_autoname.u == [:Gu]
 
+@test_nowarn RobustAndOptimalControl.show_construction(s_autoname)
+
 @test ControlSystemsBase.state_names(s_autoname) == string.(s_autoname.x)
 @test ControlSystemsBase.input_names(s_autoname, 1) == string.(s_autoname.u[])
 @test_throws BoundsError ControlSystemsBase.output_names(s_autoname, 2)
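
Usage sketch (not part of the patch): the snippet below mirrors the double-mass example added to `test/test_lqg.jl` and shows how the pieces introduced above fit together. The plant, weights, and output selection are illustrative only; the only new API used is the `z` keyword of `extended_controller` and the `comp_dc` keyword of `ff_controller`.

```julia
using RobustAndOptimalControl, ControlSystemsBase, LinearAlgebra

# Plant and partitioning borrowed from the new tests: z is plant output 3, y is output 1
P  = DemoSystems.double_mass_model(outputs = 1:4)
Pe = ExtendedStateSpace(P, C1 = P.C[3:3, :], C2 = P.C[1:1, :], B1 = P.C[[2,4], :]')

Q1 = 1.0I(Pe.nz); Q2 = 0.1I(Pe.nu)  # illustrative weights
R1 = 1.0I(Pe.nw); R2 = 0.1I(Pe.ny)
l  = LQGProblem(Pe, Q1, Q2, R1, R2)

# New z keyword: also return the closed loop from state references xᵣ to the outputs in z.
# Its DC gain can be (pseudo-)inverted to pre-compensate the reference input.
Ce, cl = extended_controller(l, z = [1])
Lr = pinv(dcgain(cl))  # pre-compensation gain, maps an output reference to state references xᵣ

# Alternative route: output feedback plus DC-compensated feedforward (comp_dc = true is the default)
Cfb = observer_controller(l)
Cff = RobustAndOptimalControl.ff_controller(l)
Gcl = feedback(system_mapping(l), Cfb) * Cff
dcgain(Gcl)  # ≈ 1 for this example, as asserted in the new tests
```

The `z` keyword mainly saves the manual `feedback` wiring that Methods 4 and 5 in the new tests perform by hand in order to read off the same DC gain.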