Skip to content

Commit 83b0436

Browse files
committed
Adding global negotiation priority parameter. Adding tests for priorities.
1 parent 2e64d4d commit 83b0436

File tree

6 files changed

+168
-30
lines changed

6 files changed

+168
-30
lines changed

Project.toml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ version = "0.1.0"
77
[deps]
88
AutoHashEquals = "15f4f7f2-30c1-5605-9d31-71845cf9641f"
99
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
10+
ECOS = "e2685f51-7e38-5353-a97d-a921fd2c8199"
1011
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
1112
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
1213
Mango = "5e49fdec-d473-4d14-b295-7bff2fcf1925"
@@ -16,6 +17,7 @@ Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"
1617
[compat]
1718
AutoHashEquals = "2.2.0"
1819
Distributions = "0.25.115"
20+
ECOS = "1.1.3"
1921
JuMP = "1.25.0"
2022
LinearAlgebra = "1.11.0"
2123
Mango = "^0.4"

src/algorithm/admm/consensus_admm.jl

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
export create_consensus_target_reach_admm_coordinator, ADMMConsensusGlobalActor, create_admm_start_with_target
1+
export create_consensus_target_reach_admm_coordinator, ADMMConsensusGlobalActor
22

33

44
@kwdef struct ADMMConsensusGlobalActor <: ADMMGlobalActor
@@ -41,7 +41,3 @@ end
4141
function create_consensus_target_reach_admm_coordinator()
4242
return ADMMGenericCoordinator(global_actor=ADMMConsensusGlobalActor())
4343
end
44-
45-
function create_admm_start_with_target(target::Vector{<:Real})
46-
return ADMMStart(target, length(target))
47-
end

src/algorithm/admm/core.jl

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,14 @@
1-
export ADMMStart, ADMMAnswer, ADMMAnswer, ADMMGlobalActor, ADMMGlobalObjective, ADMMGenericCoordinator
1+
export ADMMStart, ADMMAnswer, ADMMAnswer, ADMMGlobalActor, ADMMGlobalObjective, ADMMGenericCoordinator, create_admm_start
22

33
struct ADMMStart
44
data::Any
55
solution_length::Int
66
end
77

8+
function create_admm_start(data::Any, length::Int)
9+
return ADMMStart(data, length)
10+
end
11+
812
struct ADMMMessage
913
v::Vector{Float64}
1014
ρ::Float64
@@ -49,7 +53,7 @@ end
4953
@kwdef struct ADMMGenericCoordinator <: Coordinator
5054
global_actor::ADMMGlobalActor
5155
ρ::Float64 = 1.0
52-
max_iters::Int64 = 100
56+
max_iters::Int64 = 1000
5357
slack_penalty::Int64 = 100
5458
abs_tol::Float64 = 1e-4
5559
rel_tol::Float64 = 1e-3
@@ -105,11 +109,11 @@ function _start_coordinator(admm::ADMMGenericCoordinator, carrier::Carrier, inpu
105109

106110
# Varying penalty parameter according to B. S. He, H. Yang, and S. L. Wang, “Alternating direction method with self
107111
# adaptive penalty parameters for monotone variational inequalities,”
108-
if r_norm > μ * s_norm
109-
ρ = ρ * τ
110-
elseif s_norm > μ * r_norm
111-
ρ = ρ / τ
112-
end
112+
# if r_norm > μ * s_norm
113+
# ρ = ρ * τ
114+
# elseif s_norm > μ * r_norm
115+
# ρ = ρ / τ
116+
# end
113117

114118
if k == max_iters
115119
@warn "Reached max iterations ($max_iters) without full convergence."
@@ -122,4 +126,4 @@ end
122126
function start_optimization(coordinator::ADMMGenericCoordinator, carrier::Carrier, start_data::ADMMStart, meta::Any)
123127
x,_,_ = _start_coordinator(coordinator, carrier, start_data.data, start_data.solution_length)
124128
return x
125-
end
129+
end

src/algorithm/admm/flex_actor.jl

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,13 +9,13 @@ mutable struct ADMMFlexActor <: DistributedAlgorithm
99
u::Vector{Float64} # upper bounds
1010
C::Matrix{Float64} # coupling matrix
1111
d::Vector{Float64} # coupling RHS
12-
S::Vector{Float64} # prio penalty per sector
12+
S::Vector{<:Real} # prio penalty per sector
1313
x::Vector{Float64} # intermediate result/result
1414
ADMMFlexActor(l::Vector{Float64},
1515
u::Vector{Float64},
1616
C::Matrix{Float64},
1717
d::Vector{Float64},
18-
S::Vector{Float64}) = new(l, u, C, d, S, Vector{Float64}())
18+
S::Vector{<:Real}) = new(l, u, C, d, S, Vector{Float64}())
1919
end
2020

2121
function _create_C_and_d(u::Vector{<:Real})
@@ -44,17 +44,18 @@ function _create_C_and_d(u::Vector{<:Real})
4444
return C, d
4545
end
4646

47-
function create_admm_flex_actor_one_to_many(in_capacity::Real, η::Vector{Float64}, S::Union{Nothing,Vector{<:Real}}=nothing)
47+
function create_admm_flex_actor_one_to_many(in_capacity::Real, η::Vector{Float64}, P::Union{Nothing,Vector{<:Real}}=nothing)
4848
tech_capacity = in_capacity .* η
4949

50-
if isnothing(S)
51-
S = zeros(length(η))
50+
if isnothing(P)
51+
P = zeros(length(η))
5252
end
5353
l = min.(zeros(length(tech_capacity)), tech_capacity)
5454
u = max.(tech_capacity, zeros(length(tech_capacity)))
5555
C, d = _create_C_and_d(tech_capacity)
5656

57-
return ADMMFlexActor(l, u, C, d, S)
57+
# negative P to convert from penalty to reward
58+
return ADMMFlexActor(l, u, C, d, -P)
5859
end
5960

6061
function result(actor::ADMMFlexActor)

src/algorithm/admm/sharing_admm.jl

Lines changed: 31 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,16 @@
1-
export create_sharing_target_distance_admm_coordinator, ADMMSharingGlobalActor, ADMMTargetDistanceObjective
1+
export create_sharing_target_distance_admm_coordinator, ADMMSharingGlobalActor, ADMMTargetDistanceObjective, create_admm_sharing_data
22

33
using JuMP
44
using OSQP
55
using LinearAlgebra
66

77
struct ADMMTargetDistanceObjective <: ADMMGlobalObjective end
88

9+
# currently unused
910
function objective(objective::ADMMTargetDistanceObjective, target::Vector{<:Real}, x, u, z, N)
1011
m = length(z)
1112

13+
#+ objective(actor.global_objective, input, x, u, N*z, N))
1214
# quadratic distance
1315
return sum((target[i] - z[i])^2 for i in 1:m)
1416
end
@@ -17,7 +19,24 @@ struct ADMMSharingGlobalActor <: ADMMGlobalActor
1719
global_objective::ADMMGlobalObjective
1820
end
1921

20-
function z_update(actor::ADMMSharingGlobalActor, input::Any, x, u, z, ρ, N)
22+
struct ADMMSharingData
23+
target::Vector{<:Real}
24+
priorities::Vector{<:Real}
25+
end
26+
27+
function create_admm_sharing_data(target::Vector{<:Real}, priorities::Union{Nothing,Vector{<:Real}}=nothing)
28+
if isnothing(priorities)
29+
priorities = ones(length(target))
30+
end
31+
# negative to turn penalty to priority
32+
return ADMMSharingData(target, -priorities)
33+
end
34+
35+
function create_admm_start(data::ADMMSharingData)
36+
return ADMMStart(data, length(data.target))
37+
end
38+
39+
function z_update(actor::ADMMSharingGlobalActor, input::ADMMSharingData, x, u, z, ρ, N)
2140
x_avg = sum(x) ./ length(x)
2241

2342
m = length(x_avg)
@@ -26,9 +45,16 @@ function z_update(actor::ADMMSharingGlobalActor, input::Any, x, u, z, ρ, N)
2645
set_silent(model)
2746

2847
@variable(model, z[1:m])
29-
@objective(model, Min, (N*ρ/2)*sum((z[i] - u[i] - x_avg[i])^2 for i in 1:m)
30-
+ objective(actor.global_objective, input, x, u, N*z, N))
31-
optimize!(model)
48+
@variable(model, d[1:m] >= 0) # absolute value proxy
49+
50+
for i in 1:m
51+
@constraint(model, d[i] >= input.priorities[i] * (N*z[i] - input.target[i]))
52+
@constraint(model, d[i] >= - input.priorities[i] * (N*z[i] - input.target[i]))
53+
end
54+
55+
@objective(model, Min, (N*ρ/2)*sum((z[i] - u[i] - x_avg[i])^2 for i in 1:m)
56+
+ sum(d[i] for i=1:m))
57+
optimize!(model)
3258

3359
return value.(z)
3460
end

test/sharing_admm_tests.jl

Lines changed: 115 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ end
3434
auto_assign!(complete_topology(3, tid=:custom), container)
3535

3636
activate(container) do
37-
wait(send_message(c, StartCoordinatedDistributedOptimization(create_admm_start_with_target([2.0, 2.0, 3.0])), address(ca)))
37+
wait(send_message(c, StartCoordinatedDistributedOptimization(create_admm_start(create_admm_sharing_data([2.0, 2.0, 3.0]))), address(ca)))
3838
wait(coord_role.task)
3939
end
4040

@@ -70,7 +70,7 @@ end
7070
auto_assign!(complete_topology(3, tid=:custom), container)
7171

7272
activate(container) do
73-
wait(send_message(c, StartCoordinatedDistributedOptimization(create_admm_start_with_target([0.2, 1, -2])), address(ca)))
73+
wait(send_message(c, StartCoordinatedDistributedOptimization(create_admm_start(create_admm_sharing_data([0.2, 1, -2]))), address(ca)))
7474
wait(coord_role.task)
7575
end
7676

@@ -106,13 +106,122 @@ end
106106
auto_assign!(complete_topology(3, tid=:custom), container)
107107

108108
activate(container) do
109-
wait(send_message(c, StartCoordinatedDistributedOptimization(create_admm_start_with_target([1.2, 1, -4])), address(ca)))
109+
wait(send_message(c, StartCoordinatedDistributedOptimization(create_admm_start(create_admm_sharing_data([1.2, 1, -4]))), address(ca)))
110110
wait(coord_role.task)
111111
end
112112

113-
@test isapprox(flex_actor.x, [0.1338046305205991, 0.6710353497283177, -1.3420877350612257], atol=1e-3)
114-
@test isapprox(flex_actor2.x, [0.13380191440490008, 0.6710343387311162, -1.3420863674684713], atol=1e-3)
115-
@test isapprox(flex_actor3.x, [1.1249597637572557, -1.079012246032314e-6, -1.1249611366933858], atol=1e-3)
113+
@test isapprox(flex_actor.x, [0.1551348309982447, 0.7764857848417002, -1.5529580879935843], atol=1e-3)
114+
@test isapprox(flex_actor2.x, [0.1551322355095896, 0.7764795912324005, -1.5529455908199763], atol=1e-3)
115+
@test isapprox(flex_actor3.x, [0.8928358745473691, -1.2344094570466974e-6, -0.8928393030645658], atol=1e-3)
116+
@test handle.got_it
117+
@test handle2.got_it
118+
@test handle3.got_it
119+
end
120+
121+
@testset "TestFlexADMMAWithMang2" begin
122+
container = create_tcp_container("127.0.0.1", 5555)
123+
124+
flex_actor = create_admm_flex_actor_one_to_many(10, [0.1, 0.5, -1])
125+
flex_actor2 = create_admm_flex_actor_one_to_many(15, [0.1, 0.5, -1])
126+
flex_actor3 = create_admm_flex_actor_one_to_many(10, [-1.0, 0.0, 1.0])
127+
coordinator = create_sharing_target_distance_admm_coordinator()
128+
129+
dor = DistributedOptimizationRole(flex_actor, tid=:custom)
130+
dor2 = DistributedOptimizationRole(flex_actor2, tid=:custom)
131+
dor3 = DistributedOptimizationRole(flex_actor3, tid=:custom)
132+
coord_role = CoordinatorRole(coordinator, tid=:custom, include_self=true)
133+
134+
handle = HandleOptimizationResultRole()
135+
handle2 = HandleOptimizationResultRole()
136+
handle3 = HandleOptimizationResultRole()
137+
138+
add_agent_composed_of(container, dor, handle)
139+
c = add_agent_composed_of(container, dor2, handle2)
140+
ca = add_agent_composed_of(container, coord_role, dor3, handle3)
141+
142+
auto_assign!(complete_topology(3, tid=:custom), container)
143+
144+
activate(container) do
145+
wait(send_message(c, StartCoordinatedDistributedOptimization(create_admm_start(create_admm_sharing_data([-4, 0, 6]))), address(ca)))
146+
wait(coord_role.task)
147+
end
148+
149+
@test isapprox(flex_actor.x, [0, 0, 0], atol=1e-3)
150+
@test isapprox(flex_actor2.x, [0, 0, 0], atol=1e-3)
151+
@test isapprox(flex_actor3.x, [-5.6179147130732225, 0, 5.617914767964074], atol=1e-3)
152+
@test handle.got_it
153+
@test handle2.got_it
154+
@test handle3.got_it
155+
end
156+
157+
158+
@testset "TestFlexADMMAWithMangoPriosThird" begin
159+
container = create_tcp_container("127.0.0.1", 5555)
160+
161+
flex_actor = create_admm_flex_actor_one_to_many(10, [0.1, 0.5, -1])
162+
flex_actor2 = create_admm_flex_actor_one_to_many(15, [0.1, 0.5, -1])
163+
flex_actor3 = create_admm_flex_actor_one_to_many(10, [-1.0, 0.0, 1.0])
164+
coordinator = create_sharing_target_distance_admm_coordinator()
165+
166+
dor = DistributedOptimizationRole(flex_actor, tid=:custom)
167+
dor2 = DistributedOptimizationRole(flex_actor2, tid=:custom)
168+
dor3 = DistributedOptimizationRole(flex_actor3, tid=:custom)
169+
coord_role = CoordinatorRole(coordinator, tid=:custom, include_self=true)
170+
171+
handle = HandleOptimizationResultRole()
172+
handle2 = HandleOptimizationResultRole()
173+
handle3 = HandleOptimizationResultRole()
174+
175+
add_agent_composed_of(container, dor, handle)
176+
c = add_agent_composed_of(container, dor2, handle2)
177+
ca = add_agent_composed_of(container, coord_role, dor3, handle3)
178+
179+
auto_assign!(complete_topology(3, tid=:custom), container)
180+
181+
activate(container) do
182+
wait(send_message(c, StartCoordinatedDistributedOptimization(create_admm_start(create_admm_sharing_data([-4, 0, 6], [1,1,5]))), address(ca)))
183+
wait(coord_role.task)
184+
end
185+
186+
@test isapprox(flex_actor.x, [0.00018920502370025307, -6.385117748282918e-5, 0.0001192856645796826], atol=1e-3)
187+
@test isapprox(flex_actor2.x, [0.00018947342781542052, -6.398798323709664e-5, 0.00011955174808086514], atol=1e-3)
188+
@test isapprox(flex_actor3.x, [-6.024383392342817, -8.105452791414886e-7, 6.02438348197089], atol=1e-3)
189+
@test handle.got_it
190+
@test handle2.got_it
191+
@test handle3.got_it
192+
end
193+
194+
@testset "TestFlexADMMAWithMangoPriosFirst" begin
195+
container = create_tcp_container("127.0.0.1", 5555)
196+
197+
flex_actor = create_admm_flex_actor_one_to_many(10, [0.1, 0.5, -1])
198+
flex_actor2 = create_admm_flex_actor_one_to_many(15, [0.1, 0.5, -1])
199+
flex_actor3 = create_admm_flex_actor_one_to_many(10, [-1.0, 0.0, 1.0])
200+
coordinator = create_sharing_target_distance_admm_coordinator()
201+
202+
dor = DistributedOptimizationRole(flex_actor, tid=:custom)
203+
dor2 = DistributedOptimizationRole(flex_actor2, tid=:custom)
204+
dor3 = DistributedOptimizationRole(flex_actor3, tid=:custom)
205+
coord_role = CoordinatorRole(coordinator, tid=:custom, include_self=true)
206+
207+
handle = HandleOptimizationResultRole()
208+
handle2 = HandleOptimizationResultRole()
209+
handle3 = HandleOptimizationResultRole()
210+
211+
add_agent_composed_of(container, dor, handle)
212+
c = add_agent_composed_of(container, dor2, handle2)
213+
ca = add_agent_composed_of(container, coord_role, dor3, handle3)
214+
215+
auto_assign!(complete_topology(3, tid=:custom), container)
216+
217+
activate(container) do
218+
wait(send_message(c, StartCoordinatedDistributedOptimization(create_admm_start(create_admm_sharing_data([-4, 0, 6], [5,1,1]))), address(ca)))
219+
wait(coord_role.task)
220+
end
221+
222+
@test isapprox(flex_actor.x, [0.00018920502370025307, -6.385117748282918e-5, 0.0001192856645796826], atol=1e-3)
223+
@test isapprox(flex_actor2.x, [0.00018947342781542052, -6.398798323709664e-5, 0.00011955174808086514], atol=1e-3)
224+
@test isapprox(flex_actor3.x, [-3.9830282351073403, -7.203077063324391e-7, 3.98302823714599], atol=1e-3)
116225
@test handle.got_it
117226
@test handle2.got_it
118227
@test handle3.got_it

0 commit comments

Comments
 (0)