
Commit 6d056c8

[DEV] Compute optimal control at a given point xt
+ add unit tests + fix mixed indentation in runtests.jl
1 parent b20f820 commit 6d056c8

2 files changed: +132 additions, −96 deletions

src/SDDPoptimize.jl

Lines changed: 33 additions & 2 deletions
The first two hunks below are whitespace-only changes; the third adds the new get_control helper.
@@ -119,7 +119,7 @@ function run_SDDP(model::SPModel,
         iteration_count += 1


-
+

         if (display > 0) && (iteration_count%display==0)
             println("Pass number ", iteration_count,

@@ -133,7 +133,7 @@ function run_SDDP(model::SPModel,
     if (display>0)
         upb = upper_bound(costs)
        V0 = get_bellman_value(model, param, 1, V[1], model.initialState)
-
+
        println("Estimate upper-bound with Monte-Carlo ...")
        upb, costs = estimate_upper_bound(model, param, V, problems)
        println("Estimation of upper-bound: ", round(upb,4),
@@ -453,6 +453,37 @@ function get_lower_bound(model::SPModel, param::SDDPparameters,
 end


+"""
+Compute optimal control at point xt and time t.
+
+Parameters:
+- model (SPModel)
+    Parametrization of the problem
+
+- param (SDDPparameters)
+    Parameters of SDDP
+
+- lpproblem (Vector{JuMP.Model})
+    Linear problems used to approximate the value functions
+
+- t (Int64)
+    Time
+
+- xt (Vector{Float64})
+    Position where to compute optimal control
+
+- xi (Vector{Float64})
+    Alea at time t
+
+Return:
+    Vector{Float64}: optimal control at time t
+
+"""
+function get_control(model::SPModel, param::SDDPparameters, lpproblem::Vector{JuMP.Model}, t, xt, xi)
+    return solve_one_step_one_alea(model, param, lpproblem[t], t, xt, xi)[2].optimal_control
+end
+
+
 """
 Add several cuts to JuMP.Model from a PolyhedralFunction
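A minimal usage sketch of the new helper (not part of the commit): it assumes a model (SPModel) and params (SDDPparameters) built as in test/runtests.jl, the V, problems pair returned by solve_SDDP, and a placeholder noise vector xi1 for stage 1.

    using StochDynamicProgramming

    # Run SDDP first to get the value-function approximations V and the stage subproblems:
    V, problems = solve_SDDP(model, params, false)

    # Optimal control at stage t = 1, from the initial state, for one noise draw
    # (xi1 is a placeholder for whatever noise vector applies at stage 1):
    u1 = StochDynamicProgramming.get_control(model, params, problems, 1,
                                             model.initialState, xi1)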

test/runtests.jl

Lines changed: 99 additions & 94 deletions
Most of the changes below re-indent existing test code, per the commit message; the only functional addition is the new optimal-control test in the 1D case.
@@ -101,18 +101,18 @@ facts("SDDP algorithm: 1D case") do

     # Instantiate parameters of SDDP:
     params = StochDynamicProgramming.SDDPparameters(solver, n_scenarios,
-                                                    epsilon, max_iterations)
+                                                    epsilon, max_iterations)

     V = nothing
     model = StochDynamicProgramming.LinearDynamicLinearCostSPmodel(n_stages,
-                                                                   u_bounds, x0,
-                                                                   cost,
-                                                                   dynamic, laws)
+                                                                   u_bounds, x0,
+                                                                   cost,
+                                                                   dynamic, laws)
     # Generate scenarios for forward simulations:
     noise_scenarios = simulate_scenarios(model.noises,
-                                         (model.stageNumber,
-                                          params.forwardPassNumber,
-                                          model.dimNoises))
+                                         (model.stageNumber,
+                                          params.forwardPassNumber,
+                                          model.dimNoises))

     sddp_costs = 0
     context("Linear cost") do
@@ -133,7 +133,7 @@ facts("SDDP algorithm: 1D case") do
        # Test upper bounds estimation with Monte-Carlo:
        n_simulations = 100
        upb = StochDynamicProgramming.estimate_upper_bound(model, params, V, pbs,
-                                                           n_simulations)[1]
+                                                           n_simulations)[1]
        @fact typeof(upb) --> Float64

        sddp_costs, stocks = forward_simulations(model, params, V, pbs, noise_scenarios)
@@ -143,6 +143,11 @@ facts("SDDP algorithm: 1D case") do
        @fact typeof(ef_cost) --> Float64

        @fact mean(sddp_costs) --> roughly(ef_cost)
+
+        # Test computation of optimal control:
+        aleas = StochDynamicProgramming.extract_vector_from_3Dmatrix(noise_scenarios, 1, 1)
+        opt = StochDynamicProgramming.get_control(model, params, pbs, 1, model.initialState, aleas)
+        @fact typeof(opt) --> Vector{Float64}
     end

     context("Hotstart") do
@@ -157,9 +162,9 @@ facts("SDDP algorithm: 1D case") do
     context("Piecewise linear cost") do
        # Test Piecewise linear costs:
        model = StochDynamicProgramming.PiecewiseLinearCostSPmodel(n_stages,
-                                                                   u_bounds, x0,
-                                                                   [cost],
-                                                                   dynamic, laws)
+                                                                   u_bounds, x0,
+                                                                   [cost],
+                                                                   dynamic, laws)
        set_state_bounds(model, x_bounds)
        V, pbs = solve_SDDP(model, params, false)
     end
@@ -218,14 +223,14 @@ facts("SDDP algorithm: 2D case") do

     # Instantiate parameters of SDDP:
     params = StochDynamicProgramming.SDDPparameters(solver, n_scenarios,
-                                                    epsilon, max_iterations)
+                                                    epsilon, max_iterations)
     V = nothing
     context("Linear cost") do
        # Instantiate a SDDP linear model:
        model = StochDynamicProgramming.LinearDynamicLinearCostSPmodel(n_stages,
-                                                                       u_bounds, x0,
-                                                                       cost,
-                                                                       dynamic, laws)
+                                                                       u_bounds, x0,
+                                                                       cost,
+                                                                       dynamic, laws)
        set_state_bounds(model, x_bounds)


@@ -240,15 +245,15 @@ facts("SDDP algorithm: 2D case") do
        # Test upper bounds estimation with Monte-Carlo:
        n_simulations = 100
        upb = StochDynamicProgramming.estimate_upper_bound(model, params, V, pbs,
-                                                           n_simulations)[1]
+                                                           n_simulations)[1]
        @fact typeof(upb) --> Float64


-        # Test a simulation upon given scenarios:
+        # Test a simulation upon given scenarios:
        noise_scenarios = simulate_scenarios(model.noises,
-                                             (model.stageNumber,
-                                              n_simulations,
-                                              model.dimNoises))
+                                             (model.stageNumber,
+                                              n_simulations,
+                                              model.dimNoises))

        sddp_costs, stocks = forward_simulations(model, params, V, pbs, noise_scenarios)

@@ -345,111 +350,111 @@ facts("SDP algorithm") do
     end

     """Build admissible scenarios for water inflow over the time horizon."""
-    function build_scenarios(n_scenarios::Int64, N_STAGES)
-        scenarios = zeros(n_scenarios, N_STAGES)
+    function build_scenarios(n_scenarios::Int64, N_STAGES)
+        scenarios = zeros(n_scenarios, N_STAGES)

-        for scen in 1:n_scenarios
-            scenarios[scen, :] = (W_MAX-W_MIN)*rand(N_STAGES)+W_MIN
+        for scen in 1:n_scenarios
+            scenarios[scen, :] = (W_MAX-W_MIN)*rand(N_STAGES)+W_MIN
+        end
+        return scenarios
     end
-        return scenarios
-    end

     """Build probability distribution at each timestep based on N scenarios.
-    Return a Vector{NoiseLaw}"""
-    function generate_probability_laws(N_STAGES, N_SCENARIOS)
-        aleas = zeros(N_SCENARIOS, N_STAGES, 1)
-        aleas[:, :, 1] = build_scenarios(N_SCENARIOS, N_STAGES)
+    Return a Vector{NoiseLaw}"""
+    function generate_probability_laws(N_STAGES, N_SCENARIOS)
+        aleas = zeros(N_SCENARIOS, N_STAGES, 1)
+        aleas[:, :, 1] = build_scenarios(N_SCENARIOS, N_STAGES)

-        laws = Vector{NoiseLaw}(N_STAGES)
+        laws = Vector{NoiseLaw}(N_STAGES)

-        # uniform probabilities:
-        proba = 1/N_SCENARIOS*ones(N_SCENARIOS)
+        # uniform probabilities:
+        proba = 1/N_SCENARIOS*ones(N_SCENARIOS)

-        for t=1:N_STAGES
-            aleas_t = reshape(aleas[:, t, :], N_SCENARIOS, 1)'
-            laws[t] = NoiseLaw(aleas_t, proba)
+        for t=1:N_STAGES
+            aleas_t = reshape(aleas[:, t, :], N_SCENARIOS, 1)'
+            laws[t] = NoiseLaw(aleas_t, proba)
+        end
+
+        return laws
     end

-        return laws
-    end
+    N_SCENARIO = 10
+    aleas = generate_probability_laws(TF-1, N_SCENARIO)

-    N_SCENARIO = 10
-    aleas = generate_probability_laws(TF-1, N_SCENARIO)
+    x_bounds = [(VOLUME_MIN, VOLUME_MAX), (VOLUME_MIN, VOLUME_MAX)];
+    u_bounds = [(CONTROL_MIN, CONTROL_MAX), (VOLUME_MIN, VOLUME_MAX)];

-    x_bounds = [(VOLUME_MIN, VOLUME_MAX), (VOLUME_MIN, VOLUME_MAX)];
-    u_bounds = [(CONTROL_MIN, CONTROL_MAX), (VOLUME_MIN, VOLUME_MAX)];
+    x0 = [5, 0]

-    x0 = [5, 0]
+    alea_year = Array([7.0 7.0])

-    alea_year = Array([7.0 7.0])
+    aleas_scen = zeros(2, 1, 1)
+    aleas_scen[:, 1, 1] = alea_year;

-    aleas_scen = zeros(2, 1, 1)
-    aleas_scen[:, 1, 1] = alea_year;
+    modelSDP = StochDynProgModel(TF, N_CONTROLS,
+                                 N_STATES, N_NOISES,
+                                 x_bounds, u_bounds,
+                                 x0, cost_t,
+                                 finalCostFunction, dynamic,
+                                 constraints, aleas);

-    modelSDP = StochDynProgModel(TF, N_CONTROLS,
-                                 N_STATES, N_NOISES,
-                                 x_bounds, u_bounds,
-                                 x0, cost_t,
-                                 finalCostFunction, dynamic,
-                                 constraints, aleas);
+    stateSteps = [1,1];
+    controlSteps = [1,1];
+    monteCarloSize = 2;

-    stateSteps = [1,1];
-    controlSteps = [1,1];
-    monteCarloSize = 2;
+    paramsSDP = StochDynamicProgramming.SDPparameters(modelSDP, stateSteps,
+                                                      controlSteps,
+                                                      infoStruct,
+                                                      "Exact",
+                                                      monteCarloSize);

-    paramsSDP = StochDynamicProgramming.SDPparameters(modelSDP, stateSteps,
-                                                      controlSteps,
-                                                      infoStruct,
-                                                      "Exact",
-                                                      monteCarloSize);
+    context("Compare StochDynProgModel constructors") do

-    context("Compare StochDynProgModel constructors") do
+        modelSDPPiecewise = StochDynamicProgramming.PiecewiseLinearCostSPmodel(TF,
+                                                                               u_bounds, x0,
+                                                                               [cost_t],
+                                                                               dynamic, aleas)
+        set_state_bounds(modelSDPPiecewise, x_bounds)

-        modelSDPPiecewise = StochDynamicProgramming.PiecewiseLinearCostSPmodel(TF,
-                                                                               u_bounds, x0,
-                                                                               [cost_t],
-                                                                               dynamic, aleas)
-        set_state_bounds(modelSDPPiecewise, x_bounds)
+        modelSDPLinear = StochDynamicProgramming.LinearDynamicLinearCostSPmodel(TF,
+                                                                                u_bounds, x0,
+                                                                                cost_t,
+                                                                                dynamic, aleas)

-        modelSDPLinear = StochDynamicProgramming.LinearDynamicLinearCostSPmodel(TF,
-                                                                                u_bounds, x0,
-                                                                                cost_t,
-                                                                                dynamic, aleas)
+        set_state_bounds(modelSDPLinear, x_bounds)

-        set_state_bounds(modelSDPLinear, x_bounds)
+        test_costs = true
+        x = x0
+        u = [1, 1]
+        w = [4]

-        test_costs = true
-        x = x0
-        u = [1, 1]
-        w = [4]
+        for t in 1:TF-1
+            test_costs &= (modelSDPLinear.costFunctions(t,x,u,w)==modelSDP.costFunctions(t,x,u,w))
+            test_costs &= (modelSDPPiecewise.costFunctions[1](t,x,u,w)==modelSDP.costFunctions(t,x,u,w))
+        end

-        for t in 1:TF-1
-            test_costs &= (modelSDPLinear.costFunctions(t,x,u,w)==modelSDP.costFunctions(t,x,u,w))
-            test_costs &= (modelSDPPiecewise.costFunctions[1](t,x,u,w)==modelSDP.costFunctions(t,x,u,w))
+        @fact test_costs --> true
     end

-        @fact test_costs --> true
-    end

+    context("Solve and simulate using SDP") do

-    context("Solve and simulate using SDP") do
+        V_sdp = sdp_optimize(modelSDP, paramsSDP, false);

-        V_sdp = sdp_optimize(modelSDP, paramsSDP, false);
+        @fact size(V_sdp) --> (paramsSDP.stateVariablesSizes..., TF)

-        @fact size(V_sdp) --> (paramsSDP.stateVariablesSizes..., TF)
+        costs_sdp, stocks_sdp, controls_sdp = sdp_forward_simulation(modelSDP,
+                                                                     paramsSDP,
+                                                                     aleas_scen, x0,
+                                                                     V_sdp, true )

-        costs_sdp, stocks_sdp, controls_sdp = sdp_forward_simulation(modelSDP,
-                                                                     paramsSDP,
-                                                                     aleas_scen, x0,
-                                                                     V_sdp, true )
+        @fact size(stocks_sdp) --> (3,1,2)
+        @fact size(controls_sdp) --> (2,1,2)

-        @fact size(stocks_sdp) --> (3,1,2)
-        @fact size(controls_sdp) --> (2,1,2)
+        state_ref = zeros(2)
+        state_ref[1] = stocks_sdp[2,1,1]
+        state_ref[2] = stocks_sdp[2,1,2]

-        state_ref = zeros(2)
-        state_ref[1] = stocks_sdp[2,1,1]
-        state_ref[2] = stocks_sdp[2,1,2]
+    end

 end
-
-end
