
Commit 71f9df7

[FIX] test and stock examples
1 parent cdaf509 commit 71f9df7

File tree (4 files changed: 29 additions, 37 deletions)

examples/multistock-example.jl
examples/stock-example.jl
src/SDDPoptimize.jl
src/objects.jl

examples/multistock-example.jl

Lines changed: 11 additions & 14 deletions
@@ -17,6 +17,7 @@ println("library loaded")
 
 run_sddp = true # false if you don't want to run sddp
 run_sdp = true # false if you don't want to run sdp
+test_simulation = true # false if you don't want to test your strategies
 
 ######## Optimization parameters ########
 # choose the LP solver used.
@@ -26,6 +27,8 @@ const SOLVER = ClpSolver() # require "using Clp"
 # convergence test
 const MAX_ITER = 10 # number of iterations of SDDP
 
+const step = 0.1 # discretization step of SDP
+
 ######## Stochastic Model Parameters ########
 const N_STAGES = 6 # number of stages of the SP problem
 const N_STOCKS = 3 # number of stocks of the SP problem
@@ -40,9 +43,9 @@ const XI_MAX = 0.3 # bounds on the noise
 const XI_MIN = 0
 const N_XI = 3 # discretization of the noise
 
-const r = 1 # bound on cumulative control : \sum_{i=1}^N u_i < rN
+const r = 0.5 # bound on cumulative control : \sum_{i=1}^N u_i < rN
 
-const S0 = [0.5*r for i=1:N_STOCKS] # initial stock
+const S0 = [0.5 for i=1:N_STOCKS] # initial stock
 
 # create law of noises
 proba = 1/N_XI*ones(N_XI) # uniform probabilities
@@ -79,7 +82,6 @@ if run_sddp
 # 10 forward pass, stop at MAX_ITER
 paramSDDP = SDDPparameters(SOLVER,
 passnumber=10,
-gap=0,
 max_iterations=MAX_ITER)
 V, pbs = solve_SDDP(spmodel, paramSDDP, 2) # display information every 2 iterations
 lb_sddp = StochDynamicProgramming.get_lower_bound(spmodel, paramSDDP, V)
@@ -90,7 +92,6 @@ end
 ######### Solving the problem via Dynamic Programming
 if run_sdp
 tic()
-step = 0.1
 println("Starting resolution by SDP")
 stateSteps = [step for i=1:N_STOCKS] # discretization step of the state
 controlSteps = [step for i=1:N_STOCKS] # discretization step of the control
@@ -108,15 +109,11 @@ end
 
 ######### Comparing the solutions on simulated scenarios.
 #srand(1234) # to fix the random seed accross runs
-scenarios = StochDynamicProgramming.simulate_scenarios(xi_laws,1000)
-if run_sddp
-costsddp, stocks_sddp = forward_simulations(spmodel, paramSDDP, pbs, scenarios)
-end
-
-if run_sdp
-costsdp, stocks_sdp, controls = sdp_forward_simulation(spmodel_sdp,paramSDP,scenarios,Vs)
-end
-
-run_sddp && run_sdp && println("Simulated relative gain of sddp over sdp: "
+if run_sddp && run_sdp && test_simulation
+scenarios = StochDynamicProgramming.simulate_scenarios(xi_laws,1000)
+costsddp, stocks = forward_simulations(spmodel, paramSDDP, pbs, scenarios)
+costsdp, states, controls = sdp_forward_simulation(spmodel,paramSDP,scenarios,Vs)
+println("Simulated relative gain of sddp over sdp: "
 *string(round(200*mean(costsdp-costsddp)/abs(mean(costsddp+costsdp)),3))*"%")
+end
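For reference, the gain reported by this block is a symmetric percentage difference between the two simulated cost vectors. A minimal sketch of that computation, assuming `costsddp` and `costsdp` are the per-scenario cost vectors returned by `forward_simulations` and `sdp_forward_simulation` above (the helper name `relative_gain` is only for illustration):

# Illustration only: same formula as in the println of the diff above.
# costsddp, costsdp: vectors of simulated costs, one entry per scenario.
function relative_gain(costsddp, costsdp)
    # Mean cost difference expressed as a percentage of the average of the
    # two mean costs (in absolute value), hence the factor 200 = 2 * 100.
    return 200*mean(costsdp - costsddp)/abs(mean(costsddp + costsdp))
end

println("Simulated relative gain of sddp over sdp: ", round(relative_gain(costsddp, costsdp), 3), "%")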

examples/stock-example.jl

Lines changed: 9 additions & 15 deletions
@@ -17,6 +17,7 @@ println("library loaded")
 run_sddp = true # false if you don't want to run sddp
 run_sdp = true # false if you don't want to run sdp
 run_ef = true # false if you don't want to run extensive formulation
+test_simulation = true # false if you don't want to test your strategies
 
 ######## Optimization parameters ########
 # choose the LP solver used.
@@ -25,13 +26,13 @@ const SOLVER = ClpSolver() # require "using Clp"
 
 # convergence test
 const MAX_ITER = 10 # number of iterations of SDDP
+const step = 0.1 # discretization step of SDP
 
 ######## Stochastic Model Parameters ########
 const N_STAGES = 6 # number of stages of the SP problem
 const COSTS = [sin(3*t)-1 for t in 1:N_STAGES]
 #const COSTS = rand(N_STAGES) # randomly generating deterministic costs
 
-
 const CONTROL_MAX = 0.5 # bounds on the control
 const CONTROL_MIN = 0
 
@@ -70,9 +71,8 @@ if run_sddp
 println("Starting resolution by SDDP")
 # 10 forward pass, stop at MAX_ITER
 paramSDDP = SDDPparameters(SOLVER,
-passnumber=10,
-gap=0,
-max_iterations=MAX_ITER)
+passnumber=10,
+max_iterations=MAX_ITER)
 V, pbs = solve_SDDP(spmodel, paramSDDP, 2) # display information every 2 iterations
 lb_sddp = StochDynamicProgramming.get_lower_bound(spmodel, paramSDDP, V)
 println("Lower bound obtained by SDDP: "*string(round(lb_sddp,4)))
@@ -82,7 +82,6 @@ end
 ######### Solving the problem via Dynamic Programming
 if run_sdp
 tic()
-step = 0.01
 println("Starting resolution by SDP")
 stateSteps = [step] # discretization step of the state
 controlSteps = [step] # discretization step of the control
@@ -100,23 +99,18 @@ if run_ef
 println("Starting resolution by Extensive Formulation")
 value_ef = extensive_formulation(spmodel, paramSDDP)[1]
 println("Value obtained by Extensive Formulation: "*string(round(value_ef,4)))
-println("Relative error of SDP value: "*string(100*round(value_sdp/value_ef-1,4))*"%")
-println("Relative error of SDDP lower bound: "*string(100*round(lb_sddp/value_ef-1,4))*"%")
+println("Relative error of SDP value: "*string(100*round(abs(value_sdp/value_ef)-1,4))*"%")
+println("Relative error of SDDP lower bound: "*string(100*round(abs(lb_sddp/value_ef)-1,4))*"%")
 toc(); println();
 end
 
 ######### Comparing the solutions on simulated scenarios.
 
 #srand(1234) # to fix the random seed accross runs
-scenarios = StochDynamicProgramming.simulate_scenarios(xi_laws,1000)
-if run_sddp
+if run_sddp && run_sdp && test_simulation
+scenarios = StochDynamicProgramming.simulate_scenarios(xi_laws,1000)
 costsddp, stocks = forward_simulations(spmodel, paramSDDP, pbs, scenarios)
-end
-if run_sdp
-costsdp, states, controls =sdp_forward_simulation(spmodel,paramSDP,scenarios,Vs)
-end
-
-if run_sddp && run_sdp
+costsdp, states, controls = sdp_forward_simulation(spmodel,paramSDP,scenarios,Vs)
 println("Simulated relative gain of sddp over sdp: "
 *string(round(200*mean(costsdp-costsddp)/abs(mean(costsddp+costsdp)),3))*"%")
 end
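A quick usage note: the new `test_simulation` flag decouples testing of the computed strategies from solving them, so the 1000-scenario Monte-Carlo comparison can now be switched off on its own. A sketch of the toggles at the top of the script (the particular values are only an example):

# Solve with SDDP, SDP and the extensive formulation,
# but skip the forward simulation of the resulting strategies.
run_sddp = true # false if you don't want to run sddp
run_sdp = true # false if you don't want to run sdp
run_ef = true # false if you don't want to run extensive formulation
test_simulation = false # false if you don't want to test your strategies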

src/SDDPoptimize.jl

Lines changed: 7 additions & 6 deletions
@@ -10,10 +10,10 @@
 
 
 """
-Solve SDDP algorithm and return estimation of bellman functions.
+Solve spmodel using SDDP algorithm and return lower approximation of Bellman functions.
 
 # Description
-Alternate forward and backward phase till the stopping criterion is
+Alternate forward and backward phases untill the stopping criterion is
 fulfilled.
 
 # Arguments
@@ -32,7 +32,7 @@ fulfilled.
 * `problems::Array{JuMP.Model}`:
 the collection of linear problems used to approximate
 each value function
-* `sddp_stats::SDDPStat`:
+* `sddp_stats::SDDPStat`: statistics of the algorithm run
 
 """
 function solve_SDDP(model::SPModel, param::SDDPparameters, verbose=0::Int64)
@@ -46,10 +46,11 @@ function solve_SDDP(model::SPModel, param::SDDPparameters, verbose=0::Int64)
 end
 
 """
-Solve SDDP algorithm with hotstart and return estimation of bellman functions.
+Solve spmodel using SDDP algorithm and return lower approximation of Bellman functions.
+Use hotstart.
 
 # Description
-Alternate forward and backward phase till the stopping criterion is
+Alternate forward and backward phases untill the stopping criterion is
 fulfilled.
 
 # Arguments
@@ -70,7 +71,7 @@ fulfilled.
 * `problems::Array{JuMP.Model}`:
 the collection of linear problems used to approximate
 each value function
-* `sddp_stats::SDDPStat`:
+* `sddp_stats::SDDPStat`: statistics of the algorithm run
 """
 function solve_SDDP(model::SPModel, param::SDDPparameters, V::Vector{PolyhedralFunction}, verbose=0::Int64)
 check_SDDPparameters(model,param,verbose)
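To make the two documented methods concrete, here is a minimal call sketch, assuming `spmodel` and `paramSDDP` are set up as in the examples above; the destructuring of the hotstart call mirrors the first one and is an assumption, not something shown in this diff:

# Cold start: build the lower approximations of the Bellman functions,
# displaying information every 2 iterations (as in the examples).
V, pbs = solve_SDDP(spmodel, paramSDDP, 2)
# Hotstart: resume from the previously computed vector of PolyhedralFunction V.
V, pbs = solve_SDDP(spmodel, paramSDDP, V, 2)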

src/objects.jl

Lines changed: 2 additions & 2 deletions
@@ -159,7 +159,7 @@ type SDDPparameters
 confidence_level::Float64
 # Maximum iterations of the SDDP algorithms:
 maxItNumber::Int64
-# Prune cuts every %% iterations:
+# Define the pruning method
 pruning::Dict{Symbol, Any}
 # Estimate upper-bound every %% iterations:
 compute_ub::Int64
@@ -175,7 +175,7 @@ type SDDPparameters
 function SDDPparameters(solver; passnumber=10, gap=0., confidence=.975,
 max_iterations=20, prune_cuts=0,
 pruning_algo="none",
-compute_ub=-1, montecarlo_final=-1, montecarlo_in_iter=100,
+compute_ub=-1, montecarlo_final=1000, montecarlo_in_iter=100,
 mipsolver=nothing,
 rho0=0., alpha=1.)
 
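For context, the constructor changed here is the keyword one used by the examples; this commit changes the default of `montecarlo_final` from -1 to 1000. A sketch of a call that spells out the new default explicitly (only keywords visible in this diff are used; `SOLVER` and `MAX_ITER` are assumed to be defined as in the examples):

# Equivalent to relying on the new default montecarlo_final=1000.
paramSDDP = SDDPparameters(SOLVER,
                           passnumber=10,
                           max_iterations=MAX_ITER,
                           montecarlo_final=1000)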