Skip to content

Commit 2dabf85

Browse files
committed
Merge pull request #52 from leclere/stock-example
Stock example
2 parents de90a10 + 24d1a89 commit 2dabf85

File tree

7 files changed

+452
-139
lines changed

7 files changed

+452
-139
lines changed

examples/stock-example.jl

Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
1+
# Copyright 2015, Vincent Leclere, Francois Pacaud and Henri Gerard
2+
# This Source Code Form is subject to the terms of the Mozilla Public
3+
# License, v. 2.0. If a copy of the MPL was not distributed with this
4+
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
5+
#############################################################################
6+
# Compare different ways of solving a stock problem :
7+
# Min E [\sum_{t=1}^TF c_t u_t]
8+
# s.t. s_{t+1} = s_t + u_t - xi_t, s_0 given
9+
# 0 <= s_t <= 1
10+
# u_min <= u_t <= u_max
11+
# u_t chosen knowing xi_1 .. xi_t
12+
#############################################################################
13+
# Make the package sources visible when running from the examples folder.
push!(LOAD_PATH, "../src")
using StochDynamicProgramming, JuMP, Clp, Distributions
println("library loaded")

# Toggle which solution methods are run below.
run_sddp = true
run_sdp = true

######## Optimization parameters ########
# LP solver used by SDDP (swap in CPLEX by uncommenting the line below).
const SOLVER = ClpSolver()
# const SOLVER = CplexSolver(CPX_PARAM_SIMDISPLAY=0) # require "using CPLEX"

# convergence test
const MAX_ITER = 100 # maximum number of SDDP iterations

######## Stochastic Model Parameters ########
const N_STAGES = 5
const COSTS = rand(N_STAGES)

const CONTROL_MAX = 0.5
const CONTROL_MIN = 0

const XI_MAX = 0.3
const XI_MIN = 0
const N_XI = 10
# initial stock
const S0 = 0.5

# Discretized noise law: N_XI equally likely support points on [XI_MIN, XI_MAX].
proba = fill(1/N_XI, N_XI)
xi_support = collect(linspace(XI_MIN, XI_MAX, N_XI))
xi_law = NoiseLaw(xi_support, proba)
# Same law at every transition; there are N_STAGES-1 transitions.
xi_laws = NoiseLaw[xi_law for t in 1:N_STAGES-1]
46+
47+
# Stock dynamics: next stock = current stock + control - noise.
function dynamic(t, x, u, xi)
    # x, u and xi are 1-dimensional vectors here.
    next_stock = x[1] + u[1] - xi[1]
    return [next_stock]
end
51+
52+
# Stage cost at time t: the price COSTS[t] applied to the control u[1].
# (State x and noise w do not enter the cost.)
function cost_t(t, x, u, w)
    price = COSTS[t]
    return price * u[1]
end
56+
57+
######## Setting up the SPmodel
# State bounded in [0, 1]; control bounded in [CONTROL_MIN, CONTROL_MAX].
s_bounds = [(0, 1)]
u_bounds = [(CONTROL_MIN, CONTROL_MAX)]
spmodel = LinearDynamicLinearCostSPmodel(N_STAGES, u_bounds, [S0], cost_t, dynamic, xi_laws)
set_state_bounds(spmodel, s_bounds)


######### Solving the problem via SDDP
if run_sddp
    # 2 forward passes per iteration, stop after MAX_ITER iterations.
    paramSDDP = SDDPparameters(SOLVER, 2, 0, MAX_ITER)
    # Display information every 10 iterations.
    V, pbs = solve_SDDP(spmodel, paramSDDP, 10)
    lb_sddp = StochDynamicProgramming.get_lower_bound(spmodel, paramSDDP, V)
    println("Lower bound obtained by SDDP: "*string(lb_sddp))
end

######### Solving the problem via Dynamic Programming
if run_sdp
    stateSteps = [0.01]
    controlSteps = [0.01]
    infoStruct = "HD" # noise at time t is known before taking the decision at time t

    paramSDP = SDPparameters(spmodel, stateSteps, controlSteps, infoStruct)
    Vs = sdp_optimize(spmodel, paramSDP)
    lb_sdp = StochDynamicProgramming.get_value(spmodel, paramSDP, Vs)
    println("Value obtained by SDP: "*string(lb_sdp))
end

######### Comparing the solution
# Evaluate both policies on the same 1000 noise scenarios.
scenarios = StochDynamicProgramming.simulate_scenarios(xi_laws, 1000)
if run_sddp
    costsddp, stocks = forward_simulations(spmodel, paramSDDP, V, pbs, scenarios)
end
if run_sdp
    costsdp, states, stocks = sdp_forward_simulation(spmodel, paramSDP, scenarios, Vs)
end
if run_sddp && run_sdp
    # Average cost gap between the SDDP and SDP policies.
    println(mean(costsddp - costsdp))
end

src/SDDPoptimize.jl

Lines changed: 62 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -94,10 +94,7 @@ function run_SDDP(model::SPModel,
9494

9595
# Build given number of scenarios according to distribution
9696
# law specified in model.noises:
97-
noise_scenarios = simulate_scenarios(model.noises ,
98-
(model.stageNumber-1,
99-
param.forwardPassNumber,
100-
model.dimNoises))
97+
noise_scenarios = simulate_scenarios(model.noises, param.forwardPassNumber)
10198

10299
# Forward pass
103100
costs, stockTrajectories, _ = forward_simulations(model,
@@ -119,13 +116,12 @@ function run_SDDP(model::SPModel,
119116
iteration_count += 1
120117

121118

122-
119+
123120

124121
if (display > 0) && (iteration_count%display==0)
125122
println("Pass number ", iteration_count,
126-
"\tEstimation of upper-bound: ", upper_bound(costs),
127-
"\tLower-bound: ", get_bellman_value(model, param, 1, V[1], model.initialState),
128-
"\tTime: ", toq())
123+
"\tLower-bound: ", round(get_bellman_value(model, param, 1, V[1], model.initialState),4),
124+
"\tTime: ", round(toq(),2),"s")
129125
end
130126

131127
end
@@ -134,14 +130,14 @@ function run_SDDP(model::SPModel,
134130
if (display>0)
135131
upb = upper_bound(costs)
136132
V0 = get_bellman_value(model, param, 1, V[1], model.initialState)
137-
133+
138134
println("Estimate upper-bound with Monte-Carlo ...")
139135
upb, costs = estimate_upper_bound(model, param, V, problems)
140-
println("Estimation of upper-bound: ", upb,
141-
"\tExact lower bound: ", V0,
142-
"\t Gap (\%) < ", (V0-upb)/V0 , " with prob. > 97.5 \%")
136+
println("Estimation of upper-bound: ", round(upb,4),
137+
"\tExact lower bound: ", round(V0,4),
138+
"\t Gap < ", round(100*(upb-V0)/V0) , "\% with prob. > 97.5 \%")
143139
println("Estimation of cost of the solution (fiability 95\%):",
144-
mean(costs), " +/- ", 1.96*std(costs)/sqrt(length(costs)))
140+
round(mean(costs),4), " +/- ", round(1.96*std(costs)/sqrt(length(costs)),4))
145141
end
146142

147143
return V, problems
@@ -432,6 +428,59 @@ function get_bellman_value(model::SPModel, param::SDDPparameters,
432428
end
433429

434430

431+
"""
Return the current lower bound of the problem, i.e. the value of the
approximate Bellman function at the initial state: V_1(x_0).

Parameters:
- model (SPModel)
    Parametrization of the problem

- param (SDDPparameters)
    Parameters of SDDP

- V (Vector{PolyhedralFunction})
    Estimation of the Bellman functions as polyhedral functions

Return:
    current lower bound of the problem (Float64)
"""
function get_lower_bound(model::SPModel, param::SDDPparameters,
                         V::Vector{PolyhedralFunction})
    # The lower bound is the cut approximation evaluated at t=1, x=x_0.
    return get_bellman_value(model, param, 1, V[1], model.initialState)
end
451+
452+
453+
"""
Compute the optimal control at point xt and time t.

Parameters:
- model (SPModel)
    Parametrization of the problem

- param (SDDPparameters)
    Parameters of SDDP

- lpproblem (Vector{JuMP.Model})
    Linear problems used to approximate the value functions

- t (Int64)
    Time

- xt (Vector{Float64})
    Position where to compute optimal control

- xi (Vector{Float64})
    Alea at time t

Return:
    Vector{Float64}: optimal control at time t

"""
function get_control(model::SPModel, param::SDDPparameters, lpproblem::Vector{JuMP.Model}, t, xt, xi)
    # Solve the one-stage subproblem at (t, xt) for the given alea and
    # pull the optimal control out of the returned solution object.
    result = solve_one_step_one_alea(model, param, lpproblem[t], t, xt, xi)
    return result[2].optimal_control
end
482+
483+
435484
"""
436485
Add several cuts to JuMP.Model from a PolyhedralFunction
437486

0 commit comments

Comments
 (0)