1414using StochDynamicProgramming, JuMP, Clp, Distributions
1515println (" library loaded" )
1616
17- run_sddp = true
18- run_sdp = true
17+ run_sddp = true # false if you don't want to run sddp
18+ run_sdp = true # false if you don't want to run sdp
1919
2020# ####### Optimization parameters ########
2121# choose the LP solver used.
2222const SOLVER = ClpSolver ()
2323# const SOLVER = CplexSolver(CPX_PARAM_SIMDISPLAY=0) # require "using CPLEX"
2424
2525# convergence test
26- const MAX_ITER = 100 # maximum iteration of SDDP
26+ const MAX_ITER = 100 # number of iterations of SDDP
2727
2828# ####### Stochastic Model Parameters ########
2929const N_STAGES = 5
@@ -55,33 +55,33 @@ function cost_t(t, x, u, w)
5555end
5656
5757# ####### Setting up the SPmodel
58- s_bounds = [(0 , 1 )]
59- u_bounds = [(CONTROL_MIN, CONTROL_MAX)]
58+ s_bounds = [(0 , 1 )] # bounds on the state
59+ u_bounds = [(CONTROL_MIN, CONTROL_MAX)] # bounds on controls
6060 spmodel = LinearDynamicLinearCostSPmodel (N_STAGES,u_bounds,[S0],cost_t,dynamic,xi_laws)
61- set_state_bounds (spmodel, s_bounds)
61+ set_state_bounds (spmodel, s_bounds) # adding the bounds to the model
6262
6363
6464# ######## Solving the problem via SDDP
6565if run_sddp
66- paramSDDP = SDDPparameters (SOLVER, 2 , 0 , MAX_ITER) # 10 forward pass, stop at MAX_ITER
66+ paramSDDP = SDDPparameters (SOLVER, 2 , 0 , MAX_ITER) # 2 forward passes, stop at MAX_ITER
6767 V, pbs = solve_SDDP (spmodel, paramSDDP, 10 ) # display information every 10 iterations
6868 lb_sddp = StochDynamicProgramming. get_lower_bound (spmodel, paramSDDP, V)
69- println (" Lower bound obtained by SDDP: " * string (lb_sddp))
69+ println (" Lower bound obtained by SDDP: " * string (round ( lb_sddp, 4 ) ))
7070end
7171
7272# ######## Solving the problem via Dynamic Programming
7373if run_sdp
74- stateSteps = [0.01 ]
75- controlSteps = [0.01 ]
74+ stateSteps = [0.01 ] # discretization step of the state
75+ controlSteps = [0.01 ] # discretization step of the control
7676 infoStruct = " HD" # noise at time t is known before taking the decision at time t
7777
7878 paramSDP = SDPparameters (spmodel, stateSteps, controlSteps, infoStruct)
7979 Vs = solve_DP (spmodel,paramSDP, 1 )
80- lb_sdp = StochDynamicProgramming. get_bellman_value (spmodel,paramSDP,Vs)
81- println (" Value obtained by SDP: " * string (lb_sdp ))
80+ value_sdp = StochDynamicProgramming. get_bellman_value (spmodel,paramSDP,Vs)
81+ println (" Value obtained by SDP: " * string (round (value_sdp, 4 ) ))
8282end
8383
84- # ######## Comparing the solution
84+ # ######## Comparing the solutions
8585scenarios = StochDynamicProgramming. simulate_scenarios (xi_laws,1000 )
8686if run_sddp
8787 costsddp, stocks = forward_simulations (spmodel, paramSDDP, pbs, scenarios)
@@ -90,5 +90,5 @@ if run_sdp
9090 costsdp, states, stocks = sdp_forward_simulation (spmodel,paramSDP,scenarios,Vs)
9191end
9292if run_sddp && run_sdp
93- println (mean (costsddp- costsdp))
93+ println (" Relative difference between sddp and sdp: " * string ( 2 * round ( mean (costsddp- costsdp) / mean (costsddp + costsdp), 4 ) ))
9494end