@@ -94,10 +94,7 @@ function run_SDDP(model::SPModel,
9494
9595 # Build given number of scenarios according to distribution
9696 # law specified in model.noises:
97- noise_scenarios = simulate_scenarios (model. noises ,
98- (model. stageNumber- 1 ,
99- param. forwardPassNumber,
100- model. dimNoises))
97+ noise_scenarios = simulate_scenarios (model. noises, param. forwardPassNumber)
10198
10299 # Forward pass
103100 costs, stockTrajectories, _ = forward_simulations (model,
@@ -119,13 +116,12 @@ function run_SDDP(model::SPModel,
119116 iteration_count += 1
120117
121118
122-
119+
123120
124121 if (display > 0 ) && (iteration_count% display== 0 )
125122 println (" Pass number " , iteration_count,
126- " \t Estimation of upper-bound: " , upper_bound (costs),
127- " \t Lower-bound: " , get_bellman_value (model, param, 1 , V[1 ], model. initialState),
128- " \t Time: " , toq ())
123+ " \t Lower-bound: " , round (get_bellman_value (model, param, 1 , V[1 ], model. initialState),4 ),
124+ " \t Time: " , round (toq (),2 )," s" )
129125 end
130126
131127 end
@@ -134,14 +130,14 @@ function run_SDDP(model::SPModel,
134130 if (display> 0 )
135131 upb = upper_bound (costs)
136132 V0 = get_bellman_value (model, param, 1 , V[1 ], model. initialState)
137-
133+
138134 println (" Estimate upper-bound with Monte-Carlo ..." )
139135 upb, costs = estimate_upper_bound (model, param, V, problems)
140- println (" Estimation of upper-bound: " , upb,
141- " \t Exact lower bound: " , V0 ,
142- " \t Gap ( \% ) < " , (V0 - upb)/ V0 , " with prob. > 97.5 \% " )
136+ println (" Estimation of upper-bound: " , round ( upb, 4 ) ,
137+ " \t Exact lower bound: " , round (V0, 4 ) ,
138+ " \t Gap < " , round ( 100 * ( upb- V0 )/ V0) , " \% with prob. > 97.5 \% " )
143139 println (" Estimation of cost of the solution (fiability 95\% ):" ,
144- mean (costs), " +/- " , 1.96 * std (costs)/ sqrt (length (costs)))
140+ round ( mean (costs),4 ), " +/- " , round ( 1.96 * std (costs)/ sqrt (length (costs)), 4 ))
145141 end
146142
147143 return V, problems
@@ -432,6 +428,59 @@ function get_bellman_value(model::SPModel, param::SDDPparameters,
432428end
433429
434430
"""
Compute the current exact lower bound of the problem, i.e. the value of
the polyhedral approximation of the first Bellman function evaluated at
the initial state: V_1(x_0).

Parameters:
- model (SPModel)
    Parametrization of the problem

- param (SDDPparameters)
    Parameters of SDDP

- V (Vector{PolyhedralFunction})
    Estimation of the Bellman functions as polyhedral functions

Return:
    current lower bound of the problem (Float64)
"""
function get_lower_bound(model::SPModel, param::SDDPparameters,
                         V::Vector{PolyhedralFunction})
    # The lower bound is the approximated value function at t=1,
    # evaluated at the (known) initial state.
    return get_bellman_value(model, param, 1, V[1], model.initialState)
end
451+
452+
"""
Compute the optimal control at state xt and time t, given the noise
realization xi.

Parameters:
- model (SPModel)
    Parametrization of the problem

- param (SDDPparameters)
    Parameters of SDDP

- lpproblem (Vector{JuMP.Model})
    Linear problems used to approximate the value functions

- t (Int64)
    Time

- xt (Vector{Float64})
    Position where to compute optimal control

- xi (Vector{Float64})
    Alea at time t

Return:
    Vector{Float64}: optimal control at time t
"""
function get_control(model::SPModel, param::SDDPparameters, lpproblem::Vector{JuMP.Model}, t, xt, xi)
    # Solve the one-step problem at time t and extract the control
    # from the solution record (second element of the returned tuple).
    step_result = solve_one_step_one_alea(model, param, lpproblem[t], t, xt, xi)
    solution = step_result[2]
    return solution.optimal_control
end
482+
483+
435484"""
436485Add several cuts to JuMP.Model from a PolyhedralFunction
437486
0 commit comments