Skip to content

Commit 6dcf04a

Browse files
committed
[FIX] Fix indentations in forwardbackwarditerations.jl
1 parent bbc6f40 commit 6dcf04a

File tree

1 file changed

+128
-128
lines changed

1 file changed

+128
-128
lines changed

src/forwardBackwardIterations.jl

Lines changed: 128 additions & 128 deletions
Original file line numberDiff line numberDiff line change
@@ -122,172 +122,172 @@ end
122122
"""
123123
Add to polyhedral function a cut with shape Vt >= beta + <lambda,.>
124124
125-
Parameters:
126-
- model (SPModel)
127-
Store the problem definition
128-
129-
- t (Int64)
130-
Current time
131-
132-
- Vt (PolyhedralFunction)
133-
Current lower approximation of the Bellman function at time t
134-
135-
- beta (Float)
136-
affine part of the cut to add
125+
Parameters:
126+
- model (SPModel)
127+
Store the problem definition
137128
138-
- lambda (Array{float,1})
139-
subgradient of the cut to add
129+
- t (Int64)
130+
Current time
140131
141-
"""
142-
function add_cut!(model::SPModel,
143-
t::Int64, Vt::PolyhedralFunction,
144-
beta::Float64, lambda::Vector{Float64})
145-
Vt.lambdas = vcat(Vt.lambdas, reshape(lambda, 1, model.dimStates))
146-
Vt.betas = vcat(Vt.betas, beta)
147-
Vt.numCuts += 1
148-
end
132+
- Vt (PolyhedralFunction)
133+
Current lower approximation of the Bellman function at time t
149134
135+
- beta (Float)
136+
affine part of the cut to add
150137
151-
"""
152-
Add a cut to the JuMP linear problem.
138+
- lambda (Array{float,1})
139+
subgradient of the cut to add
153140
154-
Parameters:
155-
- model (SPModel)
156-
Store the problem definition
141+
"""
142+
function add_cut!(model::SPModel,
143+
t::Int64, Vt::PolyhedralFunction,
144+
beta::Float64, lambda::Vector{Float64})
145+
Vt.lambdas = vcat(Vt.lambdas, reshape(lambda, 1, model.dimStates))
146+
Vt.betas = vcat(Vt.betas, beta)
147+
Vt.numCuts += 1
148+
end
157149

158-
- problem (JuMP.Model)
159-
Linear problem used to approximate the value functions
160150

161-
- t (Int)
162-
Time index
151+
"""
152+
Add a cut to the JuMP linear problem.
163153
164-
- beta (Float)
165-
affine part of the cut to add
154+
Parameters:
155+
- model (SPModel)
156+
Store the problem definition
166157
167-
- lambda (Array{float,1})
168-
subgradient of the cut to add
158+
- problem (JuMP.Model)
159+
Linear problem used to approximate the value functions
169160
170-
"""
171-
function add_cut_to_model!(model::SPModel, problem::JuMP.Model,
172-
t::Int64, beta::Float64, lambda::Vector{Float64})
173-
alpha = getVar(problem, :alpha)
174-
x = getVar(problem, :x)
175-
u = getVar(problem, :u)
176-
w = getVar(problem, :w)
177-
@addConstraint(problem, beta + dot(lambda, model.dynamics(t, x, u, w)) <= alpha)
178-
end
161+
- t (Int)
162+
Time index
179163
164+
- beta (Float)
165+
affine part of the cut to add
180166
181-
"""
182-
Make a backward pass of the algorithm
167+
- lambda (Array{float,1})
168+
subgradient of the cut to add
183169
184-
For t:T-1 -> 0, compute a valid cut of the Bellman function
185-
Vt at the state given by stockTrajectories and add them to
186-
the current estimation of Vt.
170+
"""
171+
function add_cut_to_model!(model::SPModel, problem::JuMP.Model,
172+
t::Int64, beta::Float64, lambda::Vector{Float64})
173+
alpha = getVar(problem, :alpha)
174+
x = getVar(problem, :x)
175+
u = getVar(problem, :u)
176+
w = getVar(problem, :w)
177+
@addConstraint(problem, beta + dot(lambda, model.dynamics(t, x, u, w)) <= alpha)
178+
end
187179

188180

189-
Parameters:
190-
- model (SPmodel)
191-
the stochastic problem we want to optimize
181+
"""
182+
Make a backward pass of the algorithm
192183
193-
- param (SDDPparameters)
194-
the parameters of the SDDP algorithm
184+
For t:T-1 -> 0, compute a valid cut of the Bellman function
185+
Vt at the state given by stockTrajectories and add them to
186+
the current estimation of Vt.
195187
196-
- V (Array{PolyhedralFunction})
197-
the current estimation of Bellman's functions
198188
199-
- solverProblems (Array{JuMP.Model})
200-
Linear model used to approximate each value function
189+
Parameters:
190+
- model (SPmodel)
191+
the stochastic problem we want to optimize
201192
202-
- stockTrajectories (Array{Float64,3})
203-
stockTrajectories[t,k,:] is the vector of stock where the cut is computed
204-
for scenario k and time t.
193+
- param (SDDPparameters)
194+
the parameters of the SDDP algorithm
205195
206-
- law (Array{NoiseLaw})
207-
Conditional distributions of perturbation, for each timestep
196+
- V (Array{PolyhedralFunction})
197+
the current estimation of Bellman's functions
208198
209-
- init (Bool)
210-
If specified, then init PolyhedralFunction
199+
- solverProblems (Array{JuMP.Model})
200+
Linear model used to approximate each value function
211201
212-
- updateV (Bool)
213-
Store new cuts in given Polyhedral functions if specified
202+
- stockTrajectories (Array{Float64,3})
203+
stockTrajectories[t,k,:] is the vector of stock where the cut is computed
204+
for scenario k and time t.
214205
206+
- law (Array{NoiseLaw})
207+
Conditional distributions of perturbation, for each timestep
215208
216-
Return:
217-
- V0 (Float64)
218-
Approximation of initial cost
209+
- init (Bool)
210+
If specified, then init PolyhedralFunction
219211
220-
"""
221-
function backward_pass!(model::SPModel,
222-
param::SDDPparameters,
223-
V::Vector{PolyhedralFunction},
224-
solverProblems::Vector{JuMP.Model},
225-
stockTrajectories::Array{Float64, 3},
226-
law,
227-
init=false::Bool)
212+
- updateV (Bool)
213+
Store new cuts in given Polyhedral functions if specified
228214
229-
T = model.stageNumber
230-
nb_forward = size(stockTrajectories)[2]
231215
232-
# Estimation of initial cost:
233-
V0 = 0.
216+
Return:
217+
- V0 (Float64)
218+
Approximation of initial cost
234219
235-
costs::Vector{Float64} = zeros(1)
236-
costs_npass = zeros(Float64, nb_forward)
237-
state_t = zeros(Float64, model.dimStates)
220+
"""
221+
function backward_pass!(model::SPModel,
222+
param::SDDPparameters,
223+
V::Vector{PolyhedralFunction},
224+
solverProblems::Vector{JuMP.Model},
225+
stockTrajectories::Array{Float64, 3},
226+
law,
227+
init=false::Bool)
238228

239-
for t = T-1:-1:1
240-
costs = zeros(law[t].supportSize)
229+
T = model.stageNumber
230+
nb_forward = size(stockTrajectories)[2]
241231

242-
for k = 1:nb_forward
232+
# Estimation of initial cost:
233+
V0 = 0.
243234

244-
subgradient_array = zeros(Float64, model.dimStates, law[t].supportSize)
245-
state_t = extract_vector_from_3Dmatrix(stockTrajectories, t, k)
235+
costs::Vector{Float64} = zeros(1)
236+
costs_npass = zeros(Float64, nb_forward)
237+
state_t = zeros(Float64, model.dimStates)
246238

247-
for w in 1:law[t].supportSize
239+
for t = T-1:-1:1
240+
costs = zeros(law[t].supportSize)
248241

249-
alea_t = collect(law[t].support[:, w])
242+
for k = 1:nb_forward
250243

251-
nextstep = solve_one_step_one_alea(model,
252-
param,
253-
solverProblems[t],
254-
t,
255-
state_t,
256-
alea_t)[2]
257-
subgradient_array[:, w] = nextstep.sub_gradient
258-
costs[w] = nextstep.cost
259-
end
244+
subgradient_array = zeros(Float64, model.dimStates, law[t].supportSize)
245+
state_t = extract_vector_from_3Dmatrix(stockTrajectories, t, k)
260246

261-
# Compute expectation of subgradient:
262-
subgradient = vec(sum(law[t].proba' .* subgradient_array, 2))
263-
# ... and expectation of cost:
264-
costs_npass[k] = dot(law[t].proba, costs)
265-
beta = costs_npass[k] - dot(subgradient, state_t)
247+
for w in 1:law[t].supportSize
266248

249+
alea_t = collect(law[t].support[:, w])
267250

268-
# Add cut to polyhedral function and JuMP model:
269-
if init
270-
V[t] = PolyhedralFunction([beta],
271-
reshape(subgradient,
272-
1,
273-
model.dimStates), 1)
274-
if t > 1
275-
add_cut_to_model!(model, solverProblems[t-1], t, beta, subgradient)
276-
end
251+
nextstep = solve_one_step_one_alea(model,
252+
param,
253+
solverProblems[t],
254+
t,
255+
state_t,
256+
alea_t)[2]
257+
subgradient_array[:, w] = nextstep.sub_gradient
258+
costs[w] = nextstep.cost
259+
end
277260

278-
else
279-
add_cut!(model, t, V[t], beta, subgradient)
280-
if t > 1
281-
add_cut_to_model!(model, solverProblems[t-1], t, beta, subgradient)
282-
end
283-
end
261+
# Compute expectation of subgradient:
262+
subgradient = vec(sum(law[t].proba' .* subgradient_array, 2))
263+
# ... and expectation of cost:
264+
costs_npass[k] = dot(law[t].proba, costs)
265+
beta = costs_npass[k] - dot(subgradient, state_t)
266+
267+
268+
# Add cut to polyhedral function and JuMP model:
269+
if init
270+
V[t] = PolyhedralFunction([beta],
271+
reshape(subgradient,
272+
1,
273+
model.dimStates), 1)
274+
if t > 1
275+
add_cut_to_model!(model, solverProblems[t-1], t, beta, subgradient)
276+
end
277+
278+
else
279+
add_cut!(model, t, V[t], beta, subgradient)
280+
if t > 1
281+
add_cut_to_model!(model, solverProblems[t-1], t, beta, subgradient)
282+
end
283+
end
284284

285-
end
285+
end
286286

287-
if t==1
288-
V0 = mean(costs_npass)
289-
end
287+
if t==1
288+
V0 = mean(costs_npass)
289+
end
290290

291-
end
292-
return V0
293-
end
291+
end
292+
return V0
293+
end

0 commit comments

Comments
 (0)