
Commit 76131b7

Preallocate on LineModel

1 parent 39032ca · commit 76131b7

4 files changed: +122 −21 lines changed

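In short, the commit stores a work vector `xt` on `LineModel` and evaluates ϕ(t) = f(x + td) in place. A condensed before/after sketch of the pattern applied throughout `line_model.jl`:

    # before: allocates a fresh vector x + t*d on every evaluation
    return obj(f.nlp, f.x + t * f.d)

    # after: broadcast into the preallocated buffer, then evaluate
    @. f.xt = f.x + t * f.d
    return obj(f.nlp, f.xt)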

src/linesearch/line_model.jl

Lines changed: 37 additions & 12 deletions
@@ -1,7 +1,7 @@
 import NLPModels: obj, grad, grad!, objgrad!, objgrad, hess
 
 export LineModel
-export obj, grad, derivative, grad!, objgrad!, objgrad, derivative!, hess, redirect!
+export obj, grad, derivative, grad!, objgrad!, objgrad, derivative!, hess, hess!, redirect!
 
 """A type to represent the restriction of a function to a direction.
 Given f : Rⁿ → R, x ∈ Rⁿ and a nonzero direction d ∈ Rⁿ,
@@ -12,17 +12,23 @@ represents the function ϕ : R → R defined by
 
     ϕ(t) := f(x + td).
 """
-mutable struct LineModel <: AbstractNLPModel
+mutable struct LineModel{T} <: AbstractNLPModel
   meta::NLPModelMeta
   counters::Counters
   nlp::AbstractNLPModel
-  x::AbstractVector
-  d::AbstractVector
+  x::Vector{T}
+  d::Vector{T}
+  xt::Vector{T}
 end
 
-function LineModel(nlp::AbstractNLPModel, x::AbstractVector, d::AbstractVector)
-  meta = NLPModelMeta(1, x0 = zeros(eltype(x), 1), name = "LineModel to $(nlp.meta.name))")
-  return LineModel(meta, Counters(), nlp, x, d)
+function LineModel(
+  nlp::AbstractNLPModel,
+  x::AbstractVector{T},
+  d::AbstractVector{T};
+  xt::AbstractVector{T} = similar(x),
+) where {T}
+  meta = NLPModelMeta(1, x0 = zeros(T, 1), name = "LineModel to $(nlp.meta.name))")
+  return LineModel{T}(meta, Counters(), nlp, x, d, xt)
 end
 
 """`redirect!(ϕ, x, d)`
@@ -40,7 +46,8 @@ end
 """
 function obj(f::LineModel, t::AbstractFloat)
   NLPModels.increment!(f, :neval_obj)
-  return obj(f.nlp, f.x + t * f.d)
+  @. f.xt = f.x + t * f.d
+  return obj(f.nlp, f.xt)
 end
 
 """`grad(f, t)` evaluates the first derivative of the `LineModel`
@@ -53,7 +60,8 @@ i.e.,
 """
 function grad(f::LineModel, t::AbstractFloat)
   NLPModels.increment!(f, :neval_grad)
-  return dot(grad(f.nlp, f.x + t * f.d), f.d)
+  @. f.xt = f.x + t * f.d
+  return dot(grad(f.nlp, f.xt), f.d)
 end
 derivative(f::LineModel, t::AbstractFloat) = grad(f, t)
 
@@ -69,7 +77,8 @@ The gradient ∇f(x + td) is stored in `g`.
 """
 function grad!(f::LineModel, t::AbstractFloat, g::AbstractVector)
   NLPModels.increment!(f, :neval_grad)
-  return dot(grad!(f.nlp, f.x + t * f.d, g), f.d)
+  @. f.xt = f.x + t * f.d
+  return dot(grad!(f.nlp, f.xt, g), f.d)
 end
 derivative!(f::LineModel, t::AbstractFloat, g::AbstractVector) = grad!(f, t, g)
 
@@ -86,7 +95,8 @@ The gradient ∇f(x + td) is stored in `g`.
 function objgrad!(f::LineModel, t::AbstractFloat, g::AbstractVector)
   NLPModels.increment!(f, :neval_obj)
   NLPModels.increment!(f, :neval_grad)
-  fx, gx = objgrad!(f.nlp, f.x + t * f.d, g)
+  @. f.xt = f.x + t * f.d
+  fx, gx = objgrad!(f.nlp, f.xt, g)
   return fx, dot(gx, f.d)
 end
 
@@ -112,5 +122,20 @@ i.e.,
 """
 function hess(f::LineModel, t::AbstractFloat)
   NLPModels.increment!(f, :neval_hess)
-  return dot(f.d, hprod(f.nlp, f.x + t * f.d, f.d))
+  @. f.xt = f.x + t * f.d
+  return dot(f.d, hprod(f.nlp, f.xt, f.d))
+end
+
+"""Evaluate the second derivative of the `LineModel`
+
+    ϕ(t) := f(x + td),
+
+i.e.,
+
+    ϕ"(t) = dᵀ∇²f(x + td)d.
+"""
+function hess!(f::LineModel, t::AbstractFloat, Hv::AbstractVector)
+  NLPModels.increment!(f, :neval_hess)
+  @. f.xt = f.x + t * f.d
+  return dot(f.d, hprod!(f.nlp, f.xt, f.d, Hv))
 end
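For context, a minimal usage sketch of the updated type (not part of this commit; it assumes the package defining `LineModel`, e.g. SolverTools.jl, plus ADNLPModels and NLPModels are loaded). The line model now carries a preallocated work vector `xt`, so repeated evaluations of ϕ(t) overwrite `xt` in place instead of allocating `x + t * d` on every call:

    using ADNLPModels, NLPModels, LinearAlgebra

    # Illustrative problem (hypothetical, not from this commit): f(x) = x₁² + 4x₂².
    nlp = ADNLPModel(x -> x[1]^2 + 4 * x[2]^2, ones(2))
    x₀ = nlp.meta.x0
    d = -grad(nlp, x₀)           # a descent direction

    ϕ = LineModel(nlp, x₀, d)    # allocates the work vector ϕ.xt once

    h₀ = obj(ϕ, 0.0)             # f(x₀), computed via ϕ.xt .= ϕ.x .+ 0.0 .* ϕ.d
    slope = grad(ϕ, 0.0)         # ∇f(x₀)ᵀd, reuses ϕ.xt
    curv = hess(ϕ, 0.0)          # dᵀ∇²f(x₀)d, reuses ϕ.xt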

test/runtests.jl

Lines changed: 1 addition & 0 deletions
@@ -8,6 +8,7 @@ using ADNLPModels, NLPModels
 using LinearAlgebra, Logging, Test
 
 include("dummy_solver.jl")
+include("simple_model.jl")
 
 include("test_auxiliary.jl")
 include("test_linesearch.jl")

test/simple_model.jl

Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
+mutable struct SimpleModel <: AbstractNLPModel
+  meta :: AbstractNLPModelMeta
+  counters :: Counters
+end
+
+SimpleModel(n :: Int) = SimpleModel(NLPModelMeta(n, x0=ones(n)), Counters())
+
+function NLPModels.obj(nlp::SimpleModel, x::AbstractVector)
+  increment!(nlp, :neval_obj)
+  sum(xi ^ 4 for xi in x) / 12
+end
+
+function NLPModels.grad!(nlp::SimpleModel, x::AbstractVector, g::AbstractVector)
+  increment!(nlp, :neval_grad)
+  @. g = x ^ 3 / 3
+  g
+end
+
+function NLPModels.objgrad!(nlp::SimpleModel, x::AbstractVector, g::AbstractVector)
+  increment!(nlp, :neval_obj)
+  increment!(nlp, :neval_grad)
+  @. g = x ^ 3 / 3
+  return sum(xi ^ 4 for xi in x) / 12, g
+end
+
+function NLPModels.hprod!(nlp::SimpleModel, x::AbstractVector{T}, v::AbstractVector, Hv::AbstractVector; obj_weight::T = one(T)) where T
+  increment!(nlp, :neval_hprod)
+  @. Hv = obj_weight * x ^ 2 * v
+  Hv
+end
+
+function NLPModels.hess(nlp::SimpleModel, x::AbstractVector{T}; obj_weight::T = one(T)) where T
+  increment!(nlp, :neval_hess)
+  return obj_weight .* diagm(0 => x .^ 2)
+end
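As a quick sanity check of this test model (a sketch under the definitions above, not code from the commit): for x = ones(n), the objective is n/12, every gradient entry is 1/3, and the Hessian is diag(x.^2), which is what the updated `LineModel` tests exercise at t = 0:

    using NLPModels, LinearAlgebra

    n = 4
    nlp = SimpleModel(n)          # x0 = ones(n)
    x = nlp.meta.x0
    g = zeros(n)

    obj(nlp, x)                   # sum(xi^4 for xi in x) / 12 ≈ n / 12
    grad!(nlp, x, g)              # fills g with x.^3 / 3, i.e. 1/3 everywhere
    hprod!(nlp, x, ones(n), g)    # diagonal Hessian times ones: g .== x.^2 .== 1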

test/test_linesearch.jl

Lines changed: 49 additions & 9 deletions
@@ -1,10 +1,11 @@
 @testset "Linesearch" begin
   @testset "LineModel" begin
-    nlp = ADNLPModel(x -> x[1]^2 + 4 * x[2]^2, ones(2))
+    n = 200
+    nlp = SimpleModel(n)
     x = nlp.meta.x0
-    d = -ones(2)
+    d = -ones(n)
     lm = LineModel(nlp, x, d)
-    g = zeros(2)
+    g = zeros(n)
 
     @test obj(lm, 0.0) == obj(nlp, x)
     @test grad(lm, 0.0) == dot(grad(nlp, x), d)
@@ -17,19 +18,19 @@
     @test g == grad(nlp, x + d)
     @test objgrad(lm, 0.0) == (obj(nlp, x), dot(grad(nlp, x), d))
     @test hess(lm, 0.0) == dot(d, Symmetric(hess(nlp, x), :L) * d)
+    @test hess!(lm, 0.0, g) == dot(d, hprod!(nlp, x, d, g))
 
     @test obj(lm, 1.0) == 0.0
     @test grad(lm, 1.0) == 0.0
-    @test hess(lm, 1.0) == 2d[1]^2 + 8d[2]^2
+    @test hess(lm, 1.0) == 0.0
 
-    redirect!(lm, zeros(2), ones(2))
-    @test obj(lm, 0.0) == 0.0
-    @test grad(lm, 0.0) == 0.0
-    @test hess(lm, 0.0) == 10.0
+    @test obj(lm, 0.0) ≈ n / 12
+    @test grad(lm, 0.0) ≈ -n / 3
+    @test hess(lm, 0.0) == n
 
     @test neval_obj(lm) == 5
     @test neval_grad(lm) == 8
-    @test neval_hess(lm) == 3
+    @test neval_hess(lm) == 4
   end
 
   @testset "Armijo-Wolfe" begin
@@ -63,4 +64,43 @@
     @test nbk > 0
     @test nbW > 0
   end
+
+  @testset "Don't allocate" begin
+    n = 200
+    nlp = SimpleModel(n)
+    x = nlp.meta.x0
+    g = zeros(n)
+    d = -40 * ones(n)
+    lm = LineModel(nlp, x, d)
+
+    al1 = @allocated obj(nlp, x)
+    al2 = @allocated obj(lm, 1.0)
+    @test al1 == al2
+
+    al1 = @allocated grad!(nlp, x, g)
+    al2 = @allocated grad!(lm, 1.0, g)
+    @test al1 + 16 == al2
+
+    al1 = @allocated objgrad!(nlp, x, g)
+    al2 = @allocated objgrad!(lm, 1.0, g)
+    al1 = @allocated objgrad!(nlp, x, g)
+    al2 = @allocated objgrad!(lm, 1.0, g)
+    @test al1 + 64 == al2
+
+    al1 = @allocated hprod!(nlp, x, d, g)
+    al2 = @allocated hess!(lm, 1.0, g)
+    @test al1 + 16 == al2
+
+    h₀ = obj(lm, 0.0)
+    slope = grad(lm, 0.0)
+    (t, gg, ht, nbk, nbW) = armijo_wolfe(lm, h₀, slope, g)
+    al = @allocated armijo_wolfe(lm, h₀, slope, g)
+    @test al == (8 + nbk) * 32 + 16
+
+    for bk_max = 0:8
+      (t, gg, ht, nbk, nbW) = armijo_wolfe(lm, h₀, slope, g, bk_max=bk_max)
+      al = @allocated armijo_wolfe(lm, h₀, slope, g, bk_max=bk_max)
+      @test al == min(12.5, 7 + 1.5nbk) * 32 + 208
+    end
+  end
 end
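The new testset compares `@allocated` on the wrapped problem against `@allocated` on the line model, so any extra allocation introduced by `LineModel` beyond a small constant fails the test. A minimal sketch of the pattern (the constants above come from the actual tests; the warm-up call below is my own suggestion, not part of the commit):

    obj(lm, 1.0)                     # warm-up so compilation is not measured (assumption)
    base = @allocated obj(nlp, x)    # allocations of the underlying model
    extra = @allocated obj(lm, 1.0)  # allocations through the line model
    @test base == extra              # the in-place ϕ.xt update adds none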
