
Commit 39032ca

dpo authored and abelsiqueira committed
🤖 Format .jl files
1 parent 436384f commit 39032ca
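
The diff below reads as an automated style pass. For orientation only (the tool and its settings behind this commit are an assumption, not stated on this page), such a pass is typically produced with JuliaFormatter.jl:

# Hypothetical sketch of the kind of formatting run that produces a commit
# like this one; JuliaFormatter.jl and these options are assumptions.
using JuliaFormatter

# Rewrite every .jl file under the repository root in place.
# `whitespace_typedefs = true` matches the `{T <: Real}` spacing seen below.
format("."; margin = 92, whitespace_typedefs = true)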

File tree: 10 files changed (+232 / -151 lines changed)

docs/make.jl

Lines changed: 6 additions & 5 deletions
@@ -5,15 +5,16 @@ makedocs(
   doctest = true,
   linkcheck = true,
   strict = true,
-  format = Documenter.HTML(assets = ["assets/style.css"], prettyurls = get(ENV, "CI", nothing) == "true"),
+  format = Documenter.HTML(
+    assets = ["assets/style.css"],
+    prettyurls = get(ENV, "CI", nothing) == "true",
+  ),
   sitename = "SolverTools.jl",
-  pages = ["Home" => "index.md",
-           "Reference" => "reference.md",
-          ]
+  pages = ["Home" => "index.md", "Reference" => "reference.md"],
 )
 
 deploydocs(
   repo = "github.com/JuliaSmoothOptimizers/SolverTools.jl.git",
   push_preview = true,
-  devbranch = "master"
+  devbranch = "master",
 )

src/auxiliary/blas.jl

Lines changed: 32 additions & 18 deletions
@@ -4,26 +4,40 @@ import LinearAlgebra.BLAS: nrm2, blascopy!, axpy!, axpby!, scal!
 
 export dot, nrm2, blascopy!, axpy!, axpby!, scal!, copyaxpy!
 
-nrm2(n :: Int, x :: Vector{T}) where T <: BLAS.BlasReal = BLAS.nrm2(n, x, 1)
-nrm2(n :: Int, x :: AbstractVector{T}) where T <: Number = norm(x)
-
-dot(n :: Int, x :: Vector{T}, y :: Vector{T}) where T <: BLAS.BlasReal = BLAS.dot(n, x, 1, y, 1)
-dot(n :: Int, x :: AbstractVector{T}, y :: AbstractVector{T}) where T <: Number = dot(x, y)
-
-axpy!(n :: Int, t :: T, d :: Vector{T}, x :: Vector{T}) where T <: BLAS.BlasReal = BLAS.axpy!(n, t, d, 1, x, 1)
-axpy!(n :: Int, t :: T, d :: AbstractVector{T}, x :: AbstractVector{T}) where T <: Number = (x .+= t .* d)
-
-axpby!(n :: Int, t :: T, d :: Vector{T}, s :: T, x :: Vector{T}) where T <: BLAS.BlasReal = BLAS.axpby!(n, t, d, 1, s, x, 1)
-axpby!(n :: Int, t :: T, d :: AbstractVector{T}, s :: T, x :: AbstractVector{T}) where T <: Number = (x .= t .* d .+ s .* x)
-
-scal!(n :: Int, t :: T, x :: Vector{T}) where T <: BLAS.BlasReal = BLAS.scal!(n, t, x, 1)
-scal!(n :: Int, t :: T, x :: AbstractVector{T}) where T <: Number = (x .*= t)
-
-function copyaxpy!(n :: Int, t :: T, d :: Vector{T}, x :: Vector{T}, xt :: Vector{T}) where T <: BLAS.BlasReal
+nrm2(n::Int, x::Vector{T}) where {T <: BLAS.BlasReal} = BLAS.nrm2(n, x, 1)
+nrm2(n::Int, x::AbstractVector{T}) where {T <: Number} = norm(x)
+
+dot(n::Int, x::Vector{T}, y::Vector{T}) where {T <: BLAS.BlasReal} = BLAS.dot(n, x, 1, y, 1)
+dot(n::Int, x::AbstractVector{T}, y::AbstractVector{T}) where {T <: Number} = dot(x, y)
+
+axpy!(n::Int, t::T, d::Vector{T}, x::Vector{T}) where {T <: BLAS.BlasReal} =
+  BLAS.axpy!(n, t, d, 1, x, 1)
+axpy!(n::Int, t::T, d::AbstractVector{T}, x::AbstractVector{T}) where {T <: Number} = (x .+= t .* d)
+
+axpby!(n::Int, t::T, d::Vector{T}, s::T, x::Vector{T}) where {T <: BLAS.BlasReal} =
+  BLAS.axpby!(n, t, d, 1, s, x, 1)
+axpby!(n::Int, t::T, d::AbstractVector{T}, s::T, x::AbstractVector{T}) where {T <: Number} =
+  (x .= t .* d .+ s .* x)
+
+scal!(n::Int, t::T, x::Vector{T}) where {T <: BLAS.BlasReal} = BLAS.scal!(n, t, x, 1)
+scal!(n::Int, t::T, x::AbstractVector{T}) where {T <: Number} = (x .*= t)
+
+function copyaxpy!(
+  n::Int,
+  t::T,
+  d::Vector{T},
+  x::Vector{T},
+  xt::Vector{T},
+) where {T <: BLAS.BlasReal}
   BLAS.blascopy!(n, x, 1, xt, 1)
   BLAS.axpy!(n, t, d, 1, xt, 1)
 end
-function copyaxpy!(n :: Int, t :: T, d :: AbstractVector{T}, x :: AbstractVector{T}, xt :: AbstractVector{T}) where T <: Number
+function copyaxpy!(
+  n::Int,
+  t::T,
+  d::AbstractVector{T},
+  x::AbstractVector{T},
+  xt::AbstractVector{T},
+) where {T <: Number}
   xt .= x .+ t .* d
 end
-
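
Not part of the diff: a small usage sketch of these wrappers, assuming `using SolverTools` makes them available as the `export` line above suggests. `Vector{Float64}` arguments take the BLAS fast path; any other `AbstractVector` falls back to the broadcast methods:

# Usage sketch (assumes the exports shown above).
using SolverTools

n = 4
d = collect(1.0:n)
x = ones(n)                   # Vector{Float64} dispatches to BLAS.axpy!
axpy!(n, 2.0, d, x)           # x <- x + 2d
@assert x == 1 .+ 2 .* d

xs = view(ones(2n), 1:2:2n)   # a strided view is not a Vector: broadcast fallback
axpy!(n, 2.0, d, xs)
@assert Vector(xs) == 1 .+ 2 .* d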

src/auxiliary/bounds.jl

Lines changed: 36 additions & 15 deletions
@@ -8,9 +8,13 @@ export active, breakpoints, compute_Hs_slope_qs!, project!, project_step!
 Computes the active bounds at x, using tolerance `min(rtol * (uᵢ-ℓᵢ), atol)`.
 If ℓᵢ or uᵢ is not finite, only `atol` is used.
 """
-function active(x::AbstractVector{T}, ℓ::AbstractVector{T},
-                u::AbstractVector{T}; rtol::Real = sqrt(eps(eltype(x))),
-                atol::Real = sqrt(eps(eltype(x)))) where {T <: Real}
+function active(
+  x::AbstractVector{T},
+  ℓ::AbstractVector{T},
+  u::AbstractVector{T};
+  rtol::Real = sqrt(eps(eltype(x))),
+  atol::Real = sqrt(eps(eltype(x))),
+) where {T <: Real}
   A = Int[]
   n = length(x)
   for i = 1:n
@@ -33,10 +37,14 @@ Find the smallest and largest values of `α` such that `x + αd` lies on the
 boundary. `x` is assumed to be feasible. `nbrk` is the number of breakpoints
 from `x` in the direction `d`.
 """
-function breakpoints(x::AbstractVector{T}, d::AbstractVector{T},
-                     ℓ::AbstractVector{T}, u::AbstractVector{T}) where {T <: Real}
-  pos = findall( (d .> 0) .& (x .< u) )
-  neg = findall( (d .< 0) .& (x .> ℓ) )
+function breakpoints(
+  x::AbstractVector{T},
+  d::AbstractVector{T},
+  ℓ::AbstractVector{T},
+  u::AbstractVector{T},
+) where {T <: Real}
+  pos = findall((d .> 0) .& (x .< u))
+  neg = findall((d .< 0) .& (x .> ℓ))
 
   nbrk = length(pos) + length(neg)
   nbrk == 0 && return 0, zero(T), zero(T)
@@ -65,10 +73,14 @@ Computes
 slope = dot(g,s)
 qs = ¹/₂sᵀHs + slope
 """
-function compute_Hs_slope_qs!(Hs::AbstractVector{T}, H::Union{AbstractMatrix,AbstractLinearOperator},
-                              s::AbstractVector{T}, g::AbstractVector{T}) where {T <: Real}
+function compute_Hs_slope_qs!(
+  Hs::AbstractVector{T},
+  H::Union{AbstractMatrix, AbstractLinearOperator},
+  s::AbstractVector{T},
+  g::AbstractVector{T},
+) where {T <: Real}
   Hs .= H * s
-  slope = dot(g,s)
+  slope = dot(g, s)
   qs = dot(s, Hs) / 2 + slope
   return slope, qs
 end
@@ -79,8 +91,12 @@ end
 Projects `x` into bounds `ℓ` and `u`, in the sense of
 `yᵢ = max(ℓᵢ, min(xᵢ, uᵢ))`.
 """
-function project!(y :: AbstractVector{T}, x :: AbstractVector{T},
-                  ℓ :: AbstractVector{T}, u :: AbstractVector{T}) where {T <: Real}
+function project!(
+  y::AbstractVector{T},
+  x::AbstractVector{T},
+  ℓ::AbstractVector{T},
+  u::AbstractVector{T},
+) where {T <: Real}
   y .= max.(ℓ, min.(x, u))
 end
 
@@ -89,9 +105,14 @@ end
 
 Computes the projected direction `y = P(x + α * d) - x`.
 """
-function project_step!(y::AbstractVector{T}, x::AbstractVector{T},
-                       d::AbstractVector{T}, ℓ::AbstractVector{T},
-                       u::AbstractVector{T}, α::Real = 1.0) where {T <: Real}
+function project_step!(
+  y::AbstractVector{T},
+  x::AbstractVector{T},
+  d::AbstractVector{T},
+  ℓ::AbstractVector{T},
+  u::AbstractVector{T},
+  α::Real = 1.0,
+) where {T <: Real}
   y .= x .+ α .* d
   project!(y, y, ℓ, u)
   y .-= x
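
Again as illustration only, a usage sketch of the bounds utilities reformatted above (the values are chosen here, not taken from the commit):

# Usage sketch (assumes the exports shown in the hunk header above).
using SolverTools

ℓ = zeros(3)
u = ones(3)
x = [0.0, 0.5, 1.0]

active(x, ℓ, u)               # [1, 3]: x₁ sits at its lower bound, x₃ at its upper

y = similar(x)
project!(y, [-0.2, 0.5, 1.7], ℓ, u)   # y == [0.0, 0.5, 1.0]

d = [1.0, 1.0, -1.0]
nbrk, αmin, αmax = breakpoints(x, d, ℓ, u)  # breakpoints along x + αd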

src/linesearch/armijo_wolfe.jl

Lines changed: 17 additions & 15 deletions
@@ -35,40 +35,42 @@ The following keyword arguments can be provided:
 - `bW_max`: maximum number of increases (default `5`);
 - `verbose`: whether to print information (default `false`).
 """
-function armijo_wolfe(h :: LineModel,
-                      h₀ :: T,
-                      slope :: T,
-                      g :: Array{T,1};
-                      t :: T=one(T),
-                      τ₀ :: T=max(T(1.0e-4), sqrt(eps(T))),
-                      τ₁ :: T=T(0.9999),
-                      bk_max :: Int=10,
-                      bW_max :: Int=5,
-                      verbose :: Bool=false) where T <: AbstractFloat
+function armijo_wolfe(
+  h::LineModel,
+  h₀::T,
+  slope::T,
+  g::Array{T, 1};
+  t::T = one(T),
+  τ₀::T = max(T(1.0e-4), sqrt(eps(T))),
+  τ₁::T = T(0.9999),
+  bk_max::Int = 10,
+  bW_max::Int = 5,
+  verbose::Bool = false,
+) where {T <: AbstractFloat}
 
   # Perform improved Armijo linesearch.
   nbk = 0
   nbW = 0
 
   # First try to increase t to satisfy loose Wolfe condition
   ht, slope_t = objgrad!(h, t, g)
-  while (slope_t < τ₁*slope) && (ht <= h₀ + τ₀ * t * slope) && (nbW < bW_max)
+  while (slope_t < τ₁ * slope) && (ht <= h₀ + τ₀ * t * slope) && (nbW < bW_max)
     t *= 5
     ht, slope_t = objgrad!(h, t, g)
     nbW += 1
   end
 
-  hgoal = h₀ + slope * t * τ₀;
+  hgoal = h₀ + slope * t * τ₀
   fact = -T(0.8)
-  ϵ = eps(T)^T(3/5)
+  ϵ = eps(T)^T(3 / 5)
 
   # Enrich Armijo's condition with Hager & Zhang numerical trick
   Armijo = (ht <= hgoal) || ((ht <= h₀ + ϵ * abs(h₀)) && (slope_t <= fact * slope))
   good_grad = true
   while !Armijo && (nbk < bk_max)
     t *= T(0.4)
     ht = obj(h, t)
-    hgoal = h₀ + slope * t * τ₀;
+    hgoal = h₀ + slope * t * τ₀
 
     # avoids unused grad! calls
     Armijo = false
@@ -86,7 +88,7 @@ function armijo_wolfe(h :: LineModel,
     nbk += 1
   end
 
-  verbose && @printf(" %4d %4d\n", nbk, nbW);
+  verbose && @printf(" %4d %4d\n", nbk, nbW)
 
   return (t, good_grad, ht, nbk, nbW)
 end
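
To place the reformatted signature in context, a hedged end-to-end sketch; the `ADNLPModel` setup is an assumption about the surrounding NLPModels ecosystem, not part of this commit:

# Sketch: one Armijo-Wolfe line search along a steepest-descent direction.
# ADNLPModel historically shipped with NLPModels.jl; newer versions get it
# from the separate ADNLPModels.jl package.
using SolverTools, NLPModels

nlp = ADNLPModel(x -> (x[1] - 1)^2 + 4 * (x[2] - x[1]^2)^2, [-1.2; 1.0])
x = nlp.meta.x0
g = grad(nlp, x)
d = -g                          # descent direction, so ϕ'(0) < 0
ϕ = LineModel(nlp, x, d)        # ϕ(t) = f(x + td)
h₀ = obj(ϕ, 0.0)
slope = grad(ϕ, 0.0)
t, good_grad, ht, nbk, nbW = armijo_wolfe(ϕ, h₀, slope, similar(g))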

src/linesearch/line_model.jl

Lines changed: 16 additions & 16 deletions
@@ -13,23 +13,23 @@ represents the function ϕ : R → R defined by
 ϕ(t) := f(x + td).
 """
 mutable struct LineModel <: AbstractNLPModel
-  meta :: NLPModelMeta
-  counters :: Counters
-  nlp :: AbstractNLPModel
-  x :: AbstractVector
-  d :: AbstractVector
+  meta::NLPModelMeta
+  counters::Counters
+  nlp::AbstractNLPModel
+  x::AbstractVector
+  d::AbstractVector
 end
 
-function LineModel(nlp :: AbstractNLPModel, x :: AbstractVector, d :: AbstractVector)
-  meta = NLPModelMeta(1, x0=zeros(eltype(x), 1), name="LineModel to $(nlp.meta.name))")
+function LineModel(nlp::AbstractNLPModel, x::AbstractVector, d::AbstractVector)
+  meta = NLPModelMeta(1, x0 = zeros(eltype(x), 1), name = "LineModel to $(nlp.meta.name))")
   return LineModel(meta, Counters(), nlp, x, d)
 end
 
 """`redirect!(ϕ, x, d)`
 
 Change the values of x and d of the LineModel ϕ, but retains the counters.
 """
-function redirect!(ϕ :: LineModel, x :: AbstractVector, d :: AbstractVector)
+function redirect!(ϕ::LineModel, x::AbstractVector, d::AbstractVector)
   ϕ.x, ϕ.d = x, d
   return ϕ
 end
@@ -38,7 +38,7 @@ end
 
 ϕ(t) := f(x + td).
 """
-function obj(f :: LineModel, t :: AbstractFloat)
+function obj(f::LineModel, t::AbstractFloat)
   NLPModels.increment!(f, :neval_obj)
   return obj(f.nlp, f.x + t * f.d)
 end
@@ -51,11 +51,11 @@ i.e.,
 
 ϕ'(t) = ∇f(x + td)ᵀd.
 """
-function grad(f :: LineModel, t :: AbstractFloat)
+function grad(f::LineModel, t::AbstractFloat)
   NLPModels.increment!(f, :neval_grad)
   return dot(grad(f.nlp, f.x + t * f.d), f.d)
 end
-derivative(f :: LineModel, t :: AbstractFloat) = grad(f, t)
+derivative(f::LineModel, t::AbstractFloat) = grad(f, t)
 
 """`grad!(f, t, g)` evaluates the first derivative of the `LineModel`
 
@@ -67,11 +67,11 @@ i.e.,
 
 The gradient ∇f(x + td) is stored in `g`.
 """
-function grad!(f :: LineModel, t :: AbstractFloat, g :: AbstractVector)
+function grad!(f::LineModel, t::AbstractFloat, g::AbstractVector)
   NLPModels.increment!(f, :neval_grad)
   return dot(grad!(f.nlp, f.x + t * f.d, g), f.d)
 end
-derivative!(f :: LineModel, t :: AbstractFloat, g :: AbstractVector) = grad!(f, t, g)
+derivative!(f::LineModel, t::AbstractFloat, g::AbstractVector) = grad!(f, t, g)
 
 """`objgrad!(f, t, g)` evaluates the objective and first derivative of the `LineModel`
 
@@ -83,7 +83,7 @@ and
 
 The gradient ∇f(x + td) is stored in `g`.
 """
-function objgrad!(f :: LineModel, t :: AbstractFloat, g :: AbstractVector)
+function objgrad!(f::LineModel, t::AbstractFloat, g::AbstractVector)
   NLPModels.increment!(f, :neval_obj)
   NLPModels.increment!(f, :neval_grad)
   fx, gx = objgrad!(f.nlp, f.x + t * f.d, g)
@@ -98,7 +98,7 @@ and
 
 ϕ'(t) = ∇f(x + td)ᵀd.
 """
-function objgrad(f :: LineModel, t :: AbstractFloat)
+function objgrad(f::LineModel, t::AbstractFloat)
   return obj(f, t), grad(f, t)
 end
 
@@ -110,7 +110,7 @@ i.e.,
 
 ϕ"(t) = dᵀ∇²f(x + td)d.
 """
-function hess(f :: LineModel, t :: AbstractFloat)
+function hess(f::LineModel, t::AbstractFloat)
   NLPModels.increment!(f, :neval_hess)
   return dot(f.d, hprod(f.nlp, f.x + t * f.d, f.d))
 end
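
One usage note, not from the diff: `redirect!` exists so a single `LineModel` can be re-aimed at a new point and direction between outer iterations while its evaluation counters keep accumulating. Continuing the sketch above:

# Sketch: reuse one LineModel across two line searches (assumes nlp, x, d above).
ϕ = LineModel(nlp, x, d)          # fresh model, counters start at zero
obj(ϕ, 1.0)
ϕ.counters.neval_obj              # 1

xnew = x + 0.5 * d
redirect!(ϕ, xnew, -grad(nlp, xnew))  # same ϕ, new x and d; counters retained
obj(ϕ, 1.0)
ϕ.counters.neval_obj              # 2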

0 commit comments
