Skip to content

Commit b367149

Browse files
authored
Add tests with Aqua, ExplicitImports and JET (#1198)
1 parent c3ba21c commit b367149

File tree

22 files changed

+145
-70
lines changed

22 files changed

+145
-70
lines changed

Project.toml

Lines changed: 13 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -2,9 +2,7 @@ name = "Optim"
22
uuid = "429524aa-4258-5aef-a3af-852621145aeb"
33
version = "1.14.0"
44

5-
65
[deps]
7-
Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
86
EnumX = "4e289a0a-7415-4d19-859d-a7e5c4648b56"
97
FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
108
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
@@ -25,27 +23,38 @@ MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
2523
OptimMOIExt = "MathOptInterface"
2624

2725
[compat]
28-
Compat = "3.2.0, 3.3.0, 3.4.0, 3.5.0, 3.6.0, 4"
26+
ADTypes = "1.11"
27+
Aqua = "0.8.14"
28+
Distributions = "0.25.122"
2929
EnumX = "1.0.4"
30+
ExplicitImports = "1.13.2"
3031
FillArrays = "0.6.2, 0.7, 0.8, 0.9, 0.10, 0.11, 0.12, 0.13, 1"
3132
ForwardDiff = "0.10, 1"
33+
JET = "0.9, 0.10"
3234
LineSearches = "7.4.0"
3335
LinearAlgebra = "<0.0.1, 1.6"
3436
MathOptInterface = "1.17"
37+
Measurements = "2.14.1"
3538
NLSolversBase = "7.9.0"
3639
NaNMath = "0.3.2, 1"
3740
OptimTestProblems = "2.0.3"
3841
PositiveFactorizations = "0.2.2"
3942
Printf = "<0.0.1, 1.6"
4043
Random = "<0.0.1, 1.6"
44+
RecursiveArrayTools = "3.39"
45+
ReverseDiff = "1.16.1"
4146
SparseArrays = "<0.0.1, 1.6"
47+
StableRNGs = "1.0.3"
4248
StatsBase = "0.29, 0.30, 0.31, 0.32, 0.33, 0.34"
4349
Test = "<0.0.1, 1.6"
4450
julia = "1.10"
4551

4652
[extras]
4753
ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
54+
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
4855
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
56+
ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
57+
JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
4958
LineSearches = "d3d80556-e9d4-5f37-9878-2ab0fcc64255"
5059
MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
5160
Measurements = "eff96d63-e80a-5855-80a2-b1b0885c5ab7"
@@ -59,4 +68,4 @@ StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3"
5968
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
6069

6170
[targets]
62-
test = ["Test", "Distributions", "MathOptInterface", "Measurements", "OptimTestProblems", "Random", "RecursiveArrayTools", "StableRNGs", "LineSearches", "NLSolversBase", "PositiveFactorizations", "ReverseDiff", "ADTypes"]
71+
test = ["Test", "Aqua", "Distributions", "ExplicitImports", "JET", "MathOptInterface", "Measurements", "OptimTestProblems", "Random", "RecursiveArrayTools", "StableRNGs", "LineSearches", "NLSolversBase", "PositiveFactorizations", "ReverseDiff", "ADTypes"]

README.md

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -4,6 +4,7 @@
44
[![](https://img.shields.io/badge/docs-latest-blue.svg)](https://julianlsolvers.github.io/Optim.jl/dev)
55
[![Build Status](https://github.com/JuliaNLSolvers/Optim.jl/actions/workflows/CI.yml/badge.svg?branch=master)](https://github.com/JuliaNLSolvers/Optim.jl/actions/workflows/CI.yml?query=branch%3Amaster)
66
[![Codecov branch](https://img.shields.io/codecov/c/github/JuliaNLSolvers/Optim.jl/master.svg)](https://codecov.io/gh/JuliaNLSolvers/Optim.jl)
7+
[![Aqua QA](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl)
78
[![JOSS](http://joss.theoj.org/papers/10.21105/joss.00615/status.svg)](https://doi.org/10.21105/joss.00615)
89

910
Univariate and multivariate optimization in Julia.

docs/src/user/minimization.md

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -176,7 +176,7 @@ julia> Optim.minimizer(res)
176176
-0.3333
177177
-1.49994
178178
179-
julia> Optim.minimum(res)
179+
julia> minimum(res)
180180
-2.8333333205768865
181181
```
182182

docs/src/user/tipsandtricks.md

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -86,7 +86,7 @@ that only use the gradient in some iterations but not in others.
8686
Now we call `optimize` with the following syntax:
8787

8888
```julia
89-
Optim.optimize(Optim.only_fg!(fg!), [0., 0.], Optim.LBFGS())
89+
Optim.optimize(NLSolversBase.only_fg!(fg!), [0., 0.], Optim.LBFGS())
9090
```
9191

9292
Similarly, for a computation that requires the Hessian, we can write:
@@ -99,7 +99,7 @@ function fgh!(F, G, H, x)
9999
nothing
100100
end
101101

102-
Optim.optimize(Optim.only_fgh!(fgh!), [0., 0.], Optim.Newton())
102+
Optim.optimize(NLSolversBase.only_fgh!(fgh!), [0., 0.], Optim.Newton())
103103
```
104104

105105
## Provide gradients

ext/OptimMOIExt.jl

Lines changed: 2 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -1,13 +1,11 @@
11
module OptimMOIExt
22

33
using Optim
4-
using Optim.LinearAlgebra
4+
using Optim.LinearAlgebra: rmul!
55
import MathOptInterface as MOI
66

77
function __init__()
8-
@static if isdefined(Base, :get_extension)
9-
setglobal!(Optim, :Optimizer, Optimizer)
10-
end
8+
setglobal!(Optim, :Optimizer, Optimizer)
119
end
1210

1311
mutable struct Optimizer{T} <: MOI.AbstractOptimizer

src/Optim.jl

Lines changed: 24 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -15,52 +15,54 @@ Besides the help provided at the REPL, it is possible to find help and general
1515
documentation online at http://julianlsolvers.github.io/Optim.jl/stable/ .
1616
"""
1717
module Optim
18-
using Compat
19-
using NLSolversBase # for shared infrastructure in JuliaNLSolvers
2018

21-
using PositiveFactorizations # for globalization strategy in Newton
22-
import PositiveFactorizations: cholesky!, cholesky
19+
using PositiveFactorizations: Positive # for globalization strategy in Newton
2320

24-
using LineSearches # for globalization strategy in Quasi-Newton algs
21+
using LineSearches: LineSearches # for globalization strategy in Quasi-Newton algs
2522

2623
import NaNMath # for functions that ignore NaNs (no poisoning)
2724

28-
using Printf # For printing, maybe look into other options
25+
using Printf: @printf # For printing, maybe look into other options
2926

30-
using FillArrays # For handling scalar bounds in Fminbox
31-
32-
#using Compat # for compatibility across multiple julia versions
33-
34-
# for extensions of functions defined in Base.
35-
import Base: length, push!, show, getindex, setindex!, maximum, minimum
27+
using FillArrays: Fill # For handling scalar bounds in Fminbox
3628

3729
# objective and constraints types and functions relevant to them.
38-
import NLSolversBase:
30+
using NLSolversBase:
31+
NLSolversBase,
32+
AbstractObjective,
33+
InplaceObjective,
34+
NotInplaceObjective,
3935
NonDifferentiable,
4036
OnceDifferentiable,
4137
TwiceDifferentiable,
38+
TwiceDifferentiableHV,
39+
AbstractConstraints,
40+
ConstraintBounds,
41+
TwiceDifferentiableConstraints,
4242
nconstraints,
4343
nconstraints_x,
44-
NotInplaceObjective,
45-
InplaceObjective
44+
hessian,
45+
hessian!,
46+
hessian!!,
47+
hv_product,
48+
hv_product!
4649

4750
# var for NelderMead
4851
import StatsBase: var
4952

50-
import LinearAlgebra
51-
import LinearAlgebra:
53+
using LinearAlgebra:
54+
LinearAlgebra,
5255
Diagonal,
5356
diag,
5457
Hermitian,
5558
Symmetric,
5659
rmul!,
5760
mul!,
5861
norm,
59-
normalize!,
6062
diagind,
6163
eigen,
62-
BLAS,
6364
cholesky,
65+
cholesky!,
6466
Cholesky, # factorizations
6567
I,
6668
svd,
@@ -69,7 +71,9 @@ import LinearAlgebra:
6971
ldiv!,
7072
dot
7173

72-
import SparseArrays: AbstractSparseMatrix
74+
using SparseArrays: AbstractSparseMatrix
75+
76+
using EnumX: @enumx # Better enums
7377

7478
# exported functions and types
7579
export optimize,

src/api.jl

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
Base.summary(io::IO, r::OptimizationResults) = summary(io, r.method) # might want to do more here than just return summary of the method used
22
minimizer(r::OptimizationResults) = r.minimizer
3-
minimum(r::OptimizationResults) = r.minimum
3+
Base.minimum(r::OptimizationResults) = r.minimum
44
iterations(r::OptimizationResults) = r.iterations
55
iteration_limit_reached(r::OptimizationResults) = r.stopped_by.iterations
66
trace(r::OptimizationResults) =

src/maximize.jl

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -36,7 +36,7 @@ function maximize(
3636
f,
3737
x0::AbstractArray,
3838
method::AbstractOptimizer,
39-
options = Optim.Options()
39+
options = Options()
4040
)
4141
fmax = let f=f
4242
x -> -f(x)
@@ -48,7 +48,7 @@ function maximize(
4848
g,
4949
x0::AbstractArray,
5050
method::AbstractOptimizer,
51-
options = Optim.Options()
51+
options = Options()
5252
)
5353
fmax = let f=f
5454
x -> -f(x)
@@ -65,7 +65,7 @@ function maximize(
6565
h,
6666
x0::AbstractArray,
6767
method::AbstractOptimizer,
68-
options = Optim.Options()
68+
options = Options()
6969
)
7070
fmax = let f=f
7171
x -> -f(x)
@@ -80,7 +80,7 @@ function maximize(
8080
end
8181

8282
maximizer(r::MaximizationWrapper) = minimizer(res(r))
83-
maximum(r::MaximizationWrapper) = -minimum(res(r))
83+
Base.maximum(r::MaximizationWrapper) = -minimum(res(r))
8484
Base.summary(io::IO, r::MaximizationWrapper) = summary(io, res(r))
8585

8686
for api_method in (

src/multivariate/solvers/constrained/fminbox.jl

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -316,7 +316,7 @@ function optimize(f, l::Number, u::Number, initial_x::AbstractArray; autodiff =
316316
Fill(T(u), size(initial_x)...),
317317
initial_x,
318318
Fminbox(),
319-
Optim.Options(),
319+
Options(),
320320
)
321321
end
322322

src/multivariate/solvers/constrained/ipnewton/interior.jl

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -124,7 +124,7 @@ Base.hash(b::BarrierStateVars, u::UInt) = hash(
124124
hash(b.λxE, hash(b.λc, hash(b.λx, hash(b.slack_c, hash(b.slack_x, u + bsv_seed))))),
125125
)
126126

127-
function dot(v::BarrierStateVars, w::BarrierStateVars)
127+
function LinearAlgebra.dot(v::BarrierStateVars, w::BarrierStateVars)
128128
dot(v.slack_x, w.slack_x) +
129129
dot(v.slack_c, w.slack_c) +
130130
dot(v.λx, w.λx) +
@@ -133,7 +133,7 @@ function dot(v::BarrierStateVars, w::BarrierStateVars)
133133
dot(v.λcE, w.λcE)
134134
end
135135

136-
function norm(b::BarrierStateVars, p::Real)
136+
function LinearAlgebra.norm(b::BarrierStateVars, p::Real)
137137
norm(b.slack_x, p) +
138138
norm(b.slack_c, p) +
139139
norm(b.λx, p) +

0 commit comments

Comments (0)