
Commit 0dd0fd8

apply JuliaFormatter
1 parent f4ff437 commit 0dd0fd8
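
For context, this commit only reformats the test files. JuliaFormatter.jl is usually run from the repository root; a minimal sketch of such an invocation (the project's actual formatter options are not shown in this commit, so the call below assumes default settings):

using JuliaFormatter
# Format every Julia file under the current directory; returns true if the
# files were already formatted and false if any file was rewritten.
format(".")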

File tree: 3 files changed, +127 -111 lines


test/test-R2N.jl

Lines changed: 75 additions & 66 deletions
@@ -1,74 +1,83 @@
 @testset "R2N" begin
-  # BASIC TESTS
-  # Test basic NLP with 2-norm
-  @testset "BASIC" begin
-    rosenbrock_nlp = construct_rosenbrock_nlp()
-    rosenbrock_reg_nlp = RegularizedNLPModel(rosenbrock_nlp, NormL2(0.01))
+  # BASIC TESTS
+  # Test basic NLP with 2-norm
+  @testset "BASIC" begin
+    rosenbrock_nlp = construct_rosenbrock_nlp()
+    rosenbrock_reg_nlp = RegularizedNLPModel(rosenbrock_nlp, NormL2(0.01))
 
-    # Test first order status
-    first_order_kwargs = (atol = 1e-6, rtol = 1e-6)
-    test_solver(rosenbrock_reg_nlp,
-                "R2N",
-                expected_status = :first_order,
-                solver_kwargs=first_order_kwargs)
-    solver, stats = R2NSolver(rosenbrock_reg_nlp), RegularizedExecutionStats(rosenbrock_reg_nlp)
+    # Test first order status
+    first_order_kwargs = (atol = 1e-6, rtol = 1e-6)
+    test_solver(
+      rosenbrock_reg_nlp,
+      "R2N",
+      expected_status = :first_order,
+      solver_kwargs = first_order_kwargs,
+    )
+    solver, stats = R2NSolver(rosenbrock_reg_nlp), RegularizedExecutionStats(rosenbrock_reg_nlp)
 
-    # Test max time status
-    max_time_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_time = 1e-12)
-    test_solver(rosenbrock_reg_nlp,
-                "R2N",
-                expected_status = :max_time,
-                solver_kwargs=max_time_kwargs)
+    # Test max time status
+    max_time_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_time = 1e-12)
+    test_solver(
+      rosenbrock_reg_nlp,
+      "R2N",
+      expected_status = :max_time,
+      solver_kwargs = max_time_kwargs,
+    )
 
-    # Test max iter status
-    max_iter_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_iter = 1)
-    test_solver(rosenbrock_reg_nlp,
-                "R2N",
-                expected_status = :max_iter,
-                solver_kwargs=max_iter_kwargs)
-
-    # Test max eval status
-    max_eval_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_eval = 1)
-    test_solver(rosenbrock_reg_nlp,
-                "R2N",
-                expected_status = :max_eval,
-                solver_kwargs=max_eval_kwargs)
-
-  end
-  # BPDN TESTS
+    # Test max iter status
+    max_iter_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_iter = 1)
+    test_solver(
+      rosenbrock_reg_nlp,
+      "R2N",
+      expected_status = :max_iter,
+      solver_kwargs = max_iter_kwargs,
+    )
 
-  # Test bpdn with L-BFGS and 1-norm
-  @testset "BPDN" begin
-    bpdn_kwargs = (x0 = zeros(bpdn.meta.nvar),σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
-    reg_nlp = RegularizedNLPModel(LBFGSModel(bpdn), NormL1(λ))
-    test_solver(reg_nlp,
-                "R2N",
-                expected_status = :first_order,
-                solver_kwargs=bpdn_kwargs)
-    solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
-    @test @wrappedallocs(solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)) == 0
+    # Test max eval status
+    max_eval_kwargs = (x0 = [π, -π], atol = 1e-16, rtol = 1e-16, max_eval = 1)
+    test_solver(
+      rosenbrock_reg_nlp,
+      "R2N",
+      expected_status = :max_eval,
+      solver_kwargs = max_eval_kwargs,
+    )
+  end
+  # BPDN TESTS
 
-    #test_solver(reg_nlp, # FIXME
-    #            "R2N",
-    #            expected_status = :first_order,
-    #            solver_kwargs=bpdn_kwargs,
-    #            solver_constructor_kwargs=(subsolver=R2DHSolver,))
+  # Test bpdn with L-BFGS and 1-norm
+  @testset "BPDN" begin
+    bpdn_kwargs = (x0 = zeros(bpdn.meta.nvar), σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
+    reg_nlp = RegularizedNLPModel(LBFGSModel(bpdn), NormL1(λ))
+    test_solver(reg_nlp, "R2N", expected_status = :first_order, solver_kwargs = bpdn_kwargs)
+    solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
+    @test @wrappedallocs(
+      solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
+    ) == 0
 
-    # Test bpdn with L-SR1 and 0-norm
-    reg_nlp = RegularizedNLPModel(LSR1Model(bpdn), NormL0(λ))
-    test_solver(reg_nlp,
-                "R2N",
-                expected_status = :first_order,
-                solver_kwargs=bpdn_kwargs)
-    solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
-    @test @wrappedallocs(solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)) == 0
+    #test_solver(reg_nlp, # FIXME
+    #            "R2N",
+    #            expected_status = :first_order,
+    #            solver_kwargs=bpdn_kwargs,
+    #            solver_constructor_kwargs=(subsolver=R2DHSolver,))
 
-    test_solver(reg_nlp,
-                "R2N",
-                expected_status = :first_order,
-                solver_kwargs=bpdn_kwargs,
-                solver_constructor_kwargs=(subsolver=R2DHSolver,))
-    solver, stats = R2NSolver(reg_nlp, subsolver = R2DHSolver), RegularizedExecutionStats(reg_nlp)
-    @test @wrappedallocs(solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)) == 0
-  end
-end
+    # Test bpdn with L-SR1 and 0-norm
+    reg_nlp = RegularizedNLPModel(LSR1Model(bpdn), NormL0(λ))
+    test_solver(reg_nlp, "R2N", expected_status = :first_order, solver_kwargs = bpdn_kwargs)
+    solver, stats = R2NSolver(reg_nlp), RegularizedExecutionStats(reg_nlp)
+    @test @wrappedallocs(
+      solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
+    ) == 0
+
+    test_solver(
+      reg_nlp,
+      "R2N",
+      expected_status = :first_order,
+      solver_kwargs = bpdn_kwargs,
+      solver_constructor_kwargs = (subsolver = R2DHSolver,),
+    )
+    solver, stats = R2NSolver(reg_nlp, subsolver = R2DHSolver), RegularizedExecutionStats(reg_nlp)
+    @test @wrappedallocs(
+      solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)
+    ) == 0
+  end
+end
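
The in-place calls above preallocate a solver and a stats object and assert that solve! does not allocate. A minimal sketch of that kind of check, written here with Base's @allocated macro rather than the test suite's @wrappedallocs helper; the function name check_solve_allocs is illustrative only:

# Wrap the call in a function so that global-scope effects are not counted,
# and call it once beforehand so compilation allocations are excluded.
check_solve_allocs(solver, reg_nlp, stats) =
  @allocated solve!(solver, reg_nlp, stats, σk = 1.0, β = 1e16, atol = 1e-6, rtol = 1e-6)

check_solve_allocs(solver, reg_nlp, stats)            # warm-up call
@test check_solve_allocs(solver, reg_nlp, stats) == 0 # the property the tests assert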

test/test-solver.jl

Lines changed: 42 additions & 30 deletions
@@ -1,35 +1,47 @@
-function test_solver(reg_nlp::R, solver_name::String; expected_status = :first_order, solver_constructor_kwargs = (;), solver_kwargs = (;)) where{R}
-
-  # Test output with allocating calling form
-  solver_fun = getfield(RegularizedOptimization, Symbol(solver_name))
-  stats_basic = solver_fun(reg_nlp.model, reg_nlp.h, ROSolverOptions(); solver_constructor_kwargs..., solver_kwargs...)
+function test_solver(
+  reg_nlp::R,
+  solver_name::String;
+  expected_status = :first_order,
+  solver_constructor_kwargs = (;),
+  solver_kwargs = (;),
+) where {R}
 
-  x0 = get(solver_kwargs, :x0, reg_nlp.model.meta.x0)
-  @test typeof(stats_basic.solution) == typeof(x0)
-  @test length(stats_basic.solution) == reg_nlp.model.meta.nvar
-  @test typeof(stats_basic.dual_feas) == eltype(stats_basic.solution)
-  @test stats_basic.status == expected_status
-  @test obj(reg_nlp, stats_basic.solution) == stats_basic.objective
-  @test stats_basic.objective <= obj(reg_nlp, x0)
+  # Test output with allocating calling form
+  solver_fun = getfield(RegularizedOptimization, Symbol(solver_name))
+  stats_basic = solver_fun(
+    reg_nlp.model,
+    reg_nlp.h,
+    ROSolverOptions();
+    solver_constructor_kwargs...,
+    solver_kwargs...,
+  )
 
-  # Test output with optimized calling form
-  solver_constructor = getfield(RegularizedOptimization, Symbol(solver_name*"Solver"))
-  solver = solver_constructor(reg_nlp; solver_constructor_kwargs...)
-  stats_optimized = RegularizedExecutionStats(reg_nlp)
+  x0 = get(solver_kwargs, :x0, reg_nlp.model.meta.x0)
+  @test typeof(stats_basic.solution) == typeof(x0)
+  @test length(stats_basic.solution) == reg_nlp.model.meta.nvar
+  @test typeof(stats_basic.dual_feas) == eltype(stats_basic.solution)
+  @test stats_basic.status == expected_status
+  @test obj(reg_nlp, stats_basic.solution) == stats_basic.objective
+  @test stats_basic.objective <= obj(reg_nlp, x0)
 
-  # Remove the x0 entry from solver_kwargs
-  optimized_solver_kwargs = Base.structdiff(solver_kwargs, NamedTuple{(:x0,)})
-  solve!(solver, reg_nlp, stats_optimized; x = x0, optimized_solver_kwargs...) # It would be interesting to check for allocations here as well but depending on
-  # the structure of solver_kwargs, some variables might get boxed, resulting in
-  # false positives, for example if tol = 1e-3; solver_kwargs = (atol = tol),
-  # then wrappedallocs would give a > 0 answer...
-  @test typeof(stats_optimized.solution) == typeof(x0)
-  @test length(stats_optimized.solution) == reg_nlp.model.meta.nvar
-  @test typeof(stats_optimized.dual_feas) == eltype(stats_optimized.solution)
-  @test stats_optimized.status == expected_status
-  @test obj(reg_nlp, stats_optimized.solution) == stats_optimized.objective
-  @test stats_optimized.objective <= obj(reg_nlp, x0)
+  # Test output with optimized calling form
+  solver_constructor = getfield(RegularizedOptimization, Symbol(solver_name * "Solver"))
+  solver = solver_constructor(reg_nlp; solver_constructor_kwargs...)
+  stats_optimized = RegularizedExecutionStats(reg_nlp)
 
-  # TODO: test that the optimized entries in stats_optimized and stats_basic are the same.
+  # Remove the x0 entry from solver_kwargs
+  optimized_solver_kwargs = Base.structdiff(solver_kwargs, NamedTuple{(:x0,)})
+  solve!(solver, reg_nlp, stats_optimized; x = x0, optimized_solver_kwargs...) # It would be interesting to check for allocations here as well but depending on
+  # the structure of solver_kwargs, some variables might get boxed, resulting in
+  # false positives, for example if tol = 1e-3; solver_kwargs = (atol = tol),
+  # then wrappedallocs would give a > 0 answer...
+  @test typeof(stats_optimized.solution) == typeof(x0)
+  @test length(stats_optimized.solution) == reg_nlp.model.meta.nvar
+  @test typeof(stats_optimized.dual_feas) == eltype(stats_optimized.solution)
+  @test stats_optimized.status == expected_status
+  @test obj(reg_nlp, stats_optimized.solution) == stats_optimized.objective
+  @test stats_optimized.objective <= obj(reg_nlp, x0)
 
-end
+  # TODO: test that the optimized entries in stats_optimized and stats_basic are the same.
+
+end
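
The harness above drops the x0 entry from solver_kwargs with Base.structdiff before calling the in-place form, since the starting point is passed separately through x. A small illustration of that Base function on a made-up keyword tuple:

kwargs = (x0 = zeros(2), atol = 1e-6, rtol = 1e-6)  # example keyword NamedTuple
Base.structdiff(kwargs, NamedTuple{(:x0,)})         # -> (atol = 1.0e-6, rtol = 1.0e-6)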

test/utils.jl

Lines changed: 10 additions & 15 deletions
@@ -33,25 +33,20 @@ end
 
 # Construct the rosenbrock problem.
 
-function rosenbrock_f(x::Vector{T}) where{T <: Real}
-  100*(x[2]-x[1]^2)^2 + (1-x[1])^2
+function rosenbrock_f(x::Vector{T}) where {T <: Real}
+  100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2
 end
 
-function rosenbrock_grad!(gx::Vector{T}, x::Vector{T}) where{T <: Real}
-  gx[1] = -400*x[1]*(x[2]-x[1]^2)-2*(1-x[1])
-  gx[2] = 200*(x[2]-x[1]^2)
+function rosenbrock_grad!(gx::Vector{T}, x::Vector{T}) where {T <: Real}
+  gx[1] = -400 * x[1] * (x[2] - x[1]^2) - 2 * (1 - x[1])
+  gx[2] = 200 * (x[2] - x[1]^2)
 end
 
-function rosenbrock_hv!(hv::Vector{T}, x::Vector{T}, v::Vector{T}; obj_weight = 1.0) where{T}
-  hv[1] = (1200*x[1]^2-400*x[2]+2)*v[1] -400*x[1]*v[2]
-  hv[2] = -400*x[1]*v[1] + 200*v[2]
+function rosenbrock_hv!(hv::Vector{T}, x::Vector{T}, v::Vector{T}; obj_weight = 1.0) where {T}
+  hv[1] = (1200 * x[1]^2 - 400 * x[2] + 2) * v[1] - 400 * x[1] * v[2]
+  hv[2] = -400 * x[1] * v[1] + 200 * v[2]
 end
 
 function construct_rosenbrock_nlp()
-  return NLPModel(
-    zeros(2),
-    rosenbrock_f,
-    grad = rosenbrock_grad!,
-    hprod = rosenbrock_hv!
-  )
-end
+  return NLPModel(zeros(2), rosenbrock_f, grad = rosenbrock_grad!, hprod = rosenbrock_hv!)
+end
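
As a quick sanity check on the formulas above (not part of the commit): the Rosenbrock objective and the gradient filled in by rosenbrock_grad! both vanish at the known minimizer (1, 1):

x = [1.0, 1.0]          # global minimizer of the Rosenbrock function
gx = zeros(2)
rosenbrock_grad!(gx, x)
rosenbrock_f(x)         # == 0.0
gx                      # == [0.0, 0.0]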
