forked from JuliaNLSolvers/Optim.jl
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathassess_convergence.jl
More file actions
103 lines (87 loc) · 3.18 KB
/
assess_convergence.jl
File metadata and controls
103 lines (87 loc) · 3.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
"""
    f_abschange(d::AbstractObjective, state)
    f_abschange(f_x, f_x_previous)

Absolute change in the objective value, `|f_x - f_x_previous|`.
The objective form reads the current value from `d` and the previous
value from `state.f_x_previous`.
"""
function f_abschange(d::AbstractObjective, state)
    return f_abschange(value(d), state.f_x_previous)
end
function f_abschange(f_x::T, f_x_previous) where {T}
    return abs(f_x - f_x_previous)
end
"""
    f_relchange(d::AbstractObjective, state)
    f_relchange(f_x, f_x_previous)

Relative change in the objective value, `|f_x - f_x_previous| / |f_x|`
(relative to the *current* value, matching the convergence tests below).
"""
function f_relchange(d::AbstractObjective, state)
    return f_relchange(value(d), state.f_x_previous)
end
function f_relchange(f_x::T, f_x_previous) where {T}
    return abs(f_x - f_x_previous) / abs(f_x)
end
"""
    x_abschange(state)
    x_abschange(x, x_previous)

Largest componentwise absolute change in the iterate,
computed via `maxdiff(x, x_previous)`.
"""
function x_abschange(state)
    return x_abschange(state.x, state.x_previous)
end
function x_abschange(x, x_previous)
    return maxdiff(x, x_previous)
end
"""
    x_relchange(state)
    x_relchange(x, x_previous)

Largest absolute change in the iterate relative to the largest
absolute component of the current iterate `x`.
"""
function x_relchange(state)
    return x_relchange(state.x, state.x_previous)
end
function x_relchange(x, x_previous)
    # Qualified as Base.maximum: this module's `maximum` is not Base's.
    return maxdiff(x, x_previous) / Base.maximum(abs, x)
end
"""
    g_residual(d, state)
    g_residual(d::AbstractObjective)
    g_residual(g)

Residual used by the gradient convergence test: the largest absolute
entry of the gradient. Nelder–Mead substitutes its own simplex measure
`state.nm_x`, and non-differentiable objectives report `NaN` so the
gradient criterion can never trigger for them.
"""
function g_residual(d, state)
    return g_residual(d)
end
function g_residual(d, state::NelderMeadState)
    return state.nm_x
end
function g_residual(d::AbstractObjective)
    return g_residual(gradient(d))
end
function g_residual(d::NonDifferentiable)
    return convert(typeof(value(d)), NaN)
end
function g_residual(g)
    # Qualified as Base.maximum: this module's `maximum` is not Base's.
    return Base.maximum(abs, g)
end
"""
    gradient_convergence_assessment(state, d, options)

Return `true` when the gradient residual of `d` is within
`options.g_abstol`. Zeroth-order states carry no gradient information,
so they never satisfy this criterion.
"""
function gradient_convergence_assessment(state::AbstractOptimizerState, d, options)
    return g_residual(gradient(d)) ≤ options.g_abstol
end
function gradient_convergence_assessment(state::ZerothOrderState, d, options)
    return false
end
"""
    assess_convergence(state::AbstractOptimizerState, d, options::Options)

Default convergence assessment: unpack the current/previous iterate from
`state`, the objective value and gradient from `d`, and the tolerances
from `options`, then forward to the tolerance-based method. Used by
AcceleratedGradientDescentState, BFGSState, ConjugateGradientState,
GradientDescentState, LBFGSState, MomentumGradientDescentState and
NewtonState.
"""
function assess_convergence(state::AbstractOptimizerState, d, options::Options)
    return assess_convergence(
        state.x, state.x_previous,
        value(d), state.f_x_previous,
        gradient(d),
        options.x_abstol, options.x_reltol,
        options.f_abstol, options.f_reltol,
        options.g_abstol,
    )
end
"""
    assess_convergence(x, x_previous, f_x, f_x_previous, gx,
                       x_abstol, x_reltol, f_abstol, f_reltol, g_abstol)

Evaluate the standard stopping criteria and return the tuple
`(x_converged, f_converged, g_converged, f_increased)`:

- `x_converged`: change in `x` within `x_abstol`, or within `x_reltol`
  relative to the largest component of `x`;
- `f_converged`: change in `f` within `f_abstol`, or within `f_reltol`
  relative to `|f_x|`;
- `g_converged`: gradient residual within `g_abstol`;
- `f_increased`: objective went up between iterations.
"""
function assess_convergence(
    x,
    x_previous,
    f_x,
    f_x_previous,
    gx,
    x_abstol,
    x_reltol,
    f_abstol,
    f_reltol,
    g_abstol,
)
    # Qualified as Base.maximum: this module's `maximum` is not Base's.
    Δx = x_abschange(x, x_previous)
    x_converged = Δx ≤ x_abstol || Δx ≤ x_reltol * Base.maximum(abs, x)

    Δf = f_abschange(f_x, f_x_previous)
    f_converged = Δf ≤ f_abstol || Δf ≤ f_reltol * abs(f_x)

    f_increased = f_x > f_x_previous

    g_converged = g_residual(gx) ≤ g_abstol

    return x_converged, f_converged, g_converged, f_increased
end
"""
    assess_convergence(x, x_previous, f_x, f_x_previous, g, x_tol, f_tol, g_tol)

Simplified convergence check used by Fminbox and IPNewton: an absolute
tolerance on the change in `x`, a tolerance on the change in `f`
*relative* to `|f_x|` (no absolute `f` check here), and an absolute
tolerance on the gradient residual.

Returns `(x_converged, f_converged, g_converged, f_increased)`.
"""
function assess_convergence(x, x_previous, f_x, f_x_previous, g, x_tol, f_tol, g_tol)
    x_converged = x_abschange(x, x_previous) ≤ x_tol
    # Relative tolerance only; the absolute f test is deliberately absent.
    f_converged = f_abschange(f_x, f_x_previous) ≤ f_tol * abs(f_x)
    f_increased = f_x > f_x_previous
    g_converged = g_residual(g) ≤ g_tol
    return x_converged, f_converged, g_converged, f_increased
end