Skip to content

Commit bb3d295

Browse files
committed
Update
1 parent 29165e4 commit bb3d295

36 files changed

+2622
-2622
lines changed
Lines changed: 60 additions & 60 deletions
Original file line numberDiff line numberDiff line change
@@ -1,60 +1,60 @@
1-
using PEPit, OrderedCollections

"""
    wc_three_operator_splitting(mu1, L1, L3, alpha, theta, n; verbose=true)

Compute a worst-case contraction factor, in distance, of the Three Operator
Splitting (TOS) method run for `n` iterations from two different starting
points at distance at most 1.

# Arguments
- `mu1`, `L1`: strong convexity and smoothness parameters of the first function.
- `L3`: smoothness parameter of the third function.
- `alpha`: proximal step size.
- `theta`: relaxation parameter of the update.
- `n`: number of iterations.

# Keywords
- `verbose`: print a summary of the obtained guarantee when `true`.

# Returns
`(pepit_tau, theoretical_tau)`; `theoretical_tau` is `nothing` since no
closed-form comparison bound is provided for this setting.
"""
function wc_three_operator_splitting(mu1, L1, L3, alpha, theta, n; verbose=true)
    problem = PEP()

    # The three operators: smooth strongly convex, convex, smooth convex.
    f1 = declare_function!(problem, SmoothStronglyConvexFunction,
                           OrderedDict("mu" => mu1, "L" => L1); reuse_gradient=true)
    f2 = declare_function!(problem, ConvexFunction, OrderedDict(); reuse_gradient=false)
    f3 = declare_function!(problem, SmoothConvexFunction,
                           OrderedDict("L" => L3); reuse_gradient=true)

    # Two arbitrary starting points, initially at squared distance at most 1.
    w0 = set_initial_point!(problem)
    w0p = set_initial_point!(problem)
    set_initial_condition!(problem, (w0 - w0p)^2 <= 1)

    # Run n TOS iterations from a given start and return the final iterate.
    run_tos = w_init -> begin
        w = w_init
        for _ in 1:n
            x, _, _ = proximal_step!(w, f2, alpha)
            gx, _ = oracle!(f3, x)
            y, _, _ = proximal_step!(2 * x - w - alpha * gx, f1, alpha)
            w = w + theta * (y - x)
        end
        w
    end

    w = run_tos(w0)
    wp = run_tos(w0p)

    # Worst-case squared distance between the two trajectories after n steps.
    set_performance_metric!(problem, (w - wp)^2)

    pepit_tau = solve!(problem; verbose=verbose)

    theoretical_tau = nothing  # no closed-form bound available for comparison

    if verbose
        println("*** Example file: worst-case performance of the Three Operator Splitting in distance ***")
        println("\tPEPit guarantee:\t ||w^1_n - w^0_n||^2 <= $(round(pepit_tau, digits=6)) ||w^1_0 - w^0_0||^2")
    end

    return pepit_tau, theoretical_tau
end

L3 = 1.0
alpha = 1 / L3

pepit_tau, theoretical_tau = wc_three_operator_splitting(0.1, 10.0, L3, alpha, 1.0, 4; verbose=true)
1+
using PEPit, OrderedCollections

"""
    wc_three_operator_splitting(mu1, L1, L3, alpha, theta, n; verbose=true)

Worst-case contraction, measured in distance, of `n` iterations of the Three
Operator Splitting (TOS) started from two points at distance at most 1.

# Arguments
- `mu1`, `L1`: strong-convexity and smoothness constants of the first function.
- `L3`: smoothness constant of the third function.
- `alpha`: proximal step size; `theta`: relaxation parameter; `n`: iterations.

# Keywords
- `verbose`: print the obtained guarantee when `true`.

# Returns
`(pepit_tau, theoretical_tau)` where `theoretical_tau` is `nothing`
(no closed-form reference bound is provided).
"""
function wc_three_operator_splitting(mu1, L1, L3, alpha, theta, n; verbose=true)
    problem = PEP()

    # Declare the three operators entering the splitting.
    f1 = declare_function!(problem, SmoothStronglyConvexFunction,
                           OrderedDict("mu" => mu1, "L" => L1); reuse_gradient=true)
    f2 = declare_function!(problem, ConvexFunction, OrderedDict(); reuse_gradient=false)
    f3 = declare_function!(problem, SmoothConvexFunction,
                           OrderedDict("L" => L3); reuse_gradient=true)

    # Two starting points constrained to lie within unit squared distance.
    w0 = set_initial_point!(problem)
    w0p = set_initial_point!(problem)
    set_initial_condition!(problem, (w0 - w0p)^2 <= 1)

    # First trajectory.
    w = w0
    for _ in 1:n
        u, _, _ = proximal_step!(w, f2, alpha)
        gu, _ = oracle!(f3, u)
        v, _, _ = proximal_step!(2 * u - w - alpha * gu, f1, alpha)
        w = w + theta * (v - u)
    end

    # Second trajectory, same scheme from the other start.
    wp = w0p
    for _ in 1:n
        up, _, _ = proximal_step!(wp, f2, alpha)
        gup, _ = oracle!(f3, up)
        vp, _, _ = proximal_step!(2 * up - wp - alpha * gup, f1, alpha)
        wp = wp + theta * (vp - up)
    end

    # Performance metric: final squared distance between the trajectories.
    set_performance_metric!(problem, (w - wp)^2)

    pepit_tau = solve!(problem; verbose=verbose)

    theoretical_tau = nothing  # no theoretical comparison value

    if verbose
        println("*** Example file: worst-case performance of the Three Operator Splitting in distance ***")
        println("\tPEPit guarantee:\t ||w^1_n - w^0_n||^2 <= $(round(pepit_tau, digits=6)) ||w^1_0 - w^0_0||^2")
    end

    return pepit_tau, theoretical_tau
end

L3 = 1.0
alpha = 1 / L3

pepit_tau, theoretical_tau = wc_three_operator_splitting(0.1, 10.0, L3, alpha, 1.0, 4; verbose=true)
Lines changed: 65 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -1,65 +1,65 @@
1-
using PEPit, OrderedCollections

"""
    wc_accelerated_inexact_forward_backward(L, zeta, n; verbose=true)

Worst-case guarantee of an accelerated inexact forward-backward method applied
to `F = f + h`, where `f` is `L`-smooth convex and `h` is convex, and the
proximal step on `h` is evaluated inexactly up to a relative primal-dual gap
accuracy controlled by `zeta`.

# Arguments
- `L`: smoothness constant of the differentiable part.
- `zeta`: relative accuracy parameter of the inexact proximal computations.
- `n`: number of iterations.

# Keywords
- `verbose`: print a summary of both guarantees when `true`.

# Returns
`(pepit_tau, theoretical_tau)` with `theoretical_tau = 2L / ((1 - zeta^2) n^2)`.
"""
function wc_accelerated_inexact_forward_backward(L, zeta, n; verbose=true)
    problem = PEP()

    # Objective F = smooth part + nonsmooth part.
    smooth_part = declare_function!(problem, SmoothConvexFunction,
                                    OrderedDict("L" => L); reuse_gradient=true)
    nonsmooth_part = declare_function!(problem, ConvexFunction,
                                       OrderedDict(); reuse_gradient=false)
    F = smooth_part + nonsmooth_part

    # Optimum of F and its value; initial squared distance bounded by 1.
    xs = stationary_point!(F)
    Fs = value!(F, xs)
    x0 = set_initial_point!(problem)
    set_initial_condition!(problem, (x0 - xs)^2 <= 1)

    gamma = 1 / L                  # forward step size
    eta = (1 - zeta^2) * gamma     # effective step size accounting for inexactness

    A = 0.0
    x, z = x0, x0
    hx = nothing                   # value of h at the latest primal iterate

    for _ in 1:n
        A_next = A + (eta + sqrt(eta^2 + 4 * eta * A)) / 2
        y = x + (1 - A / A_next) * (z - x)
        gy = gradient!(smooth_part, y)
        # Inexact backward step with primal-dual gap accuracy certificate.
        x, _, hx, _, vx, _, eps_var =
            inexact_proximal_step!(y - gamma * gy, nonsmooth_part, gamma; opt="PD_gapI")
        # Relative accuracy requirement on the inexactness.
        add_constraint!(nonsmooth_part, eps_var <= (zeta * gamma)^2 / 2 * (vx + gy)^2)
        z = z - (A_next - A) * (vx + gy)
        A = A_next
    end

    # Performance metric: objective accuracy F(x_n) - F_*.
    set_performance_metric!(problem, value!(smooth_part, x) + hx - Fs)

    pepit_tau = solve!(problem; verbose=verbose)

    theoretical_tau = 2 * L / (1 - zeta^2) / n^2

    if verbose
        println("*** Example file: worst-case performance of an inexact accelerated forward backward method ***")
        println("\tPEPit guarantee:\t F(x_n)-F_* <= $(round(pepit_tau, digits=7)) ||x_0 - x_*||^2")
        println("\tTheoretical guarantee:\t F(x_n)-F_* <= $(round(theoretical_tau, digits=7)) ||x_0 - x_*||^2")
    end

    return pepit_tau, theoretical_tau
end

pepit_tau, theoretical_tau = wc_accelerated_inexact_forward_backward(1.3, 0.45, 11; verbose=true)
1+
using PEPit, OrderedCollections

"""
    wc_accelerated_inexact_forward_backward(L, zeta, n; verbose=true)

Worst-case objective accuracy of an accelerated forward-backward scheme on
`F = f + h` (`f` `L`-smooth convex, `h` convex) when each proximal step on `h`
is solved only approximately, with relative accuracy governed by `zeta`.

# Arguments
- `L`: smoothness constant of `f`.
- `zeta`: relative inexactness level of the proximal computations.
- `n`: iteration count.

# Keywords
- `verbose`: print both the numerical and theoretical guarantees when `true`.

# Returns
`(pepit_tau, theoretical_tau)` with `theoretical_tau = 2L / ((1 - zeta^2) n^2)`.
"""
function wc_accelerated_inexact_forward_backward(L, zeta, n; verbose=true)
    problem = PEP()

    f = declare_function!(problem, SmoothConvexFunction,
                          OrderedDict("L" => L); reuse_gradient=true)
    h = declare_function!(problem, ConvexFunction, OrderedDict(); reuse_gradient=false)
    F = f + h

    # Reference optimum and unit initial distance condition.
    xs = stationary_point!(F)
    Fs = value!(F, xs)
    x0 = set_initial_point!(problem)
    set_initial_condition!(problem, (x0 - xs)^2 <= 1)

    gamma = 1 / L               # gradient step size
    eta = (1 - zeta^2) * gamma  # inexactness-adjusted step size

    # Estimate-sequence weight and the two iterate sequences.
    A = 0.0
    x = x0
    z = x0
    hx = nothing                # h-value at the current primal iterate

    for _ in 1:n
        A_next = A + (eta + sqrt(eta^2 + 4 * eta * A)) / 2
        y = x + (1 - A / A_next) * (z - x)
        gy = gradient!(f, y)
        # Approximate proximal step certified through a primal-dual gap.
        x, _, hx, _, vx, _, eps_var =
            inexact_proximal_step!(y - gamma * gy, h, gamma; opt="PD_gapI")
        # Enforce the relative accuracy requirement on eps_var.
        add_constraint!(h, eps_var <= (zeta * gamma)^2 / 2 * (vx + gy)^2)
        z = z - (A_next - A) * (vx + gy)
        A = A_next
    end

    # Metric: F(x_n) - F_* written via the last oracle outputs.
    set_performance_metric!(problem, value!(f, x) + hx - Fs)

    pepit_tau = solve!(problem; verbose=verbose)

    theoretical_tau = 2 * L / (1 - zeta^2) / n^2

    if verbose
        println("*** Example file: worst-case performance of an inexact accelerated forward backward method ***")
        println("\tPEPit guarantee:\t F(x_n)-F_* <= $(round(pepit_tau, digits=7)) ||x_0 - x_*||^2")
        println("\tTheoretical guarantee:\t F(x_n)-F_* <= $(round(theoretical_tau, digits=7)) ||x_0 - x_*||^2")
    end

    return pepit_tau, theoretical_tau
end

pepit_tau, theoretical_tau = wc_accelerated_inexact_forward_backward(1.3, 0.45, 11; verbose=true)
Lines changed: 50 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -1,50 +1,50 @@
1-
using PEPit, OrderedCollections

"""
    wc_online_gradient_descent(M::Real, D::Real, n::Int; verbose=true)

Worst-case static regret of projected online gradient descent with the fixed
step size `gamma = D / (M * sqrt(n))`, over `n` convex `M`-Lipschitz losses
restricted (via an indicator function) to a convex set of diameter `D`.

# Arguments
- `M`: Lipschitz constant of each loss function.
- `D`: diameter of the feasible set.
- `n`: number of online rounds.

# Keywords
- `verbose`: print both the numerical and theoretical regret bounds when `true`.

# Returns
`(pepit_tau, theoretical_tau)` with `theoretical_tau = M * D * sqrt(n)`.
"""
function wc_online_gradient_descent(M::Real, D::Real, n::Int; verbose = true)

    problem = PEP()

    gamma = D / (M * sqrt(n))

    # n adversarial convex M-Lipschitz losses, and the feasible-set indicator.
    fis = [declare_function!(problem, ConvexLipschitzFunction, OrderedDict("M" => M)) for _ in 1:n]
    h = declare_function!(problem, ConvexIndicatorFunction, OrderedDict("D" => D))

    F = sum(fis)

    # Comparator: a fixed feasible point (projected onto the set), with its total loss.
    x_ref = set_initial_point!(problem)
    x_ref, _, _ = proximal_step!(x_ref, h, 1)
    _, F_ref = oracle!(F, x_ref)

    # Online iterates start from an arbitrary feasible point.
    x = set_initial_point!(problem)
    x, _, _ = proximal_step!(x, h, 1)

    f_saved = Vector{Expression}(undef, n)
    for i in 1:n
        g_i, f_i = oracle!(fis[i], x)
        f_saved[i] = f_i
        # Projected gradient step on the i-th loss.
        x, _, _ = proximal_step!(x - gamma * g_i, h, gamma)
    end

    # Regret: accumulated online losses minus the comparator's total loss.
    set_performance_metric!(problem, sum(f_saved) - F_ref)

    pepit_tau = solve!(problem; verbose=verbose)

    theoretical_tau = M * D * sqrt(n)

    # Fix: the original guard `verbose != -1` was always true for a Bool keyword
    # (in Julia both `true != -1` and `false != -1` hold), so the summary was
    # printed even with verbose=false. Test the Bool directly instead.
    if verbose
        println("*** Example file: worst-case regret of online gradient descent for fixed step-sizes ***")
        println("\tPEPit guarantee:\t R_n <= $(round(pepit_tau, digits=6))")
        println("\tTheoretical guarantee:\t R_n <= $(round(theoretical_tau, digits=6))")
    end

    return pepit_tau, theoretical_tau
end

M, D, n = 1.0, 0.5, 2

pepit_tau, theoretical_tau = wc_online_gradient_descent(M, D, n; verbose=true)
1+
using PEPit, OrderedCollections

"""
    wc_online_gradient_descent(M::Real, D::Real, n::Int; verbose=true)

Worst-case static regret of projected online gradient descent run for `n`
rounds with fixed step size `gamma = D / (M * sqrt(n))` against `n` convex
`M`-Lipschitz losses on a convex feasible set of diameter `D`.

# Arguments
- `M`: Lipschitz constant of each loss.
- `D`: diameter of the feasible set.
- `n`: number of rounds.

# Keywords
- `verbose`: print the obtained and theoretical regret bounds when `true`.

# Returns
`(pepit_tau, theoretical_tau)` with `theoretical_tau = M * D * sqrt(n)`.
"""
function wc_online_gradient_descent(M::Real, D::Real, n::Int; verbose = true)

    problem = PEP()

    gamma = D / (M * sqrt(n))

    # The n adversarial losses plus the indicator of the feasible set.
    fis = [declare_function!(problem, ConvexLipschitzFunction, OrderedDict("M" => M)) for _ in 1:n]
    h = declare_function!(problem, ConvexIndicatorFunction, OrderedDict("D" => D))

    F = sum(fis)

    # Best-fixed-point comparator, projected to be feasible.
    x_ref = set_initial_point!(problem)
    x_ref, _, _ = proximal_step!(x_ref, h, 1)
    _, F_ref = oracle!(F, x_ref)

    # Feasible starting iterate for the online scheme.
    x = set_initial_point!(problem)
    x, _, _ = proximal_step!(x, h, 1)

    f_saved = Vector{Expression}(undef, n)
    for i in 1:n
        g_i, f_i = oracle!(fis[i], x)
        f_saved[i] = f_i
        # Projected gradient update using the current loss's subgradient.
        x, _, _ = proximal_step!(x - gamma * g_i, h, gamma)
    end

    # Static regret as the performance metric.
    set_performance_metric!(problem, sum(f_saved) - F_ref)

    pepit_tau = solve!(problem; verbose=verbose)

    theoretical_tau = M * D * sqrt(n)

    # Fix: `verbose != -1` never suppressed output for a Bool keyword, since
    # both `true != -1` and `false != -1` evaluate to true in Julia.
    if verbose
        println("*** Example file: worst-case regret of online gradient descent for fixed step-sizes ***")
        println("\tPEPit guarantee:\t R_n <= $(round(pepit_tau, digits=6))")
        println("\tTheoretical guarantee:\t R_n <= $(round(theoretical_tau, digits=6))")
    end

    return pepit_tau, theoretical_tau
end

M, D, n = 1.0, 0.5, 2

pepit_tau, theoretical_tau = wc_online_gradient_descent(M, D, n; verbose=true)

0 commit comments

Comments
 (0)