Skip to content

Commit ce47e79

Browse files
authored
Merge pull request #138 from PerformanceEstimation/features/visualization_2nd_version
Features/visualization 2nd version
2 parents 7c448ab + 0903c93 commit ce47e79

22 files changed

+2439
-5
lines changed

PEPit/__init__.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,7 @@
22
from .constraint import Constraint
33
from .expression import Expression, null_expression
44
from .function import Function
5+
from .interpolator import Interpolator
56
from .psd_matrix import PSDMatrix
67
from .wrapper import Wrapper
78
from .pep import PEP
@@ -10,13 +11,15 @@
1011
__all__ = ['block_partition', 'BlockPartition',
1112
'examples',
1213
'functions',
14+
'interpolators',
1315
'operators',
1416
'primitive_steps',
1517
'tools',
1618
'wrappers',
1719
'constraint', 'Constraint',
1820
'expression', 'Expression', 'null_expression',
1921
'function', 'Function',
22+
'interpolator', 'Interpolator',
2023
'psd_matrix', 'PSDMatrix',
2124
'pep', 'PEP',
2225
'point', 'Point', 'null_point',

PEPit/examples/online_learning/online_follow_leader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ def wc_online_follow_leader(M, D, n, wrapper="cvxpy", solver=None, verbose=1):
99
"""
1010
Consider the online convex minimization problem, whose goal is to sequentially minimize the regret
1111
12-
.. math:: R_n \\triangleq \\min_{x\\in Q} \sum_{i=1}^n f_i(x_i)-f_i(x),
12+
.. math:: R_n \\triangleq \\max_{x\\in Q} \sum_{i=1}^n f_i(x_i)-f_i(x),
1313
1414
where the functions :math:`f_i` are :math:`M`-Lipschitz and convex, and where :math:`Q` is a
1515
bounded closed convex set with diameter upper bounded by :math:`D`. We also denote by :math:`x_\\star\\in Q`

PEPit/examples/online_learning/online_follow_regularized_leader.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ def wc_online_follow_regularized_leader(M, D, n, wrapper="cvxpy", solver=None, v
99
"""
1010
Consider the online convex minimization problem, whose goal is to sequentially minimize the regret
1111
12-
.. math:: R_n \\triangleq \\min_{x\\in Q} \sum_{i=1}^n f_i(x_i)-f_i(x),
12+
.. math:: R_n \\triangleq \\max_{x\\in Q} \sum_{i=1}^n f_i(x_i)-f_i(x),
1313
1414
where the functions :math:`f_i` are :math:`M`-Lipschitz and convex, and where :math:`Q` is a
1515
bounded closed convex set with diameter upper bounded by :math:`D`. We also denote by :math:`x_\\star\\in Q`

PEPit/examples/online_learning/online_frank_wolfe.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ def wc_online_frank_wolfe(M, D, n, wrapper="cvxpy", solver=None, verbose=1):
1010
"""
1111
Consider the online convex minimization problem, whose goal is to sequentially minimize the regret
1212
13-
.. math:: R_n \\triangleq \\min_{x\\in Q} \sum_{i=1}^n f_i(x_i)-f_i(x),
13+
.. math:: R_n \\triangleq \\max_{x\\in Q} \sum_{i=1}^n f_i(x_i)-f_i(x),
1414
1515
where the functions :math:`f_i` are :math:`M`-Lipschitz and convex, and where :math:`Q` is a
1616
bounded closed convex set with diameter upper bounded by :math:`D`. We also denote by :math:`x_\\star\\in Q`

PEPit/examples/online_learning/online_gradient_descent.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ def wc_online_gradient_descent(M, D, n, wrapper="cvxpy", solver=None, verbose=1)
99
"""
1010
Consider the online convex minimization problem, whose goal is to sequentially minimize the regret
1111
12-
.. math:: R_n \\triangleq \\min_{x\\in Q} \sum_{i=1}^n f_i(x_i)-f_i(x),
12+
.. math:: R_n \\triangleq \\max_{x\\in Q} \sum_{i=1}^n f_i(x_i)-f_i(x),
1313
1414
where the functions :math:`f_i` are :math:`M`-Lipschitz and convex, and where :math:`Q` is a
1515
bounded closed convex set with diameter upper bounded by :math:`D`. We also denote by :math:`x_\\star\\in Q`

PEPit/function.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -157,6 +157,18 @@ def get_is_leaf(self):
157157
"""
158158
return self._is_leaf
159159

160+
def get_interpolator(self):
161+
"""
162+
Warnings:
163+
Needs to be overwritten with an appropriate interpolator for the class.
164+
165+
Raises:
166+
NotImplementedError: This method must be overwritten in children classes
167+
168+
"""
169+
170+
raise NotImplementedError("This method must be overwritten in children classes")
171+
160172
def __add__(self, other):
161173
"""
162174
Add 2 :class:`Function` objects together, leading to a new :class:`Function` object.

PEPit/functions/convex_function.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,6 @@
1+
import numpy as np
12
from PEPit.function import Function
3+
from PEPit.interpolators import SmoothStronglyConvexInterpolator
24

35

46
class ConvexFunction(Function):
@@ -41,6 +43,13 @@ def __init__(self,
4143
name=name,
4244
)
4345

46+
def get_interpolator(self, options='lowest'):
47+
"""
48+
Returns: SmoothStronglyConvexInterpolator (with L=np.inf and mu=0) based on self.
49+
50+
"""
51+
return SmoothStronglyConvexInterpolator(self, L=np.inf, mu=0, options=options)
52+
4453
@staticmethod
4554
def set_convexity_constraint_i_j(xi, gi, fi,
4655
xj, gj, fj,

PEPit/functions/smooth_convex_function.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import numpy as np
22
from PEPit.function import Function
3+
from PEPit.interpolators import SmoothStronglyConvexInterpolator
34

45

56
class SmoothConvexFunction(Function):
@@ -65,6 +66,13 @@ def __init__(self,
6566
"L == np.inf. Instead, please use the class ConvexFunction (which accounts for the fact \n"
6667
"that there might be several subgradients at the same point).\033[0m")
6768

69+
def get_interpolator(self, options='lowest'):
70+
"""
71+
Returns: SmoothStronglyConvexInterpolator (with mu=0) based on self.
72+
73+
"""
74+
return SmoothStronglyConvexInterpolator(self, L=self.L, mu=0, options=options)
75+
6876
def set_smoothness_convexity_constraint_i_j(self,
6977
xi, gi, fi,
7078
xj, gj, fj,

PEPit/functions/smooth_function.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import numpy as np
22
from PEPit.function import Function
3+
from PEPit.interpolators import SmoothStronglyConvexInterpolator
34

45

56
class SmoothFunction(Function):
@@ -63,6 +64,13 @@ def __init__(self,
6364
print("\033[96m(PEPit) The class of L-smooth functions with L == np.inf implies no constraint: \n"
6465
"it contains all differentiable functions. This might imply issues in your code.\033[0m")
6566

67+
def get_interpolator(self, options='lowest'):
68+
"""
69+
Returns: SmoothStronglyConvexInterpolator (with mu=-L) based on self.
70+
71+
"""
72+
return SmoothStronglyConvexInterpolator(self, L=self.L, mu=-self.L, options=options)
73+
6674
def set_smoothness_i_j(self,
6775
xi, gi, fi,
6876
xj, gj, fj,

PEPit/functions/smooth_strongly_convex_function.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
import numpy as np
22
from PEPit.function import Function
3+
from PEPit.interpolators import SmoothStronglyConvexInterpolator
34

45

56
class SmoothStronglyConvexFunction(Function):
@@ -70,6 +71,13 @@ def __init__(self,
7071
"L == np.inf. Instead, please use the class StronglyConvexFunction (which accounts for the fact\n"
7172
"that there might be several sub-gradients at the same point).\033[0m")
7273

74+
def get_interpolator(self, options='lowest'):
75+
"""
76+
Returns: SmoothStronglyConvexInterpolator based on self.
77+
78+
"""
79+
return SmoothStronglyConvexInterpolator(self, L=self.L, mu=self.mu, options=options)
80+
7381
def set_smoothness_strong_convexity_constraint_i_j(self,
7482
xi, gi, fi,
7583
xj, gj, fj,

0 commit comments

Comments (0)