 import time
+import warnings
 import numpy as np
 
 from math import sqrt
@@ -100,9 +101,9 @@ def ProximalPoint(prox, x0, tau, niter=10, callback=None, show=False):
 
 
 def ProximalGradient(proxf, proxg, x0, tau=None, beta=0.5,
-                     epsg=1., niter=10, niterback=100,
-                     acceleration='vandenberghe',
-                     callback=None, show=False):
+                     epsg=1., niter=10, niterback=100,
+                     acceleration=None,
+                     callback=None, show=False):
     r"""Proximal gradient (optionally accelerated)
 
     Solves the following minimization problem using (Accelerated) Proximal
@@ -131,7 +132,7 @@ def ProximalGradient(proxf, proxg, x0, tau=None, beta=0.5,
         backtracking is used to adaptively estimate the best tau at each
         iteration. Finally note that :math:`\tau` can be chosen to be a vector
         when dealing with problems with multiple right-hand-sides
-    beta : obj:`float`, optional
+    beta : :obj:`float`, optional
         Backtracking parameter (must be between 0 and 1)
     epsg : :obj:`float` or :obj:`np.ndarray`, optional
         Scaling factor of g function
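As context for the backtracking mentioned in the ``tau`` description above: when ``tau=None`` the step is shrunk by ``beta`` until a sufficient-decrease condition holds. A minimal sketch of that idea, assuming the pyproximal ``ProxOperator`` interface (callable for the function value, with ``grad`` and ``prox`` methods); this is not the library's exact implementation:

import numpy as np

def backtrack_sketch(proxf, proxg, x, beta=0.5, niterback=100, tau=1.):
    # shrink tau until f at the proximal step sits below its quadratic
    # upper bound f(x) + grad(x)'dx + ||dx||^2 / (2 tau)
    g = proxf.grad(x)
    z = x
    for _ in range(niterback):
        z = proxg.prox(x - tau * g, tau)
        dx = z - x
        if proxf(z) <= proxf(x) + np.dot(g, dx) + np.dot(dx, dx) / (2. * tau):
            break
        tau *= beta
    return z, tau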
@@ -239,7 +240,7 @@ def ProximalGradient(proxf, proxg, x0, tau=None, beta=0.5,
         # update y
         if acceleration == 'vandenberghe':
             omega = iiter / (iiter + 3)
-        elif acceleration == 'fista':
+        elif acceleration == 'fista':
             told = t
             t = (1. + np.sqrt(1. + 4. * t ** 2)) / 2.
             omega = ((told - 1.) / t)
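Both branches in the hunk above compute the extrapolation weight ``omega`` used to build the auxiliary point ``y``. A small standalone snippet, mirroring the two formulas shown, illustrates how the schedules behave:

import numpy as np

t = 1.
for iiter in range(1, 6):
    omega_v = iiter / (iiter + 3)                  # 'vandenberghe' schedule
    told = t
    t = (1. + np.sqrt(1. + 4. * t ** 2)) / 2.
    omega_f = (told - 1.) / t                      # 'fista' schedule
    print(iiter, round(omega_v, 3), round(omega_f, 3))

Both weights increase toward 1, which is what yields the accelerated O(1/k^2) convergence rate.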
@@ -264,9 +265,31 @@ def ProximalGradient(proxf, proxg, x0, tau=None, beta=0.5,
         print('---------------------------------------------------------\n')
     return x
 
-def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None, beta=0.5,
+
+def AcceleratedProximalGradient(proxf, proxg, x0, tau=None, beta=0.5,
+                                epsg=1., niter=10, niterback=100,
+                                acceleration='vandenberghe',
+                                callback=None, show=False):
+    r"""Accelerated Proximal gradient
+
+    This is a thin wrapper around :func:`pyproximal.optimization.primal.ProximalGradient` with
+    ``vandenberghe`` or ``fista`` acceleration. See :func:`pyproximal.optimization.primal.ProximalGradient`
+    for details.
+
+    """
+    warnings.warn('AcceleratedProximalGradient has been integrated directly into ProximalGradient '
+                  'from v0.5.0. It is recommended to start using ProximalGradient with the '
+                  'appropriate acceleration parameter, as this behaviour will become the default in '
+                  'v1.0.0 and AcceleratedProximalGradient will be removed.', FutureWarning)
+    return ProximalGradient(proxf, proxg, x0, tau=tau, beta=beta,
+                            epsg=epsg, niter=niter, niterback=niterback,
+                            acceleration=acceleration,
+                            callback=callback, show=show)
+
+
+def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
                                 epsg=1., niter=10,
-                                acceleration='None',
+                                acceleration=None,
                                 callback=None, show=False):
     r"""Generalized Proximal gradient
 
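The deprecation path introduced by the wrapper above amounts to a one-line change for users. A hedged before/after sketch (the ``L2``/``L1`` operators and sizes are arbitrary toy choices):

import numpy as np
import pyproximal
from pyproximal.optimization.primal import ProximalGradient

# toy problem: smooth L2 misfit plus an L1 regularizer
f = pyproximal.L2(b=np.arange(5.))
g = pyproximal.L1(sigma=0.1)
x0 = np.zeros(5)

# before (now emits a FutureWarning):
#   x = AcceleratedProximalGradient(f, g, x0, tau=0.9)
# after: same solver, acceleration selected explicitly
x = ProximalGradient(f, g, x0, tau=0.9, acceleration='fista')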
@@ -285,9 +308,9 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None, beta=0.5,
     Parameters
     ----------
     proxfs : :obj:`List of pyproximal.ProxOperator`
-        Proximal operators of the f_i functions (must have ``grad`` implemented)
+        Proximal operators of the :math:`f_i` functions (must have ``grad`` implemented)
     proxgs : :obj:`List of pyproximal.ProxOperator`
-        Proximal operators of the g_j functions
+        Proximal operators of the :math:`g_j` functions
     x0 : :obj:`numpy.ndarray`
         Initial vector
     tau : :obj:`float` or :obj:`numpy.ndarray`, optional
@@ -296,8 +319,6 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None, beta=0.5,
         the Lipschitz constant of :math:`\sum_{i=1}^n \nabla f_i`. When ``tau=None``,
         backtracking is used to adaptively estimate the best tau at each
         iteration.
-    beta : obj:`float`, optional
-        Backtracking parameter (must be between 0 and 1)
     epsg : :obj:`float` or :obj:`np.ndarray`, optional
         Scaling factor of g function
     niter : :obj:`int`, optional
@@ -323,7 +344,7 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None, beta=0.5,
     .. math::
         \text{for } j=1,\cdots,n, \\
         ~~~~\mathbf z_j^{k+1} = \mathbf z_j^{k} + \eta_k (prox_{\frac{\tau^k}{\omega_j} g_j}(2 \mathbf{x}^{k} - z_j^{k})
-        - \tau^k \sum_{i=1}^n \nabla f_i(\mathbf{x}^{k})) - \mathbf{x}^{k} - \\
+        - \tau^k \sum_{i=1}^n \nabla f_i(\mathbf{x}^{k})) - \mathbf{x}^{k} \\
         \mathbf{x}^{k+1} = \sum_{j=1}^n \omega_j \mathbf{z}_j^{k+1} \\
 
     where :math:`\sum_{j=1}^n \omega_j=1`.
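The update above, with equal weights :math:`\omega_j = 1/n` as in the implementation further down (``epsg`` plays the role of :math:`\eta_k`), can be sketched in plain NumPy; a minimal sketch, not the library's exact code:

import numpy as np

def gpg_step(proxfs, proxgs, x, zs, tau, epsg):
    # one iteration of the generalized proximal gradient update with
    # omega_j = 1 / n; zs holds the auxiliary vectors z_j
    grad = sum(proxf.grad(x) for proxf in proxfs)
    n = len(proxgs)
    xnew = np.zeros_like(x)
    for j, proxg in enumerate(proxgs):
        tmp = proxg.prox(2 * x - zs[j] - tau * grad, tau * n)
        zs[j] += epsg * (tmp - x)
        xnew += zs[j] / n
    return xnew, zs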
@@ -343,10 +364,10 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None, beta=0.5,
               '---------------------------------------------------------\n'
               'Proximal operators (f): %s\n'
               'Proximal operators (g): %s\n'
-              'tau = %10e\tbeta=%10e\nepsg = %s\tniter = %d\n' % ([type(proxf) for proxf in proxfs],
+              'tau = %10e\nepsg = %s\tniter = %d\n' % ([type(proxf) for proxf in proxfs],
                                                        [type(proxg) for proxg in proxgs],
                                                        0 if tau is None else tau,
-                                                       beta, epsg_print, niter))
+                                                       epsg_print, niter))
         head = '   Itn       x[0]          f           g       J=f+eps*g'
         print(head)
 
@@ -373,7 +394,7 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None, beta=0.5,
             tmp = 2 * y - zs[i] - tau * grad
             tmp[:] = proxg.prox(tmp, tau * len(proxgs))
             zs[i] += epsg * (tmp - y)
-            sol += zs[i]/len(proxgs)
+            sol += zs[i] / len(proxgs)
         x[:] = sol.copy()
 
         # update y
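End to end, the changed ``GeneralizedProximalGradient`` signature can be exercised on a toy problem. A hedged sketch, assuming pyproximal's ``L2``, ``L1`` and ``Box`` operators, pylops' ``MatrixMult`` (with its dense matrix in the ``A`` attribute), and arbitrary toy sizes:

import numpy as np
import pylops
import pyproximal
from pyproximal.optimization.primal import GeneralizedProximalGradient

np.random.seed(0)
A = pylops.MatrixMult(np.random.randn(30, 20))
b = A @ np.ones(20)

f = pyproximal.L2(Op=A, b=b)          # smooth term (grad implemented)
gs = [pyproximal.L1(sigma=0.1),       # two non-smooth terms, each with
      pyproximal.Box(-1., 1.)]        # its own auxiliary vector z_j

tau = 0.95 / np.linalg.norm(A.A, 2) ** 2   # below 1 / Lipschitz(grad f)
x = GeneralizedProximalGradient([f], gs, x0=np.zeros(20),
                                tau=tau, niter=200)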