@@ -308,7 +308,7 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
308308 .. math::
309309
310310 \mathbf{x} = \argmin_\mathbf{x} \sum_{i=1}^n f_i(\mathbf{x})
311- + \sum_{j=1}^m \tau_j g_j(\mathbf{x}),~~n,m \in \mathbb{N}^+
311+ + \sum_{j=1}^m \epsilon_j g_j(\mathbf{x}),~~n,m \in \mathbb{N}^+
312312
313313 where the :math:`f_i(\mathbf{x})` are smooth convex functions with a uniquely
314314 defined gradient and the :math:`g_j(\mathbf{x})` are any convex function that
@@ -329,7 +329,7 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
329329 backtracking is used to adaptively estimate the best tau at each
330330 iteration.
331331 epsg : :obj:`float` or :obj:`np.ndarray`, optional
332- Scaling factor of g function
332+ Scaling factor(s) of ``g`` function(s)
333333 niter : :obj:`int`, optional
334334 Number of iterations of iterative scheme
335335 acceleration: :obj:`str`, optional
@@ -352,11 +352,12 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
352352
353353 .. math::
354354 \text{for } j=1,\cdots,n, \\
355- ~~~~\mathbf z_j^{k+1} = \mathbf z_j^{k} + \eta_k (prox_{\frac{\tau^k}{\omega_j} g_j}(2 \mathbf{x}^{k} - z_j^{k})
356- - \tau^k \sum_{i=1}^n \nabla f_i(\mathbf{x}^{k})) - \mathbf{x}^{k} \\
355+ ~~~~\mathbf z_j^{k+1} = \mathbf z_j^{k} + \epsilon_j
356+ \left[prox_{\frac{\tau^k}{\omega_j} g_j}\left(2 \mathbf{x}^{k} - \mathbf{z}_j^{k}
357+ - \tau^k \sum_{i=1}^n \nabla f_i(\mathbf{x}^{k})\right) - \mathbf{x}^{k} \right] \\
357358 			\mathbf{x}^{k+1} = \sum_{j=1}^n \omega_j \mathbf{z}_j^{k+1} \\
358359
359- where :math:`\sum_{j=1}^n \omega_j=1`.
360+ where :math:`\sum_{j=1}^n \omega_j=1`. In the current implementation :math:`\omega_j=1/n`.
360361 """
361362 	# check if epsg is a vector
362363 if np .asarray (epsg ).size == 1. :
@@ -393,23 +394,23 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
393394 for iiter in range (niter ):
394395 xold = x .copy ()
395396
396- # proximal step
397+ # gradient
397398 grad = np .zeros_like (x )
398399 for i , proxf in enumerate (proxfs ):
399400 grad += proxf .grad (x )
400401
401- sol = np .zeros_like (x )
402+ # proximal step
403+ x = np .zeros_like (x )
402404 for i , proxg in enumerate (proxgs ):
403- tmp = 2 * y - zs [i ] - tau * grad
404- tmp [:] = proxg .prox (tmp , tau * len (proxgs ) )
405- zs [i ] += epsg * (tmp - y )
406- sol += zs [i ] / len (proxgs )
407- x [:] = sol .copy ()
405+ ztmp = 2 * y - zs [i ] - tau * grad
406+ ztmp = proxg .prox (ztmp , tau * len (proxgs ))
407+ zs [i ] += epsg * (ztmp - y )
408+ x += zs [i ] / len (proxgs )
408409
409410 # update y
410411 if acceleration == 'vandenberghe' :
411412 omega = iiter / (iiter + 3 )
412- elif acceleration == 'fista' :
413+ elif acceleration == 'fista' :
413414 told = t
414415 t = (1. + np .sqrt (1. + 4. * t ** 2 )) / 2.
415416 omega = ((told - 1. ) / t )
@@ -781,7 +782,7 @@ def ADMML2(proxg, Op, b, A, x0, tau, niter=10, callback=None, show=False, **kwar
781782
782783 if show :
783784 if iiter < 10 or niter - iiter < 10 or iiter % (niter // 10 ) == 0 :
784- pf , pg = np .linalg .norm (Op @ x - b ), proxg (Ax )
785+ pf , pg = 0.5 * np .linalg .norm (Op @ x - b ) ** 2 , proxg (Ax )
785786 msg = '%6g %12.5e %10.3e %10.3e %10.3e' % \
786787 (iiter + 1 , x [0 ], pf , pg , pf + pg )
787788 print (msg )
0 commit comments