@@ -309,7 +309,7 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
     .. math::
 
         \mathbf{x} = \argmin_\mathbf{x} \sum_{i=1}^n f_i(\mathbf{x})
-            + \sum_{j=1}^m \tau_j g_j(\mathbf{x}),~~n,m \in \mathbb{N}^+
+            + \sum_{j=1}^m \epsilon_j g_j(\mathbf{x}),~~n,m \in \mathbb{N}^+
 
     where the :math:`f_i(\mathbf{x})` are smooth convex functions with a uniquely
     defined gradient and the :math:`g_j(\mathbf{x})` are any convex function that
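
For intuition (an illustrative special case, not part of this diff): with a single smooth term and a single scaled non-smooth term, i.e. n = m = 1, the objective reduces to sparsity-promoting least squares,

.. math::
    \mathbf{x} = \argmin_\mathbf{x} \frac{1}{2} \|\mathbf{A}\mathbf{x} - \mathbf{b}\|_2^2
    + \epsilon_1 \|\mathbf{x}\|_1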
@@ -330,7 +330,7 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
         backtracking is used to adaptively estimate the best tau at each
         iteration.
     epsg : :obj:`float` or :obj:`np.ndarray`, optional
-        Scaling factor of g function
+        Scaling factor(s) of ``g`` function(s)
     niter : :obj:`int`, optional
         Number of iterations of iterative scheme
     acceleration : :obj:`str`, optional
@@ -353,11 +353,12 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
 
     .. math::
         \text{for } j=1,\cdots,n, \\
-        ~~~~\mathbf{z}_j^{k+1} = \mathbf{z}_j^{k} + \eta_k (prox_{\frac{\tau^k}{\omega_j} g_j}(2 \mathbf{x}^{k} - \mathbf{z}_j^{k})
-        - \tau^k \sum_{i=1}^n \nabla f_i(\mathbf{x}^{k})) - \mathbf{x}^{k} \\
+        ~~~~\mathbf{z}_j^{k+1} = \mathbf{z}_j^{k} + \epsilon_j
+        \left[prox_{\frac{\tau^k}{\omega_j} g_j}\left(2 \mathbf{x}^{k} - \mathbf{z}_j^{k}
+        - \tau^k \sum_{i=1}^n \nabla f_i(\mathbf{x}^{k})\right) - \mathbf{x}^{k} \right] \\
         \mathbf{x}^{k+1} = \sum_{j=1}^n \omega_j \mathbf{z}_j^{k+1} \\
 
-    where :math:`\sum_{j=1}^n \omega_j=1`.
+    where :math:`\sum_{j=1}^n \omega_j=1`. In the current implementation, :math:`\omega_j=1/n`.
     """
     # check if epsg is a vector
     if np.asarray(epsg).size == 1.:
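
A minimal usage sketch of the signature documented above (not part of the diff; it assumes pyproximal's L2 and L1 proximal operators and pylops' MatrixMult, and the operator, data, and parameter values are purely illustrative):

import numpy as np
import pylops
import pyproximal
from pyproximal.optimization.primal import GeneralizedProximalGradient

np.random.seed(0)
nr, nc = 40, 60
A = np.random.randn(nr, nc)
Op = pylops.MatrixMult(A)
xtrue = np.zeros(nc)
xtrue[[3, 17, 42]] = 1.
b = Op @ xtrue

proxfs = [pyproximal.L2(Op=Op, b=b)]     # smooth f_1: 0.5 ||Op x - b||_2^2
proxgs = [pyproximal.L1()]               # non-smooth g_1: ||x||_1
tau = 0.99 / np.linalg.norm(A, 2) ** 2   # 1 / Lipschitz constant of grad f_1
xest = GeneralizedProximalGradient(proxfs, proxgs, x0=np.zeros(nc),
                                   tau=tau, epsg=0.1, niter=200)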
@@ -394,23 +395,23 @@ def GeneralizedProximalGradient(proxfs, proxgs, x0, tau=None,
     for iiter in range(niter):
         xold = x.copy()
 
-        # proximal step
+        # gradient
         grad = np.zeros_like(x)
         for i, proxf in enumerate(proxfs):
             grad += proxf.grad(x)
 
-        sol = np.zeros_like(x)
+        # proximal step
+        x = np.zeros_like(x)
         for i, proxg in enumerate(proxgs):
-            tmp = 2 * y - zs[i] - tau * grad
-            tmp[:] = proxg.prox(tmp, epsg * tau * len(proxgs))
-            zs[i] += (tmp - y)
-            sol += zs[i] / len(proxgs)
-        x[:] = sol.copy()
+            ztmp = 2 * y - zs[i] - tau * grad
+            ztmp = proxg.prox(ztmp, epsg * tau * len(proxgs))
+            zs[i] += (ztmp - y)
+            x += zs[i] / len(proxgs)
 
         # update y
         if acceleration == 'vandenberghe':
             omega = iiter / (iiter + 3)
-        elif acceleration == 'fista':
+        elif acceleration == 'fista':
             told = t
             t = (1. + np.sqrt(1. + 4. * t ** 2)) / 2.
             omega = ((told - 1.) / t)
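
Read back out of the diff, the rewritten inner loop amounts to the following self-contained step (a plain NumPy sketch; gpg_step, grad_f, and prox_gs are hypothetical stand-ins for the proxf.grad / proxg.prox calls above):

import numpy as np

def gpg_step(y, zs, grad_f, prox_gs, tau, epsg):
    # Gradient of the smooth terms, computed once and shared by all prox updates
    grad = grad_f(y)
    m = len(prox_gs)
    x = np.zeros_like(y)
    for j, prox_g in enumerate(prox_gs):
        # Reflected point minus gradient step, fed to the j-th proximal operator
        ztmp = prox_g(2 * y - zs[j] - tau * grad, epsg * tau * m)
        zs[j] += ztmp - y
        x += zs[j] / m   # omega_j = 1/m, as noted in the docstring
    return x, zs

# Tiny check with f(x) = 0.5 ||x - b||^2 and g = ||.||_1 (prox = soft-thresholding)
b = np.array([3., -0.2, 0.5])
grad_f = lambda x: x - b
soft = lambda v, thr: np.sign(v) * np.maximum(np.abs(v) - thr, 0.)
x, zs = gpg_step(np.zeros(3), [np.zeros(3)], grad_f, [soft], tau=1., epsg=0.4)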
@@ -782,7 +783,7 @@ def ADMML2(proxg, Op, b, A, x0, tau, niter=10, callback=None, show=False, **kwar
 
         if show:
             if iiter < 10 or niter - iiter < 10 or iiter % (niter // 10) == 0:
-                pf, pg = np.linalg.norm(Op @ x - b), proxg(Ax)
+                pf, pg = 0.5 * np.linalg.norm(Op @ x - b) ** 2, proxg(Ax)
                 msg = '%6g %12.5e %10.3e %10.3e %10.3e' % \
                       (iiter + 1, x[0], pf, pg, pf + pg)
                 print(msg)
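
With this fix the logged data term matches the quadratic that ADMML2 actually minimizes; schematically (with Op, x, b as in the function body):

pf = 0.5 * np.linalg.norm(Op @ x - b) ** 2   # f(x) = 1/2 ||Op x - b||_2^2

so pf + pg now tracks the objective f(x) + g(Ax), whereas the unsquared norm previously mixed scales in the printed cost.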