|
| 1 | +import pytest |
| 2 | + |
| 3 | +import numpy as np |
| 4 | +from numpy.testing import assert_array_almost_equal |
| 5 | +from pylops.basicoperators import MatrixMult |
| 6 | +from pyproximal.proximal import L1, L2 |
| 7 | +from pyproximal.optimization.primal import ProximalGradient, GeneralizedProximalGradient |
| 8 | + |
# Test parametrizations: identical problem size, differing only in dtype.
par1 = {'n': 8, 'm': 10, 'dtype': 'float32'}  # float32
par2 = {'n': 8, 'm': 10, 'dtype': 'float64'}  # float64
| 11 | + |
| 12 | + |
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_PG_GPG(par):
    """Check equivalency of ProximalGradient and GeneralizedProximalGradient when using
    a single regularization term
    """
    np.random.seed(0)
    nrows, ncols = par['n'], par['m']

    # Sparse ground-truth model with two nonzero entries
    xtrue = np.zeros(ncols)
    xtrue[2], xtrue[4] = 1, 0.5

    # Random Gaussian mixing operator
    G = np.random.normal(0., 1., (nrows, ncols))
    Gop = MatrixMult(G)

    # Observed data
    data = Gop @ xtrue

    # Step size from the leading eigenvalue of G^H G
    maxeig = (Gop.H * Gop).eigs(1).real
    tau = 0.99 / maxeig

    # Solve with ProximalGradient (single L1 regularizer)
    xinv_pg = ProximalGradient(
        L2(Op=Gop, b=data, niter=10, warm=True),
        L1(sigma=5e-1),
        x0=np.zeros(ncols),
        tau=tau, niter=100,
        acceleration='fista')

    # Solve with GeneralizedProximalGradient using the same terms
    # passed as single-element lists
    xinv_gpg = GeneralizedProximalGradient(
        [L2(Op=Gop, b=data, niter=10, warm=True), ],
        [L1(sigma=5e-1), ],
        x0=np.zeros(ncols),
        tau=tau, niter=100,
        acceleration='fista')

    # Both solvers must converge to the same solution
    assert_array_almost_equal(xinv_pg, xinv_gpg, decimal=2)