Commit

Merge pull request #87 from mrava87/main
HQS solver and L0 norm
mrava87 authored Jun 4, 2022
2 parents 4a13fff + 6941d00 commit 0090e34
Showing 8 changed files with 254 additions and 25 deletions.
2 changes: 2 additions & 0 deletions docs/source/api/index.rst
@@ -64,6 +64,7 @@ Convex
EuclideanBall
Huber
Intersection
L0
L0Ball
L1
L1Ball
@@ -136,6 +137,7 @@ Primal
AcceleratedProximalGradient
ADMM
ADMML2
HQS
LinearizedADMM
ProximalGradient
ProximalPoint
3 changes: 2 additions & 1 deletion pyproximal/optimization/__init__.py
@@ -11,8 +11,9 @@
ProximalPoint Proximal point algorithm (or proximal min.)
ProximalGradient Proximal gradient algorithm
AcceleratedProximalGradient Accelerated Proximal gradient algorithm
HQS                           Half Quadratic Splitting
ADMM Alternating Direction Method of Multipliers
ADMML2 ADMM with L2 misfit term
LinearizedADMM Linearized ADMM
TwIST Two-step Iterative Shrinkage/Threshold
PlugAndPlay Plug-and-Play Prior with ADMM
131 changes: 125 additions & 6 deletions pyproximal/optimization/primal.py
@@ -372,7 +372,115 @@ def AcceleratedProximalGradient(proxf, proxg, x0, tau=None, beta=0.5,
    return x


def HQS(proxf, proxg, x0, tau, niter=10, gfirst=True,
        callback=None, callbackz=False, show=False):
r"""Half Quadratic splitting
Solves the following minimization problem using the Half Quadratic
splitting algorithm:
.. math::
\mathbf{x},\mathbf{z} = \argmin_{\mathbf{x},\mathbf{z}}
f(\mathbf{x}) + g(\mathbf{z}) \\
s.t. \; \mathbf{x}=\mathbf{z}
where :math:`f(\mathbf{x})` and :math:`g(\mathbf{z})` are any convex
functions that have a known proximal operator.
Parameters
----------
proxf : :obj:`pyproximal.ProxOperator`
Proximal operator of f function
proxg : :obj:`pyproximal.ProxOperator`
Proximal operator of g function
x0 : :obj:`numpy.ndarray`
Initial vector
tau : :obj:`float`, optional
Positive scalar weight, which should satisfy the following condition
to guarantee convergence: :math:`\tau \in (0, 1/L]` where ``L`` is
the Lipschitz constant of :math:`\nabla f`.
niter : :obj:`int`, optional
Number of iterations of iterative scheme
gfirst : :obj:`bool`, optional
Apply the proximal operator of ``g`` first (``True``) or the proximal
operator of ``f`` first (``False``)
callback : :obj:`callable`, optional
Function with signature (``callback(x)``) to call after each iteration
where ``x`` is the current model vector
callbackz : :obj:`bool`, optional
Modify callback signature to (``callback(x, z)``) when ``callbackz=True``
show : :obj:`bool`, optional
Display iterations log
Returns
-------
x : :obj:`numpy.ndarray`
Inverted model
z : :obj:`numpy.ndarray`
Inverted second model
Notes
-----
The HQS algorithm can be expressed by the following recursion [1]_:
.. math::
\mathbf{z}^{k+1} = \prox_{\tau g}(\mathbf{x}^{k}) \\
\mathbf{x}^{k+1} = \prox_{\tau f}(\mathbf{z}^{k+1})
Note that ``x`` and ``z`` converge to each other; however, if iterations are
stopped too early, ``x`` is guaranteed to belong to the domain of ``f``
while ``z`` is guaranteed to belong to the domain of ``g``. Depending on
the problem, either of the two may be the best solution.
.. [1] D. Geman and C. Yang, "Nonlinear image recovery with half-quadratic
regularization", IEEE Transactions on Image Processing, 4, 7,
pp. 932-946, 1995.
"""
    if show:
        tstart = time.time()
        print('HQS\n'
              '---------------------------------------------------------\n'
              'Proximal operator (f): %s\n'
              'Proximal operator (g): %s\n'
              'tau = %10e\tniter = %d\n' % (type(proxf), type(proxg),
                                            tau, niter))
        head = ' Itn x[0] f g J = f + g'
        print(head)

    x = x0.copy()
    z = np.zeros_like(x)
    for iiter in range(niter):
        if gfirst:
            z = proxg.prox(x, tau)
            x = proxf.prox(z, tau)
        else:
            x = proxf.prox(z, tau)
            z = proxg.prox(x, tau)

        # run callback
        if callback is not None:
            if callbackz:
                callback(x, z)
            else:
                callback(x)

        if show:
            if iiter < 10 or niter - iiter < 10 or iiter % (niter // 10) == 0:
                pf, pg = proxf(x), proxg(x)
                msg = '%6g %12.5e %10.3e %10.3e %10.3e' % \
                      (iiter + 1, x[0], pf, pg, pf + pg)
                print(msg)
    if show:
        print('\nTotal time (s) = %.2f' % (time.time() - tstart))
        print('---------------------------------------------------------\n')
    return x, z

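For context, a minimal usage sketch of the new solver (not part of this commit), assuming pyproximal's ``L2`` and ``L1`` proximal operators and purely illustrative parameter values:

    import numpy as np
    import pyproximal

    np.random.seed(0)
    n = 100
    xtrue = np.zeros(n)
    xtrue[np.random.permutation(n)[:5]] = 1.
    y = xtrue + 0.1 * np.random.randn(n)          # noisy data

    proxf = pyproximal.L2(b=y)                    # quadratic data term 0.5 ||x - y||_2^2
    proxg = pyproximal.L1(sigma=0.1)              # sparsity-promoting regularizer

    # HQS returns both iterates: x lies in the domain of f, z in the domain of g
    xhqs, zhqs = pyproximal.optimization.primal.HQS(proxf, proxg, np.zeros(n),
                                                    tau=1., niter=50, show=False)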

def ADMM(proxf, proxg, x0, tau, niter=10, gfirst=False,
         callback=None, callbackz=False, show=False):
r"""Alternating Direction Method of Multipliers
Solves the following minimization problem using Alternating Direction
@@ -417,9 +525,14 @@ def ADMM(proxf, proxg, x0, tau, niter=10, callback=None, show=False):
the Lipschitz constant of :math:`\nabla f`.
niter : :obj:`int`, optional
Number of iterations of iterative scheme
gfirst : :obj:`bool`, optional
Apply the proximal operator of ``g`` first (``True``) or the proximal
operator of ``f`` first (``False``)
callback : :obj:`callable`, optional
Function with signature (``callback(x)``) to call after each iteration
where ``x`` is the current model vector
callbackz : :obj:`bool`, optional
Modify callback signature to (``callback(x, z)``) when ``callbackz=True``
show : :obj:`bool`, optional
Display iterations log
@@ -445,7 +558,7 @@ def ADMM(proxf, proxg, x0, tau, niter=10, callback=None, show=False):
\mathbf{z}^{k+1} = \prox_{\tau g}(\mathbf{x}^{k+1} + \mathbf{u}^{k})\\
\mathbf{u}^{k+1} = \mathbf{u}^{k} + \mathbf{x}^{k+1} - \mathbf{z}^{k+1}
Note that ``x`` and ``z`` converge to each other; however, if iterations are
stopped too early, ``x`` is guaranteed to belong to the domain of ``f``
while ``z`` is guaranteed to belong to the domain of ``g``. Depending on
the problem, either of the two may be the best solution.
@@ -465,14 +578,20 @@ def ADMM(proxf, proxg, x0, tau, niter=10, callback=None, show=False):
    x = x0.copy()
    u = z = np.zeros_like(x)
    for iiter in range(niter):
        if gfirst:
            z = proxg.prox(x + u, tau)
            x = proxf.prox(z - u, tau)
        else:
            x = proxf.prox(z - u, tau)
            z = proxg.prox(x + u, tau)
        u = u + x - z

        # run callback
        if callback is not None:
            if callbackz:
                callback(x, z)
            else:
                callback(x)
        if show:
            if iiter < 10 or niter - iiter < 10 or iiter % (niter // 10) == 0:
                pf, pg = proxf(x), proxg(x)
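As a side note (not from the diff), the new ``gfirst`` and ``callbackz`` options could be exercised as in the sketch below, reusing the illustrative ``proxf``, ``proxg`` and ``n`` from the HQS sketch above:

    # record both primal variables at every ADMM iteration
    xhist, zhist = [], []

    def store(x, z):
        xhist.append(x.copy())
        zhist.append(z.copy())

    xadmm, zadmm = pyproximal.optimization.primal.ADMM(proxf, proxg, np.zeros(n),
                                                       tau=1., niter=50, gfirst=False,
                                                       callback=store, callbackz=True)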
46 changes: 31 additions & 15 deletions pyproximal/optimization/primaldual.py
@@ -3,7 +3,7 @@


def PrimalDual(proxf, proxg, A, x0, tau, mu, z=None, theta=1., niter=10,
               gfirst=True, callback=None, callbacky=False, show=False):
r"""Primal-dual algorithm
Solves the following (possibly) nonlinear minimization problem using
Expand Down Expand Up @@ -39,10 +39,12 @@ def PrimalDual(proxf, proxg, A, x0, tau, mu, z=None, theta=1., niter=10,
Linear operator of g
x0 : :obj:`numpy.ndarray`
Initial vector
tau : :obj:`float` or :obj:`np.ndarray`
Stepsize of subgradient of :math:`f`. This can be constant
or vary with iterations (in the latter case, provide an array
of size ``niter``)
mu : :obj:`float` or :obj:`np.ndarray`
Stepsize of subgradient of :math:`g^*`. This can be constant
or vary with iterations (in the latter case, provide an array
of size ``niter``)
z : :obj:`numpy.ndarray`, optional
Additional vector
theta : :obj:`float`
@@ -58,6 +60,8 @@ def PrimalDual(proxf, proxg, A, x0, tau, mu, z=None, theta=1., niter=10,
callback : :obj:`callable`, optional
Function with signature (``callback(x)``) to call after each iteration
where ``x`` is the current model vector
callbacky : :obj:`bool`, optional
Modify callback signature to (``callback(x, y)``) when ``callbacky=True``
show : :obj:`bool`, optional
Display iterations log
@@ -93,11 +97,20 @@ def PrimalDual(proxf, proxg, A, x0, tau, mu, z=None, theta=1., niter=10,
\mathbf{y}^{k+1} = \prox_{\mu g^*}(\mathbf{y}^{k} +
\mu \mathbf{A}\bar{\mathbf{x}}^{k+1})
.. [1] A. Chambolle and T. Pock, "A first-order primal-dual algorithm for
convex problems with applications to imaging", Journal of Mathematical
Imaging and Vision, 40, pp. 120-145, 2011.
"""
    # check if tau and mu are scalars or arrays
    fixedtau = fixedmu = False
    if isinstance(tau, (int, float)):
        tau = tau * np.ones(niter)
        fixedtau = True
    if isinstance(mu, (int, float)):
        mu = mu * np.ones(niter)
        fixedmu = True

    if show:
        tstart = time.time()
        print('Primal-dual: min_x f(Ax) + x^T z + g(x)\n'
@@ -106,9 +119,10 @@ def PrimalDual(proxf, proxg, A, x0, tau, mu, z=None, theta=1., niter=10,
              'Proximal operator (g): %s\n'
              'Linear operator (A): %s\n'
              'Additional vector (z): %s\n'
              'tau = %s\t\tmu = %s\ntheta = %.2f\t\tniter = %d\n' %
              (type(proxf), type(proxg), type(A),
               None if z is None else 'vector', str(tau[0]) if fixedtau else 'Variable',
               str(mu[0]) if fixedmu else 'Variable', theta, niter))
        head = ' Itn x[0] f g z^x J = f + g + z^x'
        print(head)

@@ -119,24 +133,26 @@ def PrimalDual(proxf, proxg, A, x0, tau, mu, z=None, theta=1., niter=10,
    for iiter in range(niter):
        xold = x.copy()
        if gfirst:
            y = proxg.proxdual(y + mu[iiter] * A.matvec(xhat), mu[iiter])
            ATy = A.rmatvec(y)
            if z is not None:
                ATy += z
            x = proxf.prox(x - tau[iiter] * ATy, tau[iiter])
            xhat = x + theta * (x - xold)
        else:
            ATy = A.rmatvec(y)
            if z is not None:
                ATy += z
            x = proxf.prox(x - tau[iiter] * ATy, tau[iiter])
            xhat = x + theta * (x - xold)
            y = proxg.proxdual(y + mu[iiter] * A.matvec(xhat), mu[iiter])

        # run callback
        if callback is not None:
            if callbacky:
                callback(x, y)
            else:
                callback(x)
        if show:
            if iiter < 10 or niter - iiter < 10 or iiter % (niter // 10) == 0:
                pf, pg = proxf(x), proxg(A.matvec(x))
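For illustration (not part of the commit), the per-iteration stepsizes introduced above could be built as arrays of length ``niter``; ``proxf``, ``proxg``, ``A`` and ``x0`` are placeholders for operators and data defined elsewhere:

    import numpy as np

    niter = 100
    # geometrically decaying stepsizes, one value per iteration
    tau = 0.95 * 0.99 ** np.arange(niter)
    mu = 0.95 * 0.99 ** np.arange(niter)

    # proxf, proxg, A and x0 are assumed to be defined elsewhere
    # x = pyproximal.optimization.primaldual.PrimalDual(proxf, proxg, A, x0,
    #                                                   tau=tau, mu=mu, niter=niter)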
6 changes: 5 additions & 1 deletion pyproximal/projection/L1.py
@@ -40,4 +40,8 @@ def __init__(self, n, radius, maxiter=100, xtol=1e-5):
        self.simplex = SimplexProj(n, radius, maxiter, xtol)

    def __call__(self, x):
        if np.iscomplexobj(x):
            return np.exp(1j * np.angle(x)) * self.simplex(np.abs(x))
        else:
            return np.sign(x) * self.simplex(np.abs(x))

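A quick standalone check (not in the commit) of the identity the complex branch relies on: multiplying the unit-modulus phase factor by the magnitudes reconstructs the original values, so any projection applied to ``np.abs(x)`` preserves the phase of ``x``. The thresholding below is only a stand-in for the simplex projection:

    import numpy as np

    x = np.array([3 + 4j, -1 - 2j, 0.5j])

    # phase factor of each entry; multiplied by the magnitudes it recovers x
    phase = np.exp(1j * np.angle(x))
    assert np.allclose(phase * np.abs(x), x)

    # shrink the magnitudes (stand-in for the simplex projection); phase is kept
    projected = phase * np.maximum(np.abs(x) - 0.5, 0)
    print(projected)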