diff --git a/examples/plot_stacked_array.py b/examples/plot_stacked_array.py index d82cc36a..a92a98f5 100644 --- a/examples/plot_stacked_array.py +++ b/examples/plot_stacked_array.py @@ -104,7 +104,6 @@ print('Dot-product:', dot_arr) print('Dot-product (np):', np.dot(full_arr1, full_arr2)) - ############################################################################### # **Norms** l0norm = arr1.norm(0) @@ -120,8 +119,8 @@ ############################################################################### # Now that we have a way to stack multiple :py:class:`pylops_mpi.StackedDistributedArray` objects, -# let's see how we can apply operators on them. More specifically this can be -# done using the :py:class:`pylops_mpi.StackedVStack` operator that takes multiple +# let's see how we can apply operators to them. More specifically this can be +# done using the :py:class:`pylops_mpi.MPIStackedVStack` operator that takes multiple # :py:class:`pylops_mpi.MPILinearOperator` objects, each acting on one specific # distributed array x = pylops_mpi.DistributedArray(global_shape=size * 10, diff --git a/pylops_mpi/StackedLinearOperator.py b/pylops_mpi/StackedLinearOperator.py index 3e444abd..a96fe29c 100644 --- a/pylops_mpi/StackedLinearOperator.py +++ b/pylops_mpi/StackedLinearOperator.py @@ -15,7 +15,7 @@ class MPIStackedLinearOperator(ABC): for StackedLinearOperators. This class provides methods to perform matrix-vector product and adjoint matrix-vector - products using MPI. + products on a stack of MPILinearOperator objects. .. note:: End users of pylops-mpi should not use this class directly but simply use operators that are already implemented. 
This class is meant for @@ -33,7 +33,8 @@ class MPIStackedLinearOperator(ABC): """ def __init__(self, shape: Optional[ShapeLike] = None, - dtype: Optional[DTypeLike] = None, base_comm: MPI.Comm = MPI.COMM_WORLD): + dtype: Optional[DTypeLike] = None, + base_comm: MPI.Comm = MPI.COMM_WORLD): if shape: self.shape = shape if dtype: @@ -110,7 +111,6 @@ def adjoint(self): Adjoint of Operator """ - return self._adjoint() H = property(adjoint) @@ -124,7 +124,6 @@ def transpose(self): Transpose MPIStackedLinearOperator """ - return self._transpose() T = property(transpose) diff --git a/pylops_mpi/basicoperators/BlockDiag.py b/pylops_mpi/basicoperators/BlockDiag.py index 67916cb5..685638f7 100644 --- a/pylops_mpi/basicoperators/BlockDiag.py +++ b/pylops_mpi/basicoperators/BlockDiag.py @@ -14,81 +14,81 @@ class MPIBlockDiag(MPILinearOperator): r"""MPI Block-diagonal operator. - Create a block-diagonal operator from a set of linear operators using MPI. - Each rank must initialize this operator by providing one or more linear operators - which will be computed within such rank. - - Both model and data vectors must be of :class:`pylops_mpi.DistributedArray` type and partitioned between ranks - according to the shapes of the different linear operators. - - Parameters - ---------- - ops : :obj:`list` - One or more :class:`pylops.LinearOperator` to be stacked. - base_comm : :obj:`mpi4py.MPI.Comm`, optional - Base MPI Communicator. Defaults to ``mpi4py.MPI.COMM_WORLD``. - dtype : :obj:`str`, optional - Type of elements in input array. - - Attributes - ---------- - shape : :obj:`tuple` - Operator shape - - Notes - ----- - An MPI Block Diagonal operator is composed of N linear operators, represented by **L**. - Each rank has one or more :class:`pylops.LinearOperator`, which we represent here compactly - as :math:`\mathbf{L}_i` for rank :math:`i`. - - Each operator performs forward mode operations using its corresponding model vector, denoted as **m**. 
- This vector is effectively a :class:`pylops_mpi.DistributedArray` partitioned at each rank in such a way that - its local shapes agree with those of the corresponding linear operators. - The forward mode of each operator is then collected from all ranks as a DistributedArray, referred to as **d**. - - .. math:: - \begin{bmatrix} - \mathbf{d}_1 \\ - \mathbf{d}_2 \\ - \vdots \\ - \mathbf{d}_n - \end{bmatrix} = - \begin{bmatrix} - \mathbf{L}_1 & \mathbf{0} & \ldots & \mathbf{0} \\ - \mathbf{0} & \mathbf{L}_2 & \ldots & \mathbf{0} \\ - \vdots & \vdots & \ddots & \vdots \\ - \mathbf{0} & \mathbf{0} & \ldots & \mathbf{L}_n - \end{bmatrix} - \begin{bmatrix} - \mathbf{m}_1 \\ - \mathbf{m}_2 \\ - \vdots \\ - \mathbf{m}_n - \end{bmatrix} - - Likewise, for the adjoint mode, each operator executes operations in the adjoint mode, - the adjoint mode of each operator is then collected from all ranks as a DistributedArray - referred as **d**. - - .. math:: - \begin{bmatrix} - \mathbf{d}_1 \\ - \mathbf{d}_2 \\ - \vdots \\ - \mathbf{d}_n - \end{bmatrix} = - \begin{bmatrix} - \mathbf{L}_1^H & \mathbf{0} & \ldots & \mathbf{0} \\ - \mathbf{0} & \mathbf{L}_2^H & \ldots & \mathbf{0} \\ - \vdots & \vdots & \ddots & \vdots \\ - \mathbf{0} & \mathbf{0} & \ldots & \mathbf{L}_n^H - \end{bmatrix} - \begin{bmatrix} - \mathbf{m}_1 \\ - \mathbf{m}_2 \\ - \vdots \\ - \mathbf{m}_n - \end{bmatrix} + Create a block-diagonal operator from a set of linear operators using MPI. + Each rank must initialize this operator by providing one or more linear operators + which will be computed within such rank. + + Both model and data vectors must be of :class:`pylops_mpi.DistributedArray` type and partitioned between ranks + according to the shapes of the different linear operators. + + Parameters + ---------- + ops : :obj:`list` + One or more :class:`pylops.LinearOperator` to be stacked. + base_comm : :obj:`mpi4py.MPI.Comm`, optional + Base MPI Communicator. Defaults to ``mpi4py.MPI.COMM_WORLD``. 
+ dtype : :obj:`str`, optional + Type of elements in input array. + + Attributes + ---------- + shape : :obj:`tuple` + Operator shape + + Notes + ----- + An MPI Block Diagonal operator is composed of N linear operators, represented by **L**. + Each rank has one or more :class:`pylops.LinearOperator`, which we represent here compactly + as :math:`\mathbf{L}_i` for rank :math:`i`. + + Each operator performs forward mode operations using its corresponding model vector, denoted as **m**. + This vector is effectively a :class:`pylops_mpi.DistributedArray` partitioned at each rank in such a way that + its local shapes agree with those of the corresponding linear operators. + The forward mode of each operator is then collected from all ranks as a DistributedArray, referred to as **d**. + + .. math:: + \begin{bmatrix} + \mathbf{d}_1 \\ + \mathbf{d}_2 \\ + \vdots \\ + \mathbf{d}_n + \end{bmatrix} = + \begin{bmatrix} + \mathbf{L}_1 & \mathbf{0} & \ldots & \mathbf{0} \\ + \mathbf{0} & \mathbf{L}_2 & \ldots & \mathbf{0} \\ + \vdots & \vdots & \ddots & \vdots \\ + \mathbf{0} & \mathbf{0} & \ldots & \mathbf{L}_n + \end{bmatrix} + \begin{bmatrix} + \mathbf{m}_1 \\ + \mathbf{m}_2 \\ + \vdots \\ + \mathbf{m}_n + \end{bmatrix} + + Likewise, for the adjoint mode, each operator executes operations in the adjoint mode, + the adjoint mode of each operator is then collected from all ranks as a DistributedArray + referred as **d**. + + .. 
math:: + \begin{bmatrix} + \mathbf{d}_1 \\ + \mathbf{d}_2 \\ + \vdots \\ + \mathbf{d}_n + \end{bmatrix} = + \begin{bmatrix} + \mathbf{L}_1^H & \mathbf{0} & \ldots & \mathbf{0} \\ + \mathbf{0} & \mathbf{L}_2^H & \ldots & \mathbf{0} \\ + \vdots & \vdots & \ddots & \vdots \\ + \mathbf{0} & \mathbf{0} & \ldots & \mathbf{L}_n^H + \end{bmatrix} + \begin{bmatrix} + \mathbf{m}_1 \\ + \mathbf{m}_2 \\ + \vdots \\ + \mathbf{m}_n + \end{bmatrix} """ @@ -135,23 +135,23 @@ def _rmatvec(self, x: DistributedArray) -> DistributedArray: class MPIStackedBlockDiag(MPIStackedLinearOperator): r"""MPI Stacked BlockDiag Operator - Create a stack of :class:`pylops_mpi.MPILinearOperator` operators stacked diagonally. + Create a diagonal stack of :class:`pylops_mpi.MPILinearOperator` operators. - Parameters - ---------- - ops : :obj:`list` - One or more :class:`pylops_mpi.MPILinearOperator` to be stacked. + Parameters + ---------- + ops : :obj:`list` + One or more :class:`pylops_mpi.MPILinearOperator` to be stacked. - Attributes - ---------- - shape : :obj:`tuple` - Operator shape + Attributes + ---------- + shape : :obj:`tuple` + Operator shape - Notes - ----- - A MPIStackedBlockDiag is composed of N :class:pylops_mpi.MPILinearOperator instances stacked along the diagonal. - These MPI operators will be applied sequentially, with distributed computations - performed within each operator. + Notes + ----- + A MPIStackedBlockDiag is composed of N :class:pylops_mpi.MPILinearOperator instances stacked along the diagonal. + These MPI operators will be applied sequentially, with distributed computations + performed within each operator. 
""" diff --git a/pylops_mpi/optimization/basic.py b/pylops_mpi/optimization/basic.py index c913d723..e4a34bbf 100644 --- a/pylops_mpi/optimization/basic.py +++ b/pylops_mpi/optimization/basic.py @@ -1,20 +1,20 @@ from typing import Optional, Tuple, Callable from pylops.utils import NDArray -from pylops_mpi import MPILinearOperator, DistributedArray +from pylops_mpi import MPILinearOperator, DistributedArray, StackedDistributedArray from pylops_mpi.optimization.cls_basic import CG, CGLS def cg( Op: MPILinearOperator, - y: DistributedArray, + y: Union[DistributedArray, StackedDistributedArray] , x0: Optional[DistributedArray] = None, niter: int = 10, tol: float = 1e-4, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), callback: Optional[Callable] = None, -) -> Tuple[DistributedArray, int, NDArray]: +) -> Tuple[Union[DistributedArray, StackedDistributedArray], int, NDArray]: r"""Conjugate gradient Solve a square system of equations given an MPILinearOperator ``Op`` and @@ -24,9 +24,9 @@ def cg( ---------- Op : :obj:`pylops_mpi.MPILinearOperator` Operator to invert of size :math:`[N \times N]` - y : :obj:`pylops_mpi.DistributedArray` + y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` DistributedArray of size (N,) - x0 : :obj:`pylops_mpi.DistributedArray`, optional + x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional Initial guess niter : :obj:`int`, optional Number of iterations @@ -44,7 +44,7 @@ def cg( Returns ------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Estimated model of size (N,) iit : :obj:`int` Number of executed iterations @@ -67,8 +67,8 @@ def cg( def cgls( Op: MPILinearOperator, - y: DistributedArray, - x0: Optional[DistributedArray] = None, + y: Union[DistributedArray, StackedDistributedArray], + x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None, niter: 
int = 10, damp: float = 0.0, tol: float = 1e-4, @@ -85,9 +85,9 @@ def cgls( ---------- Op : :obj:`pylops_mpi.MPILinearOperator` MPI Linear Operator to invert of size :math:`[N \times M]` - y : :obj:`pylops_mpi.DistributedArray` + y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` DistributedArray of size (N,) - x0 : :obj:`pylops_mpi.DistributedArray`, optional + x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional Initial guess niter : :obj:`int`, optional Number of iterations @@ -107,7 +107,7 @@ def cgls( Returns ------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Estimated model of size (M, ) istop : :obj:`int` Gives the reason for termination diff --git a/pylops_mpi/optimization/cls_basic.py b/pylops_mpi/optimization/cls_basic.py index 8240da2f..bf0079bf 100644 --- a/pylops_mpi/optimization/cls_basic.py +++ b/pylops_mpi/optimization/cls_basic.py @@ -1,11 +1,11 @@ -from typing import List, Optional, Tuple +from typing import List, Optional, Tuple, Union import numpy as np import time from pylops.optimization.basesolver import Solver from pylops.utils import NDArray -from pylops_mpi import DistributedArray +from pylops_mpi import DistributedArray, StackedDistributedArray class CG(Solver): @@ -41,31 +41,30 @@ def _print_setup(self, xcomplex: bool = False) -> None: head1 = " Itn x[0] r2norm" print(head1) - def _print_step(self, x: DistributedArray) -> None: + def _print_step(self, x: Union[DistributedArray, StackedDistributedArray]) -> None: strx = f"{x[0]:1.2e} " if np.iscomplexobj(x.local_array) else f"{x[0]:11.4e} " msg = f"{self.iiter:6g} " + strx + f"{self.cost[self.iiter]:11.4e}" print(msg) def setup( self, - y: DistributedArray, - x0: Optional[DistributedArray] = None, + y: Union[DistributedArray, StackedDistributedArray], + x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None, niter: 
Optional[int] = None, tol: float = 1e-4, show: bool = False, - ) -> DistributedArray: + ) -> Union[DistributedArray, StackedDistributedArray]: r"""Setup solver Parameters ---------- - y : :obj:`pylops_mpi.DistributedArray` + y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Data of size (N,) - x0 : :obj:`pylops_mpi.DistributedArray`, optional + x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional Initial guess of size (N,). If ``None``, initialize - internally as zero vector + internally as zero vector (will always default to :obj:`pylops_mpi.DistributedArray`) niter : :obj:`int`, optional - Number of iterations (default to ``None`` in case a user wants to - manually step over the solver) + Number of iterations (default to ``None`` in case a user wants to manually step over the solver) tol : :obj:`float`, optional Tolerance on residual norm show : :obj:`bool`, optional @@ -73,7 +72,7 @@ def setup( Returns ------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Initial guess of size (N,). 
""" @@ -105,19 +104,21 @@ def setup( self._print_setup(np.iscomplexobj(x.local_array)) return x - def step(self, x: DistributedArray, show: bool = False) -> DistributedArray: + def step(self, x: Union[DistributedArray, StackedDistributedArray], + show: bool = False + ) -> Union[DistributedArray, StackedDistributedArray]: r"""Run one step of solver Parameters ---------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Current model vector to be updated by a step of CG show : :obj:`bool`, optional Display iteration log Returns ------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Updated model vector """ @@ -138,16 +139,16 @@ def step(self, x: DistributedArray, show: bool = False) -> DistributedArray: def run( self, - x: DistributedArray, + x: Union[DistributedArray, StackedDistributedArray], niter: Optional[int] = None, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), - ) -> DistributedArray: + ) -> Union[DistributedArray, StackedDistributedArray]: r"""Run solver Parameters ---------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Current model vector to be updated by multiple steps of CG niter : :obj:`int`, optional Number of iterations. 
Can be set to ``None`` if already @@ -161,7 +162,7 @@ def run( Returns ------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Estimated model of size (M,) """ @@ -202,8 +203,8 @@ def finalize(self, show: bool = False) -> None: def solve( self, - y: DistributedArray, - x0: Optional[DistributedArray] = None, + y: Union[DistributedArray, StackedDistributedArray], + x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None, niter: int = 10, tol: float = 1e-4, show: bool = False, @@ -213,11 +214,11 @@ Parameters ---------- - y : :obj:`pylops_mpi.DistributedArray` + y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Data of size (N,) - x0 : :obj:`pylops_mpi.DistributedArray`, optional + x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional Initial guess of size (N,). If ``None``, initialize - internally as zero vector + internally as zero vector (will always default to :obj:`pylops_mpi.DistributedArray`) niter : :obj:`int`, optional Number of iterations tol : :obj:`float`, optional @@ -231,7 +232,7 @@ Returns ------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Estimated model of size (N,) iit : :obj:`int` Number of executed iterations @@ -286,7 +287,7 @@ def _print_setup(self, xcomplex: bool = False) -> None: head1 = " Itn x[0] r1norm r2norm" print(head1) - def _print_step(self, x: DistributedArray) -> None: + def _print_step(self, x: Union[DistributedArray, StackedDistributedArray]) -> None: strx = f"{x[0]:1.2e} " if np.iscomplexobj(x.local_array) else f"{x[0]:11.4e} " msg = ( f"{self.iiter:6g} " @@ -296,7 +297,7 @@ def _print_step(self, x: DistributedArray) -> None: print(msg) def setup(self, - y: DistributedArray, - x0: Optional[DistributedArray] = 
None, + y: Union[DistributedArray, StackedDistributedArray], + x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None, niter: Optional[int] = None, damp: float = 0.0, @@ -307,11 +308,11 @@ Parameters ---------- - y : :obj:`pylops_mpi.DistributedArray` + y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Data of size :math:`[N \times 1]` - x0 : :obj:`pylops_mpi.DistributedArray`, optional - Initial guess of size (M,). If ``None``, Defaults to a - zero vector + x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray`, optional + Initial guess of size (M,). If ``None``, Defaults to a zero vector + (will always default to :obj:`pylops_mpi.DistributedArray`) niter : :obj:`int`, optional Number of iterations (default to ``None`` in case a user wants to manually step over the solver) @@ -324,7 +325,7 @@ Returns ------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Initial guess of size (N,). """ @@ -363,19 +364,21 @@ self._print_setup(np.iscomplexobj(x.local_array)) return x - def step(self, x: DistributedArray, show: bool = False) -> DistributedArray: + def step(self, x: Union[DistributedArray, StackedDistributedArray], + show: bool = False + ) -> Union[DistributedArray, StackedDistributedArray]: r"""Run one step of solver Parameters ---------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Current model vector to be updated by a step of CG show : :obj:`bool`, optional Display iteration log Returns ------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Updated model vector """ @@ -398,15 +401,15 @@ def step(self, x: DistributedArray, show: bool = False) -> DistributedArray: return x def run(self, - x: DistributedArray, + x: Union[DistributedArray, StackedDistributedArray], niter: Optional[int] = None, show: bool = False, - itershow: 
Tuple[int, int, int] = (10, 10, 10), ) -> DistributedArray: + itershow: Tuple[int, int, int] = (10, 10, 10), ) -> Union[DistributedArray, StackedDistributedArray]: r"""Run solver Parameters ---------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Current model vector to be updated by multiple steps of CGLS niter : :obj:`int`, optional Number of iterations. Can be set to ``None`` if already @@ -463,8 +466,8 @@ def finalize(self, show: bool = False, **kwargs) -> None: self.cost = np.array(self.cost) def solve(self, - y: DistributedArray, - x0: Optional[DistributedArray] = None, + y: Union[DistributedArray, StackedDistributedArray], + x0: Optional[Union[DistributedArray, StackedDistributedArray]] = None, niter: int = 10, damp: float = 0.0, tol: float = 1e-4, @@ -475,11 +478,11 @@ def solve(self, Parameters ---------- - y : :obj:`pylops_mpi.DistributedArray` + y : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Data of size (N, ) - x0 : :obj:`pylops_mpi.DistributedArray` - Initial guess of size (M, ). If ``None``, initialize - internally as zero vector + x0 : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` + Initial guess of size (M, ). If ``None``, initialize internally as zero vector + (will always default to :obj:`pylops_mpi.DistributedArray`) niter : :obj:`int`, optional Number of iterations (default to ``None`` in case a user wants to manually step over the solver) @@ -496,7 +499,7 @@ def solve(self, Returns ------- - x : :obj:`pylops_mpi.DistributedArray` + x : :obj:`pylops_mpi.DistributedArray` or :obj:`pylops_mpi.StackedDistributedArray` Estimated model of size (M, ). istop : :obj:`int` Gives the reason for termination