From df966d1afc22ae960a48dc22c19902ed4d4518f6 Mon Sep 17 00:00:00 2001
From: mrava87
Date: Wed, 30 Oct 2024 11:36:44 +0300
Subject: [PATCH] feat: added dottest

---
 docs/source/api/index.rst | 19 ++++++
 pylops_mpi/utils/__init__.py | 2 +
 pylops_mpi/utils/dottest.py | 107 ++++++++++++++++++++++++++++++++++
 tests/test_blockdiag.py | 17 +++++-
 tests/test_derivative.py | 27 ++++++++-
 tests/test_fredholm.py | 8 ++-
 tests/test_linearop.py | 7 +++
 tests/test_solver.py | 4 ++
 tests/test_stack.py | 9 +++
 tests/test_stackedlinearop.py | 4 ++
 10 files changed, 198 insertions(+), 6 deletions(-)
 create mode 100644 pylops_mpi/utils/dottest.py

diff --git a/docs/source/api/index.rst b/docs/source/api/index.rst
index c77bbccb..3c39cc0a 100644
--- a/docs/source/api/index.rst
+++ b/docs/source/api/index.rst
@@ -104,3 +104,22 @@ Basic
 
    cg
    cgls
+
+
+Utils
+-----
+
+.. currentmodule:: pylops_mpi.DistributedArray
+
+.. autosummary::
+    :toctree: generated/
+
+    local_split
+
+
+.. currentmodule:: pylops_mpi.utils.dottest
+
+.. autosummary::
+    :toctree: generated/
+
+    dottest
\ No newline at end of file
diff --git a/pylops_mpi/utils/__init__.py b/pylops_mpi/utils/__init__.py
index e69de29b..d2cf7d55 100644
--- a/pylops_mpi/utils/__init__.py
+++ b/pylops_mpi/utils/__init__.py
@@ -0,0 +1,2 @@
+# isort: skip_file
+from .dottest import *
\ No newline at end of file
diff --git a/pylops_mpi/utils/dottest.py b/pylops_mpi/utils/dottest.py
new file mode 100644
index 00000000..e91bc25b
--- /dev/null
+++ b/pylops_mpi/utils/dottest.py
@@ -0,0 +1,107 @@
+__all__ = ["dottest"]
+
+from typing import Optional
+
+import numpy as np
+
+from pylops_mpi.DistributedArray import DistributedArray
+from pylops.utils.backend import to_numpy
+
+
+def dottest(
+    Op,
+    u: DistributedArray,
+    v: DistributedArray,
+    nr: Optional[int] = None,
+    nc: Optional[int] = None,
+    rtol: float = 1e-6,
+    atol: float = 1e-21,
+    raiseerror: bool = True,
+    verb: bool = False,
+) -> bool:
+    r"""Dot test.
+
+    Perform dot-test to verify the validity of forward and adjoint
+    operators using user-provided random vectors :math:`\mathbf{u}`
+    and :math:`\mathbf{v}` (whose Partition must be consistent with
+    the operator being tested). This test can help to detect errors
+    in the operator implementation.
+
+    Parameters
+    ----------
+    Op : :obj:`pylops_mpi.LinearOperator`
+        Linear operator to test.
+    u : :obj:`pylops_mpi.DistributedArray`
+        Distributed array of size equal to the number of columns of operator
+    v : :obj:`pylops_mpi.DistributedArray`
+        Distributed array of size equal to the number of rows of operator
+    nr : :obj:`int`, optional
+        Number of rows of operator (i.e., elements in data)
+    nc : :obj:`int`, optional
+        Number of columns of operator (i.e., elements in model)
+    rtol : :obj:`float`, optional
+        Relative dottest tolerance
+    atol : :obj:`float`, optional
+        Absolute dottest tolerance
+        .. versionadded:: 2.0.0
+    raiseerror : :obj:`bool`, optional
+        Raise error or simply return ``False`` when dottest fails
+    verb : :obj:`bool`, optional
+        Verbosity
+
+    Returns
+    -------
+    passed : :obj:`bool`
+        Passed flag.
+
+    Raises
+    ------
+    AssertionError
+        If dot-test is not verified within chosen tolerances.
+
+    Notes
+    -----
+    A dot-test is a mathematical tool used in the development of numerical
+    linear operators.
+
+    More specifically, a correct implementation of forward and adjoint for
+    a linear operator should verify the following *equality*
+    within a numerical tolerance:
+
+    .. 
math:: + (\mathbf{Op}\,\mathbf{u})^H\mathbf{v} = + \mathbf{u}^H(\mathbf{Op}^H\mathbf{v}) + + """ + if nr is None: + nr = Op.shape[0] + if nc is None: + nc = Op.shape[1] + + if (nr, nc) != Op.shape: + raise AssertionError("Provided nr and nc do not match operator shape") + + y = Op.matvec(u) # Op * u + x = Op.rmatvec(v) # Op'* v + + yy = np.vdot(y.asarray(), v.asarray()) # (Op * u)' * v + xx = np.vdot(u.asarray(), x.asarray()) # u' * (Op' * v) + + # convert back to numpy (in case cupy arrays were used), make into a numpy + # array and extract the first element. This is ugly but allows to handle + # complex numbers in subsequent prints also when using cupy arrays. + xx, yy = np.array([to_numpy(xx)])[0], np.array([to_numpy(yy)])[0] + + # evaluate if dot test passed + passed = np.isclose(xx, yy, rtol, atol) + + # verbosity or error raising + if (not passed and raiseerror) or verb: + passed_status = "passed" if passed else "failed" + msg = f"Dot test {passed_status}, v^H(Opu)={yy} - u^H(Op^Hv)={xx}" + if not passed and raiseerror: + raise AssertionError(msg) + else: + print(msg) + + return passed diff --git a/tests/test_blockdiag.py b/tests/test_blockdiag.py index 7a90b21a..bd6b81a5 100644 --- a/tests/test_blockdiag.py +++ b/tests/test_blockdiag.py @@ -1,18 +1,23 @@ +"""Test the MPIBlockDiag and MPIStackedBlockDiag classes + Designed to run with n processes + $ mpiexec -n 10 pytest test_blockdiag.py --with-mpi +""" from mpi4py import MPI import numpy as np from numpy.testing import assert_allclose -np.random.seed(42) - import pytest import pylops import pylops_mpi +from pylops_mpi.utils.dottest import dottest par1 = {'ny': 101, 'nx': 101, 'dtype': np.float64} par1j = {'ny': 101, 'nx': 101, 'dtype': np.complex128} par2 = {'ny': 301, 'nx': 101, 'dtype': np.float64} par2j = {'ny': 301, 'nx': 101, 'dtype': np.complex128} +np.random.seed(42) + @pytest.mark.mpi(min_size=2) @pytest.mark.parametrize("par", [(par1), (par1j), (par2), (par2j)]) @@ -30,10 +35,14 @@ def test_blockdiag(par): y[:] = np.ones(shape=par['ny'], dtype=par['dtype']) y_global = y.asarray() + # Forward x_mat = BDiag_MPI @ x + # Adjoint y_rmat = BDiag_MPI.H @ y assert isinstance(x_mat, pylops_mpi.DistributedArray) assert isinstance(y_rmat, pylops_mpi.DistributedArray) + # Dot test + dottest(BDiag_MPI, x, y, size * par['ny'], size * par['nx']) x_mat_mpi = x_mat.asarray() y_rmat_mpi = y_rmat.asarray() @@ -73,10 +82,14 @@ def test_stacked_blockdiag(par): y = pylops_mpi.StackedDistributedArray(distarrays=[dist1, dist2]) y_global = y.asarray() + # Forward x_mat = StackedBDiag_MPI @ x + # Adjoint y_rmat = StackedBDiag_MPI.H @ y assert isinstance(x_mat, pylops_mpi.StackedDistributedArray) assert isinstance(y_rmat, pylops_mpi.StackedDistributedArray) + # Dot test + dottest(StackedBDiag_MPI, x, y, size * par['ny'] + par['nx'] * par['ny'], size * par['nx'] + par['nx'] * par['ny']) x_mat_mpi = x_mat.asarray() y_rmat_mpi = y_rmat.asarray() diff --git a/tests/test_derivative.py b/tests/test_derivative.py index 68507624..3d3a4a65 100644 --- a/tests/test_derivative.py +++ b/tests/test_derivative.py @@ -1,3 +1,7 @@ +"""Test the derivative classes + Designed to run with n processes + $ mpiexec -n 10 pytest test_derivative.py --with-mpi +""" import numpy as np from mpi4py import MPI from numpy.testing import assert_allclose @@ -5,6 +9,7 @@ import pylops import pylops_mpi +from pylops_mpi.utils.dottest import dottest np.random.seed(42) rank = MPI.COMM_WORLD.Get_rank() @@ -194,6 +199,8 @@ def test_first_derivative_forward(par): # Adjoint y_adj_dist = 
Fop_MPI.H @ x y_adj = y_adj_dist.asarray() + # Dot test + dottest(Fop_MPI, x, y_dist, np.prod(par['nz']), np.prod(par['nz'])) if rank == 0: Fop = pylops.FirstDerivative(dims=par['nz'], axis=0, @@ -226,6 +233,8 @@ def test_first_derivative_backward(par): # Adjoint y_adj_dist = Fop_MPI.H @ x y_adj = y_adj_dist.asarray() + # Dot test + dottest(Fop_MPI, x, y_dist, np.prod(par['nz']), np.prod(par['nz'])) if rank == 0: Fop = pylops.FirstDerivative(dims=par['nz'], axis=0, @@ -259,6 +268,9 @@ def test_first_derivative_centered(par): # Adjoint y_adj_dist = Fop_MPI.H @ x y_adj = y_adj_dist.asarray() + # Dot test + dottest(Fop_MPI, x, y_dist, np.prod(par['nz']), np.prod(par['nz'])) + if rank == 0: Fop = pylops.FirstDerivative(dims=par['nz'], axis=0, sampling=par['dz'], @@ -290,6 +302,8 @@ def test_second_derivative_forward(par): # Adjoint y_adj_dist = Sop_MPI.H @ x y_adj = y_adj_dist.asarray() + # Dot test + dottest(Sop_MPI, x, y_dist, np.prod(par['nz']), np.prod(par['nz'])) if rank == 0: Sop = pylops.SecondDerivative(dims=par['nz'], axis=0, @@ -322,6 +336,8 @@ def test_second_derivative_backward(par): # Adjoint y_adj_dist = Sop_MPI.H @ x y_adj = y_adj_dist.asarray() + # Dot test + dottest(Sop_MPI, x, y_dist, np.prod(par['nz']), np.prod(par['nz'])) if rank == 0: Sop = pylops.SecondDerivative(dims=par['nz'], axis=0, @@ -354,6 +370,8 @@ def test_second_derivative_centered(par): # Adjoint y_adj_dist = Sop_MPI.H @ x y_adj = y_adj_dist.asarray() + # Dot test + dottest(Sop_MPI, x, y_dist, np.prod(par['nz']), np.prod(par['nz'])) if rank == 0: Sop = pylops.SecondDerivative(dims=par['nz'], axis=0, @@ -385,6 +403,8 @@ def test_laplacian(par): # Adjoint y_adj_dist = Lop_MPI.H @ x y_adj = y_adj_dist.asarray() + # Dot test + dottest(Lop_MPI, x, y_dist, np.prod(par['n']), np.prod(par['n'])) if rank == 0: Lop = pylops.Laplacian(dims=par['n'], axes=par['axes'], @@ -409,6 +429,7 @@ def test_gradient(par): x_fwd = pylops_mpi.DistributedArray(global_shape=np.prod(par['n']), dtype=par['dtype']) x_fwd[:] = np.random.normal(rank, 10, x_fwd.local_shape) x_global = x_fwd.asarray() + # Forward y_dist = Gop_MPI @ x_fwd assert isinstance(y_dist, pylops_mpi.StackedDistributedArray) @@ -421,15 +442,15 @@ def test_gradient(par): x_adj_dist2[:] = np.random.normal(rank, 20, x_adj_dist2.local_shape) x_adj_dist3 = pylops_mpi.DistributedArray(global_shape=int(np.prod(par['n'])), dtype=par['dtype']) x_adj_dist3[:] = np.random.normal(rank, 30, x_adj_dist3.local_shape) - x_adj = pylops_mpi.StackedDistributedArray(distarrays=[x_adj_dist1, x_adj_dist2, x_adj_dist3]) - x_adj_global = x_adj.asarray() y_adj_dist = Gop_MPI.H @ x_adj assert isinstance(y_adj_dist, pylops_mpi.DistributedArray) - y_adj = y_adj_dist.asarray() + # Dot test + dottest(Gop_MPI, x_fwd, y_dist, len(par['n']) * np.prod(par['n']), np.prod(par['n'])) + if rank == 0: Gop = pylops.Gradient(dims=par['n'], sampling=par['sampling'], kind=kind, edge=par['edge'], diff --git a/tests/test_fredholm.py b/tests/test_fredholm.py index 812c74e4..1baeb57c 100644 --- a/tests/test_fredholm.py +++ b/tests/test_fredholm.py @@ -1,3 +1,7 @@ +"""Test the MPIFredholm1 class + Designed to run with n processes + $ mpiexec -n 10 pytest test_fredholm.py --with-mpi +""" import numpy as np from numpy.testing import assert_allclose from mpi4py import MPI @@ -9,12 +13,12 @@ from pylops_mpi import DistributedArray from pylops_mpi.DistributedArray import local_split, Partition from pylops_mpi.signalprocessing import MPIFredholm1 +from pylops_mpi.utils.dottest import dottest np.random.seed(42) rank = 
MPI.COMM_WORLD.Get_rank() size = MPI.COMM_WORLD.Get_size() - par1 = { "nsl": 6, "ny": 6, @@ -130,6 +134,8 @@ def test_Fredholm1(par): # Adjoint y_adj_dist = Fop_MPI.H @ y_dist y_adj = y_adj_dist.asarray() + # Dot test + dottest(Fop_MPI, x, y_dist, par["nsl"] * par["nx"] * par["nz"],par["nsl"] * par["ny"] * par["nz"]) if rank == 0: Fop = pylops.signalprocessing.Fredholm1( diff --git a/tests/test_linearop.py b/tests/test_linearop.py index ad544586..411679f5 100644 --- a/tests/test_linearop.py +++ b/tests/test_linearop.py @@ -1,3 +1,7 @@ +"""Test the MPILinearOperator class + Designed to run with n processes + $ mpiexec -n 10 pytest test_linearop.py --with-mpi +""" import numpy as np from numpy.testing import assert_allclose from mpi4py import MPI @@ -58,6 +62,7 @@ def test_transpose(par): """Test the TransposeLinearOperator""" Op = pylops.MatrixMult(A=((rank + 1) * np.ones(shape=(par['ny'], par['nx']))).astype(par['dtype'])) BDiag_MPI = MPIBlockDiag(ops=[Op, ]) + # Tranposed Op Top_MPI = BDiag_MPI.T @@ -76,6 +81,7 @@ def test_transpose(par): Top_y = Top_MPI.H @ y assert isinstance(Top_y, DistributedArray) Top_y_np = Top_y.asarray() + if rank == 0: ops = [pylops.MatrixMult((i + 1) * np.ones(shape=(par['ny'], par['nx'])).astype(par['dtype'])) for i in range(size)] @@ -109,6 +115,7 @@ def test_scaled(par): Sop_y = Sop_MPI.H @ y assert isinstance(Sop_y, DistributedArray) Sop_y_np = Sop_y.asarray() + if rank == 0: ops = [pylops.MatrixMult((i + 1) * np.ones(shape=(par['ny'], par['nx'])).astype(par['dtype'])) for i in range(size)] diff --git a/tests/test_solver.py b/tests/test_solver.py index b7140d4e..46e9a139 100644 --- a/tests/test_solver.py +++ b/tests/test_solver.py @@ -1,3 +1,7 @@ +"""Test solvers + Designed to run with n processes + $ mpiexec -n 10 pytest test_solver.py --with-mpi +""" import numpy as np from numpy.testing import assert_allclose from mpi4py import MPI diff --git a/tests/test_stack.py b/tests/test_stack.py index f47d4dd3..5a552475 100644 --- a/tests/test_stack.py +++ b/tests/test_stack.py @@ -1,3 +1,7 @@ +"""Test the stacking classes + Designed to run with n processes + $ mpiexec -n 10 pytest test_stack.py --with-mpi +""" import numpy as np from numpy.testing import assert_allclose from mpi4py import MPI @@ -5,6 +9,7 @@ import pylops import pylops_mpi +from pylops_mpi.utils.dottest import dottest par1 = {'ny': 101, 'nx': 101, 'imag': 0, 'dtype': np.float64} par1j = {'ny': 101, 'nx': 101, 'imag': 1j, 'dtype': np.complex128} @@ -36,10 +41,14 @@ def test_vstack(par): y[:] = np.ones(shape=par['ny'], dtype=par['dtype']) y_global = y.asarray() + # Forward x_mat = VStack_MPI @ x + # Adjoint y_rmat = VStack_MPI.H @ y assert isinstance(x_mat, pylops_mpi.DistributedArray) assert isinstance(y_rmat, pylops_mpi.DistributedArray) + # Dot test + dottest(VStack_MPI, x, y, size * par['ny'], par['nx']) x_mat_mpi = x_mat.asarray() y_rmat_mpi = y_rmat.asarray() diff --git a/tests/test_stackedlinearop.py b/tests/test_stackedlinearop.py index 71cdec12..c31d32b6 100644 --- a/tests/test_stackedlinearop.py +++ b/tests/test_stackedlinearop.py @@ -1,3 +1,7 @@ +"""Test the MPIStackedLinearOperator class + Designed to run with n processes + $ mpiexec -n 10 pytest test_stackedlinearop.py --with-mpi +""" import numpy as np from numpy.testing import assert_allclose from mpi4py import MPI