diff --git a/tensorbackends/backends/ctf/ctf_backend.py b/tensorbackends/backends/ctf/ctf_backend.py
index e62abb9..3f4f501 100644
--- a/tensorbackends/backends/ctf/ctf_backend.py
+++ b/tensorbackends/backends/ctf/ctf_backend.py
@@ -7,6 +7,7 @@
 
 from ...interface import Backend
 from ...utils import einstr
+from ...utils.svd_absorb_s import svd_absorb_s, svd_absorb_s_ctf
 from .ctf_random import CTFRandom
 from .ctf_tensor import CTFTensor
 
@@ -82,35 +83,35 @@ def einsum(self, subscripts, *operands):
         expr = einstr.parse_einsum(subscripts, ndims)
         return self._einsum(expr, operands)
 
-    def einsvd_reduced(self, subscripts, a, rank=None):
+    def einsvd_reduced(self, subscripts, a, rank=None, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         expr = einstr.parse_einsvd(subscripts, a.ndim)
-        return self._einsvd_reduced(expr, a, rank)
+        return self._einsvd_reduced(expr, a, rank, absorb_s)
 
-    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5):
+    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         expr = einstr.parse_einsvd(subscripts, a.ndim)
-        return self._einsvd_rand(expr, a, rank, niter, oversamp)
+        return self._einsvd_rand(expr, a, rank, niter, oversamp, absorb_s=absorb_s)
 
-    def einsumsvd_reduced(self, subscripts, *operands, rank=None):
+    def einsumsvd_reduced(self, subscripts, *operands, rank=None, absorb_s=False):
         if not all(isinstance(operand, self.tensor) for operand in operands):
             raise TypeError('all operands should be {}'.format(self.tensor.__qualname__))
         ndims = [operand.ndim for operand in operands]
         expr = einstr.parse_einsumsvd(subscripts, ndims)
         einsum_expr, einsvd_expr = einstr.split_einsumsvd(expr)
         a = self._einsum(einsum_expr, operands)
-        return self._einsvd_reduced(einsvd_expr, a, rank)
+        return self._einsvd_reduced(einsvd_expr, a, rank, absorb_s=absorb_s)
 
-    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5):
+    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5, absorb_s=False):
         if not all(isinstance(operand, self.tensor) for operand in operands):
             raise TypeError('all operands should be {}'.format(self.tensor.__qualname__))
         ndims = [operand.ndim for operand in operands]
         expr = einstr.parse_einsumsvd(subscripts, ndims)
         einsum_expr, einsvd_expr = einstr.split_einsumsvd(expr)
         a = self._einsum(einsum_expr, operands)
-        return self._einsvd_rand(einsvd_expr, a, rank, niter, oversamp)
+        return self._einsvd_rand(einsvd_expr, a, rank, niter, oversamp, absorb_s=absorb_s)
 
     def isclose(self, a, b, *, rtol=1e-9, atol=0.0):
         return abs(a - b) <= atol + rtol * abs(b)
@@ -122,13 +123,15 @@ def inv(self, a):
         u, s, v = self.einsvd('ij->ia,ja', a)
         return self.einsum('ia,a,ja->ji', u, 1/s, v)
 
-    def svd(self, a):
+    def svd(self, a, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         if a.ndim != 2:
             raise TypeError('the input tensor should be a matrix')
         u, s, vh = ctf.svd(a.unwrap())
-        return self.tensor(u), self.tensor(ctf.real(s)), self.tensor(vh)
+        u, s, vh = self.tensor(u), self.tensor(ctf.real(s)), self.tensor(vh)
+        u, s, vh = svd_absorb_s(u, s, vh, absorb_s)
+        return u, s, vh
 
     def __getattr__(self, attr):
         wrap = lambda val: CTFTensor(val) if isinstance(val, ctf.tensor) else val
@@ -167,19 +170,18 @@ def _einsum(self, expr, operands):
         else:
             return result
 
-    def _einsvd_reduced(self, expr, a, rank):
-        u, s, vh = a.tsr.i(expr.inputs[0].indices_string).svd(
-            expr.outputs[0].indices_string,
-            expr.outputs[1].indices_string,
-            rank=rank,
-        )
+    def _einsvd_reduced(self, expr, a, rank, absorb_s):
+        u_str, vh_str = expr.outputs[0].indices_string, expr.outputs[1].indices_string
+        u, s, vh = a.tsr.i(expr.inputs[0].indices_string).svd(u_str, vh_str, rank=rank)
+        u, s, vh = self.tensor(u), self.tensor(ctf.real(s)), self.tensor(vh)
+        u, s, vh = svd_absorb_s_ctf(u, s, vh, absorb_s, u_str, vh_str)
         u_newshape = expr.outputs[0].newshape(u.shape)
         vh_newshape = expr.outputs[1].newshape(vh.shape)
         if u_newshape != u.shape: u = u.reshape(*u_newshape)
         if vh_newshape != vh.shape: vh = vh.reshape(*vh_newshape)
-        return self.tensor(u), self.tensor(ctf.real(s)), self.tensor(vh)
+        return u, s, vh
 
-    def _einsvd_rand(self, expr, a, rank, niter, oversamp):
+    def _einsvd_rand(self, expr, a, rank, niter, oversamp, absorb_s):
         newindex = (expr.output_indices - expr.input_indices).pop()
         axis_of_index = {index: axis for axis, index in enumerate(expr.inputs[0])}
         u_axes_from_a = [axis_of_index[index] for index in expr.outputs[0] if index != newindex]
@@ -188,7 +190,7 @@
         a_matrix_axes = [*u_axes_from_a, *vh_axes_from_a]
         a_matrix_shape = (np.prod([a.shape[axis] for axis in u_axes_from_a]), -1)
         a_matrix = a.transpose(*a_matrix_axes).reshape(*a_matrix_shape)
-        u, s, vh = self.rsvd(a_matrix, rank, niter, oversamp)
+        u, s, vh = self.rsvd(a_matrix, rank, niter, oversamp, absorb_s)
         # form u
         u = u.reshape(*(a.shape[axis] for axis in u_axes_from_a), s.shape[0])
         u = self.moveaxis(u, -1, expr.outputs[0].find(newindex))
diff --git a/tensorbackends/backends/ctf/ctf_tensor.py b/tensorbackends/backends/ctf/ctf_tensor.py
index 003229b..4df4986 100644
--- a/tensorbackends/backends/ctf/ctf_tensor.py
+++ b/tensorbackends/backends/ctf/ctf_tensor.py
@@ -145,7 +145,7 @@ def method(self, other):
     '__imatmul__',
     '__itruediv__',
     '__ifloordiv__',
-    '__ipow__',
+    # '__ipow__', # TODO Add __ipow__ to ctf tensor
 
     '__lt__',
     '__le__',
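
Note: a plain-NumPy sketch of the `svd_absorb_s` semantics imported above (the
utility itself is added later in this patch, in
tensorbackends/utils/svd_absorb_s.py). The helper `absorb` below is
hypothetical and for illustration only; in every mode the product
u @ diag(s) @ vh is preserved.

    import numpy as np

    def absorb(u, s, vh, absorb_s):
        if absorb_s == 'even':
            rs = np.sqrt(s)                   # split s symmetrically
            return u * rs, rs, rs[:, None] * vh
        elif absorb_s == 'u':
            return u * s, s, vh               # u <- u @ diag(s)
        elif absorb_s == 'v':
            return u, s, s[:, None] * vh      # vh <- diag(s) @ vh
        return u, s, vh                       # absorb_s=False: no-op

    a = np.random.default_rng(0).standard_normal((5, 4))
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    for mode in ('even', 'u', 'v'):
        ua, _, vha = absorb(u, s, vh, mode)
        assert np.allclose(ua @ vha, a)       # reconstruction unchanged
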
diff --git a/tensorbackends/backends/ctfview/ctfview_backend.py b/tensorbackends/backends/ctfview/ctfview_backend.py
index a2d9b8e..565cf27 100644
--- a/tensorbackends/backends/ctfview/ctfview_backend.py
+++ b/tensorbackends/backends/ctfview/ctfview_backend.py
@@ -7,6 +7,7 @@
 
 from ...interface import Backend
 from ...utils import einstr
+from ...utils.svd_absorb_s import svd_absorb_s
 from .ctfview_random import CTFViewRandom
 from .ctfview_tensor import CTFViewTensor
 from . import indices_utils
@@ -91,26 +92,26 @@ def einsum(self, subscripts, *operands):
         expr = einstr.parse_einsum(subscripts, ndims)
         return self._einsum(expr, operands)
 
-    def einsvd_reduced(self, subscripts, a, rank=None):
+    def einsvd_reduced(self, subscripts, a, rank=None, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         expr = einstr.parse_einsvd(subscripts, a.ndim)
         def svd_func(matrix):
-            u, s, vh = self.svd(matrix)
+            u, s, vh = self.svd(matrix, absorb_s=absorb_s)
             if rank is not None and s.shape[0] > rank:
                 u, s, vh = u[:,:rank], s[:rank], vh[:rank,:]
             return u, s, vh
         return self._einsvd(expr, a, svd_func)
 
-    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5):
+    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         expr = einstr.parse_einsvd(subscripts, a.ndim)
         def svd_func(matrix):
-            return self.rsvd(matrix, rank, niter, oversamp)
+            return self.rsvd(matrix, rank, niter, oversamp, absorb_s=absorb_s)
         return self._einsvd(expr, a, svd_func)
 
-    def einsumsvd_reduced(self, subscripts, *operands, rank=None):
+    def einsumsvd_reduced(self, subscripts, *operands, rank=None, absorb_s=False):
         if not all(isinstance(operand, self.tensor) for operand in operands):
             raise TypeError('all operands should be {}'.format(self.tensor.__qualname__))
         ndims = [operand.ndim for operand in operands]
@@ -118,13 +119,13 @@ def einsumsvd_reduced(self, subscripts, *operands, rank=None):
         einsum_expr, einsvd_expr = einstr.split_einsumsvd(expr)
         a = self._einsum(einsum_expr, operands)
         def svd_func(matrix):
-            u, s, vh = self.svd(matrix)
+            u, s, vh = self.svd(matrix, absorb_s=absorb_s)
             if rank is not None and s.shape[0] > rank:
                 u, s, vh = u[:,:rank], s[:rank], vh[:rank,:]
             return u, s, vh
         return self._einsvd(einsvd_expr, a, svd_func)
 
-    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5):
+    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5, absorb_s=False):
         if not all(isinstance(operand, self.tensor) for operand in operands):
             raise TypeError('all operands should be {}'.format(self.tensor.__qualname__))
         ndims = [operand.ndim for operand in operands]
@@ -132,7 +133,7 @@ def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5):
         einsum_expr, einsvd_expr = einstr.split_einsumsvd(expr)
         a = self._einsum(einsum_expr, operands)
         def svd_func(matrix):
-            return self.rsvd(matrix, rank, niter, oversamp)
+            return self.rsvd(matrix, rank, niter, oversamp, absorb_s=absorb_s)
         return self._einsvd(einsvd_expr, a, svd_func)
 
     def isclose(self, a, b, *, rtol=1e-9, atol=0.0):
@@ -147,13 +148,15 @@ def inv(self, a):
         u, s, v = self.einsvd('ij->ia,ja', a)
         return self.einsum('ia,a,ja->ji', u, 1/s, v)
 
-    def svd(self, a):
+    def svd(self, a, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         if a.ndim != 2:
             raise TypeError('the input tensor should be a matrix')
         u, s, vh = ctf.svd(a.unwrap())
-        return self.tensor(u), self.tensor(ctf.real(s)), self.tensor(vh)
+        u, s, vh = self.tensor(u), self.tensor(ctf.real(s)), self.tensor(vh)
+        u, s, vh = svd_absorb_s(u, s, vh, absorb_s)
+        return u, s, vh
 
     def __getattr__(self, attr):
         wrap = lambda val: CTFViewTensor(val) if isinstance(val, ctf.tensor) else val
diff --git a/tensorbackends/backends/ctfview/ctfview_tensor.py b/tensorbackends/backends/ctfview/ctfview_tensor.py
index 3d67a59..8f5b8d1 100644
--- a/tensorbackends/backends/ctfview/ctfview_tensor.py
+++ b/tensorbackends/backends/ctfview/ctfview_tensor.py
@@ -200,7 +200,7 @@ def method(self, other):
     '__imatmul__',
     '__itruediv__',
     '__ifloordiv__',
-    '__ipow__',
+    # '__ipow__', # TODO Add __ipow__ to ctf tensor
 
     '__lt__',
     '__le__',
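
Note on the CTF-specific variant used earlier in this patch:
`a.tsr.i(...).svd(u_str, vh_str)` factors a tensor directly, so u and vh can
carry more than two indices and the singular-value index cannot be assumed to
sit in a fixed position. `svd_absorb_s_ctf` therefore locates it by name, as
the one index shared by the two output strings. A minimal sketch (the index
strings here are made up for illustration):

    # splitting a 4-index tensor 'abcd' into u='abs' and vh='scd'
    u_str, vh_str = 'abs', 'scd'
    s_str, = set(u_str) & set(vh_str)   # -> 's'; the comma asserts uniqueness
    print(f'{u_str},{s_str}->{u_str}')  # 'abs,s->abs', the einsum absorbing s into u
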
diff --git a/tensorbackends/backends/cupy/cupy_backend.py b/tensorbackends/backends/cupy/cupy_backend.py
index 55acd1b..f531e76 100644
--- a/tensorbackends/backends/cupy/cupy_backend.py
+++ b/tensorbackends/backends/cupy/cupy_backend.py
@@ -9,6 +9,7 @@
 
 from ...interface import Backend
 from ...utils import einstr
+from ...utils.svd_absorb_s import svd_absorb_s
 from .cupy_random import CuPyRandom
 from .cupy_tensor import CuPyTensor
 
@@ -84,26 +85,26 @@ def einsum(self, subscripts, *operands):
         expr = einstr.parse_einsum(subscripts, ndims)
         return self._einsum(expr, operands)
 
-    def einsvd_reduced(self, subscripts, a, rank=None):
+    def einsvd_reduced(self, subscripts, a, rank=None, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         expr = einstr.parse_einsvd(subscripts, a.ndim)
         def svd_func(matrix):
-            u, s, vh = self.svd(matrix)
+            u, s, vh = self.svd(matrix, absorb_s=absorb_s)
             if rank is not None and s.shape[0] > rank:
                 u, s, vh = u[:,:rank], s[:rank], vh[:rank,:]
             return u, s, vh
         return self._einsvd(expr, a, svd_func)
 
-    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5):
+    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         expr = einstr.parse_einsvd(subscripts, a.ndim)
         def svd_func(matrix):
-            return self.rsvd(matrix, rank, niter, oversamp)
+            return self.rsvd(matrix, rank, niter, oversamp, absorb_s=absorb_s)
         return self._einsvd(expr, a, svd_func)
 
-    def einsumsvd_reduced(self, subscripts, *operands, rank=None):
+    def einsumsvd_reduced(self, subscripts, *operands, rank=None, absorb_s=False):
         if not all(isinstance(operand, self.tensor) for operand in operands):
             raise TypeError('all operands should be {}'.format(self.tensor.__qualname__))
         ndims = [operand.ndim for operand in operands]
@@ -111,13 +112,13 @@ def einsumsvd_reduced(self, subscripts, *operands, rank=None):
         einsum_expr, einsvd_expr = einstr.split_einsumsvd(expr)
         a = self._einsum(einsum_expr, operands)
         def svd_func(matrix):
-            u, s, vh = self.svd(matrix)
+            u, s, vh = self.svd(matrix, absorb_s=absorb_s)
             if rank is not None and s.shape[0] > rank:
                 u, s, vh = u[:,:rank], s[:rank], vh[:rank,:]
             return u, s, vh
         return self._einsvd(einsvd_expr, a, svd_func)
 
-    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5):
+    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5, absorb_s=False):
         if not all(isinstance(operand, self.tensor) for operand in operands):
             raise TypeError('all operands should be {}'.format(self.tensor.__qualname__))
         ndims = [operand.ndim for operand in operands]
@@ -125,7 +126,7 @@ def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5):
         einsum_expr, einsvd_expr = einstr.split_einsumsvd(expr)
         a = self._einsum(einsum_expr, operands)
         def svd_func(matrix):
-            return self.rsvd(matrix, rank, niter, oversamp)
+            return self.rsvd(matrix, rank, niter, oversamp, absorb_s=absorb_s)
         return self._einsvd(einsvd_expr, a, svd_func)
 
     def isclose(self, a, b, *, rtol=1e-9, atol=0.0):
@@ -142,9 +143,11 @@ def allclose(self, a, b, *, rtol=1e-9, atol=0.0):
     def inv(self, a):
         return CuPyTensor(la.inv(a.unwrap()))
 
-    def svd(self, a):
+    def svd(self, a, absorb_s=False):
         u, s, vh = la.svd(a.unwrap(), full_matrices=False)
-        return CuPyTensor(u), CuPyTensor(s), CuPyTensor(vh)
+        u, s, vh = self.tensor(u), self.tensor(s), self.tensor(vh)
+        u, s, vh = svd_absorb_s(u, s, vh, absorb_s)
+        return u, s, vh
 
     def __getattr__(self, attr):
         wrap = lambda val: CuPyTensor(val) if isinstance(val, cp.ndarray) else val
diff --git a/tensorbackends/backends/numpy/numpy_backend.py b/tensorbackends/backends/numpy/numpy_backend.py
index 47a6c1b..963fd78 100644
--- a/tensorbackends/backends/numpy/numpy_backend.py
+++ b/tensorbackends/backends/numpy/numpy_backend.py
@@ -10,6 +10,7 @@
 
 from ...interface import Backend
 from ...utils import einstr
+from ...utils.svd_absorb_s import svd_absorb_s
 from .numpy_random import NumPyRandom
 from .numpy_tensor import NumPyTensor
 
@@ -85,26 +86,26 @@ def einsum(self, subscripts, *operands):
         expr = einstr.parse_einsum(subscripts, ndims)
         return self._einsum(expr, operands)
 
-    def einsvd_reduced(self, subscripts, a, rank=None):
+    def einsvd_reduced(self, subscripts, a, rank=None, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         expr = einstr.parse_einsvd(subscripts, a.ndim)
         def svd_func(matrix):
-            u, s, vh = self.svd(matrix)
+            u, s, vh = self.svd(matrix, absorb_s=absorb_s)
             if rank is not None and s.shape[0] > rank:
                 u, s, vh = u[:,:rank], s[:rank], vh[:rank,:]
             return u, s, vh
         return self._einsvd(expr, a, svd_func)
 
-    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5):
+    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5, absorb_s=False):
         if not isinstance(a, self.tensor):
             raise TypeError('the input should be {}'.format(self.tensor.__qualname__))
         expr = einstr.parse_einsvd(subscripts, a.ndim)
         def svd_func(matrix):
-            return self.rsvd(matrix, rank, niter, oversamp)
+            return self.rsvd(matrix, rank, niter, oversamp, absorb_s=absorb_s)
         return self._einsvd(expr, a, svd_func)
 
-    def einsumsvd_reduced(self, subscripts, *operands, rank=None):
+    def einsumsvd_reduced(self, subscripts, *operands, rank=None, absorb_s=False):
         if not all(isinstance(operand, self.tensor) for operand in operands):
             raise TypeError('all operands should be {}'.format(self.tensor.__qualname__))
         ndims = [operand.ndim for operand in operands]
@@ -112,13 +113,13 @@ def einsumsvd_reduced(self, subscripts, *operands, rank=None):
         einsum_expr, einsvd_expr = einstr.split_einsumsvd(expr)
         a = self._einsum(einsum_expr, operands)
         def svd_func(matrix):
-            u, s, vh = self.svd(matrix)
+            u, s, vh = self.svd(matrix, absorb_s=absorb_s)
             if rank is not None and s.shape[0] > rank:
                 u, s, vh = u[:,:rank], s[:rank], vh[:rank,:]
             return u, s, vh
         return self._einsvd(einsvd_expr, a, svd_func)
 
-    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5):
+    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5, absorb_s=False):
         if not all(isinstance(operand, self.tensor) for operand in operands):
             raise TypeError('all operands should be {}'.format(self.tensor.__qualname__))
         ndims = [operand.ndim for operand in operands]
@@ -126,7 +127,7 @@ def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5):
         einsum_expr, einsvd_expr = einstr.split_einsumsvd(expr)
         a = self._einsum(einsum_expr, operands)
         def svd_func(matrix):
-            return self.rsvd(matrix, rank, niter, oversamp)
+            return self.rsvd(matrix, rank, niter, oversamp, absorb_s=absorb_s)
         return self._einsvd(einsvd_expr, a, svd_func)
 
     def isclose(self, a, b, *, rtol=1e-9, atol=0.0):
@@ -143,9 +144,11 @@ def allclose(self, a, b, *, rtol=1e-9, atol=0.0):
     def inv(self, a):
         return NumPyTensor(la.inv(a.unwrap()))
 
-    def svd(self, a):
+    def svd(self, a, absorb_s=False):
         u, s, vh = la.svd(a.unwrap(), full_matrices=False)
-        return NumPyTensor(u), NumPyTensor(s), NumPyTensor(vh)
+        u, s, vh = self.tensor(u), self.tensor(s), self.tensor(vh)
+        u, s, vh = svd_absorb_s(u, s, vh, absorb_s)
+        return u, s, vh
 
     def __getattr__(self, attr):
         wrap = lambda val: NumPyTensor(val) if isinstance(val, np.ndarray) else val
diff --git a/tensorbackends/extensions/einsumsvd_implicit_rand.py b/tensorbackends/extensions/einsumsvd_implicit_rand.py
index f225a9f..2b49621 100644
--- a/tensorbackends/extensions/einsumsvd_implicit_rand.py
+++ b/tensorbackends/extensions/einsumsvd_implicit_rand.py
@@ -4,7 +4,7 @@
 import scipy.linalg as la
 
 
-def einsumsvd_implicit_rand(backend, subscripts, *operands, rank, niter, orth_method):
+def einsumsvd_implicit_rand(backend, subscripts, *operands, rank, niter, orth_method, absorb_s):
     if orth_method == 'qr':
         orthogonalize = orthogonalize_qr
     elif orth_method == 'local_gram':
@@ -74,7 +74,7 @@ def einsumsvd_implicit_rand(backend, subscripts, *operands, rank, niter, orth_me
             op_YT = orthogonalize(backend, op_YT)
             op_X = apply_A(backend,expr_A,ops_A,term_YT,op_YT,term_X)
 
-    mat_U, S, mat_XVT = backend.svd(op_X.reshape(np.prod(op_X.shape)//r, r))
+    mat_U, S, mat_XVT = backend.svd(op_X.reshape(np.prod(op_X.shape)//r, r), absorb_s=absorb_s)
     op_YT = backend.tensordot(op_YT.conj(), mat_XVT, axes=((-1),(-1)))
     op_X = mat_U.reshape(*op_X.shape)
     U = op_X
diff --git a/tensorbackends/extensions/rsvd.py b/tensorbackends/extensions/rsvd.py
index 68bc658..11a778e 100644
--- a/tensorbackends/extensions/rsvd.py
+++ b/tensorbackends/extensions/rsvd.py
@@ -1,5 +1,6 @@
+from ..utils.svd_absorb_s import svd_absorb_s
 
-def rsvd(backend, a, rank, niter, oversamp):
+def rsvd(backend, a, rank, niter, oversamp, absorb_s):
     dtype = a.dtype
     m, n = a.shape
     r = min(rank + oversamp, m, n)
@@ -17,4 +18,5 @@ def rsvd(backend, a, rank, niter, oversamp):
     u = q @ u_sub
     if rank < r:
         u, s, vh = u[:,:rank], s[:rank], vh[:rank,:]
+    u, s, vh = svd_absorb_s(u, s, vh, absorb_s)
     return u, s, vh
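
Note: an end-to-end usage sketch of the `absorb_s` flag threaded through the
hunks above, assuming `tensorbackends.get('numpy')` is the package entry point
(the matrix values are made up for illustration):

    import tensorbackends

    tb = tensorbackends.get('numpy')
    a = tb.astensor([[3.0, 0.0], [0.0, 2.0]])

    # absorb_s='even' folds sqrt(s) into both factors, so u @ vh
    # reconstructs a without reapplying s
    u, s, vh = tb.svd(a, absorb_s='even')
    assert tb.allclose(tb.einsum('ij,jk->ik', u, vh), a, atol=1e-9)

    # the same flag is accepted by einsvd/einsumsvd, e.g. absorbing
    # all of s into the right factor
    u, _, v = tb.einsumsvd('ij,jk->is,sk', a, a, absorb_s='v')
    aa = tb.einsum('ij,jk->ik', a, a)
    assert tb.allclose(tb.einsum('is,sk->ik', u, v), aa, atol=1e-9)
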
diff --git a/tensorbackends/interface/backend.py b/tensorbackends/interface/backend.py
index 1a26238..6c23b77 100644
--- a/tensorbackends/interface/backend.py
+++ b/tensorbackends/interface/backend.py
@@ -72,41 +72,41 @@ def vstack(self, tensors):
     def einsum(self, subscripts, *operands):
         raise NotImplementedError()
 
-    def einsvd(self, subscripts, a, option=options.ReducedSVD()):
+    def einsvd(self, subscripts, a, option=options.ReducedSVD(), absorb_s=False):
         if isinstance(option, options.ReducedSVD):
-            return self.einsvd_reduced(subscripts, a, option.rank)
+            return self.einsvd_reduced(subscripts, a, option.rank, absorb_s)
         elif isinstance(option, options.RandomizedSVD):
-            return self.einsvd_rand(subscripts, a, option.rank, option.niter, option.oversamp)
+            return self.einsvd_rand(subscripts, a, option.rank, option.niter, option.oversamp, absorb_s)
         else:
             raise ValueError('{} is not a valid option for einsvd'.format(type(option).__qualname__))
 
-    def einsvd_reduced(self, subscripts, a, rank=None):
+    def einsvd_reduced(self, subscripts, a, rank=None, absorb_s=False):
         raise NotImplementedError()
 
-    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5):
+    def einsvd_rand(self, subscripts, a, rank, niter=1, oversamp=5, absorb_s=False):
         raise NotImplementedError()
 
     def einqr(self, subscripts, a):
         return extensions.einqr(self, subscripts, a)
 
-    def einsumsvd(self, subscripts, *operands, option=options.ReducedSVD()):
+    def einsumsvd(self, subscripts, *operands, option=options.ReducedSVD(), absorb_s=False):
         if isinstance(option, options.ReducedSVD):
-            return self.einsumsvd_reduced(subscripts, *operands, rank=option.rank)
+            return self.einsumsvd_reduced(subscripts, *operands, rank=option.rank, absorb_s=absorb_s)
         elif isinstance(option, options.RandomizedSVD):
-            return self.einsumsvd_rand(subscripts, *operands, rank=option.rank, niter=option.niter, oversamp=option.oversamp)
+            return self.einsumsvd_rand(subscripts, *operands, rank=option.rank, niter=option.niter, oversamp=option.oversamp, absorb_s=absorb_s)
         elif isinstance(option, options.ImplicitRandomizedSVD):
-            return self.einsumsvd_implicit_rand(subscripts, *operands, rank=option.rank, niter=option.niter, orth_method=option.orth_method)
+            return self.einsumsvd_implicit_rand(subscripts, *operands, rank=option.rank, niter=option.niter, orth_method=option.orth_method, absorb_s=absorb_s)
         else:
             raise ValueError('{} is not a valid option for einsumsvd'.format(type(option).__qualname__))
 
-    def einsumsvd_reduced(self, subscripts, *operands, rank=None):
+    def einsumsvd_reduced(self, subscripts, *operands, rank=None, absorb_s=False):
         raise NotImplementedError()
 
-    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5):
+    def einsumsvd_rand(self, subscripts, *operands, rank, niter=1, oversamp=5, absorb_s=False):
         raise NotImplementedError()
 
-    def einsumsvd_implicit_rand(self, subscripts, *operands, rank, niter=1, orth_method='qr'):
-        return extensions.einsumsvd_implicit_rand(self, subscripts, *operands, rank=rank, niter=niter, orth_method=orth_method)
+    def einsumsvd_implicit_rand(self, subscripts, *operands, rank, niter=1, orth_method='qr', absorb_s=False):
+        return extensions.einsumsvd_implicit_rand(self, subscripts, *operands, rank=rank, niter=niter, orth_method=orth_method, absorb_s=absorb_s)
 
     def isclose(self, a, b, *, rtol=1e-9, atol=0.0):
         raise NotImplementedError()
 
@@ -117,8 +117,8 @@ def allclose(self, a, b, *, rtol=1e-9, atol=0.0):
     def inv(self, a):
         raise NotImplementedError()
 
-    def svd(self, a):
+    def svd(self, a, absorb_s=False):
         raise NotImplementedError()
 
-    def rsvd(self, a, rank, niter=1, oversamp=5):
-        return extensions.rsvd(self, a, rank, niter, oversamp)
+    def rsvd(self, a, rank, niter=1, oversamp=5, absorb_s=False):
+        return extensions.rsvd(self, a, rank, niter, oversamp, absorb_s)
diff --git a/tensorbackends/utils/svd_absorb_s.py b/tensorbackends/utils/svd_absorb_s.py
new file mode 100644
index 0000000..198799c
--- /dev/null
+++ b/tensorbackends/utils/svd_absorb_s.py
@@ -0,0 +1,38 @@
+
+def svd_absorb_s(u, s, vh, absorb_s):
+    if not absorb_s:
+        return u, s, vh
+    elif absorb_s == 'even':
+        s **= 0.5
+        u = u.backend.einsum('ij,j->ij', u, s)
+        vh = vh.backend.einsum('j,jk->jk', s, vh)
+        return u, s, vh
+    elif absorb_s == 'u':
+        u = u.backend.einsum('ij,j->ij', u, s)
+        return u, s, vh
+    elif absorb_s == 'v':
+        vh = vh.backend.einsum('j,jk->jk', s, vh)
+        return u, s, vh
+    else:
+        raise ValueError('invalid `absorb_s` option: {}'.format(absorb_s))
+
+
+def svd_absorb_s_ctf(u, s, vh, absorb_s, u_str, vh_str):
+    if not absorb_s:
+        return u, s, vh
+    elif absorb_s == 'even':
+        s **= 0.5
+        s_str, = set(u_str) & set(vh_str)
+        u = u.backend.einsum(f'{u_str},{s_str}->{u_str}', u, s)
+        vh = vh.backend.einsum(f'{s_str},{vh_str}->{vh_str}', s, vh)
+        return u, s, vh
+    elif absorb_s == 'u':
+        s_str, = set(u_str) & set(vh_str)
+        u = u.backend.einsum(f'{u_str},{s_str}->{u_str}', u, s)
+        return u, s, vh
+    elif absorb_s == 'v':
+        s_str, = set(u_str) & set(vh_str)
+        vh = vh.backend.einsum(f'{s_str},{vh_str}->{vh_str}', s, vh)
+        return u, s, vh
+    else:
+        raise ValueError('invalid `absorb_s` option: {}'.format(absorb_s))
diff --git a/test/test_backend.py b/test/test_backend.py
index a2d0e8a..4aa95fa 100644
--- a/test/test_backend.py
+++ b/test/test_backend.py
@@ -157,6 +157,28 @@ def test_einsumsvd_options(self, tb):
                 self.assertTrue(tb.allclose(usv, low_rank, atol=1e-9))
 
+    def test_einsumsvd_absorb_s(self, tb):
+        from tensorbackends.interface import ReducedSVD, RandomizedSVD, ImplicitRandomizedSVD
+        a = tb.astensor([[0,2e-3j,0,0],[1e-3,0,0,0],[0,0,3,0],[0,0,0,4j]], dtype=complex)
+        p = tb.astensor([[0,1,0,0],[1,0,0,0],[0,0,1,0],[0,0,0,1]], dtype=complex)
+        s_true = tb.astensor([4,3])
+        low_rank = tb.astensor([[0,0,0,0],[0,0,0,0],[0,0,3,0],[0,0,0,4j]], dtype=complex)
+        options = [
+            ReducedSVD(rank=2),
+            RandomizedSVD(rank=2, niter=2, oversamp=1),
+            ImplicitRandomizedSVD(rank=2, niter=2, orth_method='qr'),
+            ImplicitRandomizedSVD(rank=2, niter=2, orth_method='local_gram'),
+        ]
+        for option in options:
+            for absorb_s in ['even', 'u', 'v']:
+                with self.subTest(option=option):
+                    u, _, v = tb.einsumsvd('ij,jk->is,sk', p, a, option=option, absorb_s=absorb_s)
+                    usv = tb.einsum('is,sk->ik', u, v)
+                    self.assertEqual(u.shape, (4,2))
+                    self.assertEqual(v.shape, (2,4))
+                    self.assertTrue(tb.allclose(usv, low_rank, atol=1e-9))
+
+
     def test_einsumsvd_rand(self, tb):
         tb.random.seed(42)
         A1 = tb.random.random((2,3)) + tb.random.random((2,3)) * 1j