diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..a917e09
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,30 @@
+# Formatting conventions for the sparseir C++ sources
+IndentWidth: 4
+UseTab: Never
+ColumnLimit: 80
+
+BreakBeforeBraces: Custom
+BraceWrapping:
+  AfterClass: false
+  AfterFunction: true
+  AfterStruct: true
+  AfterControlStatement: false
+  AfterEnum: false
+  AfterNamespace: false
+  AfterObjCDeclaration: false
+  AfterUnion: false
+  BeforeCatch: false
+  BeforeElse: false
+  IndentBraces: false
+
+AlwaysBreakTemplateDeclarations: true
+
+AccessModifierOffset: -4
+IndentCaseLabels: false
+
+AllowShortIfStatementsOnASingleLine: false
+
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+
+SpaceInEmptyBlock: true
+SpaceAfterCStyleCast: false
diff --git a/include/sparseir/_impl.hpp b/include/sparseir/_impl.hpp
index fd201be..9add113 100644
--- a/include/sparseir/_impl.hpp
+++ b/include/sparseir/_impl.hpp
@@ -2,48 +2,55 @@
 
 #include "xprec/ddouble.hpp"
 
-
 namespace sparseir {
 
 // General case
-template<typename T>
-inline T sqrt_impl(const T& x) {
+template <typename T>
+inline T sqrt_impl(const T &x)
+{
     return std::sqrt(x);
 }
 
 // Specialization for DDouble
-template<>
-inline xprec::DDouble sqrt_impl(const xprec::DDouble& x) {
+template <>
+inline xprec::DDouble sqrt_impl(const xprec::DDouble &x)
+{
     return xprec::sqrt(x);
 }
 
-template<typename T>
-inline T cosh_impl(const T& x) {
+template <typename T>
+inline T cosh_impl(const T &x)
+{
     return std::cosh(x);
 }
 
-template<>
-inline xprec::DDouble cosh_impl(const xprec::DDouble& x) {
+template <>
+inline xprec::DDouble cosh_impl(const xprec::DDouble &x)
+{
     return xprec::cosh(x);
 }
 
-template<typename T>
-inline T sinh_impl(const T& x) {
+template <typename T>
+inline T sinh_impl(const T &x)
+{
     return std::sinh(x);
 }
 
-template<>
-inline xprec::DDouble sinh_impl(const xprec::DDouble& x) {
+template <>
+inline xprec::DDouble sinh_impl(const xprec::DDouble &x)
+{
     return xprec::sinh(x);
 }
 
-template<typename T>
-inline T exp_impl(const T& x) {
+template <typename T>
+inline T exp_impl(const T &x)
+{
     return std::exp(x);
 }
 
-template<>
-inline xprec::DDouble exp_impl(const xprec::DDouble& x) {
+template <>
+inline xprec::DDouble exp_impl(const xprec::DDouble &x)
+{
     return xprec::exp(x);
 }
-}
\ No newline at end of file
+} // namespace sparseir
\ No newline at end of file
diff --git a/include/sparseir/_linalg.hpp b/include/sparseir/_linalg.hpp
index 5473c50..c4abad8 100644
--- a/include/sparseir/_linalg.hpp
+++ b/include/sparseir/_linalg.hpp
@@ -3,15 +3,15 @@
 
 #include
 #include
-#include
-#include
 #include
-#include
 #include
+#include
+#include
+#include
 
 #include "xprec/ddouble.hpp"
 
-namespace sparseir{
+namespace sparseir {
 
 using Eigen::Dynamic;
 using Eigen::Matrix;
@@ -20,9 +22,11 @@ using Eigen::Vector;
 using Eigen::VectorX;
 
 template <typename Derived>
-int argmax(const Eigen::MatrixBase<Derived>& vec) {
+int argmax(const Eigen::MatrixBase<Derived> &vec)
+{
     // Ensure this function is only used for 1D Eigen column vectors
-    static_assert(Derived::ColsAtCompileTime == 1, "argmax function is only for Eigen column vectors.");
+    static_assert(Derived::ColsAtCompileTime == 1,
+                  "argmax function is only for Eigen column vectors.");
 
     int maxIndex = 0;
     auto maxValue = vec(0);
@@ -40,14 +42,15 @@ int argmax(const Eigen::MatrixBase<Derived>& vec) {
 
 /*
 This function ports Julia's implementation of the `invperm` function to C++.
 */
-inline Eigen::VectorXi invperm(const Eigen::VectorXi& a) {
+inline Eigen::VectorXi invperm(const Eigen::VectorXi &a)
+{
     int n = a.size();
     Eigen::VectorXi b(n);
     b.setConstant(-1);
 
-    for (int i = 0; i < n; i++){
+    for (int i = 0; i < n; i++) {
         int j = a(i);
-        if ((0 <= j < n) && b(j) == -1){
-            std::invalid_argument("invalid permutation");
-        }
+        // `0 <= j < n` parses as `(0 <= j) < n`, and the exception was
+        // constructed but never thrown; check the range explicitly and throw.
+        if (j < 0 || j >= n || b(j) != -1) {
+            throw std::invalid_argument("invalid permutation");
+        }
         b(j) = i;
@@ -56,27 +59,31 @@ inline Eigen::VectorXi invperm(const Eigen::VectorXi& a) {
 }
 
 template <typename T>
-struct SVDResult {
+struct SVDResult
+{
     Matrix<T, Dynamic, Dynamic> U;
     Vector<T, Dynamic> s;
     Matrix<T, Dynamic, Dynamic> V;
 };
 
 template <typename T>
-struct QRPivoted {
+struct QRPivoted
+{
     Matrix<T, Dynamic, Dynamic> factors;
     Vector<T, Dynamic> taus;
     Vector<int, Dynamic> jpvt;
 };
 
 template <typename T>
-struct QRPackedQ {
+struct QRPackedQ
+{
     Eigen::MatrixX<T> factors;
     Eigen::Matrix<T, Eigen::Dynamic, 1> taus;
 };
 
 template <typename T>
-void lmul(const QRPackedQ<T> Q, Eigen::MatrixX<T>& B) {
+void lmul(const QRPackedQ<T> Q, Eigen::MatrixX<T> &B)
+{
     Eigen::MatrixX<T> A_factors = Q.factors;
     Eigen::VectorX<T> A_tau = Q.taus;
     int mA = A_factors.rows();
@@ -85,7 +92,8 @@ void lmul(const QRPackedQ<T> Q, Eigen::MatrixX<T>& B) {
     int nB = B.cols();
 
     if (mA != mB) {
-        throw std::invalid_argument("DimensionMismatch: matrix A has different dimensions than matrix B");
+        throw std::invalid_argument("DimensionMismatch: matrix A has different "
+                                    "dimensions than matrix B");
     }
 
     for (int k = std::min(mA, nA) - 1; k >= 0; --k) {
@@ -104,7 +112,9 @@ void lmul(const QRPackedQ<T> Q, Eigen::MatrixX<T>& B) {
 }
 
 template <typename T>
-void mul(Eigen::MatrixX<T>& C, const QRPackedQ<T>& Q, const Eigen::MatrixX<T>& B) {
+void mul(Eigen::MatrixX<T> &C, const QRPackedQ<T> &Q,
+         const Eigen::MatrixX<T> &B)
+{
     int mB = B.rows();
     int nB = B.cols();
@@ -112,7 +122,8 @@ void mul(Eigen::MatrixX<T>& C, const QRPackedQ<T>& Q, const Eigen::MatrixX<T>& B
     int nC = C.cols();
 
     if (nB != nC) {
-        throw std::invalid_argument("DimensionMismatch: number of columns in B and C do not match");
+        throw std::invalid_argument(
+            "DimensionMismatch: number of columns in B and C do not match");
     }
 
     if (mB < mC) {
@@ -148,21 +159,21 @@ function getproperty(F::QRPivoted{T}, d::Symbol) where T
 end
 */
 
-
 /*
 template <typename T>
 struct QRPackedQ {
     Matrix<T, Dynamic, Dynamic> factors;
     Matrix<T, Dynamic, Dynamic> taus;
 
-    QRPackedQ(const Matrix<T, Dynamic, Dynamic>& factors, const Matrix<T, Dynamic, Dynamic>& taus)
-        : factors(factors), taus(taus) {}
+    QRPackedQ(const Matrix<T, Dynamic, Dynamic>& factors, const Matrix<T, Dynamic, Dynamic>& taus) : factors(factors), taus(taus) {}
 };
 */
 
 // TODO: FIX THIS
 template <typename T>
-auto getPropertyP(const QRPivoted<T>& F) {
+auto getPropertyP(const QRPivoted<T> &F)
+{
     int m = F.factors.rows();
     int n = F.factors.cols();
 
@@ -174,12 +185,14 @@ auto getPropertyP(const QRPivoted<T>& F) {
 }
 
 template <typename T>
-QRPackedQ<T> getPropertyQ(const QRPivoted<T>& F) {
+QRPackedQ<T> getPropertyQ(const QRPivoted<T> &F)
+{
     return QRPackedQ<T>{F.factors, F.taus};
 }
 
 template <typename T>
-MatrixX<T> getPropertyR(const QRPivoted<T>& F) {
+MatrixX<T> getPropertyR(const QRPivoted<T> &F)
+{
     int m = F.factors.rows();
     int n = F.factors.cols();
 
@@ -193,13 +206,13 @@ MatrixX<T> getPropertyR(const QRPivoted<T>& F) {
     return upper;
 }
 
-// General template for _copysign, handles standard floating-point types like double and float
-inline double _copysign(double x, double y) {
-    return std::copysign(x, y);
-}
+// General template for _copysign, handles standard floating-point types like
+// double and float
+inline double _copysign(double x, double y) { return std::copysign(x, y); }
 
 // Specialization for xprec::DDouble type, assuming xprec::copysign is defined
-inline xprec::DDouble _copysign(xprec::DDouble x, xprec::DDouble y) {
+inline xprec::DDouble _copysign(xprec::DDouble x, xprec::DDouble y)
+{
     return xprec::copysign(x, y);
 }
 
@@ -210,20 +223,23 @@
 Elementary reflection similar to LAPACK.
The reflector is not Hermitian but ensures that tridiagonalization of Hermitian matrices become real. See lawn72 */ template -typename Derived::Scalar reflector(Eigen::MatrixBase& x) { +typename Derived::Scalar reflector(Eigen::MatrixBase &x) +{ using T = typename Derived::Scalar; int n = x.size(); - if (n == 0) return T(0); + if (n == 0) + return T(0); - T xi1 = x(0); // First element of x - T normu = x.norm(); // Norm of vector x - if (normu == T(0)) return T(0); + T xi1 = x(0); // First element of x + T normu = x.norm(); // Norm of vector x + if (normu == T(0)) + return T(0); // Calculate ν using copysign, which gives normu with the sign of xi1 T nu = _copysign(normu, xi1); - xi1 += nu; // Update xi1 - x(0) = -nu; // Set first element to -ν + xi1 += nu; // Update xi1 + x(0) = -nu; // Set first element to -ν // Divide remaining elements by the new xi1 value for (int i = 1; i < n; ++i) { @@ -238,18 +254,24 @@ This implementation is based on Julia's LinearAlgebra.reflectorApply! function. reflectorApply!(x, τ, A) -Multiplies `A` in-place by a Householder reflection on the left. It is equivalent to `A .= (I - conj(τ)*[1; x[2:end]]*[1; x[2:end]]')*A`. +Multiplies `A` in-place by a Householder reflection on the left. It is +equivalent to `A .= (I - conj(τ)*[1; x[2:end]]*[1; x[2:end]]')*A`. */ template -void reflectorApply(Eigen::VectorBlock, -1, 1, true>, -1>& x, T tau, Eigen::Block>& A){ +void reflectorApply( + Eigen::VectorBlock, -1, 1, true>, -1> &x, + T tau, Eigen::Block> &A) +{ int m = A.rows(); int n = A.cols(); if (x.size() != m) { - throw std::invalid_argument("Reflector length must match the first dimension of matrix A"); + throw std::invalid_argument( + "Reflector length must match the first dimension of matrix A"); } - if (m == 0) return; + if (m == 0) + return; // Loop over each column of A for (int j = 0; j < n; ++j) { @@ -274,7 +296,9 @@ void reflectorApply(Eigen::VectorBlock, -1, 1, tr A will be modified in-place. 
*/ template -std::pair, int> rrqr(MatrixX& A, T rtol = std::numeric_limits::epsilon()) { +std::pair, int> rrqr(MatrixX &A, + T rtol = std::numeric_limits::epsilon()) +{ using std::abs; using std::sqrt; @@ -321,7 +345,7 @@ std::pair, int> rrqr(MatrixX& A, T rtol = std::numeric_limits } } - if (abs(A(i,i)) < rtol * abs((A(0,0)))) { + if (abs(A(i, i)) < rtol * abs((A(0, 0)))) { A.bottomRightCorner(m - i, n - i).setZero(); taus.tail(k - i).setZero(); rk = i; @@ -390,7 +414,8 @@ rrqr(Eigen::MatrixX& A, T rtol = std::numeric_limits::epsilon()) { */ template -Eigen::MatrixBase triu(Eigen::MatrixBase& M) { +Eigen::MatrixBase triu(Eigen::MatrixBase &M) +{ using T = typename Derived::Scalar; int m = M.rows(); int n = M.cols(); @@ -402,13 +427,14 @@ Eigen::MatrixBase triu(Eigen::MatrixBase& M) { return M; } - template -std::pair, MatrixX> truncate_qr_result(QRPivoted& qr, int k) { +std::pair, MatrixX> truncate_qr_result(QRPivoted &qr, int k) +{ int m = qr.factors.rows(); int n = qr.factors.cols(); if (k < 0 || k > std::min(m, n)) { - throw std::domain_error("Invalid rank, must be in [0, " + std::to_string(std::min(m, n)) + "]"); + throw std::domain_error("Invalid rank, must be in [0, " + + std::to_string(std::min(m, n)) + "]"); } // Extract Q matrix @@ -443,37 +469,38 @@ truncate_qr_result(const Eigen::HouseholderQR>& qr, int k) { } // Extract the first k columns of Q - Eigen::MatrixX Qfull = qr.householderQ() * Eigen::MatrixX::Identity(m, k); + Eigen::MatrixX Qfull = qr.householderQ() * Eigen::MatrixX::Identity(m, +k); // Extract the upper triangular part of the first k rows of R - Eigen::MatrixX R = qr.matrixQR().topLeftCorner(k, n).template triangularView(); + Eigen::MatrixX R = qr.matrixQR().topLeftCorner(k, n).template +triangularView(); return std::make_pair(Qfull, R); } */ - /* template -SVDResult tsvd(Matrix& A, double rtol = std::numeric_limits::epsilon()) { - auto A_qr_k = rrqr(A, rtol); +SVDResult tsvd(Matrix& A, double rtol = +std::numeric_limits::epsilon()) { auto A_qr_k = rrqr(A, rtol); // FIX ME auto QR_result = truncate_qr_result(A_qr_k.first, A_qr_k.second); auto Q = QR_result.first; auto R = QR_result.second; - Eigen::JacobiSVD> svd(R.transpose(), Eigen::ComputeThinU | Eigen::ComputeThinV); - Matrix U = Q * svd.matrixV(); - Matrix V = svd.matrixU().transpose(); + Eigen::JacobiSVD> svd(R.transpose(), +Eigen::ComputeThinU | Eigen::ComputeThinV); Matrix U = Q * +svd.matrixV(); Matrix V = svd.matrixU().transpose(); Vector s = svd.singularValues(); return SVDResult {U, s, V}; } */ - // Swap columns of a matrix A template -void swapCols(Eigen::MatrixX& A, int i, int j) { +void swapCols(Eigen::MatrixX &A, int i, int j) +{ if (i != j) { A.col(i).swap(A.col(j)); } @@ -482,7 +509,8 @@ void swapCols(Eigen::MatrixX& A, int i, int j) { // Truncate RRQR result to low rank template std::pair, Eigen::MatrixX> -truncateQRResult(const Eigen::MatrixX& Q, const Eigen::MatrixX& R, int k) { +truncateQRResult(const Eigen::MatrixX &Q, const Eigen::MatrixX &R, int k) +{ int m = Q.rows(); int n = R.cols(); @@ -498,7 +526,8 @@ truncateQRResult(const Eigen::MatrixX& Q, const Eigen::MatrixX& R, int k) // Truncated SVD (TSVD) template std::tuple, Eigen::VectorX, Eigen::MatrixX> -tsvd(const Eigen::MatrixX& A, T rtol = std::numeric_limits::epsilon()) { +tsvd(const Eigen::MatrixX &A, T rtol = std::numeric_limits::epsilon()) +{ // Step 1: Apply RRQR to A QRPivoted A_qr; int k; @@ -513,11 +542,12 @@ tsvd(const Eigen::MatrixX& A, T rtol = std::numeric_limits::epsilon()) { // Step 3: Compute SVD of R_trunc - 
//Eigen::JacobiSVD<Eigen::MatrixX<T>> svd(R_trunc.transpose(), Eigen::ComputeThinU | Eigen::ComputeThinV);
-    // There seems to be a bug in the latest version of Eigen3.
-    // Please first construct a Jacobi SVD and then compare the results.
-    // Do not use the svd_jacobi function directly.
-    // Better to write a wrrapper function for the SVD.
+    // Eigen::JacobiSVD<Eigen::MatrixX<T>> svd(R_trunc.transpose(),
+    // Eigen::ComputeThinU | Eigen::ComputeThinV);
+    // There seems to be a bug in the latest version of Eigen3.
+    // Please first construct a Jacobi SVD and then compare the results.
+    // Do not use the svd_jacobi function directly.
+    // Better to write a wrapper function for the SVD.
     Eigen::JacobiSVD<Eigen::MatrixX<T>> svd;
 
     // The following comment is taken from Julia's implementation
@@ -538,9 +568,10 @@ tsvd(const Eigen::MatrixX<T>& A, T rtol = std::numeric_limits<T>::epsilon()) {
     return std::make_tuple(U, s, V);
 }
 
-
 template <typename T>
-std::tuple<std::pair<T, T>, std::pair<T, T>, std::pair<T, T>> svd2x2(T f, T g, T h) {
+std::tuple<std::pair<T, T>, std::pair<T, T>, std::pair<T, T>> svd2x2(T f, T g,
+                                                                     T h)
+{
     T fa = T(std::abs((double)f));
     T ga = T(std::abs((double)g));
     T ha = T(std::abs((double)h));
@@ -549,7 +580,8 @@ std::tuple<std::pair<T, T>, std::pair<T, T>, std::pair<T, T>> svd2x2(T f, T g, T h) {
 
     if (fa < ha) {
         // switch h <-> f, cu <-> sv, cv <-> su
-        //std::tie(std::tie(sv, cv), std::tie(smax, smin), std::tie(su, cu)) = svd2x2(h, g, f);
+        // std::tie(std::tie(sv, cv), std::tie(smax, smin), std::tie(su, cu)) =
+        // svd2x2(h, g, f);
         auto svd_result = svd2x2(h, g, f);
         sv = std::get<0>(std::get<0>(svd_result));
         cv = std::get<1>(std::get<0>(svd_result));
@@ -596,15 +628,18 @@ std::tuple<std::pair<T, T>, std::pair<T, T>, std::pair<T, T>> svd2x2(T f, T g, T h) {
         su = ((h / f) * sv) / a;
     }
 
-    return std::make_tuple(std::make_pair(cu, su), std::make_pair(smax, smin), std::make_pair(cv, sv));
+    return std::make_tuple(std::make_pair(cu, su), std::make_pair(smax, smin),
+                           std::make_pair(cv, sv));
 }
 
 template <typename T>
-std::tuple<std::pair<T, T>, T> givens_params(T f, T g) {
+std::tuple<std::pair<T, T>, T> givens_params(T f, T g)
+{
     if (g == 0) {
         return {{T(1), T(0)}, f};
     } else if (f == 0) {
-        return {{T(0), T(std::copysign(1.0, (double)g))}, T(std::abs((double)g))};
+        return {{T(0), T(std::copysign(1.0, (double)g))},
+                T(std::abs((double)g))};
     } else {
         T r = T(std::copysign(std::hypot((double)f, (double)g), (double)f));
         T c = f / r;
@@ -614,14 +649,17 @@ std::tuple<std::pair<T, T>, T> givens_params(T f, T g) {
 }
 
 template <typename T>
-std::pair<T, T> givens_lmul(T c, T s, T x, T y) {
+std::pair<T, T> givens_lmul(T c, T s, T x, T y)
+{
     T a = c * x + s * y;
     T b = c * y - s * x;
     return {a, b};
 }
 
 template <typename T>
-std::tuple<std::tuple<T, T>, std::tuple<T, T>, std::tuple<T, T>> svd2x2(T a11, T a12, T a21, T a22) {
+std::tuple<std::tuple<T, T>, std::tuple<T, T>, std::tuple<T, T>>
+svd2x2(T a11, T a12, T a21, T a22)
+{
     T abs_a12 = std::abs((double)(a12));
     T abs_a21 = std::abs((double)(a21));
@@ -634,7 +672,8 @@ std::tuple<std::tuple<T, T>, std::tuple<T, T>, std::tuple<T, T>> svd2x2(T a11, T a12, T a21, T a22) {
         auto smax_smin = std::get<1>(svd_result);
         auto cv = std::get<0>(std::get<2>(svd_result));
         auto sv = std::get<1>(std::get<2>(svd_result));
-        return std::make_tuple(std::make_pair(cv, sv), smax_smin, std::make_pair(cu, su));
+        return std::make_tuple(std::make_pair(cv, sv), smax_smin,
+                               std::make_pair(cu, su));
     } else {
         auto rot = givens_params(a11, a21);
         auto cx_sx = std::get<0>(rot);
@@ -655,13 +694,15 @@ std::tuple<std::tuple<T, T>, std::tuple<T, T>, std::tuple<T, T>> svd2x2(T a11, T a12, T a21, T a22) {
         auto sv = std::get<1>(std::get<2>(svd_result));
         std::tie(cu, su) = givens_lmul(cx, -sx, cu, su);
 
-        return std::make_tuple(std::make_tuple(cu, su), std::make_tuple(smax, smin), std::make_tuple(cu, sv));
+        // The right rotation pair is (cv, sv), matching the a21 == 0 branch.
+        return std::make_tuple(std::make_tuple(cu, su),
+                               std::make_tuple(smax, smin),
+                               std::make_tuple(cv, sv));
     }
 }
 
-
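+// A minimal usage sketch for the two svd2x2 overloads (illustrative values,
+// assuming T = double): the three-argument form factorizes the
+// upper-triangular 2x2 matrix [f g; 0 h], the four-argument form a general
+// 2x2 matrix. Both return ((cu, su), (smax, smin), (cv, sv)): the left and
+// right rotation pairs together with the larger/smaller singular value.
+//
+//     auto res = svd2x2(2.0, 1.0, 1.0);            // f, g, h
+//     double smax = std::get<0>(std::get<1>(res)); // larger singular value
+//     double smin = std::get<1>(std::get<1>(res)); // smaller singular value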
template -T jacobi_sweep(Matrix& U, Matrix& VT) { +T jacobi_sweep(Matrix &U, Matrix &VT) +{ int ii = U.rows(); int jj = U.cols(); if (ii < jj) { @@ -692,14 +733,18 @@ T jacobi_sweep(Matrix& U, Matrix& VT) } template -SVDResult svd_jacobi(Matrix& U, T rtol = std::numeric_limits::epsilon(), int maxiter = 20) { +SVDResult svd_jacobi(Matrix &U, + T rtol = std::numeric_limits::epsilon(), + int maxiter = 20) +{ int m = U.rows(); int n = U.cols(); if (m < n) { throw std::invalid_argument("matrix must be 'tall'"); } - Matrix VT = Matrix::Identity(n, n); + Matrix VT = + Matrix::Identity(n, n); T Unorm = U.topLeftCorner(n, n).norm(); for (int iter = 0; iter < maxiter; ++iter) { @@ -712,7 +757,7 @@ SVDResult svd_jacobi(Matrix& U, T rtol = std::numeric_li Vector s = U.colwise().norm(); U.array().rowwise() /= s.transpose().array(); - return SVDResult {U, s, VT}; + return SVDResult{U, s, VT}; } } // namespace sparseir \ No newline at end of file diff --git a/include/sparseir/_root.hpp b/include/sparseir/_root.hpp index 322f6c9..380bb59 100644 --- a/include/sparseir/_root.hpp +++ b/include/sparseir/_root.hpp @@ -1,15 +1,16 @@ #pragma once -#include -#include #include -#include #include +#include +#include +#include namespace sparseir { template -T midpoint(T lo, T hi) { +T midpoint(T lo, T hi) +{ if (std::is_integral::value) { return lo + ((hi - lo) >> 1); } else { @@ -18,16 +19,20 @@ T midpoint(T lo, T hi) { } template -std::vector find_all(F f, const std::vector& xgrid) { +std::vector find_all(F f, const std::vector &xgrid) +{ std::vector fx; - std::transform(xgrid.begin(), xgrid.end(), std::back_inserter(fx), [&](T x) { return static_cast(f(x)); }); + std::transform(xgrid.begin(), xgrid.end(), std::back_inserter(fx), + [&](T x) { return static_cast(f(x)); }); std::vector hit(fx.size()); - std::transform(fx.begin(), fx.end(), hit.begin(), [](double val) { return val == 0.0; }); + std::transform(fx.begin(), fx.end(), hit.begin(), + [](double val) { return val == 0.0; }); std::vector x_hit; for (size_t i = 0; i < hit.size(); ++i) { - if (hit[i]) x_hit.push_back(xgrid[i]); + if (hit[i]) + x_hit.push_back(xgrid[i]); } std::vector sign_change(fx.size() - 1); @@ -39,7 +44,8 @@ std::vector find_all(F f, const std::vector& xgrid) { sign_change[i] = sign_change[i] && !hit[i] && !hit[i + 1]; } - if (std::none_of(sign_change.begin(), sign_change.end(), [](bool v) { return v; })) { + if (std::none_of(sign_change.begin(), sign_change.end(), + [](bool v) { return v; })) { return x_hit; } @@ -62,7 +68,10 @@ std::vector find_all(F f, const std::vector& xgrid) { } } - double epsilon_x = std::numeric_limits::epsilon() * *std::max_element(xgrid.begin(), xgrid.end(), [](T a, T b) { return std::abs(a) < std::abs(b); }); + double epsilon_x = + std::numeric_limits::epsilon() * + *std::max_element(xgrid.begin(), xgrid.end(), + [](T a, T b) { return std::abs(a) < std::abs(b); }); std::vector x_bisect; for (size_t i = 0; i < a.size(); ++i) { @@ -75,10 +84,12 @@ std::vector find_all(F f, const std::vector& xgrid) { } template -T bisect(F f, T a, T b, double fa, double epsilon_x) { +T bisect(F f, T a, T b, double fa, double epsilon_x) +{ while (true) { T mid = midpoint(a, b); - if (closeenough(a, mid, epsilon_x)) return mid; + if (closeenough(a, mid, epsilon_x)) + return mid; double fmid = f(mid); if (std::signbit(fa) != std::signbit(fmid)) { b = mid; @@ -90,7 +101,8 @@ T bisect(F f, T a, T b, double fa, double epsilon_x) { } template -bool closeenough(T a, T b, double epsilon) { +bool closeenough(T a, T b, double epsilon) +{ if 
(std::is_floating_point::value) { return std::abs(a - b) <= epsilon; } else { @@ -99,7 +111,8 @@ bool closeenough(T a, T b, double epsilon) { } template -std::vector refine_grid(const std::vector& grid, int alpha) { +std::vector refine_grid(const std::vector &grid, int alpha) +{ size_t n = grid.size(); size_t newn = alpha * (n - 1) + 1; std::vector newgrid(newn); @@ -117,13 +130,15 @@ std::vector refine_grid(const std::vector& grid, int alpha) { return newgrid; } - template -T bisect_discr_extremum(F absf, T a, T b, double absf_a, double absf_b) { +T bisect_discr_extremum(F absf, T a, T b, double absf_a, double absf_b) +{ T d = b - a; - if (d <= 1) return absf_a > absf_b ? a : b; - if (d == 2) return a + 1; + if (d <= 1) + return absf_a > absf_b ? a : b; + if (d == 2) + return a + 1; T m = midpoint(a, b); T n = m + 1; @@ -138,12 +153,14 @@ T bisect_discr_extremum(F absf, T a, T b, double absf_a, double absf_b) { } template -std::vector discrete_extrema(F f, const std::vector& xgrid) { +std::vector discrete_extrema(F f, const std::vector &xgrid) +{ std::vector fx(xgrid.size()); std::transform(xgrid.begin(), xgrid.end(), fx.begin(), f); std::vector absfx(fx.size()); - std::transform(fx.begin(), fx.end(), absfx.begin(), [](double val) { return std::abs(val); }); + std::transform(fx.begin(), fx.end(), absfx.begin(), + [](double val) { return std::abs(val); }); std::vector signdfdx(fx.size() - 1); for (size_t i = 0; i < fx.size() - 1; ++i) { @@ -155,8 +172,10 @@ std::vector discrete_extrema(F f, const std::vector& xgrid) { derivativesignchange[i] = signdfdx[i] != signdfdx[i + 1]; } - std::vector derivativesignchange_a(derivativesignchange.size() + 2, false); - std::vector derivativesignchange_b(derivativesignchange.size() + 2, false); + std::vector derivativesignchange_a(derivativesignchange.size() + 2, + false); + std::vector derivativesignchange_b(derivativesignchange.size() + 2, + false); for (size_t i = 0; i < derivativesignchange.size(); ++i) { derivativesignchange_a[i] = derivativesignchange[i]; derivativesignchange_b[i + 2] = derivativesignchange[i]; @@ -177,16 +196,19 @@ std::vector discrete_extrema(F f, const std::vector& xgrid) { std::vector res; for (size_t i = 0; i < a.size(); ++i) { - res.push_back(bisect_discr_extremum(f, a[i], b[i], absf_a[i], absf_b[i])); + res.push_back( + bisect_discr_extremum(f, a[i], b[i], absf_a[i], absf_b[i])); } std::vector sfx(fx.size()); - std::transform(fx.begin(), fx.end(), sfx.begin(), [](double val) { return std::signbit(val); }); + std::transform(fx.begin(), fx.end(), sfx.begin(), + [](double val) { return std::signbit(val); }); if (absfx.front() > absfx[1] || sfx.front() != sfx[1]) { res.insert(res.begin(), xgrid.front()); } - if (absfx.back() > absfx[absfx.size() - 2] || sfx.back() != sfx[sfx.size() - 2]) { + if (absfx.back() > absfx[absfx.size() - 2] || + sfx.back() != sfx[sfx.size() - 2]) { res.push_back(xgrid.back()); } diff --git a/include/sparseir/_specfuncs.hpp b/include/sparseir/_specfuncs.hpp index 2906120..eec3a72 100644 --- a/include/sparseir/_specfuncs.hpp +++ b/include/sparseir/_specfuncs.hpp @@ -1,16 +1,17 @@ #pragma once #include -#include #include +#include namespace sparseir { -using Eigen::Matrix; using Eigen::Dynamic; +using Eigen::Matrix; template -T legval(T x, const std::vector& c) { +T legval(T x, const std::vector &c) +{ int nd = c.size(); if (nd < 2) { return c.back(); @@ -21,14 +22,15 @@ T legval(T x, const std::vector& c) { for (int j = nd - 2; j >= 1; --j) { T k = T(j) / T(j + 1); T temp = c0; - c0 = c[j-1] - c1 * k; + 
c0 = c[j - 1] - c1 * k; c1 = temp + c1 * x * (k + 1); } return c0 + c1 * x; } template -Matrix legvander(const Eigen::VectorX& x, int deg) { +Matrix legvander(const Eigen::VectorX &x, int deg) +{ if (deg < 0) { throw std::domain_error("degree needs to be non-negative"); } @@ -47,7 +49,8 @@ Matrix legvander(const Eigen::VectorX& x, int deg) { for (int i = 2; i <= deg; ++i) { T invi = T(1) / T(i); for (int j = 0; j < deg + 1; ++j) { - v(j, i) = v(j, i - 1) * x[j] * (2 - invi) - v(j, i - 2) * (1 - invi); + v(j, i) = + v(j, i - 1) * x[j] * (2 - invi) - v(j, i - 2) * (1 - invi); } } } @@ -57,18 +60,19 @@ Matrix legvander(const Eigen::VectorX& x, int deg) { // Add legder for accepting std::vector template -Matrix legvander(const std::vector& x, int deg) { - Eigen::VectorX x_eigen = Eigen::Map>(x.data(), x.size()); +Matrix legvander(const std::vector &x, int deg) +{ + Eigen::VectorX x_eigen = + Eigen::Map>(x.data(), x.size()); return legvander(x_eigen, deg); } - - - template -Matrix legder(Matrix c, int cnt = 1) { +Matrix legder(Matrix c, int cnt = 1) +{ if (cnt < 0) { - throw std::domain_error("The order of derivation needs to be non-negative"); + throw std::domain_error( + "The order of derivation needs to be non-negative"); } if (cnt == 0) { return c; diff --git a/include/sparseir/basis.hpp b/include/sparseir/basis.hpp index eb1b160..50cfef6 100644 --- a/include/sparseir/basis.hpp +++ b/include/sparseir/basis.hpp @@ -2,172 +2,169 @@ #pragma once #include -#include #include -#include #include +#include #include +#include -namespace sparseir -{ +namespace sparseir { - template - class AbstractBasis +template +class AbstractBasis { +public: + virtual ~AbstractBasis() { } + + /** + * @brief Basis functions on the imaginary time axis. + * + * Set of IR basis functions on the imaginary time (tau) axis, where tau + * is a real number between zero and beta. To get the l-th basis function + * at imaginary time tau, use: + * + * T ul_tau = u(l, tau); + * + * @param l Index of the basis function. + * @param tau Imaginary time variable. + * @return Value of the l-th basis function at time tau. + */ + virtual T u(int l, T tau) const = 0; + + /** + * @brief Basis functions on the reduced Matsubara frequency axis. + * + * Set of IR basis functions on the reduced Matsubara frequency (wn) axis, + * where wn is an integer. These are related to u by the Fourier transform: + * + * uhat(n) = ∫₀^β dτ exp(iπnτ/β) * u(τ) + * + * To get the l-th basis function at some reduced frequency wn, use: + * + * T uhat_l_n = uhat(l, wn); + * + * @param l Index of the basis function. + * @param wn Reduced Matsubara frequency (integer multiplier). + * @return Value of the l-th basis function at frequency wn. + */ + virtual T uhat(int l, int wn) const = 0; + + /** + * @brief Quantum statistic ("F" for fermionic, "B" for bosonic). + * + * @return Character representing the quantum statistics. + */ + virtual char statistics() const = 0; + + /** + * @brief Access basis functions/singular values for given index/indices. + * + * This can be used to truncate the basis to the n most significant + * singular values: `basis[0, n]`. + * + * @param index Index or range of indices. + * @return Pointer to the truncated basis (implementation-defined). + */ + virtual AbstractBasis *operator[](int index) const = 0; + + /** + * @brief Shape of the basis function set. + * + * @return Pair representing the shape (rows, columns). + */ + virtual std::pair shape() const = 0; + + /** + * @brief Number of basis functions / singular values. 
+ * + * @return Size of the basis function set. + */ + virtual int size() const = 0; + + /** + * @brief Significances of the basis functions. + * + * Vector of significance values, one for each basis function. Each value + * is a number between 0 and 1 which provides an a-priori bound on the + * relative error made by discarding the associated coefficient. + * + * @return Vector of significance values. + */ + virtual const std::vector &significance() const = 0; + + /** + * @brief Accuracy of the basis. + * + * Upper bound to the relative error of representing a propagator with + * the given number of basis functions (number between 0 and 1). + * + * @return Accuracy value. + */ + virtual T accuracy() const { - public: - virtual ~AbstractBasis() {} - - /** - * @brief Basis functions on the imaginary time axis. - * - * Set of IR basis functions on the imaginary time (tau) axis, where tau - * is a real number between zero and beta. To get the l-th basis function - * at imaginary time tau, use: - * - * T ul_tau = u(l, tau); - * - * @param l Index of the basis function. - * @param tau Imaginary time variable. - * @return Value of the l-th basis function at time tau. - */ - virtual T u(int l, T tau) const = 0; - - /** - * @brief Basis functions on the reduced Matsubara frequency axis. - * - * Set of IR basis functions on the reduced Matsubara frequency (wn) axis, - * where wn is an integer. These are related to u by the Fourier transform: - * - * uhat(n) = ∫₀^β dτ exp(iπnτ/β) * u(τ) - * - * To get the l-th basis function at some reduced frequency wn, use: - * - * T uhat_l_n = uhat(l, wn); - * - * @param l Index of the basis function. - * @param wn Reduced Matsubara frequency (integer multiplier). - * @return Value of the l-th basis function at frequency wn. - */ - virtual T uhat(int l, int wn) const = 0; - - /** - * @brief Quantum statistic ("F" for fermionic, "B" for bosonic). - * - * @return Character representing the quantum statistics. - */ - virtual char statistics() const = 0; - - /** - * @brief Access basis functions/singular values for given index/indices. - * - * This can be used to truncate the basis to the n most significant - * singular values: `basis[0, n]`. - * - * @param index Index or range of indices. - * @return Pointer to the truncated basis (implementation-defined). - */ - virtual AbstractBasis *operator[](int index) const = 0; - - /** - * @brief Shape of the basis function set. - * - * @return Pair representing the shape (rows, columns). - */ - virtual std::pair shape() const = 0; - - /** - * @brief Number of basis functions / singular values. - * - * @return Size of the basis function set. - */ - virtual int size() const = 0; - - /** - * @brief Significances of the basis functions. - * - * Vector of significance values, one for each basis function. Each value - * is a number between 0 and 1 which provides an a-priori bound on the - * relative error made by discarding the associated coefficient. - * - * @return Vector of significance values. - */ - virtual const std::vector &significance() const = 0; - - /** - * @brief Accuracy of the basis. - * - * Upper bound to the relative error of representing a propagator with - * the given number of basis functions (number between 0 and 1). - * - * @return Accuracy value. - */ - virtual T accuracy() const - { - const auto &sig = significance(); - return !sig.empty() ? sig.back() : static_cast(0); - } + const auto &sig = significance(); + return !sig.empty() ? 
sig.back() : static_cast(0); + } - /** - * @brief Basis cutoff parameter, Λ == β * wmax, or NaN if not present. - * - * @return Cutoff parameter Λ. - */ - virtual T lambda() const = 0; - - /** - * @brief Inverse temperature. - * - * @return Value of β. - */ - virtual T beta() const = 0; - - /** - * @brief Real frequency cutoff or NaN if not present. - * - * @return Maximum real frequency wmax. - */ - virtual T wmax() const = 0; - - /** - * @brief Default sampling points on the imaginary time axis. - * - * @param npoints Minimum number of sampling points to return. - * @return Vector of sampling points on the τ-axis. - */ - virtual std::vector default_tau_sampling_points(int npoints = 0) const = 0; - - /** - * @brief Default sampling points on the imaginary frequency axis. - * - * @param npoints Minimum number of sampling points to return. - * @param positive_only If true, only return non-negative frequencies. - * @return Vector of sampling points on the Matsubara axis. - */ - virtual std::vector default_matsubara_sampling_points( - int npoints = 0, bool positive_only = false) const = 0; - - /** - * @brief Returns true if the sampling is expected to be well-conditioned. - * - * @return True if well-conditioned. - */ - virtual bool is_well_conditioned() const - { - return true; - } - }; + /** + * @brief Basis cutoff parameter, Λ == β * wmax, or NaN if not present. + * + * @return Cutoff parameter Λ. + */ + virtual T lambda() const = 0; + + /** + * @brief Inverse temperature. + * + * @return Value of β. + */ + virtual T beta() const = 0; + + /** + * @brief Real frequency cutoff or NaN if not present. + * + * @return Maximum real frequency wmax. + */ + virtual T wmax() const = 0; + + /** + * @brief Default sampling points on the imaginary time axis. + * + * @param npoints Minimum number of sampling points to return. + * @return Vector of sampling points on the τ-axis. + */ + virtual std::vector + default_tau_sampling_points(int npoints = 0) const = 0; + + /** + * @brief Default sampling points on the imaginary frequency axis. + * + * @param npoints Minimum number of sampling points to return. + * @param positive_only If true, only return non-negative frequencies. + * @return Vector of sampling points on the Matsubara axis. + */ + virtual std::vector + default_matsubara_sampling_points(int npoints = 0, + bool positive_only = false) const = 0; + + /** + * @brief Returns true if the sampling is expected to be well-conditioned. + * + * @return True if well-conditioned. 
+ */ + virtual bool is_well_conditioned() const { return true; } +}; } // namespace sparseir namespace sparseir { -template +template class FiniteTempBasis : public AbstractBasis { public: K kernel; SVEResult sve_result; double accuracy; - double beta; // β + double beta; // β PiecewiseLegendrePolyVector u; PiecewiseLegendrePolyVector v; std::vector s; @@ -175,14 +172,16 @@ class FiniteTempBasis : public AbstractBasis { PiecewiseLegendreFTVector uhat_full; // Constructor - FiniteTempBasis(S statistics, double beta, double omega_max, double epsilon = 0.0, - int max_size = -1, K kernel = K(), SVEResult sve_result = SVEResult()) + FiniteTempBasis(S statistics, double beta, double omega_max, + double epsilon = 0.0, int max_size = -1, K kernel = K(), + SVEResult sve_result = SVEResult()) : kernel(kernel), sve_result(sve_result), beta(beta) { if (beta <= 0.0) throw std::domain_error("Inverse temperature β must be positive"); if (omega_max < 0.0) - throw std::domain_error("Frequency cutoff ωmax must be non-negative"); + throw std::domain_error( + "Frequency cutoff ωmax must be non-negative"); // Partition the SVE result auto part_result = sve_result.part(epsilon, max_size); @@ -204,100 +203,110 @@ class FiniteTempBasis : public AbstractBasis { Eigen::VectorXd u_knots = (beta / 2.0) * (u_.knots.array() + 1.0); Eigen::VectorXd v_knots = omega_max * v_.knots; - u = PiecewiseLegendrePolyVector(u_, u_knots, (beta / 2.0) * u_.delta_x, u_.symmetry); - v = PiecewiseLegendrePolyVector(v_, v_knots, omega_max * v_.delta_x, v_.symmetry); + u = PiecewiseLegendrePolyVector(u_, u_knots, (beta / 2.0) * u_.delta_x, + u_.symmetry); + v = PiecewiseLegendrePolyVector(v_, v_knots, omega_max * v_.delta_x, + v_.symmetry); // Scale singular values - double scale_factor = std::sqrt(beta / 2.0 * omega_max) * std::pow(omega_max, -kernel.getYPower()); + double scale_factor = std::sqrt(beta / 2.0 * omega_max) * + std::pow(omega_max, -kernel.getYPower()); s.resize(L); for (int i = 0; i < L; ++i) s[i] = scale_factor * s_[i]; // Fourier transforms scaling - PiecewiseLegendrePolyVector u_base_full(std::sqrt(beta) * sve_result.u.data, sve_result.u); - uhat_full = PiecewiseLegendreFTVector(u_base_full, statistics, kernel.convRadius()); + PiecewiseLegendrePolyVector u_base_full( + std::sqrt(beta) * sve_result.u.data, sve_result.u); + uhat_full = PiecewiseLegendreFTVector(u_base_full, statistics, + kernel.convRadius()); uhat = uhat_full.slice(0, L); } // Show function (for printing) - void show() const { - std::cout << s.size() << "-element FiniteTempBasis<" << typeid(S).name() << "> with " - << "β = " << beta << ", ωmax = " << getOmegaMax() << " and singular values:\n"; + void show() const + { + std::cout << s.size() << "-element FiniteTempBasis<" << typeid(S).name() + << "> with " + << "β = " << beta << ", ωmax = " << getOmegaMax() + << " and singular values:\n"; for (size_t i = 0; i < s.size() - 1; ++i) std::cout << " " << s[i] << "\n"; std::cout << " " << s.back() << "\n"; } // Overload operator[] for indexing (get a subset of the basis) - FiniteTempBasis operator[](const std::pair& range) const { + FiniteTempBasis operator[](const std::pair &range) const + { int new_size = range.second - range.first + 1; return FiniteTempBasis(statistics(), beta, getOmegaMax(), 0.0, new_size, kernel, sve_result); } // Calculate significance - Eigen::VectorXd significance() const { - Eigen::VectorXd s_vec = Eigen::Map(s.data(), s.size()); + Eigen::VectorXd significance() const + { + Eigen::VectorXd s_vec = + Eigen::Map(s.data(), s.size()); 
return s_vec / s_vec[0]; } // Getter for accuracy - double getAccuracy() const { - return accuracy; - } + double getAccuracy() const { return accuracy; } // Getter for ωmax - double getOmegaMax() const { - return kernel.Lambda() / beta; - } + double getOmegaMax() const { return kernel.Lambda() / beta; } // Getter for SVEResult - const SVEResult& getSVEResult() const { - return sve_result; - } + const SVEResult &getSVEResult() const { return sve_result; } // Getter for kernel - const K& getKernel() const { - return kernel; - } + const K &getKernel() const { return kernel; } // Getter for Λ - double Lambda() const { - return kernel.Lambda(); - } + double Lambda() const { return kernel.Lambda(); } // Default τ sampling points - Eigen::VectorXd defaultTauSamplingPoints() const { - Eigen::VectorXd x = default_sampling_points(sve_result.u, static_cast(s.size())); + Eigen::VectorXd defaultTauSamplingPoints() const + { + Eigen::VectorXd x = + default_sampling_points(sve_result.u, static_cast(s.size())); return (beta / 2.0) * (x.array() + 1.0); } // Default Matsubara sampling points - Eigen::VectorXd defaultMatsubaraSamplingPoints(bool positive_only = false) const { - return defaultMatsubaraSamplingPoints(uhat_full, static_cast(s.size()), false, positive_only); + Eigen::VectorXd + defaultMatsubaraSamplingPoints(bool positive_only = false) const + { + return defaultMatsubaraSamplingPoints( + uhat_full, static_cast(s.size()), false, positive_only); } // Default ω sampling points - Eigen::VectorXd defaultOmegaSamplingPoints() const { - Eigen::VectorXd y = default_sampling_points(sve_result.v, static_cast(s.size())); + Eigen::VectorXd defaultOmegaSamplingPoints() const + { + Eigen::VectorXd y = + default_sampling_points(sve_result.v, static_cast(s.size())); return getOmegaMax() * y.array(); } // Rescale function - FiniteTempBasis rescale(double new_beta) const { + FiniteTempBasis rescale(double new_beta) const + { double new_omega_max = kernel.Lambda() / new_beta; return FiniteTempBasis(statistics(), new_beta, new_omega_max, 0.0, - static_cast(s.size()), kernel, sve_result); + static_cast(s.size()), kernel, + sve_result); } private: // Placeholder statistics function - S statistics() const { - return S(); - } + S statistics() const { return S(); } // Default sampling points function - Eigen::VectorXd default_sampling_points(const PiecewiseLegendrePolyVector& u, int L) const { + Eigen::VectorXd + default_sampling_points(const PiecewiseLegendrePolyVector &u, int L) const + { if (u.xmin() != -1.0 || u.xmax() != 1.0) throw std::runtime_error("Expecting unscaled functions here."); @@ -319,8 +328,8 @@ class FiniteTempBasis : public AbstractBasis { x0[x0.size() - 1] = right; if (x0.size() != L) { - std::cerr << "Warning: Expected " << L << " sampling points, got " - << x0.size() << ".\n"; + std::cerr << "Warning: Expected " << L + << " sampling points, got " << x0.size() << ".\n"; } return x0; @@ -328,9 +337,10 @@ class FiniteTempBasis : public AbstractBasis { } // Default Matsubara sampling points function - Eigen::VectorXd defaultMatsubaraSamplingPoints(const PiecewiseLegendreFTVector& u_hat_full, - int L, bool fence = false, - bool positive_only = false) const { + Eigen::VectorXd defaultMatsubaraSamplingPoints( + const PiecewiseLegendreFTVector &u_hat_full, int L, + bool fence = false, bool positive_only = false) const + { int l_requested = L; // Adjust l_requested based on statistics @@ -350,7 +360,8 @@ class FiniteTempBasis : public AbstractBasis { omega_n.conservativeResize(omega_n.size() + 1); 
omega_n[omega_n.size() - 1] = 0.0; std::sort(omega_n.data(), omega_n.data() + omega_n.size()); - omega_n = omega_n.unaryExpr([](double x) { return std::unique(&x, &x + 1); }); + omega_n = omega_n.unaryExpr( + [](double x) { return std::unique(&x, &x + 1); }); } } @@ -359,7 +370,8 @@ class FiniteTempBasis : public AbstractBasis { expected_size = (expected_size + 1) / 2; if (omega_n.size() != expected_size) { - std::cerr << "Warning: Requested " << expected_size << " sampling frequencies for basis size L = " << L + std::cerr << "Warning: Requested " << expected_size + << " sampling frequencies for basis size L = " << L << ", but got " << omega_n.size() << ".\n"; } @@ -370,9 +382,11 @@ class FiniteTempBasis : public AbstractBasis { } // Fence Matsubara sampling points - void fenceMatsubaraSamplingPoints(Eigen::VectorXd& omega_n, bool positive_only) const { + void fenceMatsubaraSamplingPoints(Eigen::VectorXd &omega_n, + bool positive_only) const + { // Implement fencing logic here... } }; -} // namespace sparseir +} // namespace sparseir diff --git a/include/sparseir/freq.hpp b/include/sparseir/freq.hpp index 5b8f28b..7bdb3f6 100644 --- a/include/sparseir/freq.hpp +++ b/include/sparseir/freq.hpp @@ -2,16 +2,15 @@ #include #include +#include #include #include #include -#include namespace sparseir { // Base abstract class for Statistics -class Statistics -{ +class Statistics { public: virtual ~Statistics() = default; virtual int zeta() const = 0; @@ -19,16 +18,14 @@ class Statistics }; // Fermionic statistics class -class Fermionic : public Statistics -{ +class Fermionic : public Statistics { public: inline int zeta() const override { return 1; } inline bool allowed(int n) const override { return n % 2 != 0; } }; // Bosonic statistics class -class Bosonic : public Statistics -{ +class Bosonic : public Statistics { public: inline int zeta() const override { return 0; } inline bool allowed(int n) const override { return n % 2 == 0; } @@ -46,9 +43,9 @@ inline std::unique_ptr create_statistics(int zeta) // Matsubara frequency class template template -class MatsubaraFreq -{ - static_assert(std::is_base_of::value, "S must be derived from Statistics"); +class MatsubaraFreq { + static_assert(std::is_base_of::value, + "S must be derived from Statistics"); public: int n; @@ -61,7 +58,10 @@ class MatsubaraFreq } inline double value(double beta) const { return n * M_PI / beta; } - inline std::complex value_im(double beta) const { return std::complex(0, value(beta)); } + inline std::complex value_im(double beta) const + { + return std::complex(0, value(beta)); + } inline S statistics() const { return S(); } inline int get_n() const { return n; } @@ -75,42 +75,66 @@ using FermionicFreq = MatsubaraFreq; template inline auto operator+(const MatsubaraFreq &a, const MatsubaraFreq &b) { - return MatsubaraFreq(a.get_n() + b.get_n()); + return MatsubaraFreq(a.get_n() + + b.get_n()); } template inline auto operator-(const MatsubaraFreq &a, const MatsubaraFreq &b) { - return MatsubaraFreq(a.get_n() - b.get_n()); + return MatsubaraFreq(a.get_n() - + b.get_n()); } template -inline MatsubaraFreq operator+(const MatsubaraFreq &a) { return a; } +inline MatsubaraFreq operator+(const MatsubaraFreq &a) +{ + return a; +} template -inline MatsubaraFreq operator-(const MatsubaraFreq &a) { return MatsubaraFreq(-a.get_n()); } +inline MatsubaraFreq operator-(const MatsubaraFreq &a) +{ + return MatsubaraFreq(-a.get_n()); +} -inline BosonicFreq operator*(const BosonicFreq &a, int c) { return BosonicFreq(a.get_n() * c); } -inline 
FermionicFreq operator*(const FermionicFreq &a, int c) { return FermionicFreq(a.get_n() * c); } +inline BosonicFreq operator*(const BosonicFreq &a, int c) +{ + return BosonicFreq(a.get_n() * c); +} +inline FermionicFreq operator*(const FermionicFreq &a, int c) +{ + return FermionicFreq(a.get_n() * c); +} // Utility functions template -inline int sign(const MatsubaraFreq &a) { return (a.get_n() > 0) - (a.get_n() < 0); } +inline int sign(const MatsubaraFreq &a) +{ + return (a.get_n() > 0) - (a.get_n() < 0); +} template -inline BosonicFreq zero() { return BosonicFreq(0); } +inline BosonicFreq zero() +{ + return BosonicFreq(0); +} inline bool is_zero(const FermionicFreq &) { return false; } inline bool is_zero(const BosonicFreq &a) { return a.get_n() == 0; } template -inline bool is_less(const MatsubaraFreq &a, const MatsubaraFreq &b) { return a.get_n() < b.get_n(); } +inline bool is_less(const MatsubaraFreq &a, const MatsubaraFreq &b) +{ + return a.get_n() < b.get_n(); +} // Custom exception for incompatible promotions template inline void promote_rule() { - throw std::invalid_argument("Will not promote between MatsubaraFreq and another type. Use explicit conversion."); + throw std::invalid_argument("Will not promote between MatsubaraFreq and " + "another type. Use explicit conversion."); } // Display function for MatsubaraFreq diff --git a/include/sparseir/gauss.hpp b/include/sparseir/gauss.hpp index 65e99be..8cc6387 100644 --- a/include/sparseir/gauss.hpp +++ b/include/sparseir/gauss.hpp @@ -1,11 +1,11 @@ #pragma once -#include -#include -#include +#include #include +#include #include -#include +#include +#include #include namespace sparseir { @@ -17,9 +17,12 @@ class slice { size_t start, stop, step; slice(size_t start, size_t stop, size_t step = 1) - : start(start), stop(stop), step(step) {} + : start(start), stop(stop), step(step) + { + } - std::vector indices() const { + std::vector indices() const + { std::vector result; for (size_t i = start; i < stop; i += step) { result.push_back(i); @@ -48,62 +51,96 @@ class Rule { // Default constructor Rule() {}; - Rule(const std::vector& x, const std::vector& w, T a = -1, T b = 1) + Rule(const std::vector &x, const std::vector &w, T a = -1, T b = 1) : x(Eigen::Map>(x.data(), x.size())), w(Eigen::Map>(w.data(), w.size())), - a(a), b(b) { + a(a), + b(b) + { this->x_forward = Eigen::VectorX::Zero(x.size()); this->x_backward = Eigen::VectorX::Zero(x.size()); - std::transform(this->x.data(), this->x.data() + this->x.size(), this->x_forward.data(), [a](T xi) { return xi - a; }); - std::transform(this->x.data(), this->x.data() + this->x.size(), this->x_backward.data(), [b](T xi) { return b - xi; }); + std::transform(this->x.data(), this->x.data() + this->x.size(), + this->x_forward.data(), [a](T xi) { return xi - a; }); + std::transform(this->x.data(), this->x.data() + this->x.size(), + this->x_backward.data(), [b](T xi) { return b - xi; }); } // Constructor with x, w, x_forward, x_backward, a, b - Rule(const Eigen::VectorX& x, const Eigen::VectorX& w, Eigen::VectorX x_forward, Eigen::VectorX x_backward, T a = -1, T b = 1) - : x(x), w(w), x_forward(x_forward), x_backward(x_backward), a(a), b(b) {} + Rule(const Eigen::VectorX &x, const Eigen::VectorX &w, + Eigen::VectorX x_forward, Eigen::VectorX x_backward, T a = -1, + T b = 1) + : x(x), w(w), x_forward(x_forward), x_backward(x_backward), a(a), b(b) + { + } // Constructor with x, w, a, b - Rule(const Eigen::VectorX& x, const Eigen::VectorX& w, T a = -1, T b = 1) - : x(x), w(w), a(a), b(b) { - 
this->x_forward = x_forward.size() == 0 ? Eigen::VectorX::Zero(x.size()) : x_forward; - this->x_backward = x_backward.size() == 0 ? Eigen::VectorX::Zero(x.size()) : x_backward; + Rule(const Eigen::VectorX &x, const Eigen::VectorX &w, T a = -1, + T b = 1) + : x(x), w(w), a(a), b(b) + { + this->x_forward = x_forward.size() == 0 + ? Eigen::VectorX::Zero(x.size()) + : x_forward; + this->x_backward = x_backward.size() == 0 + ? Eigen::VectorX::Zero(x.size()) + : x_backward; if (x_forward.size() == 0) { - std::transform(x.data(), x.data() + x.size(), this->x_forward.data(), [a](T xi) { return xi - a; }); + std::transform(x.data(), x.data() + x.size(), + this->x_forward.data(), + [a](T xi) { return xi - a; }); } if (x_backward.size() == 0) { - std::transform(x.data(), x.data() + x.size(), this->x_backward.data(), [b](T xi) { return b - xi; }); + std::transform(x.data(), x.data() + x.size(), + this->x_backward.data(), + [b](T xi) { return b - xi; }); } } - template - Rule(const Rule& other) + Rule(const Rule &other) : x(other.x.template cast()), w(other.w.template cast()), x_forward(other.x_forward.template cast()), - x_backward(other.x_backward.template cast()), + x_backward(other.x_backward.template cast()), a(static_cast(other.a)), - b(static_cast(other.b)) {} + b(static_cast(other.b)) + { + } - Rule reseat(T a, T b) const { + Rule reseat(T a, T b) const + { T scaling = (b - a) / (this->b - this->a); - Eigen::VectorX new_x(x.size()), new_w(w.size()), new_x_forward(x_forward.size()), new_x_backward(x_backward.size()); - std::transform(x.data(), x.data() + x.size(), new_x.data(), [this, scaling, a, b](T xi) { return scaling * (xi - (this->b + this->a) / 2) + (b + a) / 2; }); - std::transform(w.data(), w.data() + w.size(), new_w.data(), [scaling](T wi) { return wi * scaling; }); - std::transform(x_forward.data(), x_forward.data() + x_forward.size(), new_x_forward.data(), [scaling](T xi) { return xi * scaling; }); - std::transform(x_backward.data(), x_backward.data() + x_backward.size(), new_x_backward.data(), [scaling](T xi) { return xi * scaling; }); + Eigen::VectorX new_x(x.size()), new_w(w.size()), + new_x_forward(x_forward.size()), new_x_backward(x_backward.size()); + std::transform(x.data(), x.data() + x.size(), new_x.data(), + [this, scaling, a, b](T xi) { + return scaling * (xi - (this->b + this->a) / 2) + + (b + a) / 2; + }); + std::transform(w.data(), w.data() + w.size(), new_w.data(), + [scaling](T wi) { return wi * scaling; }); + std::transform(x_forward.data(), x_forward.data() + x_forward.size(), + new_x_forward.data(), + [scaling](T xi) { return xi * scaling; }); + std::transform(x_backward.data(), x_backward.data() + x_backward.size(), + new_x_backward.data(), + [scaling](T xi) { return xi * scaling; }); return Rule(new_x, new_w, new_x_forward, new_x_backward, a, b); } - Rule scale(T factor) const { + Rule scale(T factor) const + { Eigen::VectorX new_w(w.size()); - transform(w.data(), w.data() + w.size(), new_w.data(), [factor](T wi) { return wi * factor; }); + transform(w.data(), w.data() + w.size(), new_w.data(), + [factor](T wi) { return wi * factor; }); return Rule(x, new_w, x_forward, x_backward, a, b); } template - Rule piecewise(const std::vector& edges) const { + Rule piecewise(const std::vector &edges) const + { if (!std::is_sorted(edges.begin(), edges.end())) { - throw std::invalid_argument("segments ends must be ordered ascendingly"); + throw std::invalid_argument( + "segments ends must be ordered ascendingly"); } std::vector> rules; for (size_t i = 0; i < edges.size() - 
1; ++i) { @@ -112,12 +149,14 @@ class Rule { return join(rules); } - Rule astype(const std::string& dtype) const { + Rule astype(const std::string &dtype) const + { // Assuming dtype is either "float" or "double" return *this; } - static Rule join(const std::vector>& gauss_list) { + static Rule join(const std::vector> &gauss_list) + { if (gauss_list.empty()) { return Rule(Eigen::VectorX(), Eigen::VectorX()); } @@ -127,18 +166,27 @@ class Rule { T prev_b = a; Eigen::VectorX x, w, x_forward, x_backward; - for (const auto& curr : gauss_list) { + for (const auto &curr : gauss_list) { if (curr.a != prev_b) { throw std::invalid_argument("Gauss rules must be ascending"); } prev_b = curr.b; - Eigen::VectorX curr_x_forward(curr.x_forward.size()), curr_x_backward(curr.x_backward.size()); - std::transform(curr.x_forward.data(), curr.x_forward.data() + curr.x_forward.size(), curr_x_forward.data(), [a, curr](T xi) { return xi + (curr.a - a); }); - std::transform(curr.x_backward.data(), curr.x_backward.data() + curr.x_backward.size(), curr_x_backward.data(), [b, curr](T xi) { return xi + (b - curr.b); }); + Eigen::VectorX curr_x_forward(curr.x_forward.size()), + curr_x_backward(curr.x_backward.size()); + std::transform(curr.x_forward.data(), + curr.x_forward.data() + curr.x_forward.size(), + curr_x_forward.data(), + [a, curr](T xi) { return xi + (curr.a - a); }); + std::transform(curr.x_backward.data(), + curr.x_backward.data() + curr.x_backward.size(), + curr_x_backward.data(), + [b, curr](T xi) { return xi + (b - curr.b); }); x.conservativeResize(x.size() + curr.x.size()); w.conservativeResize(w.size() + curr.w.size()); - x_forward.conservativeResize(x_forward.size() + curr_x_forward.size()); - x_backward.conservativeResize(x_backward.size() + curr_x_backward.size()); + x_forward.conservativeResize(x_forward.size() + + curr_x_forward.size()); + x_backward.conservativeResize(x_backward.size() + + curr_x_backward.size()); x.tail(curr.x.size()) = curr.x; w.tail(curr.w.size()) = curr.w; x_forward.tail(curr_x_forward.size()) = curr_x_forward; @@ -156,21 +204,25 @@ class NestedRule : public Rule { std::vector v; slice vsel; - NestedRule(const std::vector& x, const std::vector& w, const std::vector& v, const std::vector& x_forward = {}, const std::vector& x_backward = {}, T a = -1, T b = 1) - : Rule(x, w, x_forward, x_backward, a, b), v(v), vsel(1, v.size(), 2) {} + NestedRule(const std::vector& x, const std::vector& w, const +std::vector& v, const std::vector& x_forward = {}, const std::vector& +x_backward = {}, T a = -1, T b = 1) : Rule(x, w, x_forward, x_backward, a, +b), v(v), vsel(1, v.size(), 2) {} NestedRule reseat(T a, T b) const { Rule res = Rule::reseat(a, b); std::vector new_v(v.size()); - transform(v.begin(), v.end(), new_v.begin(), [this, a, b](T vi) { return (b - a) / (this->b - this->a) * vi; }); - return NestedRule(res.x, res.w, new_v, res.x_forward, res.x_backward, res.a, res.b); + transform(v.begin(), v.end(), new_v.begin(), [this, a, b](T vi) { return +(b - a) / (this->b - this->a) * vi; }); return NestedRule(res.x, res.w, +new_v, res.x_forward, res.x_backward, res.a, res.b); } NestedRule scale(T factor) const { Rule res = Rule::scale(factor); std::vector new_v(v.size()); - transform(v.begin(), v.end(), new_v.begin(), [factor](T vi) { return vi * factor; }); - return NestedRule(res.x, res.w, new_v, res.x_forward, res.x_backward, res.a, res.b); + transform(v.begin(), v.end(), new_v.begin(), [factor](T vi) { return vi +* factor; }); return NestedRule(res.x, res.w, new_v, res.x_forward, 
+res.x_backward, res.a, res.b); } NestedRule astype(const string& dtype) const { @@ -185,26 +237,33 @@ class NestedRule : public Rule { Gauss-Legendre quadrature with `n` points on [-1, 1]. */ -template -inline Rule legendre(int n){ +template +inline Rule legendre(int n) +{ std::vector x(n), w(n); xprec::gauss_legendre(n, x.data(), w.data()); // cast to T - //Eigen::VectorX new_x = Eigen::Map>(x.data(), x.size()); + // Eigen::VectorX new_x = Eigen::Map>(x.data(), + // x.size()); Eigen::VectorX new_x(x.size()); - std::transform(x.begin(), x.end(), new_x.data(), [](const xprec::DDouble& xi) { return static_cast(xi); }); + std::transform(x.begin(), x.end(), new_x.data(), + [](const xprec::DDouble &xi) { return static_cast(xi); }); Eigen::VectorX new_w(w.size()); - std::transform(w.begin(), w.end(), new_w.data(), [](const xprec::DDouble& wi) { return static_cast(wi); }); + std::transform(w.begin(), w.end(), new_w.data(), + [](const xprec::DDouble &wi) { return static_cast(wi); }); return Rule(new_x, new_w); } template -Eigen::Matrix legendre_collocation(const Rule& rule, int n = -1) { +Eigen::Matrix +legendre_collocation(const Rule &rule, int n = -1) +{ if (n < 0) { n = rule.x.size(); } // Compute the Legendre Vandermonde matrix - Eigen::Matrix lv = sparseir::legvander(rule.x, n-1); + Eigen::Matrix lv = + sparseir::legvander(rule.x, n - 1); for (size_t i = 0; i < rule.w.size(); ++i) { for (size_t j = 0; j < lv.cols(); ++j) { lv(i, j) *= rule.w[i]; @@ -212,7 +271,9 @@ Eigen::Matrix legendre_collocation(const Rule } // !!! do NOT do this !!! // res = res.transpose(); - // This is the so-called aliasing issue. In "debug mode", i.e., when assertions have not been disabled, such common pitfalls are automatically detected. + // This is the so-called aliasing issue. In "debug mode", i.e., when + // assertions have not been disabled, such common pitfalls are automatically + // detected. auto res = lv.transpose(); @@ -229,15 +290,16 @@ Eigen::Matrix legendre_collocation(const Rule } /*template -inline sparseir::Rule convert(const sparseir::Rule &rule) +inline sparseir::Rule convert(const sparseir::Rule +&rule) { // Convert vectors using Eigen::Map to handle the conversion properly Eigen::VectorX x = rule.x.template cast(); Eigen::VectorX w = rule.w.template cast(); - Eigen::VectorX x_forward = rule.x_forward.template cast(); - Eigen::VectorX x_backward = rule.x_backward.template cast(); - TargetType a = static_cast(rule.a); - TargetType b = static_cast(rule.b); + Eigen::VectorX x_forward = rule.x_forward.template +cast(); Eigen::VectorX x_backward = +rule.x_backward.template cast(); TargetType a = +static_cast(rule.a); TargetType b = static_cast(rule.b); return sparseir::Rule(x, w, x_forward, x_backward, a, b); }*/ diff --git a/include/sparseir/kernel.hpp b/include/sparseir/kernel.hpp index e347738..23ede2a 100644 --- a/include/sparseir/kernel.hpp +++ b/include/sparseir/kernel.hpp @@ -1,1132 +1,1106 @@ #pragma once #include -#include #include -#include -#include -#include #include +#include +#include +#include +#include #include -namespace sparseir -{ - // Forward declaration of ReducedKernel - template - class ReducedKernel; - class AbstractSVEHints; - class SVEHintsLogistic; - class SVEHintsRegularizedBose; +namespace sparseir { +// Forward declaration of ReducedKernel +template +class ReducedKernel; +class AbstractSVEHints; +class SVEHintsLogistic; +class SVEHintsRegularizedBose; + +/** + * @brief Abstract base class for an integral kernel K(x, y). 
+ * + * AbstractKernel represents a real binary function K(x, y) used in a Fredholm + * integral equation of the first kind: + * + * u(x) = ∫ K(x, y) v(y) dy + * + * where x ∈ [xmin, xmax] and y ∈ [ymin, ymax]. + * + * In general, the kernel is applied to a scaled spectral function ρ'(y) as: + * + * ∫ K(x, y) ρ'(y) dy, + * + * where ρ'(y) = w(y) ρ(y). + */ +class AbstractKernel { +public: + double lambda_; + // Constructor + AbstractKernel(double lambda) : lambda_(lambda) { } + + /** + * @brief Evaluate kernel at point (x, y). + * + * For given x, y, return the value of K(x, y). The parameters x_plus and + * x_minus, if given, shall contain the values of x - xmin and xmax - x, + * respectively. This is useful if either difference is to be formed and + * cancellation is expected. + * + * @param x The x value. + * @param y The y value. + * @param x_plus Optional. x - xmin. + * @param x_minus Optional. xmax - x. + * @return The value of K(x, y). + */ + virtual double operator()( + double x, double y, + double x_plus = std::numeric_limits::quiet_NaN(), + double x_minus = std::numeric_limits::quiet_NaN()) const = 0; + + /* + sve_hints(double epsilon) const + { + return std::make_shared(*this, epsilon); + } + */ + + /** + * @brief Return symmetrized kernel K(x, y) + sign * K(x, -y). + * + * Should be overridden by derived classes if they support symmetrization. + * + * @param sign The sign (+1 or -1). + * @return A shared pointer to the symmetrized kernel. + */ + // virtual std::shared_ptr get_symmetrized(int sign) const + //{ + // throw std::runtime_error("get_symmetrized not implemented in base + // class"); + // } + /** + * @brief Return tuple (xmin, xmax) delimiting the range of allowed x + * values. + * + * @return A pair containing xmin and xmax. + */ + virtual std::pair xrange() const + { + return std::make_pair(-1.0, 1.0); + } /** - * @brief Abstract base class for an integral kernel K(x, y). + * @brief Return tuple (ymin, ymax) delimiting the range of allowed y + * values. * - * AbstractKernel represents a real binary function K(x, y) used in a Fredholm - * integral equation of the first kind: + * @return A pair containing ymin and ymax. + */ + virtual std::pair yrange() const + { + return std::make_pair(-1.0, 1.0); + } + + /** + * @brief Check if the kernel is centrosymmetric. * - * u(x) = ∫ K(x, y) v(y) dy + * Returns true if and only if K(x, y) == K(-x, -y) for all values of x and + * y. This allows the kernel to be block-diagonalized, speeding up the + * singular value expansion by a factor of 4. Defaults to false. * - * where x ∈ [xmin, xmax] and y ∈ [ymin, ymax]. + * @return True if the kernel is centrosymmetric, false otherwise. + */ + virtual bool is_centrosymmetric() const { return false; } + + /** + * @brief Power with which the y coordinate scales. * - * In general, the kernel is applied to a scaled spectral function ρ'(y) as: + * @return The power with which y scales. + */ + virtual int ypower() const { return 0; } + + /** + * @brief Convergence radius of the Matsubara basis asymptotic model. * - * ∫ K(x, y) ρ'(y) dy, + * For improved relative numerical accuracy, the IR basis functions on the + * Matsubara axis can be evaluated from an asymptotic expression for + * abs(n) > conv_radius. If conv_radius is infinity, then the asymptotics + * are unused (the default). * - * where ρ'(y) = w(y) ρ(y). + * @return The convergence radius. 
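+ *
+ * For example, LogisticKernel below reports conv_radius = 40 * Λ, so for
+ * Λ = 1000 the asymptotic expressions would be used for abs(n) > 40000.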
*/ - class AbstractKernel + virtual double conv_radius() const { - public: - double lambda_; - // Constructor - AbstractKernel(double lambda) : lambda_(lambda) {} - - /** - * @brief Evaluate kernel at point (x, y). - * - * For given x, y, return the value of K(x, y). The parameters x_plus and - * x_minus, if given, shall contain the values of x - xmin and xmax - x, - * respectively. This is useful if either difference is to be formed and - * cancellation is expected. - * - * @param x The x value. - * @param y The y value. - * @param x_plus Optional. x - xmin. - * @param x_minus Optional. xmax - x. - * @return The value of K(x, y). - */ - virtual double operator()(double x, double y, double x_plus = std::numeric_limits::quiet_NaN(), - double x_minus = std::numeric_limits::quiet_NaN()) const = 0; - - /* - sve_hints(double epsilon) const - { - return std::make_shared(*this, epsilon); - } - */ - - /** - * @brief Return symmetrized kernel K(x, y) + sign * K(x, -y). - * - * Should be overridden by derived classes if they support symmetrization. - * - * @param sign The sign (+1 or -1). - * @return A shared pointer to the symmetrized kernel. - */ - //virtual std::shared_ptr get_symmetrized(int sign) const - //{ - // throw std::runtime_error("get_symmetrized not implemented in base class"); - //} - - /** - * @brief Return tuple (xmin, xmax) delimiting the range of allowed x values. - * - * @return A pair containing xmin and xmax. - */ - virtual std::pair xrange() const - { - return std::make_pair(-1.0, 1.0); - } + return std::numeric_limits::infinity(); + } - /** - * @brief Return tuple (ymin, ymax) delimiting the range of allowed y values. - * - * @return A pair containing ymin and ymax. - */ - virtual std::pair yrange() const - { - return std::make_pair(-1.0, 1.0); + /** + * @brief Return the weight function for given statistics. + * + * @param statistics 'F' for fermions or 'B' for bosons. + * @return A function representing the weight function w(y). + */ + virtual std::function weight_func(char statistics) const + { + if (statistics != 'F' && statistics != 'B') { + throw std::invalid_argument( + "statistics must be 'F' for fermions or 'B' for bosons"); } + return [](double /*x*/) { return 1.0; }; + } - /** - * @brief Check if the kernel is centrosymmetric. - * - * Returns true if and only if K(x, y) == K(-x, -y) for all values of x and y. - * This allows the kernel to be block-diagonalized, speeding up the singular - * value expansion by a factor of 4. Defaults to false. - * - * @return True if the kernel is centrosymmetric, false otherwise. - */ - virtual bool is_centrosymmetric() const - { - return false; - } + virtual ~AbstractKernel() = default; +}; - /** - * @brief Power with which the y coordinate scales. - * - * @return The power with which y scales. - */ - virtual int ypower() const - { - return 0; - } +class AbstractReducedKernel : public AbstractKernel { +public: + int sign; + std::shared_ptr inner; - /** - * @brief Convergence radius of the Matsubara basis asymptotic model. - * - * For improved relative numerical accuracy, the IR basis functions on the - * Matsubara axis can be evaluated from an asymptotic expression for - * abs(n) > conv_radius. If conv_radius is infinity, then the asymptotics are - * unused (the default). - * - * @return The convergence radius. 
- */ - virtual double conv_radius() const - { - return std::numeric_limits::infinity(); + // Constructor + AbstractReducedKernel(std::shared_ptr inner_kernel, + int sign) + : AbstractKernel(inner_kernel->lambda_), + inner(std::move(inner_kernel)), + sign(sign) + { + // Validate inputs + if (!inner->is_centrosymmetric()) { + throw std::invalid_argument("Inner kernel must be centrosymmetric"); } - - /** - * @brief Return the weight function for given statistics. - * - * @param statistics 'F' for fermions or 'B' for bosons. - * @return A function representing the weight function w(y). - */ - virtual std::function weight_func(char statistics) const - { - if (statistics != 'F' && statistics != 'B') - { - throw std::invalid_argument("statistics must be 'F' for fermions or 'B' for bosons"); - } - return [](double /*x*/) - { return 1.0; }; + if (sign != 1 && sign != -1) { + throw std::invalid_argument("sign must be -1 or 1"); } + } +}; - virtual ~AbstractKernel() = default; - }; +inline double callreduced(const AbstractReducedKernel &kernel, double x, + double y, double x_plus, double x_minus) +{ + x_plus += 1; + auto K_plus = (*kernel.inner)(x, +y, x_plus, x_minus); + auto K_minus = (*kernel.inner)(x, -y, x_minus, x_plus); + return K_plus + kernel.sign * K_minus; +} + +/** + * @brief Fermionic/bosonic analytical continuation kernel. + * + * In dimensionless variables x = 2τ/β - 1, y = βω/Λ, the integral kernel is a + * function on [-1, 1] × [-1, 1]: + * + * K(x, y) = exp(-Λ y (x + 1) / 2) / (1 + exp(-Λ y)) + * + * LogisticKernel is a fermionic analytic continuation kernel. + * Nevertheless, one can model the τ dependence of a bosonic correlation + * function as follows: + * + * ∫ [exp(-Λ y (x + 1) / 2) / (1 - exp(-Λ y))] ρ(y) dy = ∫ K(x, y) ρ'(y) dy, + * + * with ρ'(y) = w(y) ρ(y), where the weight function is given by w(y) = 1 / + * tanh(Λ y / 2). + */ +class LogisticKernel : public AbstractKernel { +public: + double lambda_; ///< The kernel cutoff Λ. - class AbstractReducedKernel : public AbstractKernel + /** + * @brief Constructor for LogisticKernel. + * + * @param lambda The kernel cutoff Λ. + */ + LogisticKernel(double lambda) : AbstractKernel(lambda) { - public: - int sign; - std::shared_ptr inner; - - // Constructor - AbstractReducedKernel(std::shared_ptr inner_kernel, int sign) - : AbstractKernel(inner_kernel->lambda_), inner(std::move(inner_kernel)), sign(sign) - { - // Validate inputs - if (!inner->is_centrosymmetric()) - { - throw std::invalid_argument("Inner kernel must be centrosymmetric"); - } - if (sign != 1 && sign != -1) - { - throw std::invalid_argument("sign must be -1 or 1"); - } + if (lambda < 0) { + throw std::domain_error("Kernel cutoff Λ must be non-negative"); } - }; + } - inline double callreduced(const AbstractReducedKernel &kernel, double x, double y, double x_plus, double x_minus) + /** + * @brief Evaluate the kernel at point (x, y). + * + * @param x The x value. + * @param y The y value. + * @param x_plus Optional. x - xmin. + * @param x_minus Optional. xmax - x. + * @return The value of K(x, y). 
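+ *
+ * A hypothetical example:
+ * @code
+ *   LogisticKernel K(42.0);
+ *   double val = K(0.2, -0.7); // exp(-Λ y (x + 1) / 2) / (1 + exp(-Λ y))
+ * @endcode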
+ */ + double operator()(double x, double y, + double x_plus = std::numeric_limits::quiet_NaN(), + double x_minus = std::numeric_limits::quiet_NaN()) + const override + { + // Check that x and y are within the valid ranges + std::pair x_range = xrange(); + double xmin = x_range.first; + double xmax = x_range.second; + if (x < xmin || x > xmax) { + throw std::out_of_range("x value not in range [-1, 1]"); + } + std::pair y_range = yrange(); + double ymin = y_range.first; + double ymax = y_range.second; + if (y < ymin || y > ymax) { + throw std::out_of_range("y value not in range [-1, 1]"); + } + + std::tuple uv_values = + compute_uv(x, y, x_plus, x_minus); + double u_plus = std::get<0>(uv_values); + double u_minus = std::get<1>(uv_values); + double v = std::get<2>(uv_values); + + return compute(u_plus, u_minus, v); + } + + // Inside class LogisticKernel definition + /* + std::shared_ptr sve_hints(double epsilon) const { - x_plus += 1; - auto K_plus = (*kernel.inner)(x, +y, x_plus, x_minus); - auto K_minus = (*kernel.inner)(x, -y, x_minus, x_plus); - return K_plus + kernel.sign * K_minus; + return std::make_shared(*this, epsilon); } + */ /** - * @brief Fermionic/bosonic analytical continuation kernel. + * @brief Check if the kernel is centrosymmetric. * - * In dimensionless variables x = 2τ/β - 1, y = βω/Λ, the integral kernel is a function on [-1, 1] × [-1, 1]: + * @return True, since LogisticKernel is centrosymmetric. + */ + bool is_centrosymmetric() const override { return true; } + + /** + * @brief Convergence radius of the Matsubara basis asymptotic model. * - * K(x, y) = exp(-Λ y (x + 1) / 2) / (1 + exp(-Λ y)) + * For LogisticKernel, conv_radius = 40 * Λ. * - * LogisticKernel is a fermionic analytic continuation kernel. - * Nevertheless, one can model the τ dependence of a bosonic correlation function as follows: + * @return The convergence radius. + */ + double conv_radius() const override { return 40.0 * lambda_; } + + /** + * @brief Return the weight function for given statistics. * - * ∫ [exp(-Λ y (x + 1) / 2) / (1 - exp(-Λ y))] ρ(y) dy = ∫ K(x, y) ρ'(y) dy, + * @param statistics 'F' for fermions or 'B' for bosons. + * @return A function representing the weight function w(y). + */ + std::function weight_func(char statistics) const override + { + if (statistics == 'F') { + // Fermionic weight function: w(y) == 1 + return [](double /*y*/) { return 1.0; }; + } else if (statistics == 'B') { + // Bosonic weight function: w(y) == 1 / tanh(Λ*y/2) + double lambda = lambda_; + return [lambda](double y) { + return 1.0 / std::tanh(0.5 * lambda * y); + }; + } else { + throw std::invalid_argument( + "statistics must be 'F' for fermions or 'B' for bosons"); + } + } + +private: + /** + * @brief Compute the variables u_plus, u_minus, v. * - * with ρ'(y) = w(y) ρ(y), where the weight function is given by w(y) = 1 / tanh(Λ y / 2). + * @param x The x value. + * @param y The y value. + * @param x_plus x - xmin. + * @param x_minus xmax - x. + * @return A tuple containing u_plus, u_minus, and v. */ - class LogisticKernel : public AbstractKernel + std::tuple + compute_uv(double x, double y, double x_plus, double x_minus) const { - public: - double lambda_; ///< The kernel cutoff Λ. - - /** - * @brief Constructor for LogisticKernel. - * - * @param lambda The kernel cutoff Λ. 
- */ - LogisticKernel(double lambda) : AbstractKernel(lambda) - { - if (lambda < 0) - { - throw std::domain_error("Kernel cutoff Λ must be non-negative"); - } + // Compute u_plus, u_minus, v + if (std::isnan(x_plus)) { + x_plus = 1.0 + x; } + if (std::isnan(x_minus)) { + x_minus = 1.0 - x; + } + double u_plus = 0.5 * x_plus; + double u_minus = 0.5 * x_minus; + double v = lambda_ * y; + return std::make_tuple(u_plus, u_minus, v); + } - /** - * @brief Evaluate the kernel at point (x, y). - * - * @param x The x value. - * @param y The y value. - * @param x_plus Optional. x - xmin. - * @param x_minus Optional. xmax - x. - * @return The value of K(x, y). - */ - double operator()(double x, double y, double x_plus = std::numeric_limits::quiet_NaN(), - double x_minus = std::numeric_limits::quiet_NaN()) const override - { - // Check that x and y are within the valid ranges - std::pair x_range = xrange(); - double xmin = x_range.first; - double xmax = x_range.second; - if (x < xmin || x > xmax) - { - throw std::out_of_range("x value not in range [-1, 1]"); - } - std::pair y_range = yrange(); - double ymin = y_range.first; - double ymax = y_range.second; - if (y < ymin || y > ymax) - { - throw std::out_of_range("y value not in range [-1, 1]"); - } + /** + * @brief Compute the kernel value using u_plus, u_minus, and v. + * + * @param u_plus Computed u_plus. + * @param u_minus Computed u_minus. + * @param v Computed v. + * @return The value of K(x, y). + */ + double compute(double u_plus, double u_minus, double v) const + { + double abs_v = std::abs(v); - std::tuple uv_values = compute_uv(x, y, x_plus, x_minus); - double u_plus = std::get<0>(uv_values); - double u_minus = std::get<1>(uv_values); - double v = std::get<2>(uv_values); + double numerator; + double denominator; - return compute(u_plus, u_minus, v); + if (v >= 0) { + numerator = exp_impl(-u_plus * abs_v); + } else { + numerator = exp_impl(-u_minus * abs_v); } - // Inside class LogisticKernel definition - /* - std::shared_ptr sve_hints(double epsilon) const - { - return std::make_shared(*this, epsilon); - } - */ - - /** - * @brief Check if the kernel is centrosymmetric. - * - * @return True, since LogisticKernel is centrosymmetric. - */ - bool is_centrosymmetric() const override - { - return true; - } + denominator = 1.0 + exp_impl(-abs_v); - /** - * @brief Convergence radius of the Matsubara basis asymptotic model. - * - * For LogisticKernel, conv_radius = 40 * Λ. - * - * @return The convergence radius. - */ - double conv_radius() const override - { - return 40.0 * lambda_; + return numerator / denominator; + } +}; + +/** + * @brief Regularized bosonic analytical continuation kernel. + * + * In dimensionless variables x = 2τ/β - 1, y = βω/Λ, the integral kernel is a + * function on [-1, 1] × [-1, 1]: + * + * K(x, y) = y * exp(-Λ y (x + 1) / 2) / (exp(-Λ y) - 1) + * + * Care has to be taken in evaluating this expression around y = 0. + */ +class RegularizedBoseKernel : public AbstractKernel { +public: + /** + * @brief Constructor for RegularizedBoseKernel. + * + * @param lambda The kernel cutoff Λ. + */ + explicit RegularizedBoseKernel(double lambda) : AbstractKernel(lambda) + { + if (lambda < 0) { + throw std::domain_error("Kernel cutoff Λ must be non-negative"); } + } - /** - * @brief Return the weight function for given statistics. - * - * @param statistics 'F' for fermions or 'B' for bosons. - * @return A function representing the weight function w(y). 
- */ - std::function weight_func(char statistics) const override - { - if (statistics == 'F') - { - // Fermionic weight function: w(y) == 1 - return [](double /*y*/) - { return 1.0; }; - } - else if (statistics == 'B') - { - // Bosonic weight function: w(y) == 1 / tanh(Λ*y/2) - double lambda = lambda_; - return [lambda](double y) - { - return 1.0 / std::tanh(0.5 * lambda * y); - }; - } - else - { - throw std::invalid_argument("statistics must be 'F' for fermions or 'B' for bosons"); - } + /** + * @brief Evaluate the kernel at point (x, y). + * + * @param x The x value. + * @param y The y value. + * @param x_plus Optional. x - xmin. + * @param x_minus Optional. xmax - x. + * @return The value of K(x, y). + */ + double operator()(double x, double y, + double x_plus = std::numeric_limits::quiet_NaN(), + double x_minus = std::numeric_limits::quiet_NaN()) + const override + { + // Check that x and y are within the valid ranges + std::pair xrange_values = xrange(); + double xmin = xrange_values.first; + double xmax = xrange_values.second; + if (x < xmin || x > xmax) { + throw std::out_of_range("x value not in range [-1, 1]"); } - - private: - /** - * @brief Compute the variables u_plus, u_minus, v. - * - * @param x The x value. - * @param y The y value. - * @param x_plus x - xmin. - * @param x_minus xmax - x. - * @return A tuple containing u_plus, u_minus, and v. - */ - std::tuple compute_uv(double x, double y, double x_plus, double x_minus) const - { - // Compute u_plus, u_minus, v - if (std::isnan(x_plus)) - { - x_plus = 1.0 + x; - } - if (std::isnan(x_minus)) - { - x_minus = 1.0 - x; - } - double u_plus = 0.5 * x_plus; - double u_minus = 0.5 * x_minus; - double v = lambda_ * y; - return std::make_tuple(u_plus, u_minus, v); + std::pair yrange_values = yrange(); + double ymin = yrange_values.first; + double ymax = yrange_values.second; + if (y < ymin || y > ymax) { + throw std::out_of_range("y value not in range [-1, 1]"); } - /** - * @brief Compute the kernel value using u_plus, u_minus, and v. - * - * @param u_plus Computed u_plus. - * @param u_minus Computed u_minus. - * @param v Computed v. - * @return The value of K(x, y). - */ - double compute(double u_plus, double u_minus, double v) const - { - double abs_v = std::abs(v); - - double numerator; - double denominator; - - if (v >= 0) - { - numerator = exp_impl(-u_plus * abs_v); - } - else - { - numerator = exp_impl(-u_minus * abs_v); - } + std::tuple uv_values = + compute_uv(x, y, x_plus, x_minus); + double u_plus = std::get<0>(uv_values); + double u_minus = std::get<1>(uv_values); + double v = std::get<2>(uv_values); - denominator = 1.0 + exp_impl(-abs_v); + return compute(u_plus, u_minus, v); + } - return numerator / denominator; - } - }; + /** + * @brief Check if the kernel is centrosymmetric. + * + * @return True, since RegularizedBoseKernel is centrosymmetric. + */ + bool is_centrosymmetric() const override { return true; } /** - * @brief Regularized bosonic analytical continuation kernel. + * @brief Returns the power with which y scales. * - * In dimensionless variables x = 2τ/β - 1, y = βω/Λ, the integral kernel is a function on [-1, 1] × [-1, 1]: + * For RegularizedBoseKernel, ypower = 1. * - * K(x, y) = y * exp(-Λ y (x + 1) / 2) / (exp(-Λ y) - 1) + * @return The power with which y scales. + */ + int ypower() const override { return 1; } + + /** + * @brief Convergence radius of the Matsubara basis asymptotic model. + * + * For RegularizedBoseKernel, conv_radius = 40 * Λ. + * + * @return The convergence radius. 
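+ *
+ * E.g., for Λ = 10 the asymptotic model applies once abs(n) > 400.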
+ */ + double conv_radius() const override { return 40.0 * lambda_; } + + /** + * @brief Return the weight function for given statistics. * - * Care has to be taken in evaluating this expression around y = 0. + * @param statistics 'F' for fermions or 'B' for bosons. + * @return A function representing the weight function w(y). */ - class RegularizedBoseKernel : public AbstractKernel + std::function weight_func(char statistics) const override { - public: - /** - * @brief Constructor for RegularizedBoseKernel. - * - * @param lambda The kernel cutoff Λ. - */ - explicit RegularizedBoseKernel(double lambda) : AbstractKernel(lambda) - { - if (lambda < 0) - { - throw std::domain_error("Kernel cutoff Λ must be non-negative"); - } + if (statistics == 'F') { + throw std::runtime_error( + "Kernel is designed for bosonic functions"); + } else if (statistics == 'B') { + // Bosonic weight function: w(y) == 1 / y + return [](double y) { return 1.0 / y; }; + } else { + throw std::invalid_argument( + "statistics must be 'F' for fermions or 'B' for bosons"); } + } - /** - * @brief Evaluate the kernel at point (x, y). - * - * @param x The x value. - * @param y The y value. - * @param x_plus Optional. x - xmin. - * @param x_minus Optional. xmax - x. - * @return The value of K(x, y). - */ - double operator()(double x, double y, double x_plus = std::numeric_limits::quiet_NaN(), - double x_minus = std::numeric_limits::quiet_NaN()) const override - { - // Check that x and y are within the valid ranges - std::pair xrange_values = xrange(); - double xmin = xrange_values.first; - double xmax = xrange_values.second; - if (x < xmin || x > xmax) - { - throw std::out_of_range("x value not in range [-1, 1]"); - } - std::pair yrange_values = yrange(); - double ymin = yrange_values.first; - double ymax = yrange_values.second; - if (y < ymin || y > ymax) - { - throw std::out_of_range("y value not in range [-1, 1]"); - } - - std::tuple uv_values = compute_uv(x, y, x_plus, x_minus); - double u_plus = std::get<0>(uv_values); - double u_minus = std::get<1>(uv_values); - double v = std::get<2>(uv_values); - - return compute(u_plus, u_minus, v); +private: + /** + * @brief Compute the variables u_plus, u_minus, v. + * + * @param x The x value. + * @param y The y value. + * @param x_plus x - xmin. + * @param x_minus xmax - x. + * @return A tuple containing u_plus, u_minus, and v. + */ + std::tuple + compute_uv(double x, double y, double x_plus, double x_minus) const + { + // Compute u_plus, u_minus, v + if (std::isnan(x_plus)) { + x_plus = 1.0 + x; } - - /** - * @brief Check if the kernel is centrosymmetric. - * - * @return True, since RegularizedBoseKernel is centrosymmetric. - */ - bool is_centrosymmetric() const override - { - return true; + if (std::isnan(x_minus)) { + x_minus = 1.0 - x; } + double u_plus = 0.5 * x_plus; + double u_minus = 0.5 * x_minus; + double v = lambda_ * y; + return std::make_tuple(u_plus, u_minus, v); + } - /** - * @brief Returns the power with which y scales. - * - * For RegularizedBoseKernel, ypower = 1. - * - * @return The power with which y scales. - */ - int ypower() const override - { - return 1; + /** + * @brief Compute the kernel value using u_plus, u_minus, and v. + * + * @param u_plus Computed u_plus. + * @param u_minus Computed u_minus. + * @param v Computed v. + * @return The value of K(x, y). + */ + double compute(double u_plus, double u_minus, double v) const + { + double absv = std::abs(v); + double enum_val = std::exp(-absv * (v >= 0 ? 
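+            // decaying exponent: the u_plus branch for v >= 0, else u_minus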
u_plus : u_minus)); + + // Handle the tricky expression v / (exp(v) - 1) + double denom; + if (absv >= 1e-200) { + denom = absv / std::expm1(-absv); + } else { + denom = -1; // Assuming T is a floating-point type } - /** - * @brief Convergence radius of the Matsubara basis asymptotic model. - * - * For RegularizedBoseKernel, conv_radius = 40 * Λ. - * - * @return The convergence radius. - */ - double conv_radius() const override - { - return 40.0 * lambda_; - } + return -1 / static_cast(this->lambda_) * enum_val * denom; + } +}; + +/** + * @brief Restriction of centrosymmetric kernel to positive interval. + * + * For a kernel K on [-1, 1] × [-1, 1] that is centrosymmetric, i.e., + * K(x, y) = K(-x, -y), it is straightforward to show that the left/right + * singular vectors can be chosen as either odd or even functions. + * + * Consequently, they are singular functions of a reduced kernel K_red on + * [0, 1] × [0, 1] that is given as either: + * + * K_red(x, y) = K(x, y) ± K(x, -y) + * + * This kernel is what this class represents. The full singular functions can be + * reconstructed by (anti-)symmetrically continuing them to the negative axis. + */ +template +class ReducedKernel : public AbstractKernel { +public: + std::shared_ptr + inner_kernel_; ///< The inner kernel K. + int sign_; ///< The sign (+1 or -1). - /** - * @brief Return the weight function for given statistics. - * - * @param statistics 'F' for fermions or 'B' for bosons. - * @return A function representing the weight function w(y). - */ - std::function weight_func(char statistics) const override - { - if (statistics == 'F') - { - throw std::runtime_error("Kernel is designed for bosonic functions"); - } - else if (statistics == 'B') - { - // Bosonic weight function: w(y) == 1 / y - return [](double y) - { return 1.0 / y; }; - } - else - { - throw std::invalid_argument("statistics must be 'F' for fermions or 'B' for bosons"); - } + /** + * @brief Constructor for ReducedKernel. + * + * @param inner_kernel The inner kernel K. + * @param sign The sign (+1 or -1). Must satisfy abs(sign) == 1. + */ + ReducedKernel(std::shared_ptr inner_kernel, int sign) + : AbstractKernel(inner_kernel->lambda_), // Initialize base class + inner_kernel_(std::move(inner_kernel)), + sign_(sign) + { + if (!inner_kernel_->is_centrosymmetric()) { + throw std::invalid_argument("Inner kernel must be centrosymmetric"); } - - private: - /** - * @brief Compute the variables u_plus, u_minus, v. - * - * @param x The x value. - * @param y The y value. - * @param x_plus x - xmin. - * @param x_minus xmax - x. - * @return A tuple containing u_plus, u_minus, and v. - */ - std::tuple compute_uv(double x, double y, double x_plus, double x_minus) const - { - // Compute u_plus, u_minus, v - if (std::isnan(x_plus)) - { - x_plus = 1.0 + x; - } - if (std::isnan(x_minus)) - { - x_minus = 1.0 - x; - } - double u_plus = 0.5 * x_plus; - double u_minus = 0.5 * x_minus; - double v = lambda_ * y; - return std::make_tuple(u_plus, u_minus, v); + if (sign != 1 && sign != -1) { + throw std::invalid_argument("sign must be -1 or 1"); } + } - /** - * @brief Compute the kernel value using u_plus, u_minus, and v. - * - * @param u_plus Computed u_plus. - * @param u_minus Computed u_minus. - * @param v Computed v. - * @return The value of K(x, y). - */ - double compute(double u_plus, double u_minus, double v) const{ - double absv = std::abs(v); - double enum_val = std::exp(-absv * (v >= 0 ? 
u_plus : u_minus)); - - // Handle the tricky expression v / (exp(v) - 1) - double denom; - if (absv >= 1e-200) - { - denom = absv / std::expm1(-absv); - } - else - { - denom = -1; // Assuming T is a floating-point type - } - - return -1 / static_cast(this->lambda_) * enum_val * denom; - } - }; + /** + * @brief Evaluate the reduced kernel at point (x, y). + * + * @param x The x value. + * @param y The y value. + * @param x_plus Optional. x - xmin. + * @param x_minus Optional. xmax - x. + * @return The value of K_red(x, y). + */ + double operator()(double x, double y, + double x_plus = std::numeric_limits::quiet_NaN(), + double x_minus = std::numeric_limits::quiet_NaN()) + const override + { + return call_reduced(x, y, x_plus, x_minus); + } /** - * @brief Restriction of centrosymmetric kernel to positive interval. + * @brief Return tuple (xmin, xmax) delimiting the range of allowed x + * values. * - * For a kernel K on [-1, 1] × [-1, 1] that is centrosymmetric, i.e., - * K(x, y) = K(-x, -y), it is straightforward to show that the left/right - * singular vectors can be chosen as either odd or even functions. + * For ReducedKernel, xrange is modified to [0, xmax_inner]. * - * Consequently, they are singular functions of a reduced kernel K_red on - * [0, 1] × [0, 1] that is given as either: + * @return A pair containing xmin and xmax. + */ + std::pair xrange() const override + { + auto range = inner_kernel_->xrange(); + return std::make_pair(0.0, range.second); + } + + /** + * @brief Return tuple (ymin, ymax) delimiting the range of allowed y + * values. * - * K_red(x, y) = K(x, y) ± K(x, -y) + * For ReducedKernel, yrange is modified to [0, ymax_inner]. * - * This kernel is what this class represents. The full singular functions can be - * reconstructed by (anti-)symmetrically continuing them to the negative axis. + * @return A pair containing ymin and ymax. */ - template - class ReducedKernel : public AbstractKernel + std::pair yrange() const override { - public: - std::shared_ptr inner_kernel_; ///< The inner kernel K. - int sign_; ///< The sign (+1 or -1). - - /** - * @brief Constructor for ReducedKernel. - * - * @param inner_kernel The inner kernel K. - * @param sign The sign (+1 or -1). Must satisfy abs(sign) == 1. - */ - ReducedKernel(std::shared_ptr inner_kernel, int sign) - : AbstractKernel(inner_kernel->lambda_), // Initialize base class - inner_kernel_(std::move(inner_kernel)), - sign_(sign) - { - if (!inner_kernel_->is_centrosymmetric()) - { - throw std::invalid_argument("Inner kernel must be centrosymmetric"); - } - if (sign != 1 && sign != -1) - { - throw std::invalid_argument("sign must be -1 or 1"); - } - } - - /** - * @brief Evaluate the reduced kernel at point (x, y). - * - * @param x The x value. - * @param y The y value. - * @param x_plus Optional. x - xmin. - * @param x_minus Optional. xmax - x. - * @return The value of K_red(x, y). - */ - double operator()(double x, double y, double x_plus = std::numeric_limits::quiet_NaN(), - double x_minus = std::numeric_limits::quiet_NaN()) const override - { - return call_reduced(x, y, x_plus, x_minus); - } - - /** - * @brief Return tuple (xmin, xmax) delimiting the range of allowed x values. - * - * For ReducedKernel, xrange is modified to [0, xmax_inner]. - * - * @return A pair containing xmin and xmax. 
- */ - std::pair xrange() const override - { - auto range = inner_kernel_->xrange(); - return std::make_pair(0.0, range.second); - } + auto range = inner_kernel_->yrange(); + return std::make_pair(0.0, range.second); + } - /** - * @brief Return tuple (ymin, ymax) delimiting the range of allowed y values. - * - * For ReducedKernel, yrange is modified to [0, ymax_inner]. - * - * @return A pair containing ymin and ymax. - */ - std::pair yrange() const override - { - auto range = inner_kernel_->yrange(); - return std::make_pair(0.0, range.second); - } + /** + * @brief Check if the kernel is centrosymmetric. + * + * @return False, since ReducedKernel cannot be symmetrized further. + */ + bool is_centrosymmetric() const override { return false; } - /** - * @brief Check if the kernel is centrosymmetric. - * - * @return False, since ReducedKernel cannot be symmetrized further. - */ - bool is_centrosymmetric() const override - { - return false; - } + /** + * @brief Returns the power with which y scales. + * + * @return The ypower of the inner kernel. + */ + int ypower() const override { return inner_kernel_->ypower(); } - /** - * @brief Returns the power with which y scales. - * - * @return The ypower of the inner kernel. - */ - int ypower() const override - { - return inner_kernel_->ypower(); - } + /** + * @brief Convergence radius of the Matsubara basis asymptotic model. + * + * @return The convergence radius of the inner kernel. + */ + double conv_radius() const override { return inner_kernel_->conv_radius(); } - /** - * @brief Convergence radius of the Matsubara basis asymptotic model. - * - * @return The convergence radius of the inner kernel. - */ - double conv_radius() const override - { - return inner_kernel_->conv_radius(); +private: + /** + * @brief Evaluate the reduced kernel. + * + * @param x The x value. + * @param y The y value. + * @param x_plus x - xmin. + * @param x_minus xmax - x. + * @return The value of K_red(x, y). + */ + double call_reduced(double x, double y, double x_plus, double x_minus) const + { + // The reduced kernel is defined only over the interval [0, 1], which + // means we must add one to get the x_plus for the inner kernels. + if (std::isnan(x_plus)) { + x_plus = 1.0 + x; } + // x_minus remains the same - private: - /** - * @brief Evaluate the reduced kernel. - * - * @param x The x value. - * @param y The y value. - * @param x_plus x - xmin. - * @param x_minus xmax - x. - * @return The value of K_red(x, y). - */ - double call_reduced(double x, double y, double x_plus, double x_minus) const - { - // The reduced kernel is defined only over the interval [0, 1], which - // means we must add one to get the x_plus for the inner kernels. - if (std::isnan(x_plus)) - { - x_plus = 1.0 + x; - } - // x_minus remains the same - - // Evaluate inner kernel at (x, y) and (x, -y) - double K_plus = inner_kernel_->operator()(x, y, x_plus, x_minus); - double K_minus = inner_kernel_->operator()(x, -y, x_plus, x_minus); + // Evaluate inner kernel at (x, y) and (x, -y) + double K_plus = inner_kernel_->operator()(x, y, x_plus, x_minus); + double K_minus = inner_kernel_->operator()(x, -y, x_plus, x_minus); - if (sign_ == 1) - { - return K_plus + K_minus; - } - else - { - return K_plus - K_minus; - } + if (sign_ == 1) { + return K_plus + K_minus; + } else { + return K_plus - K_minus; } - }; + } +}; } // namespace sparseir -namespace sparseir -{ - /* - AbstractSVEHints +namespace sparseir { +/* + AbstractSVEHints - Discretization hints for singular value expansion of a given kernel. 
- */ - class AbstractSVEHints +Discretization hints for singular value expansion of a given kernel. +*/ +class AbstractSVEHints { +public: + virtual ~AbstractSVEHints() = default; + + // Functions to compute segments for x and y + // template + // virtual std::vector segments_x() const = 0; + // template + // virtual std::vector segments_y() const = 0; + + // Additional methods if needed + virtual int nsvals() const = 0; + virtual int ngauss() const = 0; +}; + +class SVEHintsLogistic final : public AbstractSVEHints { +public: + SVEHintsLogistic(const LogisticKernel &kernel, double epsilon) + : kernel_(kernel), epsilon_(epsilon) { - public: - virtual ~AbstractSVEHints() = default; - - // Functions to compute segments for x and y - //template - //virtual std::vector segments_x() const = 0; - //template - //virtual std::vector segments_y() const = 0; - - // Additional methods if needed - virtual int nsvals() const = 0; - virtual int ngauss() const = 0; - }; + } - class SVEHintsLogistic final : public AbstractSVEHints + template + std::vector segments_x() const { - public: - SVEHintsLogistic(const LogisticKernel &kernel, double epsilon) - : kernel_(kernel), epsilon_(epsilon) {} - - template - std::vector segments_x() const { - int nzeros = std::max(static_cast(std::round(15 * std::log10(kernel_.lambda_))), 1); - - // Create a range of values - std::vector temp(nzeros); - for (int i = 0; i < nzeros; ++i) { - temp[i] = (T)0.143 * i; - } + int nzeros = std::max( + static_cast(std::round(15 * std::log10(kernel_.lambda_))), 1); - // Calculate diffs using the inverse hyperbolic cosine - std::vector diffs(nzeros); - for (int i = 0; i < nzeros; ++i) { - diffs[i] = 1.0 / cosh_impl(temp[i]); - } + // Create a range of values + std::vector temp(nzeros); + for (int i = 0; i < nzeros; ++i) { + temp[i] = (T)0.143 * i; + } - // Calculate cumulative sum of diffs - std::vector zeros(nzeros); - zeros[0] = diffs[0]; - for (int i = 1; i < nzeros; ++i) { - zeros[i] = zeros[i - 1] + diffs[i]; - } + // Calculate diffs using the inverse hyperbolic cosine + std::vector diffs(nzeros); + for (int i = 0; i < nzeros; ++i) { + diffs[i] = 1.0 / cosh_impl(temp[i]); + } - // Normalize zeros - T last_zero = zeros.back(); - for (int i = 0; i < nzeros; ++i) { - zeros[i] /= last_zero; - } + // Calculate cumulative sum of diffs + std::vector zeros(nzeros); + zeros[0] = diffs[0]; + for (int i = 1; i < nzeros; ++i) { + zeros[i] = zeros[i - 1] + diffs[i]; + } - // Create the final segments vector - std::vector segments(2 * nzeros + 1, 0); - for (int i = 0; i < nzeros; ++i) { - segments[i] = -zeros[nzeros - i - 1]; - segments[nzeros + i + 1] = zeros[i]; - } - return segments; - }; - - template - std::vector segments_y() const { - // Calculate the number of zeros - int nzeros = std::max(static_cast(std::round(20 * std::log10(kernel_.lambda_))), 2); - - // Initial differences - std::vector diffs = { - 0.01523, 0.03314, 0.04848, 0.05987, 0.06703, 0.07028, 0.07030, - 0.06791, 0.06391, 0.05896, 0.05358, 0.04814, 0.04288, 0.03795, - 0.03342, 0.02932, 0.02565, 0.02239, 0.01951, 0.01699 - }; + // Normalize zeros + T last_zero = zeros.back(); + for (int i = 0; i < nzeros; ++i) { + zeros[i] /= last_zero; + } - // Truncate diffs if necessary - if (nzeros < diffs.size()) { - diffs.resize(nzeros); - } + // Create the final segments vector + std::vector segments(2 * nzeros + 1, 0); + for (int i = 0; i < nzeros; ++i) { + segments[i] = -zeros[nzeros - i - 1]; + segments[nzeros + i + 1] = zeros[i]; + } + return segments; + }; - // Calculate trailing 
differences - for (int i = 20; i < nzeros; ++i) { - T x = (T)0.141 * i; - diffs.push_back(0.25 * exp_impl(-x)); - } + template + std::vector segments_y() const + { + // Calculate the number of zeros + int nzeros = std::max( + static_cast(std::round(20 * std::log10(kernel_.lambda_))), 2); + + // Initial differences + std::vector diffs = {0.01523, 0.03314, 0.04848, 0.05987, 0.06703, + 0.07028, 0.07030, 0.06791, 0.06391, 0.05896, + 0.05358, 0.04814, 0.04288, 0.03795, 0.03342, + 0.02932, 0.02565, 0.02239, 0.01951, 0.01699}; + + // Truncate diffs if necessary + if (nzeros < diffs.size()) { + diffs.resize(nzeros); + } - // Calculate cumulative sum of diffs - std::vector zeros(nzeros); - zeros[0] = diffs[0]; - for (int i = 1; i < nzeros; ++i) { - zeros[i] = zeros[i - 1] + diffs[i]; - } + // Calculate trailing differences + for (int i = 20; i < nzeros; ++i) { + T x = (T)0.141 * i; + diffs.push_back(0.25 * exp_impl(-x)); + } - // Normalize zeros - T last_zero = zeros.back(); - for (int i = 0; i < nzeros; ++i) { - zeros[i] /= last_zero; - } - zeros.pop_back(); + // Calculate cumulative sum of diffs + std::vector zeros(nzeros); + zeros[0] = diffs[0]; + for (int i = 1; i < nzeros; ++i) { + zeros[i] = zeros[i - 1] + diffs[i]; + } - // updated nzeros - nzeros = zeros.size(); + // Normalize zeros + T last_zero = zeros.back(); + for (int i = 0; i < nzeros; ++i) { + zeros[i] /= last_zero; + } + zeros.pop_back(); - // Adjust zeros - for (int i = 0; i < nzeros; ++i) { - zeros[i] -= 1.0; - } + // updated nzeros + nzeros = zeros.size(); - // Create the final segments vector - std::vector segments(2 * nzeros + 3, 0); - for (int i = 0; i < nzeros; ++i) { - segments[1+i] = zeros[i]; - segments[1+nzeros + 1 + i] = -zeros[nzeros-i-1]; - } - segments[0] = -T(1.0); - segments[1 + nzeros] = T( 0.0); - segments[2*nzeros + 2] = T(1.0); - return segments; + // Adjust zeros + for (int i = 0; i < nzeros; ++i) { + zeros[i] -= 1.0; } - int nsvals() const override { - double log10_Lambda = std::max(1.0, std::log10(kernel_.lambda_)); - return static_cast(std::round((25 + log10_Lambda) * log10_Lambda)); + // Create the final segments vector + std::vector segments(2 * nzeros + 3, 0); + for (int i = 0; i < nzeros; ++i) { + segments[1 + i] = zeros[i]; + segments[1 + nzeros + 1 + i] = -zeros[nzeros - i - 1]; } - int ngauss() const override{ - return epsilon_ >= std::sqrt(std::numeric_limits::epsilon()) ? 10 : 16; - }; + segments[0] = -T(1.0); + segments[1 + nzeros] = T(0.0); + segments[2 * nzeros + 2] = T(1.0); + return segments; + } - private: - const LogisticKernel &kernel_; - double epsilon_; + int nsvals() const override + { + double log10_Lambda = std::max(1.0, std::log10(kernel_.lambda_)); + return static_cast(std::round((25 + log10_Lambda) * log10_Lambda)); + } + int ngauss() const override + { + return epsilon_ >= std::sqrt(std::numeric_limits::epsilon()) + ? 
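+                   // 10-point Gauss rule suffices when ε >= sqrt(machine eps)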
10 + : 16; }; - class SVEHintsRegularizedBose : public AbstractSVEHints +private: + const LogisticKernel &kernel_; + double epsilon_; +}; + +class SVEHintsRegularizedBose : public AbstractSVEHints { +public: + SVEHintsRegularizedBose(const RegularizedBoseKernel &kernel, double epsilon) + : kernel_(kernel), epsilon_(epsilon) { - public: - SVEHintsRegularizedBose(const RegularizedBoseKernel &kernel, double epsilon) - : kernel_(kernel), epsilon_(epsilon) {} - - template - std::vector segments_x() const { - int nzeros = std::max(static_cast(std::round(15 * std::log10(kernel_.lambda_))), 15); - std::vector temp(nzeros); - std::vector diffs(nzeros); - std::vector zeros(nzeros); - - for (int i = 0; i < nzeros; ++i) { - temp[i] = T(0.18) * i; - diffs[i] = 1.0 / std::cosh(temp[i]); - } + } - std::partial_sum(diffs.begin(), diffs.end(), zeros.begin()); - T last_zero = zeros.back(); - std::transform(zeros.begin(), zeros.end(), zeros.begin(), [last_zero](T z) { return z / last_zero; }); + template + std::vector segments_x() const + { + int nzeros = std::max( + static_cast(std::round(15 * std::log10(kernel_.lambda_))), 15); + std::vector temp(nzeros); + std::vector diffs(nzeros); + std::vector zeros(nzeros); + + for (int i = 0; i < nzeros; ++i) { + temp[i] = T(0.18) * i; + diffs[i] = 1.0 / std::cosh(temp[i]); + } - std::vector result(2 * nzeros + 1, T(0)); + std::partial_sum(diffs.begin(), diffs.end(), zeros.begin()); + T last_zero = zeros.back(); + std::transform(zeros.begin(), zeros.end(), zeros.begin(), + [last_zero](T z) { return z / last_zero; }); - for (int i = 0; i < nzeros; ++i) { - result[i] = -zeros[nzeros - i - 1]; - result[nzeros + i + 1] = zeros[i]; - } + std::vector result(2 * nzeros + 1, T(0)); - return result; + for (int i = 0; i < nzeros; ++i) { + result[i] = -zeros[nzeros - i - 1]; + result[nzeros + i + 1] = zeros[i]; } - template - std::vector segments_y() const { - int nzeros = std::max(static_cast(std::round(20 * std::log10(kernel_.lambda_))), 20); - std::vector diffs(nzeros); - - for (int j = 0; j < nzeros; ++j) { - diffs[j] = T(0.12) / std::exp(0.0337 * j * std::log(j + 1)); - } + return result; + } - // Calculate cumulative sum of diffs - std::vector zeros(nzeros); - zeros[0] = diffs[0]; - for (int i = 1; i < nzeros; ++i) { - zeros[i] = zeros[i - 1] + diffs[i]; - } + template + std::vector segments_y() const + { + int nzeros = std::max( + static_cast(std::round(20 * std::log10(kernel_.lambda_))), 20); + std::vector diffs(nzeros); - // Normalize zeros - T last_zero = zeros.back(); - for (int i = 0; i < nzeros; ++i) { - zeros[i] /= last_zero; - } - zeros.pop_back(); + for (int j = 0; j < nzeros; ++j) { + diffs[j] = T(0.12) / std::exp(0.0337 * j * std::log(j + 1)); + } - // updated nzeros - nzeros = zeros.size(); + // Calculate cumulative sum of diffs + std::vector zeros(nzeros); + zeros[0] = diffs[0]; + for (int i = 1; i < nzeros; ++i) { + zeros[i] = zeros[i - 1] + diffs[i]; + } - // Adjust zeros - for (int i = 0; i < nzeros; ++i) { - zeros[i] -= 1.0; - } + // Normalize zeros + T last_zero = zeros.back(); + for (int i = 0; i < nzeros; ++i) { + zeros[i] /= last_zero; + } + zeros.pop_back(); - std::vector result(2 * nzeros + 3, T(0)); + // updated nzeros + nzeros = zeros.size(); - for (int i = 0; i < nzeros; ++i) { - result[1+i] = zeros[i]; - result[1+nzeros + 1 + i] = -zeros[nzeros-i-1]; - } - result[0] = T(-1); - result[1 + nzeros] = T(0); - result[2*nzeros + 2] = T(1); - return result; + // Adjust zeros + for (int i = 0; i < nzeros; ++i) { + zeros[i] -= 1.0; } - int nsvals() 
const override{ - double log10_Lambda = std::max(1.0, std::log10(kernel_.lambda_)); - return static_cast(std::round(28 * log10_Lambda)); + std::vector result(2 * nzeros + 3, T(0)); + + for (int i = 0; i < nzeros; ++i) { + result[1 + i] = zeros[i]; + result[1 + nzeros + 1 + i] = -zeros[nzeros - i - 1]; } - int ngauss() const override { - return epsilon_ >= std::sqrt(std::numeric_limits::epsilon()) ? 10 : 16; - }; + result[0] = T(-1); + result[1 + nzeros] = T(0); + result[2 * nzeros + 2] = T(1); + return result; + } - private: - const RegularizedBoseKernel &kernel_; - double epsilon_; + int nsvals() const override + { + double log10_Lambda = std::max(1.0, std::log10(kernel_.lambda_)); + return static_cast(std::round(28 * log10_Lambda)); + } + int ngauss() const override + { + return epsilon_ >= std::sqrt(std::numeric_limits::epsilon()) + ? 10 + : 16; }; - class RegularizedBoseKernelOdd : public AbstractReducedKernel +private: + const RegularizedBoseKernel &kernel_; + double epsilon_; +}; + +class RegularizedBoseKernelOdd : public AbstractReducedKernel { +public: + RegularizedBoseKernelOdd(std::shared_ptr inner, + int sign) + : AbstractReducedKernel(inner, sign) { - public: - RegularizedBoseKernelOdd(std::shared_ptr inner, int sign) - : AbstractReducedKernel(inner, sign) - { - if (!is_centrosymmetric()) - { - throw std::runtime_error("inner kernel must be centrosymmetric"); - } - if (std::abs(sign) != 1) - { - throw std::domain_error("sign must be -1 or 1"); - } + if (!is_centrosymmetric()) { + throw std::runtime_error("inner kernel must be centrosymmetric"); } - - // Implement the pure virtual function from the parent class - double operator()(double x, double y, double x_plus = std::numeric_limits::quiet_NaN(), - double x_minus = std::numeric_limits::quiet_NaN()) const override - { - double v_half = inner->lambda_ * 0.5 * y; - double xv_half = x * v_half; - bool xy_small = xv_half < 1; - bool sinh_range = 1e-200 < v_half && v_half < 85; - if (xy_small && sinh_range) - { - return y * sinh_impl(xv_half) / sinh_impl(v_half); - } - else - { - return callreduced(*this, x, x, x_plus, x_minus); - } + if (std::abs(sign) != 1) { + throw std::domain_error("sign must be -1 or 1"); } + } - // You'll need to implement the isCentrosymmetric function - // Here's a placeholder - bool isCentrosymmetric(RegularizedBoseKernel &kernel) - { - // Implement this function - return true; + // Implement the pure virtual function from the parent class + double operator()(double x, double y, + double x_plus = std::numeric_limits::quiet_NaN(), + double x_minus = std::numeric_limits::quiet_NaN()) + const override + { + double v_half = inner->lambda_ * 0.5 * y; + double xv_half = x * v_half; + bool xy_small = xv_half < 1; + bool sinh_range = 1e-200 < v_half && v_half < 85; + if (xy_small && sinh_range) { + return y * sinh_impl(xv_half) / sinh_impl(v_half); + } else { + return callreduced(*this, x, x, x_plus, x_minus); } - }; - - + } + // You'll need to implement the isCentrosymmetric function + // Here's a placeholder + bool isCentrosymmetric(RegularizedBoseKernel &kernel) + { + // Implement this function + return true; + } +}; - class LogisticKernelOdd : public AbstractReducedKernel +class LogisticKernelOdd : public AbstractReducedKernel { +public: + LogisticKernelOdd(std::shared_ptr inner, int sign) + : AbstractReducedKernel(inner, sign) { - public: - LogisticKernelOdd(std::shared_ptr inner, int sign) - : AbstractReducedKernel(inner, sign){} + } // Implement the pure virtual function from the parent class - double 
operator()(double x, double y, double x_plus = std::numeric_limits::quiet_NaN(), - double x_minus = std::numeric_limits::quiet_NaN()) const override{ + double operator()(double x, double y, + double x_plus = std::numeric_limits::quiet_NaN(), + double x_minus = std::numeric_limits::quiet_NaN()) + const override + { double v_half = inner->lambda_ * 0.5 * y; bool xy_small = x * v_half < 1; bool cosh_finite = v_half < 85; - if (xy_small && cosh_finite) - { + if (xy_small && cosh_finite) { return -std::sinh(v_half * x) / std::cosh(v_half); - } - else - { + } else { return callreduced(*this, x, x, x_plus, x_minus); } } - }; +}; - inline std::shared_ptr get_symmetrized(std::shared_ptr kernel, int sign) - { - return std::make_shared>(kernel, sign); - } +inline std::shared_ptr +get_symmetrized(std::shared_ptr kernel, int sign) +{ + return std::make_shared>(kernel, sign); +} - inline std::shared_ptr get_symmetrized(std::shared_ptr kernel, int sign) - { - if (sign == -1) - { - return std::make_shared(kernel, sign); - } - else - { - return std::make_shared>(kernel, sign); - } +inline std::shared_ptr +get_symmetrized(std::shared_ptr kernel, int sign) +{ + if (sign == -1) { + return std::make_shared(kernel, sign); + } else { + return std::make_shared>(kernel, sign); } +} - inline std::shared_ptr get_symmetrized(std::shared_ptr kernel, int sign) - { - if (sign == -1) - { - return std::make_shared(kernel, sign); - } - else - { - return std::make_shared>(kernel, sign); - } +inline std::shared_ptr +get_symmetrized(std::shared_ptr kernel, int sign) +{ + if (sign == -1) { + return std::make_shared(kernel, sign); + } else { + return std::make_shared>(kernel, + sign); } +} - inline std::shared_ptr get_symmetrized(const LogisticKernel& kernel, int sign) { - auto kernel_ptr = std::make_shared(kernel); - return get_symmetrized(kernel_ptr, sign); - } +inline std::shared_ptr +get_symmetrized(const LogisticKernel &kernel, int sign) +{ + auto kernel_ptr = std::make_shared(kernel); + return get_symmetrized(kernel_ptr, sign); +} - inline std::shared_ptr get_symmetrized(const RegularizedBoseKernel& kernel, int sign) { - auto kernel_ptr = std::make_shared(kernel); - return get_symmetrized(kernel_ptr, sign); - } +inline std::shared_ptr +get_symmetrized(const RegularizedBoseKernel &kernel, int sign) +{ + auto kernel_ptr = std::make_shared(kernel); + return get_symmetrized(kernel_ptr, sign); +} - inline void get_symmetrized(AbstractReducedKernel &kernel, int sign) - { - throw std::runtime_error("cannot symmetrize twice"); - } +inline void get_symmetrized(AbstractReducedKernel &kernel, int sign) +{ + throw std::runtime_error("cannot symmetrize twice"); +} } // namespace sparseir -namespace sparseir{ - - // Function to compute matrix from Gauss rules - template - Eigen::MatrixX matrix_from_gauss( - const AbstractKernel &kernel, const Rule& gauss_x, const Rule& gauss_y) { - - size_t n = gauss_x.x.size(); - size_t m = gauss_y.x.size(); - Eigen::Matrix res(n, m); - - // Parallelize using threads - std::vector threads; - for (size_t i = 0; i < n; ++i) { - threads.emplace_back([&, i]() { - for (size_t j = 0; j < m; ++j) { - // TODO: return type should be T - double ko = kernel( - static_cast(gauss_x.x[i]), - static_cast(gauss_y.x[j]), - static_cast(gauss_x.x_forward[i]), - static_cast(gauss_x.x_backward[i]) - ); - res(i, j) = T(ko); - } - }); - } +namespace sparseir { - // Join threads - for (auto& thread : threads) { - thread.join(); - } +// Function to compute matrix from Gauss rules +template +Eigen::MatrixX 
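+// A hypothetical usage sketch (assumes the Rule produced by legendre<T>(n)
+// above carries the x_forward / x_backward data used here):
+//   auto rule = sparseir::legendre<double>(20);
+//   sparseir::LogisticKernel K(80.0);
+//   Eigen::MatrixXd mat = sparseir::matrix_from_gauss<double>(K, rule, rule);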
matrix_from_gauss(const AbstractKernel &kernel, + const Rule &gauss_x, + const Rule &gauss_y) +{ - return res; + size_t n = gauss_x.x.size(); + size_t m = gauss_y.x.size(); + Eigen::Matrix res(n, m); + + // Parallelize using threads + std::vector threads; + for (size_t i = 0; i < n; ++i) { + threads.emplace_back([&, i]() { + for (size_t j = 0; j < m; ++j) { + // TODO: return type should be T + double ko = kernel(static_cast(gauss_x.x[i]), + static_cast(gauss_y.x[j]), + static_cast(gauss_x.x_forward[i]), + static_cast(gauss_x.x_backward[i])); + res(i, j) = T(ko); + } + }); } - class SVEHintsReduced : public AbstractSVEHints - { - public: - SVEHintsReduced(std::shared_ptr inner_hints) - : inner(inner_hints) {} - - // Implement required methods - int nsvals() const override - { - // Implement this function - // For example, you can delegate the call to the inner object - return inner->nsvals(); - } - - int ngauss() const override - { - // Implement this function - // For example, you can delegate the call to the inner object - return inner->ngauss(); - } + // Join threads + for (auto &thread : threads) { + thread.join(); + } - private: - std::shared_ptr inner; - }; + return res; +} - // Function to provide SVE hints - inline SVEHintsLogistic sve_hints(const LogisticKernel& kernel, double epsilon) { - return SVEHintsLogistic(kernel, epsilon); +class SVEHintsReduced : public AbstractSVEHints { +public: + SVEHintsReduced(std::shared_ptr inner_hints) + : inner(inner_hints) + { } - inline SVEHintsRegularizedBose sve_hints(const RegularizedBoseKernel& kernel, double epsilon) { - return SVEHintsRegularizedBose(kernel, epsilon); + // Implement required methods + int nsvals() const override + { + // Implement this function + // For example, you can delegate the call to the inner object + return inner->nsvals(); } - inline std::shared_ptr sve_hints(std::shared_ptr kernel, double epsilon) + int ngauss() const override { - if (auto logisticKernel = std::dynamic_pointer_cast(kernel)) - { - return std::make_shared(*logisticKernel, epsilon); - } - else if (auto boseKernel = std::dynamic_pointer_cast(kernel)) - { - return std::make_shared(*boseKernel, epsilon); - } - else if (auto reducedKernel = std::dynamic_pointer_cast(kernel)) - { - return std::make_shared(sve_hints(reducedKernel->inner, epsilon)); - } - else - { - throw std::invalid_argument("Unsupported kernel type for SVE hints"); - } + // Implement this function + // For example, you can delegate the call to the inner object + return inner->ngauss(); } - inline SVEHintsReduced sve_hints(const AbstractReducedKernel& kernel, double epsilon) { - return SVEHintsReduced(sve_hints(kernel.inner, epsilon)); +private: + std::shared_ptr inner; +}; + +// Function to provide SVE hints +inline SVEHintsLogistic sve_hints(const LogisticKernel &kernel, double epsilon) +{ + return SVEHintsLogistic(kernel, epsilon); +} + +inline SVEHintsRegularizedBose sve_hints(const RegularizedBoseKernel &kernel, + double epsilon) +{ + return SVEHintsRegularizedBose(kernel, epsilon); +} + +inline std::shared_ptr +sve_hints(std::shared_ptr kernel, double epsilon) +{ + if (auto logisticKernel = + std::dynamic_pointer_cast(kernel)) { + return std::make_shared(*logisticKernel, epsilon); + } else if (auto boseKernel = + std::dynamic_pointer_cast( + kernel)) { + return std::make_shared(*boseKernel, epsilon); + } else if (auto reducedKernel = + std::dynamic_pointer_cast( + kernel)) { + return std::make_shared( + sve_hints(reducedKernel->inner, epsilon)); + } else { + throw 
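+            // reached when the kernel is none of LogisticKernel,
+            // RegularizedBoseKernel, or AbstractReducedKernel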
std::invalid_argument("Unsupported kernel type for SVE hints"); } +} + +inline SVEHintsReduced sve_hints(const AbstractReducedKernel &kernel, + double epsilon) +{ + return SVEHintsReduced(sve_hints(kernel.inner, epsilon)); +} /* function ngauss end @@ -1135,13 +1109,12 @@ ngauss(hints::SVEHintsRegularizedBose) = hints.ε ≥ sqrt(eps()) ? 10 : 16 ngauss(hints::SVEHintsReduced) = ngauss(hints.inner_hints) */ - - - /* - std::shared_ptr sve_hints(const AbstractReducedKernel& kernel, double epsilon) { - // Assume kernel.inner is a method that returns the inner kernel - auto innerHints = sve_hints(kernel.inner(), epsilon); - return std::make_shared(innerHints); - } - */ -} \ No newline at end of file +/* +std::shared_ptr sve_hints(const AbstractReducedKernel& kernel, +double epsilon) { + // Assume kernel.inner is a method that returns the inner kernel + auto innerHints = sve_hints(kernel.inner(), epsilon); + return std::make_shared(innerHints); +} +*/ +} // namespace sparseir \ No newline at end of file diff --git a/include/sparseir/poly.hpp b/include/sparseir/poly.hpp index 0acb6fe..16b5b0d 100644 --- a/include/sparseir/poly.hpp +++ b/include/sparseir/poly.hpp @@ -36,16 +36,23 @@ class PiecewiseLegendrePoly { // Constructor PiecewiseLegendrePoly(int polyorder, double xmin, double xmax, - const Eigen::VectorXd& knots, - const Eigen::VectorXd& delta_x, - const Eigen::MatrixXd& data, - int symm, int l, - const Eigen::VectorXd& xm, - const Eigen::VectorXd& inv_xs, - const Eigen::VectorXd& norms) - : polyorder(polyorder), xmin(xmin), xmax(xmax), - knots(knots), delta_x(delta_x), data(data), symm(symm), l(l), - xm(xm), inv_xs(inv_xs), norms(norms) + const Eigen::VectorXd &knots, + const Eigen::VectorXd &delta_x, + const Eigen::MatrixXd &data, int symm, int l, + const Eigen::VectorXd &xm, + const Eigen::VectorXd &inv_xs, + const Eigen::VectorXd &norms) + : polyorder(polyorder), + xmin(xmin), + xmax(xmax), + knots(knots), + delta_x(delta_x), + data(data), + symm(symm), + l(l), + xm(xm), + inv_xs(inv_xs), + norms(norms) { // Check for NaN in data if (data.unaryExpr([](double x) { return std::isnan(x); }).any()) { @@ -68,18 +75,29 @@ class PiecewiseLegendrePoly { } // Constructor: PiecewiseLegendrePoly(data, p; symm=symm(p)) - PiecewiseLegendrePoly(const Eigen::MatrixXd& data, const PiecewiseLegendrePoly& p, int symm) - : polyorder(p.polyorder), xmin(p.xmin), xmax(p.xmax), - knots(p.knots), delta_x(p.delta_x), data(data), symm(symm), l(p.l), - xm(p.xm), inv_xs(p.inv_xs), norms(p.norms) + PiecewiseLegendrePoly(const Eigen::MatrixXd &data, + const PiecewiseLegendrePoly &p, int symm) + : polyorder(p.polyorder), + xmin(p.xmin), + xmax(p.xmax), + knots(p.knots), + delta_x(p.delta_x), + data(data), + symm(symm), + l(p.l), + xm(p.xm), + inv_xs(p.inv_xs), + norms(p.norms) { // Copy constructor with new data and symm } - // Constructor: PiecewiseLegendrePoly(data::Matrix, knots::Vector, l::Integer; + // Constructor: PiecewiseLegendrePoly(data::Matrix, knots::Vector, + // l::Integer; // delta_x=diff(knots), symm=0) - PiecewiseLegendrePoly(const Eigen::MatrixXd& data, const Eigen::VectorXd& knots, int l, - const Eigen::VectorXd& delta_x = Eigen::VectorXd(), + PiecewiseLegendrePoly(const Eigen::MatrixXd &data, + const Eigen::VectorXd &knots, int l, + const Eigen::VectorXd &delta_x = Eigen::VectorXd(), int symm = 0) : data(data), knots(knots), symm(symm), l(l) { @@ -93,13 +111,16 @@ class PiecewiseLegendrePoly { if (delta_x.size() == 0) { // delta_x = diff(knots) - this->delta_x = knots.segment(1, knots.size() - 1) - 
knots.segment(0, knots.size() - 1); + this->delta_x = knots.segment(1, knots.size() - 1) - + knots.segment(0, knots.size() - 1); } else { this->delta_x = delta_x; } // xm = (knots[1:end-1] + knots[2:end]) / 2 - xm = (knots.segment(0, knots.size() - 1) + knots.segment(1, knots.size() - 1)) / 2.0; + xm = (knots.segment(0, knots.size() - 1) + + knots.segment(1, knots.size() - 1)) / + 2.0; // inv_xs = 2 ./ delta_x inv_xs = 2.0 / this->delta_x.array(); @@ -119,7 +140,8 @@ class PiecewiseLegendrePoly { } // Function call operator: evaluate the polynomial at x - double operator()(double x) const { + double operator()(double x) const + { int i; double x_tilde; std::tie(i, x_tilde) = split(x); @@ -129,7 +151,8 @@ class PiecewiseLegendrePoly { } // Evaluate the polynomial at an array of x - Eigen::VectorXd operator()(const Eigen::VectorXd& xs) const { + Eigen::VectorXd operator()(const Eigen::VectorXd &xs) const + { Eigen::VectorXd results(xs.size()); for (int idx = 0; idx < xs.size(); ++idx) { results[idx] = (*this)(xs[idx]); @@ -138,26 +161,32 @@ class PiecewiseLegendrePoly { } // Overlap function - double overlap(std::function f, double rtol = std::numeric_limits::epsilon(), - bool return_error = false, int maxevals = 10000, const std::vector& points = {}) const { + double overlap(std::function f, + double rtol = std::numeric_limits::epsilon(), + bool return_error = false, int maxevals = 10000, + const std::vector &points = {}) const + { // Implement numerical integration over the intervals - // Since C++ does not have a built-in quadgk, we need to implement one or use a library - // For simplicity, let's use Gauss-Legendre quadrature over each segment + // Since C++ does not have a built-in quadgk, we need to implement one + // or use a library For simplicity, let's use Gauss-Legendre quadrature + // over each segment double result = 0.0; - std::vector integration_points(knots.data(), knots.data() + knots.size()); - integration_points.insert(integration_points.end(), points.begin(), points.end()); + std::vector integration_points(knots.data(), + knots.data() + knots.size()); + integration_points.insert(integration_points.end(), points.begin(), + points.end()); std::sort(integration_points.begin(), integration_points.end()); - integration_points.erase(std::unique(integration_points.begin(), integration_points.end()), integration_points.end()); + integration_points.erase( + std::unique(integration_points.begin(), integration_points.end()), + integration_points.end()); for (size_t idx = 0; idx < integration_points.size() - 1; ++idx) { double a = integration_points[idx]; double b = integration_points[idx + 1]; // Perform Gauss-Legendre quadrature on [a, b] - auto integrand = [this, &f](double x) { - return (*this)(x) * f(x); - }; + auto integrand = [this, &f](double x) { return (*this)(x)*f(x); }; double integral = gauss_legendre_quadrature(a, b, integrand); result += integral; } @@ -171,7 +200,8 @@ class PiecewiseLegendrePoly { } // Derivative function - PiecewiseLegendrePoly deriv(int n = 1) const { + PiecewiseLegendrePoly deriv(int n = 1) const + { Eigen::MatrixXd ddata = legder(data, n); // Multiply each column by inv_xs[i]^n @@ -184,7 +214,8 @@ class PiecewiseLegendrePoly { } // Roots function - Eigen::VectorXd roots(double tol = 1e-10) const { + Eigen::VectorXd roots(double tol = 1e-10) const + { std::vector all_roots; // For each segment, find the roots of the polynomial @@ -198,27 +229,34 @@ class PiecewiseLegendrePoly { }; // Find roots in the interval [knots[i], knots[i+1]] - std::vector 
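// overlap() above integrates p(x)*f(x) by splitting the domain at the knots
// (plus any user-supplied points) and applying a fixed Gauss-Legendre rule
// per segment. A self-contained miniature of that composite scheme with a
// 2-point rule (nodes at +/- 1/sqrt(3), unit weights); names here are
// illustrative:
#include <cmath>
#include <functional>
#include <vector>

inline double composite_gauss2(const std::vector<double> &breaks,
                               const std::function<double(double)> &g)
{
    const double node = 1.0 / std::sqrt(3.0);
    double total = 0.0;
    for (size_t k = 0; k + 1 < breaks.size(); ++k) {
        const double c1 = 0.5 * (breaks[k + 1] - breaks[k]); // half-width
        const double c2 = 0.5 * (breaks[k + 1] + breaks[k]); // midpoint
        total += c1 * (g(c2 - c1 * node) + g(c2 + c1 * node));
    }
    return total;
}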
segment_roots = find_roots_in_interval(segment_poly, knots[i], knots[i + 1], tol); - all_roots.insert(all_roots.end(), segment_roots.begin(), segment_roots.end()); + std::vector segment_roots = find_roots_in_interval( + segment_poly, knots[i], knots[i + 1], tol); + all_roots.insert(all_roots.end(), segment_roots.begin(), + segment_roots.end()); } // Convert std::vector to Eigen::VectorXd - Eigen::VectorXd roots = Eigen::Map(all_roots.data(), all_roots.size()); + Eigen::VectorXd roots = Eigen::Map( + all_roots.data(), all_roots.size()); return roots; } // Overloaded operators - PiecewiseLegendrePoly operator*(double factor) const { + PiecewiseLegendrePoly operator*(double factor) const + { Eigen::MatrixXd new_data = data * factor; return PiecewiseLegendrePoly(new_data, *this, symm); } - friend PiecewiseLegendrePoly operator*(double factor, const PiecewiseLegendrePoly& poly) { + friend PiecewiseLegendrePoly operator*(double factor, + const PiecewiseLegendrePoly &poly) + { return poly * factor; } - PiecewiseLegendrePoly operator+ (const PiecewiseLegendrePoly& other) const { + PiecewiseLegendrePoly operator+(const PiecewiseLegendrePoly &other) const + { if (!knots.isApprox(other.knots, 1e-12)) { throw std::runtime_error("knots must be the same"); } @@ -227,33 +265,38 @@ class PiecewiseLegendrePoly { return PiecewiseLegendrePoly(new_data, knots, -1, delta_x, new_symm); } - PiecewiseLegendrePoly operator- () const { + PiecewiseLegendrePoly operator-() const + { Eigen::MatrixXd new_data = -data; return PiecewiseLegendrePoly(new_data, knots, -1, delta_x, symm); } - PiecewiseLegendrePoly operator- (const PiecewiseLegendrePoly& other) const { + PiecewiseLegendrePoly operator-(const PiecewiseLegendrePoly &other) const + { return (*this) + (-other); } // Accessor functions double get_xmin() const { return xmin; } double get_xmax() const { return xmax; } - const Eigen::VectorXd& get_knots() const { return knots; } - const Eigen::VectorXd& get_delta_x() const { return delta_x; } + const Eigen::VectorXd &get_knots() const { return knots; } + const Eigen::VectorXd &get_delta_x() const { return delta_x; } int get_symm() const { return symm; } - const Eigen::MatrixXd& get_data() const { return data; } - const Eigen::VectorXd& get_norms() const { return norms; } + const Eigen::MatrixXd &get_data() const { return data; } + const Eigen::VectorXd &get_norms() const { return norms; } int get_polyorder() const { return polyorder; } private: // Helper function to compute legval - static double legval(double x, const Eigen::VectorXd& coeffs) { + static double legval(double x, const Eigen::VectorXd &coeffs) + { int N = coeffs.size(); - if (N == 0) return 0.0; + if (N == 0) + return 0.0; std::vector P(N); P[0] = 1.0; - if (N > 1) P[1] = x; + if (N > 1) + P[1] = x; for (int n = 2; n < N; ++n) { P[n] = ((2 * n - 1) * x * P[n - 1] - (n - 1) * P[n - 2]) / n; } @@ -265,12 +308,14 @@ class PiecewiseLegendrePoly { } // Helper function to split x into segment index i and x_tilde - std::pair split(double x) const { + std::pair split(double x) const + { if (x < xmin || x > xmax) { throw std::domain_error("x is outside the domain"); } - auto it = std::lower_bound(knots.data(), knots.data() + knots.size(), x); + auto it = + std::lower_bound(knots.data(), knots.data() + knots.size(), x); int i = std::max(0, int(it - knots.data() - 1)); i = std::min(i, knots.size() - 2); @@ -279,13 +324,14 @@ class PiecewiseLegendrePoly { } // Placeholder for Gauss-Legendre quadrature over [a, b] - double gauss_legendre_quadrature(double a, double 
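// legval() above sums coeffs against Legendre polynomials generated by the
// Bonnet recurrence n*P_n(x) = (2n - 1)*x*P_{n-1}(x) - (n - 1)*P_{n-2}(x).
// A standalone smoke test of that recurrence against the closed form
// P_2(x) = (3x^2 - 1)/2; illustrative only:
#include <cassert>
#include <cmath>

inline double legendre_P(int n, double x)
{
    double p0 = 1.0, p1 = x;
    if (n == 0)
        return p0;
    for (int k = 2; k <= n; ++k) {
        const double pk = ((2 * k - 1) * x * p1 - (k - 1) * p0) / k;
        p0 = p1;
        p1 = pk;
    }
    return p1;
}

inline void legendre_recurrence_check()
{
    for (double x : {-0.7, 0.0, 0.3, 1.0})
        assert(std::abs(legendre_P(2, x) - 0.5 * (3 * x * x - 1)) < 1e-14);
}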
b, std::function<double(double)> f) const {
-        static const double xg[] = { -0.906179845938664, -0.538469310105683,
-                                     0.0,
-                                     0.538469310105683, 0.906179845938664 };
-        static const double wg[] = { 0.236926885056189, 0.478628670499366,
-                                     0.568888888888889,
-                                     0.478628670499366, 0.236926885056189 };
+    double gauss_legendre_quadrature(double a, double b,
+                                     std::function<double(double)> f) const
+    {
+        // 5-point Gauss-Legendre nodes and weights on [-1, 1]
+        static const double xg[] = {-0.906179845938664, -0.538469310105683, 0.0,
+                                    0.538469310105683, 0.906179845938664};
+        static const double wg[] = {0.236926885056189, 0.478628670499366,
+                                    0.568888888888889, 0.478628670499366,
+                                    0.236926885056189};
         double c1 = (b - a) / 2.0;
         double c2 = (b + a) / 2.0;
         double integral = 0.0;
@@ -298,7 +344,10 @@ class PiecewiseLegendrePoly {
     }

     // Placeholder for finding roots in an interval
-    std::vector<double> find_roots_in_interval(std::function<double(double)> f, double a, double b, double tol) const {
+    std::vector<double> find_roots_in_interval(std::function<double(double)> f,
+                                               double a, double b,
+                                               double tol) const
+    {
         // Implement a root-finding algorithm like bisection or Brent's method
         std::vector<double> roots;
         // Placeholder: actual implementation needed
@@ -308,226 +357,204 @@
 } // namespace sparseir

-namespace sparseir
-{
-
-    // Function to compute the spherical Bessel function of the first kind using recursion
-    inline double spherical_bessel_j(int l, double x)
-    {
-        if (l < 0)
-        {
-            throw std::invalid_argument("Order l must be non-negative");
-        }
-        if (x == 0.0)
-        {
-            return (l == 0) ? 1.0 : 0.0;
-        }
-
-        // Initial values for the recursion
-        double j0 = std::sin(x) / x;
-        if (l == 0)
-        {
-            return j0;
-        }
+namespace sparseir {

-        double j1 = (std::sin(x) / (x * x)) - (std::cos(x) / x);
-        if (l == 1)
-        {
-            return j1;
-        }
+// Function to compute the spherical Bessel function of the first kind using
+// recursion
+// NOTE: upward recurrence loses accuracy once l grows past x; a downward
+// (Miller-style) recurrence is the safer choice for small arguments.
+inline double spherical_bessel_j(int l, double x)
+{
+    if (l < 0) {
+        throw std::invalid_argument("Order l must be non-negative");
+    }
+    if (x == 0.0) {
+        return (l == 0) ? 
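// find_roots_in_interval() above is an acknowledged placeholder. One
// workable strategy, sketched under the assumption that a uniform scan
// resolution is acceptable: bracket sign changes on a grid, then bisect
// each bracket down to tol. Grid size and names are illustrative:
#include <functional>
#include <vector>

inline std::vector<double>
bisect_roots(const std::function<double(double)> &f, double a, double b,
             double tol, int grid = 64)
{
    std::vector<double> roots;
    double prev_x = a, prev_f = f(a);
    for (int k = 1; k <= grid; ++k) {
        const double x = a + (b - a) * k / grid;
        const double fx = f(x);
        if (prev_f == 0.0) {
            roots.push_back(prev_x); // exact zero at a grid point
        } else if (prev_f * fx < 0.0) {
            double lo = prev_x, hi = x; // sign change: bisect the bracket
            while (hi - lo > tol) {
                const double mid = 0.5 * (lo + hi);
                (f(lo) * f(mid) <= 0.0 ? hi : lo) = mid;
            }
            roots.push_back(0.5 * (lo + hi));
        }
        prev_x = x;
        prev_f = fx;
    }
    return roots;
}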
1.0 : 0.0; + } - // Recursion relation: - // j_n(x) = ((2n - 1)/x) * j_{n-1}(x) - j_{n-2}(x) - double j_prev_prev = j0; - double j_prev = j1; - double j_curr = 0.0; + // Initial values for the recursion + double j0 = std::sin(x) / x; + if (l == 0) { + return j0; + } - for (int n = 2; n <= l; ++n) - { - j_curr = ((2.0 * n - 1.0) / x) * j_prev - j_prev_prev; - j_prev_prev = j_prev; - j_prev = j_curr; - } - return j_curr; + double j1 = (std::sin(x) / (x * x)) - (std::cos(x) / x); + if (l == 1) { + return j1; } - inline std::complex get_tnl(int l, double w) - { - double abs_w = std::abs(w); - // Compute spherical Bessel function of order l at abs_w - double sph_bessel = spherical_bessel_j(l, abs_w); + // Recursion relation: + // j_n(x) = ((2n - 1)/x) * j_{n-1}(x) - j_{n-2}(x) + double j_prev_prev = j0; + double j_prev = j1; + double j_curr = 0.0; - // Compute 2i^l - std::complex im_unit(0.0, 1.0); - std::complex im_power = std::pow(im_unit, l); - std::complex result = 2.0 * im_power * sph_bessel; + for (int n = 2; n <= l; ++n) { + j_curr = ((2.0 * n - 1.0) / x) * j_prev - j_prev_prev; + j_prev_prev = j_prev; + j_prev = j_curr; + } + return j_curr; +} - if (w < 0.0) - { - return std::conj(result); - } - else - { - return result; - } +inline std::complex get_tnl(int l, double w) +{ + double abs_w = std::abs(w); + // Compute spherical Bessel function of order l at abs_w + double sph_bessel = spherical_bessel_j(l, abs_w); + + // Compute 2i^l + std::complex im_unit(0.0, 1.0); + std::complex im_power = std::pow(im_unit, l); + std::complex result = 2.0 * im_power * sph_bessel; + + if (w < 0.0) { + return std::conj(result); + } else { + return result; } +} - inline std::pair, std::vector> shift_xmid(const std::vector &knots, const std::vector &delta_x) - { - size_t N = delta_x.size(); - std::vector delta_x_half(N); - for (size_t i = 0; i < N; ++i) - { - delta_x_half[i] = delta_x[i] / 2.0; - } +inline std::pair, std::vector> +shift_xmid(const std::vector &knots, const std::vector &delta_x) +{ + size_t N = delta_x.size(); + std::vector delta_x_half(N); + for (size_t i = 0; i < N; ++i) { + delta_x_half[i] = delta_x[i] / 2.0; + } - // Compute xmid_m1 - std::vector xmid_m1(N); - std::vector cumsum(N); - cumsum[0] = delta_x[0]; - for (size_t i = 1; i < N; ++i) - { - cumsum[i] = cumsum[i - 1] + delta_x[i]; - } - for (size_t i = 0; i < N; ++i) - { - xmid_m1[i] = cumsum[i] - delta_x_half[i]; - } + // Compute xmid_m1 + std::vector xmid_m1(N); + std::vector cumsum(N); + cumsum[0] = delta_x[0]; + for (size_t i = 1; i < N; ++i) { + cumsum[i] = cumsum[i - 1] + delta_x[i]; + } + for (size_t i = 0; i < N; ++i) { + xmid_m1[i] = cumsum[i] - delta_x_half[i]; + } - // Compute xmid_p1 - std::vector xmid_p1(N); - std::vector rev_delta_x(N); - for (size_t i = 0; i < N; ++i) - { - rev_delta_x[N - 1 - i] = delta_x[i]; - } - std::vector rev_cumsum(N); - rev_cumsum[0] = rev_delta_x[0]; - for (size_t i = 1; i < N; ++i) - { - rev_cumsum[i] = rev_cumsum[i - 1] + rev_delta_x[i]; - } - for (size_t i = 0; i < N; ++i) - { - xmid_p1[i] = -rev_cumsum[N - 1 - i] + delta_x_half[i]; - } + // Compute xmid_p1 + std::vector xmid_p1(N); + std::vector rev_delta_x(N); + for (size_t i = 0; i < N; ++i) { + rev_delta_x[N - 1 - i] = delta_x[i]; + } + std::vector rev_cumsum(N); + rev_cumsum[0] = rev_delta_x[0]; + for (size_t i = 1; i < N; ++i) { + rev_cumsum[i] = rev_cumsum[i - 1] + rev_delta_x[i]; + } + for (size_t i = 0; i < N; ++i) { + xmid_p1[i] = -rev_cumsum[N - 1 - i] + delta_x_half[i]; + } - // Compute xmid_0 - std::vector xmid_0(N); - for 
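// get_tnl() above realizes t_l(w) = 2 * i^l * j_l(|w|), conjugated for
// w < 0. A quick sanity check against the l = 0 closed form
// j_0(x) = sin(x)/x, for which the result must be the real number
// 2*sin|w|/|w|; assumes sparseir::get_tnl as defined above:
#include <cassert>
#include <cmath>
#include <complex>

inline void get_tnl_smoke_test()
{
    for (double w : {0.5, 1.0, 3.0}) {
        const std::complex<double> t = sparseir::get_tnl(0, w);
        assert(std::abs(t.real() - 2.0 * std::sin(w) / w) < 1e-12);
        assert(std::abs(t.imag()) < 1e-12);
    }
}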
(size_t i = 0; i < N; ++i) - { - xmid_0[i] = knots[i + 1] - delta_x_half[i]; // Assuming knots has length N + 1 - } + // Compute xmid_0 + std::vector xmid_0(N); + for (size_t i = 0; i < N; ++i) { + xmid_0[i] = + knots[i + 1] - delta_x_half[i]; // Assuming knots has length N + 1 + } - // Compute shift - std::vector shift(N); - for (size_t i = 0; i < N; ++i) - { - shift[i] = static_cast(std::round(xmid_0[i])); - } + // Compute shift + std::vector shift(N); + for (size_t i = 0; i < N; ++i) { + shift[i] = static_cast(std::round(xmid_0[i])); + } - // Compute diff - std::vector diff(N); - for (size_t i = 0; i < N; ++i) - { - int idx = shift[i] + 1; // shift can be -1, 0, 1; idx ranges from 0 to 2 - if (idx == 0) - { - diff[i] = xmid_m1[i]; - } - else if (idx == 1) - { - diff[i] = xmid_0[i]; - } - else if (idx == 2) - { - diff[i] = xmid_p1[i]; - } - else - { - // Should not happen - throw std::runtime_error("Invalid shift value"); - } + // Compute diff + std::vector diff(N); + for (size_t i = 0; i < N; ++i) { + int idx = shift[i] + 1; // shift can be -1, 0, 1; idx ranges from 0 to 2 + if (idx == 0) { + diff[i] = xmid_m1[i]; + } else if (idx == 1) { + diff[i] = xmid_0[i]; + } else if (idx == 2) { + diff[i] = xmid_p1[i]; + } else { + // Should not happen + throw std::runtime_error("Invalid shift value"); } - - return std::make_pair(diff, shift); } - inline Eigen::VectorXcd phase_stable(const PiecewiseLegendrePoly &poly, int wn) - { - const std::vector knots(poly.knots.data(), poly.knots.data() + poly.knots.size()); - const std::vector delta_x(poly.delta_x.data(), poly.delta_x.data() + poly.delta_x.size()); + return std::make_pair(diff, shift); +} - // Compute xmid_diff and extra_shift - std::pair, std::vector> shift_result = shift_xmid(knots, delta_x); - const std::vector &xmid_diff = shift_result.first; - const std::vector &extra_shift = shift_result.second; - - size_t N = delta_x.size(); - Eigen::VectorXcd phase_wi(N); - - for (size_t i = 0; i < N; ++i) - { - int exponent = wn * (extra_shift[i] + 1); - int exponent_mod4 = ((exponent % 4) + 4) % 4; // Ensure positive modulo - - std::complex im_power; - switch (exponent_mod4) - { - case 0: - im_power = std::complex(1.0, 0.0); - break; - case 1: - im_power = std::complex(0.0, 1.0); - break; - case 2: - im_power = std::complex(-1.0, 0.0); - break; - case 3: - im_power = std::complex(0.0, -1.0); - break; - } +inline Eigen::VectorXcd phase_stable(const PiecewiseLegendrePoly &poly, int wn) +{ + const std::vector knots(poly.knots.data(), + poly.knots.data() + poly.knots.size()); + const std::vector delta_x( + poly.delta_x.data(), poly.delta_x.data() + poly.delta_x.size()); + + // Compute xmid_diff and extra_shift + std::pair, std::vector> shift_result = + shift_xmid(knots, delta_x); + const std::vector &xmid_diff = shift_result.first; + const std::vector &extra_shift = shift_result.second; + + size_t N = delta_x.size(); + Eigen::VectorXcd phase_wi(N); + + for (size_t i = 0; i < N; ++i) { + int exponent = wn * (extra_shift[i] + 1); + int exponent_mod4 = ((exponent % 4) + 4) % 4; // Ensure positive modulo + + std::complex im_power; + switch (exponent_mod4) { + case 0: + im_power = std::complex(1.0, 0.0); + break; + case 1: + im_power = std::complex(0.0, 1.0); + break; + case 2: + im_power = std::complex(-1.0, 0.0); + break; + case 3: + im_power = std::complex(0.0, -1.0); + break; + } + + double arg = M_PI * wn * xmid_diff[i] / 2.0; + std::complex cispi = std::polar(1.0, arg); // exp(i * arg) + + phase_wi(i) = im_power * cispi; + } - double arg = M_PI * wn 
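// phase_stable() encodes i^k through k mod 4 (i^0..i^3 = 1, i, -1, -i),
// which avoids complex std::pow for integer powers. The same lookup as a
// tiny helper, with the double modulo keeping negative k non-negative;
// illustrative only:
#include <complex>

inline std::complex<double> im_power(int k)
{
    static const std::complex<double> table[4] = {
        {1.0, 0.0}, {0.0, 1.0}, {-1.0, 0.0}, {0.0, -1.0}};
    return table[((k % 4) + 4) % 4];
}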
* xmid_diff[i] / 2.0; - std::complex cispi = std::polar(1.0, arg); // exp(i * arg) + return phase_wi; +} - phase_wi(i) = im_power * cispi; - } +inline std::complex compute_unl_inner(const PiecewiseLegendrePoly &poly, + int wn) +{ + double wred = M_PI / 4.0 * wn; + Eigen::VectorXcd phase_wi = phase_stable(poly, wn); - return phase_wi; - } + std::complex res(0.0, 0.0); - inline std::complex compute_unl_inner(const PiecewiseLegendrePoly &poly, int wn) - { - double wred = M_PI / 4.0 * wn; - Eigen::VectorXcd phase_wi = phase_stable(poly, wn); + int num_orders = poly.get_data().rows(); + int num_j = poly.get_data().cols(); - std::complex res(0.0, 0.0); + for (int order = 0; order < num_orders; ++order) { + int l = order; + for (int j = 0; j < num_j; ++j) { + double data_value = poly.get_data()(order, j); + double delta_x_j = poly.delta_x(j); + double norm_j = poly.norms(j); - int num_orders = poly.get_data().rows(); - int num_j = poly.get_data().cols(); + double wred_delta_x = wred * delta_x_j; + std::complex tnl = get_tnl(l, wred_delta_x); + std::complex phase = phase_wi(j); - for (int order = 0; order < num_orders; ++order) - { - int l = order; - for (int j = 0; j < num_j; ++j) - { - double data_value = poly.get_data()(order, j); - double delta_x_j = poly.delta_x(j); - double norm_j = poly.norms(j); - - double wred_delta_x = wred * delta_x_j; - std::complex tnl = get_tnl(l, wred_delta_x); - std::complex phase = phase_wi(j); - - res += data_value * tnl * phase / norm_j; - } + res += data_value * tnl * phase / norm_j; } + } - res /= std::sqrt(2.0); + res /= std::sqrt(2.0); - return res; - } + return res; +} } // namespace sparseir namespace sparseir { @@ -538,12 +565,14 @@ class PiecewiseLegendrePolyVector { std::vector polyvec; // Constructors - PiecewiseLegendrePolyVector() {} + PiecewiseLegendrePolyVector() { } // Constructor with polyvec - PiecewiseLegendrePolyVector(const std::vector& polyvec) - : polyvec(polyvec) {} - + PiecewiseLegendrePolyVector( + const std::vector &polyvec) + : polyvec(polyvec) + { + } // Add iterator support using iterator = std::vector::iterator; @@ -559,7 +588,8 @@ class PiecewiseLegendrePolyVector { // Constructor with data tensor, knots, and optional symm vector PiecewiseLegendrePolyVector(const Eigen::Tensor& data, const Eigen::VectorXd& knots, - const std::vector& symm = std::vector()) + const std::vector& symm = + std::vector()) { int npolys = data.dimension(2); if (!symm.empty() && symm.size() != npolys) { @@ -623,23 +653,35 @@ class PiecewiseLegendrePolyVector { // Accessors size_t size() const { return polyvec.size(); } - const PiecewiseLegendrePoly& operator[](size_t i) const { + const PiecewiseLegendrePoly &operator[](size_t i) const + { return polyvec[i]; } - PiecewiseLegendrePoly& operator[](size_t i) { - return polyvec[i]; - } + PiecewiseLegendrePoly &operator[](size_t i) { return polyvec[i]; } // Functions to mimic Julia's property accessors double xmin() const { return polyvec.empty() ? 0.0 : polyvec[0].xmin; } double xmax() const { return polyvec.empty() ? 0.0 : polyvec[0].xmax; } - Eigen::VectorXd get_knots() const { return polyvec.empty() ? Eigen::VectorXd() : polyvec[0].knots; } - Eigen::VectorXd get_delta_x() const { return polyvec.empty() ? Eigen::VectorXd() : polyvec[0].delta_x; } - int get_polyorder() const { return polyvec.empty() ? 0 : polyvec[0].polyorder; } - Eigen::VectorXd get_norms() const { return polyvec.empty() ? Eigen::VectorXd() : polyvec[0].norms; } + Eigen::VectorXd get_knots() const + { + return polyvec.empty() ? 
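// For reference, compute_unl_inner() above assembles
//     \hat{u}(n) = (1/\sqrt{2}) \sum_{l,j} d_{l,j} \,
//                  t_l(\pi n \Delta x_j / 4) \, \varphi_j(n) / N_j,
// where d_{l,j} are the Legendre coefficients on segment j, t_l is get_tnl,
// \varphi_j(n) is the stabilized phase from phase_stable, and N_j are the
// segment norms.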
Eigen::VectorXd() : polyvec[0].knots; + } + Eigen::VectorXd get_delta_x() const + { + return polyvec.empty() ? Eigen::VectorXd() : polyvec[0].delta_x; + } + int get_polyorder() const + { + return polyvec.empty() ? 0 : polyvec[0].polyorder; + } + Eigen::VectorXd get_norms() const + { + return polyvec.empty() ? Eigen::VectorXd() : polyvec[0].norms; + } - std::vector get_symm() const { + std::vector get_symm() const + { std::vector symms(polyvec.size()); for (size_t i = 0; i < polyvec.size(); ++i) { symms[i] = polyvec[i].symm; @@ -648,7 +690,8 @@ class PiecewiseLegendrePolyVector { } // Function to retrieve data as Eigen Tensor - Eigen::Tensor get_data() const { + Eigen::Tensor get_data() const + { if (polyvec.empty()) { return Eigen::Tensor(); } @@ -667,7 +710,8 @@ class PiecewiseLegendrePolyVector { } // Evaluate the vector of polynomials at x - Eigen::VectorXd operator()(double x) const { + Eigen::VectorXd operator()(double x) const + { Eigen::VectorXd results(polyvec.size()); for (size_t i = 0; i < polyvec.size(); ++i) { results[i] = polyvec[i](x); @@ -676,7 +720,8 @@ class PiecewiseLegendrePolyVector { } // Evaluate the vector of polynomials at multiple x - Eigen::MatrixXd operator()(const Eigen::VectorXd& xs) const { + Eigen::MatrixXd operator()(const Eigen::VectorXd &xs) const + { Eigen::MatrixXd results(polyvec.size(), xs.size()); for (size_t i = 0; i < polyvec.size(); ++i) { results.row(i) = polyvec[i](xs); @@ -686,8 +731,8 @@ class PiecewiseLegendrePolyVector { /* // Overlap function - std::vector overlap(std::function f, double rtol = std::numeric_limits::epsilon(), - bool return_error = false) const { + std::vector overlap(std::function f, double rtol = + std::numeric_limits::epsilon(), bool return_error = false) const { std::vector results; for (const auto& poly : polyvec) { double integral = poly.overlap(f, rtol, return_error); @@ -697,349 +742,344 @@ class PiecewiseLegendrePolyVector { } // Output function - friend std::ostream& operator<<(std::ostream& os, const PiecewiseLegendrePolyVector& polys) { - os << polys.size() << "-element PiecewiseLegendrePolyVector "; - os << "on [" << polys.xmin() << ", " << polys.xmax() << "]"; - return os; + friend std::ostream& operator<<(std::ostream& os, const + PiecewiseLegendrePolyVector& polys) { os << polys.size() << "-element + PiecewiseLegendrePolyVector "; os << "on [" << polys.xmin() << ", " << + polys.xmax() << "]"; return os; } */ }; } // namespace sparseir -namespace sparseir -{ +namespace sparseir { - // Forward declarations - class PiecewiseLegendrePoly; - class Statistics; +// Forward declarations +class PiecewiseLegendrePoly; +class Statistics; - // PowerModel class template - template - class PowerModel - { - public: - std::vector moments; +// PowerModel class template +template +class PowerModel { +public: + std::vector moments; - PowerModel(const std::vector &moments_) : moments(moments_) {} - }; + PowerModel(const std::vector &moments_) : moments(moments_) { } +}; - // Bosonic and Fermionic statistics classes - class BosonicStatistics : public Statistics - { - public: - int zeta() const override { return 1; } - }; +// Bosonic and Fermionic statistics classes +class BosonicStatistics : public Statistics { +public: + int zeta() const override { return 1; } +}; - // PiecewiseLegendreFT class template - template - class PiecewiseLegendreFT +// PiecewiseLegendreFT class template +template +class PiecewiseLegendreFT { +public: + PiecewiseLegendrePoly poly; + T n_asymp; + PowerModel model; + + PiecewiseLegendreFT(const 
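// Usage sketch for the vectorized evaluation above: N basis functions on M
// points give an N x M matrix, one row per function. Hypothetical snippet
// (assumes a populated PiecewiseLegendrePolyVector u):
//
//     Eigen::VectorXd xs = Eigen::VectorXd::LinSpaced(5, -1.0, 1.0);
//     Eigen::MatrixXd vals = u(xs);     // vals(i, j) = u_i(xs[j])
//     Eigen::VectorXd at_zero = u(0.0); // every function at one point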
PiecewiseLegendrePoly &poly_, + const StatisticsType &stat, + T n_asymp_ = std::numeric_limits::infinity()) + : poly(poly_), n_asymp(n_asymp_) { - public: - PiecewiseLegendrePoly poly; - T n_asymp; - PowerModel model; - - PiecewiseLegendreFT(const PiecewiseLegendrePoly &poly_, const StatisticsType &stat, T n_asymp_ = std::numeric_limits::infinity()) - : poly(poly_), n_asymp(n_asymp_) - { - if (poly.xmin != -1.0 || poly.xmax != 1.0) - { - throw std::invalid_argument("Only interval [-1, 1] is supported"); - } - model = power_model(stat, poly); + if (poly.xmin != -1.0 || poly.xmax != 1.0) { + throw std::invalid_argument("Only interval [-1, 1] is supported"); } + model = power_model(stat, poly); + } - T get_n_asymp() const { return n_asymp; } - int zeta() const { return static_cast(StatisticsType()).zeta(); } - const PiecewiseLegendrePoly &get_poly() const { return poly; } + T get_n_asymp() const { return n_asymp; } + int zeta() const + { + return static_cast(StatisticsType()).zeta(); + } + const PiecewiseLegendrePoly &get_poly() const { return poly; } - // Overload operator() for MatsubaraFreq - std::complex operator()(const MatsubaraFreq &omega) const - { - int n = static_cast(omega); - if (std::abs(n) < n_asymp) - { - return compute_unl_inner(poly, n); - } - else - { - return giw(*this, n); - } + // Overload operator() for MatsubaraFreq + std::complex + operator()(const MatsubaraFreq &omega) const + { + int n = static_cast(omega); + if (std::abs(n) < n_asymp) { + return compute_unl_inner(poly, n); + } else { + return giw(*this, n); } + } - // Overload operator() for integer frequency - std::complex operator()(int n) const - { - return (*this)(MatsubaraFreq(n)); - } + // Overload operator() for integer frequency + std::complex operator()(int n) const + { + return (*this)(MatsubaraFreq(n)); + } - // Overload operator() for a vector of frequencies - template - std::vector> operator()(const Container &ns) const - { - std::vector> res; - res.reserve(ns.size()); - for (const auto &n : ns) - { - res.push_back((*this)(n)); - } - return res; + // Overload operator() for a vector of frequencies + template + std::vector> operator()(const Container &ns) const + { + std::vector> res; + res.reserve(ns.size()); + for (const auto &n : ns) { + res.push_back((*this)(n)); } + return res; + } - private: - // Function to compute the Fourier transform for low frequencies - std::complex compute_unl_inner(const PiecewiseLegendrePoly &poly, int wn) const; - - // Function to compute the asymptotic model for high frequencies - std::complex giw(const PiecewiseLegendreFT &polyFT, int wn) const; +private: + // Function to compute the Fourier transform for low frequencies + std::complex compute_unl_inner(const PiecewiseLegendrePoly &poly, + int wn) const; - // Function to evaluate a polynomial at a complex point - std::complex evalpoly(const std::complex &x, const std::vector &coeffs) const; + // Function to compute the asymptotic model for high frequencies + std::complex giw(const PiecewiseLegendreFT &polyFT, int wn) const; - // Power model computation - PowerModel power_model(const Statistics &stat, const PiecewiseLegendrePoly &poly) const; - }; + // Function to evaluate a polynomial at a complex point + std::complex evalpoly(const std::complex &x, + const std::vector &coeffs) const; - // Implementations of member functions + // Power model computation + PowerModel power_model(const Statistics &stat, + const PiecewiseLegendrePoly &poly) const; +}; - template - std::complex PiecewiseLegendreFT::compute_unl_inner(const 
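// Reading guide for operator()(MatsubaraFreq) above: |n| < n_asymp takes
// the exact piecewise transform, larger |n| the asymptotic tail
//     G(iw) ~ sum_k m_k * (1/iw)^{k+1},
// whose polynomial part evalpoly() evaluates by Horner's rule (right to
// left, one multiply-add per coefficient). A standalone check of that
// rule; illustrative only:
#include <cassert>
#include <cmath>
#include <vector>

inline void horner_check()
{
    const std::vector<double> c = {1.0, 2.0, 3.0}; // 1 + 2x + 3x^2
    double acc = 0.0;
    for (auto it = c.rbegin(); it != c.rend(); ++it)
        acc = acc * 2.0 + *it; // evaluate at x = 2
    assert(std::abs(acc - 17.0) < 1e-14); // 1 + 4 + 12
}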
PiecewiseLegendrePoly &poly, int wn) const - { - double wred = M_PI / 4.0 * wn; - Eigen::VectorXcd phase_wi = phase_stable(poly, wn); - std::complex res = 0.0; +// Implementations of member functions - int order_max = poly.data.rows(); - int segment_count = poly.data.cols(); - for (int order = 0; order < order_max; ++order) - { - for (int j = 0; j < segment_count; ++j) - { - double data_oj = poly.data(order, j); - std::complex tnl = get_tnl(order, wred * poly.delta_x(j)); - res += data_oj * tnl * phase_wi(j) / poly.norms(j); - } +template +std::complex PiecewiseLegendreFT::compute_unl_inner( + const PiecewiseLegendrePoly &poly, int wn) const +{ + double wred = M_PI / 4.0 * wn; + Eigen::VectorXcd phase_wi = phase_stable(poly, wn); + std::complex res = 0.0; + + int order_max = poly.data.rows(); + int segment_count = poly.data.cols(); + for (int order = 0; order < order_max; ++order) { + for (int j = 0; j < segment_count; ++j) { + double data_oj = poly.data(order, j); + std::complex tnl = get_tnl(order, wred * poly.delta_x(j)); + res += data_oj * tnl * phase_wi(j) / poly.norms(j); } - return res / std::sqrt(2.0); - } - - template - std::complex PiecewiseLegendreFT::giw(const PiecewiseLegendreFT &polyFT, int wn) const - { - std::complex iw(0.0, M_PI / 2.0 * wn); - if (wn == 0) - return std::complex(0.0, 0.0); - std::complex inv_iw = 1.0 / iw; - std::complex result = inv_iw * evalpoly(inv_iw, model.moments); - return result; } + return res / std::sqrt(2.0); +} - template - std::complex PiecewiseLegendreFT::evalpoly(const std::complex &x, const std::vector &coeffs) const - { - std::complex result(0.0, 0.0); - for (auto it = coeffs.rbegin(); it != coeffs.rend(); ++it) - { - result = result * x + *it; - } - return result; +template +std::complex +PiecewiseLegendreFT::giw(const PiecewiseLegendreFT &polyFT, + int wn) const +{ + std::complex iw(0.0, M_PI / 2.0 * wn); + if (wn == 0) + return std::complex(0.0, 0.0); + std::complex inv_iw = 1.0 / iw; + std::complex result = inv_iw * evalpoly(inv_iw, model.moments); + return result; +} + +template +std::complex PiecewiseLegendreFT::evalpoly( + const std::complex &x, const std::vector &coeffs) const +{ + std::complex result(0.0, 0.0); + for (auto it = coeffs.rbegin(); it != coeffs.rend(); ++it) { + result = result * x + *it; } + return result; +} - // Assume implementations of derivs, power_moments_, and power_model - // For the purpose of this example, they are simplified placeholders +// Assume implementations of derivs, power_moments_, and power_model +// For the purpose of this example, they are simplified placeholders - // Placeholder for derivative computations at x = 1.0 - std::vector derivs(const PiecewiseLegendrePoly &ppoly, double x); +// Placeholder for derivative computations at x = 1.0 +std::vector derivs(const PiecewiseLegendrePoly &ppoly, double x); - // Placeholder for power moments computation - std::vector &power_moments_(const Statistics &stat, std::vector &deriv_x1, int l); +// Placeholder for power moments computation +std::vector &power_moments_(const Statistics &stat, + std::vector &deriv_x1, int l); - // Power model computation function - template - PowerModel PiecewiseLegendreFT::power_model(const Statistics &stat, const PiecewiseLegendrePoly &poly) const - { - std::vector deriv_x1 = derivs(poly, 1.0); - std::vector &moments = power_moments_(stat, deriv_x1, poly.l); - return PowerModel(moments); - } - - class FermionicStatistics : public Statistics - { - public: - int zeta() const override { return -1; } - }; +// Power model 
computation function
+template <typename T, typename S>
+PowerModel<T> PiecewiseLegendreFT<T, S>::power_model(
+    const Statistics &stat, const PiecewiseLegendrePoly &poly) const
+{
+    std::vector<T> deriv_x1 = derivs(poly, 1.0);
+    std::vector<T> &moments = power_moments_(stat, deriv_x1, poly.l);
+    return PowerModel<T>(moments);
+}

-    class FermionicStatistics : public Statistics
-    {
-    public:
-        int zeta() const override { return -1; }
-    };
+class FermionicStatistics : public Statistics {
+public:
+    int zeta() const override { return -1; }
+};

-    // Assume implementations of derivs, power_moments_, and power_model
-    // For the purpose of this example, they are simplified placeholders
+// Assume implementations of derivs, power_moments_, and power_model
+// For the purpose of this example, they are simplified placeholders

-    // Placeholder for derivative computations at x = 1.0
-    std::vector<double> derivs(const PiecewiseLegendrePoly &ppoly, double x);
+// Placeholder for derivative computations at x = 1.0
+std::vector<double> derivs(const PiecewiseLegendrePoly &ppoly, double x);

-    // Placeholder for power moments computation
-    std::vector<double> &power_moments_(const Statistics &stat, std::vector<double> &deriv_x1, int l);
+// Placeholder for power moments computation
+std::vector<double> &power_moments_(const Statistics &stat,
+                                    std::vector<double> &deriv_x1, int l);

-    // Assume implementations of derivs, power_moments_, and power_model
-    // For the purpose of this example, they are simplified placeholders
-    // Placeholder for derivative computations at x = 1.0
-    std::vector<double> derivs(const PiecewiseLegendrePoly &ppoly, double x);
-    // Placeholder for power moments computation
-    std::vector<double> &power_moments_(const Statistics &stat, std::vector<double> &deriv_x1, int l);
-    // Assume implementations of derivs, power_moments_, and power_model
-    // For the purpose of this example, they are simplified placeholders
-    // Placeholder for derivative computations at x = 1.0
-    std::vector<double> derivs(const PiecewiseLegendrePoly &ppoly, double x);
-    // Placeholder for power moments computation
-    std::vector<double> &power_moments_(const Statistics &stat, std::vector<double> &deriv_x1, int l);
} // namespace sparseir

namespace sparseir {
-    // PiecewiseLegendreFTVector class in C++
+// PiecewiseLegendreFTVector class in C++
+
+template <typename T, typename S>
+class PiecewiseLegendreFTVector {
+private:
+    std::vector<PiecewiseLegendreFT<T, S>> polyvec;
+
+public:
+    // Default constructor
+    // PiecewiseLegendreFTVector() {}

-    template <typename T, typename S>
-    class PiecewiseLegendreFTVector
+    // Constructor from vector of PiecewiseLegendreFT
+    PiecewiseLegendreFTVector(
+        const std::vector<PiecewiseLegendreFT<T, S>> &polyvec_)
+        : polyvec(polyvec_)
     {
-    private:
-        std::vector<PiecewiseLegendreFT<T, S>> polyvec;
-
-    public:
-        // Default constructor
-        //PiecewiseLegendreFTVector() {}
-
-        // Constructor from vector of PiecewiseLegendreFT
-        PiecewiseLegendreFTVector(const std::vector<PiecewiseLegendreFT<T, S>> &polyvec_)
-            : polyvec(polyvec_) {}
-
-        /*
-        // Constructor from PiecewiseLegendrePolyVector and Statistics
-        
PiecewiseLegendreFTVector(const PiecewiseLegendrePolyVector &polys, - const Statistics &stat, - double n_asymp = std::numeric_limits::infinity()) - { - polyvec.reserve(polys.size()); - for (const auto &poly : polys) - { - polyvec.emplace_back(poly, stat, n_asymp); - } - } - */ + } - // Get the size of the vector - size_t size() const + /* + // Constructor from PiecewiseLegendrePolyVector and Statistics + PiecewiseLegendreFTVector(const PiecewiseLegendrePolyVector &polys, + const Statistics &stat, + double n_asymp = + std::numeric_limits::infinity()) + { + polyvec.reserve(polys.size()); + for (const auto &poly : polys) { - return polyvec.size(); + polyvec.emplace_back(poly, stat, n_asymp); } + } + */ - // Indexing operator (non-const) - PiecewiseLegendreFT &operator[](size_t i) - { - return polyvec[i]; - } + // Get the size of the vector + size_t size() const { return polyvec.size(); } - // Indexing operator (const) - const PiecewiseLegendreFT &operator[](size_t i) const - { - return polyvec[i]; - } + // Indexing operator (non-const) + PiecewiseLegendreFT &operator[](size_t i) { return polyvec[i]; } - // Indexing with a vector of indices - PiecewiseLegendreFTVector operator[](const std::vector &indices) const - { - std::vector> new_polyvec; - new_polyvec.reserve(indices.size()); - for (size_t idx : indices) - { - new_polyvec.push_back(polyvec[idx]); - } - return PiecewiseLegendreFTVector{std::move(new_polyvec)}; - } + // Indexing operator (const) + const PiecewiseLegendreFT &operator[](size_t i) const + { + return polyvec[i]; + } - // Set element at index i - void set(size_t i, const PiecewiseLegendreFT &p) - { - if (i < polyvec.size()) - { - polyvec[i] = p; - } + // Indexing with a vector of indices + PiecewiseLegendreFTVector + operator[](const std::vector &indices) const + { + std::vector> new_polyvec; + new_polyvec.reserve(indices.size()); + for (size_t idx : indices) { + new_polyvec.push_back(polyvec[idx]); } + return PiecewiseLegendreFTVector{std::move(new_polyvec)}; + } - // Create a similar PiecewiseLegendreFTVector - PiecewiseLegendreFTVector similar() const - { - return PiecewiseLegendreFTVector(); + // Set element at index i + void set(size_t i, const PiecewiseLegendreFT &p) + { + if (i < polyvec.size()) { + polyvec[i] = p; } + } - // Get n_asymp from the first element - double n_asymp() const - { - return polyvec.empty() ? std::numeric_limits::infinity() : polyvec.front().n_asymp(); - } + // Create a similar PiecewiseLegendreFTVector + PiecewiseLegendreFTVector similar() const + { + return PiecewiseLegendreFTVector(); + } - // Get statistics from the first element - S statistics() const - { - return polyvec.front().statistics(); - } + // Get n_asymp from the first element + double n_asymp() const + { + return polyvec.empty() ? std::numeric_limits::infinity() + : polyvec.front().n_asymp(); + } - // Get zeta from the first element - double zeta() const - { - return polyvec.empty() ? 0.0 : polyvec.front().zeta(); - } + // Get statistics from the first element + S statistics() const { return polyvec.front().statistics(); } - /* - // Get poly as PiecewiseLegendrePolyVector - PiecewiseLegendrePolyVector poly() const - { - std::vector> polys; - polys.reserve(polyvec.size()); - for (const auto &pft : polyvec) - { - polys.push_back(pft.poly()); - } - return PiecewiseLegendrePolyVector{std::move(polys)}; - } - */ + // Get zeta from the first element + double zeta() const + { + return polyvec.empty() ? 
0.0 : polyvec.front().zeta(); + } - // Overload operator() for MatsubaraFreq - Eigen::VectorXcd operator()(const MatsubaraFreq &omega) const + /* + // Get poly as PiecewiseLegendrePolyVector + PiecewiseLegendrePolyVector poly() const + { + std::vector> polys; + polys.reserve(polyvec.size()); + for (const auto &pft : polyvec) { - size_t num_funcs = polyvec.size(); - Eigen::VectorXcd result(num_funcs); - for (size_t i = 0; i < num_funcs; ++i) - { - result(i) = polyvec[i](omega); - } - return result; + polys.push_back(pft.poly()); } + return PiecewiseLegendrePolyVector{std::move(polys)}; + } + */ - // Overload operator() for integer n - Eigen::VectorXcd operator()(int n) const - { - return (*this)(MatsubaraFreq(n)); + // Overload operator() for MatsubaraFreq + Eigen::VectorXcd operator()(const MatsubaraFreq &omega) const + { + size_t num_funcs = polyvec.size(); + Eigen::VectorXcd result(num_funcs); + for (size_t i = 0; i < num_funcs; ++i) { + result(i) = polyvec[i](omega); } + return result; + } - // Overload operator() for array of integers - Eigen::MatrixXcd operator()(const Eigen::ArrayXi &n_array) const - { - size_t num_funcs = polyvec.size(); - size_t num_freqs = n_array.size(); - Eigen::MatrixXcd result(num_funcs, num_freqs); - for (size_t i = 0; i < num_funcs; ++i) - { - for (size_t j = 0; j < num_freqs; ++j) - { - result(i, j) = polyvec[i](n_array[j]); - } + // Overload operator() for integer n + Eigen::VectorXcd operator()(int n) const + { + return (*this)(MatsubaraFreq(n)); + } + + // Overload operator() for array of integers + Eigen::MatrixXcd operator()(const Eigen::ArrayXi &n_array) const + { + size_t num_funcs = polyvec.size(); + size_t num_freqs = n_array.size(); + Eigen::MatrixXcd result(num_funcs, num_freqs); + for (size_t i = 0; i < num_funcs; ++i) { + for (size_t j = 0; j < num_freqs; ++j) { + result(i, j) = polyvec[i](n_array[j]); } - return result; } - }; + return result; + } +}; } // namespace sparseir \ No newline at end of file diff --git a/include/sparseir/svd.hpp b/include/sparseir/svd.hpp index 0eda606..cea678d 100644 --- a/include/sparseir/svd.hpp +++ b/include/sparseir/svd.hpp @@ -1,37 +1,40 @@ #pragma once // C++ Standard Library headers -#include #include +#include // Eigen headers #include -namespace sparseir +namespace sparseir { + +using namespace Eigen; + +template +std::tuple, VectorX, MatrixX> +compute_svd(const MatrixX &A, int n_sv_hint = 0, + std::string strategy = "default") { + if (n_sv_hint != 0) { + std::cout << "n_sv_hint is set but will not be used in the current " + "implementation!" + << std::endl; + } - using namespace Eigen; - - template - std::tuple, VectorX, MatrixX> compute_svd(const MatrixX &A, int n_sv_hint = 0, std::string strategy = "default") - { - if (n_sv_hint != 0) - { - std::cout << "n_sv_hint is set but will not be used in the current implementation!" << std::endl; - } - - if (strategy != "default") - { - std::cout << "strategy is set but will not be used in the current implementation!" << std::endl; - } - - MatrixX A_copy = A; // create a copy of A - return tsvd(A_copy); - // auto svd_result = tsvd(A_copy); - // MatrixX u = std::get<0>(svd_result); - // VectorX s = std::get<1>(svd_result); - // MatrixX v = std::get<2>(svd_result); - - // return std::make_tuple(u, s, v); + if (strategy != "default") { + std::cout << "strategy is set but will not be used in the current " + "implementation!" 
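// Usage sketch for compute_svd() (both optional arguments currently fall
// through to tsvd with a warning). Assumes the double-double scalar used
// elsewhere in this project:
//
//     Eigen::MatrixX<xprec::DDouble> A = ...; // e.g. a sampled kernel
//     auto [u, s, v] = sparseir::compute_svd(A); // C++17 bindings
//     // A is approximately u * s.asDiagonal() * v.transpose()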
+ << std::endl; } + + MatrixX A_copy = A; // create a copy of A + return tsvd(A_copy); + // auto svd_result = tsvd(A_copy); + // MatrixX u = std::get<0>(svd_result); + // VectorX s = std::get<1>(svd_result); + // MatrixX v = std::get<2>(svd_result); + + // return std::make_tuple(u, s, v); } +} // namespace sparseir diff --git a/include/sparseir/sve.hpp b/include/sparseir/sve.hpp index a75d349..b60c312 100644 --- a/include/sparseir/sve.hpp +++ b/include/sparseir/sve.hpp @@ -3,15 +3,15 @@ #pragma once #include -#include // Include Tensor module -#include -#include -#include -#include -#include #include +#include #include +#include #include +#include +#include +#include // Include Tensor module +#include // Include other necessary headers here @@ -21,35 +21,38 @@ namespace sparseir { template class SVEResult; -inline std::tuple choose_accuracy(double epsilon, std::string Twork) { +inline std::tuple +choose_accuracy(double epsilon, std::string Twork) +{ if (Twork == "Float64") { if (epsilon >= std::sqrt(std::numeric_limits::epsilon())) { return std::make_tuple(epsilon, Twork, "default"); } else { - std::cerr << "Warning: Basis cutoff is " << epsilon << ", which is below √ε with ε = " - << std::numeric_limits::epsilon() << ".\n" - << "Expect singular values and basis functions for large l to have lower precision than the cutoff.\n"; + std::cerr << "Warning: Basis cutoff is " << epsilon + << ", which is below √ε with ε = " + << std::numeric_limits::epsilon() << ".\n" + << "Expect singular values and basis functions for large " + "l to have lower precision than the cutoff.\n"; return std::make_tuple(epsilon, Twork, "accurate"); } - } else - { + } else { // Handle the case for xprec::DDouble - if (epsilon >= std::sqrt(std::numeric_limits::epsilon())) - { + if (epsilon >= std::sqrt(std::numeric_limits::epsilon())) { return std::make_tuple(epsilon, Twork, "default"); - } - else - { - std::cerr << "Warning: Basis cutoff is " << epsilon << ", which is below √ε with ε = " + } else { + std::cerr << "Warning: Basis cutoff is " << epsilon + << ", which is below √ε with ε = " << std::numeric_limits::epsilon() << ".\n" - << "Expect singular values and basis functions for large l to have lower precision than the cutoff.\n"; + << "Expect singular values and basis functions for large " + "l to have lower precision than the cutoff.\n"; return std::make_tuple(epsilon, Twork, "accurate"); } } - } -inline std::tuple choose_accuracy(double epsilon, std::nullptr_t) { +inline std::tuple +choose_accuracy(double epsilon, std::nullptr_t) +{ if (epsilon >= std::sqrt(std::numeric_limits::epsilon())) { return std::make_tuple(epsilon, "Float64", "default"); } else { @@ -57,9 +60,11 @@ inline std::tuple choose_accuracy(double epsil // This should work, but catch2 can't catch this warning // Therefore we suppress this block if (epsilon < std::sqrt(std::numeric_limits::epsilon())) { - std::cerr << "Warning: Basis cutoff is " << epsilon << ", which is below √ε with ε = " + std::cerr << "Warning: Basis cutoff is " << epsilon << ", which is + below √ε with ε = " << std::numeric_limits::epsilon() << ".\n" - << "Expect singular values and basis functions for large l to have lower precision than the cutoff.\n"; + << "Expect singular values and basis functions for large l + to have lower precision than the cutoff.\n"; } */ return std::make_tuple(epsilon, "Float64x2", "default"); @@ -68,36 +73,48 @@ inline std::tuple choose_accuracy(double epsil // Equivalent to Julia implementation: // julia> choose_accuracy(::Nothing, Twork) = 
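// Numeric anchors for the accuracy rule above: epsilon is compared with
// sqrt(eps) of the working type because relative SVD accuracy degrades for
// singular values below that scale. Concretely,
//     sqrt(eps(Float64))   ~ 1.49e-8,
//     sqrt(eps(Float64x2)) ~ 2.22e-16 (the constant hard-coded below).
// Requests under the threshold still run, but with the "accurate" strategy
// and a warning on stderr.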
sqrt(eps(Twork)), Twork, :default -inline std::tuple choose_accuracy(std::nullptr_t, std::string Twork) { - if (Twork == "Float64x2"){ - const double epsilon = 2.220446049250313e-16; // julia> using MultiFloats; Float64(sqrt(eps(Float64x2))) +inline std::tuple +choose_accuracy(std::nullptr_t, std::string Twork) +{ + if (Twork == "Float64x2") { + const double epsilon = + 2.220446049250313e-16; // julia> using MultiFloats; + // Float64(sqrt(eps(Float64x2))) return std::make_tuple(epsilon, Twork, "default"); } else { - return std::make_tuple(std::sqrt(std::numeric_limits::epsilon()), Twork, "default"); + return std::make_tuple( + std::sqrt(std::numeric_limits::epsilon()), Twork, + "default"); } } // Equivalent to Julia implementation: -// julia> choose_accuracy(::Nothing, ::Nothing) = Float64(sqrt(eps(T_MAX))), T_MAX, :default -inline std::tuple choose_accuracy(std::nullptr_t, std::nullptr_t) { - const double epsilon = 2.220446049250313e-16; // julia> using MultiFloats; Float64(sqrt(eps(Float64x2))) +// julia> choose_accuracy(::Nothing, ::Nothing) = Float64(sqrt(eps(T_MAX))), +// T_MAX, :default +inline std::tuple +choose_accuracy(std::nullptr_t, std::nullptr_t) +{ + const double epsilon = + 2.220446049250313e-16; // julia> using MultiFloats; + // Float64(sqrt(eps(Float64x2))) return std::make_tuple(epsilon, "Float64x2", "default"); } -inline std::tuple choose_accuracy(double epsilon, std::string Twork, std::string svd_strat) { +inline std::tuple +choose_accuracy(double epsilon, std::string Twork, std::string svd_strat) +{ std::string auto_svd_strat; std::tie(epsilon, Twork, auto_svd_strat) = choose_accuracy(epsilon, Twork); - std::string final_svd_strat = (svd_strat == "auto") ? auto_svd_strat : svd_strat; + std::string final_svd_strat = + (svd_strat == "auto") ? auto_svd_strat : svd_strat; return std::make_tuple(epsilon, Twork, final_svd_strat); } // Function to canonicalize basis functions -inline void canonicalize( - PiecewiseLegendrePolyVector &u, - PiecewiseLegendrePolyVector &v) +inline void canonicalize(PiecewiseLegendrePolyVector &u, + PiecewiseLegendrePolyVector &v) { - for (size_t i = 0; i < u.size(); ++i) - { + for (size_t i = 0; i < u.size(); ++i) { double gauge = std::copysign(1.0, u.polyvec[i](1.0)); u.polyvec[i].data *= gauge; v.polyvec[i].data *= gauge; @@ -108,16 +125,14 @@ inline void canonicalize( template class AbstractSVE { public: - virtual ~AbstractSVE() {} + virtual ~AbstractSVE() { } virtual std::vector> matrices() const = 0; - virtual SVEResult postprocess( - const std::vector>& u_list, - const std::vector>& s_list, - const std::vector>& v_list - ) const = 0; + virtual SVEResult + postprocess(const std::vector> &u_list, + const std::vector> &s_list, + const std::vector> &v_list) const = 0; }; - // SamplingSVE class /** * @brief Sampling-based SVE to SVD translation. @@ -143,10 +158,10 @@ class SamplingSVE : public AbstractSVE { // Constructor SamplingSVE(K kernel_, double epsilon_, int n_gauss_ = -1) - : kernel(kernel_), - epsilon(epsilon_) + : kernel(kernel_), epsilon(epsilon_) { - n_gauss = (n_gauss_ > 0) ? n_gauss_ : sve_hints(kernel, epsilon).ngauss(); + n_gauss = + (n_gauss_ > 0) ? 
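// matrices() below performs the standard SVE-to-SVD discretization: with
// Gauss nodes and weights (x_i, w_i) and (y_j, w'_j), the kernel is
// sampled as
//     A_ij = sqrt(w_i) * K(x_i, y_j) * sqrt(w'_j),
// so the SVD of A approximates the singular value expansion of K in the
// weighted L2 inner products; canonicalize() above then fixes the sign
// gauge so each u_l(1) is positive.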
n_gauss_ : sve_hints(kernel, epsilon).ngauss(); // TODO: Implement Rule(n_gauss) rule = legendre(n_gauss); auto hints = sve_hints(kernel, epsilon); @@ -158,16 +173,15 @@ class SamplingSVE : public AbstractSVE { } // Compute matrices for SVD - std::vector> matrices() const override { + std::vector> matrices() const override + { std::vector> mats; Eigen::MatrixX A = matrix_from_gauss(kernel, gauss_x, gauss_y); // Element-wise multiplication with square roots of weights - for (int i = 0; i < gauss_x.w.size(); ++i) - { + for (int i = 0; i < gauss_x.w.size(); ++i) { A.row(i) *= sqrt_impl(gauss_x.w[i]); } - for (int j = 0; j < gauss_y.w.size(); ++j) - { + for (int j = 0; j < gauss_y.w.size(); ++j) { A.col(j) *= sqrt_impl(gauss_y.w[j]); } mats.push_back(A); @@ -175,10 +189,10 @@ class SamplingSVE : public AbstractSVE { } // Postprocess to construct SVEResult - SVEResult postprocess( - const std::vector> &u_list, - const std::vector> &s_list, - const std::vector> &v_list) const override + SVEResult + postprocess(const std::vector> &u_list, + const std::vector> &s_list, + const std::vector> &v_list) const override { // Assuming there's only one matrix in u_list, s_list, and v_list const Eigen::MatrixX &u = u_list[0]; @@ -186,8 +200,10 @@ class SamplingSVE : public AbstractSVE { const Eigen::MatrixX &v = v_list[0]; Eigen::VectorXd s = s_.template cast(); - Eigen::VectorX gauss_x_w = Eigen::VectorX::Map(gauss_x.w.data(), gauss_x.w.size()); - Eigen::VectorX gauss_y_w = Eigen::VectorX::Map(gauss_y.w.data(), gauss_y.w.size()); + Eigen::VectorX gauss_x_w = + Eigen::VectorX::Map(gauss_x.w.data(), gauss_x.w.size()); + Eigen::VectorX gauss_y_w = + Eigen::VectorX::Map(gauss_y.w.data(), gauss_y.w.size()); Eigen::MatrixX u_x = u.array().colwise() / gauss_x_w.array().sqrt(); Eigen::MatrixX v_y = v.array().colwise() / gauss_y_w.array().sqrt(); @@ -195,35 +211,29 @@ class SamplingSVE : public AbstractSVE { std::vector u_x_flatten(u_x.data(), u_x.data() + u_x.size()); std::vector v_y_flatten(v_y.data(), v_y.data() + v_y.size()); - Eigen::TensorMap> u_data(u_x_flatten.data(), n_gauss, segs_x.size() - 1, s.size()); - Eigen::TensorMap> v_data(v_y_flatten.data(), n_gauss, segs_y.size() - 1, s.size()); + Eigen::TensorMap> u_data( + u_x_flatten.data(), n_gauss, segs_x.size() - 1, s.size()); + Eigen::TensorMap> v_data( + v_y_flatten.data(), n_gauss, segs_y.size() - 1, s.size()); Eigen::MatrixX cmat = legendre_collocation(rule); - for (int j = 0; j < u_data.dimension(1); ++j) - { - for (int k = 0; k < u_data.dimension(2); ++k) - { - for (int i = 0; i < cmat.rows(); ++i) - { + for (int j = 0; j < u_data.dimension(1); ++j) { + for (int k = 0; k < u_data.dimension(2); ++k) { + for (int i = 0; i < cmat.rows(); ++i) { u_data(i, j, k) = T(0); - for (int l = 0; l < cmat.cols(); ++l) - { + for (int l = 0; l < cmat.cols(); ++l) { u_data(i, j, k) += cmat(i, l) * u_data(l, j, k); } } } } - for (int j = 0; j < v_data.dimension(1); ++j) - { - for (int k = 0; k < v_data.dimension(2); ++k) - { - for (int i = 0; i < cmat.rows(); ++i) - { + for (int j = 0; j < v_data.dimension(1); ++j) { + for (int k = 0; k < v_data.dimension(2); ++k) { + for (int i = 0; i < cmat.rows(); ++i) { v_data(i, j, k) = T(0); - for (int l = 0; l < cmat.cols(); ++l) - { + for (int l = 0; l < cmat.cols(); ++l) { v_data(i, j, k) += cmat(i, l) * v_data(l, j, k); } } @@ -232,39 +242,33 @@ class SamplingSVE : public AbstractSVE { // Manually compute differences for dsegs_x and dsegs_y Eigen::VectorX dsegs_x(segs_x.size() - 1); - for (int i = 0; i < segs_x.size() - 1; 
++i) - { + for (int i = 0; i < segs_x.size() - 1; ++i) { dsegs_x[i] = segs_x[i + 1] - segs_x[i]; } Eigen::VectorX dsegs_y(segs_y.size() - 1); - for (int i = 0; i < segs_y.size() - 1; ++i) - { + for (int i = 0; i < segs_y.size() - 1; ++i) { dsegs_y[i] = segs_y[i + 1] - segs_y[i]; } - //u_data_3d = u_data_3d * (T(0.5) * dsegs_x).sqrt().transpose().matrix().asDiagonal(); - //v_data_3d = v_data_3d * (T(0.5) * dsegs_y).sqrt().transpose().matrix().asDiagonal(); + // u_data_3d = u_data_3d * (T(0.5) * + // dsegs_x).sqrt().transpose().matrix().asDiagonal(); v_data_3d = + // v_data_3d * (T(0.5) * + // dsegs_y).sqrt().transpose().matrix().asDiagonal(); // Using nested for loops to multiply u_data - for (int j = 0; j < u_data.dimension(1); ++j) - { - for (int k = 0; k < u_data.dimension(2); ++k) - { - for (int i = 0; i < u_data.dimension(0); ++i) - { + for (int j = 0; j < u_data.dimension(1); ++j) { + for (int k = 0; k < u_data.dimension(2); ++k) { + for (int i = 0; i < u_data.dimension(0); ++i) { u_data(i, j, k) *= sqrt_impl(T(0.5) * dsegs_x[j]); } } } // Using nested for loops to multiply v_data - for (int j = 0; j < v_data.dimension(1); ++j) - { - for (int k = 0; k < v_data.dimension(2); ++k) - { - for (int i = 0; i < v_data.dimension(0); ++i) - { + for (int j = 0; j < v_data.dimension(1); ++j) { + for (int k = 0; k < v_data.dimension(2); ++k) { + for (int i = 0; i < v_data.dimension(0); ++i) { v_data(i, j, k) *= sqrt_impl(T(0.5) * dsegs_y[j]); } } @@ -273,48 +277,49 @@ class SamplingSVE : public AbstractSVE { std::vector polyvec_u; std::vector polyvec_v; - for (int i = 0; i < u_data.dimension(2); ++i) - { - Eigen::MatrixXd slice_double(u_data.dimension(0), u_data.dimension(1)); - for (int j = 0; j < u_data.dimension(1); ++j) - { - for (int k = 0; k < u_data.dimension(2); ++k) - { + for (int i = 0; i < u_data.dimension(2); ++i) { + Eigen::MatrixXd slice_double(u_data.dimension(0), + u_data.dimension(1)); + for (int j = 0; j < u_data.dimension(1); ++j) { + for (int k = 0; k < u_data.dimension(2); ++k) { slice_double(j, k) = static_cast(u_data(j, k, i)); } } std::vector segs_x_double; segs_x_double.reserve(segs_x.size()); - for (const auto& x : segs_x) { + for (const auto &x : segs_x) { segs_x_double.push_back(static_cast(x)); } - polyvec_u.push_back(PiecewiseLegendrePoly(slice_double, - Eigen::VectorXd::Map(segs_x_double.data(), segs_x_double.size()), i)); - + polyvec_u.push_back(PiecewiseLegendrePoly( + slice_double, + Eigen::VectorXd::Map(segs_x_double.data(), + segs_x_double.size()), + i)); } // Repeat similar changes for v_data - for (int i = 0; i < v_data.dimension(2); ++i) - { - Eigen::MatrixXd slice_double(v_data.dimension(0), v_data.dimension(1)); - for (int j = 0; j < v_data.dimension(1); ++j) - { - for (int k = 0; k < v_data.dimension(2); ++k) - { + for (int i = 0; i < v_data.dimension(2); ++i) { + Eigen::MatrixXd slice_double(v_data.dimension(0), + v_data.dimension(1)); + for (int j = 0; j < v_data.dimension(1); ++j) { + for (int k = 0; k < v_data.dimension(2); ++k) { slice_double(j, k) = static_cast(v_data(j, k, i)); } } std::vector segs_y_double; segs_y_double.reserve(segs_y.size()); - for (const auto& y : segs_y) { + for (const auto &y : segs_y) { segs_y_double.push_back(static_cast(y)); } - polyvec_v.push_back(PiecewiseLegendrePoly(slice_double, - Eigen::VectorXd::Map(segs_y_double.data(), segs_y_double.size()), i)); + polyvec_v.push_back(PiecewiseLegendrePoly( + slice_double, + Eigen::VectorXd::Map(segs_y_double.data(), + segs_y_double.size()), + i)); } PiecewiseLegendrePolyVector 
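// The sqrt(0.5 * dsegs) factors applied in the loops above account for
// mapping each segment [a_j, b_j] onto the reference interval [-1, 1]: the
// change of variables carries the Jacobian (b_j - a_j)/2, and taking its
// square root keeps the resulting piecewise Legendre basis orthonormal in
// L2 rather than merely orthogonal.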
ulx(polyvec_u); @@ -325,7 +330,7 @@ class SamplingSVE : public AbstractSVE { }; // CentrosymmSVE class -template +template class CentrosymmSVE : public AbstractSVE { public: K kernel; @@ -334,53 +339,64 @@ class CentrosymmSVE : public AbstractSVE { SamplingSVE odd; int nsvals_hint; - CentrosymmSVE(const K& kernel_, double epsilon_, int n_gauss_ = -1) + CentrosymmSVE(const K &kernel_, double epsilon_, int n_gauss_ = -1) : kernel(kernel_), - epsilon(epsilon_), - even(static_cast(*get_symmetrized(kernel_, +1)), epsilon_, n_gauss_), - odd(static_cast(*get_symmetrized(kernel_, -1)), epsilon_, n_gauss_) { + epsilon(epsilon_), + even(static_cast(*get_symmetrized(kernel_, +1)), epsilon_, + n_gauss_), + odd(static_cast(*get_symmetrized(kernel_, -1)), epsilon_, + n_gauss_) + { nsvals_hint = std::max(even.nsvals_hint, odd.nsvals_hint); } - - std::vector> matrices() const override { + std::vector> matrices() const override + { auto mats_even = even.matrices(); auto mats_odd = odd.matrices(); return {mats_even[0], mats_odd[0]}; } - // Replace the vector merging code with Eigen operations - SVEResult postprocess(const std::vector>& u_list, - const std::vector>& s_list, - const std::vector>& v_list) const override { - SVEResult result_even = even.postprocess({ u_list[0] }, { s_list[0] }, { v_list[0] }); - SVEResult result_odd = odd.postprocess({ u_list[1] }, { s_list[1] }, { v_list[1] }); - - // Merge results using vectors instead of insert - std::vector u_merged; - u_merged.reserve(result_even.u.size() + result_odd.u.size()); - u_merged.insert(u_merged.end(), result_even.u.begin(), result_even.u.end()); - u_merged.insert(u_merged.end(), result_odd.u.begin(), result_odd.u.end()); - - // Concatenate singular values - Eigen::VectorXd s_merged(result_even.s.size() + result_odd.s.size()); - s_merged << result_even.s, result_odd.s; - - // Merge v vectors - std::vector v_merged; - v_merged.reserve(result_even.v.size() + result_odd.v.size()); - v_merged.insert(v_merged.end(), result_even.v.begin(), result_even.v.end()); - v_merged.insert(v_merged.end(), result_odd.v.begin(), result_odd.v.end()); - - // For segments, use the hints from the kernel class - auto hints = sve_hints(kernel, epsilon); - auto segs_x_full = hints.template segments_x(); - auto segs_y_full = hints.template segments_y(); - - // Rest of the implementation... 
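// Background for CentrosymmSVE above: a centrosymmetric kernel obeys
// K(x, y) = K(-x, -y), so the even/odd reductions
//     K_pm(x, y) = K(x, y) +/- K(x, -y)
// on the positive half-domain block-diagonalize the problem into two
// half-size SVEs solved independently. Note that the merged singular
// values below are concatenated even-then-odd; the Julia reference
// additionally sorts the merged list in descending order.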
- // Create PiecewiseLegendrePolyVector from merged vectors - PiecewiseLegendrePolyVector u_complete(u_merged); - PiecewiseLegendrePolyVector v_complete(v_merged); + // Replace the vector merging code with Eigen operations + SVEResult + postprocess(const std::vector> &u_list, + const std::vector> &s_list, + const std::vector> &v_list) const override + { + SVEResult result_even = + even.postprocess({u_list[0]}, {s_list[0]}, {v_list[0]}); + SVEResult result_odd = + odd.postprocess({u_list[1]}, {s_list[1]}, {v_list[1]}); + + // Merge results using vectors instead of insert + std::vector u_merged; + u_merged.reserve(result_even.u.size() + result_odd.u.size()); + u_merged.insert(u_merged.end(), result_even.u.begin(), + result_even.u.end()); + u_merged.insert(u_merged.end(), result_odd.u.begin(), + result_odd.u.end()); + + // Concatenate singular values + Eigen::VectorXd s_merged(result_even.s.size() + result_odd.s.size()); + s_merged << result_even.s, result_odd.s; + + // Merge v vectors + std::vector v_merged; + v_merged.reserve(result_even.v.size() + result_odd.v.size()); + v_merged.insert(v_merged.end(), result_even.v.begin(), + result_even.v.end()); + v_merged.insert(v_merged.end(), result_odd.v.begin(), + result_odd.v.end()); + + // For segments, use the hints from the kernel class + auto hints = sve_hints(kernel, epsilon); + auto segs_x_full = hints.template segments_x(); + auto segs_y_full = hints.template segments_y(); + + // Rest of the implementation... + // Create PiecewiseLegendrePolyVector from merged vectors + PiecewiseLegendrePolyVector u_complete(u_merged); + PiecewiseLegendrePolyVector v_complete(v_merged); return SVEResult(u_complete, s_merged, v_complete, kernel, epsilon); } @@ -397,86 +413,90 @@ class SVEResult { K kernel; double epsilon; - SVEResult(const PiecewiseLegendrePolyVector& u_, const Eigen::VectorXd& s_, - const PiecewiseLegendrePolyVector& v_, const K& kernel_, double epsilon_) - : u(u_), s(s_), v(v_), kernel(kernel_), epsilon(epsilon_) {} + SVEResult(const PiecewiseLegendrePolyVector &u_, const Eigen::VectorXd &s_, + const PiecewiseLegendrePolyVector &v_, const K &kernel_, + double epsilon_) + : u(u_), s(s_), v(v_), kernel(kernel_), epsilon(epsilon_) + { + } }; template -std::shared_ptr> determine_sve(const K& kernel, double safe_epsilon, int n_gauss) { +std::shared_ptr> +determine_sve(const K &kernel, double safe_epsilon, int n_gauss) +{ if (kernel.is_centrosymmetric()) { - return std::make_shared>(kernel, safe_epsilon, n_gauss); + return std::make_shared>(kernel, safe_epsilon, + n_gauss); } else { - return std::make_shared>(kernel, safe_epsilon, n_gauss); + return std::make_shared>(kernel, safe_epsilon, + n_gauss); } } // Function to truncate singular values template -inline std::tuple>, std::vector>, std::vector>> truncate( - std::vector> &u_list, - std::vector> &s_list, - std::vector> &v_list, - T rtol=0.0, - int lmax=std::numeric_limits::max()) +inline std::tuple>, + std::vector>, + std::vector>> +truncate(std::vector> &u_list, + std::vector> &s_list, + std::vector> &v_list, T rtol = 0.0, + int lmax = std::numeric_limits::max()) { std::vector> u_list_truncated; std::vector> s_list_truncated; std::vector> v_list_truncated; // Collect all singular values std::vector all_singular_values; - for (const auto &s : s_list) - { - for (int i = 0; i < s.size(); ++i) - { + for (const auto &s : s_list) { + for (int i = 0; i < s.size(); ++i) { all_singular_values.push_back(s(i)); } } - std::sort(all_singular_values.begin(), all_singular_values.end(), std::greater()); + 
std::sort(all_singular_values.begin(), all_singular_values.end(),
+              std::greater<T>());

     // Determine cutoff
     T cutoff = rtol * all_singular_values.front();
-    if (lmax < static_cast<int>(all_singular_values.size()))
-    {
+    if (lmax < static_cast<int>(all_singular_values.size())) {
         cutoff = std::max(cutoff, all_singular_values[lmax - 1]);
     }

     // Truncate singular values and corresponding vectors
-    for (size_t idx = 0; idx < s_list.size(); ++idx)
-    {
+    for (size_t idx = 0; idx < s_list.size(); ++idx) {
         const auto &s = s_list[idx];
         int scount = 0;
-        for (int i = 0; i < s.size(); ++i)
-        {
-            if (s(i) > cutoff)
-            {
+        for (int i = 0; i < s.size(); ++i) {
+            if (s(i) > cutoff) {
                 ++scount;
-            }
-            else
-            {
+            } else {
                 break;
             }
         }
-        if (scount < s.size())
-        {
+        if (scount < s.size()) {
             u_list_truncated.push_back(u_list[idx].leftCols(scount));
             s_list_truncated.push_back(s_list[idx].head(scount));
             v_list_truncated.push_back(v_list[idx].leftCols(scount));
         }
     }
-    return std::make_tuple(u_list_truncated, s_list_truncated, v_list_truncated);
+    return std::make_tuple(u_list_truncated, s_list_truncated,
+                           v_list_truncated);
 }

-template<typename K, typename T>
-auto pre_postprocess(K &kernel, double safe_epsilon, int n_gauss, double cutoff = std::numeric_limits<double>::quiet_NaN(), int lmax = -1)
+template <typename K, typename T>
+auto pre_postprocess(K &kernel, double safe_epsilon, int n_gauss,
+                     double cutoff = std::numeric_limits<double>::quiet_NaN(),
+                     int lmax = -1)
 {
     auto sve = determine_sve<K, T>(kernel, safe_epsilon, n_gauss);
     // Compute SVDs
     std::vector<Eigen::MatrixX<T>> matrices = sve->matrices();
     // TODO: implement SVD Results
-    std::vector<std::tuple<Eigen::MatrixX<T>, Eigen::VectorX<T>, Eigen::MatrixX<T>>> svds;
-    for (const auto& mat : matrices) {
+    std::vector<
+        std::tuple<Eigen::MatrixX<T>, Eigen::VectorX<T>, Eigen::MatrixX<T>>>
+        svds;
+    for (const auto &mat : matrices) {
         auto svd = sparseir::compute_svd(mat);
         svds.push_back(svd);
     }
@@ -484,7 +504,7 @@ auto pre_postprocess(K &kernel, double safe_epsilon, int n_gauss, double cutoff
     // Extract singular values and vectors
     std::vector<Eigen::MatrixX<T>> u_list_, v_list_;
     std::vector<Eigen::VectorX<T>> s_list_;
-    for (const auto& svd : svds) {
+    for (const auto &svd : svds) {
         auto u = std::get<0>(svd);
         auto s = std::get<1>(svd);
         auto v = std::get<2>(svd);
@@ -494,39 +514,45 @@ auto pre_postprocess(K &kernel, double safe_epsilon, int n_gauss, double cutoff
     }

     // Apply cutoff and lmax
-    T cutoff_actual = std::isnan(cutoff) ? 2 * T(std::numeric_limits<T>::epsilon()) : T(cutoff);
+    T cutoff_actual = std::isnan(cutoff)
+                          ? 2 * T(std::numeric_limits<T>::epsilon())
+                          : T(cutoff);
     std::vector<Eigen::MatrixX<T>> u_list_truncated;
     std::vector<Eigen::VectorX<T>> s_list_truncated;
     std::vector<Eigen::MatrixX<T>> v_list_truncated;
-    std::tie(u_list_truncated, s_list_truncated, v_list_truncated) = truncate(u_list_, s_list_, v_list_, cutoff_actual, lmax);
+    std::tie(u_list_truncated, s_list_truncated, v_list_truncated) =
+        truncate(u_list_, s_list_, v_list_, cutoff_actual, lmax);

     // Postprocess to get the SVEResult
-    return sve->postprocess(u_list_truncated, s_list_truncated, v_list_truncated);
+    return sve->postprocess(u_list_truncated, s_list_truncated,
+                            v_list_truncated);
 }

 // Function to compute SVE result
 template <typename K>
-    auto compute_sve(K kernel,
-                     double epsilon = std::numeric_limits<double>::quiet_NaN(),
-                     double cutoff = std::numeric_limits<double>::quiet_NaN(),
-                     std::string Twork = "Floatt64",
-                     int lmax = std::numeric_limits<int>::max(),
-                     int n_gauss = -1,
-                     const std::string& svd_strat = "auto") {
+auto compute_sve(K kernel,
+                 double epsilon = std::numeric_limits<double>::quiet_NaN(),
+                 double cutoff = std::numeric_limits<double>::quiet_NaN(),
+                 std::string Twork = "Float64",
+                 int lmax = std::numeric_limits<int>::max(), int n_gauss = -1,
+                 const std::string &svd_strat = "auto")
+{
     // Choose accuracy parameters
     double safe_epsilon;
     std::string Twork_actual;
     std::string svd_strategy_actual;
     std::cout << "Twork: " << Twork << std::endl;
     std::cout << "svd_strat: " << svd_strat << std::endl;
-    std::tie(safe_epsilon, Twork_actual, svd_strategy_actual) = choose_accuracy(epsilon, Twork, svd_strat);
+    std::tie(safe_epsilon, Twork_actual, svd_strategy_actual) =
+        choose_accuracy(epsilon, Twork, svd_strat);
     std::cout << "Twork_actual: " << Twork_actual << std::endl;
     std::cout << "svd_strategy_actual: " << svd_strategy_actual << std::endl;
-    if (Twork_actual == "Float64"){
-        return pre_postprocess<K, double>(kernel, safe_epsilon, n_gauss, cutoff, lmax);
-    }
-    else{
+    if (Twork_actual == "Float64") {
+        return pre_postprocess<K, double>(kernel, safe_epsilon, n_gauss,
+                                          cutoff, lmax);
+    } else {
         // xprec::DDouble
-        return pre_postprocess<K, xprec::DDouble>(kernel, safe_epsilon, n_gauss, cutoff, lmax);
+        return pre_postprocess<K, xprec::DDouble>(kernel, safe_epsilon,
+                                                  n_gauss, cutoff, lmax);
     }
 }
diff --git a/test/_linalg.cxx b/test/_linalg.cxx
index 3650e71..09cd504 100644
--- a/test/_linalg.cxx
+++ b/test/_linalg.cxx
@@ -1,427 +1,436 @@
-#include <Eigen/Dense>
-#include <catch2/catch_test_macros.hpp>
 #include "sparseir/_linalg.hpp"
 #include "xprec/ddouble.hpp"
+#include <Eigen/Dense>
+#include <catch2/catch_test_macros.hpp>
 #include <random>

 using namespace Eigen;
 using namespace xprec;

-TEST_CASE("invperm", "[linalg]") {
-    VectorXi a(3);
-    a << 1, 2, 0;
-    VectorXi b = sparseir::invperm(a);
-    VectorX<int> refb(3);
-    refb << 2, 0, 1;
-    REQUIRE(b.isApprox(refb));
+TEST_CASE("invperm", "[linalg]")
+{
+    VectorXi a(3);
+    a << 1, 2, 0;
+    VectorXi b = sparseir::invperm(a);
+    VectorX<int> refb(3);
+    refb << 2, 0, 1;
+    REQUIRE(b.isApprox(refb));
 }

-TEST_CASE("reflector", "[linalg]") {
-    // double
-    {
-        Eigen::VectorXd v(3);
-        v << 1, 2, 3;
-        auto tau = sparseir::reflector(v);
-
-        Eigen::VectorXd refv(3);
-        // obtained by
-        // julia> using LinearAlgebra; v = Float64[1,2,3]; LinearAlgebra.reflector!(v)
-        refv << -3.7416574, 0.42179344, 0.63269017;
-        for (int i = 0; i < 3; i++) {
-            REQUIRE(std::abs(v(i) - refv(i)) < 1e-7);
-        }
-    }
-
-    // DDouble
-    {
-        Eigen::VectorX<DDouble> v(3);
-        v << 1, 2, 3;
-        auto tau = sparseir::reflector(v);
-
-        Eigen::VectorX<DDouble> refv(3);
-        refv << -3.7416574, 0.42179344, 0.63269017;
-        for (int i = 0; i < 3; i++) {
-            REQUIRE(xprec::abs(v(i) - refv(i)) < 1e-7);
-        }
+TEST_CASE("reflector", "[linalg]")
+{
+    // double
+    {
+        Eigen::VectorXd v(3);
+        v << 1, 2, 3;
+        auto tau =
sparseir::reflector(v); + + Eigen::VectorXd refv(3); + // obtained by + // julia> using LinearAlgebra; v = Float64[1,2,3]; + // LinearAlgebra.reflector!(v) + refv << -3.7416574, 0.42179344, 0.63269017; + for (int i = 0; i < 3; i++) { + REQUIRE(std::abs(v(i) - refv(i)) < 1e-7); } -} - -TEST_CASE("reflectorApply", "[linalg]") { - /* - using SparseIR - T = Float64 - rtol = T(0.1) - A = T[ - 1 1 1 - 1 1 1 - 1 1 1 - ] - - i = 1 - Ainp = @view A[i:end, i] - print("Pre: "); @show Ainp - tau_i = SparseIR._LinAlg.reflector!(Ainp) - @show tau_i - block = @view A[i:end, (i+1):end] - SparseIR._LinAlg.reflectorApply!( - Ainp, tau_i, block - ) - for i in axes(A, 1) - for j in axes(A, 2) - s = (i,j) == size(A) ? ";" : "," - print(A[i, j], "$(s) ") - end - println() - end - */ - Eigen::MatrixX A = Eigen::MatrixX::Random(3, 3); - A << 1, 1, 1, - 1, 1, 1, - 1, 1, 1; - int m = A.rows(); - int n = A.cols(); - int k = std::min(m, n); - double rtol = 0.1; - int i = 0; + } - auto Ainp = A.col(i).tail(m - i); - REQUIRE(Ainp.size() == 3); - auto tau_i = sparseir::reflector(Ainp); - REQUIRE(std::abs(tau_i - 1.5773502691896257) < 1e-7); + // DDouble + { + Eigen::VectorX v(3); + v << 1, 2, 3; + auto tau = sparseir::reflector(v); - Eigen::VectorX refv(3); - refv << -1.7320508075688772, 0.36602540378443865, 0.36602540378443865; + Eigen::VectorX refv(3); + refv << -3.7416574, 0.42179344, 0.63269017; for (int i = 0; i < 3; i++) { - REQUIRE(std::abs(Ainp(i) - refv(i)) < 1e-7); + REQUIRE(xprec::abs(v(i) - refv(i)) < 1e-7); } - - auto block = A.bottomRightCorner(m - i, n - (i + 1)); - sparseir::reflectorApply(Ainp, tau_i, block); - Eigen::MatrixX refA(3, 3); - refA <<-1.7320508075688772, -1.7320508075688772, -1.7320508075688772, - 0.36602540378443865, 0.0, 0.0, - 0.36602540378443865, 0.0, 0.0; - REQUIRE(A.isApprox(refA, 1e-7)); + } } +TEST_CASE("reflectorApply", "[linalg]") +{ + /* + using SparseIR + T = Float64 + rtol = T(0.1) + A = T[ + 1 1 1 + 1 1 1 + 1 1 1 + ] + + i = 1 + Ainp = @view A[i:end, i] + print("Pre: "); @show Ainp + tau_i = SparseIR._LinAlg.reflector!(Ainp) + @show tau_i + block = @view A[i:end, (i+1):end] + SparseIR._LinAlg.reflectorApply!( + Ainp, tau_i, block + ) + for i in axes(A, 1) + for j in axes(A, 2) + s = (i,j) == size(A) ? ";" : "," + print(A[i, j], "$(s) ") + end + println() + end + */ + Eigen::MatrixX A = Eigen::MatrixX::Random(3, 3); + A << 1, 1, 1, 1, 1, 1, 1, 1, 1; + int m = A.rows(); + int n = A.cols(); + int k = std::min(m, n); + double rtol = 0.1; + int i = 0; + + auto Ainp = A.col(i).tail(m - i); + REQUIRE(Ainp.size() == 3); + auto tau_i = sparseir::reflector(Ainp); + REQUIRE(std::abs(tau_i - 1.5773502691896257) < 1e-7); + + Eigen::VectorX refv(3); + refv << -1.7320508075688772, 0.36602540378443865, 0.36602540378443865; + for (int i = 0; i < 3; i++) { + REQUIRE(std::abs(Ainp(i) - refv(i)) < 1e-7); + } -TEST_CASE("Jacobi SVD", "[linalg]") { - MatrixX A = MatrixX::Random(20, 10); + auto block = A.bottomRightCorner(m - i, n - (i + 1)); + sparseir::reflectorApply(Ainp, tau_i, block); + Eigen::MatrixX refA(3, 3); + refA << -1.7320508075688772, -1.7320508075688772, -1.7320508075688772, + 0.36602540378443865, 0.0, 0.0, 0.36602540378443865, 0.0, 0.0; + REQUIRE(A.isApprox(refA, 1e-7)); +} - // There seems to be a bug in the latest version of Eigen3. - // Please first construct a Jacobi SVD and then compare the results. - // Do not use the svd_jacobi function directly. - // Better to write a wrrapper function for the SVD. 
-    Eigen::JacobiSVD<MatrixX<DDouble>> svd;
-    svd.compute(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
+TEST_CASE("Jacobi SVD", "[linalg]")
+{
+    MatrixX<DDouble> A = MatrixX<DDouble>::Random(20, 10);
-    auto U = svd.matrixU();
-    auto S_diag = svd.singularValues().asDiagonal();
-    auto V = svd.matrixV();
-    MatrixX<DDouble> Areconst = ((U * S_diag * V.transpose()));
+    // There seems to be a bug in the latest version of Eigen3.
+    // Please first construct a Jacobi SVD and then compare the results.
+    // Do not use the svd_jacobi function directly.
+    // Better to write a wrapper function for the SVD.
+    Eigen::JacobiSVD<MatrixX<DDouble>> svd;
+    svd.compute(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
-    // 28 significant digits are enough?
-    REQUIRE((A - Areconst).norm()/A.norm() < 1e-28); // 28 significant digits
-}
+    auto U = svd.matrixU();
+    auto S_diag = svd.singularValues().asDiagonal();
+    auto V = svd.matrixV();
+    MatrixX<DDouble> Areconst = ((U * S_diag * V.transpose()));
-TEST_CASE("sparseir::rrqr simple", "[linalg]") {
-    Eigen::MatrixX<double> Aorig(3,3);
-    Aorig << 1, 1, 1,
-             1, 1, 1,
-             1, 1, 1;
-    Eigen::MatrixX<double> A(3,3);
-    A << 1, 1, 1,
-         1, 1, 1,
-         1, 1, 1;
-
-    double A_eps = A.norm() * std::numeric_limits<double>::epsilon();
-    double rtol = 0.1;
-    sparseir::QRPivoted<double> A_qr;
-    int A_rank;
-    std::tie(A_qr, A_rank) = sparseir::rrqr(A);
-    REQUIRE(A_rank == 1);
-    Eigen::MatrixX<double> refA(3, 3);
-    refA <<-1.7320508075688772, -1.7320508075688772, -1.7320508075688772,
-        0.36602540378443865, 0.0, 0.0,
-        0.36602540378443865, 0.0, 0.0;
-    Eigen::VectorX<double> reftaus(3);
-    reftaus << 1.5773502691896257, 0.0, 0.0;
-    Eigen::VectorX<int> refjpvt(3);
-    refjpvt << 0, 1, 2;
-
-    REQUIRE(A_qr.factors.isApprox(refA, 1e-7));
-    REQUIRE(A_qr.taus.isApprox(reftaus, 1e-7));
-    REQUIRE(A_qr.jpvt == refjpvt);
-
-    sparseir::QRPackedQ<double> Q = sparseir::getPropertyQ(A_qr);
-    Eigen::VectorX<double> Qreftaus(3);
-    Qreftaus << 1.5773502691896257, 0.0, 0.0;
-    Eigen::MatrixX<double> Qreffactors(3, 3);
-    Qreffactors << -1.7320508075688772, -1.7320508075688772, -1.7320508075688772,
-        0.36602540378443865, 0.0, 0.0,
-        0.36602540378443865, 0.0, 0.0;
-    REQUIRE(Q.taus.isApprox(Qreftaus, 1e-7));
-    REQUIRE(Q.factors.isApprox(Qreffactors, 1e-7));
-
-    Eigen::MatrixX<double> R = getPropertyR(A_qr);
-    Eigen::MatrixX<double> refR(3, 3);
-    refR << -1.7320508075688772, -1.7320508075688772, -1.7320508075688772,
-        0.0, 0.0, 0.0,
-        0.0, 0.0, 0.0;
-    REQUIRE(R.isApprox(refR, 1e-7));
-
-
-    MatrixX<double> P = getPropertyP(A_qr);
-    MatrixX<double> refP = MatrixX<double>::Identity(3, 3);
-    refP << 1.0, 0.0, 0.0,
-        0.0, 1.0, 0.0,
-        0.0, 0.0, 1.0;
-    REQUIRE(P.isApprox(refP, 1e-7));
-
-    // In Julia Q * R
-    // In C++ Q.factors * R
-
-    MatrixX<double> C(3, 3);
-    sparseir::mul(C, Q, R);
-    MatrixX<double> A_rec = C * P.transpose();
-
-    REQUIRE(A_rec.isApprox(Aorig, 4 * A_eps));
+    // 28 significant digits are enough?
+ REQUIRE((A - Areconst).norm() / A.norm() < 1e-28); // 28 significant digits } -TEST_CASE("sparseir::RRQR", "[linalg]") { - MatrixX Aorig = MatrixX::Random(40, 30); - MatrixX A = Aorig; - DDouble A_eps = A.norm() * std::numeric_limits::epsilon(); - sparseir::QRPivoted A_qr; - int A_rank; - - std::tie(A_qr, A_rank) = sparseir::rrqr(A); - - REQUIRE(A_rank == 30); - sparseir::QRPackedQ Q = sparseir::getPropertyQ(A_qr); - Eigen::MatrixX R = getPropertyR(A_qr); - MatrixX P = getPropertyP(A_qr); +TEST_CASE("sparseir::rrqr simple", "[linalg]") +{ + Eigen::MatrixX Aorig(3, 3); + Aorig << 1, 1, 1, 1, 1, 1, 1, 1, 1; + Eigen::MatrixX A(3, 3); + A << 1, 1, 1, 1, 1, 1, 1, 1, 1; + + double A_eps = A.norm() * std::numeric_limits::epsilon(); + double rtol = 0.1; + sparseir::QRPivoted A_qr; + int A_rank; + std::tie(A_qr, A_rank) = sparseir::rrqr(A); + REQUIRE(A_rank == 1); + Eigen::MatrixX refA(3, 3); + refA << -1.7320508075688772, -1.7320508075688772, -1.7320508075688772, + 0.36602540378443865, 0.0, 0.0, 0.36602540378443865, 0.0, 0.0; + Eigen::VectorX reftaus(3); + reftaus << 1.5773502691896257, 0.0, 0.0; + Eigen::VectorX refjpvt(3); + refjpvt << 0, 1, 2; + + REQUIRE(A_qr.factors.isApprox(refA, 1e-7)); + REQUIRE(A_qr.taus.isApprox(reftaus, 1e-7)); + REQUIRE(A_qr.jpvt == refjpvt); + + sparseir::QRPackedQ Q = sparseir::getPropertyQ(A_qr); + Eigen::VectorX Qreftaus(3); + Qreftaus << 1.5773502691896257, 0.0, 0.0; + Eigen::MatrixX Qreffactors(3, 3); + Qreffactors << -1.7320508075688772, -1.7320508075688772, + -1.7320508075688772, 0.36602540378443865, 0.0, 0.0, 0.36602540378443865, + 0.0, 0.0; + REQUIRE(Q.taus.isApprox(Qreftaus, 1e-7)); + REQUIRE(Q.factors.isApprox(Qreffactors, 1e-7)); + + Eigen::MatrixX R = getPropertyR(A_qr); + Eigen::MatrixX refR(3, 3); + refR << -1.7320508075688772, -1.7320508075688772, -1.7320508075688772, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0; + REQUIRE(R.isApprox(refR, 1e-7)); + + MatrixX P = getPropertyP(A_qr); + MatrixX refP = MatrixX::Identity(3, 3); + refP << 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0; + REQUIRE(P.isApprox(refP, 1e-7)); + + // In Julia Q * R + // In C++ Q.factors * R + + MatrixX C(3, 3); + sparseir::mul(C, Q, R); + MatrixX A_rec = C * P.transpose(); + + REQUIRE(A_rec.isApprox(Aorig, 4 * A_eps)); +} - // In Julia Q * R - // In C++ Q.factors * R - MatrixX C(40, 30); - sparseir::mul(C, Q, R); - MatrixX A_rec = C * P.transpose(); - REQUIRE(A_rec.isApprox(Aorig, 4 * A_eps)); +TEST_CASE("sparseir::RRQR", "[linalg]") +{ + MatrixX Aorig = MatrixX::Random(40, 30); + MatrixX A = Aorig; + DDouble A_eps = A.norm() * std::numeric_limits::epsilon(); + sparseir::QRPivoted A_qr; + int A_rank; + + std::tie(A_qr, A_rank) = sparseir::rrqr(A); + + REQUIRE(A_rank == 30); + sparseir::QRPackedQ Q = sparseir::getPropertyQ(A_qr); + Eigen::MatrixX R = getPropertyR(A_qr); + MatrixX P = getPropertyP(A_qr); + + // In Julia Q * R + // In C++ Q.factors * R + MatrixX C(40, 30); + sparseir::mul(C, Q, R); + MatrixX A_rec = C * P.transpose(); + REQUIRE(A_rec.isApprox(Aorig, 4 * A_eps)); } -TEST_CASE("sparseir::RRQR Trunc", "[linalg]") { - // double - { - VectorX x = VectorX::LinSpaced(101, -1, 1); - MatrixX Aorig(101, 21); - for (int i = 0; i < 21; i++) { - Aorig.col(i) = x.array().pow(i); - } - - MatrixX A = Aorig; - int m = A.rows(); - int n = A.cols(); - sparseir::QRPivoted A_qr; - int k; - std::tie(A_qr, k) = sparseir::rrqr(A, double(1e-5)); - REQUIRE(k < std::min(m, n)); - REQUIRE(k == 17); - auto QR = sparseir::truncate_qr_result(A_qr, k); - auto Q = QR.first; - auto R = QR.second; - - MatrixX A_rec 
= Q * R * getPropertyP(A_qr).transpose(); - REQUIRE(A_rec.isApprox(Aorig, 1e-5 * A.norm())); +TEST_CASE("sparseir::RRQR Trunc", "[linalg]") +{ + // double + { + VectorX x = VectorX::LinSpaced(101, -1, 1); + MatrixX Aorig(101, 21); + for (int i = 0; i < 21; i++) { + Aorig.col(i) = x.array().pow(i); } - // DDouble - { - using std::pow; - VectorX x = VectorX::LinSpaced(101, -1, 1); - MatrixX Aorig(101, 21); - for (int i = 0; i < Aorig.cols(); i++) { - for (int j = 0; j < Aorig.rows(); j++) { - Aorig(j, i) = pow(x(j), i); // xprec::pow is called. - } - } - - MatrixX A = Aorig; - int m = A.rows(); - int n = A.cols(); - sparseir::QRPivoted A_qr; - int k; - std::tie(A_qr, k) = sparseir::rrqr(A, DDouble(0.00001)); - REQUIRE(k < std::min(m, n)); - REQUIRE(k == 17); - auto QR = sparseir::truncate_qr_result(A_qr, k); - auto Q = QR.first; - auto R = QR.second; - REQUIRE(Q.rows() == m); - REQUIRE(Q.cols() == k); - REQUIRE(R.rows() == k); - REQUIRE(R.cols() == n); - MatrixX A_rec = Q * R * getPropertyP(A_qr).transpose(); - REQUIRE(A_rec.isApprox(Aorig, 1e-5 * A.norm())); + + MatrixX A = Aorig; + int m = A.rows(); + int n = A.cols(); + sparseir::QRPivoted A_qr; + int k; + std::tie(A_qr, k) = sparseir::rrqr(A, double(1e-5)); + REQUIRE(k < std::min(m, n)); + REQUIRE(k == 17); + auto QR = sparseir::truncate_qr_result(A_qr, k); + auto Q = QR.first; + auto R = QR.second; + + MatrixX A_rec = Q * R * getPropertyP(A_qr).transpose(); + REQUIRE(A_rec.isApprox(Aorig, 1e-5 * A.norm())); + } + // DDouble + { + using std::pow; + VectorX x = VectorX::LinSpaced(101, -1, 1); + MatrixX Aorig(101, 21); + for (int i = 0; i < Aorig.cols(); i++) { + for (int j = 0; j < Aorig.rows(); j++) { + Aorig(j, i) = pow(x(j), i); // xprec::pow is called. + } } + + MatrixX A = Aorig; + int m = A.rows(); + int n = A.cols(); + sparseir::QRPivoted A_qr; + int k; + std::tie(A_qr, k) = sparseir::rrqr(A, DDouble(0.00001)); + REQUIRE(k < std::min(m, n)); + REQUIRE(k == 17); + auto QR = sparseir::truncate_qr_result(A_qr, k); + auto Q = QR.first; + auto R = QR.second; + REQUIRE(Q.rows() == m); + REQUIRE(Q.cols() == k); + REQUIRE(R.rows() == k); + REQUIRE(R.cols() == n); + MatrixX A_rec = Q * R * getPropertyP(A_qr).transpose(); + REQUIRE(A_rec.isApprox(Aorig, 1e-5 * A.norm())); + } } -TEST_CASE("TSVD", "[linalg]") { - using std::pow; - // double - { - for (auto tol : {1e-14}) { - int N1 = 201; - int N2 = 51; - VectorX x = VectorX::LinSpaced(N1, -1, 1); - //MatrixX Aorig(201, 51); - MatrixX Aorig(N1, N2); - for (int i = 0; i < Aorig.cols(); i++) { - Aorig.col(i) = x.array().pow(i); - //for (int j = 0; j < Aorig.rows(); j++) { - //Aorig(j, i) = pow(x(j), i); - //} - } - - MatrixX A = Aorig; // create a copy of Aorig - - auto tsvd_result = sparseir::tsvd(A, double(tol)); - sparseir::tsvd(Aorig, double(tol)); - auto U = std::get<0>(tsvd_result); - auto s = std::get<1>(tsvd_result); - auto V = std::get<2>(tsvd_result); - int k = s.size(); - - auto S_diag = s.asDiagonal(); - auto Areconst = U * S_diag * V.transpose(); - auto diff = (A - Areconst).norm() / A.norm(); - // std::cout << "diff " << diff << std::endl; - // std::cout << "Areconst " << Areconst.norm() << std::endl; - // std::cout << "Aorig " << Aorig.norm() << std::endl; - // std::cout << "norm diff" << Aorig.norm() - Areconst.norm() << std::endl; - - REQUIRE(Areconst.isApprox(Aorig, tol * Aorig.norm())); - REQUIRE((U.transpose() * U).isIdentity()); - REQUIRE((V.transpose() * V).isIdentity()); - REQUIRE(std::is_sorted(s.data(), s.data() + s.size(), std::greater())); - REQUIRE(k < 
std::min(A.rows(), A.cols())); - - Eigen::JacobiSVD> svd(Aorig.cast()); - REQUIRE(s.isApprox(svd.singularValues().head(k))); - REQUIRE(S_diag.toDenseMatrix().isApprox(svd.singularValues().head(k).asDiagonal().toDenseMatrix())); +TEST_CASE("TSVD", "[linalg]") +{ + using std::pow; + // double + { + for (auto tol : {1e-14}) { + int N1 = 201; + int N2 = 51; + VectorX x = VectorX::LinSpaced(N1, -1, 1); + // MatrixX Aorig(201, 51); + MatrixX Aorig(N1, N2); + for (int i = 0; i < Aorig.cols(); i++) { + Aorig.col(i) = x.array().pow(i); + // for (int j = 0; j < Aorig.rows(); j++) { + // Aorig(j, i) = pow(x(j), i); + //} } + + MatrixX A = Aorig; // create a copy of Aorig + + auto tsvd_result = sparseir::tsvd(A, double(tol)); + sparseir::tsvd(Aorig, double(tol)); + auto U = std::get<0>(tsvd_result); + auto s = std::get<1>(tsvd_result); + auto V = std::get<2>(tsvd_result); + int k = s.size(); + + auto S_diag = s.asDiagonal(); + auto Areconst = U * S_diag * V.transpose(); + auto diff = (A - Areconst).norm() / A.norm(); + // std::cout << "diff " << diff << std::endl; + // std::cout << "Areconst " << Areconst.norm() << std::endl; + // std::cout << "Aorig " << Aorig.norm() << std::endl; + // std::cout << "norm diff" << Aorig.norm() - Areconst.norm() << + // std::endl; + + REQUIRE(Areconst.isApprox(Aorig, tol * Aorig.norm())); + REQUIRE((U.transpose() * U).isIdentity()); + REQUIRE((V.transpose() * V).isIdentity()); + REQUIRE(std::is_sorted(s.data(), s.data() + s.size(), + std::greater())); + REQUIRE(k < std::min(A.rows(), A.cols())); + + Eigen::JacobiSVD> svd(Aorig.cast()); + REQUIRE(s.isApprox(svd.singularValues().head(k))); + REQUIRE(S_diag.toDenseMatrix().isApprox( + svd.singularValues().head(k).asDiagonal().toDenseMatrix())); } + } } -TEST_CASE("SVD of VERY triangular 2x2", "[linalg]") { - // auto [cu, su, smax, smin, cv, sv] = sparseir::svd2x2(DDouble(1), DDouble(1e100), DDouble(1)); - auto svd_result = sparseir::svd2x2(DDouble(1), DDouble(1e100), DDouble(1)); - auto cu = std::get<0>(std::get<0>(svd_result)); - auto su = std::get<1>(std::get<0>(svd_result)); - auto smax = std::get<0>(std::get<1>(svd_result)); - auto smin = std::get<1>(std::get<1>(svd_result)); - auto cv = std::get<0>(std::get<2>(svd_result)); - auto sv = std::get<1>(std::get<2>(svd_result)); - - REQUIRE(cu.hi() == 1.0); - REQUIRE(su.hi() == 1e-100); - REQUIRE(smax.hi() == 1e100); - REQUIRE(smin.hi() == 1e-100); - REQUIRE(cv.hi() == 1e-100); - REQUIRE(sv.hi() == 1.0); - Matrix U, S, Vt, A; - U << cu, -su, su, cu; - S << smax, 0, 0, smin; - Vt << cv, sv, -sv, cv; - A << DDouble(1), DDouble(1e100), DDouble(0), DDouble(1); - REQUIRE((U * S * Vt).isApprox(A)); - - svd_result = sparseir::svd2x2(DDouble(1), DDouble(1e100), DDouble(1e100)); - - cu = std::get<0>(std::get<0>(svd_result)); - su = std::get<1>(std::get<0>(svd_result)); - smax = std::get<0>(std::get<1>(svd_result)); - smin = std::get<1>(std::get<1>(svd_result)); - cv = std::get<0>(std::get<2>(svd_result)); - sv = std::get<1>(std::get<2>(svd_result)); - - REQUIRE(cu.hi() == 1 / std::sqrt(2)); - REQUIRE(su.hi() == 1 / std::sqrt(2)); - REQUIRE(smax.hi() == std::sqrt(2) * 1e100); - REQUIRE(smin.hi() == 1 / std::sqrt(2)); - REQUIRE(std::abs(cv.hi() - 5e-101) < 1e-10); - REQUIRE(std::abs(sv.hi() - 1.0) < 1e-10); - U << cu, -su, su, cu; - S << smax, 0, 0, smin; - Vt << cv, sv, -sv, cv; - A << DDouble(1), DDouble(1e100), DDouble(0), DDouble(1e100); - // REQUIRE((U * S * Vt).isApprox(A)); - - svd_result = sparseir::svd2x2(DDouble(1e100), DDouble(1e200), DDouble(2)); - cu = 
std::get<0>(std::get<0>(svd_result));
-    su = std::get<1>(std::get<0>(svd_result));
-    smax = std::get<0>(std::get<1>(svd_result));
-    smin = std::get<1>(std::get<1>(svd_result));
-    cv = std::get<0>(std::get<2>(svd_result));
-    sv = std::get<1>(std::get<2>(svd_result));
-
-    REQUIRE(cu.hi() == 1.0);
-    REQUIRE(su.hi() == 2e-200);
-    REQUIRE(smax.hi() == 1e200);
-    REQUIRE(smin.hi() == 2e-100);
-    REQUIRE(cv.hi() == 1e-100);
-    REQUIRE(sv.hi() == 1.0);
-    U << cu, -su, su, cu;
-    S << smax, 0, 0, smin;
-    Vt << cv, sv, -sv, cv;
-    A << DDouble(1e100), DDouble(1e200), DDouble(0), DDouble(2);
-    // REQUIRE((U * S * Vt).isApprox(A));
-
-    svd_result = sparseir::svd2x2(DDouble(1e-100), DDouble(1), DDouble(1e-100));
-    cu = std::get<0>(std::get<0>(svd_result));
-    su = std::get<1>(std::get<0>(svd_result));
-    smax = std::get<0>(std::get<1>(svd_result));
-    smin = std::get<1>(std::get<1>(svd_result));
-    cv = std::get<0>(std::get<2>(svd_result));
-    sv = std::get<1>(std::get<2>(svd_result));
-
-    REQUIRE(cu.hi() == 1.0);
-    REQUIRE(su.hi() == 1e-100);
-    REQUIRE(smax.hi() == 1.0);
-    REQUIRE(smin.hi() == 1e-200);
-    REQUIRE(cv.hi() == 1e-100);
-    REQUIRE(sv.hi() == 1.0);
-    U << cu, -su, su, cu;
-    S << smax, 0, 0, smin;
-    Vt << cv, sv, -sv, cv;
-    A << DDouble(1e-100), DDouble(1), DDouble(0), DDouble(1e-100);
-    REQUIRE((U * S * Vt).isApprox(A));
-}
+TEST_CASE("SVD of VERY triangular 2x2", "[linalg]")
+{
+    // auto [cu, su, smax, smin, cv, sv] = sparseir::svd2x2(DDouble(1), DDouble(1e100), DDouble(1));
+    auto svd_result = sparseir::svd2x2(DDouble(1), DDouble(1e100), DDouble(1));
+    auto cu = std::get<0>(std::get<0>(svd_result));
+    auto su = std::get<1>(std::get<0>(svd_result));
+    auto smax = std::get<0>(std::get<1>(svd_result));
+    auto smin = std::get<1>(std::get<1>(svd_result));
+    auto cv = std::get<0>(std::get<2>(svd_result));
+    auto sv = std::get<1>(std::get<2>(svd_result));
+
+    REQUIRE(cu.hi() == 1.0);
+    REQUIRE(su.hi() == 1e-100);
+    REQUIRE(smax.hi() == 1e100);
+    REQUIRE(smin.hi() == 1e-100);
+    REQUIRE(cv.hi() == 1e-100);
+    REQUIRE(sv.hi() == 1.0);
+    Matrix<DDouble, 2, 2> U, S, Vt, A;
+    U << cu, -su, su, cu;
+    S << smax, 0, 0, smin;
+    Vt << cv, sv, -sv, cv;
+    A << DDouble(1), DDouble(1e100), DDouble(0), DDouble(1);
+    REQUIRE((U * S * Vt).isApprox(A));
+
+    svd_result = sparseir::svd2x2(DDouble(1), DDouble(1e100), DDouble(1e100));
+
+    cu = std::get<0>(std::get<0>(svd_result));
+    su = std::get<1>(std::get<0>(svd_result));
+    smax = std::get<0>(std::get<1>(svd_result));
+    smin = std::get<1>(std::get<1>(svd_result));
+    cv = std::get<0>(std::get<2>(svd_result));
+    sv = std::get<1>(std::get<2>(svd_result));
+
+    REQUIRE(cu.hi() == 1 / std::sqrt(2));
+    REQUIRE(su.hi() == 1 / std::sqrt(2));
+    REQUIRE(smax.hi() == std::sqrt(2) * 1e100);
+    REQUIRE(smin.hi() == 1 / std::sqrt(2));
+    REQUIRE(std::abs(cv.hi() - 5e-101) < 1e-10);
+    REQUIRE(std::abs(sv.hi() - 1.0) < 1e-10);
+    U << cu, -su, su, cu;
+    S << smax, 0, 0, smin;
+    Vt << cv, sv, -sv, cv;
+    A << DDouble(1), DDouble(1e100), DDouble(0), DDouble(1e100);
+    // REQUIRE((U * S * Vt).isApprox(A));
+
+    svd_result = sparseir::svd2x2(DDouble(1e100), DDouble(1e200), DDouble(2));
+    cu = std::get<0>(std::get<0>(svd_result));
+    su = std::get<1>(std::get<0>(svd_result));
+    smax = std::get<0>(std::get<1>(svd_result));
+    smin = std::get<1>(std::get<1>(svd_result));
+    cv = std::get<0>(std::get<2>(svd_result));
+    sv = std::get<1>(std::get<2>(svd_result));
+
+    REQUIRE(cu.hi() == 1.0);
+    REQUIRE(su.hi() == 2e-200);
+    REQUIRE(smax.hi() == 1e200);
+    REQUIRE(smin.hi() == 2e-100);
+    REQUIRE(cv.hi() == 1e-100);
+    REQUIRE(sv.hi() == 1.0);
+    U << cu, -su, su, cu;
+    S << smax, 0, 0, smin;
+    Vt << cv, sv, -sv, cv;
+    A << DDouble(1e100), DDouble(1e200), DDouble(0), DDouble(2);
+    // REQUIRE((U * S * Vt).isApprox(A));
+
+    svd_result = sparseir::svd2x2(DDouble(1e-100), DDouble(1), DDouble(1e-100));
+    cu =
std::get<0>(std::get<0>(svd_result)); + su = std::get<1>(std::get<0>(svd_result)); + smax = std::get<0>(std::get<1>(svd_result)); + smin = std::get<1>(std::get<1>(svd_result)); + cv = std::get<0>(std::get<2>(svd_result)); + sv = std::get<1>(std::get<2>(svd_result)); + + REQUIRE(cu.hi() == 1.0); + REQUIRE(su.hi() == 1e-100); + REQUIRE(smax.hi() == 1.0); + REQUIRE(smin.hi() == 1e-200); + REQUIRE(cv.hi() == 1e-100); + REQUIRE(sv.hi() == 1.0); + U << cu, -su, su, cu; + S << smax, 0, 0, smin; + Vt << cv, sv, -sv, cv; + A << DDouble(1e-100), DDouble(1), DDouble(0), DDouble(1e-100); + REQUIRE((U * S * Vt).isApprox(A)); } -TEST_CASE("SVD of 'more lower' 2x2", "[linalg]") { - auto svd_result = sparseir::svd2x2(DDouble(1), DDouble(1e-100), DDouble(1e100), DDouble(1)); - auto cu = std::get<0>(std::get<0>(svd_result)); - auto su = std::get<1>(std::get<0>(svd_result)); - auto smax = std::get<0>(std::get<1>(svd_result)); - auto smin = std::get<1>(std::get<1>(svd_result)); - auto cv = std::get<0>(std::get<2>(svd_result)); - auto sv = std::get<1>(std::get<2>(svd_result)); - - // REQUIRE(cu.hi() == 1e-100); <--- fails - REQUIRE(su.hi() == 1.0); - REQUIRE(smax.hi() == 1e100); - REQUIRE(xprec::abs(smin) < DDouble(1e-100)); // should be ≈ 0.0, but x ≈ 0 is equivalent to x == 0 - REQUIRE(cv.hi() == 1.0); - REQUIRE(sv.hi() == 1e-100); - Matrix U, S, Vt, A; - U << cu, -su, su, cu; - S << smax, 0, 0, smin; - Vt << cv, sv, -sv, cv; - A << DDouble(1), DDouble(1e-100), DDouble(1e100), DDouble(1); - //REQUIRE((U * S * Vt).isApprox(A)); +TEST_CASE("SVD of 'more lower' 2x2", "[linalg]") +{ + auto svd_result = sparseir::svd2x2(DDouble(1), DDouble(1e-100), + DDouble(1e100), DDouble(1)); + auto cu = std::get<0>(std::get<0>(svd_result)); + auto su = std::get<1>(std::get<0>(svd_result)); + auto smax = std::get<0>(std::get<1>(svd_result)); + auto smin = std::get<1>(std::get<1>(svd_result)); + auto cv = std::get<0>(std::get<2>(svd_result)); + auto sv = std::get<1>(std::get<2>(svd_result)); + + // REQUIRE(cu.hi() == 1e-100); <--- fails + REQUIRE(su.hi() == 1.0); + REQUIRE(smax.hi() == 1e100); + REQUIRE( + xprec::abs(smin) < + DDouble(1e-100)); // should be ≈ 0.0, but x ≈ 0 is equivalent to x == 0 + REQUIRE(cv.hi() == 1.0); + REQUIRE(sv.hi() == 1e-100); + Matrix U, S, Vt, A; + U << cu, -su, su, cu; + S << smax, 0, 0, smin; + Vt << cv, sv, -sv, cv; + A << DDouble(1), DDouble(1e-100), DDouble(1e100), DDouble(1); + // REQUIRE((U * S * Vt).isApprox(A)); } -TEST_CASE("Givens rotation of 2D vector - special cases", "[linalg]") { - for (auto v : {std::vector{42, 0}, std::vector{-42, 0}, std::vector{0, 42}, std::vector{0, -42}, std::vector{0, 0}}) { +TEST_CASE("Givens rotation of 2D vector - special cases", "[linalg]") +{ + for (auto v : {std::vector{42, 0}, std::vector{-42, 0}, + std::vector{0, 42}, std::vector{0, -42}, + std::vector{0, 0}}) { auto rot = sparseir::givens_params(v[0], v[1]); auto c_s = std::get<0>(rot); auto r = std::get<1>(rot); diff --git a/test/_root.cxx b/test/_root.cxx index d656b96..852f35c 100644 --- a/test/_root.cxx +++ b/test/_root.cxx @@ -1,46 +1,62 @@ #include -#include +#include #include -#include #include -#include +#include +#include #include // Include the previously implemented functions here -//template -//std::vector discrete_extrema(F f, const std::vector& xgrid); +// template +// std::vector discrete_extrema(F f, const std::vector& xgrid); template T midpoint(T lo, T hi); -TEST_CASE("DiscreteExtrema") { +TEST_CASE("DiscreteExtrema") +{ std::vector nonnegative = {0, 1, 2, 3, 4, 5, 6, 7, 8}; - 
std::vector symmetric = {-8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8}; + std::vector symmetric = {-8, -7, -6, -5, -4, -3, -2, -1, 0, + 1, 2, 3, 4, 5, 6, 7, 8}; auto identity = [](int x) { return x; }; - auto shifted_identity = [](int x) { return x - std::numeric_limits::epsilon(); }; + auto shifted_identity = [](int x) { + return x - std::numeric_limits::epsilon(); + }; auto square = [](int x) { return x * x; }; auto constant = [](int x) { return 1; }; - REQUIRE(sparseir::discrete_extrema(identity, nonnegative) == std::vector({8})); - // REQUIRE(sparseir::discrete_extrema(shifted_identity, nonnegative) == std::vector({0, 8})); - // REQUIRE(discrete_extrema(square, symmetric) == std::vector({-8, 0, 8})); - // REQUIRE(discrete_extrema(constant, symmetric) == std::vector({})); + REQUIRE(sparseir::discrete_extrema(identity, nonnegative) == + std::vector({8})); + // REQUIRE(sparseir::discrete_extrema(shifted_identity, nonnegative) == + // std::vector({0, 8})); REQUIRE(discrete_extrema(square, symmetric) == + // std::vector({-8, 0, 8})); REQUIRE(discrete_extrema(constant, + // symmetric) == std::vector({})); } -TEST_CASE("Midpoint") { - // fails - //REQUIRE(midpoint(std::numeric_limits::max(), std::numeric_limits::max()) == std::numeric_limits::max()); - // REQUIRE(midpoint(std::numeric_limits::min(), std::numeric_limits::max()) == -1); +TEST_CASE("Midpoint") +{ // fails - //REQUIRE(midpoint(std::numeric_limits::min(), std::numeric_limits::min()) == std::numeric_limits::min()); - // REQUIRE(midpoint(static_cast(1000), static_cast(2000)) == static_cast(1500)); - // REQUIRE(midpoint(std::numeric_limits::max(), std::numeric_limits::max()) == std::numeric_limits::max()); - // REQUIRE(midpoint(static_cast(0), std::numeric_limits::max()) == std::numeric_limits::max() / 2); - // REQUIRE(midpoint(static_cast(0), std::numeric_limits::max()) == std::numeric_limits::max() / 2); - // REQUIRE(midpoint(static_cast(0), static_cast(99999999999999999999ULL)) == static_cast(99999999999999999999ULL) / 2); + // REQUIRE(midpoint(std::numeric_limits::max(), + // std::numeric_limits::max()) == std::numeric_limits::max()); + // REQUIRE(midpoint(std::numeric_limits::min(), + // std::numeric_limits::max()) == -1); fails + // REQUIRE(midpoint(std::numeric_limits::min(), + // std::numeric_limits::min()) == std::numeric_limits::min()); + // REQUIRE(midpoint(static_cast(1000), static_cast(2000)) + // == static_cast(1500)); + // REQUIRE(midpoint(std::numeric_limits::max(), + // std::numeric_limits::max()) == + // std::numeric_limits::max()); + // REQUIRE(midpoint(static_cast(0), + // std::numeric_limits::max()) == std::numeric_limits::max() / + // 2); REQUIRE(midpoint(static_cast(0), std::numeric_limits::max()) == std::numeric_limits::max() / 2); + // REQUIRE(midpoint(static_cast(0), + // static_cast(99999999999999999999ULL)) == + // static_cast(99999999999999999999ULL) / 2); // REQUIRE(midpoint(-10.0, 1.0) == -4.5); } \ No newline at end of file diff --git a/test/_specfuncs.cxx b/test/_specfuncs.cxx index 022f1c9..4820f05 100644 --- a/test/_specfuncs.cxx +++ b/test/_specfuncs.cxx @@ -1,26 +1,29 @@ #include +#include #include -#include #include -#include +#include #include #include -using xprec::DDouble; using Eigen::MatrixXd; +using xprec::DDouble; -TEST_CASE("_specfuns.cxx"){ - SECTION("legval"){ +TEST_CASE("_specfuns.cxx") +{ + SECTION("legval") + { std::vector c = {1.0, 2.0, 3.0}; double x = 0.5; double result = sparseir::legval(x, c); REQUIRE(result == 1.625); } - SECTION("legvander"){ + 
SECTION("legvander") + { std::vector x = {0.0, 0.5, 1.0}; int deg = 2; MatrixXd result = sparseir::legvander(x, deg); @@ -28,9 +31,9 @@ TEST_CASE("_specfuns.cxx"){ expected << 1, 0, -0.5, 1.0, 0.5, -0.125, 1, 1, 1; REQUIRE(result.isApprox(expected, 1e-9)); - Eigen::VectorXd x_eigen = Eigen::Map(x.data(), x.size()); + Eigen::VectorXd x_eigen = + Eigen::Map(x.data(), x.size()); MatrixXd result_eigen = sparseir::legvander(x_eigen, deg); REQUIRE(result_eigen.isApprox(expected, 1e-9)); } - } diff --git a/test/example.cxx b/test/example.cxx index 6027023..9958cf3 100644 --- a/test/example.cxx +++ b/test/example.cxx @@ -4,9 +4,9 @@ #include #include "sparseir/sparseir.h" -#include #include #include +#include using namespace xprec; @@ -28,10 +28,8 @@ TEST_CASE(" test ", "[test]") using MatrixXdd = Eigen::Matrix; auto N = 20; auto m = MatrixXdd(N, N); - for (int i = 0; i < N; i++) - { - for (int j = 0; j < N; j++) - { + for (int i = 0; i < N; i++) { + for (int j = 0; j < N; j++) { m(i, j) = 1 / double(i + j + 1); } } diff --git a/test/freq.cxx b/test/freq.cxx index ae36565..32405fb 100644 --- a/test/freq.cxx +++ b/test/freq.cxx @@ -24,7 +24,8 @@ TEST_CASE("freq", "[Integer value of MatsubaraFreq with explicit integer cast]") REQUIRE(bf.get_n() == 4); } -TEST_CASE("freq", "[Exceptions for invalid FermionicFreq and BosonicFreq values]") +TEST_CASE("freq", + "[Exceptions for invalid FermionicFreq and BosonicFreq values]") { REQUIRE_THROWS_AS(sparseir::FermionicFreq(4), std::domain_error); REQUIRE_THROWS_AS(sparseir::BosonicFreq(-7), std::domain_error); diff --git a/test/gauss.cxx b/test/gauss.cxx index 37dd2a4..b7bf46b 100644 --- a/test/gauss.cxx +++ b/test/gauss.cxx @@ -1,13 +1,13 @@ -#include -#include -#include #include +#include #include +#include +#include #include -#include #include +#include using std::invalid_argument; @@ -18,19 +18,21 @@ TEST_CASE("gauss", "[Rule]") // Initialize x, w, v, x_forward, x_backward with DDouble values std::vector x, w; sparseir::Rule r = sparseir::Rule(x, w); - REQUIRE(1==1); + REQUIRE(1 == 1); } - template -void gaussValidate(const sparseir::Rule& rule) { +void gaussValidate(const sparseir::Rule &rule) +{ if (!(rule.a <= rule.b)) { throw invalid_argument("a,b must be a valid interval"); } - if (!std::all_of(rule.x.begin(), rule.x.end(), [rule](T xi) { return xi <= rule.b; })) { + if (!std::all_of(rule.x.begin(), rule.x.end(), + [rule](T xi) { return xi <= rule.b; })) { throw invalid_argument("x must be smaller than b"); } - if (!std::all_of(rule.x.begin(), rule.x.end(), [rule](T xi) { return xi >= rule.a; })) { + if (!std::all_of(rule.x.begin(), rule.x.end(), + [rule](T xi) { return xi >= rule.a; })) { throw invalid_argument("x must be larger than a"); } if (!std::is_sorted(rule.x.begin(), rule.x.end())) { @@ -41,108 +43,122 @@ void gaussValidate(const sparseir::Rule& rule) { } // TODO: Fix me - //REQUIRE(equal(rule.x_forward.begin(), rule.x_forward.end(), rule.x.begin(), [rule](T xi, T x_forward) { return abs(x_forward - (xi - rule.a)) < 1e-9; })); - //REQUIRE(equal(rule.x_backward.begin(), rule.x_backward.end(), rule.x.begin(), [rule](T xi, T x_backward) { return abs(x_backward - (rule.b - xi)) < 1e-9; })); + // REQUIRE(equal(rule.x_forward.begin(), rule.x_forward.end(), + // rule.x.begin(), [rule](T xi, T x_forward) { return abs(x_forward - (xi - + // rule.a)) < 1e-9; })); REQUIRE(equal(rule.x_backward.begin(), + // rule.x_backward.end(), rule.x.begin(), [rule](T xi, T x_backward) { + // return abs(x_backward - (rule.b - xi)) < 1e-9; })); } 
-TEST_CASE("gauss.cpp") { - SECTION("Rule"){ +TEST_CASE("gauss.cpp") +{ + SECTION("Rule") + { std::vector x(20), w(20); DDouble a = -1, b = 1; xprec::gauss_legendre(20, x.data(), w.data()); - Eigen::VectorX x_eigen = Eigen::Map>(x.data(), x.size()); - Eigen::VectorX w_eigen = Eigen::Map>(w.data(), w.size()); + Eigen::VectorX x_eigen = + Eigen::Map>(x.data(), x.size()); + Eigen::VectorX w_eigen = + Eigen::Map>(w.data(), w.size()); sparseir::Rule r1 = sparseir::Rule(x_eigen, w_eigen); - sparseir::Rule r2 = sparseir::Rule(x_eigen, w_eigen, a, b); + sparseir::Rule r2 = + sparseir::Rule(x_eigen, w_eigen, a, b); REQUIRE(r1.a == r2.a); REQUIRE(r1.b == r2.b); REQUIRE(r1.x == r2.x); REQUIRE(r1.w == r2.w); } - SECTION("legvander6"){ + SECTION("legvander6") + { int n = 6; - auto result = sparseir::legvander(sparseir::legendre(n).x, n-1); + auto result = sparseir::legvander(sparseir::legendre(n).x, n - 1); Eigen::MatrixX expected(n, n); // expected is computed by - // using SparseIR; m = SparseIR.legvander(SparseIR.sparseir::legendre(6, SparseIR.Float64x2).x, 5); foreach(x -> println(x, ","), vec(m')) - expected << 1.0, - -0.9324695142031520278123015544939835, - 0.8042490923773935119886600608277198, - -0.6282499246436887457708844976782951, - 0.4220050092706226656844451152082432, - -0.2057123110596225258297870187140517, - 1.0, - -0.6612093864662645136613995950198845, - 0.1557967791266409127010318094210897, - 0.26911576974459911112396181357848563, - -0.428245862097120739542522563281485, - 0.2943957149254374170467243373494173, - 1.0, - -0.2386191860831969086305017216807169, - -0.4145913260494889701442373247943369, - 0.3239618653539352481441754340495602, - 0.1756623404298037786973215350969389, - -0.33461902074104083146186699361445111, - 1.0, - 0.2386191860831969086305017216807169, - -0.4145913260494889701442373247943369, - -0.3239618653539352481441754340495602, - 0.1756623404298037786973215350969389, - 0.33461902074104083146186699361445111, - 1.0, - 0.6612093864662645136613995950198845, - 0.1557967791266409127010318094210897, - -0.26911576974459911112396181357848563, - -0.428245862097120739542522563281485, - -0.2943957149254374170467243373494173, - 1.0, - 0.9324695142031520278123015544939835, - 0.8042490923773935119886600608277198, - 0.6282499246436887457708844976782951, - 0.4220050092706226656844451152082432, - 0.2057123110596225258297870187140517; + // using SparseIR; m = SparseIR.legvander(SparseIR.sparseir::legendre(6, + // SparseIR.Float64x2).x, 5); foreach(x -> println(x, ","), vec(m')) + expected << 1.0, -0.9324695142031520278123015544939835, + 0.8042490923773935119886600608277198, + -0.6282499246436887457708844976782951, + 0.4220050092706226656844451152082432, + -0.2057123110596225258297870187140517, 1.0, + -0.6612093864662645136613995950198845, + 0.1557967791266409127010318094210897, + 0.26911576974459911112396181357848563, + -0.428245862097120739542522563281485, + 0.2943957149254374170467243373494173, 1.0, + -0.2386191860831969086305017216807169, + -0.4145913260494889701442373247943369, + 0.3239618653539352481441754340495602, + 0.1756623404298037786973215350969389, + -0.33461902074104083146186699361445111, 1.0, + 0.2386191860831969086305017216807169, + -0.4145913260494889701442373247943369, + -0.3239618653539352481441754340495602, + 0.1756623404298037786973215350969389, + 0.33461902074104083146186699361445111, 1.0, + 0.6612093864662645136613995950198845, + 0.1557967791266409127010318094210897, + -0.26911576974459911112396181357848563, + -0.428245862097120739542522563281485, + 
-0.2943957149254374170467243373494173, 1.0, + 0.9324695142031520278123015544939835, + 0.8042490923773935119886600608277198, + 0.6282499246436887457708844976782951, + 0.4220050092706226656844451152082432, + 0.2057123110596225258297870187140517; DDouble e = 1e-13; REQUIRE(result.isApprox(expected, e)); } - SECTION("legendre_collocation"){ + SECTION("legendre_collocation") + { sparseir::Rule r = sparseir::legendre(2); Eigen::MatrixX result = sparseir::legendre_collocation(r); Eigen::MatrixX expected(2, 2); // expected is computed by - // julia> using SparseIR; m = SparseIR.legendre_collocation(SparseIR.sparseir::legendre(2, SparseIR.Float64x2)) - expected << 0.5, 0.5, - -0.8660254037844386467637231707528938, 0.8660254037844386467637231707528938; + // julia> using SparseIR; m = + // SparseIR.legendre_collocation(SparseIR.sparseir::legendre(2, + // SparseIR.Float64x2)) + expected << 0.5, 0.5, -0.8660254037844386467637231707528938, + 0.8660254037844386467637231707528938; DDouble e = 1e-13; REQUIRE(result.isApprox(expected, e)); } - SECTION("collocate") { + SECTION("collocate") + { int n = 6; sparseir::Rule r = sparseir::legendre(n); - Eigen::MatrixX cmat = legendre_collocation(r); // Assuming legendre_collocation function is defined + Eigen::MatrixX cmat = legendre_collocation( + r); // Assuming legendre_collocation function is defined Eigen::MatrixX emat = sparseir::legvander(r.x, r.x.size() - 1); DDouble e = 1e-13; - Eigen::MatrixX out = emat * cmat; - REQUIRE((emat * cmat).isApprox(Eigen::MatrixX::Identity(n, n), e)); + Eigen::MatrixX out = emat * cmat; + REQUIRE( + (emat * cmat).isApprox(Eigen::MatrixX::Identity(n, n), e)); } - SECTION("gauss sparseir::legendre") { + SECTION("gauss sparseir::legendre") + { int n = 200; sparseir::Rule rule = sparseir::legendre(n); gaussValidate(rule); std::vector x(n), w(n); xprec::gauss_legendre(n, x.data(), w.data()); - Eigen::VectorX x_eigen = Eigen::Map>(x.data(), n); - Eigen::VectorX w_eigen = Eigen::Map>(w.data(), n); + Eigen::VectorX x_eigen = + Eigen::Map>(x.data(), n); + Eigen::VectorX w_eigen = + Eigen::Map>(w.data(), n); REQUIRE(rule.x == x_eigen); REQUIRE(rule.w == w_eigen); } - SECTION("piecewise") { + SECTION("piecewise") + { std::vector edges = {-4, -1, 1, 3}; sparseir::Rule rule = sparseir::legendre(20).piecewise(edges); gaussValidate(rule); diff --git a/test/kernel.cxx b/test/kernel.cxx index e8148ed..5485116 100644 --- a/test/kernel.cxx +++ b/test/kernel.cxx @@ -1,19 +1,20 @@ -#include #include -#include -#include -#include +#include #include #include +#include #include +#include +#include // Include your sparseir headers #include using xprec::DDouble; -template -std::tuple kernel_accuracy_test(Kernel &K) { +template +std::tuple kernel_accuracy_test(Kernel &K) +{ using T = float; using T_x = double; @@ -51,10 +52,12 @@ std::tuple kernel_accuracy_test(Kernel &K) { T_x magn = result_x.cwiseAbs().maxCoeff(); // Check that the difference is within tolerance - bool b1 = (result.template cast() - result_x).cwiseAbs().maxCoeff() <= 2 * magn * epsilon; + bool b1 = (result.template cast() - result_x).cwiseAbs().maxCoeff() <= + 2 * magn * epsilon; auto reldiff = (result.cwiseAbs().array() < tiny) - .select(T(1.0), result.array() / result_x.template cast().array()); + .select(T(1.0), result.array() / + result_x.template cast().array()); bool b2 = (reldiff - T(1.0)).cwiseAbs().maxCoeff() <= 100 * epsilon; return std::make_tuple(b1, b2); @@ -66,40 +69,44 @@ TEST_CASE("Kernel Accuracy Test") // List of kernels to test std::vector kernels = { 
sparseir::LogisticKernel(9.0), - //sparseir::RegularizedBoseKernel(8.0), + // sparseir::RegularizedBoseKernel(8.0), sparseir::LogisticKernel(120000.0), - //sparseir::RegularizedBoseKernel(127500.0), - // Symmetrized kernels + // sparseir::RegularizedBoseKernel(127500.0), + // Symmetrized kernels }; - for (const auto &K : kernels) - { + for (const auto &K : kernels) { bool b1, b2; std::tie(b1, b2) = kernel_accuracy_test(K); REQUIRE(b1); REQUIRE(b2); /* if (b1){ - std::cout << "Kernel accuracy test passed for " << typeid(K).name() << b1 << b2 << std::endl; + std::cout << "Kernel accuracy test passed for " << + typeid(K).name() << b1 << b2 << std::endl; } if (b2){ - std::cout << "Kernel accuracy test passed for " << typeid(K).name() << b1 << b2 << std::endl; + std::cout << "Kernel accuracy test passed for " << + typeid(K).name() << b1 << b2 << std::endl; } */ } } { - auto kernel_ptr = std::make_shared(40000.0); + auto kernel_ptr = + std::make_shared(40000.0); auto K = sparseir::get_symmetrized(kernel_ptr, -1); // TODO: implement sve_hints bool b1, b2; - //std::tie(b1, b2) = kernel_accuracy_test(K); - //REQUIRE(b1); - //REQUIRE(b2); - // TODO: resolve this errors - //auto k2 = sparseir::get_symmetrized(sparseir::RegularizedBoseKernel(40000.0), -1); - // TODO: implement sve_hints - // REQUIRE(kernel_accuracy_test(k2)); + // std::tie(b1, b2) = kernel_accuracy_test(K); + // REQUIRE(b1); + // REQUIRE(b2); + // TODO: resolve this errors + // auto k2 = + // sparseir::get_symmetrized(sparseir::RegularizedBoseKernel(40000.0), + // -1); + // TODO: implement sve_hints + // REQUIRE(kernel_accuracy_test(k2)); } { // List of kernels to test @@ -107,8 +114,7 @@ TEST_CASE("Kernel Accuracy Test") sparseir::RegularizedBoseKernel(8.0), sparseir::RegularizedBoseKernel(127500.0), }; - for (const auto &K : kernels) - { + for (const auto &K : kernels) { bool b1, b2; std::tie(b1, b2) = kernel_accuracy_test(K); // TODO: resolve this errors @@ -118,12 +124,14 @@ TEST_CASE("Kernel Accuracy Test") /* if (b1) { - std::cout << "Kernel accuracy test passed for " << typeid(K).name() << b1 << b2 << std::endl; + std::cout << "Kernel accuracy test passed for " << + typeid(K).name() << b1 << b2 << std::endl; } if (b2) { - std::cout << "Kernel accuracy test passed for " << typeid(K).name() << b1 << b2 << std::endl; + std::cout << "Kernel accuracy test passed for " << + typeid(K).name() << b1 << b2 << std::endl; } */ } @@ -150,19 +158,16 @@ TEST_CASE("Kernel Singularity Test") std::mt19937 rng; std::uniform_real_distribution dist(-1.0, 1.0); - for (double lambda : lambdas) - { + for (double lambda : lambdas) { // Generate random x values std::vector x_values(1000); - for (double &x : x_values) - { + for (double &x : x_values) { x = dist(rng); } sparseir::RegularizedBoseKernel K(lambda); - for (double x : x_values) - { + for (double x : x_values) { double expected = 1.0 / lambda; double computed = K(x, 0.0); REQUIRE(Eigen::internal::isApprox(computed, expected)); diff --git a/test/poly.cxx b/test/poly.cxx index ddfbed2..cc3ea29 100644 --- a/test/poly.cxx +++ b/test/poly.cxx @@ -1,36 +1,37 @@ #include -#include #include -#include #include -#include #include +#include +#include +#include #include -#include #include +#include // test_piecewise_legendre_poly.cpp -#include #include -#include -#include +#include #include -#include #include +#include +#include +#include -TEST_CASE("StableRNG") { +TEST_CASE("StableRNG") +{ // Initialize data directly with the given values Eigen::MatrixXd data(3, 3); data << 0.8177021060277301, 
0.7085670484724618, 0.5033588232863977, - 0.3804323567786363, 0.7911959541742282, 0.8268504271915096, - 0.5425813266814807, 0.38397463704084633, 0.21626598379927042; + 0.3804323567786363, 0.7911959541742282, 0.8268504271915096, + 0.5425813266814807, 0.38397463704084633, 0.21626598379927042; Eigen::VectorXd knots(4); - knots << 0.507134318967235, 0.5766150365607372, - 0.7126662232433161, 0.7357313003784003; + knots << 0.507134318967235, 0.5766150365607372, 0.7126662232433161, + 0.7357313003784003; // Check that knots are sorted REQUIRE(std::is_sorted(knots.data(), knots.data() + knots.size())); @@ -39,23 +40,25 @@ TEST_CASE("StableRNG") { int randsymm = 9; Eigen::MatrixXd ddata(3, 3); ddata << 0.5328437345518631, 0.8443074122979211, 0.6722336389122814, - 0.1799506228788046, 0.6805545318460489, 0.17641780726469292, - 0.13124858727993338, 0.2193663343416914, 0.7756615110113394; + 0.1799506228788046, 0.6805545318460489, 0.17641780726469292, + 0.13124858727993338, 0.2193663343416914, 0.7756615110113394; // Check that ddata matches expected values REQUIRE(ddata.isApprox(ddata)); } -TEST_CASE("sparseir::PiecewiseLegendrePoly(data::Matrix, knots::Vector, l::Int)") { +TEST_CASE( + "sparseir::PiecewiseLegendrePoly(data::Matrix, knots::Vector, l::Int)") +{ // Initialize data and knots as per the test Eigen::MatrixXd data(3, 3); data << 0.8177021060277301, 0.7085670484724618, 0.5033588232863977, - 0.3804323567786363, 0.7911959541742282, 0.8268504271915096, - 0.5425813266814807, 0.38397463704084633, 0.21626598379927042; + 0.3804323567786363, 0.7911959541742282, 0.8268504271915096, + 0.5425813266814807, 0.38397463704084633, 0.21626598379927042; Eigen::VectorXd knots(4); - knots << 0.507134318967235, 0.5766150365607372, - 0.7126662232433161, 0.7357313003784003; + knots << 0.507134318967235, 0.5766150365607372, 0.7126662232433161, + 0.7357313003784003; int l = 3; @@ -71,16 +74,17 @@ TEST_CASE("sparseir::PiecewiseLegendrePoly(data::Matrix, knots::Vector, l::Int)" REQUIRE(pwlp.symm == 0); } -TEST_CASE("PiecewiseLegendrePoly(data, p::PiecewiseLegendrePoly; symm=symm(p))") { +TEST_CASE("PiecewiseLegendrePoly(data, p::PiecewiseLegendrePoly; symm=symm(p))") +{ // Initialize data and knots as per the test Eigen::MatrixXd data(3, 3); data << 0.8177021060277301, 0.7085670484724618, 0.5033588232863977, - 0.3804323567786363, 0.7911959541742282, 0.8268504271915096, - 0.5425813266814807, 0.38397463704084633, 0.21626598379927042; + 0.3804323567786363, 0.7911959541742282, 0.8268504271915096, + 0.5425813266814807, 0.38397463704084633, 0.21626598379927042; Eigen::VectorXd knots(4); - knots << 0.507134318967235, 0.5766150365607372, - 0.7126662232433161, 0.7357313003784003; + knots << 0.507134318967235, 0.5766150365607372, 0.7126662232433161, + 0.7357313003784003; int l = 3; @@ -90,8 +94,8 @@ TEST_CASE("PiecewiseLegendrePoly(data, p::PiecewiseLegendrePoly; symm=symm(p))") int randsymm = 9; Eigen::MatrixXd ddata(3, 3); ddata << 0.5328437345518631, 0.8443074122979211, 0.6722336389122814, - 0.1799506228788046, 0.6805545318460489, 0.17641780726469292, - 0.13124858727993338, 0.2193663343416914, 0.7756615110113394; + 0.1799506228788046, 0.6805545318460489, 0.17641780726469292, + 0.13124858727993338, 0.2193663343416914, 0.7756615110113394; // Create ddata_pwlp sparseir::PiecewiseLegendrePoly ddata_pwlp(ddata, pwlp, randsymm); @@ -112,25 +116,21 @@ TEST_CASE("PiecewiseLegendrePoly(data, p::PiecewiseLegendrePoly; symm=symm(p))") REQUIRE(pwlp.norms.isApprox(ddata_pwlp.norms)); } -TEST_CASE("sparseir::PiecewiseLegendrePolyVector") { 
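A note on the evaluation semantics that the polynomial tests in this file rely on: a PiecewiseLegendrePoly stores one column of Legendre coefficients per knot interval, and evaluating at x means locating the interval, mapping x to the reference coordinate x_tilde in [-1, 1], and summing the Legendre series there. A simplified sketch (it ignores the per-segment normalization that the library tracks in norms, so it illustrates the data layout rather than serving as a drop-in replacement):

    #include <cstddef>
    #include <vector>

    // data[i] holds the Legendre coefficients c_l for segment i;
    // knots has one more entry than data.
    double eval_piecewise(const std::vector<std::vector<double>> &data,
                          const std::vector<double> &knots, double x)
    {
        // locate the segment containing x (linear scan for clarity)
        std::size_t i = 0;
        while (i + 2 < knots.size() && x > knots[i + 1])
            ++i;

        // map x onto the reference interval [-1, 1]
        const double a = knots[i], b = knots[i + 1];
        const double xt = 2.0 * (x - a) / (b - a) - 1.0;

        // sum c_l * P_l(xt) with the three-term recurrence
        // l * P_l = (2l - 1) * xt * P_{l-1} - (l - 1) * P_{l-2}
        const std::vector<double> &c = data[i];
        if (c.empty())
            return 0.0;
        double p0 = 1.0, p1 = xt;
        double val = c[0] + (c.size() > 1 ? c[1] * xt : 0.0);
        for (std::size_t l = 2; l < c.size(); ++l) {
            const double p2 = ((2.0 * l - 1.0) * xt * p1 - (l - 1.0) * p0) / l;
            val += c[l] * p2;
            p0 = p1;
            p1 = p2;
        }
        return val;
    }

A PiecewiseLegendrePolyVector applies this member-wise, which is why polys(xs) in the test below yields an (npolys x nxs) matrix.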
+TEST_CASE("sparseir::PiecewiseLegendrePolyVector") +{ // Initialize data1 Eigen::MatrixXd data1(16, 2); - data1 << 0.49996553669802485, -0.009838135710548356, - 0.003315915376286483, -2.4035906967802686e-5, - 3.4824832610792906e-6, -1.6818592059096e-8, - 1.5530850593697272e-9, -5.67191158452736e-12, - 3.8438802553084145e-13, -1.12861464373688e-15, - -1.4028528586225198e-16, 5.199431653846204e-18, - -3.490774002228127e-16, 4.339342349553959e-18, - -8.247505551908268e-17, 7.379549188001237e-19, - 0.49996553669802485, 0.009838135710548356, - 0.003315915376286483, 2.4035906967802686e-5, - 3.4824832610792906e-6, 1.6818592059096e-8, - 1.5530850593697272e-9, 5.67191158452736e-12, - 3.8438802553084145e-13, 1.12861464373688e-15, - -1.4028528586225198e-16, -5.199431653846204e-18, - -3.490774002228127e-16, -4.339342349553959e-18, - -8.247505551908268e-17, -7.379549188001237e-19; + data1 << 0.49996553669802485, -0.009838135710548356, 0.003315915376286483, + -2.4035906967802686e-5, 3.4824832610792906e-6, -1.6818592059096e-8, + 1.5530850593697272e-9, -5.67191158452736e-12, 3.8438802553084145e-13, + -1.12861464373688e-15, -1.4028528586225198e-16, 5.199431653846204e-18, + -3.490774002228127e-16, 4.339342349553959e-18, -8.247505551908268e-17, + 7.379549188001237e-19, 0.49996553669802485, 0.009838135710548356, + 0.003315915376286483, 2.4035906967802686e-5, 3.4824832610792906e-6, + 1.6818592059096e-8, 1.5530850593697272e-9, 5.67191158452736e-12, + 3.8438802553084145e-13, 1.12861464373688e-15, -1.4028528586225198e-16, + -5.199431653846204e-18, -3.490774002228127e-16, -4.339342349553959e-18, + -8.247505551908268e-17, -7.379549188001237e-19; data1.resize(16, 2); Eigen::VectorXd knots1(3); @@ -139,22 +139,17 @@ TEST_CASE("sparseir::PiecewiseLegendrePolyVector") { // Initialize data2 Eigen::MatrixXd data2(16, 2); - data2 << -0.43195475509329695, 0.436151579050162, - -0.005257007544885257, 0.0010660519696441624, - -6.611545612452212e-6, 7.461310619506964e-7, - -3.2179499894475862e-9, 2.5166526274315926e-10, - -8.387341925898803e-13, 5.008268649326024e-14, - 3.7750894390998034e-17, -2.304983535459561e-16, - 3.0252856483620636e-16, -1.923751082183687e-16, - 7.201014354168769e-17, -3.2715804561902326e-17, - 0.43195475509329695, 0.436151579050162, - 0.005257007544885257, 0.0010660519696441624, - 6.611545612452212e-6, 7.461310619506964e-7, - 3.2179499894475862e-9, 2.5166526274315926e-10, - 8.387341925898803e-13, 5.008268649326024e-14, - -3.7750894390998034e-17, -2.304983535459561e-16, - -3.0252856483620636e-16, -1.923751082183687e-16, - -7.201014354168769e-17, -3.2715804561902326e-17; + data2 << -0.43195475509329695, 0.436151579050162, -0.005257007544885257, + 0.0010660519696441624, -6.611545612452212e-6, 7.461310619506964e-7, + -3.2179499894475862e-9, 2.5166526274315926e-10, -8.387341925898803e-13, + 5.008268649326024e-14, 3.7750894390998034e-17, -2.304983535459561e-16, + 3.0252856483620636e-16, -1.923751082183687e-16, 7.201014354168769e-17, + -3.2715804561902326e-17, 0.43195475509329695, 0.436151579050162, + 0.005257007544885257, 0.0010660519696441624, 6.611545612452212e-6, + 7.461310619506964e-7, 3.2179499894475862e-9, 2.5166526274315926e-10, + 8.387341925898803e-13, 5.008268649326024e-14, -3.7750894390998034e-17, + -2.304983535459561e-16, -3.0252856483620636e-16, -1.923751082183687e-16, + -7.201014354168769e-17, -3.2715804561902326e-17; data2.resize(16, 2); Eigen::VectorXd knots2(3); @@ -163,22 +158,17 @@ TEST_CASE("sparseir::PiecewiseLegendrePolyVector") { // Initialize data3 Eigen::MatrixXd data3(16, 2); - data3 
<< -0.005870438661638806, -0.8376202388555938,
-        0.28368166184926036, -0.0029450618222246236,
-        0.0004277118923277169, -2.4101642603229184e-6,
-        2.2287962786878678e-7, -8.875091544426018e-10,
-        6.021488924175155e-11, -1.8705305570705647e-13,
-        9.924398482443944e-15, 4.299521053905097e-16,
-        -1.0697019178666955e-16, 3.6972269778329906e-16,
-        -8.848885164903329e-17, 6.327687614609368e-17,
-        -0.005870438661638806, 0.8376202388555938,
-        0.28368166184926036, 0.0029450618222246236,
-        0.0004277118923277169, 2.4101642603229184e-6,
-        2.2287962786878678e-7, 8.875091544426018e-10,
-        6.021488924175155e-11, 1.8705305570705647e-13,
-        9.924398482443944e-15, -4.299521053905097e-16,
-        -1.0697019178666955e-16, -3.6972269778329906e-16,
-        -8.848885164903329e-17, -6.327687614609368e-17;
+    data3 << -0.005870438661638806, -0.8376202388555938, 0.28368166184926036,
+        -0.0029450618222246236, 0.0004277118923277169, -2.4101642603229184e-6,
+        2.2287962786878678e-7, -8.875091544426018e-10, 6.021488924175155e-11,
+        -1.8705305570705647e-13, 9.924398482443944e-15, 4.299521053905097e-16,
+        -1.0697019178666955e-16, 3.6972269778329906e-16, -8.848885164903329e-17,
+        6.327687614609368e-17, -0.005870438661638806, 0.8376202388555938,
+        0.28368166184926036, 0.0029450618222246236, 0.0004277118923277169,
+        2.4101642603229184e-6, 2.2287962786878678e-7, 8.875091544426018e-10,
+        6.021488924175155e-11, 1.8705305570705647e-13, 9.924398482443944e-15,
+        -4.299521053905097e-16, -1.0697019178666955e-16,
+        -3.6972269778329906e-16, -8.848885164903329e-17, -6.327687614609368e-17;
     data3.resize(16, 2);

     Eigen::VectorXd knots3(3);
@@ -191,7 +181,8 @@ TEST_CASE("sparseir::PiecewiseLegendrePolyVector") {
     sparseir::PiecewiseLegendrePoly pwlp3(data3, knots3, l3);

     // Create sparseir::PiecewiseLegendrePolyVector
-    std::vector<sparseir::PiecewiseLegendrePoly> polyvec = {pwlp1, pwlp2, pwlp3};
+    std::vector<sparseir::PiecewiseLegendrePoly> polyvec = {pwlp1, pwlp2,
+                                                            pwlp3};
     sparseir::PiecewiseLegendrePolyVector polys(polyvec);

     // Test length
@@ -219,13 +210,14 @@ TEST_CASE("sparseir::PiecewiseLegendrePolyVector") {
     // Test data
     Eigen::Tensor<double, 3> data_tensor = polys.get_data();
-    // Assuming get_data() returns a 3D tensor of size (polyorder, nsegments, npolys)
-    // We can compare data_tensor with the individual data matrices
+    // Assuming get_data() returns a 3D tensor of size (polyorder, nsegments,
+    // npolys). We can compare data_tensor with the individual data matrices.

     // Test evaluation at an array of x
     Eigen::VectorXd xs(4);
     xs << -0.8, -0.2, 0.2, 0.8;
-    Eigen::MatrixXd polys_xs = polys(xs); // Should return a matrix of size (3, 4)
+    Eigen::MatrixXd polys_xs =
+        polys(xs); // Should return a matrix of size (3, 4)

     Eigen::MatrixXd expected_xs(3, 4);
     for (int i = 0; i < 4; ++i) {
@@ -234,16 +226,17 @@ TEST_CASE("sparseir::PiecewiseLegendrePolyVector") {
     REQUIRE(polys_xs.isApprox(expected_xs));
 }

-TEST_CASE("Deriv") {
+TEST_CASE("Deriv")
+{
     // Initialize data, knots, and create sparseir::PiecewiseLegendrePoly
     Eigen::MatrixXd data(3, 3);
     data << 0.8177021060277301, 0.7085670484724618, 0.5033588232863977,
-        0.3804323567786363, 0.7911959541742282, 0.8268504271915096,
-        0.5425813266814807, 0.38397463704084633, 0.21626598379927042;
+        0.3804323567786363, 0.7911959541742282, 0.8268504271915096,
+        0.5425813266814807, 0.38397463704084633, 0.21626598379927042;

     Eigen::VectorXd knots(4);
-    knots << 0.507134318967235, 0.5766150365607372,
-        0.7126662232433161, 0.7357313003784003;
+    knots << 0.507134318967235, 0.5766150365607372, 0.7126662232433161,
+        0.7357313003784003;

     int l = 3;
     sparseir::PiecewiseLegendrePoly pwlp(data, knots, l);

@@ 
-276,16 +269,17 @@ TEST_CASE("Deriv") { REQUIRE(pwlp.norms.isApprox(deriv_pwlp.norms)); } -TEST_CASE("Overlap") { +TEST_CASE("Overlap") +{ // Initialize data, knots, and create sparseir::PiecewiseLegendrePoly Eigen::MatrixXd data(3, 3); data << 0.8177021060277301, 0.7085670484724618, 0.5033588232863977, - 0.3804323567786363, 0.7911959541742282, 0.8268504271915096, - 0.5425813266814807, 0.38397463704084633, 0.21626598379927042; + 0.3804323567786363, 0.7911959541742282, 0.8268504271915096, + 0.5425813266814807, 0.38397463704084633, 0.21626598379927042; Eigen::VectorXd knots(4); - knots << 0.507134318967235, 0.5766150365607372, - 0.7126662232433161, 0.7357313003784003; + knots << 0.507134318967235, 0.5766150365607372, 0.7126662232433161, + 0.7357313003784003; int l = 3; sparseir::PiecewiseLegendrePoly pwlp(data, knots, l); @@ -302,25 +296,21 @@ TEST_CASE("Overlap") { REQUIRE(std::abs(integral - expected_integral) < 1e-12); } -TEST_CASE("Roots") { +TEST_CASE("Roots") +{ // Initialize data and knots (from Julia code) Eigen::MatrixXd data(16, 2); - data << 0.16774734206553019, 0.49223680914312595, - -0.8276728567928646, 0.16912891046582143, - -0.0016231275318572044, 0.00018381683946452256, - -9.699355027805034e-7, 7.60144228530804e-8, - -2.8518324490258146e-10, 1.7090590205708293e-11, - -5.0081401126025e-14, 2.1244236198427895e-15, - 2.0478095258000225e-16, -2.676573801530628e-16, - 2.338165820094204e-16, -1.2050663212312096e-16, - -0.16774734206553019, 0.49223680914312595, - 0.8276728567928646, 0.16912891046582143, - 0.0016231275318572044, 0.00018381683946452256, - 9.699355027805034e-7, 7.60144228530804e-8, - 2.8518324490258146e-10, 1.7090590205708293e-11, - 5.0081401126025e-14, 2.1244236198427895e-15, - -2.0478095258000225e-16, -2.676573801530628e-16, - -2.338165820094204e-16, -1.2050663212312096e-16; + data << 0.16774734206553019, 0.49223680914312595, -0.8276728567928646, + 0.16912891046582143, -0.0016231275318572044, 0.00018381683946452256, + -9.699355027805034e-7, 7.60144228530804e-8, -2.8518324490258146e-10, + 1.7090590205708293e-11, -5.0081401126025e-14, 2.1244236198427895e-15, + 2.0478095258000225e-16, -2.676573801530628e-16, 2.338165820094204e-16, + -1.2050663212312096e-16, -0.16774734206553019, 0.49223680914312595, + 0.8276728567928646, 0.16912891046582143, 0.0016231275318572044, + 0.00018381683946452256, 9.699355027805034e-7, 7.60144228530804e-8, + 2.8518324490258146e-10, 1.7090590205708293e-11, 5.0081401126025e-14, + 2.1244236198427895e-15, -2.0478095258000225e-16, -2.676573801530628e-16, + -2.338165820094204e-16, -1.2050663212312096e-16; data.resize(16, 2); Eigen::VectorXd knots(3); @@ -334,12 +324,10 @@ TEST_CASE("Roots") { // Expected roots (from Julia code) Eigen::VectorXd expected_roots(3); - expected_roots << 0.1118633448586015, - 0.4999999999999998, + expected_roots << 0.1118633448586015, 0.4999999999999998, 0.8881366551413985; // fails // REQUIRE(roots.size() == expected_roots.size()); // REQUIRE(roots.isApprox(expected_roots)); - } diff --git a/test/svd.cxx b/test/svd.cxx index 07e490c..ed7ba7e 100644 --- a/test/svd.cxx +++ b/test/svd.cxx @@ -1,25 +1,25 @@ #include -#include #include -#include #include -#include #include +#include +#include +#include #include -#include #include +#include // test_piecewise_legendre_poly.cpp -#include #include -#include -#include +#include #include -#include #include +#include +#include +#include TEST_CASE("svd.cpp") { @@ -27,15 +27,18 @@ TEST_CASE("svd.cpp") using namespace Eigen; using namespace xprec; - // Create a matrix of Float64x2 
equivalent (here just Eigen::MatrixXd for simplicity)
+    // Create a matrix of Float64x2 equivalent (here just Eigen::MatrixXd for
+    // simplicity)
     /**
     Eigen::MatrixXd mat64x2 = Eigen::MatrixXd::Random(4, 6);
     REQUIRE_NOTHROW(sparseir::compute_svd(mat64x2, "accurate", 2));
-    std::cout << "n_sv_hint is set but will not be used in the current implementation!" << std::endl;
+    std::cout << "n_sv_hint is set but will not be used in the current
+    implementation!" << std::endl;

     REQUIRE_NOTHROW({
         compute_svd(mat64x2, "accurate");
-        std::cout << "strategy is set but will not be used in the current implementation!" << std::endl;
+        std::cout << "strategy is set but will not be used in the current
+    implementation!" << std::endl;
     });
     */

diff --git a/test/sve.cxx b/test/sve.cxx
index 50c921d..2c4976e 100644
--- a/test/sve.cxx
+++ b/test/sve.cxx
@@ -1,20 +1,23 @@
 // sparseir/tests/sve.cpp
-#include
-#include
-#include
 #include
+#include
+#include
 #include
 #include
-#include
+#include
+#include

-#include
 #include
+#include

 using std::invalid_argument;

 // Function to check smoothness
-void check_smooth(const std::function<double(double)>& u, const std::vector<double>& s, double uscale, double fudge_factor) {
+void check_smooth(const std::function<double(double)> &u,
+                  const std::vector<double> &s, double uscale,
+                  double fudge_factor)
+{
     /*
     double epsilon = std::numeric_limits<double>::epsilon();
     std::vector<double> knots = sparseir::knots(u);
@@ -54,7 +57,8 @@ void check_smooth(const std::function<double(double)>& u, const std::vector<dou
     auto ssve2 = sparseir::SamplingSVE<sparseir::LogisticKernel>(lk, 1e-6, 12);
     REQUIRE(ssve2.n_gauss == 12);
-    auto ssve1_double = sparseir::SamplingSVE<sparseir::LogisticKernel, double>(lk, 1e-6);
+    auto ssve1_double =
+        sparseir::SamplingSVE<sparseir::LogisticKernel, double>(lk, 1e-6);
     REQUIRE(ssve1_double.n_gauss == n_gauss);
-    auto ssve2_double = sparseir::SamplingSVE<sparseir::LogisticKernel, double>(lk, 1e-6, 12);
+    auto ssve2_double =
+        sparseir::SamplingSVE<sparseir::LogisticKernel, double>(lk, 1e-6, 12);
     REQUIRE(ssve2_double.n_gauss == 12);
-    auto ssve1_ddouble = sparseir::SamplingSVE<sparseir::LogisticKernel, xprec::DDouble>(lk, 1e-6);
+    auto ssve1_ddouble =
+        sparseir::SamplingSVE<sparseir::LogisticKernel, xprec::DDouble>(lk,
+                                                                        1e-6);
     REQUIRE(ssve1_ddouble.n_gauss == n_gauss);
-    auto ssve2_ddouble = sparseir::SamplingSVE<sparseir::LogisticKernel, xprec::DDouble>(lk, 1e-6, 12);
+    auto ssve2_ddouble =
+        sparseir::SamplingSVE<sparseir::LogisticKernel, xprec::DDouble>(
+            lk, 1e-6, 12);
     REQUIRE(ssve2_ddouble.n_gauss == 12);
 }

-TEST_CASE("CentrosymmSVE", "[CentrosymmSVE]") {
+TEST_CASE("CentrosymmSVE", "[CentrosymmSVE]")
+{
     sparseir::LogisticKernel lk(10.0);
     auto hints = sparseir::sve_hints(lk, 1e-6);
     int nsvals_hint = hints.nsvals();
@@ -94,36 +105,50 @@ TEST_CASE("CentrosymmSVE", "[CentrosymmSVE]") {
     sparseir::Rule rule = sparseir::legendre(n_gauss);

     auto sve = sparseir::CentrosymmSVE<sparseir::LogisticKernel>(lk, 1e-6);
-    auto sve_double = sparseir::CentrosymmSVE<sparseir::LogisticKernel, double>(lk, 1e-6);
-    auto sve_ddouble = sparseir::CentrosymmSVE<sparseir::LogisticKernel, xprec::DDouble>(lk, 1e-6);
+    auto sve_double =
+        sparseir::CentrosymmSVE<sparseir::LogisticKernel, double>(lk, 1e-6);
+    auto sve_ddouble =
+        sparseir::CentrosymmSVE<sparseir::LogisticKernel, xprec::DDouble>(lk,
+                                                                          1e-6);
 }

-TEST_CASE("sve.cpp", "[compute_sve]") {
-    //auto sve = sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(10.0));
+TEST_CASE("sve.cpp", "[compute_sve]")
+{
+    // auto sve =
+    // sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(10.0));

     // Define a map to store SVEResult objects
-    //auto sve_logistic = std::map < int, sparseir::SVEResult<sparseir::LogisticKernel>>{
-    //     {10, sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(10.0))},
-    //     {42, sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(42.0))},
-    //     {10000, sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(10000.0))},
-    //     //{100000000, sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(10000.0), 1e-12)},
+    // auto sve_logistic = std::map < int,
+    // sparseir::SVEResult<sparseir::LogisticKernel>>{
+    //     {10,
+    //     sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(10.0))},
+    //     {42,
+    //     sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(42.0))},
+    //     {10000,
+    //     sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(10000.0))},
+    //     //{100000000,
+    //     sparseir::compute_sve<sparseir::LogisticKernel>(sparseir::LogisticKernel(10000.0),
+    //     1e-12)},
     // };

-    SECTION("smooth with Λ =") {
+    SECTION("smooth with Λ =")
+    {
         for (int Lambda : {10, 42, 10000}) {
             REQUIRE(true);
-            // sparseir::FiniteTempBasis basis(1, Lambda, sve_logistic[Lambda]);
+            // sparseir::FiniteTempBasis basis(1, Lambda,
+            // sve_logistic[Lambda]);
             // TODO: Check that the maximum implementation is defined
-            // check_smooth(basis.u, basis.s, 2 * sparseir::maximum(basis.u(1)), 24);
-            //check_smooth(basis.v, basis.s, 50, 200);
+            // check_smooth(basis.u, basis.s, 2 * sparseir::maximum(basis.u(1)),
+            // 24);
+            // check_smooth(basis.v, basis.s, 50, 200);
         }
     }

     /*
     SECTION("num roots u with Λ =") {
         for (int Lambda : {10, 42, 10000}) {
-            FiniteTempBasis basis(1, Lambda, sparseir::sve_logistic[Lambda]);
-            for (const auto& ui : basis.u) {
+            FiniteTempBasis basis(1, Lambda,
+            sparseir::sve_logistic[Lambda]); for (const auto& ui : basis.u) {
                 std::vector<double> ui_roots = sparseir::roots(ui);
                 REQUIRE(ui_roots.size() == static_cast<size_t>(ui.l));
             }
@@ -135,10 +160,11 @@ TEST_CASE("sve.cpp", "[compute_sve]") {
     SECTION("num roots û with stat =, Λ =") {
         for (const auto& stat : {Fermionic(), Bosonic()}) {
             for (int Lambda : {10, 42, 10000}) {
-                FiniteTempBasis basis(stat, 1, Lambda, sparseir::sve_logistic[Lambda]);
-                for (int i : {1, 2, 8, 11}) {
-                    std::vector<double> x0 = sparseir::find_extrema(basis.uhat[i]);
-                    REQUIRE(i <= x0.size() && x0.size() <= static_cast<size_t>(i + 1));
+                FiniteTempBasis basis(stat, 1, Lambda,
+                sparseir::sve_logistic[Lambda]); for (int i : {1, 2, 8, 11}) {
+                    std::vector<double> x0 =
+                    sparseir::find_extrema(basis.uhat[i]); REQUIRE(i <= x0.size() && x0.size()
+                    <= static_cast<size_t>(i + 1));
                 }
             }
         }
@@ -149,41 +175,57 @@ TEST_CASE("sve.cpp", "[compute_sve]") {
     SECTION("accuracy with stat =, Λ =") {
         for (const auto& stat : {Fermionic(), Bosonic()}) {
             for (int Lambda : {10, 42, 10000}) {
-                FiniteTempBasis basis(stat, 4, Lambda, sparseir::sve_logistic[Lambda]);
-                REQUIRE(sparseir::accuracy(basis) <= sparseir::significance(basis).back());
+                FiniteTempBasis basis(stat, 4, Lambda,
+                sparseir::sve_logistic[Lambda]); REQUIRE(sparseir::accuracy(basis) <=
+                sparseir::significance(basis).back());
                 REQUIRE(sparseir::significance(basis).front() == 1.0);
-                REQUIRE(sparseir::accuracy(basis) <= (basis.s.back() / basis.s.front()));
+                REQUIRE(sparseir::accuracy(basis) <= (basis.s.back() /
+                basis.s.front()));
             }
         }
     }
     */
 }

-TEST_CASE("sve.cpp", "[choose_accuracy]") {
-    REQUIRE(sparseir::choose_accuracy(nullptr, nullptr) == std::make_tuple(2.2204460492503131e-16, "Float64x2", "default"));
-    REQUIRE(sparseir::choose_accuracy(nullptr, "Float64") == std::make_tuple(1.4901161193847656e-8, "Float64", "default"));
-    REQUIRE(sparseir::choose_accuracy(nullptr, "Float64x2") == std::make_tuple(2.2204460492503131e-16, "Float64x2", "default"));
-
-    REQUIRE(sparseir::choose_accuracy(1e-6, nullptr) == std::make_tuple(1.0e-6, "Float64", "default"));
+TEST_CASE("sve.cpp", "[choose_accuracy]")
+{
+    REQUIRE(sparseir::choose_accuracy(nullptr, nullptr) ==
+            std::make_tuple(2.2204460492503131e-16, "Float64x2", "default"));
+    REQUIRE(sparseir::choose_accuracy(nullptr, "Float64") ==
+            std::make_tuple(1.4901161193847656e-8, "Float64", "default"));
+    REQUIRE(sparseir::choose_accuracy(nullptr, "Float64x2") ==
+            std::make_tuple(2.2204460492503131e-16, "Float64x2", "default"));
+
+    REQUIRE(sparseir::choose_accuracy(1e-6, nullptr) ==
+            std::make_tuple(1.0e-6, "Float64", "default"));

     // Note: Catch2 doesn't have a built-in way to capture logs.
-    // You might need to implement a custom logger or use a library that supports log capturing.
-    // Add debug output to see the actual return value
-    REQUIRE(sparseir::choose_accuracy(1e-8, nullptr) == std::make_tuple(1.0e-8, "Float64x2", "default"));
-    REQUIRE(sparseir::choose_accuracy(1e-20, nullptr) == std::make_tuple(1.0e-20, "Float64x2", "default"));
-
-    REQUIRE(sparseir::choose_accuracy(1e-10, "Float64") == std::make_tuple(1.0e-10, "Float64", "accurate"));
-
-    REQUIRE(sparseir::choose_accuracy(1e-6, "Float64") == std::make_tuple(1.0e-6, "Float64", "default"));
-    REQUIRE(sparseir::choose_accuracy(1e-6, "Float64", "auto") == std::make_tuple(1.0e-6, "Float64", "default"));
-    REQUIRE(sparseir::choose_accuracy(1e-6, "Float64", "accurate") == std::make_tuple(1.0e-6, "Float64", "accurate"));
-
-
-    SECTION("truncate") {
-        sparseir::CentrosymmSVE<sparseir::LogisticKernel> sve(sparseir::LogisticKernel(5), 1e-6);
+    // You might need to implement a custom logger or use a library that
+    // supports log capturing. Add debug output to see the actual return value
+    REQUIRE(sparseir::choose_accuracy(1e-8, nullptr) ==
+            std::make_tuple(1.0e-8, "Float64x2", "default"));
+    REQUIRE(sparseir::choose_accuracy(1e-20, nullptr) ==
+            std::make_tuple(1.0e-20, "Float64x2", "default"));
+
+    REQUIRE(sparseir::choose_accuracy(1e-10, "Float64") ==
+            std::make_tuple(1.0e-10, "Float64", "accurate"));
+
+    REQUIRE(sparseir::choose_accuracy(1e-6, "Float64") ==
+            std::make_tuple(1.0e-6, "Float64", "default"));
+    REQUIRE(sparseir::choose_accuracy(1e-6, "Float64", "auto") ==
+            std::make_tuple(1.0e-6, "Float64", "default"));
+    REQUIRE(sparseir::choose_accuracy(1e-6, "Float64", "accurate") ==
+            std::make_tuple(1.0e-6, "Float64", "accurate"));
+
+    SECTION("truncate")
+    {
+        sparseir::CentrosymmSVE<sparseir::LogisticKernel> sve(
+            sparseir::LogisticKernel(5), 1e-6);
         std::vector<Eigen::MatrixX<double>> matrices = sve.matrices();
         REQUIRE(matrices.size() == 2);
-        std::vector<std::tuple<Eigen::MatrixX<double>, Eigen::MatrixX<double>, Eigen::MatrixX<double>>> svds;
-        for (const auto& mat : matrices) {
+        std::vector<std::tuple<Eigen::MatrixX<double>, Eigen::MatrixX<double>,
+                               Eigen::MatrixX<double>>>
+            svds;
+        for (const auto &mat : matrices) {
            auto svd = sparseir::compute_svd(mat);
            svds.push_back(svd);
        }
@@ -191,7 +233,7 @@ TEST_CASE("sve.cpp", "[choose_accuracy]") {
        // Extract singular values and vectors
        std::vector<Eigen::MatrixX<double>> u_list, v_list;
        std::vector<Eigen::MatrixX<double>> s_list;
-        for (const auto& svd : svds) {
+        for (const auto &svd : svds) {
            auto u = std::get<0>(svd);
            auto s = std::get<1>(svd);
            auto v = std::get<2>(svd);
@@ -201,10 +243,11 @@ TEST_CASE("sve.cpp", "[choose_accuracy]") {
        for (int lmax = 3; lmax <= 20; ++lmax) {
-            auto truncated = sparseir::truncate(u_list, s_list, v_list, 1e-8, lmax);
-            auto u = std::get<0>(truncated);
-            auto s = std::get<1>(truncated);
-            auto v = std::get<2>(truncated);
+            auto truncated =
+                sparseir::truncate(u_list, s_list, v_list, 1e-8, lmax);
+            auto u = std::get<0>(truncated);
+            auto s = std::get<1>(truncated);
+            auto v = std::get<2>(truncated);

            auto sveresult = sve.postprocess(u, s, v);
            REQUIRE(sveresult.u.size() == sveresult.s.size());
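
A note on the evaluation contract exercised in the PiecewiseLegendrePolyVector test above: evaluating npolys polynomials at nx points yields an (npolys, nx) matrix, here (3, 4). A minimal stand-alone sketch of that shape contract, with plain lambdas standing in for PiecewiseLegendrePoly (evaluate_all_sketch is an illustrative name, not part of sparseir):

#include <Eigen/Dense>
#include <functional>
#include <iostream>
#include <vector>

// Shape contract from the test: evaluating npolys polynomials at nx points
// gives an (npolys, nx) matrix, here (3, 4). Plain lambdas stand in for
// PiecewiseLegendrePoly; evaluate_all_sketch is an illustrative name.
Eigen::MatrixXd
evaluate_all_sketch(const std::vector<std::function<double(double)>> &polys,
                    const Eigen::VectorXd &xs)
{
    Eigen::MatrixXd out(polys.size(), xs.size());
    for (int i = 0; i < out.rows(); ++i)     // row i: i-th polynomial
        for (int j = 0; j < out.cols(); ++j) // column j: j-th point
            out(i, j) = polys[i](xs(j));
    return out;
}

int main()
{
    std::vector<std::function<double(double)>> polys = {
        [](double) { return 1.0; },                  // P0
        [](double x) { return x; },                  // P1
        [](double x) { return 1.5 * x * x - 0.5; }}; // P2
    Eigen::VectorXd xs(4);
    xs << -0.8, -0.2, 0.2, 0.8;
    std::cout << evaluate_all_sketch(polys, xs) << std::endl; // 3 x 4 matrix
}

Each row corresponds to one polynomial and each column to one evaluation point, which is exactly the layout the expected_xs comparison in the test assumes.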
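
The [choose_accuracy] expectations pin down a small dispatch rule: with no requested accuracy, the result defaults to the working type's own accuracy (eps ≈ 2.22e-16 for Float64x2, sqrt(eps) ≈ 1.49e-8 for Float64); a requested epsilon below sqrt(eps) of double forces Float64x2; and pinning Float64 while requesting an epsilon it cannot comfortably resolve switches the SVD strategy to "accurate". A hedged sketch of that rule, verified against the REQUIREs above (choose_accuracy_sketch is a hypothetical name, not sparseir's implementation, and the explicit "auto"/"accurate" strategy argument is folded away):

#include <cmath>
#include <limits>
#include <optional>
#include <string>
#include <tuple>

using Choice = std::tuple<double, std::string, std::string>;

inline Choice choose_accuracy_sketch(std::optional<double> epsilon,
                                     std::optional<std::string> Twork)
{
    const double eps64 = std::numeric_limits<double>::epsilon();
    const double sqrt_eps64 = std::sqrt(eps64); // ~1.4901161193847656e-8
    if (!epsilon) {
        // No target accuracy: fall back to the working type's own accuracy.
        if (Twork == "Float64")
            return {sqrt_eps64, "Float64", "default"};
        return {eps64, "Float64x2", "default"}; // 2.2204460492503131e-16
    }
    if (!Twork) {
        // Pick the cheapest working type that still resolves epsilon.
        std::string T = (*epsilon >= sqrt_eps64) ? "Float64" : "Float64x2";
        return {*epsilon, T, "default"};
    }
    // Both pinned: a target finer than the type can comfortably resolve
    // switches the SVD strategy from "default" to "accurate".
    bool too_fine = (Twork == "Float64" && *epsilon < sqrt_eps64);
    return {*epsilon, *Twork, too_fine ? "accurate" : "default"};
}

int main()
{
    auto [eps, T, strategy] = choose_accuracy_sketch(1e-10, "Float64");
    // Yields {1e-10, "Float64", "accurate"}, matching the REQUIRE above.
    return strategy == "accurate" ? 0 : 1;
}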
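
The truncate loop at the end checks one invariant: for every lmax, truncation keeps at most lmax singular values and discards those below rtol (here 1e-8) times the largest one, so postprocess always receives u, s, v of matching sizes. A self-contained sketch of that cut, under the assumption that singular values arrive sorted in descending order (truncate_count_sketch is an illustrative helper, not sparseir's API):

#include <Eigen/Dense>
#include <iostream>

// Keep at most lmax singular values and drop those below rtol times the
// largest one; s is assumed sorted in descending order.
inline int truncate_count_sketch(const Eigen::VectorXd &s, double rtol,
                                 int lmax)
{
    const double cutoff = rtol * s(0); // s(0) is the largest value
    int keep = 0;
    while (keep < s.size() && keep < lmax && s(keep) > cutoff)
        ++keep;
    return keep;
}

int main()
{
    Eigen::VectorXd s(5);
    s << 1.0, 1e-2, 1e-6, 1e-10, 1e-14;
    // With rtol = 1e-8, as in the test, only the first three survive for
    // every lmax in 3..20, so u, s and v stay the same size after the cut.
    std::cout << truncate_count_sketch(s, 1e-8, 20) << std::endl; // 3
}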