Remove `using namespace nda` to Avoid Namespace Pollution #12

Open · wants to merge 1 commit into base: 1.2.x
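For context, here is a self-contained sketch (hypothetical code, not part of cppdlr or nda) of the problem this change addresses: a `using namespace` directive at global scope in an installed header leaks the library's names (e.g. `nda::range`, `nda::real`) into every file that includes the header, where they can collide with the consumer's own names. Fully qualifying names inside the headers, as this PR does, avoids that. The namespace `lib` below stands in for `nda`.

```cpp
// Hypothetical sketch of header namespace pollution; "lib" stands in for nda.
#include <complex>

namespace lib {
  struct range { int lo = 0, hi = 0; };                      // analogous to nda::range
  inline double real(std::complex<double> z) { return z.real(); }  // analogous to nda::real
}

// If an included header contained `using namespace lib;` at global scope,
// every unqualified use of `range` or `real` below could become ambiguous
// for any file that merely includes it. Qualifying instead keeps names explicit.
struct range { int lo, hi; };   // the consumer's own, unrelated type

int main() {
  range mine{0, 10};            // unambiguous: only ::range is found here
  lib::range theirs{1, 2};      // library type referred to explicitly
  return (mine.hi - mine.lo) + (theirs.hi - theirs.lo) > 0 ? 0 : 1;
}
```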
4 changes: 2 additions & 2 deletions c++/cppdlr/dlr_build.hpp
@@ -18,10 +18,10 @@
#include <nda/nda.hpp>
#include "dlr_kernels.hpp"

using namespace nda;

namespace cppdlr {


using dcomplex = std::complex<double>;
/**
* @class fineparams
* @brief Class containing parameters for fine composite Chebyshev grid
4 changes: 2 additions & 2 deletions c++/cppdlr/dlr_dyson.hpp
@@ -32,7 +32,7 @@ namespace cppdlr {
*/

// Type of Hamiltonian, and scalar type of Hamiltonian
template <typename Ht, nda::Scalar Sh = std::conditional_t<std::floating_point<Ht>, Ht, get_value_t<Ht>>>
template <typename Ht, nda::Scalar Sh = std::conditional_t<std::floating_point<Ht>, Ht, nda::get_value_t<Ht>>>
requires(std::floating_point<Ht> || nda::MemoryMatrix<Ht>)
class dyson_it {

@@ -120,7 +120,7 @@ namespace cppdlr {
// Solve Dyson equation
auto g = Tg(sig.shape()); // Declare Green's function
g = rhs; // Get right hand side of Dyson equation
auto g_rs = nda::matrix_view<get_value_t<Tg>>(nda::reshape(g, norb, r * norb)); // Reshape g to be compatible w/ LAPACK
auto g_rs = nda::matrix_view<nda::get_value_t<Tg>>(nda::reshape(g, norb, r * norb)); // Reshape g to be compatible w/ LAPACK
nda::lapack::getrs(sysmat, g_rs, ipiv); // Back solve
if constexpr (std::floating_point<Ht>) { // If h is scalar, g is scalar-valued
return g;
4 changes: 2 additions & 2 deletions c++/cppdlr/dlr_imfreq.hpp
@@ -88,7 +88,7 @@ namespace cppdlr {
if (niom != g.shape(0)) throw std::runtime_error("First dim of g != # DLR imaginary frequency nodes.");

// Make a copy of the data in Fortran Layout as required by getrs
auto gf = nda::array<get_value_t<T>, get_rank<T>, F_layout>(g);
auto gf = nda::array<nda::get_value_t<T>, nda::get_rank<T>, nda::F_layout>(g);

// Reshape as matrix_view with r rows
auto gfv = nda::reshape(gf, niom, g.size() / niom);
@@ -101,7 +101,7 @@ namespace cppdlr {
auto s = nda::vector<double>(r); // Not needed
double rcond = 0; // Not needed
int rank = 0; // Not needed
nda::lapack::gelss(nda::matrix<dcomplex, F_layout>(cf2if), gfv, s, rcond, rank);
nda::lapack::gelss(nda::matrix<dcomplex, nda::F_layout>(cf2if), gfv, s, rcond, rank);
}

return gf(nda::range(r), nda::ellipsis());
6 changes: 3 additions & 3 deletions c++/cppdlr/dlr_imtime.hpp
@@ -107,7 +107,7 @@ namespace cppdlr {
if (r != g.shape(0)) throw std::runtime_error("First dim of g != DLR rank r.");

// Make a copy of the data in Fortran Layout as required by getrs
auto gf = nda::array<get_value_t<T>, get_rank<T>, F_layout>(g);
auto gf = nda::array<nda::get_value_t<T>, nda::get_rank<T>, nda::F_layout>(g);

// Reshape as matrix_view with r rows
auto gfv = nda::reshape(gf, r, g.size() / r);
@@ -227,14 +227,14 @@ namespace cppdlr {
// Get matrix for least squares fitting: columns are DLR basis functions
// evaluating at data points t. Must built in Fortran layout for
// compatibility with LAPACK.
auto kmat = nda::matrix<S, F_layout>(n, r); // Make sure matrix has same scalar type as g
auto kmat = nda::matrix<S, nda::F_layout>(n, r); // Make sure matrix has same scalar type as g
for (int j = 0; j < r; ++j) {
for (int i = 0; i < n; ++i) { kmat(i, j) = k_it(t(i), dlr_rf(j)); }
}

// Reshape g to matrix w/ first dimension n, and put in Fortran layout for
// compatibility w/ LAPACK
auto g_rs = nda::matrix<S, F_layout>(nda::reshape(g, n, g.size() / n));
auto g_rs = nda::matrix<S, nda::F_layout>(nda::reshape(g, n, g.size() / n));

// Solve least squares problem to obtain DLR coefficients
auto s = nda::vector<double>(r); // Singular values (not needed)
64 changes: 32 additions & 32 deletions c++/cppdlr/utils.hpp
@@ -19,9 +19,9 @@
#include <nda/nda.hpp>
#include <nda/blas.hpp>

using namespace nda;

namespace cppdlr {
using dcomplex = std::complex<double>;

/**
* Class constructor for barycheb: barycentric Lagrange interpolation at
@@ -101,7 +101,7 @@ namespace cppdlr {
*/

// Type T must be scalar-valued rank 2 array/array_view or matrix/matrix_view
template <nda::MemoryArrayOfRank<2> T, nda::Scalar S = get_value_t<T>>
template <nda::MemoryArrayOfRank<2> T, nda::Scalar S = nda::get_value_t<T>>
std::tuple<typename T::regular_type, nda::vector<double>, nda::vector<int>> pivrgs(T const &a, double eps) {

auto _ = nda::range::all;
@@ -116,7 +116,7 @@ namespace cppdlr {
// Compute norms of rows of input matrix, and rescale eps tolerance
auto norms = nda::vector<double>(m);
double epssq = eps * eps;
for (int j = 0; j < m; ++j) { norms(j) = real(blas::dotc(aa(j, _), aa(j, _))); }
for (int j = 0; j < m; ++j) { norms(j) = nda::real(nda::blas::dotc(aa(j, _), aa(j, _))); }

// Begin pivoted double Gram-Schmidt procedure
int jpiv = 0, jj = 0;
@@ -151,10 +151,10 @@

// Orthogonalize current rows (now the chosen pivot row) against all
// previously chosen rows
for (int k = 0; k < j; ++k) { aa(j, _) = aa(j, _) - aa(k, _) * blas::dotc(aa(k, _), aa(j, _)); }
for (int k = 0; k < j; ++k) { aa(j, _) = aa(j, _) - aa(k, _) * nda::blas::dotc(aa(k, _), aa(j, _)); }

// Get norm of current row
nrm = real(blas::dotc(aa(j, _), aa(j, _)));
nrm = nda::real(nda::blas::dotc(aa(j, _), aa(j, _)));
//nrm = nda::norm(aa(j, _));

// Terminate if sufficiently small, and return previously selected rows
@@ -167,8 +167,8 @@ namespace cppdlr {
// Orthogonalize remaining rows against current row
for (int k = j + 1; k < m; ++k) {
if (norms(k) <= epssq) { continue; } // Can skip rows with norm less than tolerance
aa(k, _) = aa(k, _) - aa(j, _) * blas::dotc(aa(j, _), aa(k, _));
norms(k) = real(blas::dotc(aa(k, _), aa(k, _)));
aa(k, _) = aa(k, _) - aa(j, _) * nda::blas::dotc(aa(j, _), aa(k, _));
norms(k) = nda::real(nda::blas::dotc(aa(k, _), aa(k, _)));
}
}

@@ -199,7 +199,7 @@ namespace cppdlr {
*/

// Type T must be scalar-valued rank 2 array/array_view or matrix/matrix_view
template <nda::MemoryArrayOfRank<2> T, nda::Scalar S = get_value_t<T>>
template <nda::MemoryArrayOfRank<2> T, nda::Scalar S = nda::get_value_t<T>>
std::tuple<typename T::regular_type, nda::vector<double>, nda::vector<int>> pivrgs_sym(T const &a, double eps) {

auto _ = nda::range::all;
@@ -218,7 +218,7 @@ namespace cppdlr {
// Compute norms of rows of input matrix, and rescale eps tolerance
auto norms = nda::vector<double>(m);
double epssq = eps * eps;
for (int j = 0; j < m; ++j) { norms(j) = real(blas::dotc(aa(j, _), aa(j, _))); }
for (int j = 0; j < m; ++j) { norms(j) = nda::real(nda::blas::dotc(aa(j, _), aa(j, _))); }

// Begin pivoted double Gram-Schmidt procedure
int jpiv = 0, jj = 0;
@@ -268,10 +268,10 @@

// Orthogonalize current row (now the first chosen pivot row) against all
// previously chosen rows
for (int k = 0; k < j; ++k) { aa(j, _) = aa(j, _) - aa(k, _) * blas::dotc(aa(k, _), aa(j, _)); }
for (int k = 0; k < j; ++k) { aa(j, _) = aa(j, _) - aa(k, _) * nda::blas::dotc(aa(k, _), aa(j, _)); }

// Get norm of current row
nrm = real(blas::dotc(aa(j, _), aa(j, _)));
nrm = nda::real(nda::blas::dotc(aa(j, _), aa(j, _)));

// Terminate if sufficiently small, and return previously selected rows
// (not including current row)
@@ -283,23 +283,23 @@ namespace cppdlr {
// Orthogonalize remaining rows against current row
for (int k = j + 1; k < m; ++k) {
if (norms(k) <= epssq) { continue; } // Can skip rows with norm less than tolerance
aa(k, _) = aa(k, _) - aa(j, _) * blas::dotc(aa(j, _), aa(k, _));
norms(k) = real(blas::dotc(aa(k, _), aa(k, _)));
aa(k, _) = aa(k, _) - aa(j, _) * nda::blas::dotc(aa(j, _), aa(k, _));
norms(k) = nda::real(nda::blas::dotc(aa(k, _), aa(k, _)));
}

// Orthogonalize current row (now the second chosen pivot row) against all
// previously chosen rows
for (int k = 0; k < j + 1; ++k) { aa(j + 1, _) = aa(j + 1, _) - aa(k, _) * blas::dotc(aa(k, _), aa(j + 1, _)); }
for (int k = 0; k < j + 1; ++k) { aa(j + 1, _) = aa(j + 1, _) - aa(k, _) * nda::blas::dotc(aa(k, _), aa(j + 1, _)); }

// Normalize current row
nrm = real(blas::dotc(aa(j + 1, _), aa(j + 1, _)));
nrm = nda::real(nda::blas::dotc(aa(j + 1, _), aa(j + 1, _)));
aa(j + 1, _) = aa(j + 1, _) * (1 / sqrt(nrm));

// Orthogonalize remaining rows against current row
for (int k = j + 2; k < m; ++k) {
if (norms(k) <= epssq) { continue; } // Can skip rows with norm less than tolerance
aa(k, _) = aa(k, _) - aa(j + 1, _) * blas::dotc(aa(j + 1, _), aa(k, _));
norms(k) = real(blas::dotc(aa(k, _), aa(k, _)));
aa(k, _) = aa(k, _) - aa(j + 1, _) * nda::blas::dotc(aa(j + 1, _), aa(k, _));
norms(k) = nda::real(nda::blas::dotc(aa(k, _), aa(k, _)));
}
}

@@ -332,7 +332,7 @@ namespace cppdlr {
*/

// Type T must be scalar-valued rank 2 array/array_view or matrix/matrix_view
template <nda::MemoryArrayOfRank<2> T, nda::Scalar S = get_value_t<T>>
template <nda::MemoryArrayOfRank<2> T, nda::Scalar S = nda::get_value_t<T>>
std::tuple<typename T::regular_type, nda::vector<double>, nda::vector<int>> pivrgs_sym(T const &a, int r) {

auto _ = nda::range::all;
@@ -360,7 +360,7 @@

// Compute norms of rows of input matrix
auto norms = nda::vector<double>(m);
for (int j = 0; j < m; ++j) { norms(j) = real(blas::dotc(aa(j, _), aa(j, _))); }
for (int j = 0; j < m; ++j) { norms(j) = nda::real(nda::blas::dotc(aa(j, _), aa(j, _))); }

// Begin pivoted double Gram-Schmidt procedure
int jpiv = 0, jj = 0;
@@ -384,14 +384,14 @@ namespace cppdlr {
//jpiv = 0; // Index of pivot row

// Normalize
nrm = real(blas::dotc(aa(0, _), aa(0, _)));
nrm = nda::real(nda::blas::dotc(aa(0, _), aa(0, _)));
aa(0, _) = aa(0, _) * (1 / sqrt(nrm));
//aa(0, _) /= sqrt(real(blas::dotc(aa(0, _), aa(0, _))));
//aa(0, _) /= sqrt(nda::real(nda::blas::dotc(aa(0, _), aa(0, _))));

// Orthogonalize remaining rows against current row
for (int k = 1; k < m; ++k) {
aa(k, _) = aa(k, _) - aa(0, _) * blas::dotc(aa(0, _), aa(k, _));
norms(k) = real(blas::dotc(aa(k, _), aa(k, _)));
aa(k, _) = aa(k, _) - aa(0, _) * nda::blas::dotc(aa(0, _), aa(k, _));
norms(k) = nda::real(nda::blas::dotc(aa(k, _), aa(k, _)));
}
}

@@ -433,30 +433,30 @@

// Orthogonalize current row (now the first chosen pivot row) against all
// previously chosen rows
for (int k = 0; k < j; ++k) { aa(j, _) = aa(j, _) - aa(k, _) * blas::dotc(aa(k, _), aa(j, _)); }
for (int k = 0; k < j; ++k) { aa(j, _) = aa(j, _) - aa(k, _) * nda::blas::dotc(aa(k, _), aa(j, _)); }

// Normalize current row
nrm = real(blas::dotc(aa(j, _), aa(j, _)));
nrm = nda::real(nda::blas::dotc(aa(j, _), aa(j, _)));
aa(j, _) = aa(j, _) * (1 / sqrt(nrm));

// Orthogonalize remaining rows against current row
for (int k = j + 1; k < m; ++k) {
aa(k, _) = aa(k, _) - aa(j, _) * blas::dotc(aa(j, _), aa(k, _));
norms(k) = real(blas::dotc(aa(k, _), aa(k, _)));
aa(k, _) = aa(k, _) - aa(j, _) * nda::blas::dotc(aa(j, _), aa(k, _));
norms(k) = nda::real(nda::blas::dotc(aa(k, _), aa(k, _)));
}

// Orthogonalize current row (now the second chosen pivot row) against all
// previously chosen rows
for (int k = 0; k < j + 1; ++k) { aa(j + 1, _) = aa(j + 1, _) - aa(k, _) * blas::dotc(aa(k, _), aa(j + 1, _)); }
for (int k = 0; k < j + 1; ++k) { aa(j + 1, _) = aa(j + 1, _) - aa(k, _) * nda::blas::dotc(aa(k, _), aa(j + 1, _)); }

// Normalize current row
nrm = real(blas::dotc(aa(j + 1, _), aa(j + 1, _)));
nrm = nda::real(nda::blas::dotc(aa(j + 1, _), aa(j + 1, _)));
aa(j + 1, _) = aa(j + 1, _) * (1 / sqrt(nrm));

// Orthogonalize remaining rows against current row
for (int k = j + 2; k < m; ++k) {
aa(k, _) = aa(k, _) - aa(j + 1, _) * blas::dotc(aa(j + 1, _), aa(k, _));
norms(k) = real(blas::dotc(aa(k, _), aa(k, _)));
aa(k, _) = aa(k, _) - aa(j + 1, _) * nda::blas::dotc(aa(j + 1, _), aa(k, _));
norms(k) = nda::real(nda::blas::dotc(aa(k, _), aa(k, _)));
}
}

@@ -523,7 +523,7 @@ namespace cppdlr {
/**
* @brief Get real-valued type corresponding to type of given nda MemoryArray
*/
template <nda::MemoryArray T> using make_real_t = decltype(make_regular(real(std::declval<T>())));
template <nda::MemoryArray T> using make_real_t = decltype(make_regular(nda::real(std::declval<T>())));

/**
* @brief Get complex-valued type corresponding to type of given nda MemoryArray