From 6b0cac55183bca508e6e22cb0eba359ed949479e Mon Sep 17 00:00:00 2001
From: jasonkaye
Date: Thu, 20 Jun 2024 22:14:42 +0000
Subject: [PATCH] Deploying to github.io from @ flatironinstitute/cppdlr@e22d10e0b37fad5e92c6bd382beaeac7a47308d9 🚀
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 docs/main/.doctrees/environment.pickle | Bin 1689556 -> 1689556 bytes
 .../html/classcppdlr_1_1imtime__ops.html | 5 +-
 .../doxygen/html/dlr__dyson_8hpp_source.html | 4 +-
 .../doxygen/html/dlr__imtime_8hpp_source.html | 1064 ++++++++---------
 .../latex/classcppdlr_1_1imtime__ops.tex | 7 +-
 5 files changed, 544 insertions(+), 536 deletions(-)

diff --git a/docs/main/.doctrees/environment.pickle b/docs/main/.doctrees/environment.pickle
index a28e433627e91dce4230bad6b67db298859bdce3..4ec288b4ac2061e580afc691999bd6f35096c95c 100644
GIT binary patch

diff --git a/docs/main/doxygen/html/classcppdlr_1_1imtime__ops.html b/docs/main/doxygen/html/classcppdlr_1_1imtime__ops.html
index 7351d8f..eb5bbce 100644
--- a/docs/main/doxygen/html/classcppdlr_1_1imtime__ops.html
+++ b/docs/main/doxygen/html/classcppdlr_1_1imtime__ops.html
@@ -1295,7 +1295,10 @@

Returns
DLR coefficients of G
+
Returns
DLR coefficients of G (if transpose = false, the default; otherwise, see the note below)
+
Note
By setting the optional argument transpose to true, this method applies the transpose of the values -> coefficients transformation. This is useful for constructing matrices of linear operators which act on vectors of DLR grid values, rather than DLR coefficients. As an example, suppose L is a linear functional such that L[G] = c, where G is a Green's function and c is a scalar. If gc is the vector of DLR coefficients of G, then we can represent L by a vector l, with entries given by the action of L on the DLR basis functions, and we have l^T * gc = c. Then
+

c = l^T * it2cf * g = (it2cf^T * l)^T * g,

+

where g is the vector of values of G at the DLR nodes, and it2cf is the imaginary time values -> coefficients matrix. Thus it2cf^T * l is the vector representing the linear functional L acting on the vector of values of G at the DLR nodes, and it can be precomputed using the vals2coefs method with the transpose option set to true.
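As a concrete illustration of this note, the vector it2cf^T * l can be obtained directly from vals2coefs with transpose = true. The following is a minimal hypothetical sketch, not part of this patch: the imtime_ops object itops, the functional weights lvec, and the grid values gvals are assumed inputs, and the convenience header path is also an assumption.

#include <cppdlr/cppdlr.hpp>

// Given the action lvec(k) of a linear functional L on the k-th DLR basis function,
// and the values gvals of G at the DLR imaginary time nodes, compute c = L[G].
double apply_functional(cppdlr::imtime_ops const &itops,
                        nda::vector<double> const &lvec,
                        nda::vector<double> const &gvals) {
  // it2cf^T * lvec: representation of L acting on DLR grid values (can be precomputed)
  auto lgrid = itops.vals2coefs(lvec, true);
  // c = (it2cf^T * l)^T * g
  return nda::blas::dotc(lgrid, gvals);
}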

diff --git a/docs/main/doxygen/html/dlr__dyson_8hpp_source.html b/docs/main/doxygen/html/dlr__dyson_8hpp_source.html index ed9294c..edfad2b 100644 --- a/docs/main/doxygen/html/dlr__dyson_8hpp_source.html +++ b/docs/main/doxygen/html/dlr__dyson_8hpp_source.html @@ -222,8 +222,8 @@
228 
229 } // namespace cppdlr
Class responsible for all DLR imaginary time operations, including building imaginary time grid and t...
Definition: dlr_imtime.hpp:46
-
int rank() const
Get DLR rank.
Definition: dlr_imtime.hpp:635
-
nda::vector_const_view< double > get_itnodes() const
Get DLR imaginary time nodes.
Definition: dlr_imtime.hpp:589
+
int rank() const
Get DLR rank.
Definition: dlr_imtime.hpp:653
+
nda::vector_const_view< double > get_itnodes() const
Get DLR imaginary time nodes.
Definition: dlr_imtime.hpp:607
Class for solving Dyson equation in imaginary time.
diff --git a/docs/main/doxygen/html/dlr__imtime_8hpp_source.html b/docs/main/doxygen/html/dlr__imtime_8hpp_source.html index 4223962..c9c321d 100644 --- a/docs/main/doxygen/html/dlr__imtime_8hpp_source.html +++ b/docs/main/doxygen/html/dlr__imtime_8hpp_source.html @@ -117,555 +117,555 @@
70 
71  imtime_ops() = default;
72 
-
83  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>> typename T::regular_type vals2coefs(T const &g, bool transpose = false) const {
-
84 
-
85  // MemoryArray type can be nda vector, matrix, array, or view of any of
-
86  // these; taking a regular_type converts, for example, a matrix view to a
-
87  // matrix.
-
88 
-
89  if (r != g.shape(0)) throw std::runtime_error("First dim of g != DLR rank r.");
-
90 
-
91  // Make a copy of the data in Fortran Layout as required by getrs
-
92  auto gf = nda::array<get_value_t<T>, get_rank<T>, F_layout>(g);
-
93 
-
94  // Reshape as matrix_view with r rows
-
95  auto gfv = nda::reshape(gf, r, g.size() / r);
-
96 
-
97  // Solve linear system (multiple right hand sides) to convert vals -> coeffs
-
98  if constexpr (nda::is_complex_v<S>) { // getrs requires matrix and rhs to have same value type
-
99  transpose ? nda::lapack::getrs(nda::transpose(it2cf.zlu), gfv, it2cf.piv) : nda::lapack::getrs(it2cf.zlu, gfv, it2cf.piv);
-
100  } else {
-
101  transpose ? nda::lapack::getrs(nda::transpose(it2cf.lu), gfv, it2cf.piv) : nda::lapack::getrs(it2cf.lu, gfv, it2cf.piv);
-
102  }
-
103 
-
104  return gf;
-
105  }
+
101  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>> typename T::regular_type vals2coefs(T const &g, bool transpose = false) const {
+
102 
+
103  // MemoryArray type can be nda vector, matrix, array, or view of any of
+
104  // these; taking a regular_type converts, for example, a matrix view to a
+
105  // matrix.
106 
-
115  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>> typename T::regular_type coefs2vals(T const &gc) const {
-
116 
-
117  if (r != gc.shape(0)) throw std::runtime_error("First dim of g != DLR rank r.");
-
118 
-
119  // Reshape gc to a matrix w/ first dimension r
-
120  auto gc_rs = nda::reshape(gc, r, gc.size() / r);
+
107  if (r != g.shape(0)) throw std::runtime_error("First dim of g != DLR rank r.");
+
108 
+
109  // Make a copy of the data in Fortran Layout as required by getrs
+
110  auto gf = nda::array<get_value_t<T>, get_rank<T>, F_layout>(g);
+
111 
+
112  // Reshape as matrix_view with r rows
+
113  auto gfv = nda::reshape(gf, r, g.size() / r);
+
114 
+
115  // Solve linear system (multiple right hand sides) to convert vals -> coeffs
+
116  if constexpr (nda::is_complex_v<S>) { // getrs requires matrix and rhs to have same value type
+
117  transpose ? nda::lapack::getrs(nda::transpose(it2cf.zlu), gfv, it2cf.piv) : nda::lapack::getrs(it2cf.zlu, gfv, it2cf.piv);
+
118  } else {
+
119  transpose ? nda::lapack::getrs(nda::transpose(it2cf.lu), gfv, it2cf.piv) : nda::lapack::getrs(it2cf.lu, gfv, it2cf.piv);
+
120  }
121 
-
122  // Apply coeffs -> vals matrix
-
123  auto g = cf2it * nda::matrix_const_view<S>(gc_rs);
+
122  return gf;
+
123  }
124 
-
125  // Reshape to original dimensions and return
-
126  return nda::reshape(g, gc.shape());
-
127  }
-
128 
-
143  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>> auto coefs2eval(T const &gc, double t) const {
-
144 
-
145  if (r != gc.shape(0)) throw std::runtime_error("First dim of g != DLR rank r.");
+
133  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>> typename T::regular_type coefs2vals(T const &gc) const {
+
134 
+
135  if (r != gc.shape(0)) throw std::runtime_error("First dim of g != DLR rank r.");
+
136 
+
137  // Reshape gc to a matrix w/ first dimension r
+
138  auto gc_rs = nda::reshape(gc, r, gc.size() / r);
+
139 
+
140  // Apply coeffs -> vals matrix
+
141  auto g = cf2it * nda::matrix_const_view<S>(gc_rs);
+
142 
+
143  // Reshape to original dimensions and return
+
144  return nda::reshape(g, gc.shape());
+
145  }
146 
-
147  // Scalar-valued Green's functions are handled differently than matrix-valued Green's functions
-
148 
-
149  if constexpr (T::rank == 1) {
-
150 
-
151  // TODO: can be further optimized to reduce # exponential evals.
-
152  // Evaluate DLR expansion
-
153  S g = 0;
-
154  if (t >= 0) {
-
155  for (int l = 0; l < r; ++l) { g += k_it_abs(t, dlr_rf(l)) * gc(l); }
-
156  } else {
-
157  for (int l = 0; l < r; ++l) { g += k_it_abs(-t, -dlr_rf(l)) * gc(l); }
-
158  }
-
159 
-
160  return g;
-
161  } else {
+
161  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>> auto coefs2eval(T const &gc, double t) const {
162 
-
163  // Reshape gc to matrix w/ first dimension r
-
164  auto gc_rs = nda::reshape(gc, r, gc.size() / r);
-
165 
-
166  // Get output shape
-
167  std::array<long, T::rank - 1> shape_out;
-
168  for (int i = 0; i < T::rank - 1; ++i) { shape_out[i] = gc.shape(i + 1); }
-
169 
-
170  // Get vector of evaluation of DLR expansion at a point
-
171  auto kvec = build_evalvec(t);
-
172 
-
173  // Evaluate DLR expansion, reshape to original dimensions (with first
-
174  // dimension summed out), and return
-
175  return nda::reshape(transpose(nda::matrix_const_view<S>(gc_rs)) * kvec, shape_out);
-
176  }
-
177  }
-
178 
-
191  nda::vector<double> build_evalvec(double t) const;
-
192 
-
202  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>>
-
203  typename T::regular_type fitvals2coefs(nda::vector_const_view<double> t, T const &g) const {
-
204 
-
205  // Check first dimension of g equal to length of t
-
206  int n = t.size();
-
207  if (n != g.shape(0)) throw std::runtime_error("First dim of g must be equal to length of t.");
-
208 
-
209  // Get matrix for least squares fitting: columns are DLR basis functions
-
210  // evaluated at data points t. Must be built in Fortran layout for
-
211  // compatibility with LAPACK.
-
212  auto kmat = nda::matrix<S, F_layout>(n, r); // Make sure matrix has same scalar type as g
-
213  for (int j = 0; j < r; ++j) {
-
214  for (int i = 0; i < n; ++i) { kmat(i, j) = k_it(t(i), dlr_rf(j)); }
-
215  }
-
216 
-
217  // Reshape g to matrix w/ first dimension n, and put in Fortran layout for
-
218  // compatibility w/ LAPACK
-
219  auto g_rs = nda::matrix<S, F_layout>(nda::reshape(g, n, g.size() / n));
-
220 
-
221  // Solve least squares problem to obtain DLR coefficients
-
222  auto s = nda::vector<double>(r); // Singular values (not needed)
-
223  int rank = 0; // Rank of system matrix (not needed)
-
224  nda::lapack::gelss(kmat, g_rs, s, 0.0, rank);
-
225 
-
226  auto gc_shape = g.shape(); // Output shape is same as g...
-
227  gc_shape[0] = r; // ...with first dimension n replaced by r
-
228  auto gc = typename T::regular_type(gc_shape); // Output array: output type is same as g
-
229  reshape(gc, r, g.size() / n) = g_rs(nda::range(r), _); // Place result in output array
-
230 
-
231  return gc;
-
232  }
-
233 
-
245  template <nda::MemoryArray T> typename T::regular_type reflect(T const &g) const {
-
246 
-
247  if (r != g.shape(0)) throw std::runtime_error("First dim of g != DLR rank r.");
+
163  if (r != gc.shape(0)) throw std::runtime_error("First dim of g != DLR rank r.");
+
164 
+
165  // Scalar-valued Green's functions are handled differently than matrix-valued Green's functions
+
166 
+
167  if constexpr (T::rank == 1) {
+
168 
+
169  // TODO: can be further optimized to reduce # exponential evals.
+
170  // Evaluate DLR expansion
+
171  S g = 0;
+
172  if (t >= 0) {
+
173  for (int l = 0; l < r; ++l) { g += k_it_abs(t, dlr_rf(l)) * gc(l); }
+
174  } else {
+
175  for (int l = 0; l < r; ++l) { g += k_it_abs(-t, -dlr_rf(l)) * gc(l); }
+
176  }
+
177 
+
178  return g;
+
179  } else {
+
180 
+
181  // Reshape gc to matrix w/ first dimension r
+
182  auto gc_rs = nda::reshape(gc, r, gc.size() / r);
+
183 
+
184  // Get output shape
+
185  std::array<long, T::rank - 1> shape_out;
+
186  for (int i = 0; i < T::rank - 1; ++i) { shape_out[i] = gc.shape(i + 1); }
+
187 
+
188  // Get vector of evaluation of DLR expansion at a point
+
189  auto kvec = build_evalvec(t);
+
190 
+
191  // Evaluate DLR expansion, reshape to original dimensions (with first
+
192  // dimension summed out), and return
+
193  return nda::reshape(transpose(nda::matrix_const_view<S>(gc_rs)) * kvec, shape_out);
+
194  }
+
195  }
+
196 
+
209  nda::vector<double> build_evalvec(double t) const;
+
210 
+
220  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>>
+
221  typename T::regular_type fitvals2coefs(nda::vector_const_view<double> t, T const &g) const {
+
222 
+
223  // Check first dimension of g equal to length of t
+
224  int n = t.size();
+
225  if (n != g.shape(0)) throw std::runtime_error("First dim of g must be equal to length of t.");
+
226 
+
227  // Get matrix for least squares fitting: columns are DLR basis functions
+
228  // evaluated at data points t. Must be built in Fortran layout for
+
229  // compatibility with LAPACK.
+
230  auto kmat = nda::matrix<S, F_layout>(n, r); // Make sure matrix has same scalar type as g
+
231  for (int j = 0; j < r; ++j) {
+
232  for (int i = 0; i < n; ++i) { kmat(i, j) = k_it(t(i), dlr_rf(j)); }
+
233  }
+
234 
+
235  // Reshape g to matrix w/ first dimension n, and put in Fortran layout for
+
236  // compatibility w/ LAPACK
+
237  auto g_rs = nda::matrix<S, F_layout>(nda::reshape(g, n, g.size() / n));
+
238 
+
239  // Solve least squares problem to obtain DLR coefficients
+
240  auto s = nda::vector<double>(r); // Singular values (not needed)
+
241  int rank = 0; // Rank of system matrix (not needed)
+
242  nda::lapack::gelss(kmat, g_rs, s, 0.0, rank);
+
243 
+
244  auto gc_shape = g.shape(); // Output shape is same as g...
+
245  gc_shape[0] = r; // ...with first dimension n replaced by r
+
246  auto gc = typename T::regular_type(gc_shape); // Output array: output type is same as g
+
247  reshape(gc, r, g.size() / n) = g_rs(nda::range(r), _); // Place result in output array
248 
-
249  // Initialize reflection matrix, if it hasn't been done already
-
250  if (refl.empty()) { reflect_init(); }
+
249  return gc;
+
250  }
251 
-
252  if constexpr (T::rank == 1) { // Scalar-valued Green's function
-
253  return matmul(refl, g);
-
254  } else {
-
255  auto gr = typename T::regular_type(g.shape()); // Output has same type/shape as g
-
256  reshape(gr, r, g.size() / r) = matmul(refl, reshape(g, r, g.size() / r)); // Reshape, matrix multiply, reshape back
-
257  return gr;
-
258  }
-
259  }
-
260 
-
290  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>>
-
291  typename T::regular_type convolve(double beta, statistic_t statistic, T const &fc, T const &gc, bool time_order = false) const {
-
292 
-
293  if (r != fc.shape(0) || r != gc.shape(0)) throw std::runtime_error("First dim of input arrays must be equal to DLR rank r.");
-
294  if (fc.shape() != gc.shape()) throw std::runtime_error("Input arrays must have the same shape.");
-
295 
-
296  // TODO: implement bosonic case and remove
-
297  if (statistic == 0) throw std::runtime_error("imtime_ops::convolve not yet implemented for bosonic Green's functions.");
-
298 
-
299  // Initialize convolution, if it hasn't been done already
-
300  if (!time_order & hilb.empty()) { convolve_init(); }
-
301  if (time_order & thilb.empty()) { tconvolve_init(); }
-
302 
-
303  // Get view of helper matrices based on time_order flag
-
304  auto hilb_v = (time_order ? nda::matrix_const_view<double>(thilb) : nda::matrix_const_view<double>(hilb));
-
305  auto tcf2it_v = (time_order ? nda::matrix_const_view<double>(ttcf2it) : nda::matrix_const_view<double>(tcf2it));
-
306 
-
307  if constexpr (T::rank == 1) { // Scalar-valued Green's function
-
308 
-
309  // Take array view of fc and gc
-
310  auto fca = nda::array_const_view<S, 1>(fc);
-
311  auto gca = nda::array_const_view<S, 1>(gc);
-
312 
-
313  // Diagonal contribution
-
314  auto h = matvecmul(tcf2it_v, make_regular(fca * gca));
-
315 
-
316  // Off-diagonal contribution
-
317  auto tmp = fca * arraymult(hilb_v, gca) + gca * arraymult(hilb_v, fca);
-
318  return beta * (h + matvecmul(cf2it, make_regular(tmp)));
-
319 
-
320  } else if (T::rank == 3) { // Matrix-valued Green's function
-
321 
-
322  // Diagonal contribution
-
323  auto fcgc = nda::array<S, 3>(fc.shape()); // Product of coefficients of f and g
-
324  for (int i = 0; i < r; ++i) { fcgc(i, _, _) = matmul(fc(i, _, _), gc(i, _, _)); }
-
325  auto h = arraymult(tcf2it_v, fcgc);
+
263  template <nda::MemoryArray T> typename T::regular_type reflect(T const &g) const {
+
264 
+
265  if (r != g.shape(0)) throw std::runtime_error("First dim of g != DLR rank r.");
+
266 
+
267  // Initialize reflection matrix, if it hasn't been done already
+
268  if (refl.empty()) { reflect_init(); }
+
269 
+
270  if constexpr (T::rank == 1) { // Scalar-valued Green's function
+
271  return matmul(refl, g);
+
272  } else {
+
273  auto gr = typename T::regular_type(g.shape()); // Output has same type/shape as g
+
274  reshape(gr, r, g.size() / r) = matmul(refl, reshape(g, r, g.size() / r)); // Reshape, matrix multiply, reshape back
+
275  return gr;
+
276  }
+
277  }
+
278 
+
308  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>>
+
309  typename T::regular_type convolve(double beta, statistic_t statistic, T const &fc, T const &gc, bool time_order = false) const {
+
310 
+
311  if (r != fc.shape(0) || r != gc.shape(0)) throw std::runtime_error("First dim of input arrays must be equal to DLR rank r.");
+
312  if (fc.shape() != gc.shape()) throw std::runtime_error("Input arrays must have the same shape.");
+
313 
+
314  // TODO: implement bosonic case and remove
+
315  if (statistic == 0) throw std::runtime_error("imtime_ops::convolve not yet implemented for bosonic Green's functions.");
+
316 
+
317  // Initialize convolution, if it hasn't been done already
+
318  if (!time_order & hilb.empty()) { convolve_init(); }
+
319  if (time_order & thilb.empty()) { tconvolve_init(); }
+
320 
+
321  // Get view of helper matrices based on time_order flag
+
322  auto hilb_v = (time_order ? nda::matrix_const_view<double>(thilb) : nda::matrix_const_view<double>(hilb));
+
323  auto tcf2it_v = (time_order ? nda::matrix_const_view<double>(ttcf2it) : nda::matrix_const_view<double>(tcf2it));
+
324 
+
325  if constexpr (T::rank == 1) { // Scalar-valued Green's function
326 
-
327  // Off-diagonal contribution
-
328  auto tmp1 = arraymult(hilb_v, fc);
-
329  auto tmp2 = arraymult(hilb_v, gc);
-
330  for (int i = 0; i < r; ++i) { tmp1(i, _, _) = matmul(tmp1(i, _, _), gc(i, _, _)) + matmul(fc(i, _, _), tmp2(i, _, _)); }
-
331 
-
332  return beta * (h + arraymult(cf2it, tmp1));
+
327  // Take array view of fc and gc
+
328  auto fca = nda::array_const_view<S, 1>(fc);
+
329  auto gca = nda::array_const_view<S, 1>(gc);
+
330 
+
331  // Diagonal contribution
+
332  auto h = matvecmul(tcf2it_v, make_regular(fca * gca));
333 
-
334  } else {
-
335  throw std::runtime_error("Input arrays must be rank 1 (scalar-valued Green's function) or 3 (matrix-valued Green's function).");
-
336  }
-
337  }
-
338 
-
360  template <nda::MemoryMatrix Tf, nda::MemoryArray Tg, nda::Scalar Sf = nda::get_value_t<Tf>, nda::Scalar Sg = nda::get_value_t<Tg>,
-
361  nda::Scalar S = typename std::common_type<Sf, Sg>::type>
-
362  typename Tg::regular_type convolve(Tf const &fconv, Tg const &g) const {
-
363 
-
364  if (r != g.shape(0)) throw std::runtime_error("First dim of input g must be equal to DLR rank r.");
-
365 
-
366  if constexpr (Tg::rank == 1) { // Scalar-valued Green's function
-
367 
-
368  if (fconv.shape(1) != g.shape(0)) throw std::runtime_error("Input array dimensions incompatible.");
-
369 
-
370  return matvecmul(fconv, g);
-
371 
-
372  } else if (Tg::rank == 3) { // Matrix-valued Green's function
-
373 
-
374  if (fconv.shape(1) != g.shape(0) * g.shape(1)) throw std::runtime_error("Input array dimensions incompatible.");
-
375 
-
376  int norb1 = g.shape(1);
-
377  int norb2 = g.shape(2);
-
378 
-
379  auto h = nda::array<S, 3>(r, norb1, norb2);
-
380  reshape(h, r * norb1, norb2) = matmul(fconv, reshape(g, r * norb1, norb2));
+
334  // Off-diagonal contribution
+
335  auto tmp = fca * arraymult(hilb_v, gca) + gca * arraymult(hilb_v, fca);
+
336  return beta * (h + matvecmul(cf2it, make_regular(tmp)));
+
337 
+
338  } else if (T::rank == 3) { // Matrix-valued Green's function
+
339 
+
340  // Diagonal contribution
+
341  auto fcgc = nda::array<S, 3>(fc.shape()); // Product of coefficients of f and g
+
342  for (int i = 0; i < r; ++i) { fcgc(i, _, _) = matmul(fc(i, _, _), gc(i, _, _)); }
+
343  auto h = arraymult(tcf2it_v, fcgc);
+
344 
+
345  // Off-diagonal contribution
+
346  auto tmp1 = arraymult(hilb_v, fc);
+
347  auto tmp2 = arraymult(hilb_v, gc);
+
348  for (int i = 0; i < r; ++i) { tmp1(i, _, _) = matmul(tmp1(i, _, _), gc(i, _, _)) + matmul(fc(i, _, _), tmp2(i, _, _)); }
+
349 
+
350  return beta * (h + arraymult(cf2it, tmp1));
+
351 
+
352  } else {
+
353  throw std::runtime_error("Input arrays must be rank 1 (scalar-valued Green's function) or 3 (matrix-valued Green's function).");
+
354  }
+
355  }
+
356 
+
378  template <nda::MemoryMatrix Tf, nda::MemoryArray Tg, nda::Scalar Sf = nda::get_value_t<Tf>, nda::Scalar Sg = nda::get_value_t<Tg>,
+
379  nda::Scalar S = typename std::common_type<Sf, Sg>::type>
+
380  typename Tg::regular_type convolve(Tf const &fconv, Tg const &g) const {
381 
-
382  return h;
+
382  if (r != g.shape(0)) throw std::runtime_error("First dim of input g must be equal to DLR rank r.");
383 
-
384  } else {
-
385  throw std::runtime_error("Input arrays must be rank 1 (scalar-valued Green's function) or 3 (matrix-valued Green's function).");
-
386  }
-
387  }
-
388 
-
433  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>>
-
434  nda::matrix<S> convmat(double beta, statistic_t statistic, T const &fc, bool time_order = false) const {
-
435 
-
436  if (r != fc.shape(0)) throw std::runtime_error("First dim of input array must be equal to DLR rank r.");
-
437 
-
438  // TODO: implement bosonic case and remove
-
439  if (statistic == 0) throw std::runtime_error("imtime_ops::convmat not yet implemented for bosonic Green's functions.");
-
440 
-
441  // Initialize convolution, if it hasn't been done already
-
442  if (!time_order & hilb.empty()) { convolve_init(); }
-
443  if (time_order & thilb.empty()) { tconvolve_init(); }
-
444 
-
445  // Get view of helper matrices based on time_order flag
-
446  auto hilb_v = (time_order ? nda::matrix_const_view<double>(thilb) : nda::matrix_const_view<double>(hilb));
-
447  auto tcf2it_v = (time_order ? nda::matrix_const_view<double>(ttcf2it) : nda::matrix_const_view<double>(tcf2it));
-
448 
-
449  if constexpr (T::rank == 1) { // Scalar-valued Green's function
-
450 
-
451  // First construct convolution matrix from DLR coefficients to DLR grid
-
452  // values
-
453  auto fconv = nda::matrix<S>(r, r); // Matrix of convolution by f
-
454 
-
455  // Diagonal contribution (given by diag(tau_k) * K(tau_k, om_l) * diag(fc_l))
-
456  for (int k = 0; k < r; ++k) {
-
457  for (int l = 0; l < r; ++l) { fconv(k, l) = tcf2it_v(k, l) * fc(l); }
-
458  }
-
459 
-
460  // Off-diagonal contribution (given by K * (diag(hilb*fc) +
-
461  // (diag(fc)*hilb)), where K is the matrix K(dlr_it(k), dlr_rf(l))
-
462  auto tmp1 = matvecmul(hilb_v, fc); // hilb * fc
-
463  auto tmp2 = nda::matrix<S>(r, r);
-
464  for (int k = 0; k < r; ++k) {
-
465  for (int l = 0; l < r; ++l) {
-
466  tmp2(k, l) = fc(k) * hilb_v(k, l); // diag(fc) * hilb
-
467  }
-
468  tmp2(k, k) += tmp1(k); // diag(fc)*hilb + diag(hilb*fc)
-
469  }
-
470  fconv += matmul(cf2it, tmp2);
-
471 
-
472  // Then precompose with DLR grid values to DLR coefficients matrix
-
473  if constexpr (nda::is_complex_v<S>) { // getrs requires matrix and rhs to have same value type
-
474  nda::lapack::getrs(transpose(it2cf.zlu), fconv, it2cf.piv); // Note: lapack effectively transposes fconv by fortran reordering here
-
475  } else {
-
476  // getrs requires matrix and rhs to have same value type
-
477  nda::lapack::getrs(transpose(it2cf.lu), fconv, it2cf.piv);
-
478  }
-
479 
-
480  return beta * fconv;
-
481 
-
482  } else if (T::rank == 3) { // Matrix-valued Green's function
-
483 
-
484  int norb1 = fc.shape(1);
-
485  int norb2 = fc.shape(2);
-
486 
-
487  // First construct convolution matrix from DLR coefficients to DLR grid
-
488  // values
-
489  auto fconv = nda::matrix<S>(r * norb1, r * norb2); // Matrix of convolution by f
-
490  auto fconv_rs = nda::reshape(fconv, r, norb1, r, norb2); // Array view to index into fconv for convenience
-
491 
-
492  // Diagonal contribution (given by diag(tau_k) * K(tau_k, om_l) * diag(fc_l))
-
493  for (int k = 0; k < r; ++k) {
-
494  for (int l = 0; l < r; ++l) { fconv_rs(k, _, l, _) = tcf2it_v(k, l) * fc(l, _, _); }
-
495  }
-
496 
-
497  // Off-diagonal contribution (given by K * (diag(hilb*fc) +
-
498  // (diag(fc)*hilb)), where K is the matrix K(dlr_it(k), dlr_rf(l))
-
499  auto tmp1 = arraymult(hilb_v, fc); // hilb * fc
-
500  auto tmp2 = nda::array<S, 4>(r, norb1, r, norb2);
-
501  for (int k = 0; k < r; ++k) {
-
502  for (int l = 0; l < r; ++l) {
-
503  tmp2(k, _, l, _) = fc(k, _, _) * hilb_v(k, l); // diag(fc) * hilb
-
504  }
-
505  tmp2(k, _, k, _) += tmp1(k, _, _); // diag(fc)*hilb + diag(hilb*fc)
-
506  }
-
507  fconv_rs += arraymult(cf2it, tmp2);
-
508 
-
509  // Then precompose with DLR grid values to DLR coefficients matrix
-
510 
-
511  // Transpose last two indices to make the column DLR index the last
-
512  // index
-
513  auto fconvtmp = nda::matrix<S>(r * norb1 * norb2, r);
-
514  auto fconvtmp_rs = nda::reshape(fconvtmp, r, norb1, norb2, r);
-
515  for (int i = 0; i < norb2; ++i) {
-
516  for (int k = 0; k < r; ++k) { fconvtmp_rs(_, _, i, k) = fconv_rs(_, _, k, i); }
-
517  }
-
518 
-
519  // Do the solve
-
520  if constexpr (nda::is_complex_v<S>) { // getrs requires matrix and rhs to have same value type
-
521  nda::lapack::getrs(transpose(it2cf.zlu), fconvtmp, it2cf.piv); // Note: lapack effectively transposes fconv by fortran reordering here
-
522  } else {
-
523  // getrs requires matrix and rhs to have same value type
-
524  nda::lapack::getrs(transpose(it2cf.lu), fconvtmp, it2cf.piv);
-
525  }
+
384  if constexpr (Tg::rank == 1) { // Scalar-valued Green's function
+
385 
+
386  if (fconv.shape(1) != g.shape(0)) throw std::runtime_error("Input array dimensions incompatible.");
+
387 
+
388  return matvecmul(fconv, g);
+
389 
+
390  } else if (Tg::rank == 3) { // Matrix-valued Green's function
+
391 
+
392  if (fconv.shape(1) != g.shape(0) * g.shape(1)) throw std::runtime_error("Input array dimensions incompatible.");
+
393 
+
394  int norb1 = g.shape(1);
+
395  int norb2 = g.shape(2);
+
396 
+
397  auto h = nda::array<S, 3>(r, norb1, norb2);
+
398  reshape(h, r * norb1, norb2) = matmul(fconv, reshape(g, r * norb1, norb2));
+
399 
+
400  return h;
+
401 
+
402  } else {
+
403  throw std::runtime_error("Input arrays must be rank 1 (scalar-valued Green's function) or 3 (matrix-valued Green's function).");
+
404  }
+
405  }
+
406 
+
451  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>>
+
452  nda::matrix<S> convmat(double beta, statistic_t statistic, T const &fc, bool time_order = false) const {
+
453 
+
454  if (r != fc.shape(0)) throw std::runtime_error("First dim of input array must be equal to DLR rank r.");
+
455 
+
456  // TODO: implement bosonic case and remove
+
457  if (statistic == 0) throw std::runtime_error("imtime_ops::convmat not yet implemented for bosonic Green's functions.");
+
458 
+
459  // Initialize convolution, if it hasn't been done already
+
460  if (!time_order & hilb.empty()) { convolve_init(); }
+
461  if (time_order & thilb.empty()) { tconvolve_init(); }
+
462 
+
463  // Get view of helper matrices based on time_order flag
+
464  auto hilb_v = (time_order ? nda::matrix_const_view<double>(thilb) : nda::matrix_const_view<double>(hilb));
+
465  auto tcf2it_v = (time_order ? nda::matrix_const_view<double>(ttcf2it) : nda::matrix_const_view<double>(tcf2it));
+
466 
+
467  if constexpr (T::rank == 1) { // Scalar-valued Green's function
+
468 
+
469  // First construct convolution matrix from DLR coefficients to DLR grid
+
470  // values
+
471  auto fconv = nda::matrix<S>(r, r); // Matrix of convolution by f
+
472 
+
473  // Diagonal contribution (given by diag(tau_k) * K(tau_k, om_l) * diag(fc_l))
+
474  for (int k = 0; k < r; ++k) {
+
475  for (int l = 0; l < r; ++l) { fconv(k, l) = tcf2it_v(k, l) * fc(l); }
+
476  }
+
477 
+
478  // Off-diagonal contribution (given by K * (diag(hilb*fc) +
+
479  // (diag(fc)*hilb)), where K is the matrix K(dlr_it(k), dlr_rf(l))
+
480  auto tmp1 = matvecmul(hilb_v, fc); // hilb * fc
+
481  auto tmp2 = nda::matrix<S>(r, r);
+
482  for (int k = 0; k < r; ++k) {
+
483  for (int l = 0; l < r; ++l) {
+
484  tmp2(k, l) = fc(k) * hilb_v(k, l); // diag(fc) * hilb
+
485  }
+
486  tmp2(k, k) += tmp1(k); // diag(fc)*hilb + diag(hilb*fc)
+
487  }
+
488  fconv += matmul(cf2it, tmp2);
+
489 
+
490  // Then precompose with DLR grid values to DLR coefficients matrix
+
491  if constexpr (nda::is_complex_v<S>) { // getrs requires matrix and rhs to have same value type
+
492  nda::lapack::getrs(transpose(it2cf.zlu), fconv, it2cf.piv); // Note: lapack effectively transposes fconv by fortran reordering here
+
493  } else {
+
494  // getrs requires matrix and rhs to have same value type
+
495  nda::lapack::getrs(transpose(it2cf.lu), fconv, it2cf.piv);
+
496  }
+
497 
+
498  return beta * fconv;
+
499 
+
500  } else if (T::rank == 3) { // Matrix-valued Green's function
+
501 
+
502  int norb1 = fc.shape(1);
+
503  int norb2 = fc.shape(2);
+
504 
+
505  // First construct convolution matrix from DLR coefficients to DLR grid
+
506  // values
+
507  auto fconv = nda::matrix<S>(r * norb1, r * norb2); // Matrix of convolution by f
+
508  auto fconv_rs = nda::reshape(fconv, r, norb1, r, norb2); // Array view to index into fconv for convenience
+
509 
+
510  // Diagonal contribution (given by diag(tau_k) * K(tau_k, om_l) * diag(fc_l))
+
511  for (int k = 0; k < r; ++k) {
+
512  for (int l = 0; l < r; ++l) { fconv_rs(k, _, l, _) = tcf2it_v(k, l) * fc(l, _, _); }
+
513  }
+
514 
+
515  // Off-diagonal contribution (given by K * (diag(hilb*fc) +
+
516  // (diag(fc)*hilb)), where K is the matrix K(dlr_it(k), dlr_rf(l))
+
517  auto tmp1 = arraymult(hilb_v, fc); // hilb * fc
+
518  auto tmp2 = nda::array<S, 4>(r, norb1, r, norb2);
+
519  for (int k = 0; k < r; ++k) {
+
520  for (int l = 0; l < r; ++l) {
+
521  tmp2(k, _, l, _) = fc(k, _, _) * hilb_v(k, l); // diag(fc) * hilb
+
522  }
+
523  tmp2(k, _, k, _) += tmp1(k, _, _); // diag(fc)*hilb + diag(hilb*fc)
+
524  }
+
525  fconv_rs += arraymult(cf2it, tmp2);
526 
-
527  // Transpose back
-
528  for (int i = 0; i < norb2; ++i) {
-
529  for (int k = 0; k < r; ++k) { fconv_rs(_, _, k, i) = fconvtmp_rs(_, _, i, k); }
-
530  }
-
531 
-
532  return beta * fconv;
-
533 
-
534  } else {
-
535  throw std::runtime_error("Input arrays must be rank 1 (scalar-valued Green's function) or 3 (matrix-valued Green's function).");
-
536  }
-
537  }
-
538 
-
560  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>> S innerprod(T const &fc, T const &gc) const {
-
561 
-
562  if (r != fc.shape(0) || r != gc.shape(0)) throw std::runtime_error("First dim of input arrays must be equal to DLR rank r.");
-
563  if (fc.shape() != gc.shape()) throw std::runtime_error("Input arrays must have the same shape.");
-
564 
-
565  // Initialize inner product matrix, if it hasn't been done already
-
566  if (ipmat.empty()) { innerprod_init(); }
-
567 
-
568  S ip = 0;
-
569  if constexpr (T::rank == 1) { // Scalar-valued Green's function
-
570  ip = nda::blas::dotc(fc, matvecmul(ipmat, gc));
-
571  } else if (T::rank == 3) { // Matrix-valued Green's function
-
572  int norb = fc.shape(1);
-
573  // Compute inner product
-
574  for (int i = 0; i < norb; ++i) {
-
575  for (int j = 0; j < norb; ++j) { ip += nda::blas::dotc(fc(_, i, j), matvecmul(ipmat, gc(_, i, j))); }
-
576  }
-
577  } else {
-
578  throw std::runtime_error("Input arrays must be rank 1 (scalar-valued Green's function) or 3 (matrix-valued Green's function).");
-
579  }
-
580 
-
581  return ip;
-
582  }
-
583 
-
589  nda::vector_const_view<double> get_itnodes() const { return dlr_it; };
-
590  double get_itnodes(int i) const { return dlr_it(i); };
-
591 
-
598  nda::vector_const_view<double> get_rfnodes() const { return dlr_rf; };
-
599  double get_rfnodes(int i) const { return dlr_rf(i); };
-
600 
-
606  nda::matrix_const_view<double> get_cf2it() const { return cf2it; };
-
607 
-
613  nda::matrix_const_view<double> get_it2cf_lu() const { return it2cf.lu; };
-
614 
-
621  nda::matrix_const_view<dcomplex> get_it2cf_zlu() const { return it2cf.zlu; };
-
622 
-
628  nda::vector_const_view<int> get_it2cf_piv() const { return it2cf.piv; };
-
629 
-
635  int rank() const { return r; }
-
636  double lambda() const { return lambda_; }
-
637 
-
648  nda::matrix_const_view<double> get_ipmat() const {
-
649  if (ipmat.empty()) { innerprod_init(); }
-
650  return ipmat;
-
651  }
-
652 
-
661  void convolve_init() const {
-
662 
-
663  hilb = nda::matrix<double>(r, r);
-
664  tcf2it = nda::matrix<double>(r, r);
-
665 
-
666  // "Discrete Hilbert transform" matrix -(1-delta_jk)/(dlr_rf(j) -
-
667  // dlr_rf(k)), scaled by beta
-
668  for (int j = 0; j < r; ++j) {
-
669  for (int k = 0; k < r; ++k) {
-
670  if (j == k) {
-
671  hilb(j, k) = 0;
-
672  } else {
-
673  hilb(j, k) = 1.0 / (dlr_rf(j) - dlr_rf(k));
-
674  }
-
675  }
-
676  }
-
677 
-
678  // Matrix which applies DLR coefficients to imaginary time grid values
-
679  // transformation matrix, and then multiplies the result by tau, the
-
680  // imaginary time variable
-
681  for (int j = 0; j < r; ++j) {
-
682  for (int k = 0; k < r; ++k) {
-
683  if (dlr_it(j) > 0) {
-
684  tcf2it(j, k) = -(dlr_it(j) + k_it(1.0, dlr_rf(k))) * cf2it(j, k);
-
685  } else {
-
686  tcf2it(j, k) = -(dlr_it(j) - k_it(0.0, dlr_rf(k))) * cf2it(j, k);
-
687  }
-
688  }
-
689  }
-
690  }
-
691 
-
701  void tconvolve_init() const {
-
702 
-
703  thilb = nda::matrix<double>(r, r);
-
704  ttcf2it = nda::matrix<double>(r, r);
-
705 
-
706  // "Discrete Hilbert transform" matrix
-
707  // -(1-delta_jk)*K(0,dlr_rf(k))/(dlr_rf(j) - dlr_rf(k)) for time-ordered
-
708  // convolution, scaled by beta
-
709  for (int j = 0; j < r; ++j) {
-
710  for (int k = 0; k < r; ++k) {
-
711  if (j == k) {
-
712  thilb(j, k) = 0;
-
713  } else {
-
714  thilb(j, k) = k_it(0.0, dlr_rf(k)) / (dlr_rf(k) - dlr_rf(j));
-
715  }
-
716  }
-
717  }
-
718 
-
719  // Matrix which applies K(0,dlr_rf(j)) multiplication, then DLR
-
720  // coefficients to imaginary time grid values transformation matrix, and
-
721  // then multiplies the result by tau, the imaginary time variable
-
722  for (int j = 0; j < r; ++j) {
-
723  for (int k = 0; k < r; ++k) {
-
724  if (dlr_it(j) > 0) {
-
725  ttcf2it(j, k) = dlr_it(j) * cf2it(j, k) * k_it(0.0, dlr_rf(k));
-
726  } else {
-
727  ttcf2it(j, k) = (1 + dlr_it(j)) * cf2it(j, k) * k_it(0.0, dlr_rf(k));
-
728  }
-
729  }
-
730  }
-
731  }
-
732 
-
740  void innerprod_init() const {
-
741 
-
742  ipmat = nda::matrix<double>(r, r);
-
743 
-
744  // Matrix of inner product of two DLR expansions
-
745  double ssum = 0;
-
746  for (int k = 0; k < r; ++k) {
-
747  for (int l = 0; l < r; ++l) {
-
748  ssum = dlr_rf(k) + dlr_rf(l);
-
749  if (ssum == 0) {
-
750  ipmat(k, l) = k_it(0.0, dlr_rf(k)) * k_it(0.0, dlr_rf(l));
-
751  } else if (std::abs(ssum) < 1) {
-
752  ipmat(k, l) = -k_it(0.0, dlr_rf(k)) * k_it(0.0, dlr_rf(l)) * std::expm1(-ssum) / ssum;
-
753  } else {
-
754  ipmat(k, l) = (k_it(0.0, dlr_rf(k)) * k_it(0.0, dlr_rf(l)) - k_it(1.0, dlr_rf(k)) * k_it(1.0, dlr_rf(l))) / ssum;
-
755  }
-
756  }
-
757  }
-
758  }
+
527  // Then precompose with DLR grid values to DLR coefficients matrix
+
528 
+
529  // Transpose last two indices to make the column DLR index the last
+
530  // index
+
531  auto fconvtmp = nda::matrix<S>(r * norb1 * norb2, r);
+
532  auto fconvtmp_rs = nda::reshape(fconvtmp, r, norb1, norb2, r);
+
533  for (int i = 0; i < norb2; ++i) {
+
534  for (int k = 0; k < r; ++k) { fconvtmp_rs(_, _, i, k) = fconv_rs(_, _, k, i); }
+
535  }
+
536 
+
537  // Do the solve
+
538  if constexpr (nda::is_complex_v<S>) { // getrs requires matrix and rhs to have same value type
+
539  nda::lapack::getrs(transpose(it2cf.zlu), fconvtmp, it2cf.piv); // Note: lapack effectively transposes fconv by fortran reordering here
+
540  } else {
+
541  // getrs requires matrix and rhs to have same value type
+
542  nda::lapack::getrs(transpose(it2cf.lu), fconvtmp, it2cf.piv);
+
543  }
+
544 
+
545  // Transpose back
+
546  for (int i = 0; i < norb2; ++i) {
+
547  for (int k = 0; k < r; ++k) { fconv_rs(_, _, k, i) = fconvtmp_rs(_, _, i, k); }
+
548  }
+
549 
+
550  return beta * fconv;
+
551 
+
552  } else {
+
553  throw std::runtime_error("Input arrays must be rank 1 (scalar-valued Green's function) or 3 (matrix-valued Green's function).");
+
554  }
+
555  }
+
556 
+
578  template <nda::MemoryArray T, nda::Scalar S = nda::get_value_t<T>> S innerprod(T const &fc, T const &gc) const {
+
579 
+
580  if (r != fc.shape(0) || r != gc.shape(0)) throw std::runtime_error("First dim of input arrays must be equal to DLR rank r.");
+
581  if (fc.shape() != gc.shape()) throw std::runtime_error("Input arrays must have the same shape.");
+
582 
+
583  // Initialize inner product matrix, if it hasn't been done already
+
584  if (ipmat.empty()) { innerprod_init(); }
+
585 
+
586  S ip = 0;
+
587  if constexpr (T::rank == 1) { // Scalar-valued Green's function
+
588  ip = nda::blas::dotc(fc, matvecmul(ipmat, gc));
+
589  } else if (T::rank == 3) { // Matrix-valued Green's function
+
590  int norb = fc.shape(1);
+
591  // Compute inner product
+
592  for (int i = 0; i < norb; ++i) {
+
593  for (int j = 0; j < norb; ++j) { ip += nda::blas::dotc(fc(_, i, j), matvecmul(ipmat, gc(_, i, j))); }
+
594  }
+
595  } else {
+
596  throw std::runtime_error("Input arrays must be rank 1 (scalar-valued Green's function) or 3 (matrix-valued Green's function).");
+
597  }
+
598 
+
599  return ip;
+
600  }
+
601 
+
607  nda::vector_const_view<double> get_itnodes() const { return dlr_it; };
+
608  double get_itnodes(int i) const { return dlr_it(i); };
+
609 
+
616  nda::vector_const_view<double> get_rfnodes() const { return dlr_rf; };
+
617  double get_rfnodes(int i) const { return dlr_rf(i); };
+
618 
+
624  nda::matrix_const_view<double> get_cf2it() const { return cf2it; };
+
625 
+
631  nda::matrix_const_view<double> get_it2cf_lu() const { return it2cf.lu; };
+
632 
+
639  nda::matrix_const_view<dcomplex> get_it2cf_zlu() const { return it2cf.zlu; };
+
640 
+
646  nda::vector_const_view<int> get_it2cf_piv() const { return it2cf.piv; };
+
647 
+
653  int rank() const { return r; }
+
654  double lambda() const { return lambda_; }
+
655 
+
666  nda::matrix_const_view<double> get_ipmat() const {
+
667  if (ipmat.empty()) { innerprod_init(); }
+
668  return ipmat;
+
669  }
+
670 
+
679  void convolve_init() const {
+
680 
+
681  hilb = nda::matrix<double>(r, r);
+
682  tcf2it = nda::matrix<double>(r, r);
+
683 
+
684  // "Discrete Hilbert transform" matrix -(1-delta_jk)/(dlr_rf(j) -
+
685  // dlr_rf(k)), scaled by beta
+
686  for (int j = 0; j < r; ++j) {
+
687  for (int k = 0; k < r; ++k) {
+
688  if (j == k) {
+
689  hilb(j, k) = 0;
+
690  } else {
+
691  hilb(j, k) = 1.0 / (dlr_rf(j) - dlr_rf(k));
+
692  }
+
693  }
+
694  }
+
695 
+
696  // Matrix which applies DLR coefficients to imaginary time grid values
+
697  // transformation matrix, and then multiplies the result by tau, the
+
698  // imaginary time variable
+
699  for (int j = 0; j < r; ++j) {
+
700  for (int k = 0; k < r; ++k) {
+
701  if (dlr_it(j) > 0) {
+
702  tcf2it(j, k) = -(dlr_it(j) + k_it(1.0, dlr_rf(k))) * cf2it(j, k);
+
703  } else {
+
704  tcf2it(j, k) = -(dlr_it(j) - k_it(0.0, dlr_rf(k))) * cf2it(j, k);
+
705  }
+
706  }
+
707  }
+
708  }
+
709 
+
719  void tconvolve_init() const {
+
720 
+
721  thilb = nda::matrix<double>(r, r);
+
722  ttcf2it = nda::matrix<double>(r, r);
+
723 
+
724  // "Discrete Hilbert transform" matrix
+
725  // -(1-delta_jk)*K(0,dlr_rf(k))/(dlr_rf(j) - dlr_rf(k)) for time-ordered
+
726  // convolution, scaled by beta
+
727  for (int j = 0; j < r; ++j) {
+
728  for (int k = 0; k < r; ++k) {
+
729  if (j == k) {
+
730  thilb(j, k) = 0;
+
731  } else {
+
732  thilb(j, k) = k_it(0.0, dlr_rf(k)) / (dlr_rf(k) - dlr_rf(j));
+
733  }
+
734  }
+
735  }
+
736 
+
737  // Matrix which applies K(0,dlr_rf(j)) multiplication, then DLR
+
738  // coefficients to imaginary time grid values transformation matrix, and
+
739  // then multiplies the result by tau, the imaginary time variable
+
740  for (int j = 0; j < r; ++j) {
+
741  for (int k = 0; k < r; ++k) {
+
742  if (dlr_it(j) > 0) {
+
743  ttcf2it(j, k) = dlr_it(j) * cf2it(j, k) * k_it(0.0, dlr_rf(k));
+
744  } else {
+
745  ttcf2it(j, k) = (1 + dlr_it(j)) * cf2it(j, k) * k_it(0.0, dlr_rf(k));
+
746  }
+
747  }
+
748  }
+
749  }
+
750 
+
758  void innerprod_init() const {
759 
-
769  void reflect_init() const {
-
770 
-
771  refl = nda::matrix<double>(r, r);
-
772 
-
773  // Matrix of reflection acting on DLR coefficients and returning values at
-
774  // DLR nodes
-
775  for (int i = 0; i < r; ++i) {
-
776  for (int j = 0; j < r; ++j) { refl(i, j) = k_it(-dlr_it(i), dlr_rf(j)); }
-
777  }
-
778 
-
779  // Precompose with DLR values to coefficients matrix
-
780  nda::lapack::getrs(transpose(it2cf.lu), refl, it2cf.piv); // Lapack effectively transposes refl by fortran reordering here
-
781  }
-
782 
-
783  private:
-
784  double lambda_;
-
785  int r;
-
786  nda::vector<double> dlr_rf;
-
787  nda::vector<double> dlr_it;
-
788  nda::matrix<double> cf2it;
-
789 
-
793  struct {
-
794  nda::matrix<double> lu;
-
795  nda::matrix<dcomplex> zlu;
-
796  nda::vector<int> piv;
-
797  } it2cf;
-
798 
-
799  // Arrays used for dlr_imtime::convolve
-
800  mutable nda::matrix<double> hilb;
-
801  mutable nda::matrix<double> tcf2it;
-
802 
-
803  // Arrays used for dlr_imtime::tconvolve
-
804  mutable nda::matrix<double> thilb;
-
805  mutable nda::matrix<double> ttcf2it;
-
806 
-
807  // Array used for dlr_imtime::innerprod
-
808  mutable nda::matrix<double> ipmat;
-
809 
-
810  // Array used for dlr_imtime::reflect
-
811  mutable nda::matrix<double> refl;
-
812 
-
813  // -------------------- hdf5 -------------------
-
814 
-
815  public:
-
816  static std::string hdf5_format() { return "cppdlr::imtime_ops"; }
-
817 
-
818  friend void h5_write(h5::group fg, std::string const &subgroup_name, imtime_ops const &m) {
-
819 
-
820  h5::group gr = fg.create_group(subgroup_name);
-
821  write_hdf5_format(gr, m);
-
822 
-
823  h5::write(gr, "lambda", m.lambda());
-
824  h5::write(gr, "rf", m.get_rfnodes());
-
825  h5::write(gr, "it", m.get_itnodes());
-
826  h5::write(gr, "cf2it", m.get_cf2it());
-
827  h5::write(gr, "it2cf_lu", m.get_it2cf_lu());
-
828  h5::write(gr, "it2cf_zlu", m.get_it2cf_zlu());
-
829  h5::write(gr, "it2cf_piv", m.get_it2cf_piv());
-
830  }
-
831 
-
832  friend void h5_read(h5::group fg, std::string const &subgroup_name, imtime_ops &m) {
-
833 
-
834  h5::group gr = fg.open_group(subgroup_name);
-
835  assert_hdf5_format(gr, m);
-
836 
-
837  auto lambda = h5::read<double>(gr, "lambda");
-
838  auto rf = h5::read<nda::vector<double>>(gr, "rf");
-
839  auto it = h5::read<nda::vector<double>>(gr, "it");
-
840  auto cf2it_ = h5::read<nda::matrix<double>>(gr, "cf2it");
-
841  auto it2cf_lu = h5::read<nda::matrix<double>>(gr, "it2cf_lu");
-
842  auto it2cf_zlu = h5::read<nda::matrix<dcomplex>>(gr, "it2cf_zlu");
-
843  auto it2cf_piv = h5::read<nda::vector<int>>(gr, "it2cf_piv");
-
844 
-
845  m = imtime_ops(lambda, rf, it, cf2it_, it2cf_lu, it2cf_zlu, it2cf_piv);
-
846  }
-
847  };
-
848 
-
849 } // namespace cppdlr
+
760  ipmat = nda::matrix<double>(r, r);
+
761 
+
762  // Matrix of inner product of two DLR expansions
+
763  double ssum = 0;
+
764  for (int k = 0; k < r; ++k) {
+
765  for (int l = 0; l < r; ++l) {
+
766  ssum = dlr_rf(k) + dlr_rf(l);
+
767  if (ssum == 0) {
+
768  ipmat(k, l) = k_it(0.0, dlr_rf(k)) * k_it(0.0, dlr_rf(l));
+
769  } else if (std::abs(ssum) < 1) {
+
770  ipmat(k, l) = -k_it(0.0, dlr_rf(k)) * k_it(0.0, dlr_rf(l)) * std::expm1(-ssum) / ssum;
+
771  } else {
+
772  ipmat(k, l) = (k_it(0.0, dlr_rf(k)) * k_it(0.0, dlr_rf(l)) - k_it(1.0, dlr_rf(k)) * k_it(1.0, dlr_rf(l))) / ssum;
+
773  }
+
774  }
+
775  }
+
776  }
+
777 
+
787  void reflect_init() const {
+
788 
+
789  refl = nda::matrix<double>(r, r);
+
790 
+
791  // Matrix of reflection acting on DLR coefficients and returning values at
+
792  // DLR nodes
+
793  for (int i = 0; i < r; ++i) {
+
794  for (int j = 0; j < r; ++j) { refl(i, j) = k_it(-dlr_it(i), dlr_rf(j)); }
+
795  }
+
796 
+
797  // Precompose with DLR values to coefficients matrix
+
798  nda::lapack::getrs(transpose(it2cf.lu), refl, it2cf.piv); // Lapack effectively transposes refl by fortran reordering here
+
799  }
+
800 
+
801  private:
+
802  double lambda_;
+
803  int r;
+
804  nda::vector<double> dlr_rf;
+
805  nda::vector<double> dlr_it;
+
806  nda::matrix<double> cf2it;
+
807 
+
811  struct {
+
812  nda::matrix<double> lu;
+
813  nda::matrix<dcomplex> zlu;
+
814  nda::vector<int> piv;
+
815  } it2cf;
+
816 
+
817  // Arrays used for dlr_imtime::convolve
+
818  mutable nda::matrix<double> hilb;
+
819  mutable nda::matrix<double> tcf2it;
+
820 
+
821  // Arrays used for dlr_imtime::tconvolve
+
822  mutable nda::matrix<double> thilb;
+
823  mutable nda::matrix<double> ttcf2it;
+
824 
+
825  // Array used for dlr_imtime::innerprod
+
826  mutable nda::matrix<double> ipmat;
+
827 
+
828  // Array used for dlr_imtime::reflect
+
829  mutable nda::matrix<double> refl;
+
830 
+
831  // -------------------- hdf5 -------------------
+
832 
+
833  public:
+
834  static std::string hdf5_format() { return "cppdlr::imtime_ops"; }
+
835 
+
836  friend void h5_write(h5::group fg, std::string const &subgroup_name, imtime_ops const &m) {
+
837 
+
838  h5::group gr = fg.create_group(subgroup_name);
+
839  write_hdf5_format(gr, m);
+
840 
+
841  h5::write(gr, "lambda", m.lambda());
+
842  h5::write(gr, "rf", m.get_rfnodes());
+
843  h5::write(gr, "it", m.get_itnodes());
+
844  h5::write(gr, "cf2it", m.get_cf2it());
+
845  h5::write(gr, "it2cf_lu", m.get_it2cf_lu());
+
846  h5::write(gr, "it2cf_zlu", m.get_it2cf_zlu());
+
847  h5::write(gr, "it2cf_piv", m.get_it2cf_piv());
+
848  }
+
849 
+
850  friend void h5_read(h5::group fg, std::string const &subgroup_name, imtime_ops &m) {
+
851 
+
852  h5::group gr = fg.open_group(subgroup_name);
+
853  assert_hdf5_format(gr, m);
+
854 
+
855  auto lambda = h5::read<double>(gr, "lambda");
+
856  auto rf = h5::read<nda::vector<double>>(gr, "rf");
+
857  auto it = h5::read<nda::vector<double>>(gr, "it");
+
858  auto cf2it_ = h5::read<nda::matrix<double>>(gr, "cf2it");
+
859  auto it2cf_lu = h5::read<nda::matrix<double>>(gr, "it2cf_lu");
+
860  auto it2cf_zlu = h5::read<nda::matrix<dcomplex>>(gr, "it2cf_zlu");
+
861  auto it2cf_piv = h5::read<nda::vector<int>>(gr, "it2cf_piv");
+
862 
+
863  m = imtime_ops(lambda, rf, it, cf2it_, it2cf_lu, it2cf_zlu, it2cf_piv);
+
864  }
+
865  };
+
866 
+
867 } // namespace cppdlr
Class responsible for all DLR imaginary time operations, including building imaginary time grid and t...
Definition: dlr_imtime.hpp:46
-
friend void h5_read(h5::group fg, std::string const &subgroup_name, imtime_ops &m)
Definition: dlr_imtime.hpp:832
-
T::regular_type reflect(T const &g) const
Compute reflection of imaginary time Green's function.
Definition: dlr_imtime.hpp:245
-
friend void h5_write(h5::group fg, std::string const &subgroup_name, imtime_ops const &m)
Definition: dlr_imtime.hpp:818
-
auto coefs2eval(T const &gc, double t) const
Evaluate DLR expansion of G, given by its DLR coefficients, at imaginary time point.
Definition: dlr_imtime.hpp:143
-
nda::matrix_const_view< double > get_cf2it() const
Get transformation matrix from DLR coefficients to values at DLR imaginary time nodes.
Definition: dlr_imtime.hpp:606
+
friend void h5_read(h5::group fg, std::string const &subgroup_name, imtime_ops &m)
Definition: dlr_imtime.hpp:850
+
T::regular_type reflect(T const &g) const
Compute reflection of imaginary time Green's function.
Definition: dlr_imtime.hpp:263
+
friend void h5_write(h5::group fg, std::string const &subgroup_name, imtime_ops const &m)
Definition: dlr_imtime.hpp:836
+
auto coefs2eval(T const &gc, double t) const
Evaluate DLR expansion of G, given by its DLR coefficients, at imaginary time point.
Definition: dlr_imtime.hpp:161
+
nda::matrix_const_view< double > get_cf2it() const
Get transformation matrix from DLR coefficients to values at DLR imaginary time nodes.
Definition: dlr_imtime.hpp:624
nda::vector< double > build_evalvec(double t) const
Build vector of evaluation of DLR expansion at an imaginary time point.
Definition: dlr_imtime.cpp:59
-
nda::matrix_const_view< dcomplex > get_it2cf_zlu() const
Get LU factors of transformation matrix from DLR imaginary time values to coefficients,...
Definition: dlr_imtime.hpp:621
-
nda::vector_const_view< double > get_rfnodes() const
Get DLR real frequency nodes.
Definition: dlr_imtime.hpp:598
-
void reflect_init() const
Initialization for reflection method.
Definition: dlr_imtime.hpp:769
-
double lambda() const
Definition: dlr_imtime.hpp:636
-
nda::matrix< dcomplex > zlu
Same as lu, cast to complex (for use with lapack::getrs w/ cmplx input)
Definition: dlr_imtime.hpp:795
-
nda::matrix< S > convmat(double beta, statistic_t statistic, T const &fc, bool time_order=false) const
Compute matrix of convolution by an imaginary time Green's function.
Definition: dlr_imtime.hpp:434
-
T::regular_type convolve(double beta, statistic_t statistic, T const &fc, T const &gc, bool time_order=false) const
Compute convolution of two imaginary time Green's functions.
Definition: dlr_imtime.hpp:291
-
void convolve_init() const
Initialization for convolution methods.
Definition: dlr_imtime.hpp:661
-
static std::string hdf5_format()
Definition: dlr_imtime.hpp:816
-
T::regular_type fitvals2coefs(nda::vector_const_view< double > t, T const &g) const
Obtain DLR coefficients of a Green's function G from scattered imaginary time grid by least squares f...
Definition: dlr_imtime.hpp:203
-
nda::matrix_const_view< double > get_ipmat() const
Get inner product matrix.
Definition: dlr_imtime.hpp:648
-
void innerprod_init() const
Initialization for inner product method.
Definition: dlr_imtime.hpp:740
-
T::regular_type coefs2vals(T const &gc) const
Transform DLR coefficients of Green's function G to values on DLR imaginary time grid.
Definition: dlr_imtime.hpp:115
-
nda::vector< int > piv
LU pivots (LAPACK format) of imaginary time vals -> coefs matrix.
Definition: dlr_imtime.hpp:796
-
nda::matrix< double > lu
LU factors (LAPACK format) of imaginary time vals -> coefs matrix.
Definition: dlr_imtime.hpp:794
-
Tg::regular_type convolve(Tf const &fconv, Tg const &g) const
Compute convolution of two imaginary time Green's functions, given matrix of convolution by one of th...
Definition: dlr_imtime.hpp:362
-
int rank() const
Get DLR rank.
Definition: dlr_imtime.hpp:635
-
nda::vector_const_view< double > get_itnodes() const
Get DLR imaginary time nodes.
Definition: dlr_imtime.hpp:589
+
nda::matrix_const_view< dcomplex > get_it2cf_zlu() const
Get LU factors of transformation matrix from DLR imaginary time values to coefficients,...
Definition: dlr_imtime.hpp:639
+
nda::vector_const_view< double > get_rfnodes() const
Get DLR real frequency nodes.
Definition: dlr_imtime.hpp:616
+
void reflect_init() const
Initialization for reflection method.
Definition: dlr_imtime.hpp:787
+
double lambda() const
Definition: dlr_imtime.hpp:654
+
nda::matrix< dcomplex > zlu
Same as lu, cast to complex (for use with lapack::getrs w/ cmplx input)
Definition: dlr_imtime.hpp:813
+
nda::matrix< S > convmat(double beta, statistic_t statistic, T const &fc, bool time_order=false) const
Compute matrix of convolution by an imaginary time Green's function.
Definition: dlr_imtime.hpp:452
+
T::regular_type convolve(double beta, statistic_t statistic, T const &fc, T const &gc, bool time_order=false) const
Compute convolution of two imaginary time Green's functions.
Definition: dlr_imtime.hpp:309
+
void convolve_init() const
Initialization for convolution methods.
Definition: dlr_imtime.hpp:679
+
static std::string hdf5_format()
Definition: dlr_imtime.hpp:834
+
T::regular_type fitvals2coefs(nda::vector_const_view< double > t, T const &g) const
Obtain DLR coefficients of a Green's function G from scattered imaginary time grid by least squares f...
Definition: dlr_imtime.hpp:221
+
nda::matrix_const_view< double > get_ipmat() const
Get inner product matrix.
Definition: dlr_imtime.hpp:666
+
void innerprod_init() const
Initialization for inner product method.
Definition: dlr_imtime.hpp:758
+
T::regular_type coefs2vals(T const &gc) const
Transform DLR coefficients of Green's function G to values on DLR imaginary time grid.
Definition: dlr_imtime.hpp:133
+
nda::vector< int > piv
LU pivots (LAPACK format) of imaginary time vals -> coefs matrix.
Definition: dlr_imtime.hpp:814
+
nda::matrix< double > lu
LU factors (LAPACK format) of imaginary time vals -> coefs matrix.
Definition: dlr_imtime.hpp:812
+
Tg::regular_type convolve(Tf const &fconv, Tg const &g) const
Compute convolution of two imaginary time Green's functions, given matrix of convolution by one of th...
Definition: dlr_imtime.hpp:380
+
int rank() const
Get DLR rank.
Definition: dlr_imtime.hpp:653
+
nda::vector_const_view< double > get_itnodes() const
Get DLR imaginary time nodes.
Definition: dlr_imtime.hpp:607
imtime_ops()=default
-
S innerprod(T const &fc, T const &gc) const
Compute inner product of two imaginary time Green's functions.
Definition: dlr_imtime.hpp:560
+
S innerprod(T const &fc, T const &gc) const
Compute inner product of two imaginary time Green's functions.
Definition: dlr_imtime.hpp:578
imtime_ops(double lambda, nda::vector_const_view< double > dlr_rf, nda::vector_const_view< double > dlr_it, nda::matrix_const_view< double > cf2it, nda::matrix_const_view< double > it2cf_lu, nda::matrix_const_view< dcomplex > it2cf_zlu, nda::vector_const_view< int > it2cf_piv)
Definition: dlr_imtime.hpp:67
-
double get_rfnodes(int i) const
Definition: dlr_imtime.hpp:599
-
nda::vector_const_view< int > get_it2cf_piv() const
Get LU pivots of transformation matrix from DLR imaginary time values to coefficients.
Definition: dlr_imtime.hpp:628
-
void tconvolve_init() const
Initialization for time-ordered convolution methods.
Definition: dlr_imtime.hpp:701
-
double get_itnodes(int i) const
Definition: dlr_imtime.hpp:590
-
nda::matrix_const_view< double > get_it2cf_lu() const
Get LU factors of transformation matrix from DLR imaginary time values to coefficients.
Definition: dlr_imtime.hpp:613
-
T::regular_type vals2coefs(T const &g, bool transpose=false) const
Transform values of Green's function G on DLR imaginary time grid to DLR coefficients.
Definition: dlr_imtime.hpp:83
+
double get_rfnodes(int i) const
Definition: dlr_imtime.hpp:617
+
nda::vector_const_view< int > get_it2cf_piv() const
Get LU pivots of transformation matrix from DLR imaginary time values to coefficients.
Definition: dlr_imtime.hpp:646
+
void tconvolve_init() const
Initialization for time-ordered convolution methods.
Definition: dlr_imtime.hpp:719
+
double get_itnodes(int i) const
Definition: dlr_imtime.hpp:608
+
nda::matrix_const_view< double > get_it2cf_lu() const
Get LU factors of transformation matrix from DLR imaginary time values to coefficients.
Definition: dlr_imtime.hpp:631
+
T::regular_type vals2coefs(T const &g, bool transpose=false) const
Transform values of Green's function G on DLR imaginary time grid to DLR coefficients.
Definition: dlr_imtime.hpp:101
Definition: dlr_build.cpp:27
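For orientation, here is a minimal end-to-end sketch of the imtime_ops workflow shown in the listing above. It is hypothetical and hedged: the build_dlr_rf helper, the two-argument imtime_ops constructor, the Fermion statistic value, and the header path are assumed from typical cppdlr usage and do not appear in this patch.

#include <cppdlr/cppdlr.hpp>
using namespace cppdlr;

int main() {
  double lambda = 100.0, eps = 1e-10, beta = 20.0;
  auto dlr_rf = build_dlr_rf(lambda, eps);  // DLR real frequency nodes (assumed helper)
  auto itops  = imtime_ops(lambda, dlr_rf); // assumed two-argument constructor

  int r = itops.rank();
  auto g = nda::vector<double>(r); // values of a toy G at the DLR imaginary time nodes
  for (int k = 0; k < r; ++k) { g(k) = -k_it(itops.get_itnodes(k), 1.0); }

  auto gc   = itops.vals2coefs(g);       // DLR grid values -> DLR coefficients
  auto gval = itops.coefs2eval(gc, 0.3); // evaluate the expansion at tau = 0.3 (relative format)
  auto gg   = itops.convolve(beta, Fermion, gc, gc); // convolution of G with itself
  return 0;
}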
diff --git a/docs/main/doxygen/latex/classcppdlr_1_1imtime__ops.tex b/docs/main/doxygen/latex/classcppdlr_1_1imtime__ops.tex
index e5e4b79..42eb895 100644
--- a/docs/main/doxygen/latex/classcppdlr_1_1imtime__ops.tex
+++ b/docs/main/doxygen/latex/classcppdlr_1_1imtime__ops.tex
@@ -572,9 +572,14 @@
 \hline
 \end{DoxyParams}
 \begin{DoxyReturn}{Returns}
-DLR coefficients of G
+DLR coefficients of G (if transpose = false, the default; otherwise, see the note below)
 \end{DoxyReturn}
+\begin{DoxyNote}{Note}
+By setting the optional argument {\ttfamily transpose} to true, this method applies the transpose of the values -\/$>$ coefficients transformation. This is useful for constructing matrices of linear operators which act on vectors of DLR grid values, rather than DLR coefficients. As an example, suppose L is a linear functional such that L\mbox{[}G\mbox{]} = c, where G is a Green\textquotesingle{}s function and c is a scalar. If gc is the vector of DLR coefficients of G, then we can represent L by a vector l, with entries given by the action of L on the DLR basis functions, and we have l$^\wedge$T $\ast$ gc = c. Then
+\end{DoxyNote}
+c = l$^\wedge$T $\ast$ it2cf $\ast$ g = (it2cf$^\wedge$T $\ast$ l)$^\wedge$T $\ast$ g,
+where g is the vector of values of G at the DLR nodes, and it2cf is the imaginary time values -\/$>$ coefficients matrix. Thus it2cf$^\wedge$T $\ast$ l is the vector representing the linear functional L acting on the vector of values of G at the DLR nodes, and it can be precomputed using the vals2coefs method with the transpose option set to true.


\doxysubsection{Friends And Related Function Documentation}
\mbox{\Hypertarget{classcppdlr_1_1imtime__ops_a02e5f8519e54734a74b94226d10e082d}\label{classcppdlr_1_1imtime__ops_a02e5f8519e54734a74b94226d10e082d}}