diff --git a/python/solid_dmft/dmft_tools/solvers/ctseg_interface.py b/python/solid_dmft/dmft_tools/solvers/ctseg_interface.py index a29e423d..ee83ea41 100644 --- a/python/solid_dmft/dmft_tools/solvers/ctseg_interface.py +++ b/python/solid_dmft/dmft_tools/solvers/ctseg_interface.py @@ -92,7 +92,7 @@ def solve(self, **kwargs): self.triqs_solver.Delta_tau << self.Delta_time if self.general_params['h_int_type'][self.icrsh] == 'dyn_density_density': - mpi.report('add dynamic interaction from AIMBES') + mpi.report('\nAdding dynamic interaction from AIMBES.') # convert 4 idx tensor to two index tensor Uloc_dlr = self.gw_params['Uloc_dlr'][self.icrsh]['up'] Uloc_dlr_2idx_prime = Gf(mesh=Uloc_dlr.mesh, target_shape=[Uloc_dlr.target_shape[0], Uloc_dlr.target_shape[1]]) @@ -102,20 +102,27 @@ def solve(self, **kwargs): _, Uprime = reduce_4index_to_2index(Uloc_dlr_idx) Uloc_dlr_2idx_prime[coeff] = Uprime + # extract w=0 limit for analytic Sigma_Hartree for the impurity + Uloc_w0_2idx_prime = make_gf_imfreq(Uloc_dlr_2idx_prime, n_iw=1) + self.Uw0_prime = Uloc_w0_2idx_prime.data[0].real + mpi.report('Screened interaction U(iw) at iw=0 w/o the shift of V_bare (density-density only): ') + mpi.report(self.Uw0_prime) + # create full frequency objects Uloc_tau_2idx_prime = make_gf_imtime(Uloc_dlr_2idx_prime, n_tau=self.solver_params['n_tau_bosonic']) - Uloc_tau_2idx_prime_sumk = BlockGf(name_list=['up', 'down'], block_list=[Uloc_tau_2idx_prime, Uloc_tau_2idx_prime]) - Uloc_tau_2idx_prime_solver = self.sum_k.block_structure.convert_gf( - Uloc_tau_2idx_prime_sumk, ish_from=self.icrsh, space_from='sumk', space_to='solver' - ) - - # fill D0_tau from Uloc_tau_2idx_prime - for iblock, Uloc_i in Uloc_tau_2idx_prime_solver: - for jblock, Uloc_j in Uloc_tau_2idx_prime_solver: - # same spin and opposite spin interaction have same interaction for dynamic part - # Hund's rule does not apply here - self.triqs_solver.D0_tau[iblock, jblock] << Uloc_tau_2idx_prime_solver[iblock] + # fill D0_tau from Uloc_tau_2idx and Uloc_tau_2idx_prime + ish = self.sum_k.inequiv_to_corr[self.icrsh] + norb = Uloc_dlr.target_shape[0] + gf_struct = self.sum_k.gf_struct_solver_list[ish] + o1 = 0 + for name1, n1 in gf_struct: + o2 = 0 + for name2, n2 in gf_struct: + # same and opposite spin are the same + self.triqs_solver.D0_tau[name1, name2] << Uloc_tau_2idx_prime[o1:o1 + n1, o2:o2 + n2].real + o2 = (o2 + n2) % norb + o1 = (o1 + n1) % norb # TODO: add Jerp_Iw to the solver @@ -175,10 +182,6 @@ def set_Gs_from_G_l(): return - # first print average sign - if mpi.is_master_node(): - print('\nAverage sign: {}'.format(self.triqs_solver.results.average_sign)) - # get Delta_time from solver self.Delta_time << self.triqs_solver.Delta_tau @@ -199,27 +202,44 @@ def set_Gs_from_G_l(): self.Sigma_Hartree_sumk = {} self.Sigma_moments = {} if mpi.is_master_node(): + mpi.report('Evaluating static impurity self-energy analytically using interacting density from ctseg...\n' + '(results will be used in the subsequent tail fitting or the crm dyson solver)') # get density density U tensor from solver U_dict = extract_U_dict2(self.h_int) # print("sum_k is" + self.sum_k.__repr__()) norb = common.get_n_orbitals(self.sum_k) norb = norb[self.icrsh]['up'] U_dd = dict_to_matrix(U_dict, gf_struct=self.sum_k.gf_struct_solver_list[self.icrsh]) - # extract Uijij and Uijji terms + # extract Uijij (inter- and intra-orbital Coulomb) and Uijji (Hund's coupling) terms + # a) For static impurity problem, Us are the static screened interactions + # b) For dynamic impurity 
problem, Us are the bare interactions Uijij = U_dd[0:norb, norb : 2 * norb] Uijji = Uijij - U_dd[0:norb, 0:norb] - # and construct full Uijkl tensor + # and construct full Uijkl tensor for static interaction Uijkl = construct_Uijkl(Uijij, Uijji) + if self.general_params['h_int_type'][self.icrsh] == 'dyn_density_density': + # For dynamic impurity problems, separate Us are needed for the Hartree and exchange self-energy + # a) Hartree term is evaluated using the screened interactions at w=0 + # b) Exchange term is evaluated using the bare interactions + Uijij += self.Uw0_prime + Uijji[:, :] = 0.0 + Uw0_ijkl = construct_Uijkl(Uijij, Uijji) + else: + Uw0_ijkl = Uijkl + # now calculated Hartree shift via - # \Sigma^0_{\alpha \beta} = \sum_{i j} n_{i j} \left( 2 U_{\alpha i \beta j} - U_{\alpha i j \beta} \right) + # \Sigma^0_{\alpha \beta} = \sum_{i j} n_{i j} \left( 2 Uw0_{\alpha i \beta j} - U_{\alpha i j \beta} \right) for block, norb in self.sum_k.gf_struct_sumk[self.icrsh]: self.Sigma_Hartree_sumk[block] = np.zeros((norb, norb), dtype=float) for iorb, jorb in product(range(norb), repeat=2): for inner in range(norb): - self.Sigma_Hartree_sumk[block][iorb, jorb] += self.orbital_occupations_sumk[block][inner, inner].real * ( - 2 * Uijkl[iorb, inner, jorb, inner].real - Uijkl[iorb, inner, inner, jorb].real - ) + # exchange diagram K + self.Sigma_Hartree_sumk[block][iorb, jorb] -= ( + self.orbital_occupations_sumk[block][inner, inner].real * (Uijkl[iorb, inner, inner, jorb].real)) + # Hartree (Coulomb) diagram J + self.Sigma_Hartree_sumk[block][iorb, jorb] += ( + self.orbital_occupations_sumk[block][inner, inner].real * (2 * Uw0_ijkl[iorb, inner, jorb, inner].real)) # convert to solver block structure self.Sigma_Hartree = self.sum_k.block_structure.convert_matrix( @@ -252,12 +272,30 @@ def set_Gs_from_G_l(): # if measured in Legendre basis, get G_l from solver too if self.solver_params['legendre_fit']: + mpi.report('Self-energy post-processing algorithm: Legendre fitting') self.G_time_orig << self.triqs_solver.results.G_tau self.G_l << legendre_filter.apply(self.G_time, self.solver_params['n_l']) # get G_time, G_freq, Sigma_freq from G_l set_Gs_from_G_l() elif self.solver_params['perform_tail_fit']: - self.Sigma_freq = inverse(self.G0_freq) - inverse(self.G_freq) + if not self.solver_params['improved_estimator']: + mpi.report('Self-energy post-processing algorithm: tail fitting with analytic static impurity self-energy') + self.Sigma_freq = inverse(self.G0_freq) - inverse(self.G_freq) + else: + mpi.report('Self-energy post-processing algorithm: ' + 'improved estimator + tail fitting with analytic static imppurity self-energy') + self.F_freq = self.G_freq.copy() + self.F_freq << 0.0 + self.F_time = self.G_time.copy() + self.F_time << self.triqs_solver.results.F_tau + F_known_moments = make_zero_tail(self.F_freq, n_moments=1) + for i, bl in enumerate(self.F_freq.indices): + self.F_freq[bl] << Fourier(self.triqs_solver.results.F_tau[bl], F_known_moments[i]) + + for block, fw in self.F_freq: + for iw in fw.mesh: + self.Sigma_freq[block][iw] = self.F_freq[block][iw] / self.G_freq[block][iw] + # without any degenerate shells we run the minimization for all blocks self.Sigma_freq, tail = self._fit_tail_window( self.Sigma_freq, @@ -273,27 +311,23 @@ def set_Gs_from_G_l(): self.G_freq = inverse(inverse(self.G0_freq) - self.Sigma_freq) # if improved estimators are turned on calc Sigma from F_tau, otherwise: - elif self.solver_params['improved_estimator']: + elif self.solver_params['improved_estimator'] 
and not self.solver_params['perform_tail_fit']: + mpi.report('Self-energy post-processing algorithm: improved estimator') self.F_freq = self.G_freq.copy() self.F_freq << 0.0 self.F_time = self.G_time.copy() self.F_time << self.triqs_solver.results.F_tau F_known_moments = make_zero_tail(self.F_freq, n_moments=1) - if mpi.is_master_node(): - for i, bl in enumerate(self.F_freq.indices): - self.F_freq[bl] << Fourier(self.triqs_solver.results.F_tau[bl], F_known_moments[i]) - # fit tail of improved estimator and G_freq - self.F_freq << self._gf_fit_tail_fraction(self.F_freq, fraction=0.9, replace=0.5, known_moments=F_known_moments) - self.G_freq << self._gf_fit_tail_fraction(self.G_freq, fraction=0.9, replace=0.5, known_moments=Gf_known_moments) + for i, bl in enumerate(self.F_freq.indices): + self.F_freq[bl] << Fourier(self.triqs_solver.results.F_tau[bl], F_known_moments[i]) - self.F_freq << mpi.bcast(self.F_freq) - self.G_freq << mpi.bcast(self.G_freq) for block, fw in self.F_freq: for iw in fw.mesh: self.Sigma_freq[block][iw] = self.F_freq[block][iw] / self.G_freq[block][iw] # should this be moved to abstract class? elif self.solver_params['crm_dyson_solver']: + mpi.report('Self-energy post-processing algorithm: crm dyson solver') from triqs.gf.dlr_crm_dyson_solver import minimize_dyson mpi.report('\nCRM Dyson solver to extract Σ impurity\n') @@ -360,7 +394,7 @@ def set_Gs_from_G_l(): self.G_time_dlr = mpi.bcast(self.G_time_dlr) self.G_freq = mpi.bcast(self.G_freq) else: - mpi.report('\n!!!! WARNING !!!! tail of solver output not handled! Turn on either measure_ft, legendre_fit\n') + mpi.report('\n!!!! WARNING !!!! tail of solver output not handled! Turn on either measure_F_tau, legendre_fit\n') self.Sigma_freq << inverse(self.G0_freq) - inverse(self.G_freq) if self.solver_params['measure_state_hist']: @@ -368,9 +402,6 @@ def set_Gs_from_G_l(): if self.solver_params['measure_pert_order']: self.perturbation_order_histo = self.triqs_solver.results.pert_order_Delta - bin_vec = np.arange(0, self.perturbation_order_histo.data.shape[0]) - self.avg_pert_order = np.sum(bin_vec * self.perturbation_order_histo.data[:]) - if mpi.is_master_node(): - print(f'Average perturbation order: {self.avg_pert_order}') + self.avg_pert_order = self.triqs_solver.results.average_order_Delta return diff --git a/python/solid_dmft/gw_embedding/bdft_converter.py b/python/solid_dmft/gw_embedding/bdft_converter.py index de6bebc8..8d40c50d 100644 --- a/python/solid_dmft/gw_embedding/bdft_converter.py +++ b/python/solid_dmft/gw_embedding/bdft_converter.py @@ -102,6 +102,102 @@ def check_iaft_accuracy(Aw, ir_kernel, stats, mpi.report('=================== done ===================\n') +def estimate_zero_moment(Aw, iw_mesh): + iw_m1 = iw_mesh[-1] + iw_m2 = iw_mesh[-2] + t = Aw[-1].real - (Aw[-1] - Aw[-2]).real * iw_m2 ** 2 / ( + iw_m2 ** 2 - iw_m1 ** 2) + t = t.astype(complex) + + return t + + +def extract_t_and_delta(g_weiss_wsIab, ir_kernel): + iwn_mesh_imp = ir_kernel.wn_mesh('f') * np.pi / ir_kernel.beta + + ns, nImp = g_weiss_wsIab.shape[1:3] + weiss_tmp = np.zeros(g_weiss_wsIab.shape, dtype=complex) + for n, g in enumerate(g_weiss_wsIab): + for s in range(ns): + for I in range(nImp): + weiss_tmp[n, s, I] = 1j * iwn_mesh_imp[n] - np.linalg.inv(g[s, I]) + + t_sIab_estimate = estimate_zero_moment(weiss_tmp, iwn_mesh_imp) + + # construct delta: delta(iw) = iw - t - g^{-1}(iw) + delta_estimate = np.zeros(g_weiss_wsIab.shape, dtype=complex) + nbnd = t_sIab_estimate.shape[-1] + for n in range(delta_estimate.shape[0]): + for s in 
range(ns):
+            for I in range(nImp):
+                g_weiss_inv = np.linalg.inv(g_weiss_wsIab[n, s, I])
+                delta_estimate[n, s, I] = 1j * iwn_mesh_imp[n] * np.eye(nbnd) - t_sIab_estimate[s, I] - g_weiss_inv
+    ir_kernel.check_leakage(delta_estimate, 'f', 'delta_estimate', w_input=True)
+
+    return t_sIab_estimate, delta_estimate
+
+
+def read_t_and_delta(aimb_h5, it_1e=None):
+    mpi.report(f"Reading the analytic H_loc0 and the corresponding hybridization from aimbes h5 {aimb_h5}.")
+    with HDFArchive(aimb_h5, 'r') as ar:
+        if not it_1e:
+            it_1e = ar['downfold_1e/final_iter']
+        iter_grp = ar[f'downfold_1e/iter{it_1e}']
+
+        t_sIab = iter_grp['H0_sIab'] + iter_grp['Vhf_gw_sIab'] - iter_grp['Vhf_dc_sIab']
+        if 'Vcorr_gw_sIab' in iter_grp:
+            t_sIab += (iter_grp['Vcorr_gw_sIab'] - iter_grp['Vcorr_dc_sIab'])
+
+        nspin, nImp, nOrbs = t_sIab.shape[:3]
+        for s in np.arange(nspin):
+            for I in range(nImp):
+                t_sIab[s, I] -= np.eye(nOrbs) * iter_grp["mu"]
+
+        delta_wsIab = iter_grp['delta_wsIab']
+
+    return t_sIab, delta_wsIab
+
+
+def tail_fit_g_weiss(g_weiss_wsIab, ir_kernel, gw_data, wmax_imp=None, eps_imp=None):
+    mpi.report("Extracting H_loc0 and hybridization by tail fitting the fermionic Weiss field g.")
+    # use user-defined wmax and eps for the impurity IR kernel if provided
+    if wmax_imp is not None or eps_imp is not None:
+        ir_imp_kernel = IAFT(beta=gw_data['beta'],
+                             lmbda=gw_data['beta'] * (wmax_imp if wmax_imp is not None else gw_data['gw_dlr_wmax']),
+                             prec=eps_imp if eps_imp is not None else gw_data['gw_ir_prec'], verbal=False)
+        g_imp = ir_kernel.w_interpolate(g_weiss_wsIab, ir_imp_kernel.wn_mesh('f'), 'f')
+        ir_imp_kernel.check_leakage(g_imp, 'f', "fermionic Weiss field on the customized imaginary meshes",
+                                    w_input=True)
+    else:
+        ir_imp_kernel = ir_kernel
+        g_imp = g_weiss_wsIab
+
+    mpi.report("")
+    # extracting H0 and Delta from g_weiss
+    t_sIab, delta_wsIab = extract_t_and_delta(g_imp, ir_imp_kernel)
+
+    return t_sIab, delta_wsIab, ir_imp_kernel
+
+
+def bath_fitting(delta_wsIab, iw_mesh, Np=5):
+    mpi.report(f"Bath fitting for hybridization with nbath/impurity orbital = {Np}")
+    try:
+        from adapol import hybfit
+    except ImportError:
+        raise ImportError("bath fitting requires the adapol package (https://github.com/flatironinstitute/adapol). \n"
+                          "Please ensure that it is installed. 
") + + nspin, nImp = delta_wsIab.shape[1:3] + error = -1 + for s in np.arange(nspin): + for I in range(nImp): + bath_energy, bath_hyb, fit_error, func = hybfit(delta_wsIab[:, s, I], 1j*iw_mesh, + Np=5, solver='sdp', verbose=False) + delta_wsIab[:, s, I] = func(1j*iw_mesh) + error = max(error, abs(fit_error)) + mpi.report(f"Bath fitting error = {error}") + + def calc_Sigma_DC_gw(Wloc_dlr, Gloc_dlr, Vloc, verbose=False): r""" Calculate the double counting part of the self-energy from the screened Coulomb interaction @@ -301,7 +397,9 @@ def calc_W_from_Gloc(Gloc_dlr, U): def convert_gw_output(job_h5, gw_h5, dlr_wmax=None, dlr_eps=None, - it_1e=0, it_2e=0, ha_ev_conv = False): + it_1e=0, it_2e=0, + delta_calc_type="tail_fit", delta_bath_fit=False, + ha_ev_conv = False): """ read bdft output and convert to triqs Gf DLR objects @@ -391,50 +489,57 @@ def convert_gw_output(job_h5, gw_h5, dlr_wmax=None, dlr_eps=None, for ir_w in range(Uloc_ir_jk.shape[0]): Uloc_ir[ir_w, or1, or2, or3, or4] = Uloc_ir_jk[ir_w, or1, or3, or2, or4] - # get Hloc_0 Vhf_dc_sIab = ar[f'downfold_1e/iter{it_1e}']['Vhf_dc_sIab'][0, 0] Vhf_sIab = ar[f'downfold_1e/iter{it_1e}']['Vhf_gw_sIab'][0, 0] - H0_loc = ar[f'downfold_1e/iter{it_1e}']['H0_sIab'] if 'Vcorr_gw_sIab' in ar[f'downfold_1e/iter{it_1e}']: mpi.report('Found Vcorr_sIab in the bdft checkpoint file, ' 'i.e. Embedding on top of an effective QP Hamiltonian.') - Vcorr_sIab = ar[f'downfold_1e/iter{it_1e}/Vcorr_gw_sIab'][0, 0] - Vcorr_dc_sIab = ar[f'downfold_1e/iter{it_1e}/Vcorr_dc_sIab'][0, 0] - Hloc0 = -1*(np.eye(n_orb) * gw_data['mu_emb'] - H0_loc[0,0] - Vhf_sIab - Vcorr_sIab + Vhf_dc_sIab + Vcorr_dc_sIab) qp_emb = True else: Sigma_wsIab = ar[f'downfold_1e/iter{it_1e}']['Sigma_gw_wsIab'] qp_emb = False - Hloc0 = -1*(np.eye(n_orb) * gw_data['mu_emb'] - H0_loc[0,0] - (Vhf_sIab-Vhf_dc_sIab)) + mpi.report("") # get IR object - mpi.report('create IR kernel and convert to DLR') + mpi.report('Creating IR kernel and convert to DLR.') # create IR kernel - mpi.report("") + mpi.report("\nReading IR representation from aimbes code...") ir_kernel = IAFT(beta=gw_data['beta'], lmbda=gw_data['lam'], prec=gw_data['gw_ir_prec']) - if dlr_wmax is not None or dlr_eps is not None: - # check user-defined dlr_wmax and dlr_eps for the impurity - check_iaft_accuracy(g_weiss_wsIab, ir_kernel, 'f', - gw_data['beta'], gw_data['gw_dlr_wmax'], gw_data['gw_dlr_prec'], - "fermionic Weiss field g") - + mpi.report("Constructing DLR mesh (wmax, eps) = ({}, {})...".format(gw_data['gw_dlr_wmax'], gw_data['gw_dlr_prec'])) gw_data['mesh_dlr_iw_b'] = MeshDLRImFreq( - beta=gw_data['beta']/conv_fac, + beta=gw_data['beta'] / conv_fac, statistic='Boson', - w_max=gw_data['gw_dlr_wmax']*conv_fac, + w_max=gw_data['gw_dlr_wmax'] * conv_fac, eps=gw_data['gw_dlr_prec'], symmetrize=True ) gw_data['mesh_dlr_iw_f'] = MeshDLRImFreq( - beta=gw_data['beta']/conv_fac, + beta=gw_data['beta'] / conv_fac, statistic='Fermion', - w_max=gw_data['gw_dlr_wmax']*conv_fac, + w_max=gw_data['gw_dlr_wmax'] * conv_fac, eps=gw_data['gw_dlr_prec'], symmetrize=True ) + if delta_calc_type not in {"analytic", "tail_fit"}: + raise ValueError("calc_type must be either \'analytic\' or \'tail_fit\'.") + + if delta_calc_type == "analytic": + Hloc0, delta_wsIab = read_t_and_delta(gw_h5, it_1e) + ir_imp_kernel = ir_kernel + elif delta_calc_type == "tail_fit": + Hloc0, delta_wsIab, ir_imp_kernel = tail_fit_g_weiss(g_weiss_wsIab, ir_kernel, gw_data, + wmax_imp=dlr_wmax, eps_imp=dlr_eps) + if delta_bath_fit: + bath_fitting(delta_wsIab, 
ir_imp_kernel.wn_mesh('f')*np.pi/ir_imp_kernel.beta) + Hloc0 = Hloc0[0,0] + + # need to update g_weiss? + + mpi.report("") + ( U_dlr_list, G0_dlr_list, @@ -464,7 +569,8 @@ def convert_gw_output(job_h5, gw_h5, dlr_wmax=None, dlr_eps=None, G0_dlr = BlockGf(name_list=['up', 'down'], block_list=[temp, temp], make_copies=True) G0_dlr_list.append(G0_dlr) - temp = _get_dlr_from_IR(delta_wsIab[:, 0, ish, :, :]/conv_fac, ir_kernel, gw_data['mesh_dlr_iw_f'], dim=2) + # FIXME make consistent usage of ir_kernel and ir_imp_kernel + temp = _get_dlr_from_IR(delta_wsIab[:, 0, ish, :, :]/conv_fac, ir_imp_kernel, gw_data['mesh_dlr_iw_f'], dim=2) delta_dlr = BlockGf(name_list=['up', 'down'], block_list=[temp, temp], make_copies=True) delta_dlr_list.append(delta_dlr) @@ -495,7 +601,7 @@ def convert_gw_output(job_h5, gw_h5, dlr_wmax=None, dlr_eps=None, gw_data['n_orb'] = n_orb_list # write Uloc / Wloc back to h5 archive - mpi.report(f'writing results in {job_h5}/DMFT_input') + mpi.report(f'Writing results in {job_h5}/DMFT_input') with HDFArchive(job_h5, 'a') as ar: if 'DMFT_input' not in ar: diff --git a/python/solid_dmft/gw_embedding/gw_flow.py b/python/solid_dmft/gw_embedding/gw_flow.py index e5b774ee..46c27788 100644 --- a/python/solid_dmft/gw_embedding/gw_flow.py +++ b/python/solid_dmft/gw_embedding/gw_flow.py @@ -49,14 +49,15 @@ from triqs.gf.meshes import MeshImFreq from triqs.operators import c_dag, c, Operator from triqs_dft_tools.block_structure import BlockStructure +from triqs_dft_tools.sumk_dft import SumkDFT from solid_dmft.version import solid_dmft_hash from solid_dmft.version import version as solid_dmft_version from solid_dmft.dmft_tools import formatter from solid_dmft.dmft_tools import results_to_archive -from solid_dmft.dmft_tools.solver import SolverStructure +from solid_dmft.dmft_tools.solver import create_solver from solid_dmft.dmft_tools import interaction_hamiltonian -from solid_dmft.dmft_cycle import _extract_quantity_per_inequiv +from solid_dmft.dmft_cycle import _extract_quantity_per_inequiv, _determine_block_structure from solid_dmft.dmft_tools import greens_functions_mixer as gf_mixer from solid_dmft.gw_embedding.bdft_converter import convert_gw_output @@ -232,8 +233,9 @@ def embedding_driver(general_params, solver_params, gw_params, advanced_params): assert gw_params['code'] == 'aimbes', 'Only AIMBES is currently supported as gw code' # prepare output h5 archive + archive = general_params['jobname']+'/'+general_params['seedname']+'.h5' if mpi.is_master_node(): - with HDFArchive(general_params['jobname'] + '/' + general_params['seedname'] + '.h5', 'a') as ar: + with HDFArchive(archive, 'a') as ar: if 'DMFT_results' not in ar: ar.create_group('DMFT_results') if 'last_iter' not in ar['DMFT_results']: @@ -259,31 +261,40 @@ def embedding_driver(general_params, solver_params, gw_params, advanced_params): general_params['dlr_wmax'], general_params['dlr_eps'], it_1e = gw_params['it_1e'], it_2e = gw_params['it_2e'], + delta_calc_type = gw_params['delta_calc_type'], + delta_bath_fit = gw_params['delta_bath_fit'] ) gw_params.update(gw_data) mpi.barrier() gw_params = mpi.bcast(gw_params) iteration = gw_params['it_1e'] + # check if general_params[n_iw] is consistent with dlr meshes + mesh_f_idx = np.array([iw.index for iw in gw_params['mesh_dlr_iw_f']]) + mesh_b_idx = np.array([iw.index for iw in gw_params['mesh_dlr_iw_b']]) + max_idx = max(abs(mesh_f_idx[0]), abs(mesh_f_idx[-1]), abs(mesh_b_idx[0]), abs(mesh_b_idx[-1])) + if max_idx > general_params['n_iw']: + mpi.report(f"\n!!! 
WARNING !!!!\n" + f"general_params['n_iw'] = {general_params['n_iw']} < maximum DLRImFreq index ({max_idx}). " + f"solid_dmft will automatically set general_params['n_iw'] = {max_idx+1}.\n") + general_params['n_iw'] = max_idx + 1 + + general_params['beta'] = gw_params['beta'] + sumk_mesh = MeshImFreq(beta=general_params['beta'], statistic='Fermion', n_iw=general_params['n_iw']) + # if GW calculation was performed with spin never average spin channels if gw_params['number_of_spins'] == 2: general_params['magnetic'] = True - # dummy helper class for sumk - sumk = dummy_sumk(gw_params['n_inequiv_shells'], gw_params['n_orb'], - general_params['enforce_off_diag'], gw_params['use_rot'], - general_params['magnetic']) - - sumk.mesh = MeshImFreq(beta=gw_params['beta'], statistic='Fermion', n_iw=general_params['n_iw']) + sumk = SumkDFT(hdf_file=general_params['jobname'] + '/' + general_params['seedname'] + '.h5', + mesh=sumk_mesh, use_dft_blocks=False, h_field=general_params['h_field']) sumk.chemical_potential = gw_params['mu_emb'] sumk.dc_imp = gw_params['Vhf_dc'] - general_params['beta'] = gw_params['beta'] - # create h_int - general_params = _extract_quantity_per_inequiv('h_int_type', sumk.n_inequiv_shells, general_params) - general_params = _extract_quantity_per_inequiv('dc_type', sumk.n_inequiv_shells, general_params) - - h_int, gw_params = interaction_hamiltonian.construct(sumk, general_params, advanced_params, gw_params) + for key in ['h_int_type', 'dc_type', 'enforce_off_diag']: + general_params = _extract_quantity_per_inequiv(key, sumk.n_inequiv_shells, general_params) + for key in ['map_solver_struct', 'pick_solver_struct', 'mapped_solver_struct_degeneracies']: + advanced_params = _extract_quantity_per_inequiv(key, sumk.n_inequiv_shells, advanced_params) if len(solver_params) == 1 and solver_params[0]['idx_impurities'] is None: map_imp_solver = [0] * sumk.n_inequiv_shells @@ -302,6 +313,39 @@ def embedding_driver(general_params, solver_params, gw_params, advanced_params): solver_type_per_imp = [solver_params[map_imp_solver[iineq]]['type'] for iineq in range(sumk.n_inequiv_shells)] mpi.report(f'\nSolver type per impurity: {solver_type_per_imp}') + # determine block_structure + dens_mat_gw = [None] * sumk.n_inequiv_shells + for ish in range(sumk.n_inequiv_shells): + dens_mat_gw[ish] = gw_params['Gloc_dlr'][ish].density() + sumk, _ = _determine_block_structure(sumk, general_params, advanced_params, solver_type_per_imp, dens_mat_gw) + + # print block structure and rotation matrix based on the DFT input + if mpi.is_master_node(): + print('\n number of ineq. correlated shells: {}'.format(sumk.n_inequiv_shells)) + print('\n block structure summary') + for icrsh in range(sumk.n_inequiv_shells): + shlst = [ish for ish, ineq_shell in enumerate(sumk.corr_to_inequiv) if ineq_shell == icrsh] + print(' -- Shell type #{:3d}: '.format(icrsh) + format(shlst)) + print(' | shell multiplicity ' + str(len(shlst))) + print(' | block struct. sum_k : ' + format(sumk.gf_struct_sumk[sumk.inequiv_to_corr[icrsh]])) + print(' | block struct. solver: ' + format(sumk.gf_struct_solver[icrsh])) + print(' | deg. 
orbitals : ' + format(sumk.deg_shells[icrsh])) + + print('\nRotation matrices') + formatter.print_rotation_matrix(sumk) + mpi.barrier() + + # write block structure to h5 archive + if gw_params['it_1e'] == 1 and mpi.is_master_node(): + with HDFArchive(archive, 'a') as ar: + if 'block_structure' not in ar['DMFT_input']: + ar['DMFT_input']['block_structure'] = sumk.block_structure + if 'deg_shells' not in ar['DMFT_input']: + ar['DMFT_input']['deg_shells'] = sumk.deg_shells + + # create h_int + h_int, gw_params = interaction_hamiltonian.construct(sumk, general_params, advanced_params, gw_params) + # create solver objects solvers = [None] * sumk.n_inequiv_shells if mpi.is_master_node(): @@ -317,10 +361,18 @@ def embedding_driver(general_params, solver_params, gw_params, advanced_params): sumk.n_inequiv_shells, max(gw_params['n_orb']),max(gw_params['n_orb'])),dtype=complex) - for ish in range(sumk.n_inequiv_shells): + for icrsh in range(sumk.n_inequiv_shells): # Construct the Solver instances - solvers[ish] = SolverStructure(general_params, solver_params[map_imp_solver[ish]], - gw_params, advanced_params, sumk, ish, h_int[ish]) + mpi.report("Creating solver with new interface") + solvers[icrsh] = create_solver(general_params=general_params, + solver_params=solver_params[map_imp_solver[icrsh]], + gw_params=gw_params, + advanced_params=advanced_params, + sum_k=sumk, + h_int=h_int[icrsh], + icrsh=icrsh, + iteration_offset=iteration, + deg_orbs_ftps=None) # init local density matrices for observables density_tot = 0.0 @@ -362,29 +414,17 @@ def embedding_driver(general_params, solver_params, gw_params, advanced_params): # prepare solver input imp_eal = sumk.block_structure.convert_matrix(gw_params['Hloc0'][ish], ish_from=ish, space_from='sumk', space_to='solver') - for name, g0 in G0_dlr: - # Estimate the HF shift - G0_iw = solvers[ish].G0_freq[name] - Delta_iw = Gf(mesh=G0_iw.mesh, target_shape=G0_iw.target_shape) - Delta_iw << iOmega_n - inverse(G0_iw) - tail, err = fit_hermitian_tail(Delta_iw) - # overwrite H0 using estimation from high-frequency tail - imp_eal[name] = tail[0] - mpi.report(f"Tail fitting for extracting the HF shift in g_weiss with error {err}") + Delta_dlr = sumk.block_structure.convert_gf(gw_params['delta_dlr'][ish], ish_from=ish, space_from='sumk', space_to='solver') + for name, delta in Delta_dlr: if mpi.is_master_node(): print('H_loc0[{:2d}] block: {}'.format(ish, name)) fmt = '{:11.7f}' * imp_eal[name].shape[0] for block in imp_eal[name]: print((' '*11 + fmt).format(*block.real)) - G0_dlr_iw = make_gf_dlr_imfreq(g0) - Delta_dlr_iw = Gf(mesh=G0_dlr_iw.mesh, target_shape=g0.target_shape) - for iw in G0_dlr_iw.mesh: - Delta_dlr_iw[iw] = iw.value - inverse(G0_dlr_iw[iw]) - imp_eal[name] - Delta_dlr = make_gf_dlr(Delta_dlr_iw) # create now full delta(tau) - Delta_tau = make_hermitian(make_gf_imtime(Delta_dlr, n_tau=general_params['n_tau'])) + Delta_tau = make_hermitian(make_gf_imtime(delta, n_tau=general_params['n_tau'])) # without SOC delta_tau needs to be real if not sumk.SO == 1: @@ -400,15 +440,21 @@ def embedding_driver(general_params, solver_params, gw_params, advanced_params): # Make non-interacting operator for Hloc0 Hloc_0 = Operator() - for spin, spin_block in imp_eal.items(): - for o1 in range(spin_block.shape[0]): - for o2 in range(spin_block.shape[1]): - # check if off-diag element is larger than threshold - if o1 != o2 and abs(spin_block[o1, o2]) < solvers[ish].solver_params['off_diag_threshold']: - continue - else: - # TODO: adapt for SOC calculations, which should 
keep the imag part - Hloc_0 += spin_block[o1, o2].real / 2 * (c_dag(spin, o1) * c(spin, o2) + c_dag(spin, o2) * c(spin, o1)) + if solver_type_per_imp[ish] == 'ctseg': + mpi.report('Discarding off-diagonals in Hloc_0 anyway since ctseg enforces it.') + for spin, spin_block in imp_eal.items(): + for o1 in range(spin_block.shape[0]): + Hloc_0 += spin_block[o1, o1].real * c_dag(spin, o1) * c(spin, o1) + else: + for spin, spin_block in imp_eal.items(): + for o1 in range(spin_block.shape[0]): + for o2 in range(spin_block.shape[1]): + # check if off-diag element is larger than threshold + if o1 != o2 and abs(spin_block[o1, o2]) < solver_params[ish]['off_diag_threshold']: + continue + else: + # TODO: adapt for SOC calculations, which should keep the imag part + Hloc_0 += spin_block[o1, o2].real / 2 * (c_dag(spin, o1) * c(spin, o2) + c_dag(spin, o2) * c(spin, o1)) solvers[ish].Hloc_0 = Hloc_0 mpi.report('\nSolving the impurity problem for shell {} ...'.format(ish)) @@ -500,9 +546,9 @@ def embedding_driver(general_params, solver_params, gw_params, advanced_params): for i, (block, gf) in enumerate(Sigma_dlr_sumk): Vhf_imp_sIab[i,ish] = Sigma_Hartree_sumk[block] # Make sure sigma_ir[iw].conj() = sigma_ir[-iw] - for n in range(ir_nw_half): - iw_pos = ir_nw_half+n - iw_neg = ir_nw_half-1-n + for iw_idx in range(ir_nw_half): + iw_pos = ir_nw_half+iw_idx + iw_neg = ir_nw_half-1-iw_idx Sigma_ir[iw_pos,i,ish] = gf(iw_mesh(ir_mesh_idx[iw_pos])) Sigma_ir[iw_neg,i,ish] = gf(iw_mesh(ir_mesh_idx[iw_pos])).conj() @@ -513,14 +559,13 @@ def embedding_driver(general_params, solver_params, gw_params, advanced_params): print("\nChecking impurity self-energy on the IR mesh...") ir_kernel.check_leakage(Sigma_ir, stats='f', name="impurity self-energy", w_input=True) - # Writes results to h5 archive + mpi.report('Writing iter {} results to h5 archives {} and {}.'.format(iteration, archive, gw_params['h5_file'])) if mpi.is_master_node(): - with HDFArchive(general_params['jobname'] + '/' + general_params['seedname'] + '.h5', 'a') as ar: - results_to_archive.write(ar, sumk, general_params, solver_params, solvers, - map_imp_solver, solver_type_per_imp, iteration, - False, gw_params['mu_emb'], density_mat_pre, density_mat) - + results_to_archive.write(archive, sumk, general_params, solver_params, solvers, + map_imp_solver, solver_type_per_imp, iteration, + False, gw_params['mu_emb'], density_mat_pre, density_mat) + with HDFArchive(archive, 'a') as ar: # store also IR / DLR Sigma ar['DMFT_results/it_{}'.format(iteration)]['ir_mesh'] = ir_mesh ar['DMFT_results/it_{}'.format(iteration)]['Sigma_imp_wsIab'] = Sigma_ir @@ -529,7 +574,7 @@ def embedding_driver(general_params, solver_params, gw_params, advanced_params): ar['DMFT_results/it_{}'.format(iteration)][f'Sigma_dlr_{ish}'] = Sigma_dlr[ish] # write results to GW h5_file - with HDFArchive(gw_params['h5_file'],'a') as ar: + with HDFArchive(gw_params['h5_file'], 'a') as ar: ar[f'downfold_1e/iter{iteration}']['Sigma_imp_wsIab'] = Sigma_ir ar[f'downfold_1e/iter{iteration}']['Vhf_imp_sIab'] = Vhf_imp_sIab diff --git a/python/solid_dmft/gw_embedding/iaft.py b/python/solid_dmft/gw_embedding/iaft.py index 4a9ca312..768605ed 100644 --- a/python/solid_dmft/gw_embedding/iaft.py +++ b/python/solid_dmft/gw_embedding/iaft.py @@ -15,7 +15,7 @@ class IAFT(object): Dependency: sparse-ir with xprec supports (https://sparse-ir.readthedocs.io/en/latest/) - To install sparse-ir with xprec supports: "pip install sparse-ir[xprex]". 
+ To install sparse-ir with xprec supports: "pip install sparse-ir[xprec]". Attributes: beta: float @@ -45,7 +45,7 @@ class IAFT(object): nw_b: int Number of bosonic frequency sampling points """ - def __init__(self, beta: float, lmbda: float, prec: float = 1e-15): + def __init__(self, beta: float, lmbda: float, prec: float = 1e-15, verbal: bool = True): """ :param beta: float Inverse temperature (a.u.) @@ -84,20 +84,19 @@ def __init__(self, beta: float, lmbda: float, prec: float = 1e-15): self.Ttw_bb = np.dot(Ttl_bb, self.Tlw_bb) self.Twt_bb = np.dot(Twl_bb, self.Tlt_bb) - print(self) - sys.stdout.flush() + if verbal: + print(self) + sys.stdout.flush() def __str__(self): - return "*******************************\n" \ - "Imaginary-Axis Fourier Transform based on IR basis and sparse-sampling:\n" \ - "*******************************\n" \ - " - precision = {}\n" \ - " - beta = {}\n" \ - " - lambda = {}\n" \ - " - nt_f, nw_f = {}, {}\n" \ - " - nt_b, nw_b = {}, {}\n" \ - "*******************************\n".format(self.prec, self.beta, self.lmbda, self.nt_f, self.nw_f, - self.nt_b, self.nw_b) + return ("Mesh details on the imaginary axis\n" \ + "----------------------------------\n" \ + "precision = {}\n" \ + "beta = {}\n" \ + "lambda = {}\n" \ + "nt_f, nw_f = {}, {}\n" \ + "nt_b, nw_b = {}, {}\n".format(self.prec, self.beta, self.lmbda, self.nt_f, self.nw_f, + self.nt_b, self.nw_b)) def wn_mesh(self, stats: str, ir_notation: bool = True): """ @@ -146,6 +145,41 @@ def tau_to_w(self, Ot, stats: str): Ow = Ow.reshape((Twt.shape[0],) + Ot_shape[1:]) return Ow + def tau_to_w_phsym(self, Ot, stats: str): + """ + Fourier transform from imaginary-time axis to Matsubara-frequency axis w/ particle-hole symmetry + :param Ot: numpy.ndarray + imaginary-time object with dimensions (nts, ...) + :param stats: str + statistics: 'f' for fermions and 'b' for bosons + + :return: numpy.ndarray + Matsubara-frequency object with dimensions (nw, ...) + """ + if stats != 'b': + raise ValueError("FT w/ particle-hole symmetry only support bosonic correlation functions") + + nw_half = self.nw_b // 2 if self.nw_b % 2 == 0 else self.nw_b // 2 + 1 + nt_half = self.nt_b // 2 if self.nt_b % 2 == 0 else self.nt_b // 2 + 1 + if Ot.shape[0] != nt_half: + raise ValueError( + "tau_to_w_phsym: Number of tau points are inconsistent: {} and {}".format(Ot.shape[0], nt_half)) + + Twt_pos = np.zeros((nw_half, nt_half), dtype=self.Twt_bb.dtype) + for n in range(nw_half): + iw = self.nw_b // 2 + n + for it in range(nt_half): + imt = self.nt_b - it - 1 + Twt_pos[n, it] = self.Twt_bb[iw, it] if it == imt else self.Twt_bb[iw, it] + self.Twt_bb[iw, imt] + + Ot_shape = Ot.shape + Ot = Ot.reshape(Ot.shape[0], -1) + Ow = np.dot(Twt_pos, Ot) + + Ot = Ot.reshape(Ot_shape) + Ow = Ow.reshape((Twt_pos.shape[0],) + Ot_shape[1:]) + return Ow + def w_to_tau(self, Ow, stats): """ Fourier transform from Matsubara-frequency axis to imaginary-time axis. @@ -174,6 +208,43 @@ def w_to_tau(self, Ow, stats): Ot = Ot.reshape((Ttw.shape[0],) + Ow_shape[1:]) return Ot + def w_to_tau_phsym(self, Ow, stats): + """ + Fourier transform from Matsubara-frequency axis to imaginary-time axis w/ particle-hole symmetry. + + :param Ow: numpy.ndarray + Matsubara-frequency object with dimensions (nw, ...) + :param stats: str + statistics, 'f' for fermions and 'b' for bosons + + :return: numpy.ndarray + Imaginary-time object with dimensions (nt, ...) 
+ """ + if stats != 'b': + raise ValueError("FT w/ particle-hole symmetry only support bosonic correlation functions") + + nw_half = self.nw_b // 2 if self.nw_b % 2 == 0 else self.nw_b // 2 + 1 + nt_half = self.nt_b // 2 if self.nt_b % 2 == 0 else self.nt_b // 2 + 1 + if Ow.shape[0] != nw_half: + raise ValueError( + "w_to_tau_phsym: Number of w points are inconsistent: {} and {}".format(Ow.shape[0], nw_half)) + + Ttw_pos = np.zeros((nt_half, nw_half), dtype=self.Ttw_bb.dtype) + for it in range(nt_half): + for n in range(nw_half): + iw = self.nw_b // 2 + n + imw = self.nw_b // 2 - n + Ttw_pos[it, n] = self.Ttw_bb[it, iw] if iw == imw else self.Ttw_bb[it, iw] + self.Ttw_bb[it, imw] + + Ow_shape = Ow.shape + Ow = Ow.reshape(Ow.shape[0], -1) + Ot = np.dot(Ttw_pos, Ow) + + Ow = Ow.reshape(Ow_shape) + Ot = Ot.reshape((Ttw_pos.shape[0],) + Ow_shape[1:]) + return Ot + + def w_interpolate(self, Ow, wn_mesh_interp, stats: str, ir_notation: bool = True): """ Interpolate a dynamic object to arbitrary points on the Matsubara axis. @@ -215,6 +286,56 @@ def w_interpolate(self, Ow, wn_mesh_interp, stats: str, ir_notation: bool = True Ow_interp = Ow_interp.reshape((wn_indices.shape[0],) + Ow_shape[1:]) return Ow_interp + def w_interpolate_phsym(self, Ow, wn_mesh_interp, stats: str, ir_notation: bool = True): + """ + Interpolate a dynamic object to arbitrary points on the Matsubara axis. + + :param Ow: numpy.ndarray + Dynamic object on the Matsubara sampling points, self.wn_mesh. + :param wn_mesh_interp: numpy.ndarray(dim=1, dtype=int) + Target frequencies "INDICES". + The physical Matsubara frequencies are wn_mesh_interp * pi/beta. + :param stats: str + Statistics, 'f' for fermions and 'b' for bosons. + :param ir_notation: bool + Whether wn_mesh_interp is in sparse_ir notation where iwn = n*pi/beta for both fermions and bosons. + Otherwise, iwn = (2n+1)*pi/beta for fermions and 2n*pi/beta for bosons. + + :return: numpy.ndarray + Matsubara-frequency object with dimensions (nw_interp, ...) + """ + if stats != 'b': + raise ValueError("FT w/ particle-hole symmetry only support bosonic correlation functions") + + nw_half = self.nw_b // 2 if self.nw_b % 2 == 0 else self.nw_b // 2 + 1 + nt_half = self.nt_b // 2 if self.nt_b % 2 == 0 else self.nt_b // 2 + 1 + if Ow.shape[0] != nw_half: + raise ValueError( + "w_interpolate_phsym: Number of w points are inconsistent: {} and {}".format(Ow.shape[0], nw_half)) + + if ir_notation: + wn_indices = np.asarray(wn_mesh_interp) + else: + wn_indices = np.array([2*n for n in wn_mesh_interp], dtype=int) + Tlw = self.Tlw_bb + Tlw_pos = np.zeros((Tlw.shape[0], nw_half), dtype=Tlw.dtype) + for l in range(Tlw.shape[0]): + for n in range(nw_half): + iw = self.nw_b // 2 + n + imw = self.nw_b // 2 - n + Tlw_pos[l, n] = Tlw[l, iw] if iw == imw else Tlw[l, iw] + Tlw[l, imw] + + Twl_interp = self.bases.basis_b.uhat(wn_indices).T + Tww = np.dot(Twl_interp, Tlw_pos) + + Ow_shape = Ow.shape + Ow = Ow.reshape(Ow.shape[0], -1) + Ow_interp = np.dot(Tww, Ow) + + Ow = Ow.reshape(Ow_shape) + Ow_interp = Ow_interp.reshape((wn_indices.shape[0],) + Ow_shape[1:]) + return Ow_interp + def tau_interpolate(self, Ot, tau_mesh_interp, stats: str): """ Interpolate a dynamic object to arbitrary points on the imaginary-time axis. 
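
Note on the *_phsym helpers added to iaft.py: they all rely on the same particle-hole folding. A bosonic object with O(tau) = O(beta - tau), equivalently O(iw) = O(-iw), is fully determined on the non-negative half of the sampling grid, so the full sampling matrix can be folded onto that half by summing each column with its mirrored partner and counting a self-mirrored midpoint only once. Below is a minimal, self-contained numpy sketch of that identity; the sizes and names are illustrative and not taken from the patch, but the folding mirrors how Twt_pos, Ttw_pos, Tlw_pos and Tlt_pos are assembled above.

```python
# Toy check of the particle-hole folding used by the *_phsym helpers:
# applying a full (nw x nt) sampling matrix T to symmetric data O_full equals
# applying the folded (nw x nt_half) matrix T_pos to the half data O_half.
import numpy as np

rng = np.random.default_rng(0)
nt, nw = 11, 7                                   # odd nt -> one self-mirrored tau point
nt_half = nt // 2 + 1 if nt % 2 else nt // 2     # same convention as in the patch

T = rng.standard_normal((nw, nt))                # stands in for a tau -> iw matrix like Twt_bb
O_half = rng.standard_normal(nt_half)            # data on the non-negative half grid
O_full = np.concatenate([O_half, O_half[-2::-1]])  # mirror back: O_full[it] == O_full[nt-1-it]

# fold the matrix exactly as Twt_pos is built in tau_to_w_phsym
T_pos = np.zeros((nw, nt_half))
for it in range(nt_half):
    imt = nt - it - 1
    T_pos[:, it] = T[:, it] if it == imt else T[:, it] + T[:, imt]

assert np.allclose(T @ O_full, T_pos @ O_half)   # folded and full transforms agree
```
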
@@ -248,7 +369,58 @@ def tau_interpolate(self, Ot, tau_mesh_interp, stats: str): Ot_interp = Ot_interp.reshape((np.shape(tau_mesh_interp)[0],) + Ot_shape[1:]) return Ot_interp + def tau_interpolate_phsym(self, Ot, tau_mesh_interp, stats: str): + """ + Interpolate a dynamic object to arbitrary points on the imaginary-time axis. + + :param Ot: numpy.ndarray + Dynamic object on the imaginary-time sampling points, self.tau_mesh. + :param tau_mesh_interp: numpy.ndarray(dim=1, dtype=float) + Target tau points. + :param stats: str + Statistics, 'f' for fermions and 'b' for bosons + + :return: numpy.ndarray + Imaginary-time object with dimensions (nt_interp, ...) + """ + if stats != 'b': + raise ValueError("FT w/ particle-hole symmetry only support bosonic correlation functions") + + nw_half = self.nw_b // 2 if self.nw_b % 2 == 0 else self.nw_b // 2 + 1 + nt_half = self.nt_b // 2 if self.nt_b % 2 == 0 else self.nt_b // 2 + 1 + if Ot.shape[0] != nt_half: + raise ValueError( + "tau_interpolate_phsym: Number of tau points are inconsistent: {} and {}".format(Ot.shape[0], nw_half)) + + Tlt = self.Tlt_ff if stats == 'f' else self.Tlt_bb + Tlt_pos = np.zeros((Tlt.shape[0], nt_half), dtype=Tlt.dtype) + for l in range(Tlt.shape[0]): + for it in range(nt_half): + imt = self.nt_b - it - 1 + Tlt_pos[l, it] = Tlt[l, it] if it == imt else Tlt[l, it] + Tlt[l, imt] + + Ttl_interp = self.bases.basis_b.u(tau_mesh_interp).T + Ttt = np.dot(Ttl_interp, Tlt_pos) + + Ot_shape = Ot.shape + Ot = Ot.reshape(Ot.shape[0], -1) + Ot_interp = np.dot(Ttt, Ot) + + Ot = Ot.reshape(Ot_shape) + Ot_interp = Ot_interp.reshape((np.shape(tau_mesh_interp)[0],) + Ot_shape[1:]) + return Ot_interp + def check_leakage(self, Ot, stats: str, name: str = "", w_input: bool = False): + """ + Check decay of the IR coefficients to assess the quality of IR basis for the beta and lambda. + The coefficients should decay exponentially, and the leakage is defined as: + leakage = the smallest coefficients / the largest coefficients + :param Ot: + :param stats: + :param name: + :param w_input: + :return: + """ if w_input: Ot_ = self.w_to_tau(Ot, stats) self.check_leakage(Ot_, stats, name, w_input=False) diff --git a/python/solid_dmft/io_tools/default.toml b/python/solid_dmft/io_tools/default.toml index 428ba0ab..0757afdb 100644 --- a/python/solid_dmft/io_tools/default.toml +++ b/python/solid_dmft/io_tools/default.toml @@ -197,6 +197,8 @@ h5_file = "" use_rot = false it_1e = 0 it_2e = 0 +delta_calc_type = "tail_fit" +delta_bath_fit = false [advanced] dc_factor = "" diff --git a/python/solid_dmft/io_tools/documentation.txt b/python/solid_dmft/io_tools/documentation.txt index 111127f4..b307ada3 100644 --- a/python/solid_dmft/io_tools/documentation.txt +++ b/python/solid_dmft/io_tools/documentation.txt @@ -354,20 +354,20 @@ max_time : int, default = -1 maximum amount the solver is allowed to spend in each iteration measure_G_tau : bool, default = True should the solver measure G(tau)? -measure_nnt : boold, default = False +measure_nn_tau : boold, default = False measure two particle density-density correlation function (suszeptibility) measure_pert_order : bool, default = False measure perturbation order histograms: triqs.github.io/cthyb/latest/guide/perturbation_order_notebook.html. The result is stored in the h5 archive under 'DMFT_results' at every iteration in the subgroups 'pert_order_imp_X' and 'pert_order_total_imp_X' -measure_statehist : bool, default = False +measure_state_hist : bool, default = False measure state histogram, i.e. 
diagonal components of many body density matrix n_cycles_tot : int, mandatory total number of sweeps n_l : int, default = None number of Legendre coefficients. Needed if measure_G_l=True or legendre_fit=True -n_tau_k : int, default = 10001 +n_tau_bosonic : int, default = 10001 number imaginary time points for dynamic interactions n_warmup_cycles : int, mandatory number of warmup cycles before real measurement sets in diff --git a/python/solid_dmft/io_tools/verify_input_params.py b/python/solid_dmft/io_tools/verify_input_params.py index 1fe1a1e8..538b01f5 100644 --- a/python/solid_dmft/io_tools/verify_input_params.py +++ b/python/solid_dmft/io_tools/verify_input_params.py @@ -112,7 +112,13 @@ def _verify_input_params_solver(params: FullConfig) -> None: entry['improved_estimator'], entry['perform_tail_fit']] if sum(tail_op) > 1: - raise ValueError('Only one of the options "crm_dyson_solver", "legendre_fit", "improved_estimator", and "perform_tail_fit" can be set to True.') + # allow improved_estimator + tail_fit + if sum([entry['improved_estimator'], entry['perform_tail_fit']]) < 2: + raise ValueError( + 'Except "improved_estimator" + "perform_tail_fit", ' + 'only one of the options "crm_dyson_solver", "legendre_fit", ' + '"improved_estimator", and "perform_tail_fit" can be set to True.' + ) if entry['type'] == 'cthyb': tail_op = [entry['crm_dyson_solver'], entry['legendre_fit'], diff --git a/python/solid_dmft/main.py b/python/solid_dmft/main.py index 54e0cf1a..18130817 100644 --- a/python/solid_dmft/main.py +++ b/python/solid_dmft/main.py @@ -96,9 +96,22 @@ def run_dmft(params, config_file_name=None): elif general_params['gw_embedding']: from solid_dmft.gw_embedding.gw_flow import embedding_driver if mpi.is_master_node(): + # Checks for h5 file + if not os.path.exists(general_params['seedname']+'.h5'): + raise FileNotFoundError('Input h5 file not found') + # Creates output directory if it does not exist if not os.path.exists(general_params['jobname']): os.makedirs(general_params['jobname']) + + # Copies h5 archive to subfolder if it is not there + h5_name = general_params['seedname']+'.h5' + if not os.path.isfile(general_params['jobname']+'/'+os.path.basename(h5_name)): + shutil.copyfile(h5_name, general_params['jobname']+'/'+os.path.basename(h5_name)) + + # Copies config file to subfolder + if config_file_name is not None: + shutil.copyfile(config_file_name, general_params['jobname']+'/'+os.path.basename(config_file_name)) mpi.barrier() # run GW embedding embedding_driver(general_params, solver_params, gw_params, advanced_params)
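
For orientation, the new delta_calc_type = "tail_fit" path in bdft_converter.py boils down to reading the static level t off the high-frequency tail of iw - g^{-1}(iw) and then setting delta(iw) = iw - t - g^{-1}(iw); delta_calc_type = "analytic" instead takes t and delta directly from the downfold_1e group of the AIMBES archive via read_t_and_delta. A minimal numpy sketch of the tail-fit step on a toy single-orbital Weiss field follows; the model and its parameters are purely illustrative, only the two-point extrapolation of estimate_zero_moment is taken from the patch.

```python
# Toy single-orbital version of the tail_fit path: recover the static level t
# from the 1/w^2 tail of iw - g^{-1}(iw) and build delta(iw) = iw - t - g^{-1}(iw).
import numpy as np

beta, t0, V, eps_b = 10.0, -0.3, 0.8, 1.5        # illustrative parameters
n = np.arange(200)
iw = 1j * (2 * n + 1) * np.pi / beta             # fermionic Matsubara frequencies

delta_exact = V**2 / (iw - eps_b)                # one bath level as a stand-in hybridization
g_weiss = 1.0 / (iw - t0 - delta_exact)          # toy fermionic Weiss field

# two-point tail extrapolation of A(iw) = iw - g^{-1}(iw), as in estimate_zero_moment
A = iw - 1.0 / g_weiss
w1, w2 = iw[-1].imag, iw[-2].imag
t_est = A[-1].real - (A[-1] - A[-2]).real * w2**2 / (w2**2 - w1**2)

delta_est = iw - t_est - 1.0 / g_weiss           # hybridization from the fitted level
print(f"t0 = {t0}, estimated t = {t_est:.6f}")
print("max |delta error| =", np.max(np.abs(delta_est - delta_exact)))
```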