diff --git a/python/solid_dmft/dmft_cycle.py b/python/solid_dmft/dmft_cycle.py
index 127f6da0..28270aa9 100755
--- a/python/solid_dmft/dmft_cycle.py
+++ b/python/solid_dmft/dmft_cycle.py
@@ -27,7 +27,6 @@
# system
-import os
from copy import deepcopy
from timeit import default_timer as timer
import numpy as np
@@ -38,9 +37,9 @@
from triqs.version import version as triqs_version
from h5 import HDFArchive
import triqs.utility.mpi as mpi
-from triqs.gf import Gf, make_hermitian, MeshReFreq, MeshImFreq
-from triqs.gf.tools import inverse
-from triqs_dft_tools import BlockStructure
+from triqs.operators import c_dag, c, Operator
+from triqs.gf import make_hermitian, fit_hermitian_tail, MeshReFreq, MeshImFreq, make_gf_from_fourier, iOmega_n
+from triqs.gf.tools import inverse, make_zero_tail
from triqs_dft_tools.sumk_dft import SumkDFT
# own modules
@@ -266,7 +265,7 @@ def _chi_setup(sum_k, solver_params, map_imp_solver):
def dmft_cycle(general_params, solver_params, advanced_params, dft_params,
- n_iter, dft_irred_kpt_indices=None, dft_energy=None):
+ gw_params, n_iter, dft_irred_kpt_indices=None, dft_energy=None):
"""
main dmft cycle that works for one shot and CSC equally
@@ -278,8 +277,10 @@ def dmft_cycle(general_params, solver_params, advanced_params, dft_params,
solver parameters as a dict
advanced_params : dict
advanced parameters as a dict
- observables : dict
- current observable array for calculation
+ dft_params : dict
+ dft parameters as a dict
+ gw_params : dict
+ gw parameters as a dict
n_iter : int
number of iterations to be executed
dft_irred_kpt_indices: iterable of int
@@ -483,7 +484,7 @@ def dmft_cycle(general_params, solver_params, advanced_params, dft_params,
general_params = afm_mapping.determine(general_params, archive, sum_k.n_inequiv_shells)
# Constructs interaction Hamiltonian and writes it to the h5 archive
- h_int = interaction_hamiltonian.construct(sum_k, general_params, solver_type_per_imp)
+ h_int, gw_params = interaction_hamiltonian.construct(sum_k, general_params, solver_type_per_imp, gw_params)
if mpi.is_master_node():
archive['DMFT_input']['h_int'] = h_int
@@ -506,7 +507,7 @@ def dmft_cycle(general_params, solver_params, advanced_params, dft_params,
for icrsh in range(sum_k.n_inequiv_shells):
# Construct the Solver instances
solvers[icrsh] = SolverStructure(general_params, solver_params[map_imp_solver[icrsh]],
- advanced_params, sum_k, icrsh, h_int[icrsh],
+ gw_params, advanced_params, sum_k, icrsh, h_int[icrsh],
iteration_offset, deg_orbs_ftps)
# store solver hash to archive
@@ -522,11 +523,10 @@ def dmft_cycle(general_params, solver_params, advanced_params, dft_params,
# Extracts local GF per *inequivalent* shell
local_gf_dft = sum_k.extract_G_loc(broadening=broadening, with_Sigma=False, mu=dft_mu)
- density_mat_dft = [gf.density() for gf in local_gf_dft]
# Determines initial Sigma and DC
- sum_k, solvers = initial_sigma.determine_dc_and_initial_sigma(general_params, advanced_params, sum_k,
- archive, iteration_offset, density_mat_dft, solvers,
+ sum_k, solvers = initial_sigma.determine_dc_and_initial_sigma(general_params, gw_params, advanced_params, sum_k,
+ archive, iteration_offset, local_gf_dft, solvers,
solver_type_per_imp)
sum_k = manipulate_mu.set_initial_mu(general_params, sum_k, iteration_offset, archive, broadening)
@@ -549,7 +549,7 @@ def dmft_cycle(general_params, solver_params, advanced_params, dft_params,
if mpi.is_master_node() and iteration_offset == 0:
write_header_to_file(general_params, sum_k)
observables = add_dft_values_as_zeroth_iteration(observables, general_params, solver_type_per_imp, dft_mu, dft_energy, sum_k,
- local_gf_dft, density_mat_dft, shell_multiplicity)
+ local_gf_dft, shell_multiplicity)
write_obs(observables, sum_k, general_params)
# write convergence file
convergence.prep_conv_file(general_params, sum_k)
@@ -669,14 +669,57 @@ def _dmft_step(sum_k, solvers, it, general_params,
solvers[icrsh].G0_freq << make_hermitian(solvers[icrsh].G0_freq)
sum_k.symm_deg_gf(solvers[icrsh].G0_freq, ish=icrsh)
+ if ((solver_type_per_imp[icrsh] == 'cthyb' and solver_params[icrsh]['delta_interface'])
+ or solver_type_per_imp[icrsh] == 'ctseg'):
+ mpi.report('\n Using the delta interface for passing Delta(tau) and Hloc0 directly to the solver.')
+ # prepare solver input
+ sumk_eal = sum_k.eff_atomic_levels()[icrsh]
+ solver_eal = sum_k.block_structure.convert_matrix(sumk_eal, space_from='sumk', ish_from=sum_k.inequiv_to_corr[icrsh])
+ # fill Delta_time from Delta_freq sum_k to solver
+ for name, g0 in solvers[icrsh].G0_freq:
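+                # hybridization: Delta(iw_n) = iw_n - eps_loc - G0^{-1}(iw_n)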
+ solvers[icrsh].Delta_freq[name] << iOmega_n - inverse(g0) - solver_eal[name]
+ known_moments = make_zero_tail(solvers[icrsh].Delta_freq[name], 1)
+ tail, err = fit_hermitian_tail(solvers[icrsh].Delta_freq[name], known_moments)
+ # without SOC delta_tau needs to be real
+ if not sum_k.SO == 1:
+ solvers[icrsh].Delta_time[name] << make_gf_from_fourier(solvers[icrsh].Delta_freq[name],
+ solvers[icrsh].Delta_time.mesh, tail).real
+ else:
+ solvers[icrsh].Delta_time[name] << make_gf_from_fourier(solvers[icrsh].Delta_freq[name],
+ solvers[icrsh].Delta_time.mesh, tail)
+
+ if solver_params[icrsh]['diag_delta']:
+ for o1 in range(g0.target_shape[0]):
+ for o2 in range(g0.target_shape[0]):
+ if o1 != o2:
+ solvers[icrsh].Delta_time[name].data[:, o1, o2] = 0.0 + 0.0j
+
+ # Make non-interacting operator for Hloc0
+ Hloc_0 = Operator()
+ for spin, spin_block in solver_eal.items():
+ for o1 in range(spin_block.shape[0]):
+ for o2 in range(spin_block.shape[1]):
+ # check if off-diag element is larger than threshold
+ if o1 != o2 and abs(spin_block[o1,o2]) < solver_params[icrsh]['off_diag_threshold']:
+ continue
+ else:
+ # TODO: adapt for SOC calculations, which should keep the imag part
+ Hloc_0 += spin_block[o1,o2].real/2 * (c_dag(spin,o1) * c(spin,o2) + c_dag(spin,o2) * c(spin,o1))
+ solvers[icrsh].Hloc_0 = Hloc_0
+
# store solver to h5 archive
if general_params['store_solver'] and mpi.is_master_node():
+ if 'solver' not in archive['DMFT_input']:
+ archive['DMFT_input'].create_group('solver')
archive['DMFT_input/solver'].create_group('it_'+str(it))
archive['DMFT_input/solver/it_'+str(it)]['S_'+str(icrsh)] = solvers[icrsh].triqs_solver
# store DMFT input directly in last_iter
if mpi.is_master_node():
archive['DMFT_results/last_iter']['G0_freq_{}'.format(icrsh)] = solvers[icrsh].G0_freq
+ if solver_type_per_imp[icrsh] == 'cthyb' and solver_params[icrsh]['delta_interface']:
+ archive['DMFT_results/last_iter']['Delta_time_{}'.format(icrsh)] = solvers[icrsh].Delta_time
# setup of measurement of chi(SzSz(tau) if requested
# TODO: move this into solver class?
@@ -730,6 +773,9 @@ def _dmft_step(sum_k, solvers, it, general_params,
if solver_type_per_imp[iineq] == 'hartree':
sum_k.dc_energ[icrsh] = solvers[iineq].DC_energy
+ # symmetrize Sigma over degenerate blocks
+ for icrsh in range(sum_k.n_inequiv_shells):
+ sum_k.symm_deg_gf(solvers[icrsh].Sigma_freq, ish=icrsh)
# doing the dmft loop and set new sigma into sumk
sum_k.put_Sigma([solvers[icrsh].Sigma_freq for icrsh in range(sum_k.n_inequiv_shells)])
diff --git a/python/solid_dmft/dmft_tools/initial_self_energies.py b/python/solid_dmft/dmft_tools/initial_self_energies.py
index 41893dff..914e6733 100755
--- a/python/solid_dmft/dmft_tools/initial_self_energies.py
+++ b/python/solid_dmft/dmft_tools/initial_self_energies.py
@@ -34,10 +34,10 @@
# triqs
from h5 import HDFArchive
import triqs.utility.mpi as mpi
-from triqs.gf import BlockGf, Gf
+from triqs.gf import BlockGf, Gf, make_gf_imfreq, MeshDLRImFreq, make_gf_dlr, MeshReFreq
+import itertools
-
-def calculate_double_counting(sum_k, density_matrix, general_params, advanced_params, solver_type_per_imp):
+def calculate_double_counting(sum_k, density_matrix, general_params, gw_params, advanced_params, solver_type_per_imp, G_loc_all=None):
"""
Calculates the double counting, including all manipulations from advanced_params.
@@ -48,8 +48,14 @@ def calculate_double_counting(sum_k, density_matrix, general_params, advanced_pa
List of density matrices for all inequivalent shells
general_params : dict
general parameters as a dict
+ gw_params : dict
+ GW parameters as a dict
advanced_params : dict
advanced parameters as a dict
+ solver_type_per_imp : list of str
+ List of solver types for each impurity
+ G_loc_all : list of BlockGf (Green's function) objects, optional
+ List of local Green's functions for all shells
Returns
--------
@@ -101,6 +107,88 @@ def calculate_double_counting(sum_k, density_matrix, general_params, advanced_pa
Javg = 2*advanced_params['dc_J'][icrsh]
sum_k.calc_dc(density_matrix_DC[icrsh], U_interact=Uavg, J_hund=Javg,
orb=icrsh, use_dc_formula=0)
+ # DC calculated for dynamic interaction from AIMBES
+ elif general_params['dc_type'][icrsh] in ('crpa_static', 'crpa_static_qp', 'crpa_dynamic'):
+ from solid_dmft.gw_embedding.bdft_converter import calc_Sigma_DC_gw, calc_W_from_Gloc, convert_gw_output
+ mpi.report('\n*** Using dynamic interactions to calculate DC ***')
+
+            # load GW input from h5 file
+ if 'Uloc_dlr' not in gw_params:
+ if mpi.is_master_node():
+ gw_data, ir_kernel = convert_gw_output(
+ general_params['jobname'] + '/' + general_params['seedname'] + '.h5',
+ gw_params['h5_file'],
+ it_1e = gw_params['it_1e'],
+ it_2e = gw_params['it_2e'],
+ ha_ev_conv = True
+ )
+ gw_params.update(gw_data)
+ gw_params = mpi.bcast(gw_params)
+
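+            # sparse DLR Matsubara mesh: same beta as sum_k, w_max from the largest sumk Matsubara frequency, eps from Uloc_dlr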
+ mesh = MeshDLRImFreq(sum_k.mesh.beta, 'Fermion',
+ sum_k.mesh(sum_k.mesh.last_index()).value.imag, gw_params['Uloc_dlr'][icrsh].mesh.eps)
+ Gloc_dlr_iw = sum_k.block_structure.create_gf(ish=icrsh, space='sumk', mesh=mesh)
+
+ G_loc_sumk = sum_k.block_structure.convert_gf(G_loc_all[icrsh], ish_from=icrsh, space_from='solver', space_to='sumk')
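+            # evaluate Gloc on the sparse DLR frequencies, then compress to DLR coefficients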
+ for block, gf in Gloc_dlr_iw:
+ for iw in gf.mesh:
+ gf[iw] = G_loc_sumk[block](iw)
+ Gloc_dlr = make_gf_dlr(Gloc_dlr_iw)
+
+ U_matrix_rot = {'up' : gw_params['U_matrix_rot'][icrsh], 'down' : gw_params['U_matrix_rot'][icrsh]}
+
+            # there are two options here: evaluate the DC either from Wloc_GW and Uloc,
+            # or from Wloc_GG and Uloc (GG meaning Wloc calculated via Gloc*Gloc)
+ Wloc_dlr = calc_W_from_Gloc(Gloc_dlr, U_matrix_rot)
+ Sig_DC_dlr, Sig_DC_hartree, Sig_DC_exchange = calc_Sigma_DC_gw(Wloc_dlr,
+ Gloc_dlr,
+ U_matrix_rot)
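+            # Sig_DC_dlr holds the dynamic part; Sig_DC_hartree and Sig_DC_exchange are the static contributions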
+ Sig_DC_iw = make_gf_imfreq(Sig_DC_dlr, n_iw=len(sum_k.mesh)//2)
+ Sig_DC_iw_dyn = Sig_DC_iw.copy()
+ for block, gf in Sig_DC_iw:
+ for iorb, jorb in itertools.product(range(gf.target_shape[0]), repeat=2):
+ # create full freq dependent DC
+ gf[iorb, jorb] += Sig_DC_hartree[block][iorb, jorb].real + Sig_DC_exchange[block][iorb, jorb].real
+
+ # dynamic interaction but static DC
+ if general_params['dc_type'][icrsh] == 'crpa_static':
+ # for the static DC form we follow doi.org/10.1103/PhysRevB.95.155104 Eq 31
+ # Sig_DC = Sig_DC_hartree + Sig_DC_exchange
+ for block, gf in Sig_DC_iw:
+ Sig_DC_hartree[block] += Sig_DC_exchange[block]
+
+ mpi.report(f'DC for imp {icrsh} block {block} via Σ_dc_HF + Σ_dc_ex:')
+ mpi.report(Sig_DC_hartree[block].real)
+ # transform dc to sumk blocks
+ sum_k.dc_imp[icrsh] = Sig_DC_hartree
+
+ elif general_params['dc_type'][icrsh] == 'crpa_static_qp':
+ # for the static DC on top of GW we follow doi.org/10.1103/PhysRevB.95.155104 Eq 31
+ # Sig_DC = Sig_DC_hartree + Sig_DC_exchange + Sig_DC_iw(0)
+ mesh_w = MeshReFreq(window=(-0.5,0.5), n_w=101)
+ Sig_DC_w = sum_k.block_structure.create_gf(ish=icrsh, space='sumk', mesh=mesh_w)
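+                # Pade-continue the dynamic DC to w=0 and Hermitize it to obtain a static potential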
+ for block, gf in Sig_DC_w:
+ gf.set_from_pade(Sig_DC_iw[block], n_points=len(sum_k.mesh)//10, freq_offset=0.0001)
+ Sig_DC_hartree[block] = 0.5*(Sig_DC_w[block](0.0) + Sig_DC_w[block](0.0).conj().T).real
+                    mpi.report(f'DC for imp {icrsh} block {block} via Σ_dc_HF + Σ_dc_ex + ReΣ_dc(ω=0):')
+ mpi.report(Sig_DC_hartree[block].real)
+ # transform dc to sumk blocks
+ sum_k.dc_imp[icrsh] = Sig_DC_hartree
+
+ elif general_params['dc_type'][icrsh] == 'crpa_dynamic':
+ for block, gf in Sig_DC_iw:
+ mpi.report(f'Full dynamic DC from cRPA for imp {icrsh} block {block} at iw_n=0:')
+ mpi.report(gf(0).real)
+                    mpi.report(f'Full dynamic DC from cRPA for imp {icrsh} block {block} at the largest iw_n:')
+ mpi.report(gf.data[-1,:,:].real)
+ Sig_DC_hartree[block] += Sig_DC_exchange[block]
+
+ # sum_k.dc_imp stores the sumk block structure version
+ sum_k.dc_imp[icrsh] = Sig_DC_hartree
+
+            # the dynamic part of the DC is stored in a separate object
+ sum_k.dc_imp_dyn[icrsh] = Sig_DC_iw_dyn
+
else:
mpi.report(f'\nCalculating standard DC for impurity {icrsh} with U={advanced_params["dc_U"][icrsh]} and J={advanced_params["dc_J"][icrsh]}')
sum_k.calc_dc(density_matrix_DC[icrsh], U_interact=advanced_params['dc_U'][icrsh],
@@ -346,8 +434,8 @@ def _set_loaded_sigma(sum_k, loaded_sigma, loaded_dc_imp, general_params):
return start_sigma
-def determine_dc_and_initial_sigma(general_params, advanced_params, sum_k,
- archive, iteration_offset, density_mat_dft, solvers,
+def determine_dc_and_initial_sigma(general_params, gw_params, advanced_params, sum_k,
+ archive, iteration_offset, G_loc_all, solvers,
solver_type_per_imp):
"""
Determines the double counting (DC) and the initial Sigma. This can happen
@@ -368,6 +456,8 @@ def determine_dc_and_initial_sigma(general_params, advanced_params, sum_k,
----------
general_params : dict
general parameters as a dict
+ gw_params : dict
+ GW parameters as a dict
advanced_params : dict
advanced parameters as a dict
sum_k : SumkDFT object
@@ -376,8 +466,8 @@ def determine_dc_and_initial_sigma(general_params, advanced_params, sum_k,
the archive of the current calculation
iteration_offset : int
the iterations done before this calculation
- density_mat_dft : numpy array
- DFT density matrix
+    G_loc_all : list of BlockGf (Green's function) objects
+        List of local Green's functions for all inequivalent shells
solvers : list
list of Solver instances
@@ -391,13 +481,15 @@ def determine_dc_and_initial_sigma(general_params, advanced_params, sum_k,
"""
start_sigma = None
last_g0 = None
+ density_mat_dft = [G_loc_all[iineq].density() for iineq in range(sum_k.n_inequiv_shells)]
if mpi.is_master_node():
# Resumes previous calculation
if iteration_offset > 0:
print('\nFrom previous calculation:', end=' ')
start_sigma, sum_k.dc_imp, sum_k.dc_energ, last_g0, _ = _load_sigma_from_h5(archive, -1)
if general_params['csc'] and not general_params['dc_dmft']:
- sum_k = calculate_double_counting(sum_k, density_mat_dft, general_params, advanced_params, solver_type_per_imp)
+ sum_k = calculate_double_counting(sum_k, density_mat_dft, general_params, gw_params,
+ advanced_params, solver_type_per_imp, G_loc_all)
# Loads Sigma from different calculation
elif general_params['load_sigma']:
print('\nFrom {}:'.format(general_params['path_to_sigma']), end=' ')
@@ -408,27 +500,27 @@ def determine_dc_and_initial_sigma(general_params, advanced_params, sum_k,
# Recalculate double counting in case U, J or DC formula changed
if general_params['dc']:
if general_params['dc_dmft']:
- sum_k = calculate_double_counting(sum_k, loaded_density_matrix,
- general_params, advanced_params,
- solver_type_per_imp)
+ sum_k = calculate_double_counting(sum_k, loaded_density_matrix, general_params, gw_params,
+ advanced_params, solver_type_per_imp, G_loc_all)
else:
- sum_k = calculate_double_counting(sum_k, density_mat_dft,
- general_params, advanced_params,
- solver_type_per_imp)
+ sum_k = calculate_double_counting(sum_k, density_mat_dft, general_params, gw_params,
+ advanced_params, solver_type_per_imp, G_loc_all)
start_sigma = _set_loaded_sigma(sum_k, loaded_sigma, loaded_dc_imp, general_params)
# Sets DC as Sigma because no initial Sigma given
elif general_params['dc']:
- sum_k = calculate_double_counting(sum_k, density_mat_dft, general_params, advanced_params,
- solver_type_per_imp)
+ sum_k = calculate_double_counting(sum_k, density_mat_dft, general_params, gw_params,
+ advanced_params, solver_type_per_imp, G_loc_all)
# initialize Sigma from sum_k
start_sigma = [sum_k.block_structure.create_gf(ish=iineq, gf_function=Gf, space='solver',
mesh=sum_k.mesh)
for iineq in range(sum_k.n_inequiv_shells)]
for icrsh in range(sum_k.n_inequiv_shells):
- dc_value = sum_k.dc_imp[sum_k.inequiv_to_corr[icrsh]][sum_k.spin_block_names[sum_k.SO][0]][0, 0]
+ dc_pot = sum_k.block_structure.convert_matrix(sum_k.dc_imp[sum_k.inequiv_to_corr[icrsh]],
+ ish_from=sum_k.inequiv_to_corr[icrsh],
+ space_from='sumk', space_to='solver')
if (general_params['magnetic'] and general_params['magmom'] and sum_k.SO == 0):
# if we are doing a magnetic calculation and initial magnetic moments
@@ -439,13 +531,14 @@ def determine_dc_and_initial_sigma(general_params, advanced_params, sum_k,
# if magmom positive the up channel will be favored
for spin_channel in sum_k.gf_struct_solver[icrsh].keys():
if 'up' in spin_channel:
- start_sigma[icrsh][spin_channel] << -fac + dc_value
+ start_sigma[icrsh][spin_channel] << -fac + dc_pot[spin_channel]
else:
- start_sigma[icrsh][spin_channel] << fac + dc_value
+ start_sigma[icrsh][spin_channel] << fac + dc_pot[spin_channel]
else:
- start_sigma[icrsh] << dc_value
- # Sets Sigma to zero because neither initial Sigma nor DC given
+ for spin_channel in sum_k.gf_struct_solver[icrsh].keys():
+ start_sigma[icrsh][spin_channel] << dc_pot[spin_channel]
+ # Sets Sigma to zero because neither initial Sigma nor DC given
elif (not general_params['dc'] and general_params['magnetic']):
start_sigma = [sum_k.block_structure.create_gf(ish=iineq, gf_function=Gf, space='solver', mesh=sum_k.mesh)
for iineq in range(sum_k.n_inequiv_shells)]
diff --git a/python/solid_dmft/dmft_tools/interaction_hamiltonian.py b/python/solid_dmft/dmft_tools/interaction_hamiltonian.py
index 2cc21a57..daeac30f 100644
--- a/python/solid_dmft/dmft_tools/interaction_hamiltonian.py
+++ b/python/solid_dmft/dmft_tools/interaction_hamiltonian.py
@@ -35,57 +35,90 @@
# triqs
from h5 import HDFArchive
import triqs.utility.mpi as mpi
+from triqs.gf import make_gf_imfreq
from triqs.operators import util, n, c, c_dag, Operator
from solid_dmft.dmft_tools import solver
+
+
try:
import forktps as ftps
except ImportError:
pass
-def _load_crpa_interaction_matrix(sum_k, filename='UIJKL'):
+def _load_crpa_interaction_matrix(sum_k, general_params, gw_params, filename='UIJKL'):
"""
- Loads VASP cRPA data to use as an interaction Hamiltonian.
+ Loads dynamic interaction data to use as an interaction Hamiltonian.
"""
def _round_to_int(data):
return (np.array(data) + .5).astype(int)
+ if gw_params['code'] == 'Vasp':
# Loads data from VASP cRPA file
- print('Loading cRPA matrix from file: '+str(filename))
- data = np.loadtxt(filename, unpack=True)
- u_matrix_four_indices = np.zeros(_round_to_int(np.max(data[:4], axis=1)), dtype=complex)
- for entry in data.T:
- # VASP switches the order of the indices, ijkl -> ikjl
- i, k, j, l = _round_to_int(entry[:4])-1
- u_matrix_four_indices[i, j, k, l] = entry[4] + 1j * entry[5]
-
- # Slices up the four index U-matrix, separating shells
- u_matrix_four_indices_per_shell = [None] * sum_k.n_inequiv_shells
- first_index_shell = 0
- for ish in range(sum_k.n_corr_shells):
- icrsh = sum_k.corr_to_inequiv[ish]
- n_orb = solver.get_n_orbitals(sum_k)[icrsh]['up']
- u_matrix_temp = u_matrix_four_indices[first_index_shell:first_index_shell+n_orb,
- first_index_shell:first_index_shell+n_orb,
- first_index_shell:first_index_shell+n_orb,
- first_index_shell:first_index_shell+n_orb]
- # I think for now we should stick with real interactions make real
- u_matrix_temp.imag = 0.0
-
- if ish == icrsh:
- u_matrix_four_indices_per_shell[icrsh] = u_matrix_temp
- elif not np.allclose(u_matrix_four_indices_per_shell[icrsh], u_matrix_temp, atol=1e-6, rtol=0):
- # TODO: for some reason, some entries in the matrices differ by a sign. Check that
- # mpi.report(np.allclose(np.abs(u_matrix_four_indices_per_shell[icrsh]), np.abs(u_matrix_temp),
- # atol=1e-6, rtol=0))
- mpi.report('Warning: cRPA matrix for impurity {} '.format(icrsh)
- + 'differs for shells {} and {}'.format(sum_k.inequiv_to_corr[icrsh], ish))
-
- first_index_shell += n_orb
-
- if not np.allclose(u_matrix_four_indices.shape, first_index_shell):
- print('Warning: different number of orbitals in cRPA matrix than in calculation.')
-
- return u_matrix_four_indices_per_shell
+ print('Loading Vasp cRPA matrix from file: '+str(filename))
+ data = np.loadtxt(filename, unpack=True)
+ u_matrix_four_indices = np.zeros(_round_to_int(np.max(data[:4], axis=1)), dtype=complex)
+ for entry in data.T:
+ # VASP switches the order of the indices, ijkl -> ikjl
+ i, k, j, l = _round_to_int(entry[:4])-1
+ u_matrix_four_indices[i, j, k, l] = entry[4] + 1j * entry[5]
+
+ # Slices up the four index U-matrix, separating shells
+ u_matrix_four_indices_per_shell = [None] * sum_k.n_inequiv_shells
+ first_index_shell = 0
+ for ish in range(sum_k.n_corr_shells):
+ icrsh = sum_k.corr_to_inequiv[ish]
+ n_orb = solver.get_n_orbitals(sum_k)[icrsh]['up']
+ u_matrix_temp = u_matrix_four_indices[first_index_shell:first_index_shell+n_orb,
+ first_index_shell:first_index_shell+n_orb,
+ first_index_shell:first_index_shell+n_orb,
+ first_index_shell:first_index_shell+n_orb]
+            # for now we stick with real interactions, so discard the imaginary part
+ u_matrix_temp.imag = 0.0
+
+ if ish == icrsh:
+ u_matrix_four_indices_per_shell[icrsh] = u_matrix_temp
+ elif not np.allclose(u_matrix_four_indices_per_shell[icrsh], u_matrix_temp, atol=1e-6, rtol=0):
+ # TODO: for some reason, some entries in the matrices differ by a sign. Check that
+ # mpi.report(np.allclose(np.abs(u_matrix_four_indices_per_shell[icrsh]), np.abs(u_matrix_temp),
+ # atol=1e-6, rtol=0))
+ mpi.report('Warning: cRPA matrix for impurity {} '.format(icrsh)
+ + 'differs for shells {} and {}'.format(sum_k.inequiv_to_corr[icrsh], ish))
+
+ first_index_shell += n_orb
+
+ if not np.allclose(u_matrix_four_indices.shape, first_index_shell):
+ print('Warning: different number of orbitals in cRPA matrix than in calculation.')
+
+ elif gw_params['code'] == 'aimbes':
+ from solid_dmft.gw_embedding.bdft_converter import convert_gw_output
+ u_matrix_four_indices_per_shell = []
+        # load GW input from h5 file
+ if mpi.is_master_node():
+ if 'Uloc_dlr' not in gw_params:
+ gw_data, ir_kernel = convert_gw_output(
+ general_params['jobname'] + '/' + general_params['seedname'] + '.h5',
+ gw_params['h5_file'],
+ it_1e = gw_params['it_1e'],
+ it_2e = gw_params['it_2e'],
+ ha_ev_conv = True
+ )
+ gw_params.update(gw_data)
+ for icrsh in range(sum_k.n_inequiv_shells):
+ # for now we assume that up / down are equal
+ if general_params['h_int_type'][icrsh] in ('crpa', 'crpa_density_density'):
+ Uloc_0 = make_gf_imfreq(gw_params['Uloc_dlr'][icrsh]['up'],1)
+ u_matrix_four_indices_per_shell.append(Uloc_0.data[0,:,:,:,:] + gw_params['Vloc'][icrsh]['up'])
+ else:
+ u_matrix_four_indices_per_shell.append(gw_params['Vloc'][icrsh]['up'])
+
+ mpi.barrier()
+ u_matrix_four_indices_per_shell = mpi.bcast(u_matrix_four_indices_per_shell)
+ gw_params = mpi.bcast(gw_params)
+ else:
+ raise ValueError('Unknown code for reading cRPA results: {}'.format(gw_params['code']))
+
+ return u_matrix_four_indices_per_shell, gw_params
# def _adapt_U_2index_for_SO(Umat, Upmat):
@@ -369,11 +402,11 @@ def _rotate_four_index_matrix(sum_k, general_params, Umat_full, icrsh):
# Transposes rotation matrix here because TRIQS has a slightly different definition
Umat_full_rotated = util.transform_U_matrix(Umat_full, sum_k.rot_mat[ish].T)
- if general_params['h_int_type'][icrsh] in ('density_density', 'crpa_density_density'):
+ if general_params['h_int_type'][icrsh] in ('density_density', 'crpa_density_density', 'dyn_density_density'):
if not np.allclose(Umat_full_rotated, Umat_full):
mpi.report('WARNING: applying a rotation matrix changes the dens-dens Hamiltonian.\n'
+ 'This changes the definition of the ignored spin flip and pair hopping.')
- elif general_params['h_int_type'][icrsh] in ('full_slater', 'crpa'):
+ elif general_params['h_int_type'][icrsh] in ('full_slater', 'crpa', 'dyn_full'):
if not np.allclose(Umat_full_rotated, Umat_full):
mpi.report('WARNING: applying a rotation matrix changes the interaction Hamiltonian.\n'
+ 'Please ensure that the rotation is correct!')
@@ -470,7 +503,7 @@ def h_int_simple_intra(spin_names,n_orb,U,off_diag=None,map_operator_structure=N
return H
-def construct(sum_k, general_params, solver_type_per_imp):
+def construct(sum_k, general_params, solver_type_per_imp, gw_params=None):
"""
Constructs the interaction Hamiltonian. Currently implemented are the
Kanamori Hamiltonian (usually for 2 or 3 orbitals), the density-density and
@@ -555,28 +588,25 @@ def construct(sum_k, general_params, solver_type_per_imp):
# read from file options
- if general_params['h_int_type'][icrsh] in ('crpa', 'crpa_density_density'):
- Umat_full = _load_crpa_interaction_matrix(sum_k, icrsh)
+ if general_params['h_int_type'][icrsh] in ('crpa', 'crpa_density_density', 'dyn_density_density', 'dyn_full'):
+ Umat_full, gw_params = _load_crpa_interaction_matrix(sum_k, general_params, gw_params)
if sum_k.SO == 1:
- Umat_full = [_adapt_U_4index_for_SO(Umat_full_per_imp)
- for Umat_full_per_imp in Umat_full]
+ Umat_full[icrsh] = _adapt_U_4index_for_SO(Umat_full[icrsh])
# Rotates the interaction matrix
- Umat_full_rotated = _rotate_four_index_matrix(sum_k, general_params, Umat_full, icrsh)
+ if sum_k.use_rotations:
+ Umat_full[icrsh] = _rotate_four_index_matrix(sum_k, general_params, Umat_full[icrsh], icrsh)
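+        # zero out numerically small elements of the U tensor (below U_crpa_threshold)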
+ Umat_full[icrsh][np.abs(Umat_full[icrsh]) < general_params['U_crpa_threshold']] = 0.0
+        gw_params['U_matrix_rot'] = Umat_full
# construct slater / density density from U tensor
- if general_params['h_int_type'][icrsh] == 'crpa':
- h_int[icrsh] = _construct_slater(sum_k, general_params, Umat_full_rotated, icrsh)
+ if general_params['h_int_type'][icrsh] in ('crpa', 'dyn_full'):
+ h_int[icrsh] = _construct_slater(sum_k, general_params, Umat_full[icrsh].real, icrsh)
else:
- h_int[icrsh] = _construct_density_density(sum_k, general_params, Umat_full_rotated, icrsh)
- continue
-
- # dynamic interaction from file
- if general_params['h_int_type'][icrsh] == 'dynamic':
- h_int[icrsh] = _construct_dynamic(sum_k, general_params, icrsh)
+ h_int[icrsh] = _construct_density_density(sum_k, general_params, Umat_full[icrsh].real, icrsh)
continue
raise NotImplementedError('Error when constructing the interaction Hamiltonian.')
- return h_int
+ return h_int, gw_params
diff --git a/python/solid_dmft/dmft_tools/observables.py b/python/solid_dmft/dmft_tools/observables.py
index ecc29667..ffe7d02b 100644
--- a/python/solid_dmft/dmft_tools/observables.py
+++ b/python/solid_dmft/dmft_tools/observables.py
@@ -147,7 +147,7 @@ def write_header_to_file(general_params, sum_k):
def add_dft_values_as_zeroth_iteration(observables, general_params, solver_type_per_imp, dft_mu, dft_energy,
- sum_k, G_loc_all_dft, density_mat_dft, shell_multiplicity):
+ sum_k, G_loc_all_dft, shell_multiplicity):
"""
Calculates the DFT observables that should be written as the zeroth iteration.
@@ -157,14 +157,15 @@ def add_dft_values_as_zeroth_iteration(observables, general_params, solver_type_
general_params : general parameters as a dict
+ solver_type_per_imp : list of strings
+ list of solver types for each impurity
+
dft_mu : dft chemical potential
sum_k : SumK Object instances
G_loc_all_dft : Gloc from DFT for G(beta/2)
- density_mat_dft : occupations from DFT
-
shell_multiplicity : degeneracy of impurities
Returns
@@ -173,6 +174,7 @@ def add_dft_values_as_zeroth_iteration(observables, general_params, solver_type_
observables: list of dicts
"""
dft_energy = 0.0 if dft_energy is None else dft_energy
+ density_mat_dft = [G_loc_all_dft[iineq].density() for iineq in range(sum_k.n_inequiv_shells)]
observables['iteration'].append(0)
observables['mu'].append(float(dft_mu))
diff --git a/python/solid_dmft/dmft_tools/results_to_archive.py b/python/solid_dmft/dmft_tools/results_to_archive.py
index 24a196f5..39822399 100644
--- a/python/solid_dmft/dmft_tools/results_to_archive.py
+++ b/python/solid_dmft/dmft_tools/results_to_archive.py
@@ -43,6 +43,9 @@ def _compile_information(sum_k, general_params, solver_params, solvers, map_imp_
if dens is not None:
write_to_h5['deltaN_trace'] = dens
+    if any(str(entry) in ('crpa_dynamic',) for entry in general_params['dc_type']):
+ write_to_h5['DC_pot_dyn'] = sum_k.dc_imp_dyn
+
for icrsh in range(sum_k.n_inequiv_shells):
isolvsec = map_imp_solver[icrsh]
if solver_type_per_imp[icrsh] in ['cthyb', 'hubbardI']:
@@ -52,6 +55,10 @@ def _compile_information(sum_k, general_params, solver_params, solvers, map_imp_
if solver_params[isolvsec]['measure_density_matrix']:
write_to_h5['full_dens_mat_{}'.format(icrsh)] = solvers[icrsh].density_matrix
write_to_h5['h_loc_diag_{}'.format(icrsh)] = solvers[icrsh].h_loc_diagonalization
+ if solver_type_per_imp[icrsh] in ('cthyb','hubbardI'):
+ write_to_h5['Sigma_moments_{}'.format(icrsh)] = solvers[icrsh].Sigma_moments
+ write_to_h5['G_moments_{}'.format(icrsh)] = solvers[icrsh].G_moments
+ write_to_h5['Sigma_Hartree_{}'.format(icrsh)] = solvers[icrsh].Sigma_Hartree
elif solver_type_per_imp[icrsh] == 'ftps':
write_to_h5['Delta_freq_{}'.format(icrsh)] = solvers[icrsh].Delta_freq
@@ -75,6 +82,10 @@ def _compile_information(sum_k, general_params, solver_params, solvers, map_imp_
write_to_h5['G_time_orig_{}'.format(icrsh)] = solvers[icrsh].G_time_orig
write_to_h5['Gimp_l_{}'.format(icrsh)] = solvers[icrsh].G_l
+ if solver_params[isolvsec]['crm_dyson_solver']:
+ write_to_h5['G_time_dlr_{}'.format(icrsh)] = solvers[icrsh].G_time_dlr
+ write_to_h5['Sigma_dlr_{}'.format(icrsh)] = solvers[icrsh].Sigma_dlr
+
if solver_type_per_imp[icrsh] == 'ctint' and solver_params[isolvsec]['measure_histogram']:
write_to_h5['pert_order_imp_{}'.format(icrsh)] = solvers[icrsh].perturbation_order
@@ -91,12 +102,18 @@ def _compile_information(sum_k, general_params, solver_params, solvers, map_imp_
if solver_type_per_imp[icrsh] == 'ctseg':
# if legendre was set, that we have both now!
- if (solver_params[isolvsec]['measure_gl'] or solver_params[isolvsec]['legendre_fit']):
+ if (solver_params[isolvsec]['legendre_fit']):
write_to_h5['G_time_orig_{}'.format(icrsh)] = solvers[icrsh].G_time_orig
write_to_h5['Gimp_l_{}'.format(icrsh)] = solvers[icrsh].G_l
- if solver_params[isolvsec]['measure_ft']:
+ if solver_params[isolvsec]['improved_estimator']:
write_to_h5['F_freq_{}'.format(icrsh)] = solvers[icrsh].F_freq
write_to_h5['F_time_{}'.format(icrsh)] = solvers[icrsh].F_time
+ if solver_params[isolvsec]['crm_dyson_solver']:
+ write_to_h5['G_time_dlr_{}'.format(icrsh)] = solvers[icrsh].G_time_dlr
+ write_to_h5['Sigma_dlr_{}'.format(icrsh)] = solvers[icrsh].Sigma_dlr
+ if general_params['h_int_type'][icrsh] == 'dyn_density_density':
+ write_to_h5['D0_time_{}'.format(icrsh)] = solvers[icrsh].triqs_solver.D0_tau
+ write_to_h5['Jperp_time_{}'.format(icrsh)] = solvers[icrsh].triqs_solver.Jperp_tau
return write_to_h5
diff --git a/python/solid_dmft/dmft_tools/solver.py b/python/solid_dmft/dmft_tools/solver.py
index 5f9d381c..a2f08d30 100755
--- a/python/solid_dmft/dmft_tools/solver.py
+++ b/python/solid_dmft/dmft_tools/solver.py
@@ -21,14 +21,18 @@
# .
#
################################################################################
+# pyright: reportUnusedExpression=false
import numpy as np
from itertools import product
-from triqs.gf import MeshImTime, MeshReTime, MeshReFreq, MeshLegendre, Gf, BlockGf, make_hermitian, Omega, iOmega_n, make_gf_from_fourier, fit_hermitian_tail
+from triqs.gf import MeshImTime, MeshReTime, MeshDLRImFreq, MeshReFreq, MeshLegendre, Gf, BlockGf, make_gf_imfreq, make_hermitian, Omega, iOmega_n, make_gf_from_fourier, make_gf_dlr, fit_gf_dlr, make_gf_dlr_imtime, make_gf_imtime
from triqs.gf.tools import inverse, make_zero_tail
from triqs.gf.descriptors import Fourier
-from triqs.operators import c_dag, c, Operator
+from triqs.operators import c_dag, c, Operator, util
+from triqs.operators.util.U_matrix import reduce_4index_to_2index
+from triqs.operators.util.extractors import block_matrix_from_op
import triqs.utility.mpi as mpi
+import itertools
from h5 import HDFArchive
from solid_dmft.io_tools.dict_to_h5 import prep_params_for_h5
@@ -61,7 +65,7 @@ def get_n_orbitals(sum_k):
return n_orbitals
-def _gf_fit_tail_fraction(Gf, fraction=0.4, replace=None, known_moments=None):
+def _gf_fit_tail_fraction(Gf, fraction=0.4, replace=None, known_moments=[]):
"""
fits the tail of Gf object by making a polynomial
fit of the Gf on the given fraction of the Gf mesh
@@ -92,10 +96,10 @@ def _gf_fit_tail_fraction(Gf, fraction=0.4, replace=None, known_moments=None):
for i, bl in enumerate(Gf_fit.indices):
Gf_fit[bl].mesh.set_tail_fit_parameters(tail_fraction=fraction)
- if known_moments:
- tail = Gf_fit[bl].fit_hermitian_tail(known_moments[i])
- else:
+ if known_moments == []:
tail = Gf_fit[bl].fit_hermitian_tail()
+ else:
+ tail = Gf_fit[bl].fit_hermitian_tail(known_moments[i])
nmax_frac = int(len(Gf_fit[bl].mesh)/2 * (1-replace))
Gf_fit[bl].replace_by_tail(tail[0],n_min=nmax_frac)
@@ -115,7 +119,8 @@ class SolverStructure:
solve impurity problem
'''
- def __init__(self, general_params, solver_params, advanced_params, sum_k, icrsh, h_int, iteration_offset, deg_orbs_ftps):
+ def __init__(self, general_params, solver_params, gw_params, advanced_params, sum_k,
+ icrsh, h_int, iteration_offset=None, deg_orbs_ftps=None):
r'''
Initialisation of the solver instance with h_int for impurity "icrsh" based on soliDMFT parameters.
@@ -137,6 +142,7 @@ def __init__(self, general_params, solver_params, advanced_params, sum_k, icrsh,
self.general_params = general_params
self.solver_params = solver_params
+ self.gw_params = gw_params
self.advanced_params = advanced_params
self.sum_k = sum_k
self.icrsh = icrsh
@@ -257,15 +263,16 @@ def _init_ImFreq_objects(self):
# create all ImTime instances
self.n_tau = self.general_params['n_tau']
self.G_time = self.sum_k.block_structure.create_gf(ish=self.icrsh, gf_function=Gf, space='solver',
- mesh=MeshImTime(beta=self.general_params['beta'],
+ mesh=MeshImTime(beta=self.sum_k.mesh.beta,
S='Fermion', n_tau=self.n_tau)
)
# copy
self.Delta_time = self.G_time.copy()
# create all Legendre instances
- if (self.solver_params['type'] in ['cthyb', 'ctseg']
+ if (self.solver_params['type'] in ['cthyb']
and (self.solver_params['measure_G_l'] or self.solver_params['legendre_fit'])
+ or self.solver_params['type'] == 'ctseg' and self.solver_params['legendre_fit']
or self.solver_params['type'] == 'hubbardI' and self.solver_params['measure_G_l']):
self.n_l = self.solver_params['n_l']
@@ -276,6 +283,10 @@ def _init_ImFreq_objects(self):
# move original G_freq to G_freq_orig
self.G_time_orig = self.G_time.copy()
+ if self.solver_params['type'] in ['cthyb', 'ctseg'] and self.solver_params['crm_dyson_solver']:
+ self.G_time_dlr = None
+ self.Sigma_dlr = None
+
if self.solver_params['type'] in ['cthyb', 'hubbardI'] and self.solver_params['measure_density_matrix']:
self.density_matrix = None
self.h_loc_diagonalization = None
@@ -283,6 +294,9 @@ def _init_ImFreq_objects(self):
if self.solver_params['type'] == 'cthyb' and self.solver_params['measure_chi'] is not None:
self.O_time = None
+ if self.solver_params['type'] in ['cthyb'] and self.solver_params['delta_interface']:
+ self.Hloc_0 = Operator()
+
def _init_ReFreq_objects(self):
r'''
Initialize all ReFreq objects
@@ -354,47 +368,28 @@ def solve(self, **kwargs):
assert 'random_seed' not in self.triqs_solver_params
if self.solver_params['type'] == 'cthyb':
+
if self.solver_params['delta_interface']:
- mpi.report('\n Using the delta interface for cthyb passing Delta(tau) and Hloc0 directly.')
- # prepare solver input
- sumk_eal = self.sum_k.eff_atomic_levels()[self.icrsh]
- solver_eal = self.sum_k.block_structure.convert_matrix(sumk_eal, space_from='sumk', ish_from=self.sum_k.inequiv_to_corr[self.icrsh])
- # fill Delta_time from Delta_freq sum_k to solver
- for name, g0 in self.G0_freq:
- self.Delta_freq[name] << iOmega_n - inverse(g0) - solver_eal[name]
- known_moments = make_zero_tail(self.Delta_freq[name], 1)
- tail, err = fit_hermitian_tail(self.Delta_freq[name], known_moments)
- # without SOC delta_tau needs to be real
- if not self.sum_k.SO == 1:
- self.triqs_solver.Delta_tau[name] << make_gf_from_fourier(self.Delta_freq[name], self.triqs_solver.Delta_tau.mesh, tail).real
- else:
- self.triqs_solver.Delta_tau[name] << make_gf_from_fourier(self.Delta_freq[name], self.triqs_solver.Delta_tau.mesh, tail)
-
- # Make non-interacting operator for Hloc0
- Hloc_0 = Operator()
- for spin, spin_block in solver_eal.items():
- for o1 in range(spin_block.shape[0]):
- for o2 in range(spin_block.shape[1]):
- # check if off-diag element is larger than threshold
- if o1 != o2 and abs(spin_block[o1,o2]) < self.solver_params['off_diag_threshold']:
- continue
- else:
- # TODO: adapt for SOC calculations, which should keep the imag part
- Hloc_0 += spin_block[o1,o2].real/2 * (c_dag(spin,o1) * c(spin,o2) + c_dag(spin,o2) * c(spin,o1))
- self.triqs_solver_params['h_loc0'] = Hloc_0
+ self.triqs_solver.Delta_tau << self.Delta_time
+ self.triqs_solver_params['h_loc0'] = self.Hloc_0
else:
# fill G0_freq from sum_k to solver
- self.triqs_solver.G0_iw << self.G0_freq
+ self.triqs_solver.G0_iw << make_hermitian(self.G0_freq)
# update solver in h5 archive one last time for debugging if solve command crashes
- if mpi.is_master_node():
+ if self.general_params['store_solver'] and mpi.is_master_node():
with HDFArchive(self.general_params['jobname']+'/'+self.general_params['seedname']+'.h5', 'a') as archive:
if not 'it_-1' in archive['DMFT_input/solver']:
archive['DMFT_input/solver'].create_group('it_-1')
+ archive['DMFT_input/solver/it_-1'][f'S_{self.icrsh}'] = self.triqs_solver
+ if self.solver_params['delta_interface']:
+ archive['DMFT_input/solver/it_-1'][f'Delta_time_{self.icrsh}'] = self.triqs_solver.Delta_tau
+ else:
+ archive['DMFT_input/solver/it_-1'][f'G0_freq_{self.icrsh}'] = self.triqs_solver.G0_iw
+ # archive['DMFT_input/solver/it_-1'][f'Delta_freq_{self.icrsh}'] = self.Delta_freq
+ archive['DMFT_input/solver/it_-1'][f'solve_params_{self.icrsh}'] = prep_params_for_h5(self.solver_params)
archive['DMFT_input/solver/it_-1'][f'triqs_solver_params_{self.icrsh}'] = prep_params_for_h5(self.triqs_solver_params)
archive['DMFT_input/solver/it_-1']['mpi_size'] = mpi.size
- if self.general_params['store_solver']:
- archive['DMFT_input/solver/it_-1'][f'S_{self.icrsh}'] = self.triqs_solver
mpi.barrier()
# Solve the impurity problem for icrsh shell
@@ -434,6 +429,20 @@ def solve(self, **kwargs):
if self.solver_params['measure_density_matrix']:
self.density_matrix = self.triqs_solver.dm
self.h_loc_diagonalization = self.triqs_solver.ad
+ # get moments
+ from triqs_cthyb.tail_fit import sigma_high_frequency_moments, green_high_frequency_moments
+ self.Sigma_moments = sigma_high_frequency_moments(self.density_matrix,
+ self.h_loc_diagonalization,
+ self.sum_k.gf_struct_solver_list[self.icrsh],
+ self.h_int
+ )
+ self.Sigma_Hartree = {bl: sigma_bl[0] for bl, sigma_bl in self.Sigma_moments.items()}
+ self.G_moments = green_high_frequency_moments(self.density_matrix,
+ self.h_loc_diagonalization,
+ self.sum_k.gf_struct_solver_list[self.icrsh],
+ self.h_int
+ )
+
# *************************************
# call postprocessing
@@ -483,6 +492,12 @@ def make_positive_definite(G):
# ensure that Delta is positive definite
self.Delta_freq_solver = make_positive_definite(self.Delta_freq_solver)
+ if self.general_params['store_solver'] and mpi.is_master_node():
+            with HDFArchive(self.general_params['jobname']+'/'+self.general_params['seedname']+'.h5', 'a') as archive:
+                if not 'it_-1' in archive['DMFT_input/solver']:
+                    archive['DMFT_input/solver'].create_group('it_-1')
+                archive['DMFT_input/solver/it_-1']['Delta_orig'] = self.Delta_freq_solver
+
# remove off-diagonal terms
if self.solver_params['diag_delta']:
for name, delta in self.Delta_freq_solver:
@@ -556,6 +571,8 @@ def make_positive_definite(G):
# store solver to h5 archive
if self.general_params['store_solver'] and mpi.is_master_node():
with HDFArchive(self.general_params['jobname']+'/'+self.general_params['seedname']+'.h5', 'a') as archive:
+ if not 'it_-1' in archive['DMFT_input/solver']:
+ archive['DMFT_input/solver'].create_group('it_-1')
-                archive['DMFT_input/solver'].create_group('it_-1')
archive['DMFT_input/solver/it_-1']['Delta'] = self.Delta_freq_solver
archive['DMFT_input/solver/it_-1']['S_'+str(self.icrsh)] = self.triqs_solver
@@ -593,15 +610,71 @@ def make_positive_definite(G):
if self.solver_params['type'] == 'ctseg':
# fill G0_freq from sum_k to solver
- self.triqs_solver.G0_iw << self.G0_freq
-
- if self.general_params['h_int_type'] == 'dynamic':
- for b1, b2 in product(self.sum_k.gf_struct_solver_dict[self.icrsh].keys(), repeat=2):
- self.triqs_solver.D0_iw[b1+"|"+b2] << self.U_iw[self.icrsh]
+ self.triqs_solver.Delta_tau << self.Delta_time
+ # extract hartree shift per orbital for solver
+ hloc_0_bm = block_matrix_from_op(self.Hloc_0, self.sum_k.gf_struct_solver_list[self.icrsh])
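+            # block_matrix_from_op returns one matrix per block of the solver structure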
+ hartree_shift = []
+ for block in hloc_0_bm:
+ for iorb in range(block.shape[0]):
+ # minus sign here as the solver treats the term as chemical potential and not Hloc0
+ hartree_shift.append(-block[iorb,iorb].real)
+ mpi.report('hartree_shift:', hartree_shift)
+
+ if self.general_params['h_int_type'][self.icrsh] == 'dyn_density_density':
+                mpi.report('Adding dynamic interaction from AIMBES (bdft)')
+                # convert the 4-index U tensor to 2-index tensors
+ ish = self.sum_k.inequiv_to_corr[self.icrsh]
+ # prepare dynamic 2 idx parts
+ Uloc_dlr = self.gw_params['Uloc_dlr'][self.icrsh]['up']
+ Uloc_dlr_2idx = Gf(mesh=Uloc_dlr.mesh, target_shape=[Uloc_dlr.target_shape[0],Uloc_dlr.target_shape[1]])
+ Uloc_dlr_2idx_prime = Gf(mesh=Uloc_dlr.mesh, target_shape=[Uloc_dlr.target_shape[0],Uloc_dlr.target_shape[1]])
+
+ for coeff in Uloc_dlr.mesh:
+ # Transposes rotation matrix here because TRIQS has a slightly different definition
+ if self.sum_k.use_rotations:
+ Uloc_dlr_idx = util.transform_U_matrix(Uloc_dlr[coeff], self.sum_k.rot_mat[ish].T)
+ else:
+ Uloc_dlr_idx = Uloc_dlr[coeff]
+ U, Uprime = reduce_4index_to_2index(Uloc_dlr_idx)
+                    # U: same-spin 2-index interaction (incl. exchange), Uprime: opposite-spin density-density
+ Uloc_dlr_2idx[coeff] = U
+ Uloc_dlr_2idx_prime[coeff] = Uprime
+
+                # create dense imaginary-time objects with n_tau_k points
+ Uloc_tau_2idx = make_gf_imtime(Uloc_dlr_2idx, n_tau=self.solver_params['n_tau_k'])
+ Uloc_tau_2idx_prime = make_gf_imtime(Uloc_dlr_2idx_prime, n_tau=self.solver_params['n_tau_k'])
+
+ # fill D0_tau from Uloc_tau_2idx and Uloc_tau_2idx_prime
+ norb = Uloc_dlr.target_shape[0]
+ # same spin interaction
+ self.triqs_solver.D0_tau[0:norb, 0:norb] << Uloc_tau_2idx.real
+ self.triqs_solver.D0_tau[norb:2*norb, norb:2*norb] << Uloc_tau_2idx.real
+ # opposite spin interaction
+ self.triqs_solver.D0_tau[0:norb, norb:2*norb] << Uloc_tau_2idx_prime.real
+ self.triqs_solver.D0_tau[norb:2*norb, 0:norb] << Uloc_tau_2idx_prime.real
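+                # D0_tau spans the full 2*norb x 2*norb flavour space; both spin blocks use the same (spin-independent) Uloc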
+
+                # TODO: add Jperp_iw to the solver
+
+ # self.triqs_solver. Jperp_iw << make_gf_imfreq(Uloc_dlr_2idx, n_iw=self.general_params['n_w_b_nn']) + V
+ mpi.report('\nLocal interaction Hamiltonian is:',self.h_int)
+ # update solver in h5 archive one last time for debugging if solve command crashes
+ if self.general_params['store_solver'] and mpi.is_master_node():
+ with HDFArchive(self.general_params['jobname']+'/'+self.general_params['seedname']+'.h5', 'a') as archive:
+ if 'it_-1' not in archive['DMFT_input/solver']:
+ archive['DMFT_input/solver'].create_group('it_-1')
+ archive['DMFT_input/solver/it_-1'][f'S_{self.icrsh}'] = self.triqs_solver
+ archive['DMFT_input/solver/it_-1'][f'Delta_time_{self.icrsh}'] = self.triqs_solver.Delta_tau
+ archive['DMFT_input/solver/it_-1'][f'solve_params_{self.icrsh}'] = prep_params_for_h5(self.solver_params)
+ archive['DMFT_input/solver/it_-1'][f'triqs_solver_params_{self.icrsh}'] = prep_params_for_h5(self.triqs_solver_params)
+ archive['DMFT_input/solver/it_-1']['mpi_size'] = mpi.size
+ if self.general_params['h_int_type'][self.icrsh] == 'dyn_density_density':
+ archive['DMFT_input/solver/it_-1'][f'Uloc_dlr_2idx_{self.icrsh}'] = Uloc_dlr_2idx
+ archive['DMFT_input/solver/it_-1'][f'Uloc_dlr_2idx_prime_{self.icrsh}'] = Uloc_dlr_2idx_prime
+ mpi.barrier()
# Solve the impurity problem for icrsh shell
# *************************************
- self.triqs_solver.solve(h_int=self.h_int, **self.triqs_solver_params)
+ self.triqs_solver.solve(h_int=self.h_int, hartree_shift=hartree_shift, **self.triqs_solver_params)
# *************************************
# call postprocessing
@@ -678,7 +751,7 @@ def _create_ctint_solver(self):
gf_struct = self.sum_k.gf_struct_solver_list[self.icrsh]
- if self.general_params['h_int_type'][self.icrsh] == 'dynamic':
+ if self.general_params['h_int_type'][self.icrsh] == 'dyn_density_density':
self.U_iw = None
if mpi.is_master_node():
with HDFArchive(self.general_params['jobname']+'/'+self.general_params['seedname']+'.h5', 'r') as archive:
@@ -825,47 +898,32 @@ def _create_ctseg_solver(self):
# Separately stores all params that go into solve() call of solver
self.triqs_solver_params = {}
- keys_to_pass = ('improved_estimator', 'length_cycle', 'max_time', 'measure_pert_order', 'n_warmup_cycles')
+ keys_to_pass = ('length_cycle', 'max_time', 'measure_statehist', 'measure_nnt')
for key in keys_to_pass:
self.triqs_solver_params[key] = self.solver_params[key]
# Calculates number of sweeps per rank
self.triqs_solver_params['n_cycles'] = int(self.solver_params['n_cycles_tot'] / mpi.size)
+ # cast warmup cycles to int in case given in scientific notation
+ self.triqs_solver_params['n_warmup_cycles'] = int(self.solver_params['n_warmup_cycles'])
        # Rename parameters that differ between ctseg and cthyb
self.triqs_solver_params['measure_gt'] = self.solver_params['measure_G_tau']
- self.triqs_solver_params['measure_gw'] = self.solver_params['measure_G_iw']
- self.triqs_solver_params['measure_gl'] = self.solver_params['measure_G_l']
- self.triqs_solver_params['measure_hist'] = self.solver_params['measure_pert_order']
+ self.triqs_solver_params['measure_perturbation_order_histograms'] = self.solver_params['measure_pert_order']
# Makes sure measure_gw is true if improved estimators are used
- if self.triqs_solver_params['improved_estimator']:
+ if self.solver_params['improved_estimator']:
self.triqs_solver_params['measure_gt'] = True
self.triqs_solver_params['measure_ft'] = True
else:
self.triqs_solver_params['measure_ft'] = False
- if self.general_params['h_int_type'][self.icrsh] == 'dynamic':
- self.U_iw = None
- if mpi.is_master_node():
- with HDFArchive(self.general_params['jobname']+'/'+self.general_params['seedname']+'.h5', 'r') as archive:
- self.U_iw = archive['dynamic_U']['U_iw']
- self.U_iw = mpi.bcast(self.U_iw)
- n_w_b_nn = self.U_iw[self.icrsh].mesh.last_index()+1
- else:
- n_w_b_nn = 1001
gf_struct = self.sum_k.gf_struct_solver_list[self.icrsh]
# Construct the triqs_solver instances
- if self.solver_params['measure_gl']:
- triqs_solver = ctseg_solver(beta=self.general_params['beta'], gf_struct=gf_struct,
- n_iw=self.general_params['n_iw'], n_tau=self.general_params['n_tau'],
- n_legendre_g=self.solver_params['n_l'], n_w_b_nn = n_w_b_nn, n_tau_k=int(n_w_b_nn*2.5), n_tau_jperp=int(n_w_b_nn*2.5))
- else:
- triqs_solver = ctseg_solver(beta=self.general_params['beta'], gf_struct=gf_struct,
- n_iw=self.general_params['n_iw'], n_tau=self.general_params['n_tau'],
- n_w_b_nn = n_w_b_nn, n_tau_k=int(n_w_b_nn*2.5), n_tau_jperp=int(n_w_b_nn*2.5))
-
+ triqs_solver = ctseg_solver(beta=self.general_params['beta'], gf_struct=gf_struct,
+ n_tau=self.general_params['n_tau'],
+ n_tau_k=int(self.solver_params['n_tau_k']))
return triqs_solver
def _create_ftps_solver(self):
@@ -972,7 +1030,72 @@ def set_Gs_from_G_l():
elif self.solver_params['perform_tail_fit'] and not self.solver_params['legendre_fit']:
# if tailfit has been used replace Sigma with the tail fitted Sigma from cthyb
self.Sigma_freq << self.triqs_solver.Sigma_iw
- self.sum_k.symm_deg_gf(self.Sigma_freq, ish=self.icrsh)
+ elif self.solver_params['crm_dyson_solver']:
+ from triqs.gf.dlr_crm_dyson_solver import minimize_dyson
+
+            mpi.report('\nCRM Dyson solver to extract the impurity Σ\n')
+ # fit QMC G_tau to DLR
+ if mpi.is_master_node():
+ G_dlr = fit_gf_dlr(self.triqs_solver.G_tau,
+ w_max=self.general_params['dlr_wmax'], eps=self.general_params['dlr_eps'])
+ self.G_time_dlr = make_gf_dlr_imtime(G_dlr)
+
+                # assume little error on G0_iw and use it to get G0_dlr
+ mesh_dlr_iw = MeshDLRImFreq(G_dlr.mesh)
+ G0_dlr_iw = self.sum_k.block_structure.create_gf(ish=self.icrsh, gf_function=Gf, mesh=mesh_dlr_iw, space='solver')
+ for block, gf in G0_dlr_iw:
+ for iwn in mesh_dlr_iw:
+ gf[iwn] = self.G0_freq[block](iwn)
+ self.sum_k.symm_deg_gf(G0_dlr_iw, ish=self.icrsh)
+
+ # average moments
+ self.sum_k.symm_deg_gf(self.triqs_solver.Sigma_Hartree, ish=self.icrsh)
+ first_mom = {}
+ for block, mom in self.triqs_solver.Sigma_moments.items():
+ first_mom[block] = mom[1]
+ self.sum_k.symm_deg_gf(first_mom, ish=self.icrsh)
+
+ for block, mom in self.triqs_solver.Sigma_moments.items():
+ mom[0] = self.triqs_solver.Sigma_Hartree[block]
+ mom[1] = first_mom[block]
+
+ # minimize dyson for the first entry of each deg shell
+ self.Sigma_dlr = self.sum_k.block_structure.create_gf(ish=self.icrsh, gf_function=Gf, mesh=mesh_dlr_iw, space='solver')
+ # without any degenerate shells we run the minimization for all blocks
+ if self.sum_k.deg_shells[self.icrsh] == []:
+ for block, gf in self.Sigma_dlr:
+ np.random.seed(85281)
+ print('Minimizing Dyson via CRM for Σ[block]:', block)
+                        self.Sigma_dlr[block], _, _ = minimize_dyson(G0_dlr=G0_dlr_iw[block],
+                                                                     G_dlr=G_dlr[block],
+                                                                     Sigma_moments=self.triqs_solver.Sigma_moments[block]
+                                                                     )
+ else:
+ for deg_shell in self.sum_k.deg_shells[self.icrsh]:
+ for i, block in enumerate(deg_shell):
+ if i == 0:
+ np.random.seed(85281)
+ print('Minimizing Dyson via CRM for Σ[block]:', block)
+ self.Sigma_dlr[block], _, _ = minimize_dyson(G0_dlr=G0_dlr_iw[block],
+ G_dlr=G_dlr[block],
+ Sigma_moments=self.triqs_solver.Sigma_moments[block]
+ )
+ sol_block = block
+ else:
+ self.Sigma_dlr[block] << self.Sigma_dlr[sol_block]
+
+ print(f'Copying result from first block of deg list to {block}')
+ print('\n')
+
+ self.Sigma_freq = make_gf_imfreq(self.Sigma_dlr, n_iw=self.general_params['n_iw'])
+ for block, gf in self.Sigma_freq:
+ gf += self.triqs_solver.Sigma_moments[block][0]
+
+ mpi.barrier()
+ self.Sigma_freq = mpi.bcast(self.Sigma_freq)
+ self.Sigma_dlr = mpi.bcast(self.Sigma_dlr)
+ self.G_time = mpi.bcast(self.G_time)
+ self.G_time_dlr = mpi.bcast(self.G_time_dlr)
else:
# obtain Sigma via dyson from symmetrized G_freq
self.Sigma_freq << inverse(self.G0_freq) - inverse(self.G_freq)
@@ -981,6 +1104,9 @@ def set_Gs_from_G_l():
if self.solver_params['measure_density_matrix']:
self.density_matrix = self.triqs_solver.density_matrix
self.h_loc_diagonalization = self.triqs_solver.h_loc_diagonalization
+ self.Sigma_moments = self.triqs_solver.Sigma_moments
+ self.Sigma_Hartree = self.triqs_solver.Sigma_Hartree
+ self.G_moments = self.triqs_solver.G_moments
if self.solver_params['measure_pert_order']:
self.perturbation_order = self.triqs_solver.perturbation_order
@@ -1162,7 +1288,7 @@ def _ctseg_postprocessing(self):
def set_Gs_from_G_l():
- if self.solver_params['measure_ft'] and mpi.is_master_node():
+ if self.solver_params['improved_estimator'] and mpi.is_master_node():
print('\n !!!!WARNING!!!! \n you enabled both improved estimators and legendre based filtering / sampling. Sigma will be overwritten by legendre result. \n !!!!WARNING!!!!\n')
# create new G_freq and G_time
@@ -1183,51 +1309,42 @@ def set_Gs_from_G_l():
# first print average sign
if mpi.is_master_node():
- print('\nAverage sign: {}'.format(self.triqs_solver.average_sign))
+ print('\nAverage sign: {}'.format(self.triqs_solver.results.sign))
# get Delta_time from solver
self.Delta_time << self.triqs_solver.Delta_tau
- self.G_time << self.triqs_solver.G_tau
+ self.G_time << self.triqs_solver.results.G_tau
self.sum_k.symm_deg_gf(self.G_time, ish=self.icrsh)
- if self.solver_params['measure_gw']:
- self.G_freq << self.triqs_solver.G_iw
- else:
- if mpi.is_master_node():
- # create empty moment container (list of np.arrays)
- Gf_known_moments = make_zero_tail(self.G_freq,n_moments=2)
- for i, bl in enumerate(self.G_freq.indices):
- # 0 moment is 0, dont touch it, but first moment is 1 for the Gf
- Gf_known_moments[i][1] = np.eye(self.G_freq[bl].target_shape[0])
- self.G_freq[bl] << Fourier(self.G_time[bl], Gf_known_moments[i])
- self.G_freq << mpi.bcast(self.G_freq)
+ if mpi.is_master_node():
+ # create empty moment container (list of np.arrays)
+ Gf_known_moments = make_zero_tail(self.G_freq,n_moments=2)
+ for i, bl in enumerate(self.G_freq.indices):
+                # 0th moment is 0, don't touch it; the 1st moment of G is the identity
+ Gf_known_moments[i][1] = np.eye(self.G_freq[bl].target_shape[0])
+ self.G_freq[bl] << Fourier(self.G_time[bl], Gf_known_moments[i])
+ self.G_freq << mpi.bcast(self.G_freq)
self.G_freq << make_hermitian(self.G_freq)
self.G_freq_unsym << self.G_freq
self.sum_k.symm_deg_gf(self.G_freq, ish=self.icrsh)
# if measured in Legendre basis, get G_l from solver too
- if self.solver_params['measure_gl']:
- # store original G_time into G_time_orig
- self.G_time_orig << self.triqs_solver.G_tau
- self.G_l << self.triqs_solver.G_l
- # get G_time, G_freq, Sigma_freq from G_l
- set_Gs_from_G_l()
- elif self.solver_params['legendre_fit']:
- self.G_time_orig << self.triqs_solver.G_tau
+ if self.solver_params['legendre_fit']:
+ self.G_time_orig << self.triqs_solver.results.G_tau
self.G_l << legendre_filter.apply(self.G_time, self.solver_params['n_l'])
# get G_time, G_freq, Sigma_freq from G_l
set_Gs_from_G_l()
# if improved estimators are turned on calc Sigma from F_tau, otherwise:
- elif self.solver_params['measure_ft']:
+ elif self.solver_params['improved_estimator']:
self.F_freq = self.G_freq.copy()
self.F_freq << 0.0
self.F_time = self.G_time.copy()
- self.F_time << self.triqs_solver.F_tau
+ self.F_time << self.triqs_solver.results.F_tau
F_known_moments = make_zero_tail(self.F_freq, n_moments=1)
if mpi.is_master_node():
for i, bl in enumerate(self.F_freq.indices):
- self.F_freq[bl] << Fourier(self.triqs_solver.F_tau[bl], F_known_moments[i])
+ self.F_freq[bl] << Fourier(self.triqs_solver.results.F_tau[bl], F_known_moments[i])
# fit tail of improved estimator and G_freq
self.F_freq << _gf_fit_tail_fraction(self.F_freq, fraction=0.9, replace=0.5, known_moments=F_known_moments)
self.G_freq << _gf_fit_tail_fraction(self.G_freq ,fraction=0.9, replace=0.5, known_moments=Gf_known_moments)
@@ -1238,12 +1355,78 @@ def set_Gs_from_G_l():
for iw in fw.mesh:
self.Sigma_freq[block][iw] = self.F_freq[block][iw] / self.G_freq[block][iw]
+ elif self.solver_params['crm_dyson_solver']:
+ from triqs.gf.dlr_crm_dyson_solver import minimize_dyson
+
+            mpi.report('\nCRM Dyson solver to extract the impurity Σ\n')
+ # fit QMC G_tau to DLR
+ if mpi.is_master_node():
+ G_dlr = fit_gf_dlr(self.triqs_solver.results.G_tau,
+ w_max=self.general_params['dlr_wmax'], eps=self.general_params['dlr_eps'])
+ self.G_time_dlr = make_gf_dlr_imtime(G_dlr)
+ self.G_freq = make_gf_imfreq(G_dlr, n_iw=self.general_params['n_iw'])
+
+                # assume little error on G0_iw and use it to get G0_dlr
+ mesh_dlr_iw = MeshDLRImFreq(G_dlr.mesh)
+ G0_dlr_iw = self.sum_k.block_structure.create_gf(ish=self.icrsh, gf_function=Gf, mesh=mesh_dlr_iw, space='solver')
+ for block, gf in G0_dlr_iw:
+ for iwn in mesh_dlr_iw:
+ gf[iwn] = self.G0_freq[block](iwn)
+ self.sum_k.symm_deg_gf(G0_dlr_iw, ish=self.icrsh)
+ G0_dlr = make_gf_dlr(G0_dlr_iw)
+
+ # get Hartree shift for optimizer
+ G0_iw = make_gf_imfreq(G0_dlr, n_iw=self.general_params['n_iw'])
+ G_iw = make_gf_imfreq(G_dlr, n_iw=self.general_params['n_iw'])
+ Sigma_iw = inverse(G0_iw) - inverse(G_iw)
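+                # this direct Dyson Sigma is only used to fit the high-frequency moments that constrain the CRM minimization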
+
+ # minimize dyson for the first entry of each deg shell
+ self.Sigma_dlr = self.sum_k.block_structure.create_gf(ish=self.icrsh, gf_function=Gf, mesh=mesh_dlr_iw, space='solver')
+ # without any degenerate shells we run the minimization for all blocks
+ if self.sum_k.deg_shells[self.icrsh] == []:
+ for block, gf in self.Sigma_dlr:
+                            tail, err = Sigma_iw[block].fit_hermitian_tail()
+                            np.random.seed(85281)
+                            print('Minimizing Dyson via CRM for Σ[block]:', block)
+                            self.Sigma_dlr[block], _, _ = minimize_dyson(G0_dlr=G0_dlr_iw[block],
+                                                                         G_dlr=G_dlr[block],
+                                                                         Sigma_moments=tail[0:1]
+                                                                         )
+                            self.Sigma_freq[block] = make_gf_imfreq(self.Sigma_dlr[block], n_iw=self.general_params['n_iw'])
+                            self.Sigma_freq[block] += tail[0]
+ else:
+ for deg_shell in self.sum_k.deg_shells[self.icrsh]:
+ for i, block in enumerate(deg_shell):
+ if i == 0:
+ tail, err = Sigma_iw[block].fit_hermitian_tail()
+ np.random.seed(85281)
+ print('Minimizing Dyson via CRM for Σ[block]:', block)
+ self.Sigma_dlr[block], _, _ = minimize_dyson(G0_dlr=G0_dlr_iw[block],
+ G_dlr=G_dlr[block],
+ Sigma_moments=tail[0:1]
+ )
+ sol_block = block
+ else:
+ print(f'Copying result from first block of deg list to {block}')
+ self.Sigma_dlr[block] << self.Sigma_dlr[sol_block]
+
+ self.Sigma_freq[block] = make_gf_imfreq(self.Sigma_dlr[block], n_iw=self.general_params['n_iw'])
+ self.Sigma_freq[block] += tail[0]
+ print('\n')
+
+
+ mpi.barrier()
+ self.Sigma_freq = mpi.bcast(self.Sigma_freq)
+ self.Sigma_dlr = mpi.bcast(self.Sigma_dlr)
+ self.G_time_dlr = mpi.bcast(self.G_time_dlr)
+ self.G_freq = mpi.bcast(self.G_freq)
else:
- mpi.report('\n!!!! WARNING !!!! tail of solver output not handled! Turn on either measure_ft, legendre_fit, or measure_gl\n')
+                mpi.report('\n!!!! WARNING !!!! tail of solver output not handled! Turn on legendre_fit, improved_estimator, or crm_dyson_solver\n')
self.Sigma_freq << inverse(self.G0_freq) - inverse(self.G_freq)
- if self.solver_params['measure_hist']:
- self.perturbation_order = self.triqs_solver.histogram
+ if self.solver_params['measure_statehist']:
+ self.state_histogram = self.triqs_solver.results.state_hist
+
+ if self.solver_params['measure_pert_order']:
+ self.perturbation_order_histo = self.triqs_solver.results.perturbation_order_histo_Delta
return
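For orientation, the CRM Dyson route added above reduces to three ingredients: compress the sampled G(tau) into a DLR representation, sample the corresponding Weiss field on the same DLR Matsubara nodes, and hand both to the constrained-residual-minimization Dyson solver together with the known static moment of Σ. A minimal standalone sketch, mirroring the calls in the hunk above; the function name, the beta/w_max/eps defaults and the way the moments are obtained are placeholders, not solid_dmft parameters:

from triqs.gf import Gf, fit_gf_dlr, make_gf_imfreq
from triqs.gf.meshes import MeshDLRImFreq
from triqs.gf.dlr_crm_dyson_solver import minimize_dyson

def sigma_from_crm(G_tau_qmc, G0_iw, Sigma_moments, beta, w_max=10.0, eps=1e-10, n_iw=1025):
    # compress the noisy QMC G(tau) into a compact DLR representation
    G_dlr = fit_gf_dlr(G_tau_qmc, w_max=w_max, eps=eps)

    # sample the (numerically exact) Weiss field on the DLR Matsubara nodes
    mesh_dlr_iw = MeshDLRImFreq(beta=beta, statistic='Fermion', w_max=w_max, eps=eps)
    G0_dlr_iw = Gf(mesh=mesh_dlr_iw, target_shape=G_tau_qmc.target_shape)
    for iwn in mesh_dlr_iw:
        G0_dlr_iw[iwn] = G0_iw(iwn)

    # constrained minimization of the Dyson residual; Sigma_moments[0] is the
    # Hartree shift, e.g. tail[0:1] from fit_hermitian_tail of the raw Dyson Sigma
    Sigma_dlr, _, _ = minimize_dyson(G0_dlr=G0_dlr_iw, G_dlr=G_dlr, Sigma_moments=Sigma_moments)

    # back to a dense Matsubara mesh and restore the static shift
    Sigma_iw = make_gf_imfreq(Sigma_dlr, n_iw=n_iw)
    Sigma_iw += Sigma_moments[0]
    return Sigma_iw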
diff --git a/python/solid_dmft/gw_embedding/bdft_converter.py b/python/solid_dmft/gw_embedding/bdft_converter.py
new file mode 100644
index 00000000..adc9a2b6
--- /dev/null
+++ b/python/solid_dmft/gw_embedding/bdft_converter.py
@@ -0,0 +1,436 @@
+# -*- coding: utf-8 -*-
+################################################################################
+#
+# solid_dmft - A versatile python wrapper to perform DFT+DMFT calculations
+# utilizing the TRIQS software library
+#
+# Copyright (C) 2018-2020, ETH Zurich
+# Copyright (C) 2021, The Simons Foundation
+# authors: A. Hampel, M. Merkel, and S. Beck
+#
+# solid_dmft is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, either version 3 of the License, or (at your option) any later
+# version.
+#
+# solid_dmft is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License along with
+# solid_dmft (in the file COPYING.txt in this directory). If not, see
+# <http://www.gnu.org/licenses/>.
+#
+################################################################################
+"""
+converter from bdft output to edmft input for solid_dmft
+"""
+
+import numpy as np
+from scipy.constants import physical_constants
+
+
+from h5 import HDFArchive
+from triqs.utility import mpi
+from triqs.gf import (
+ Gf,
+ BlockGf,
+ make_gf_dlr_imtime,
+ make_gf_dlr,
+ make_gf_dlr_imfreq,
+)
+from triqs.gf.meshes import MeshDLRImFreq, MeshDLRImTime
+import itertools
+
+from solid_dmft.gw_embedding.iaft import IAFT
+
+HARTREE_EV = physical_constants['Hartree energy in eV'][0]
+
+def _get_dlr_from_IR(Gf_ir, ir_kernel, mesh_dlr_iw, dim=2):
+ n_orb = Gf_ir.shape[-1]
+ stats = 'f' if mesh_dlr_iw.statistic == 'Fermion' else 'b'
+
+ if stats == 'b':
+ Gf_ir_pos = Gf_ir.copy()
+ Gf_ir = np.zeros([Gf_ir_pos.shape[0] * 2 - 1] + [n_orb] * dim, dtype=complex)
+ Gf_ir[: Gf_ir_pos.shape[0]] = Gf_ir_pos[::-1]
+ Gf_ir[Gf_ir_pos.shape[0] :] = Gf_ir_pos[1:]
+
+ Gf_dlr_iw = Gf(mesh=mesh_dlr_iw, target_shape=[n_orb] * dim)
+
+    # prepare index array of the DLR Matsubara points for the sparse IR interpolation
+    mesh_dlr_iw_idx = np.array([iwn.index for iwn in mesh_dlr_iw])
+
+ Gf_dlr_iw.data[:] = ir_kernel.w_interpolate(Gf_ir, mesh_dlr_iw_idx, stats=stats, ir_notation=False)
+
+ Gf_dlr = make_gf_dlr(Gf_dlr_iw)
+ return Gf_dlr
+
+
+def calc_Sigma_DC_gw(Wloc_dlr, Gloc_dlr, Vloc, verbose=False):
+ if isinstance(Gloc_dlr, BlockGf):
+ Sig_DC_dlr_list = []
+ Sig_DC_hartree_list = {}
+ Sig_DC_exchange_list = {}
+ for block, gloc in Gloc_dlr:
+ res = calc_Sigma_DC_gw(Wloc_dlr[block], gloc, Vloc[block], verbose)
+ Sig_DC_dlr_list.append(res[0])
+ Sig_DC_hartree_list[block] = res[1]
+ Sig_DC_exchange_list[block] = res[2]
+
+ return (
+ BlockGf(name_list=list(Gloc_dlr.indices), block_list=Sig_DC_dlr_list),
+ Sig_DC_hartree_list,
+ Sig_DC_exchange_list,
+ )
+
+ n_orb = Gloc_dlr.target_shape[0]
+
+ # dynamic part
+ Gloc_dlr_t = make_gf_dlr_imtime(Gloc_dlr)
+ Sig_dlr_t = Gf(mesh=Gloc_dlr_t.mesh, target_shape=Gloc_dlr_t.target_shape)
+
+ Wloc_dlr_t = make_gf_dlr_imtime(Wloc_dlr)
+
+ for tau in Gloc_dlr_t.mesh:
+ # Wloc_dlr is bosonic and the mesh has a different hash, use call to get value at tau point
+        Sig_dlr_t[tau] = -1 * np.einsum('ijkl, jk -> li', Wloc_dlr(tau.value), Gloc_dlr_t[tau])
+
+ Sig_DC_dlr = make_gf_dlr(Sig_dlr_t)
+
+ # static hartree Part
+ Sig_DC_hartree = np.zeros((n_orb, n_orb))
+ Sig_DC_hartree = 2 * np.einsum('ijkl, lj -> ik', Vloc, Gloc_dlr.density())
+ # symmetrize
+ Sig_DC_hartree = 0.5 * (Sig_DC_hartree + Sig_DC_hartree.conj().T)
+
+ if verbose:
+ print('static Hartree part of DC')
+ print(Sig_DC_hartree.real)
+ if np.any(np.imag(Sig_DC_hartree) > 1e-3):
+ print('Im:')
+ print(np.imag(Sig_DC_hartree))
+
+ # static exchange part
+ Sig_DC_exchange = np.zeros((n_orb, n_orb))
+ Sig_DC_exchange = -1 * np.einsum('ijkl, jk -> li', Vloc, Gloc_dlr.density())
+ # symmetrize
+ Sig_DC_exchange = 0.5 * (Sig_DC_exchange + Sig_DC_exchange.conj().T)
+
+ if verbose:
+ print('static exchange part of DC')
+ print(Sig_DC_exchange.real)
+ if np.any(np.imag(Sig_DC_exchange) > 1e-3):
+ print('Im:')
+ print(np.imag(Sig_DC_exchange))
+ return Sig_DC_dlr, Sig_DC_hartree, Sig_DC_exchange
+
+
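In formulas, the three contributions assembled by calc_Sigma_DC_gw are (with rho the local density matrix of Gloc; this is a direct transcription of the einsum contractions above):

\Sigma^{\mathrm{DC}}_{li}(\tau) = -\sum_{jk} W_{ijkl}(\tau)\, G_{jk}(\tau), \qquad
\Sigma^{\mathrm{H}}_{ik} = 2\sum_{jl} V_{ijkl}\, \rho_{lj}, \qquad
\Sigma^{\mathrm{x}}_{li} = -\sum_{jk} V_{ijkl}\, \rho_{jk},

where the factor 2 in the Hartree term accounts for the spin sum and both static pieces are symmetrized afterwards.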
+def calc_W_from_Gloc(Gloc_dlr: Gf | BlockGf, U: np.ndarray | dict) -> Gf | BlockGf:
+ r"""
+
+ Calculate Wijkl from given constant U tensor and Gf on DLRMesh
+
+
+ triqs notation for Uijkl = phi*_i(r) phi*_j(r') U(r,r') phi_l'(r') phi_k(r)
+ = Uijkl c^+_i c^+_j' c_l' c_k
+
+ where the ' denotes a spin index different from the other without '
+
+ the according diagram is (left and right have same spin):
+
+ j (phi) k' (phi)
+ \ /
+ < <
+ \__________/
+ / \
+ > >
+ / \
+ i (phi*) l'
+
+ we now have to move to a product basis form to combine two indices
+ i.e. go from nb,nb,nb,nb to nb**2,nb**2 tensors:
+
+ Uji,kl = phi*_i(r) phi_j(r) U(r,r') phi*_k(r') phi_l(r')
+ = Psi*_ji(r) U(r,r') Psi_kl(r')
+
+ So we have to transform the triqs notation of Uijkl -> Uki,jl, i.e.
+    swap col/rows as (2,0,1,3) to go to that basis and in the end
+ swap W_ki,jl back in reverse.
+
+    Then we compute the bubble polarizability as
+
+ Pi_ab,kl(tau) = -2 G_bl(tau) G_ka(beta - tau)
+
+ So that
+
+ [ U Pi(iwn) ]_ji,kl = sum_ab U_ji,ab Pi_ab,kl(iwn)
+
+ i.e.
+ j' a ___
+ \ / \ k
+ < < \
+ \__________/ \
+ / \ /
+ > > /
+ / \___/ l
+ i' b
+
+ then the screened Coulomb interaction in product basis is:
+
+ W_ji,kl(iwn) = [1 - U Pi(iwn) ]^-1_ji,kl Uji,kl - Uji,kl
+
+ (subtract static shift here), and finally convert back to triqs notation.
+
+
+ Parameters
+ ----------
+ Gloc_dlr : BlockGf or Gf with MeshDLR
+
+    U : np.ndarray with shape [Gloc_dlr.target_shape]*4 or dict of np.ndarray
+
+ Returns
+ -------
+ W_dlr : BlockGf or Gf
+ screened Coulomb interaction
+ """
+
+ if isinstance(Gloc_dlr, BlockGf):
+ Wloc_list = []
+ for block, gloc in Gloc_dlr:
+ if isinstance(U, np.ndarray):
+ Wloc_list.append(calc_W_from_Gloc(gloc, U))
+ else:
+ Wloc_list.append(calc_W_from_Gloc(gloc, U[block]))
+
+ return BlockGf(name_list=list(Gloc_dlr.indices), block_list=Wloc_list)
+
+ nb = Gloc_dlr.target_shape[0]
+ Gloc_dlr_t = make_gf_dlr_imtime(Gloc_dlr)
+ mesh_bos = MeshDLRImTime(
+ beta=Gloc_dlr.mesh.beta,
+ statistic='Boson',
+ w_max=Gloc_dlr.mesh.w_max,
+ eps=Gloc_dlr.mesh.eps,
+ )
+
+ PI_dlr_t = Gf(mesh=mesh_bos, target_shape=[nb] * 4)
+ for tau in Gloc_dlr_t.mesh:
+ PI_dlr_t[tau] = -2 * np.einsum('bl, ka -> abkl', Gloc_dlr_t[tau], Gloc_dlr(Gloc_dlr_t.mesh.beta - tau))
+
+ PI_dlr = make_gf_dlr(PI_dlr_t)
+ PI_dlr_w = make_gf_dlr_imfreq(PI_dlr)
+
+ # need to swap indices and go into product basis
+ U_prod = np.transpose(U, (2, 0, 1, 3)).reshape(nb**2, nb**2)
+
+ W_dlr_w = Gf(mesh=PI_dlr_w.mesh, target_shape=[nb] * 4)
+
+ ones = np.eye(nb**2)
+ for w in PI_dlr_w.mesh:
+ eps = ones - U_prod @ PI_dlr_w[w].reshape(nb**2, nb**2)
+ # in product basis W_ji,kl
+ W_dlr_w[w] = (np.linalg.inv(eps) @ U_prod - U_prod).reshape(nb, nb, nb, nb)
+
+ # swap indices back
+ W_dlr_w[w] = np.transpose(W_dlr_w[w], (1, 2, 0, 3))
+ W_dlr = make_gf_dlr(W_dlr_w)
+
+ return W_dlr
+
+
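A minimal usage sketch of the two helpers above on a toy single-orbital model; beta, the pole position and the constant interaction tensor are made-up illustration values:

import numpy as np
from triqs.gf import Gf, make_gf_dlr, make_gf_imfreq
from triqs.gf.meshes import MeshDLRImFreq
from solid_dmft.gw_embedding.bdft_converter import calc_W_from_Gloc, calc_Sigma_DC_gw

# toy local Green's function with a single pole, compressed to DLR coefficients
mesh = MeshDLRImFreq(beta=40.0, statistic='Fermion', w_max=10.0, eps=1e-10)
g_iw = Gf(mesh=mesh, target_shape=(1, 1))
for iwn in mesh:
    g_iw[iwn] = np.eye(1) / (iwn.value + 0.5)
Gloc_dlr = make_gf_dlr(g_iw)

# constant bare interaction tensor of shape (n_orb,)*4, as expected by both helpers
Vloc = np.zeros((1, 1, 1, 1), dtype=complex)
Vloc[0, 0, 0, 0] = 4.0

W_dlr = calc_W_from_Gloc(Gloc_dlr, Vloc)                 # dynamic (screened minus bare) part
Sig_dc_dlr, Sig_H, Sig_x = calc_Sigma_DC_gw(W_dlr, Gloc_dlr, Vloc)

Sig_dc_iw = make_gf_imfreq(Sig_dc_dlr, n_iw=1025)        # dynamic DC self-energy on Matsubara
Sig_dc_iw += Sig_H + Sig_x                               # add the static Hartree and exchange parts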
+def convert_gw_output(job_h5, gw_h5, it_1e=0, it_2e=0, ha_ev_conv = False):
+ """
+ read bdft output and convert to triqs Gf DLR objects
+
+ Parameters
+ ----------
+ job_h5: string
+ path to solid_dmft job file
+ gw_h5: string
+ path to GW checkpoint file for AIMBES code
+ it_1e: int, optional
+ iteration to read from gw_h5 calculation for 1e downfolding, defaults to last iteration
+ it_2e: int, optional
+ iteration to read from gw_h5 calculation for 2e downfolding, defaults to last iteration
+ ha_ev_conv: bool, optional
+ convert energies from Hartree to eV, defaults to False
+
+ Returns
+ -------
+ gw_data: dict
+ dictionary holding all read objects: mu_emb, beta, lam, w_max, prec, mesh_dlr_iw_b,
+ mesh_dlr_iw_f, n_orb, G0_dlr, Gloc_dlr, Sigma_imp_dlr, Sigma_imp_DC_dlr, Uloc_dlr,
+ Vloc, Hloc0, Vhf_dc, Vhf
+ ir_kernel: sparse_ir kernel object
+        IR kernel with AIMBES parameters
+ """
+
+ mpi.report('reading output from bdft code')
+
+ gw_data = {}
+
+ if ha_ev_conv:
+ conv_fac = HARTREE_EV
+ else:
+ conv_fac = 1.0
+
+ with HDFArchive(gw_h5, 'r') as ar:
+        if not it_1e:
+            it_1e = ar['downfold_1e/final_iter']
+        if not it_2e:
+            it_2e = ar['downfold_2e/final_iter']
+
+ mpi.report(f'Reading results from downfold_1e iter {it_1e} and downfold_2e iter {it_2e} from given AIMBES chkpt file.')
+
+        # auxiliary quantities
+ gw_data['it_1e'] = it_1e
+ gw_data['it_2e'] = it_2e
+ gw_data['mu_emb'] = ar[f'downfold_1e/iter{it_1e}']['mu']
+ gw_data['beta'] = ar['imaginary_fourier_transform']['beta']
+ gw_data['lam'] = ar['imaginary_fourier_transform']['lambda']
+ gw_data['gw_wmax'] = gw_data['lam'] / gw_data['beta']
+ gw_data['number_of_spins'] = ar['system/number_of_spins']
+ assert gw_data['number_of_spins'] == 1, 'spin calculations not yet supported in converter'
+
+ prec = ar['imaginary_fourier_transform']['prec']
+ if prec == 'high':
+ # set to highest DLR precision possible
+ gw_data['gw_ir_prec'] = 1e-15
+ gw_data['gw_dlr_prec'] = 1e-13
+ elif prec == 'mid':
+ gw_data['gw_ir_prec'] = 1e-10
+ gw_data['gw_dlr_prec'] = 1e-10
+ elif prec == 'low':
+ gw_data['gw_ir_prec'] = 1e-6
+ gw_data['gw_dlr_prec'] = 1e-6
+
+ # 1 particle properties
+ g_weiss_wsIab = ar[f'downfold_1e/iter{it_1e}']['g_weiss_wsIab']
+ Sigma_dc_wsIab = ar[f'downfold_1e/iter{it_1e}']['Sigma_dc_wsIab']
+ Gloc = ar[f'downfold_1e/iter{it_1e}']['Gloc_wsIab']
+ gw_data['n_inequiv_shells'] = Gloc.shape[2]
+
+ # 2 particle properties
+ # TODO: discuss how the site index is used right now in bDFT
+ Vloc_jk = ar[f'downfold_2e/iter{it_2e}']['Vloc_abcd']
+ Uloc_ir_jk = ar[f'downfold_2e/iter{it_2e}']['Uloc_wabcd'][:, ...]
+ # switch inner two indices to match triqs notation
+ Vloc = np.zeros(Vloc_jk.shape, dtype=complex)
+ Uloc_ir = np.zeros(Uloc_ir_jk.shape, dtype=complex)
+ n_orb = Vloc.shape[0]
+ for or1, or2, or3, or4 in itertools.product(range(n_orb), repeat=4):
+ Vloc[or1, or2, or3, or4] = Vloc_jk[or1, or3, or2, or4]
+ for ir_w in range(Uloc_ir_jk.shape[0]):
+ Uloc_ir[ir_w, or1, or2, or3, or4] = Uloc_ir_jk[ir_w, or1, or3, or2, or4]
+
+ # get Hloc_0
+ Vhf_dc_sIab = ar[f'downfold_1e/iter{it_1e}']['Vhf_dc_sIab'][0, 0]
+ Vhf_sIab = ar[f'downfold_1e/iter{it_1e}']['Vhf_gw_sIab'][0, 0]
+ H0_loc = ar[f'downfold_1e/iter{it_1e}']['H0_sIab']
+
+ if 'Vcorr_gw_sIab' in ar[f'downfold_1e/iter{it_1e}']:
+            mpi.report('Found Vcorr_gw_sIab in the bdft checkpoint file, '
+ 'i.e. Embedding on top of an effective QP Hamiltonian.')
+ Vcorr_sIab = ar[f'downfold_1e/iter{it_1e}/Vcorr_gw_sIab'][0, 0]
+ Vcorr_dc_sIab = ar[f'downfold_1e/iter{it_1e}/Vcorr_dc_sIab'][0, 0]
+ Hloc0 = -1*(np.eye(n_orb) * gw_data['mu_emb'] - H0_loc[0,0] - Vhf_sIab - Vcorr_sIab + Vhf_dc_sIab + Vcorr_dc_sIab)
+ qp_emb = True
+ else:
+ Sigma_wsIab = ar[f'downfold_1e/iter{it_1e}']['Sigma_gw_wsIab']
+ qp_emb = False
+ Hloc0 = -1*(np.eye(n_orb) * gw_data['mu_emb'] - H0_loc[0,0] - (Vhf_sIab-Vhf_dc_sIab))
+
+ # get IR object
+ mpi.report('create IR kernel and convert to DLR')
+ # create IR kernel
+ ir_kernel = IAFT(beta=gw_data['beta'], lmbda=gw_data['lam'], prec=gw_data['gw_ir_prec'])
+
+ gw_data['mesh_dlr_iw_b'] = MeshDLRImFreq(
+ beta=gw_data['beta']/conv_fac,
+ statistic='Boson',
+ w_max=gw_data['gw_wmax']*conv_fac,
+ eps=gw_data['gw_dlr_prec'],
+ )
+ gw_data['mesh_dlr_iw_f'] = MeshDLRImFreq(
+ beta=gw_data['beta']/conv_fac,
+ statistic='Fermion',
+ w_max=gw_data['gw_wmax']*conv_fac,
+ eps=gw_data['gw_dlr_prec'],
+ )
+
+ (
+ U_dlr_list,
+ G0_dlr_list,
+ Gloc_dlr_list,
+ Sigma_dlr_list,
+ Sigma_DC_dlr_list,
+ V_list,
+ Hloc_list,
+ Vhf_list,
+ Vhf_dc_list,
+ n_orb_list,
+ ) = [], [], [], [], [], [], [], [], [], []
+ for ish in range(gw_data['n_inequiv_shells']):
+ # fit IR Uloc on DLR iw mesh
+ temp = _get_dlr_from_IR(Uloc_ir*conv_fac, ir_kernel, gw_data['mesh_dlr_iw_b'], dim=4)
+ Uloc_dlr = BlockGf(name_list=['up', 'down'], block_list=[temp, temp], make_copies=True)
+
+ U_dlr_list.append(Uloc_dlr)
+ V_list.append({'up': Vloc.copy()*conv_fac, 'down': Vloc*conv_fac})
+ Hloc_list.append({'up': Hloc0.copy()*conv_fac, 'down': Hloc0*conv_fac})
+ Vhf_list.append({'up': Vhf_sIab.copy()*conv_fac, 'down': Vhf_sIab*conv_fac})
+ Vhf_dc_list.append({'up': Vhf_dc_sIab.copy()*conv_fac, 'down': Vhf_dc_sIab*conv_fac})
+ n_orb_list.append(n_orb)
+
+ temp = _get_dlr_from_IR(g_weiss_wsIab[:, 0, ish, :, :]/conv_fac, ir_kernel, gw_data['mesh_dlr_iw_f'], dim=2)
+ G0_dlr = BlockGf(name_list=['up', 'down'], block_list=[temp, temp], make_copies=True)
+ G0_dlr_list.append(G0_dlr)
+
+ temp = _get_dlr_from_IR(Gloc[:, 0, ish, :, :]/conv_fac, ir_kernel, gw_data['mesh_dlr_iw_f'], dim=2)
+ Gloc_dlr = BlockGf(name_list=['up', 'down'], block_list=[temp, temp], make_copies=True)
+ Gloc_dlr_list.append(Gloc_dlr)
+
+ # since Sigma can have a static shift we return DLR Imfreq mesh
+ if not qp_emb:
+ temp = _get_dlr_from_IR(Sigma_wsIab[:, 0, ish, :, :]*conv_fac, ir_kernel, gw_data['mesh_dlr_iw_f'], dim=2)
+ Sigma_dlr = BlockGf(name_list=['up', 'down'], block_list=[temp, temp], make_copies=True)
+ Sigma_dlr_list.append(Sigma_dlr)
+
+ temp = _get_dlr_from_IR(Sigma_dc_wsIab[:, 0, ish, :, :]*conv_fac, ir_kernel, gw_data['mesh_dlr_iw_f'], dim=2)
+ Sigma_DC_dlr = BlockGf(name_list=['up', 'down'], block_list=[temp, temp], make_copies=True)
+ Sigma_DC_dlr_list.append(Sigma_DC_dlr)
+
+ gw_data['G0_dlr'] = G0_dlr_list
+ gw_data['Gloc_dlr'] = Gloc_dlr_list
+ gw_data['Sigma_imp_dlr'] = Sigma_dlr_list
+ gw_data['Sigma_imp_DC_dlr'] = Sigma_DC_dlr_list
+ gw_data['Uloc_dlr'] = U_dlr_list
+ gw_data['Vloc'] = V_list
+ gw_data['Hloc0'] = Hloc_list
+ gw_data['Vhf_dc'] = Vhf_dc_list
+ gw_data['Vhf'] = Vhf_list
+ gw_data['n_orb'] = n_orb_list
+
+ # write Uloc / Wloc back to h5 archive
+ mpi.report(f'writing results in {job_h5}/DMFT_input')
+
+ with HDFArchive(job_h5, 'a') as ar:
+ if 'DMFT_results' in ar and 'iteration_count' in ar['DMFT_results']:
+ it = ar['DMFT_results']['iteration_count'] + 1
+ else:
+ it = 1
+ if 'DMFT_input' not in ar:
+ ar.create_group('DMFT_input')
+ if f'iter{it}' not in ar['DMFT_input']:
+ ar['DMFT_input'].create_group(f'iter{it}')
+
+ for key, value in gw_data.items():
+ ar[f'DMFT_input/iter{it}'][key] = value
+
+ mpi.report(f'finished writing results in {job_h5}/DMFT_input')
+ return gw_data, ir_kernel
+
+
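A short usage sketch of the converter; the file names are placeholders, and in the GW embedding workflow this call is issued on the master node by gw_flow.embedding_driver:

from solid_dmft.gw_embedding.bdft_converter import convert_gw_output

# read the last 1e/2e downfolding iterations from the AIMBES checkpoint and mirror
# everything as DLR objects into DMFT_input/iter<n> of the solid_dmft archive
gw_data, ir_kernel = convert_gw_output(job_h5='svo.h5', gw_h5='aimbes_chkpt.h5')

Gloc_dlr = gw_data['Gloc_dlr'][0]     # BlockGf (up/down) on a DLR mesh for the first shell
print(gw_data['mu_emb'], gw_data['n_orb'], gw_data['beta'])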
diff --git a/python/solid_dmft/gw_embedding/gw_flow.py b/python/solid_dmft/gw_embedding/gw_flow.py
new file mode 100644
index 00000000..52f9fb97
--- /dev/null
+++ b/python/solid_dmft/gw_embedding/gw_flow.py
@@ -0,0 +1,491 @@
+# -*- coding: utf-8 -*-
+################################################################################
+#
+# solid_dmft - A versatile python wrapper to perform DFT+DMFT calculations
+# utilizing the TRIQS software library
+#
+# Copyright (C) 2018-2020, ETH Zurich
+# Copyright (C) 2021, The Simons Foundation
+# authors: A. Hampel, M. Merkel, and S. Beck
+#
+# solid_dmft is free software: you can redistribute it and/or modify it under the
+# terms of the GNU General Public License as published by the Free Software
+# Foundation, either version 3 of the License, or (at your option) any later
+# version.
+#
+# solid_dmft is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+# You should have received a copy of the GNU General Public License along with
+# solid_dmft (in the file COPYING.txt in this directory). If not, see
+# <http://www.gnu.org/licenses/>.
+#
+################################################################################
+# pyright: reportUnusedExpression=false
+"""
+Module for gw flow
+"""
+
+from timeit import default_timer as timer
+import numpy as np
+
+from h5 import HDFArchive
+from triqs.utility import mpi
+from triqs.gf.tools import inverse
+from triqs.gf import (
+ Gf,
+ BlockGf,
+ make_hermitian,
+ make_gf_dlr,
+ make_gf_imfreq,
+ make_gf_imtime,
+ make_gf_dlr_imfreq,
+)
+from triqs.version import git_hash as triqs_hash
+from triqs.version import version as triqs_version
+from triqs.gf.meshes import MeshImFreq
+from triqs.operators import c_dag, c, Operator
+from triqs_dft_tools.block_structure import BlockStructure
+
+from solid_dmft.version import solid_dmft_hash
+from solid_dmft.version import version as solid_dmft_version
+from solid_dmft.dmft_tools import formatter
+from solid_dmft.dmft_tools import results_to_archive
+from solid_dmft.dmft_tools.solver import SolverStructure
+from solid_dmft.dmft_tools import interaction_hamiltonian
+from solid_dmft.dmft_cycle import _extract_quantity_per_inequiv
+from solid_dmft.gw_embedding.bdft_converter import convert_gw_output
+
+
+class dummy_sumk(object):
+ """
+ create dummy sumk helper object
+ """
+
+ def __init__(self, n_inequiv_shells, n_orb_list, enforce_off_diag, use_rot, magnetic):
+ self.n_inequiv_shells = n_inequiv_shells
+ self.SO = 0
+ self.use_rotations = use_rot
+ if self.use_rotations:
+ raise ValueError('rotations not implemented yet for GW embedding')
+ self.gf_struct_solver = []
+ self.gf_struct_sumk = []
+ self.spin_block_names = []
+ self.inequiv_to_corr = []
+ self.corr_to_inequiv = []
+ self.deg_shells = []
+ self.dc_energ = [0.0 for ish in range(self.n_inequiv_shells)]
+ self.sumk_to_solver = [{} for ish in range(self.n_inequiv_shells)]
+ self.solver_to_sumk = [{} for ish in range(self.n_inequiv_shells)]
+ self.solver_to_sumk_block = [{} for ish in range(self.n_inequiv_shells)]
+ for ish in range(self.n_inequiv_shells):
+ self.inequiv_to_corr.append(ish)
+ self.corr_to_inequiv.append(ish)
+ self.spin_block_names.append(['up', 'down'])
+ self.gf_struct_sumk.append([('up', n_orb_list[ish]), ('down', n_orb_list[ish])])
+
+ # use full off-diagonal block structure in impurity solver
+ if enforce_off_diag:
+ self.gf_struct_solver.append({'up_0': n_orb_list[ish], 'down_0': n_orb_list[ish]})
+ if not magnetic:
+ self.deg_shells.append([['up_0', 'down_0']])
+ # setup standard mapping between sumk and solver
+ for block, inner_dim in self.gf_struct_sumk[ish]:
+ self.solver_to_sumk_block[ish][f'{block}_0'] = block
+ for iorb in range(inner_dim):
+ self.sumk_to_solver[ish][(block, iorb)] = (block + '_0', iorb)
+ self.solver_to_sumk[ish][(block + '_0', iorb)] = (block, iorb)
+ else:
+ self.gf_struct_solver.append({})
+ self.deg_shells.append([])
+ for block, inner_dim in self.gf_struct_sumk[ish]:
+ for iorb in range(inner_dim):
+ self.gf_struct_solver[ish][f'{block}_{iorb}'] = 1
+ if not magnetic and block == 'up':
+ self.deg_shells[ish].append([f'up_{iorb}', f'down_{iorb}'])
+ # setup standard mapping between sumk and solver
+ self.solver_to_sumk_block[ish][f'{block}_{iorb}'] = block
+ self.sumk_to_solver[ish][(block, iorb)] = (f'{block}_{iorb}', 0)
+ self.solver_to_sumk[ish][(f'{block}_{iorb}', 0)] = (block, iorb)
+
+
+ self.gf_struct_solver_list = [sorted([(k, v) for k, v in list(gfs.items())], key=lambda x: x[0]) for gfs in self.gf_struct_solver]
+
+        # create block_structure object for the solver
+ self.block_structure = BlockStructure(
+ gf_struct_sumk=self.gf_struct_sumk,
+ gf_struct_solver=self.gf_struct_solver,
+ solver_to_sumk=self.solver_to_sumk,
+ sumk_to_solver=self.sumk_to_solver,
+ solver_to_sumk_block=self.solver_to_sumk_block,
+ deg_shells=self.deg_shells,
+ corr_to_inequiv = self.corr_to_inequiv,
+ transformation=None,
+ )
+
+ def symm_deg_gf(self, gf_to_symm, ish=0):
+ r"""
+ Averages a GF or a dict of np.ndarrays over degenerate shells.
+
+ Degenerate shells of an inequivalent correlated shell are defined by
+ `self.deg_shells`. This function enforces corresponding degeneracies
+ in the input GF.
+
+ Parameters
+ ----------
+ gf_to_symm : gf_struct_solver like
+ Input and output GF (i.e., it gets overwritten)
+ or dict of np.ndarrays.
+ ish : int
+ Index of an inequivalent shell. (default value 0)
+
+ """
+
+ # when reading block_structures written with older versions from
+ # an h5 file, self.deg_shells might be None
+ if self.deg_shells is None:
+ return
+
+ if not isinstance(gf_to_symm, BlockGf) and isinstance(gf_to_symm[list(gf_to_symm.keys())[0]], np.ndarray):
+ blockgf = False
+ elif isinstance(gf_to_symm, BlockGf):
+ blockgf = True
+ else:
+ raise ValueError("gf_to_symm should be either a BlockGf or a dict of numpy arrays")
+
+ for degsh in self.deg_shells[ish]:
+ # ss will hold the averaged orbitals in the basis where the
+ # blocks are all equal
+ # i.e. maybe_conjugate(v^dagger gf v)
+ ss = None
+ n_deg = len(degsh)
+ for key in degsh:
+ if ss is None:
+ if blockgf:
+ ss = gf_to_symm[key].copy()
+ ss.zero()
+ helper = ss.copy()
+ else:
+ ss = np.zeros_like(gf_to_symm[key])
+ helper = np.zeros_like(gf_to_symm[key])
+
+ # get the transformation matrix
+ if isinstance(degsh, dict):
+ v, C = degsh[key]
+ else:
+ # for backward compatibility, allow degsh to be a list
+ if blockgf:
+ v = np.eye(*ss.target_shape)
+ else:
+ v = np.eye(*ss.shape)
+ C = False
+ # the helper is in the basis where the blocks are all equal
+ if blockgf:
+ helper.from_L_G_R(v.conjugate().transpose(), gf_to_symm[key], v)
+ else:
+ helper = np.dot(v.conjugate().transpose(), np.dot(gf_to_symm[key], v))
+
+ if C:
+ helper << helper.transpose()
+ # average over all shells
+ ss += helper / (1.0 * n_deg)
+ # now put back the averaged gf to all shells
+ for key in degsh:
+ if isinstance(degsh, dict):
+ v, C = degsh[key]
+ else:
+ # for backward compatibility, allow degsh to be a list
+ if blockgf:
+ v = np.eye(*ss.target_shape)
+ else:
+ v = np.eye(*ss.shape)
+ C = False
+ if blockgf and C:
+ gf_to_symm[key].from_L_G_R(v, ss.transpose().copy(), v.conjugate().transpose())
+ elif blockgf and not C:
+ gf_to_symm[key].from_L_G_R(v, ss, v.conjugate().transpose())
+ elif not blockgf and C:
+ gf_to_symm[key] = np.dot(v, np.dot(ss.transpose().copy(), v.conjugate().transpose()))
+ elif not blockgf and not C:
+ gf_to_symm[key] = np.dot(v, np.dot(ss, v.conjugate().transpose()))
+
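To make the bookkeeping above concrete, a small sketch of what the helper produces for a single two-orbital impurity (purely illustrative values):

# one impurity with two orbitals, no rotations, paramagnetic
sumk = dummy_sumk(n_inequiv_shells=1, n_orb_list=[2], enforce_off_diag=True,
                  use_rot=False, magnetic=False)
print(sumk.gf_struct_solver[0])   # {'up_0': 2, 'down_0': 2}   -> full 2x2 solver blocks
print(sumk.deg_shells[0])         # [['up_0', 'down_0']]       -> spins marked as degenerate

# with enforce_off_diag=False every orbital becomes its own 1x1 block
sumk_diag = dummy_sumk(1, [2], enforce_off_diag=False, use_rot=False, magnetic=False)
print(sumk_diag.gf_struct_solver[0])   # {'up_0': 1, 'up_1': 1, 'down_0': 1, 'down_1': 1}
print(sumk_diag.deg_shells[0])         # [['up_0', 'down_0'], ['up_1', 'down_1']]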
+def embedding_driver(general_params, solver_params, gw_params, advanced_params):
+ """
+ Function to run the gw embedding cycle.
+
+ Parameters
+ ----------
+ general_params : dict
+ general parameters as a dict
+ solver_params : dict
+ solver parameters as a dict
+ gw_params : dict
+        gw parameters as a dict
+ advanced_params : dict
+ advanced parameters as a dict
+ """
+
+ assert gw_params['code'] == 'aimbes', 'Only AIMBES is currently supported as gw code'
+
+ iteration = 1
+ # prepare output h5 archive
+ if mpi.is_master_node():
+ with HDFArchive(general_params['jobname'] + '/' + general_params['seedname'] + '.h5', 'a') as ar:
+ if 'DMFT_results' not in ar:
+ ar.create_group('DMFT_results')
+ if 'last_iter' not in ar['DMFT_results']:
+ ar['DMFT_results'].create_group('last_iter')
+ if 'DMFT_input' not in ar:
+ ar.create_group('DMFT_input')
+ ar['DMFT_input']['program'] = 'solid_dmft'
+ ar['DMFT_input'].create_group('solver')
+ ar['DMFT_input'].create_group('version')
+ ar['DMFT_input']['version']['triqs_hash'] = triqs_hash
+ ar['DMFT_input']['version']['triqs_version'] = triqs_version
+ ar['DMFT_input']['version']['solid_dmft_hash'] = solid_dmft_hash
+ ar['DMFT_input']['version']['solid_dmft_version'] = solid_dmft_version
+
+ # make sure each iteration is saved to h5 file
+ general_params['h5_save_freq'] = 1
+
+    # load GW input from h5 file
+ if mpi.is_master_node():
+ gw_data, ir_kernel = convert_gw_output(
+ general_params['jobname'] + '/' + general_params['seedname'] + '.h5',
+ gw_params['h5_file'],
+ it_1e = gw_params['it_1e'],
+ it_2e = gw_params['it_2e'],
+ )
+ gw_params.update(gw_data)
+ mpi.barrier()
+ gw_params = mpi.bcast(gw_params)
+ iteration = gw_params['it_1e']
+
+ # if GW calculation was performed with spin never average spin channels
+ if gw_params['number_of_spins'] == 2:
+ general_params['magnetic'] = True
+
+ # dummy helper class for sumk
+ sumk = dummy_sumk(gw_params['n_inequiv_shells'], gw_params['n_orb'],
+ general_params['enforce_off_diag'], gw_params['use_rot'],
+ general_params['magnetic'])
+
+ sumk.mesh = MeshImFreq(beta=gw_params['beta'], statistic='Fermion', n_iw=general_params['n_iw'])
+ sumk.chemical_potential = gw_params['mu_emb']
+ sumk.dc_imp = gw_params['Vhf_dc']
+ general_params['beta'] = gw_params['beta']
+
+ # create h_int
+ general_params = _extract_quantity_per_inequiv('h_int_type', sumk.n_inequiv_shells, general_params)
+ general_params = _extract_quantity_per_inequiv('dc_type', sumk.n_inequiv_shells, general_params)
+
+ h_int, gw_params = interaction_hamiltonian.construct(sumk, general_params, advanced_params, gw_params)
+
+ if len(solver_params) == 1 and solver_params[0]['idx_impurities'] is None:
+ map_imp_solver = [0] * sumk.n_inequiv_shells
+ else:
+ all_idx_imp = [i for entry in solver_params for i in entry['idx_impurities']]
+ if sorted(all_idx_imp) != list(range(sumk.n_inequiv_shells)):
+            raise ValueError('All impurities must be listed exactly once in solver.idx_impurities '
+                             f'but instead got {all_idx_imp}')
+
+ map_imp_solver = []
+ for iineq in range(sumk.n_inequiv_shells):
+ for isolver, entry in enumerate(solver_params):
+ if iineq in entry['idx_impurities']:
+ map_imp_solver.append(isolver)
+ break
+ solver_type_per_imp = [solver_params[map_imp_solver[iineq]]['type'] for iineq in range(sumk.n_inequiv_shells)]
+ mpi.report(f'\nSolver type per impurity: {solver_type_per_imp}')
+
+ # create solver objects
+ solvers = [None] * sumk.n_inequiv_shells
+ if mpi.is_master_node():
+ Sigma_dlr = [None] * sumk.n_inequiv_shells
+ Sigma_dlr_iw = [None] * sumk.n_inequiv_shells
+ ir_mesh_idx = ir_kernel.wn_mesh(stats='f',ir_notation=False)
+ ir_mesh = (2*ir_mesh_idx+1)*np.pi/gw_params['beta']
+ Sigma_ir = np.zeros((len(ir_mesh_idx),
+ gw_params['number_of_spins'],
+ sumk.n_inequiv_shells,max(gw_params['n_orb']),max(gw_params['n_orb'])),
+ dtype=complex)
+ Vhf_imp_sIab = np.zeros((gw_params['number_of_spins'],
+ sumk.n_inequiv_shells,
+ max(gw_params['n_orb']),max(gw_params['n_orb'])),dtype=complex)
+
+ for ish in range(sumk.n_inequiv_shells):
+ # Construct the Solver instances
+ solvers[ish] = SolverStructure(general_params, solver_params[map_imp_solver[ish]],
+ gw_params, advanced_params, sumk, ish, h_int[ish])
+
+ # init local density matrices for observables
+ density_tot = 0.0
+ density_shell = np.zeros(sumk.n_inequiv_shells)
+ density_mat = [None] * sumk.n_inequiv_shells
+ density_mat_unsym = [None] * sumk.n_inequiv_shells
+ density_shell_pre = np.zeros(sumk.n_inequiv_shells)
+ density_mat_pre = [None] * sumk.n_inequiv_shells
+
+ if sumk.SO:
+ printed = ((np.real, 'real'), (np.imag, 'imaginary'))
+ else:
+ printed = ((np.real, 'real'),)
+
+ for ish in range(sumk.n_inequiv_shells):
+ density_shell_pre[ish] = np.real(gw_params['Gloc_dlr'][ish].total_density())
+ mpi.report(
+ '\n *** Correlated Shell type #{:3d} : '.format(ish)
+ + 'Estimated total charge of impurity problem = {:.6f}'.format(density_shell_pre[ish])
+ )
+ density_mat_pre[ish] = gw_params['Gloc_dlr'][ish].density()
+ mpi.report('Estimated density matrix:')
+ for key, value in sorted(density_mat_pre[ish].items()):
+ for func, name in printed:
+ mpi.report('{}, {} part'.format(key, name))
+ mpi.report(func(value))
+
+ if not general_params['enforce_off_diag']:
+ mpi.report('\n*** WARNING: off-diagonal elements are neglected in the impurity solver ***')
+
+ if ((solver_type_per_imp[ish] == 'cthyb' and solver_params[ish]['delta_interface'])
+ or solver_type_per_imp[ish] == 'ctseg'):
+ mpi.report('\n Using the delta interface for passing Delta(tau) and Hloc0 directly to the solver.\n')
+ Gloc_dlr = sumk.block_structure.convert_gf(gw_params['Gloc_dlr'][ish], ish_from=ish, space_from='sumk', space_to='solver')
+ # prepare solver input
+ imp_eal = sumk.block_structure.convert_matrix(gw_params['Hloc0'][ish], ish_from=ish, space_from='sumk', space_to='solver')
+ # fill Delta_time from Delta_freq sumk to solver
+ for name, g0 in Gloc_dlr:
+ G0_dlr_iw = make_gf_dlr_imfreq(g0)
+ # make non-interacting impurity Hamiltonian hermitian
+ imp_eal[name] = (imp_eal[name] + imp_eal[name].T.conj())/2
+ if mpi.is_master_node():
+ print('H_loc0[{:2d}] block: {}'.format(ish, name))
+ fmt = '{:11.7f}' * imp_eal[name].shape[0]
+ for block in imp_eal[name]:
+ print((' '*11 + fmt).format(*block.real))
+
+ Delta_dlr_iw = Gf(mesh=G0_dlr_iw.mesh, target_shape=g0.target_shape)
+ for iw in G0_dlr_iw.mesh:
+ Delta_dlr_iw[iw] = iw.value - inverse(G0_dlr_iw[iw]) - imp_eal[name]
+
+                # create the full Delta(tau) from the DLR representation
+                Delta_dlr = make_gf_dlr(Delta_dlr_iw)
+                Delta_tau = make_hermitian(make_gf_imtime(Delta_dlr, n_tau=general_params['n_tau']))
+                # without SOC delta_tau needs to be real
+                if not sumk.SO == 1:
+                    solvers[ish].Delta_time[name] << Delta_tau.real
+                else:
+                    solvers[ish].Delta_time[name] << Delta_tau
+
+ if solver_params[ish]['diag_delta']:
+ for o1 in range(imp_eal[name].shape[0]):
+ for o2 in range(imp_eal[name].shape[0]):
+ if o1 != o2:
+ solvers[ish].Delta_time[name].data[:, o1, o2] = 0.0 + 0.0j
+
+ # Make non-interacting operator for Hloc0
+ Hloc_0 = Operator()
+ for spin, spin_block in imp_eal.items():
+ for o1 in range(spin_block.shape[0]):
+ for o2 in range(spin_block.shape[1]):
+ # check if off-diag element is larger than threshold
+ if o1 != o2 and abs(spin_block[o1, o2]) < solver_params[ish]['off_diag_threshold']:
+ continue
+ else:
+ # TODO: adapt for SOC calculations, which should keep the imag part
+ Hloc_0 += spin_block[o1, o2].real / 2 * (c_dag(spin, o1) * c(spin, o2) + c_dag(spin, o2) * c(spin, o1))
+ solvers[ish].Hloc_0 = Hloc_0
+ else:
+ # convert G0 to solver basis
+ G0_dlr = sumk.block_structure.convert_gf(gw_params['G0_dlr'][ish], ish_from=ish, space_from='sumk', space_to='solver')
+ # dyson equation to extract G0_freq, using Hermitian symmetry
+ solvers[ish].G0_freq << make_hermitian(make_gf_imfreq(G0_dlr, n_iw=general_params['n_iw']))
+
+ mpi.report('\nSolving the impurity problem for shell {} ...'.format(ish))
+ mpi.barrier()
+ start_time = timer()
+ solvers[ish].solve()
+ mpi.barrier()
+ mpi.report('Actual time for solver: {:.2f} s'.format(timer() - start_time))
+
+ # some printout of the obtained density matrices and some basic checks from the unsymmetrized solver output
+ density_shell[ish] = np.real(solvers[ish].G_freq_unsym.total_density())
+ density_tot += density_shell[ish]
+ density_mat_unsym[ish] = solvers[ish].G_freq_unsym.density()
+ density_mat[ish] = solvers[ish].G_freq.density()
+ formatter.print_local_density(density_shell[ish], density_shell_pre[ish], density_mat_unsym[ish], sumk.SO)
+ mpi.report('')
+
+ # post-processing for GW
+ if mpi.is_master_node():
+ if solver_params[ish]['type'] in ('cthyb', 'ctseg') and solver_params[ish]['crm_dyson_solver']:
+ Sigma_dlr[ish] = make_gf_dlr(solvers[ish].Sigma_dlr)
+ else:
+ Sigma_dlr_iw[ish] = sumk.block_structure.create_gf(ish=ish,
+ gf_function=Gf,
+ space='solver',
+ mesh=gw_params['mesh_dlr_iw_f'])
+ for w in Sigma_dlr_iw[ish].mesh:
+ for block, gf in Sigma_dlr_iw[ish]:
+ gf[w] = solvers[ish].Sigma_freq[block](w)-solvers[ish].Sigma_Hartree[block]
+
+ sumk.symm_deg_gf(Sigma_dlr_iw[ish],ish=ish)
+ Sigma_dlr[ish] = make_gf_dlr(Sigma_dlr_iw[ish])
+
+ for i, (block, gf) in enumerate(Sigma_dlr[ish]):
+ # print Hartree shift
+ print('Σ_HF {}'.format(block))
+ fmt = '{:11.7f}' * solvers[ish].Sigma_Hartree[block].shape[0]
+ for vhf in solvers[ish].Sigma_Hartree[block]:
+ print((' '*11 + fmt).format(*vhf.real))
+
+ # average Hartree shift if not magnetic
+ if not general_params['magnetic']:
+ if general_params['enforce_off_diag']:
+ solvers[ish].Sigma_Hartree['up_0'] = 0.5*(solvers[ish].Sigma_Hartree['up_0']+
+ solvers[ish].Sigma_Hartree['down_0'])
+ solvers[ish].Sigma_Hartree['down_0'] = solvers[ish].Sigma_Hartree['up_0']
+ else:
+ for iorb in range(gw_params['n_orb'][ish]):
+ solvers[ish].Sigma_Hartree[f'up_{iorb}'] = 0.5*(solvers[ish].Sigma_Hartree[f'up_{iorb}']+
+ solvers[ish].Sigma_Hartree[f'down_{iorb}'])
+ solvers[ish].Sigma_Hartree[f'down_{iorb}'] = solvers[ish].Sigma_Hartree[f'up_{iorb}']
+
+ iw_mesh = solvers[ish].Sigma_freq.mesh
+ # convert Sigma to sumk basis
+ Sigma_dlr_sumk = sumk.block_structure.convert_gf(Sigma_dlr[ish], ish_from=ish, space_from='solver', space_to='sumk')
+ Sigma_Hartree_sumk = sumk.block_structure.convert_matrix(solvers[ish].Sigma_Hartree, ish_from=ish, space_from='solver', space_to='sumk')
+ # store Sigma and V_HF in sumk basis on IR mesh
+ for i, (block, gf) in enumerate(Sigma_dlr_sumk):
+ Vhf_imp_sIab[i,ish] = Sigma_Hartree_sumk[block]
+ for iw in range(len(ir_mesh_idx)):
+ Sigma_ir[iw,i,ish] = gf(iw_mesh(ir_mesh_idx[iw]))
+
+ if not general_params['magnetic']:
+ break
+
+ # Writes results to h5 archive
+ if mpi.is_master_node():
+ with HDFArchive(general_params['jobname'] + '/' + general_params['seedname'] + '.h5', 'a') as ar:
+ results_to_archive.write(ar, sumk, general_params, solver_params, solvers,
+ map_imp_solver, solver_type_per_imp, iteration,
+ False, gw_params['mu_emb'], density_mat_pre, density_mat)
+
+ # store also IR / DLR Sigma
+ ar['DMFT_results/it_{}'.format(iteration)]['ir_mesh'] = ir_mesh
+ ar['DMFT_results/it_{}'.format(iteration)]['Sigma_imp_wsIab'] = Sigma_ir
+ ar['DMFT_results/it_{}'.format(iteration)]['Vhf_imp_sIab'] = Vhf_imp_sIab
+ for ish in range(sumk.n_inequiv_shells):
+ ar['DMFT_results/it_{}'.format(iteration)][f'Sigma_dlr_{ish}'] = Sigma_dlr[ish]
+
+ # write results to GW h5_file
+ with HDFArchive(gw_params['h5_file'],'a') as ar:
+ ar[f'downfold_1e/iter{iteration}']['Sigma_imp_wsIab'] = Sigma_ir
+ ar[f'downfold_1e/iter{iteration}']['Vhf_imp_sIab'] = Vhf_imp_sIab
+
+
+ mpi.report('*** iteration finished ***')
+ mpi.report('#'*80)
+ mpi.barrier()
+ return
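The driver closes the loop with the GW code by writing Σ_imp on the IR frequency grid plus the impurity Hartree potential back into the AIMBES checkpoint. A small sketch of how these datasets can be inspected afterwards; the file name and the assumption that the last downfolding iteration was used are placeholders:

import numpy as np
from h5 import HDFArchive

with HDFArchive('aimbes_chkpt.h5', 'r') as ar:
    it = ar['downfold_1e/final_iter']
    Sigma_ir = ar[f'downfold_1e/iter{it}/Sigma_imp_wsIab']   # (n_iw_ir, n_spin, n_shells, n_orb, n_orb)
    Vhf_imp = ar[f'downfold_1e/iter{it}/Vhf_imp_sIab']       # (n_spin, n_shells, n_orb, n_orb)

print(Sigma_ir.shape, np.max(np.abs(Vhf_imp.imag)))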
diff --git a/python/solid_dmft/gw_embedding/iaft.py b/python/solid_dmft/gw_embedding/iaft.py
new file mode 100644
index 00000000..568d2141
--- /dev/null
+++ b/python/solid_dmft/gw_embedding/iaft.py
@@ -0,0 +1,276 @@
+import numpy as np
+import sparse_ir
+
+"""
+Fourier transform on the imaginary axis based on IR basis and the sparse sampling technique.
+"""
+
+
+class IAFT(object):
+ """
+ Driver for FT on the imaginary axis.
+    Given inverse temperature, lambda and precision, the IAFT class evaluates the corresponding
+ IR basis and sparse sampling points on-the-fly.
+
+ Dependency:
+        sparse-ir with xprec support (https://sparse-ir.readthedocs.io/en/latest/)
+        To install sparse-ir with xprec support: "pip install sparse-ir[xprec]".
+
+ Attributes:
+ beta: float
+ Inverse temperature (a.u.)
+ lmbda: float
+ Dimensionless lambda parameter for constructing the IR basis
+ prec: float
+ Precision for IR basis
+ bases: sparse-ir.FiniteTempBasisSet
+ IR basis instance
+ tau_mesh_f: numpy.ndarray(dim=1)
+ Fermionic tau sampling points
+ tau_mesh_b: numpy.ndarray(dim=1)
+ Bosonic tau sampling points
+ wn_mesh_f: numpy.ndarray(dim=1)
+ Fermionic Matsubara "indices" sampling points. NOT PHYSICAL FREQUENCIES.
+ Physical Matsubara frequencies are wn_mesh_f * numpy.pi / beta
+ wn_mesh_b: numpy.ndarray(dim=1)
+ Bosonic Matsubara "indices" sampling points. NOT PHYSICAL FREQUENCIES.
+            Physical Matsubara frequencies are wn_mesh_b * numpy.pi / beta
+ nt_f: int
+ Number of fermionic tau sampling points
+ nt_b: int
+ Number of bosonic tau sampling points
+ nw_f: int
+ Number of fermionic frequency sampling points
+ nw_b: int
+ Number of bosonic frequency sampling points
+ """
+ def __init__(self, beta: float, lmbda: float, prec: float = 1e-15):
+ """
+ :param beta: float
+ Inverse temperature (a.u.)
+ :param lmbda: float
+ Lambda parameter for constructing IR basis.
+ :param prec: float
+ Precision for IR basis
+ """
+ self.beta = beta
+ self.lmbda = lmbda
+ self.prec = prec
+ self.wmax = lmbda / beta
+        self.statistics = {'f', 'b'}
+
+ self.bases = sparse_ir.FiniteTempBasisSet(beta=beta, wmax=self.wmax, eps=prec)
+ self.tau_mesh_f = self.bases.smpl_tau_f.sampling_points
+ self.tau_mesh_b = self.bases.smpl_tau_b.sampling_points
+ self._wn_mesh_f = self.bases.smpl_wn_f.sampling_points
+ self._wn_mesh_b = self.bases.smpl_wn_b.sampling_points
+ self.nt_f, self.nw_f = self.tau_mesh_f.shape[0], self._wn_mesh_f.shape[0]
+ self.nt_b, self.nw_b = self.tau_mesh_b.shape[0], self._wn_mesh_b.shape[0]
+
+ Ttl_ff = self.bases.basis_f.u(self.tau_mesh_f).T
+ Twl_ff = self.bases.basis_f.uhat(self._wn_mesh_f).T
+ Ttl_bb = self.bases.basis_b.u(self.tau_mesh_b).T
+ Twl_bb = self.bases.basis_b.uhat(self._wn_mesh_b).T
+
+ self.Tlt_ff = np.linalg.pinv(Ttl_ff)
+ self.Tlt_bb = np.linalg.pinv(Ttl_bb)
+ self.Tlw_ff = np.linalg.pinv(Twl_ff)
+ self.Tlw_bb = np.linalg.pinv(Twl_bb)
+
+ # Ttw_ff = Ttl_ff * [Twl_ff]^{-1}
+ self.Ttw_ff = np.dot(Ttl_ff, self.Tlw_ff)
+ self.Twt_ff = np.dot(Twl_ff, self.Tlt_ff)
+ self.Ttw_bb = np.dot(Ttl_bb, self.Tlw_bb)
+ self.Twt_bb = np.dot(Twl_bb, self.Tlt_bb)
+
+ print(self)
+
+ def __str__(self):
+ return "*******************************\n" \
+ "Imaginary-Axis Fourier Transform based on IR basis and sparse-sampling:\n" \
+ "*******************************\n" \
+ " - precision = {}\n" \
+ " - beta = {}\n" \
+ " - lambda = {}\n" \
+ " - nt_f, nw_f = {}, {}\n" \
+ " - nt_b, nw_b = {}, {}\n" \
+ "*******************************".format(self.prec, self.beta, self.lmbda, self.nt_f, self.nw_f,
+ self.nt_b, self.nw_b)
+
+ def wn_mesh(self, stats: str, ir_notation: bool = True):
+ """
+ Return Matsubara frequency indices.
+ :param stats: str
+ statistics: 'f' for fermions and 'b' for bosons
+ :param ir_notation: bool
+            Whether the returned indices are in sparse_ir notation where iwn = n*pi/beta for both fermions and bosons.
+ Otherwise, iwn = (2n+1)*pi/beta for fermions and 2n*pi/beta for bosons.
+
+ :return: numpy.ndarray(dim=1)
+ Matsubara frequency indices
+ """
+        if stats not in self.statistics:
+ raise ValueError("Unknown statistics '{}'. "
+ "Acceptable options are 'f' for fermion and 'b' for bosons.".format(stats))
+ wn_mesh = np.array(self._wn_mesh_f, dtype=int) if stats == 'f' else np.array(self._wn_mesh_b, dtype=int)
+ if not ir_notation:
+ wn_mesh = (wn_mesh-1)//2 if stats == 'f' else wn_mesh//2
+ return wn_mesh
+
+ def tau_to_w(self, Ot, stats: str):
+ """
+ Fourier transform from imaginary-time axis to Matsubara-frequency axis
+ :param Ot: numpy.ndarray
+ imaginary-time object with dimensions (nts, ...)
+ :param stats: str
+ statistics: 'f' for fermions and 'b' for bosons
+
+ :return: numpy.ndarray
+ Matsubara-frequency object with dimensions (nw, ...)
+ """
+        if stats not in self.statistics:
+ raise ValueError("Unknown statistics '{}'. "
+ "Acceptable options are 'f' for fermion and 'b' for bosons.".format(stats))
+ Twt = self.Twt_ff if stats == 'f' else self.Twt_bb
+ if Ot.shape[0] != Twt.shape[1]:
+ raise ValueError(
+ "tau_to_w: Number of tau points are inconsistent: {} and {}".format(Ot.shape[0], Twt.shape[1]))
+
+ Ot_shape = Ot.shape
+ Ot = Ot.reshape(Ot.shape[0], -1)
+ Ow = np.dot(Twt, Ot)
+
+ Ot = Ot.reshape(Ot_shape)
+ Ow = Ow.reshape((Twt.shape[0],) + Ot_shape[1:])
+ return Ow
+
+ def w_to_tau(self, Ow, stats):
+ """
+ Fourier transform from Matsubara-frequency axis to imaginary-time axis.
+
+ :param Ow: numpy.ndarray
+ Matsubara-frequency object with dimensions (nw, ...)
+ :param stats: str
+ statistics, 'f' for fermions and 'b' for bosons
+
+ :return: numpy.ndarray
+ Imaginary-time object with dimensions (nt, ...)
+ """
+        if stats not in self.statistics:
+ raise ValueError("Unknown statistics '{}'. "
+ "Acceptable options are 'f' for fermion and 'b' for bosons.".format(stats))
+ Ttw = self.Ttw_ff if stats == 'f' else self.Ttw_bb
+ if Ow.shape[0] != Ttw.shape[1]:
+ raise ValueError(
+ "w_to_tau: Number of w points are inconsistent: {} and {}".format(Ow.shape[0], Ttw.shape[1]))
+
+ Ow_shape = Ow.shape
+ Ow = Ow.reshape(Ow.shape[0], -1)
+ Ot = np.dot(Ttw, Ow)
+
+ Ow = Ow.reshape(Ow_shape)
+ Ot = Ot.reshape((Ttw.shape[0],) + Ow_shape[1:])
+ return Ot
+
+ def w_interpolate(self, Ow, wn_mesh_interp, stats: str, ir_notation: bool = True):
+ """
+ Interpolate a dynamic object to arbitrary points on the Matsubara axis.
+
+ :param Ow: numpy.ndarray
+ Dynamic object on the Matsubara sampling points, self.wn_mesh.
+ :param wn_mesh_interp: numpy.ndarray(dim=1, dtype=int)
+ Target frequencies "INDICES".
+ The physical Matsubara frequencies are wn_mesh_interp * pi/beta.
+ :param stats: str
+ Statistics, 'f' for fermions and 'b' for bosons.
+ :param ir_notation: bool
+ Whether wn_mesh_interp is in sparse_ir notation where iwn = n*pi/beta for both fermions and bosons.
+ Otherwise, iwn = (2n+1)*pi/beta for fermions and 2n*pi/beta for bosons.
+
+ :return: numpy.ndarray
+ Matsubara-frequency object with dimensions (nw_interp, ...)
+ """
+        if stats not in self.statistics:
+ raise ValueError("Unknown statistics '{}'. "
+ "Acceptable options are 'f' for fermion and 'b' for bosons.".format(stats))
+ if ir_notation:
+ wn_indices = wn_mesh_interp
+ else:
+ wn_indices = np.array([2*n+1 if stats == 'f' else 2*n for n in wn_mesh_interp], dtype=int)
+ Tlw = self.Tlw_ff if stats == 'f' else self.Tlw_bb
+ if Ow.shape[0] != Tlw.shape[1]:
+ raise ValueError(
+ "w_interpolate: Number of w points are inconsistent: {} and {}".format(Ow.shape[0], Tlw.shape[1]))
+
+ Twl_interp = self.bases.basis_f.uhat(wn_indices).T if stats == 'f' else self.bases.basis_b.uhat(wn_indices).T
+ Tww = np.dot(Twl_interp, Tlw)
+
+ Ow_shape = Ow.shape
+ Ow = Ow.reshape(Ow.shape[0], -1)
+ Ow_interp = np.dot(Tww, Ow)
+
+ Ow = Ow.reshape(Ow_shape)
+ Ow_interp = Ow_interp.reshape((wn_indices.shape[0],) + Ow_shape[1:])
+ return Ow_interp
+
+ def tau_interpolate(self, Ot, tau_mesh_interp, stats: str):
+ """
+ Interpolate a dynamic object to arbitrary points on the imaginary-time axis.
+
+ :param Ot: numpy.ndarray
+ Dynamic object on the imaginary-time sampling points, self.tau_mesh.
+ :param tau_mesh_interp: numpy.ndarray(dim=1, dtype=float)
+ Target tau points.
+ :param stats: str
+ Statistics, 'f' for fermions and 'b' for bosons
+
+ :return: numpy.ndarray
+ Imaginary-time object with dimensions (nt_interp, ...)
+ """
+        if stats not in self.statistics:
+ raise ValueError("Unknown statistics '{}'. "
+ "Acceptable options are 'f' for fermion and 'b' for bosons.".format(stats))
+ Tlt = self.Tlt_ff if stats == 'f' else self.Tlt_bb
+ if Ot.shape[0] != Tlt.shape[1]:
+ raise ValueError(
+ "t_interpolate: Number of tau points are inconsistent: {} and {}".format(Ot.shape[0], Tlt.shape[1]))
+
+ Ttl_interp = self.bases.basis_f.u(tau_mesh_interp).T if stats == 'f' else self.bases.basis_b.u(tau_mesh_interp).T
+ Ttt = np.dot(Ttl_interp, Tlt)
+
+ Ot_shape = Ot.shape
+ Ot = Ot.reshape(Ot.shape[0], -1)
+ Ot_interp = np.dot(Ttt, Ot)
+
+ Ot = Ot.reshape(Ot_shape)
+ Ot_interp = Ot_interp.reshape((tau_mesh_interp.shape[0],) + Ot_shape[1:])
+ return Ot_interp
+
+
+if __name__ == '__main__':
+ # Initialize IAFT object for given inverse temperature, lambda and precision
+ ft = IAFT(1000, 1e4, 1e-6)
+
+ print(ft.wn_mesh('f', True))
+
+ Gt = np.zeros((ft.nt_f, 2, 2, 2))
+ Gw = ft.tau_to_w(Gt, 'f')
+ print(Gw.shape)
+
+ # Interpolate to arbitrary tau point
+ tau_interp = np.array([0.0, ft.beta])
+ Gt_interp = ft.tau_interpolate(Gt, tau_interp, 'f')
+ print(Gt_interp.shape)
+
+ # wn in spare_ir notation
+ w_interp = np.array([-1,1,3,5], dtype=int)
+ Gw_interp = ft.w_interpolate(Gw, w_interp, 'f', True)
+ print(Gw_interp.shape)
+
+ # wn in physical notation
+ w_interp = np.array([-1,0,1,2,3,4], dtype=int)
+ Gw_interp = ft.w_interpolate(Gw, w_interp, 'f', False)
+ print(Gw_interp.shape)
+
+ Gt2 = ft.w_to_tau(Gw, 'f')
+ print(Gt2.shape)
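One detail of the example above worth spelling out: wn_mesh returns bare sparse_ir indices (odd integers for fermions), while ir_notation=False converts them to conventional Matsubara indices n with iw_n = (2n+1)*pi/beta:

import numpy as np
from solid_dmft.gw_embedding.iaft import IAFT

ft = IAFT(beta=1000, lmbda=1e4, prec=1e-6)
n_ir = ft.wn_mesh('f', ir_notation=True)     # odd integers k, with iw = k*pi/beta
n_phys = ft.wn_mesh('f', ir_notation=False)  # conventional indices n, with iw = (2n+1)*pi/beta
assert np.array_equal(n_phys, (n_ir - 1) // 2)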
diff --git a/python/solid_dmft/gw_embedding/qp_evs_to_eig.py b/python/solid_dmft/gw_embedding/qp_evs_to_eig.py
new file mode 100644
index 00000000..547a0c1b
--- /dev/null
+++ b/python/solid_dmft/gw_embedding/qp_evs_to_eig.py
@@ -0,0 +1,47 @@
+import sys
+
+import numpy as np
+from h5 import HDFArchive
+from scipy.constants import physical_constants
+
+###
+# This script reads bdft output and dumps the g0w0 eigenvalues to a <seed>.eig file for wannier90 interpolation
+###
+
+# first arg is the h5 to use
+if len(sys.argv) < 2:
+    print("Usage: python qp_evs_to_eig.py <path to bdft h5 output>")
+ quit()
+print('h5 archive:', str(sys.argv[1]))
+bdft_output = str(sys.argv[1])
+
+Hartree_eV = physical_constants['Hartree energy in eV'][0]
+
+###### params ######
+# bdft output is defined by "output" in "evgw0" block.
+# number of bands used in wannier90.
+# It should be consistent with "num_bands" in si.win
+nbnd_pp = 3
+# number of bands to include in the beginning
+excl = 20
+# iteration of evGW0
+it = None
+# seed for w90
+seed = 'svo'
+
+############
+############
+
+# Read evGW0 eigenvalues
+with HDFArchive(bdft_output, 'r') as ar:
+ if not it:
+ it = ar['scf']['final_iter']
+ eig = ar[f'scf/iter{it}/E_ska'].real * Hartree_eV
+
+
+# Write eigenvalues in the format of Quantum Espresso .eig file
+ns, nkpts, nbnd_gw = eig.shape
+with open(f'{seed}.eig', 'w') as f:
+ for k in range(1, nkpts+1):
+ for i in range(excl+1, excl+nbnd_pp+1):
+ f.write(" {} {} {}\n".format(i-excl, k, eig[0, k-1, i-1]))
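A quick sanity check of the generated file, assuming the seed and band numbers above; each line holds band index, k-point index and the energy in eV, with the band index running fastest:

import numpy as np

data = np.loadtxt('svo.eig')
nbnd = int(data[:, 0].max())
nkpt = int(data[:, 1].max())
eig_kb = data[:, 2].reshape(nkpt, nbnd)   # energies per (k-point, band)
print(nbnd, nkpt, eig_kb.shape)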
diff --git a/python/solid_dmft/io_tools/default.toml b/python/solid_dmft/io_tools/default.toml
index 828d31a9..ac830937 100644
--- a/python/solid_dmft/io_tools/default.toml
+++ b/python/solid_dmft/io_tools/default.toml
@@ -9,6 +9,8 @@ csc = false
dc = true
dc_dmft = ""
dc_type = ""
+dlr_eps = ""
+dlr_wmax = ""
enforce_off_diag = true
eta = ""
fixed_mu_value = ""
@@ -16,12 +18,13 @@ g0_conv_crit = -1.0
g0_mix = 1.0
g0_mix_type = "linear"
gimp_conv_crit = -1.0
+gw_embedding = false
h_field = 0.0
h_field_it = 0
h_int_basis = "triqs"
h_int_type = ""
h5_save_freq = 5
-J = ""
+J = ""
jobname = "dmft_dir"
load_sigma = false
load_sigma_iter = -1
@@ -33,7 +36,7 @@ mu_initial_guess = ""
mu_mix_const = 1.0
mu_mix_per_occupation_offset = 0.0
mu_update_freq = 1
-n_iter_dmft = ""
+n_iter_dmft = ""
n_iter_dmft_first = 10
n_iter_dmft_per = 2
n_iw = 1025
@@ -51,7 +54,8 @@ set_rot = ""
sigma_conv_crit = -1.0
sigma_mix = 1.0
store_solver = false
-U = ""
+U = ""
+U_crpa_threshold = 0.0
U_prime = ""
w_range = [-10, 10]
@@ -60,14 +64,16 @@ w_range = [-10, 10]
# when initializing the solver.py class from solid_dmft
[[solver]]
type = "cthyb"
-idx_impurities = ""
+crm_dyson_solver = false
delta_interface = false
+diag_delta = false
fit_max_moment = ""
fit_max_n = ""
fit_max_w = ""
fit_min_n = ""
fit_min_w = ""
-imag_threshold = 1.0e-14
+idx_impurities = ""
+imag_threshold = 1.0e-8
legendre_fit = false
length_cycle = ""
loc_n_max = ""
@@ -100,18 +106,22 @@ random_seed = ""
[[solver]]
type = "ctseg"
+crm_dyson_solver = false
+diag_delta = false
idx_impurities = ""
improved_estimator = false
legendre_fit = false
length_cycle = ""
max_time = -1
-measure_G_iw = false
-measure_G_l = false
measure_G_tau = true
-measure_pert_order = false
+measure_nnt = false
+measure_pert_order = true
+measure_statehist = true
n_cycles_tot = ""
n_l = ""
+n_tau_k = 10001
n_warmup_cycles = ""
+off_diag_threshold = 0.0
random_seed = ""
[[solver]]
@@ -171,6 +181,13 @@ store_eigenvals = false
w90_exec = "wannier90.x"
w90_tolerance = 1e-6
+[gw]
+code = ""
+h5_file = ""
+use_rot = false
+it_1e = 0
+it_2e = 0
+
[advanced]
dc_factor = ""
dc_fixed_occ = ""
diff --git a/python/solid_dmft/io_tools/documentation.txt b/python/solid_dmft/io_tools/documentation.txt
index cd0daf81..9ac7ba45 100644
--- a/python/solid_dmft/io_tools/documentation.txt
+++ b/python/solid_dmft/io_tools/documentation.txt
@@ -41,6 +41,15 @@ dc_type : int or list of int, default = None
* 1: Held formula, needs to be used with slater-kanamori h_int_type=2
* 2: AMF
* 3: FLL for eg orbitals only with U,J for Kanamori
+
+    for cRPA interactions this can also be a string determining the type of DC from the full interaction
+ * crpa_static
+ * crpa_static_qp
+ * crpa_dynamic
+dlr_eps : float, default = None
+ precision for DLR basis set if needed, see also triqs.gf.meshes.MeshDLR
+dlr_wmax : float, default = None
+ spectral width (one-side) for DLR basis set if needed, see also triqs.gf.meshes.MeshDLR
enforce_off_diag : bool or list of bool, default = True
only if False, the block structure can be reduced to ignore off-diagonal elements
if they are below the general.block_threshold of the block structure finder
@@ -61,6 +70,8 @@ g0_mix_type : string, default = 'linear'
broyden: broyden mixing
gimp_conv_crit : float, default = -1.0
stop the calculation if sum_w 1/(w^0.6) ||Gimp-Gloc|| is smaller than threshold
+gw_embedding : bool, default = False
+ use GW embedding workflow module (gw_flow.py) instead of dmft_cycle for aimbes GW embedding, see section gw
h_field : float, default = 0.0
magnetic field
h_field_it : int, default = 0
@@ -83,9 +94,9 @@ h_int_type : string, mandatory
* simple_intra: density-density like but only intra orbital with given U value (no rotations applied)
* crpa: use the cRPA matrix as interaction Hamiltonian
* crpa_density_density: use the density-density terms of the cRPA matrix
- * dynamic: use dynamic U from h5 archive
+ * dyn_full: use dynamic U from h5 archive
+ * dyn_density_density: use dynamic U from h5 archive but only the density-density terms
- Needs to be stored as Matsubara Gf under dynamic_U/U_iw in the input h5
h5_save_freq : int, default = 5
how often is the output saved to the h5 archive
J : float or list of float, mandatory
@@ -179,6 +190,9 @@ store_solver : bool, default = False
U : float or list of float, mandatory
U interaction value. If it is a float, the same U is assumed for all impurities,
otherwise as a list a different U can be specified per impurity.
+U_crpa_threshold : float, default = 0.0
+ threshold for the cRPA interaction matrix. If the absolute value of the
+ elements is below this threshold, they are set to zero.
U_prime : float or list of floats, default = None
U prime interaction value.
Only used for impurities where general.h_int_type is kanamori.
@@ -204,8 +218,13 @@ idx_impurities : list of int, default = None
cthyb
=====
+crm_dyson_solver : bool, default = False
+ use CRM Dyson solver to extract Sigma_imp from G(tau) (conflict with legendre_fit and tail_fit)
+ set dlr_wmax and dlr_eps parameters in general section to use
delta_interface : bool, default = False
use delta interface in cthyb instead of input G0
+diag_delta : bool, default = False
+ approximate the hybridization function as diagonal when using the delta interface
fit_max_moment : int, default = None
max moment to be fitted. Only used if solver.perform_tail_fit = True
fit_max_n : int, default = None
@@ -295,6 +314,11 @@ random_seed : str, default = None
ctseg
=====
+crm_dyson_solver : bool, default = False
+ use CRM Dyson solver to extract Sigma_imp from G(tau) (conflict with legendre_fit and tail_fit)
+ set dlr_wmax and dlr_eps parameters in general section to use
+diag_delta : bool, default = False
+ approximate the hybridization function as diagonal when using the delta interface
improved_estimator : bool, default = False
measure improved estimators
Sigma_iw will automatically be calculated via
@@ -305,23 +329,27 @@ length_cycle : int, mandatory
length of each cycle; number of sweeps before measurement is taken
max_time : int, default = -1
maximum amount the solver is allowed to spend in each iteration
-measure_G_iw : bool, default = False
- should the solver measure G(iw)?
-measure_G_l : bool, default = False
- measure Legendre Greens function
measure_G_tau : bool, default = True
should the solver measure G(tau)?
+measure_nnt : bool, default = False
+    measure the two-particle density-density correlation function (susceptibility)
measure_pert_order : bool, default = False
measure perturbation order histograms: triqs.github.io/cthyb/latest/guide/perturbation_order_notebook.html.
The result is stored in the h5 archive under 'DMFT_results' at every iteration
in the subgroups 'pert_order_imp_X' and 'pert_order_total_imp_X'
+measure_statehist : bool, default = False
+ measure state histogram, i.e. diagonal components of many body density matrix
n_cycles_tot : int, mandatory
total number of sweeps
n_l : int, default = None
number of Legendre coefficients.
Needed if measure_G_l=True or legendre_fit=True
+n_tau_k : int, default = 10001
+    number of imaginary-time points for dynamic interactions
n_warmup_cycles : int, mandatory
number of warmup cycles before real measurement sets in
+off_diag_threshold : float, default = 0.0
+ threshold for off-diag elements in Hloc0
random_seed : str, default = None
if None, uses default seed by triqs.
If specified the int will be used for random seeds. Careful, this will give the same random
@@ -448,6 +476,19 @@ w90_exec : string, default ='wannier90.x'
w90_tolerance : float, default = 1e-6
threshold for mapping of shells and checks of the Hamiltonian
+[ gw ]
+--------
+code : str, default = None
+    GW embedding code (aimbes or Vasp) from which the screened interaction is loaded
+h5_file : str, default = None
+ path to h5 file in which the aimbes results are stored (checkpoint file)
+use_rot : bool, default = False
+ use rotations of sum_k object to rotate 2 particle objects
+it_1e : int, default = 0
+    iteration to load 1 particle objects from aimbes (0 loads the last iteration)
+it_2e : int, default = 0
+    iteration to load 2 particle objects from aimbes (0 loads the last iteration)
+
[ advanced ]
--------------
dc_factor : float, default = None
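Putting the new options together, a minimal [general] / [gw] fragment for an AIMBES embedding run could look as follows; the option names come from the tables above, all values and file names are placeholders, and the snippet is wrapped in Python only to keep it self-contained:

import tomllib  # Python >= 3.11

minimal_fragment = """
[general]
gw_embedding = true     # dispatch to gw_flow.embedding_driver instead of dmft_cycle
dlr_wmax = 10           # DLR window used for the converter and the CRM Dyson solver
dlr_eps = 1e-10

[gw]
code = "aimbes"           # currently the only supported backend
h5_file = "aimbes_chk.h5" # AIMBES checkpoint with downfold_1e / downfold_2e groups
use_rot = false
it_1e = 0                 # 0 -> take the last downfolding iteration
it_2e = 0
"""
cfg = tomllib.loads(minimal_fragment)
print(cfg['general']['gw_embedding'], cfg['gw']['code'])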
diff --git a/python/solid_dmft/io_tools/verify_input_params.py b/python/solid_dmft/io_tools/verify_input_params.py
index 5c1bddc0..51da5d3d 100644
--- a/python/solid_dmft/io_tools/verify_input_params.py
+++ b/python/solid_dmft/io_tools/verify_input_params.py
@@ -4,14 +4,21 @@
ParamDict = Dict[str, Any]
FullConfig = Dict[str, Union[ParamDict, List[ParamDict]]]
+
def _verify_input_params_general(params: FullConfig) -> None:
# Checks that grid parameters are specified completely
- if params['general']['beta'] is not None and (params['general']['n_iw'] is None
- or params['general']['n_tau'] is None):
+ if (
+ params['general']['beta'] is not None
+ and not params['general']['gw_embedding']
+ and (params['general']['n_iw'] is None or params['general']['n_tau'] is None)
+ ):
raise ValueError('Imaginary-frequency grid chosen, specify "n_iw" and "n_tau".')
- if params['general']['beta'] is None and (params['general']['eta'] is None or params['general']['n_w'] is None
- or params['general']['w_range'] is None):
+ if (
+ params['general']['beta'] is None
+ and not params['general']['gw_embedding']
+ and (params['general']['eta'] is None or params['general']['n_w'] is None or params['general']['w_range'] is None)
+ ):
raise ValueError('Real-frequency grid chosen, specify "eta", "n_w", and "w_range".')
# warning if sigma mixing is used, remove in future versions
@@ -22,10 +29,19 @@ def _verify_input_params_general(params: FullConfig) -> None:
raise ValueError('"calc_energies" is not valid for solver of type = "ftps"')
# Checks validity of other general params
- h_int_type_options = ('density_density', 'kanamori', 'full_slater', 'crpa',
- 'crpa_density_density', 'dynamic', 'ntot', 'simple_intra')
+ h_int_type_options = (
+ 'density_density',
+ 'kanamori',
+ 'full_slater',
+ 'crpa',
+ 'crpa_density_density',
+ 'dyn_density_density',
+ 'dyn_full',
+ 'ntot',
+ 'simple_intra',
+ )
if isinstance(params['general']['h_int_type'], str):
- if not params['general']['h_int_type'] in h_int_type_options:
+ if params['general']['h_int_type'] not in h_int_type_options:
raise ValueError(f'Invalid "h_int_type" = {params["general"]["h_int_type"]}.')
elif isinstance(params['general']['h_int_type'], list):
if any(entry not in h_int_type_options for entry in params['general']['h_int_type']):
@@ -45,6 +61,7 @@ def _verify_input_params_general(params: FullConfig) -> None:
if params['general']['h_int_basis'] not in ('triqs', 'wien2k', 'wannier90', 'qe', 'vasp'):
raise ValueError(f'Invalid "h_int_basis" = {params["general"]["h_int_basis"]}.')
+
def _verify_input_params_dft(params: FullConfig) -> None:
if params['dft']['dft_code'] not in ('vasp', 'qe', None):
raise ValueError(f'Invalid "dft.dft_code" = {params["dft"]["dft_code"]}.')
@@ -55,44 +72,59 @@ def _verify_input_params_dft(params: FullConfig) -> None:
if params['dft']['projector_type'] not in ('w90', 'plo'):
raise ValueError(f'Invalid "dft.projector_type" = {params["dft"]["projector_type"]}.')
+
def _verify_input_params_solver(params: FullConfig) -> None:
solver_params = params['solver']
# Checks that the solver impurities index are all lists if there are multiple solvers
if len(solver_params) > 1 or solver_params[0]['idx_impurities'] is not None:
if any(not isinstance(entry['idx_impurities'], list) for entry in solver_params):
- raise ValueError('All "solver.idx_impurities" need to specify the impurities '
- 'as a list of ints when there are multiple solvers.')
+ raise ValueError(
+ 'All "solver.idx_impurities" need to specify the impurities ' 'as a list of ints when there are multiple solvers.'
+ )
for entry in solver_params:
if any(not isinstance(imp, int) for imp in entry['idx_impurities']):
- raise ValueError('All "solver.idx_impurities" need to specify the impurities '
- 'as a list of ints when there are multiple solvers.')
+ raise ValueError(
+ 'All "solver.idx_impurities" need to specify the impurities ' 'as a list of ints when there are multiple solvers.'
+ )
# Checks that all solvers support the specified grid
# TODO: add real-frequency support for solvers that do both (e.g., hartree)
- supported_grids = {'real': ['ftps'],
- 'imag': ['cthyb', 'ctint', 'ctseg', 'hubbardI', 'hartree']}
+ supported_grids = {'real': ['ftps'], 'imag': ['cthyb', 'ctint', 'ctseg', 'hubbardI', 'hartree']}
if params['general']['beta'] is not None:
for entry in solver_params:
if entry['type'] not in supported_grids['imag']:
raise ValueError(f'Solver {entry["type"]} does not support imaginary-frequency grid.')
- else:
+ elif not params['general']['gw_embedding']:
for entry in solver_params:
if entry['type'] not in supported_grids['real']:
raise ValueError(f'Solver {entry["type"]} does not support real-frequency grid.')
+ for entry in solver_params:
+ if entry['type'] == 'cthyb' and entry['crm_dyson_solver'] and not entry['measure_density_matrix']:
+ raise ValueError(
+ 'When using "crm_dyson_solver" with the "cthyb" solver, "measure_density_matrix" must be set to True.'
+ )
+
+
+def _verify_input_params_gw(params: FullConfig) -> None:
+ pass
+
def _verify_input_params_advanced(params: FullConfig) -> None:
pass
+
def verify_before_dmft_cycle(params):
- """ All checks of params that can be done before dmft_cycle is called. """
+ """All checks of params that can be done before dmft_cycle is called."""
_verify_input_params_general(params)
_verify_input_params_dft(params)
_verify_input_params_solver(params)
+ _verify_input_params_gw(params)
_verify_input_params_advanced(params)
+
def verify_h5_dependent(sum_k, solver_type_per_imp, general_params):
- """ All checks of params that depend on the h5 file content that is stored in the SumkDFT object. """
+ """All checks of params that depend on the h5 file content that is stored in the SumkDFT object."""
# Incompatibilities for SO coupling
if sum_k.SO == 1 and general_params['magnetic'] and general_params['afm_order']:
raise ValueError('AFM order not supported with SO coupling')
@@ -102,10 +134,15 @@ def verify_h5_dependent(sum_k, solver_type_per_imp, general_params):
raise ValueError('enforce_off_diag must be True for impurities solved by the ftps or hartree solvers')
# Checks that the interaction Hamiltonian and the parameters match
- if any(h not in ['density_density', 'slater'] and r is not None
- for h, r in zip(general_params['h_int_type'], general_params['ratio_F4_F2'])):
- raise ValueError('"ratio_F4_F2" only considered for interaction Hamiltonians "density_density" and "slater". '
- 'Please set to None for all other Hamiltonians.')
+ if any(
+ h not in ['density_density', 'slater'] and r is not None
+ for h, r in zip(general_params['h_int_type'], general_params['ratio_F4_F2'])
+ ):
+ raise ValueError(
+ '"ratio_F4_F2" only considered for interaction Hamiltonians "density_density" and "slater". '
+ 'Please set to None for all other Hamiltonians.'
+ )
if any(h != 'kanamori' and up is not None for h, up in zip(general_params['h_int_type'], general_params['U_prime'])):
- raise ValueError('"U_prime" only considered for interaction Hamiltonian "kanamori". '
- 'Please set to None for all other Hamiltonians.')
+ raise ValueError(
+ '"U_prime" only considered for interaction Hamiltonian "kanamori". ' 'Please set to None for all other Hamiltonians.'
+ )
diff --git a/python/solid_dmft/main.py b/python/solid_dmft/main.py
index 88481db9..23663838 100644
--- a/python/solid_dmft/main.py
+++ b/python/solid_dmft/main.py
@@ -86,12 +86,22 @@ def run_dmft(params, config_file_name=None):
general_params = full_params['general']
solver_params = full_params['solver']
dft_params = full_params['dft']
+ gw_params = full_params['gw']
advanced_params = full_params['advanced']
if general_params['csc']:
# Start CSC calculation, always in same folder as dmft_config
general_params['jobname'] = '.'
csc_flow_control(general_params, solver_params, dft_params, advanced_params)
+ elif general_params['gw_embedding']:
+ from solid_dmft.gw_embedding.gw_flow import embedding_driver
+ if mpi.is_master_node():
+ # Creates output directory if it does not exist
+ if not os.path.exists(general_params['jobname']):
+ os.makedirs(general_params['jobname'])
+ mpi.barrier()
+ # run GW embedding
+ embedding_driver(general_params, solver_params, gw_params, advanced_params)
else:
# Sets up one-shot calculation
mpi.report('', '#'*80)
@@ -119,7 +129,7 @@ def run_dmft(params, config_file_name=None):
# Runs dmft_cycle
dmft_cycle(general_params, solver_params, advanced_params,
- dft_params, general_params['n_iter_dmft'])
+ dft_params, gw_params, general_params['n_iter_dmft'])
mpi.barrier()
if mpi.is_master_node():
diff --git a/python/solid_dmft/postprocessing/maxent_sigma.py b/python/solid_dmft/postprocessing/maxent_sigma.py
index 0db751e7..1db80287 100644
--- a/python/solid_dmft/postprocessing/maxent_sigma.py
+++ b/python/solid_dmft/postprocessing/maxent_sigma.py
@@ -201,6 +201,7 @@ def _run_maxent(continuators, error, omega_min, omega_max, n_points_maxent,
continue
opt_alphas[i][k, j, l] = result.analyzer_results[k][j][l][analyzer]['alpha_index']
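+ # wait here for all ranks; the argument is the polling interval in ms (triqs mpi.barrier poll_msec), so idle ranks do not busy-wait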
+ mpi.barrier(1000)
# Synchronizes information between ranks
for i in imps_blocks_indices:
spectral_funcs[i] = mpi.all_reduce(spectral_funcs[i])
diff --git a/python/solid_dmft/postprocessing/plot_correlated_bands.py b/python/solid_dmft/postprocessing/plot_correlated_bands.py
index fbb343d9..f688583f 100644
--- a/python/solid_dmft/postprocessing/plot_correlated_bands.py
+++ b/python/solid_dmft/postprocessing/plot_correlated_bands.py
@@ -668,12 +668,12 @@ def plot_kslice(fig, ax, alatt_k_w, tb_data, freq_dict, n_orb, tb_dict, tb=True,
return ax
-def get_dmft_bands(n_orb, w90_path, w90_seed, mu_tb, add_spin=False, add_lambda=None, add_local=None,
+def get_dmft_bands(n_orb, mu_tb, w90_path=None, w90_seed=None, TB_obj=None, add_spin=False, add_lambda=None, add_local=None,
with_sigma=None, fermi_slice=False, qp_bands=False, orbital_order_to=None,
add_mu_tb=False, band_basis=False, proj_on_orb=None, trace=True, eta=0.0,
mu_shift=0.0, proj_nuk=None, **specs):
'''
- Extract tight-binding from given w90 seed_hr.dat and seed.wout files, and then extract from
+ Extract the tight-binding Hamiltonian from the given w90 seed_hr.dat and seed.wout files (or alternatively from a given TB_obj), and then extract from
given solid_dmft calculation the self-energy and construct the spectral function A(k,w) on
given k-path.
@@ -681,10 +681,14 @@ def get_dmft_bands(n_orb, w90_path, w90_seed, mu_tb, add_spin=False, add_lambda=
----------
n_orb : int
Number of Wannier orbitals in seed_hr.dat
+ mu_tb : float
+ Chemical potential of the tight-binding calculation
w90_path : string
Path to w90 files
w90_seed : string
Seed of wannier90 calculation, i.e. seed_hr.dat and seed.wout
+ TB_obj : TB object, optional
+ Pre-constructed tight-binding object, e.g. from TB_from_wannier90; used instead of reading the wannier90 files
add_spin : bool, default=False
Extend w90 Hamiltonian by spin indices
add_lambda : float, default=None
@@ -764,19 +768,27 @@ def get_dmft_bands(n_orb, w90_path, w90_seed, mu_tb, add_spin=False, add_lambda=
if isinstance(proj_nuk, np.ndarray) and not band_basis:
band_basis = True
- # set up Wannier Hamiltonian
- n_orb = 2 * n_orb if add_spin else n_orb
- change_of_basis = change_basis(n_orb, orbital_order_to, orbital_order_w90)
- H_add_loc = np.zeros((n_orb, n_orb), dtype=complex)
- if not isinstance(add_local, type(None)):
- assert np.shape(add_local) == (n_orb, n_orb), 'add_local must have dimension (n_orb, n_orb), but has '\
- f'dimension {np.shape(add_local)}'
- H_add_loc += add_local
- if add_spin and add_lambda:
- H_add_loc += lambda_matrix_w90_t2g(add_lambda)
+ if TB_obj is None:
+ assert w90_path is not None and w90_seed is not None, 'Please provide either a TB object or a path to the wannier90 files'
+ # set up Wannier Hamiltonian
+ n_orb = 2 * n_orb if add_spin else n_orb
+ change_of_basis = change_basis(n_orb, orbital_order_to, orbital_order_w90)
+ H_add_loc = np.zeros((n_orb, n_orb), dtype=complex)
+ if not isinstance(add_local, type(None)):
+ assert np.shape(add_local) == (n_orb, n_orb), 'add_local must have dimension (n_orb, n_orb), but has '\
+ f'dimension {np.shape(add_local)}'
+ H_add_loc += add_local
+ if add_spin and add_lambda:
+ H_add_loc += lambda_matrix_w90_t2g(add_lambda)
+
+ tb = TB_from_wannier90(path=w90_path, seed=w90_seed, extend_to_spin=add_spin, add_local=H_add_loc)
+ else:
+ assert not add_spin, 'add_spin is only valid when reading from wannier90 files'
+ change_of_basis = change_basis(n_orb, orbital_order_to, orbital_order_w90)
+ tb = TB_obj
+
eta = eta * 1j
- tb = TB_from_wannier90(path=w90_path, seed=w90_seed, extend_to_spin=add_spin, add_local=H_add_loc)
# print local H(R)
h_of_r = np.einsum('ij, jk -> ik', np.linalg.inv(change_of_basis), np.einsum('ij, jk -> ik', tb.hoppings[(0, 0, 0)], change_of_basis))
if n_orb <= 12:
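
The new TB_obj argument lets a second call reuse the tight-binding object built by a first call instead of re-parsing the wannier90 files. A minimal sketch, mirroring the new test in test_plot_correlated_bands.py (path, seed, chemical potential and k-mesh values are illustrative):

    from solid_dmft.postprocessing import plot_correlated_bands as pcb

    # first call: construct the tight-binding model from the wannier90 files
    tb_data, alatt_k_w, freq_dict = pcb.get_dmft_bands(n_orb=3, mu_tb=12.3958,
                                                       w90_path='./', w90_seed='svo',
                                                       kmesh='regular', n_k=7)
    # second call: reuse the TB object returned in tb_data['tb'] instead of path and seed
    tb_data2, alatt_k_w2, freq_dict2 = pcb.get_dmft_bands(n_orb=3, mu_tb=12.3958,
                                                          TB_obj=tb_data['tb'],
                                                          kmesh='regular', n_k=7)
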
diff --git a/test/python/CMakeLists.txt b/test/python/CMakeLists.txt
index 792cbad1..57b1d6a8 100644
--- a/test/python/CMakeLists.txt
+++ b/test/python/CMakeLists.txt
@@ -39,7 +39,7 @@ set (integration_tests
svo_hubbardI_basic
svo_hartree
lno_hubbardI_mag
- svo_cthyb_basic_gl
+ svo_cthyb_basic_crm
svo_cthyb_basic_tf
lco_ftps
nio_cthyb_hartree
@@ -56,8 +56,9 @@ foreach(test ${integration_tests})
endforeach()
add_test(NAME ${test}
- #COMMAND bash ${test}.sh
- COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_PREFLAGS} ${TRIQS_PYTHON_EXECUTABLE} test.py
+ # COMMAND bash ${test}.sh
+ COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} ${TRIQS_PYTHON_EXECUTABLE} test.py
+ # COMMAND ${TRIQS_PYTHON_EXECUTABLE} test.py
WORKING_DIRECTORY ${test_dir}
)
@@ -74,7 +75,7 @@ foreach(file test_maxent.py helper.py)
endforeach()
add_test(NAME maxent_${test}
- COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_PREFLAGS} ${TRIQS_PYTHON_EXECUTABLE} test_maxent.py
+ COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} ${TRIQS_PYTHON_EXECUTABLE} test_maxent.py
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
)
@@ -95,10 +96,55 @@ foreach(test ${restart_integration_test})
endforeach()
add_test(NAME restart_${test}
- COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_PREFLAGS} ${TRIQS_PYTHON_EXECUTABLE} test_restart.py
+ COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} ${TRIQS_PYTHON_EXECUTABLE} test_restart.py
WORKING_DIRECTORY ${test_dir}
)
set_property(TEST restart_${test} APPEND PROPERTY ENVIRONMENT PYTHONPATH=${PROJECT_BINARY_DIR}/python:$ENV{PYTHONPATH})
endforeach()
set_property(TEST maxent_${test} APPEND PROPERTY DEPENDS "svo_hubbardI_basic")
+
+# ------------------------------#
+
+# GW embedding tests
+option(Test_GW_embedding "Run GW embedding tests" OFF)
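+# enable these tests at configure time, e.g.: cmake -DTest_GW_embedding=ON <path to solid_dmft>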
+if(Test_GW_embedding)
+
+set(gw_emb_basic_tests
+ test_gw_embedding
+ )
+
+foreach(test ${gw_emb_basic_tests})
+ get_filename_component(test_name ${test} NAME_WE)
+ get_filename_component(test_dir ${test} DIRECTORY)
+ add_test(NAME ${test_name} COMMAND ${TRIQS_PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${test_dir}/${test_name}.py WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${test_dir})
+ set_property(TEST ${test_name} APPEND PROPERTY ENVIRONMENT PYTHONPATH=${PROJECT_BINARY_DIR}/python:$ENV{PYTHONPATH})
+endforeach()
+
+set (gw_emb_integration_tests
+ svo_cthyb_crpa
+ svo_ctseg_dyn
+ svo_gw_emb_stat
+ # svo_gw_emb_dyn
+ )
+
+foreach(test ${gw_emb_integration_tests})
+ set (test_dir ${CMAKE_CURRENT_BINARY_DIR}/${test})
+
+ foreach(file dmft_config.toml inp.h5 ref.h5 test.py)
+ FILE(COPY ${test}/${file} DESTINATION ${test_dir})
+ endforeach()
+
+ add_test(NAME ${test}
+ # COMMAND bash ${test}.sh
+ COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} ${TRIQS_PYTHON_EXECUTABLE} test.py
+ # COMMAND ${TRIQS_PYTHON_EXECUTABLE} test.py
+ WORKING_DIRECTORY ${test_dir}
+ )
+
+ set_property(TEST ${test} APPEND PROPERTY ENVIRONMENT PYTHONPATH=${PROJECT_BINARY_DIR}/python:$ENV{PYTHONPATH})
+endforeach()
+
+endif()
+
+
diff --git a/test/python/lno_hubbardI_mag/ref.h5 b/test/python/lno_hubbardI_mag/ref.h5
index a23b6baa..6a6cab04 100644
Binary files a/test/python/lno_hubbardI_mag/ref.h5 and b/test/python/lno_hubbardI_mag/ref.h5 differ
diff --git a/test/python/svo_cthyb_basic_gl/dmft_config.toml b/test/python/svo_cthyb_basic_crm/dmft_config.toml
similarity index 87%
rename from test/python/svo_cthyb_basic_gl/dmft_config.toml
rename to test/python/svo_cthyb_basic_crm/dmft_config.toml
index 57251c23..65cd906f 100644
--- a/test/python/svo_cthyb_basic_gl/dmft_config.toml
+++ b/test/python/svo_cthyb_basic_crm/dmft_config.toml
@@ -8,6 +8,8 @@ mu_initial_guess = -0.027041
prec_mu = 0.001
n_iw = 501
n_tau = 10001
+dlr_wmax = 4
+dlr_eps = 1e-8
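+# dlr_wmax / dlr_eps: energy window and accuracy of the DLR representation used with the CRM Dyson solver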
h_int_type = "kanamori"
U = 8.0
@@ -31,10 +33,9 @@ load_sigma = false
[solver]
type = "cthyb"
-n_l = 35
-length_cycle = 120
+length_cycle = 250
n_warmup_cycles = 8000
n_cycles_tot = 1e+4
imag_threshold = 1e-5
-measure_G_l = true
measure_density_matrix = true
+crm_dyson_solver = true
diff --git a/test/python/svo_cthyb_basic_gl/inp.h5 b/test/python/svo_cthyb_basic_crm/inp.h5
similarity index 100%
rename from test/python/svo_cthyb_basic_gl/inp.h5
rename to test/python/svo_cthyb_basic_crm/inp.h5
diff --git a/test/python/svo_cthyb_basic_gl/ref.h5 b/test/python/svo_cthyb_basic_crm/ref.h5
similarity index 100%
rename from test/python/svo_cthyb_basic_gl/ref.h5
rename to test/python/svo_cthyb_basic_crm/ref.h5
diff --git a/test/python/svo_cthyb_basic_gl/test.py b/test/python/svo_cthyb_basic_crm/test.py
similarity index 100%
rename from test/python/svo_cthyb_basic_gl/test.py
rename to test/python/svo_cthyb_basic_crm/test.py
diff --git a/test/python/svo_cthyb_basic_tf/dmft_config.toml b/test/python/svo_cthyb_basic_tf/dmft_config.toml
index b4e9fb93..f280149e 100644
--- a/test/python/svo_cthyb_basic_tf/dmft_config.toml
+++ b/test/python/svo_cthyb_basic_tf/dmft_config.toml
@@ -13,7 +13,7 @@ U = 8.0
J = 0.65
U_prime = 6.5
-beta = 10
+beta = 40
n_iter_dmft = 2
store_solver = true
diff --git a/test/python/svo_cthyb_basic_tf/test.py b/test/python/svo_cthyb_basic_tf/test.py
index 6e6585e4..a25f567a 100644
--- a/test/python/svo_cthyb_basic_tf/test.py
+++ b/test/python/svo_cthyb_basic_tf/test.py
@@ -2,7 +2,7 @@
import shutil
from triqs.gf import *
-from triqs.utility.comparison_tests import assert_block_gfs_are_close
+from triqs.utility.comparison_tests import assert_block_gfs_are_close, assert_arrays_are_close
from h5 import HDFArchive
import triqs.utility.mpi as mpi
@@ -15,3 +15,16 @@
mpi.barrier()
solid.main([None, 'dmft_config.toml'])
+
+# with HDFArchive('out/inp.h5','r') as ar:
+# G_iw = ar['DMFT_results/last_iter/Gimp_freq_0']
+# G_iw_direct = ar['DMFT_results/last_iter/Gimp_freq_direct_0']
+#
+#
+# # compare the directly measured G_iw
+# iw0 = len(G_iw.mesh)//2
+# n_iw = len(G_iw_direct.mesh) // 2
+#
+# # works only if statistics are improved to higher precision
+# for block, gf in G_iw_direct:
+# assert_arrays_are_close(G_iw[block].data[iw0-n_iw:iw0+n_iw, :, :], G_iw_direct[block].data, precision=5e-2)
diff --git a/test/python/svo_cthyb_crpa/dmft_config.toml b/test/python/svo_cthyb_crpa/dmft_config.toml
new file mode 100644
index 00000000..0a9bafe0
--- /dev/null
+++ b/test/python/svo_cthyb_crpa/dmft_config.toml
@@ -0,0 +1,39 @@
+[general]
+seedname = "inp"
+jobname = "out"
+mu_initial_guess = 13.201
+
+enforce_off_diag = false
+
+prec_mu = 0.001
+n_iw = 100
+
+h_int_type = "crpa"
+U_crpa_threshold = 1e-1
+
+beta = 11.024796652696498
+
+n_iter_dmft = 1
+
+dc_type = "crpa_static"
+dc = true
+
+store_solver = true
+h5_save_freq = 1
+
+[solver]
+type = "cthyb"
+length_cycle = 50
+n_warmup_cycles = 1e+4
+n_cycles_tot = 1e+5
+imag_threshold = 1e-4
+delta_interface = true
+measure_density_matrix = true
+perform_tail_fit = true
+fit_max_moment = 4
+fit_min_w = 4
+fit_max_w = 10
+
+[gw]
+code = "aimbes"
+h5_file = "../svo_gw_emb_stat/inp.h5"
diff --git a/test/python/svo_cthyb_crpa/inp.h5 b/test/python/svo_cthyb_crpa/inp.h5
new file mode 100644
index 00000000..9c5dda0a
Binary files /dev/null and b/test/python/svo_cthyb_crpa/inp.h5 differ
diff --git a/test/python/svo_cthyb_crpa/ref.h5 b/test/python/svo_cthyb_crpa/ref.h5
new file mode 100644
index 00000000..01067a1a
Binary files /dev/null and b/test/python/svo_cthyb_crpa/ref.h5 differ
diff --git a/test/python/svo_cthyb_crpa/test.py b/test/python/svo_cthyb_crpa/test.py
new file mode 100644
index 00000000..2a5e28d4
--- /dev/null
+++ b/test/python/svo_cthyb_crpa/test.py
@@ -0,0 +1,22 @@
+import sys
+import shutil
+
+from triqs.gf import *
+from triqs.utility.comparison_tests import assert_block_gfs_are_close, assert_arrays_are_close
+from h5 import HDFArchive
+import triqs.utility.mpi as mpi
+
+import solid_dmft.main as solid
+
+try:
+ from triqs_ctseg import Solver
+except ImportError:
+ print('ctseg solver not installed, skipping')
+ sys.exit()
+
+if mpi.is_master_node():
+ shutil.rmtree('out', ignore_errors=True)
+
+mpi.barrier()
+
+solid.main([None, 'dmft_config.toml'])
diff --git a/test/python/svo_ctseg_dyn/dmft_config.toml b/test/python/svo_ctseg_dyn/dmft_config.toml
new file mode 100644
index 00000000..a817f84b
--- /dev/null
+++ b/test/python/svo_ctseg_dyn/dmft_config.toml
@@ -0,0 +1,38 @@
+[general]
+seedname = "inp"
+jobname = "out"
+mu_initial_guess = 13.223155
+
+prec_mu = 0.001
+n_iw = 200
+n_tau = 20000
+dlr_wmax = 2
+dlr_eps = 1e-6
+
+h_int_type = "dyn_density_density"
+# h_int_type = "crpa_density_density"
+
+beta = 11.024796652696498
+
+n_iter_dmft = 1
+
+# dc_type = "crpa_dynamic"
+dc_type = "crpa_static_qp"
+dc = true
+
+store_solver = true
+
+h5_save_freq = 1
+
+[solver]
+type = "ctseg"
+length_cycle = 80
+n_warmup_cycles = 1e+4
+n_cycles_tot = 1e+6
+off_diag_threshold = 1e-4
+crm_dyson_solver = true
+n_tau_k = 10001
+
+[gw]
+code = "aimbes"
+h5_file = "../svo_gw_emb_stat/inp.h5"
diff --git a/test/python/svo_ctseg_dyn/inp.h5 b/test/python/svo_ctseg_dyn/inp.h5
new file mode 100644
index 00000000..9c5dda0a
Binary files /dev/null and b/test/python/svo_ctseg_dyn/inp.h5 differ
diff --git a/test/python/svo_ctseg_dyn/ref.h5 b/test/python/svo_ctseg_dyn/ref.h5
new file mode 100644
index 00000000..01067a1a
Binary files /dev/null and b/test/python/svo_ctseg_dyn/ref.h5 differ
diff --git a/test/python/svo_ctseg_dyn/test.py b/test/python/svo_ctseg_dyn/test.py
new file mode 100644
index 00000000..ed5224af
--- /dev/null
+++ b/test/python/svo_ctseg_dyn/test.py
@@ -0,0 +1,20 @@
+import sys
+import shutil
+import importlib.util
+
+import triqs.utility.mpi as mpi
+
+import solid_dmft.main as solid
+
+# try ctseg import
+ctseg = importlib.util.find_spec("triqs_ctseg") is not None
+if not ctseg:
+ mpi.report('ImportWarning: ctseg needs to be installed to run this test')
+ sys.exit()
+
+if mpi.is_master_node():
+ shutil.rmtree('out', ignore_errors=True)
+
+mpi.barrier()
+
+solid.main([None, 'dmft_config.toml'])
diff --git a/test/python/svo_gw_emb_dyn/dmft_config.toml b/test/python/svo_gw_emb_dyn/dmft_config.toml
new file mode 100644
index 00000000..526cb27c
--- /dev/null
+++ b/test/python/svo_gw_emb_dyn/dmft_config.toml
@@ -0,0 +1,30 @@
+[general]
+seedname = "gw_dyn_test"
+jobname = "out"
+
+enforce_off_diag = false
+
+n_iw = 20000
+n_tau = 200001
+dlr_wmax = 0.5
+dlr_eps = 1e-8
+
+gw_embedding = true
+h_int_type = "dyn_density_density"
+store_solver = true
+
+[gw]
+code = "aimbes"
+h5_file = "inp.h5"
+it_1e = 1
+it_2e = 1
+
+[solver]
+type = "ctseg"
+n_tau_k = 10001
+length_cycle = 100
+n_warmup_cycles = 1e+3
+n_cycles_tot = 1e+5
+off_diag_threshold = 1e-5
+diag_delta = false
+crm_dyson_solver = true
diff --git a/test/python/svo_gw_emb_dyn/inp.h5 b/test/python/svo_gw_emb_dyn/inp.h5
new file mode 100644
index 00000000..f825c367
Binary files /dev/null and b/test/python/svo_gw_emb_dyn/inp.h5 differ
diff --git a/test/python/svo_gw_emb_dyn/ref.h5 b/test/python/svo_gw_emb_dyn/ref.h5
new file mode 100644
index 00000000..eb940892
Binary files /dev/null and b/test/python/svo_gw_emb_dyn/ref.h5 differ
diff --git a/test/python/svo_gw_emb_dyn/test.py b/test/python/svo_gw_emb_dyn/test.py
new file mode 100644
index 00000000..2e7f8ac2
--- /dev/null
+++ b/test/python/svo_gw_emb_dyn/test.py
@@ -0,0 +1,22 @@
+import sys
+import shutil
+
+from triqs.gf import *
+from triqs.utility.comparison_tests import assert_block_gfs_are_close, assert_arrays_are_close
+from h5 import HDFArchive
+import triqs.utility.mpi as mpi
+
+import solid_dmft.main as solid
+
+try:
+ from triqs_cthyb import Solver
+except ImportError:
+ print('cthyb solver not installed, skipping')
+ sys.exit()
+
+if mpi.is_master_node():
+ shutil.rmtree('out', ignore_errors=True)
+
+mpi.barrier()
+
+solid.main([None, 'dmft_config.toml'])
diff --git a/test/python/svo_gw_emb_stat/dmft_config.toml b/test/python/svo_gw_emb_stat/dmft_config.toml
new file mode 100644
index 00000000..2f16588a
--- /dev/null
+++ b/test/python/svo_gw_emb_stat/dmft_config.toml
@@ -0,0 +1,36 @@
+[general]
+seedname = "gw_stat_test"
+jobname = "out"
+
+enforce_off_diag = false
+
+n_iw = 20000
+n_tau = 200001
+dlr_wmax = 0.5
+dlr_eps = 1e-8
+
+gw_embedding = true
+h_int_type = "crpa_density_density"
+store_solver = true
+
+[gw]
+code = "aimbes"
+h5_file = "inp.h5"
+it_1e = 1
+it_2e = 1
+
+[solver]
+type = "cthyb"
+length_cycle = 120
+n_warmup_cycles = 1e+3
+n_cycles_tot = 5e+5
+off_diag_threshold = 1e-5
+imag_threshold = 1e-4
+delta_interface = false
+diag_delta = false
+measure_density_matrix = true
+crm_dyson_solver = true
+# perform_tail_fit = true
+# fit_max_moment = 4
+# fit_min_w = 0.2
+# fit_max_w = 0.8
diff --git a/test/python/svo_gw_emb_stat/inp.h5 b/test/python/svo_gw_emb_stat/inp.h5
new file mode 100644
index 00000000..f825c367
Binary files /dev/null and b/test/python/svo_gw_emb_stat/inp.h5 differ
diff --git a/test/python/svo_gw_emb_stat/ref.h5 b/test/python/svo_gw_emb_stat/ref.h5
new file mode 100644
index 00000000..eb940892
Binary files /dev/null and b/test/python/svo_gw_emb_stat/ref.h5 differ
diff --git a/test/python/svo_gw_emb_stat/test.py b/test/python/svo_gw_emb_stat/test.py
new file mode 100644
index 00000000..2e7f8ac2
--- /dev/null
+++ b/test/python/svo_gw_emb_stat/test.py
@@ -0,0 +1,22 @@
+import sys
+import shutil
+
+from triqs.gf import *
+from triqs.utility.comparison_tests import assert_block_gfs_are_close, assert_arrays_are_close
+from h5 import HDFArchive
+import triqs.utility.mpi as mpi
+
+import solid_dmft.main as solid
+
+try:
+ from triqs_cthyb import Solver
+except ImportError:
+ print('cthyb solver not installed, skipping')
+ sys.exit()
+
+if mpi.is_master_node():
+ shutil.rmtree('out', ignore_errors=True)
+
+mpi.barrier()
+
+solid.main([None, 'dmft_config.toml'])
diff --git a/test/python/test_gw_embedding.py b/test/python/test_gw_embedding.py
new file mode 100644
index 00000000..5dd652b4
--- /dev/null
+++ b/test/python/test_gw_embedding.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2018-2022 Simons Foundation
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You may obtain a copy of the License at
+# https://www.gnu.org/licenses/gpl-3.0.txt
+#
+# Authors: Alexander Hampel
+
+import numpy as np
+
+from h5 import HDFArchive
+import unittest
+
+from triqs.gf import MeshDLRImFreq, Gf, BlockGf, make_gf_dlr, make_gf_imfreq, make_gf_dlr_imfreq
+
+from solid_dmft.gw_embedding.bdft_converter import convert_gw_output, calc_Sigma_DC_gw, calc_W_from_Gloc
+
+
+class test_gw_embedding(unittest.TestCase):
+
+ def setUp(self):
+ self.path = 'gw_embedding'
+ self.seed = 'qpgw_svo'
+ return
+
+ def test_bdft_converter(self):
+
+ gw_data, ir_kernel = convert_gw_output(job_h5='svo_hartree/inp.h5', gw_h5='svo_gw_emb_stat/inp.h5')
+
+ return
+
+
+ def test_calc_W_DC(self):
+ with HDFArchive('svo_hartree/inp.h5', 'r') as ar:
+ Uloc_dlr = ar['DMFT_input/iter1']['Uloc_dlr'][0]
+ Gloc_dlr = ar['DMFT_input/iter1']['Gloc_dlr'][0]
+ Vloc = ar['DMFT_input/iter1']['Vloc'][0]
+
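+ # rebuild Gloc block by block: evaluate the DLR coefficients on the DLR Matsubara nodes and refit them onto a fresh DLR mesh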
+ mesh_dlr = MeshDLRImFreq(Gloc_dlr.mesh)
+ glist = []
+ Gloc_iw = make_gf_dlr_imfreq(Gloc_dlr)
+ for block, gf in Gloc_iw:
+ g_dlr_iw = Gf(mesh=mesh_dlr, target_shape=gf.target_shape)
+
+ for iw in g_dlr_iw.mesh:
+ g_dlr_iw[iw] = gf[iw]
+
+ glist.append(make_gf_dlr(g_dlr_iw))
+
+ Gloc_dlr = BlockGf(name_list=['up','down'], block_list=glist)
+
+ # create Coulomb tensor from Uloc_dlr
+ Uloc_0 = make_gf_imfreq(Uloc_dlr['up'],1).data[0,:,:,:,:] + Vloc['up']
+ U_dict = {'up' : Uloc_0, 'down' : Uloc_0}
+
+ # test Gf | np.ndarray
+ W_dlr = calc_W_from_Gloc(Gloc_dlr['up'], Uloc_0)
+ # test BlockGf | dict of np.ndarray
+ W_dlr = calc_W_from_Gloc(Gloc_dlr, U_dict)
+
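+ # GW double counting: dynamic part on the DLR mesh, with the static Hartree and exchange contributions returned separately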
+ Sig_DC_dlr, Sig_DC_hartree, Sig_DC_exchange = calc_Sigma_DC_gw(W_dlr['up'], Gloc_dlr['up'], Uloc_0)
+ DC_ref = np.array([[1.003e-01+0.0j, 0.0+0.0j, 0.0+0.0j],
+ [0.0+0.0j, 1.003e-01+0.0j, 0.0+0.0j],
+ [0.0+0.0j, 0.0+0.0j, 1.003e-01+0.0j]])
+ assert np.allclose(Sig_DC_hartree, DC_ref, rtol=1e-3, atol=1e-3)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/test/python/test_interaction_hamiltonian.py b/test/python/test_interaction_hamiltonian.py
index f4f83c32..c272231a 100644
--- a/test/python/test_interaction_hamiltonian.py
+++ b/test/python/test_interaction_hamiltonian.py
@@ -19,8 +19,10 @@ def test_load_crpa_interaction_matrix(self):
sum_k.corr_to_inequiv = [0, 1, 1, 0]
sum_k.inequiv_to_corr = [0, 1]
sum_k.gf_struct_solver = [{'down_0' : 5, 'up_0' : 5},{'down_0' : 5, 'up_0' : 5}]
+ general_params = {}
+ gw_params = {'code' : 'Vasp'}
- crpa_matrix = _load_crpa_interaction_matrix(sum_k, 'UIJKL')
+ crpa_matrix, _ = _load_crpa_interaction_matrix(sum_k, general_params, gw_params, 'UIJKL')
assert [c.shape for c in crpa_matrix] == [(5, 5, 5, 5), (5, 5, 5, 5)]
diff --git a/test/python/test_observables.py b/test/python/test_observables.py
index 7138fdc2..e1a273c9 100644
--- a/test/python/test_observables.py
+++ b/test/python/test_observables.py
@@ -59,14 +59,13 @@ def test_add_dft_values_one_impurity_one_band(self):
G_loc_all_dft = [BlockGf(name_list=('up_0', 'down_0'), block_list=(gf_up, gf_down), make_copies=True)]
solver_type_per_imp = ['cthyb']
- density_mat_dft = [G_loc_all_dft[iineq].density() for iineq in range(sum_k.n_inequiv_shells)]
dft_mu = 12.
shell_multiplicity = [4]
observables = set_up_observables(sum_k.n_inequiv_shells)
observables = add_dft_values_as_zeroth_iteration(observables, general_params, solver_type_per_imp, dft_mu, None, sum_k,
- G_loc_all_dft, density_mat_dft, shell_multiplicity)
+ G_loc_all_dft, shell_multiplicity)
expected_observables = {'E_bandcorr': ['none'], 'E_tot': ['none'], 'E_dft': ['none'],
'E_DC': [['none']], 'E_int': [['none']],
@@ -116,7 +115,6 @@ def test_add_dft_values_two_impurites_two_bands(self):
BlockGf(name_list=('up_0', 'down_0'), block_list=(gf_up_two_bands, gf_down_two_bands), make_copies=True)]
solver_type_per_imp = ['cthyb', 'cthyb']
- density_mat_dft = [G_loc_all_dft[iineq].density() for iineq in range(sum_k.n_inequiv_shells)]
dft_mu = 2.
dft_energy = None
shell_multiplicity = [3, 1]
@@ -124,7 +122,7 @@ def test_add_dft_values_two_impurites_two_bands(self):
observables = set_up_observables(sum_k.n_inequiv_shells)
observables = add_dft_values_as_zeroth_iteration(observables, general_params, solver_type_per_imp, dft_mu, dft_energy, sum_k,
- G_loc_all_dft, density_mat_dft, shell_multiplicity)
+ G_loc_all_dft, shell_multiplicity)
expected_observables = {'E_bandcorr': [0.0], 'E_tot': [-5.3], 'E_dft': [0.0],
'E_DC': [[0.3], [5.0]], 'E_int': [[0.0], [0.0]],
'orb_occ': [{'down': [[1., 1.]], 'up': [[0.8044, 0.8044]]}, {'down': [[0.5, 0.5]], 'up': [[0.5, 0.5]]}],
diff --git a/test/python/test_plot_correlated_bands.py b/test/python/test_plot_correlated_bands.py
index a9e1d29b..617e1ffa 100644
--- a/test/python/test_plot_correlated_bands.py
+++ b/test/python/test_plot_correlated_bands.py
@@ -121,7 +121,7 @@ def test_get_kslice_nokz(self):
assert np.allclose(tb_data['e_mat'], emat_ref)
assert np.allclose(alatt_k_w, Akw_ref)
- def test_get_dmft_bands_reg_mesh(self):
+ def test_get_dmft_bands_reg_mesh_read_TB_obj(self):
tb_bands = {'kmesh': 'regular', 'n_k': 7}
tb_data, alatt_k_w, freq_dict = pcb.get_dmft_bands(with_sigma='calc', add_mu_tb=True,
@@ -135,5 +135,17 @@ def test_get_dmft_bands_reg_mesh(self):
assert np.allclose(tb_data['e_mat'], emat_ref)
assert np.allclose(alatt_k_w, Akw_ref)
+ # read now from TB_obj
+ w90_dict = {'TB_obj': tb_data['tb'], 'mu_tb': 12.3958, 'n_orb': 3,
+ 'orbital_order_w90': ['dxz', 'dyz', 'dxy']}
+
+ tb_data_obj, alatt_k_w_obj, freq_dict_obj = pcb.get_dmft_bands(with_sigma='calc', add_mu_tb=True,
+ orbital_order_to=self.orbital_order_to,
+ **w90_dict, **tb_bands, **self.sigma_dict)
+
+ assert np.allclose(tb_data_obj['e_mat'], emat_ref)
+ assert np.allclose(alatt_k_w_obj, Akw_ref)
+
+
if __name__ == '__main__':
unittest.main()