Commit
…presso-hp into feat/voronoi
bastonero committed Apr 19, 2024
2 parents 99a46e4 + 4c8036e commit 83bbbed
Showing 4 changed files with 64 additions and 16 deletions.
@@ -24,6 +24,10 @@ def define(cls, spec):
        spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir'))
        spec.input('parallelize_qpoints', valid_type=orm.Bool, default=lambda: orm.Bool(False))
        spec.input('max_concurrent_base_workchains', valid_type=orm.Int, required=False)
+        spec.input(
+            'init_walltime', valid_type=int, default=3600, non_db=True,
+            help='The walltime of the initialization `HpBaseWorkChain` in seconds (default: 3600).'
+        )
        spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
            help='If `True`, work directories of all called calculations will be cleaned at the end of execution.')
        spec.outline(
@@ -56,7 +60,7 @@ def run_init(self):
        inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
        inputs.only_initialization = orm.Bool(True)
        inputs.clean_workdir = self.inputs.clean_workdir
-        inputs.hp.metadata.options.max_wallclock_seconds = 3600  # 1 hour is more than enough
+        inputs.hp.metadata.options.max_wallclock_seconds = self.inputs.init_walltime
        inputs.metadata.call_link_label = 'initialization'

        node = self.submit(HpBaseWorkChain, **inputs)
@@ -30,6 +30,10 @@ def define(cls, spec):
        super().define(spec)
        spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir'))
        spec.input('max_concurrent_base_workchains', valid_type=orm.Int, required=False)
+        spec.input(
+            'init_walltime', valid_type=int, default=3600, non_db=True,
+            help='The walltime of the initialization `HpBaseWorkChain` in seconds (default: 3600).'
+        )
        spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
            help='If `True`, work directories of all called calculations will be cleaned at the end of execution.')
        spec.outline(
@@ -63,7 +67,7 @@ def run_init(self):
        inputs.hp.parameters = orm.Dict(parameters)
        inputs.clean_workdir = self.inputs.clean_workdir

-        inputs.hp.metadata.options.max_wallclock_seconds = 3600  # 1 hour is more than enough
+        inputs.hp.metadata.options.max_wallclock_seconds = self.inputs.init_walltime
        inputs.metadata.call_link_label = 'initialization'

        node = self.submit(HpBaseWorkChain, **inputs)
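Both parallelization work chains gain the same `init_walltime` knob. Because it is declared with `valid_type=int` and `non_db=True`, callers pass a plain Python integer rather than an `orm.Int` node. A minimal usage sketch (the entry-point name and builder setup are assumed for illustration, not taken from this diff):

    from aiida.plugins import WorkflowFactory

    # Entry-point name assumed for illustration; check the plugin registry for the real one.
    ParallelizeAtomsWorkChain = WorkflowFactory('quantumespresso.hp.parallelize_atoms')

    builder = ParallelizeAtomsWorkChain.get_builder()
    builder.init_walltime = 7200  # plain int (non_db input): allow two hours for the initialization run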
33 changes: 19 additions & 14 deletions src/aiida_quantumespresso_hp/workflows/hubbard.py
@@ -422,6 +422,22 @@ def get_pseudos(self) -> dict:

        return results

+    def relabel_hubbard_structure(self, workchain) -> None:
+        """Relabel the Hubbard structure if new types have been detected."""
+        from aiida_quantumespresso.utils.hubbard import is_intersite_hubbard
+
+        if not is_intersite_hubbard(workchain.outputs.hubbard_structure.hubbard):
+            for site in workchain.outputs.hubbard.dict.sites:
+                if site['type'] != site['new_type']:
+                    result = structure_relabel_kinds(
+                        self.ctx.current_hubbard_structure, workchain.outputs.hubbard, self.ctx.current_magnetic_moments
+                    )
+                    self.ctx.current_hubbard_structure = result['hubbard_structure']
+                    if self.ctx.current_magnetic_moments is not None:
+                        self.ctx.current_magnetic_moments = result['starting_magnetization']
+                    self.report('new types have been detected: relabeling the structure.')
+                    return
+
    def run_relax(self):
        """Run the PwRelaxWorkChain to run a relax PwCalculation."""
        inputs = self.get_inputs(PwRelaxWorkChain, 'relax')
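For intuition: relabeling is needed when atoms of the same element acquire different Hubbard types during the self-consistent cycle, since each distinct type then needs its own kind in the structure. A simplified, self-contained illustration of the idea (a toy stand-in, not the actual `structure_relabel_kinds` implementation):

    def illustrate_relabel(sites):
        """Toy kind relabeling: give each (element, new_type) pair its own kind name.

        Each site is a dict with 'kind' and 'new_type' keys, loosely mimicking the
        entries of `workchain.outputs.hubbard.dict.sites` used above (simplified).
        """
        mapping = {}
        relabeled = []
        for site in sites:
            key = (site['kind'], site['new_type'])
            if key not in mapping:
                # The first occurrence of a new (element, type) pair defines a new kind label.
                suffix = sum(1 for element, _ in mapping if element == site['kind'])
                mapping[key] = site['kind'] + (str(suffix) if suffix else '')
            relabeled.append(mapping[key])
        return relabeled

    # Two Fe sites whose Hubbard types diverge get distinct kinds:
    print(illustrate_relabel([
        {'kind': 'Fe', 'new_type': 1},
        {'kind': 'Fe', 'new_type': 2},
        {'kind': 'O', 'new_type': 3},
    ]))  # -> ['Fe', 'Fe1', 'O']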
@@ -593,14 +609,14 @@ def inspect_hp(self):

        if not self.should_check_convergence():
            self.ctx.current_hubbard_structure = workchain.outputs.hubbard_structure
+            self.relabel_hubbard_structure(workchain)

        if not self.inputs.meta_convergence:
            self.report('meta convergence is switched off, so not checking convergence of Hubbard parameters.')
            self.ctx.is_converged = True

    def check_convergence(self):
        """Check the convergence of the Hubbard parameters."""
-        from aiida_quantumespresso.utils.hubbard import is_intersite_hubbard
-
        workchain = self.ctx.workchains_hp[-1]

        # We store in memory the parameters before relabelling to make the comparison easier.
@@ -616,18 +632,7 @@ def check_convergence(self):

        # We check if new types were created, in which case we relabel the `HubbardStructureData`
        self.ctx.current_hubbard_structure = workchain.outputs.hubbard_structure
-
-        if not is_intersite_hubbard(workchain.outputs.hubbard_structure.hubbard):
-            for site in workchain.outputs.hubbard.dict.sites:
-                if not site['type'] == site['new_type']:
-                    self.report('new types have been detected: relabeling the structure and starting new iteration.')
-                    result = structure_relabel_kinds(
-                        self.ctx.current_hubbard_structure, workchain.outputs.hubbard, self.ctx.current_magnetic_moments
-                    )
-                    self.ctx.current_hubbard_structure = result['hubbard_structure']
-                    if self.ctx.current_magnetic_moments is not None:
-                        self.ctx.current_magnetic_moments = result['starting_magnetization']
-                    break
+        self.relabel_hubbard_structure(workchain)

        if not self.should_check_convergence():
            return
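The old inline relabeling block is thus factored into `relabel_hubbard_structure`, and `check_convergence` keeps only the parameter comparison. As a rough sketch of what such a convergence check amounts to (a hedged toy, not the work chain's actual criterion or API):

    def hubbard_converged(old_u, new_u, tolerance=0.1):
        """Toy criterion: largest absolute change of the Hubbard U values (in eV) is below tolerance.

        `old_u` and `new_u` are site-aligned lists of U values; the real work chain
        compares the full parameter set, after matching any relabeled kinds.
        """
        return max(abs(n - o) for n, o in zip(new_u, old_u)) < tolerance

    print(hubbard_converged([4.95, 5.02], [5.00, 5.00]))  # True with the default 0.1 eV tolerance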
35 changes: 35 additions & 0 deletions tests/workflows/test_hubbard.py
@@ -223,6 +223,41 @@ def test_skip_relax_iterations(generate_workchain_hubbard, generate_inputs_hubbard
    assert process.should_check_convergence()


+@pytest.mark.usefixtures('aiida_profile')
+def test_skip_relax_iterations_relabeling(
+    generate_workchain_hubbard, generate_inputs_hubbard, generate_hp_workchain_node, generate_hubbard_structure
+):
+    """Test `SelfConsistentHubbardWorkChain` when skipping the first relax iterations and relabeling is needed."""
+    from aiida.orm import Bool, Int
+
+    inputs = generate_inputs_hubbard()
+    inputs['skip_relax_iterations'] = Int(1)
+    inputs['meta_convergence'] = Bool(True)
+    process = generate_workchain_hubbard(inputs=inputs)
+    process.setup()
+
+    current_hubbard_structure = generate_hubbard_structure(u_value=1, only_u=True)
+    process.ctx.current_hubbard_structure = current_hubbard_structure
+    # Iteration 1: relax is skipped and convergence is not checked.
+    process.update_iteration()
+    assert process.ctx.skip_relax_iterations == 1
+    assert process.ctx.iteration == 1
+    assert not process.should_run_relax()
+    assert not process.should_check_convergence()
+    process.ctx.workchains_hp = [generate_hp_workchain_node(relabel=True, u_value=1, only_u=True)]
+    process.inspect_hp()
+    assert (
+        process.ctx.current_hubbard_structure.get_kind_names()
+        != process.ctx.workchains_hp[-1].outputs.hubbard_structure.get_kind_names()
+    )
+    # Iteration 2: relax runs again and convergence is checked.
+    process.update_iteration()
+    assert process.should_run_relax()
+    assert process.should_check_convergence()
+    # Iteration 3: same as iteration 2.
+    process.update_iteration()
+    assert process.should_run_relax()
+    assert process.should_check_convergence()
+

@pytest.mark.usefixtures('aiida_profile')
def test_relax_frequency(generate_workchain_hubbard, generate_inputs_hubbard):
    """Test `SelfConsistentHubbardWorkChain` when `relax_frequency` is different from 1."""
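In a real run, the behavior exercised by this test is driven through the work chain inputs rather than by calling outline methods directly. A hedged sketch of the corresponding user-facing setup (the entry-point name is assumed for illustration):

    from aiida import orm
    from aiida.plugins import WorkflowFactory

    # Entry-point name assumed for illustration; check the plugin registry for the real one.
    SelfConsistentHubbardWorkChain = WorkflowFactory('quantumespresso.hp.hubbard')

    builder = SelfConsistentHubbardWorkChain.get_builder()
    builder.skip_relax_iterations = orm.Int(1)  # skip the geometry relaxation in the first iteration
    builder.meta_convergence = orm.Bool(True)   # iterate until the Hubbard parameters stop changing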
