Commit f15de60
…presso-hp into feat/voronoi
bastonero committed Dec 22, 2023
2 parents bcbd602 + 31ae4e9

Showing 13 changed files with 409 additions and 34 deletions.
3 changes: 3 additions & 0 deletions src/aiida_quantumespresso_hp/calculations/hp.py
@@ -183,6 +183,9 @@ def define(cls, spec):
message='One of the required perturbation inputs files was not found.')
spec.exit_code(365, 'ERROR_INCORRECT_ORDER_ATOMIC_POSITIONS',
message='The atomic positions were not sorted with Hubbard sites first.')
spec.exit_code(366, 'ERROR_FERMI_SHIFT',
message=('The code failed due to Fermi shift, probably due to low energy cutoff '
'or due to an incorrect treatment of an insulating state (i.e. no smearing should be used).'))

# Significant errors but calculation can be used to restart
spec.exit_code(400, 'ERROR_OUT_OF_WALLTIME',
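For context, a minimal sketch (not part of the diff) of how a finished calculation node could be checked for the new failure mode; `pk` is a hypothetical identifier and 366 is the exit-code status registered above:

from aiida import load_profile, orm

load_profile()

node = orm.load_node(pk)  # hypothetical pk of a finished HpCalculation
if node.exit_status == 366:  # ERROR_FERMI_SHIFT, registered in this commit
    print('hp.x aborted because of the Fermi energy shift:')
    print(node.exit_message)
    # Remedies suggested by the message: raise the energy cutoff, or drop
    # smearing when the system is actually insulating, then resubmit.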
1 change: 1 addition & 0 deletions src/aiida_quantumespresso_hp/parsers/hp.py
@@ -135,6 +135,7 @@ def validate_premature_exit(self, logs):
'ERROR_COMPUTING_CHOLESKY',
'ERROR_MISSING_CHI_MATRICES',
'ERROR_INCOMPATIBLE_FFT_GRID',
'ERROR_FERMI_SHIFT',
]:
if exit_status in logs['error']:
return self.exit_codes.get(exit_status)
3 changes: 2 additions & 1 deletion src/aiida_quantumespresso_hp/parsers/parse_raw/hp.py
@@ -87,7 +87,8 @@ def detect_important_message(logs, line):
'Reconstruction problem: some chi were not found': 'ERROR_MISSING_CHI_MATRICES',
'incompatible FFT grid': 'ERROR_INCOMPATIBLE_FFT_GRID',
REG_ERROR_CONVERGENCE_NOT_REACHED: 'ERROR_CONVERGENCE_NOT_REACHED',
ERROR_POSITIONS: 'ERROR_INCORRECT_ORDER_ATOMIC_POSITIONS'
ERROR_POSITIONS: 'ERROR_INCORRECT_ORDER_ATOMIC_POSITIONS',
'WARNING: The Fermi energy shift is zero or too big!': 'ERROR_FERMI_SHIFT',
},
'warning': {
'Warning:': None,
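Illustrative only (not the actual `detect_important_message` implementation): the dictionary above acts as a substring-to-label map over stdout lines, along these lines, with the map entries taken from the diff:

from __future__ import annotations

ERROR_MAP = {
    'incompatible FFT grid': 'ERROR_INCOMPATIBLE_FFT_GRID',
    'WARNING: The Fermi energy shift is zero or too big!': 'ERROR_FERMI_SHIFT',
}

def classify_line(line: str) -> str | None:
    """Return the exit-code label whose message appears in ``line``, else ``None``."""
    for message, label in ERROR_MAP.items():
        if message in line:
            return label
    return None

assert classify_line('     WARNING: The Fermi energy shift is zero or too big!') == 'ERROR_FERMI_SHIFT'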
27 changes: 27 additions & 0 deletions src/aiida_quantumespresso_hp/utils/general.py
@@ -2,6 +2,8 @@
"""General utilies."""
from __future__ import annotations

from typing import List


def set_tot_magnetization(input_parameters: dict, tot_magnetization: float) -> bool:
"""Set the total magnetization based on its value and the input parameters.
@@ -37,3 +39,28 @@ def is_perturb_only_atom(parameters: dict) -> int | None:
break

return match


def distribute_base_workchains(n_atoms: int, n_total: int) -> List[int]:
"""Distribute the maximum number of `BaseWorkChains` to be launched.
The number of `BaseWorkChains` will be distributed over the number of atoms.
The elements of the resulting list correspond to the number of q-point
`BaseWorkChains` to be launched for each atom, in case q-point parallelization
is used. Otherwise, the method will only take care of limiting the number
of `HpParallelizeAtomsWorkChain` to be launched in parallel.
:param n_atoms: The number of atoms.
:param n_total: The number of base workchains to be launched.
:return: The number of base workchains to be launched for each atom.
"""
quotient = n_total // n_atoms
remainder = n_total % n_atoms
n_distributed = [quotient] * n_atoms

for i in range(remainder):
n_distributed[i] += 1

n_distributed = [x for x in n_distributed if x != 0]

return n_distributed
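Worked examples of the helper above (not part of the diff), showing how the remainder is spread over the first atoms and how zero entries are dropped when there are fewer work chains than atoms:

from aiida_quantumespresso_hp.utils.general import distribute_base_workchains

# 10 base work chains over 4 atoms: the remainder of 2 goes to the first two atoms.
assert distribute_base_workchains(n_atoms=4, n_total=10) == [3, 3, 2, 2]

# Only 3 work chains for 5 atoms: zero entries are filtered out, so just the
# first three atoms get a slot in this round.
assert distribute_base_workchains(n_atoms=5, n_total=3) == [1, 1, 1]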
5 changes: 5 additions & 0 deletions src/aiida_quantumespresso_hp/workflows/hp/main.py
@@ -48,6 +48,7 @@ def define(cls, spec):
'for any non-periodic directions.')
spec.input('parallelize_atoms', valid_type=orm.Bool, default=lambda: orm.Bool(False))
spec.input('parallelize_qpoints', valid_type=orm.Bool, default=lambda: orm.Bool(False))
spec.input('max_concurrent_base_workchains', valid_type=orm.Int, required=False)
spec.outline(
cls.validate_qpoints,
if_(cls.should_parallelize_atoms)(
@@ -106,6 +107,8 @@ def get_builder_from_protocol(cls, code, protocol=None, parent_scf_folder=None,
data['parallelize_atoms'] = orm.Bool(inputs['parallelize_atoms'])
if 'parallelize_qpoints' in inputs:
data['parallelize_qpoints'] = orm.Bool(inputs['parallelize_qpoints'])
if 'max_concurrent_base_workchains' in inputs:
data['max_concurrent_base_workchains'] = orm.Int(inputs['max_concurrent_base_workchains'])

builder = cls.get_builder()
builder._data = data # pylint: disable=protected-access
@@ -163,6 +166,8 @@ def run_parallel_workchain(self):
inputs.clean_workdir = self.inputs.clean_workdir
inputs.parallelize_qpoints = self.inputs.parallelize_qpoints
inputs.hp.qpoints = self.ctx.qpoints
if 'max_concurrent_base_workchains' in self.inputs:
inputs.max_concurrent_base_workchains = self.inputs.max_concurrent_base_workchains
running = self.submit(HpParallelizeAtomsWorkChain, **inputs)
self.report(f'running in parallel, launching HpParallelizeAtomsWorkChain<{running.pk}>')
return ToContext(workchain=running)
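A hedged usage sketch of the new input (the entry point name and the surrounding builder setup are assumptions; only `parallelize_atoms`, `parallelize_qpoints` and `max_concurrent_base_workchains` come from the work chain spec shown above):

from aiida import orm
from aiida.engine import submit
from aiida.plugins import WorkflowFactory

HpWorkChain = WorkflowFactory('quantumespresso.hp.main')  # assumed entry point name

builder = HpWorkChain.get_builder()
# ... populate the usual inputs here (hp.x code, q-points, parent SCF folder, ...) ...
builder.parallelize_atoms = orm.Bool(True)
builder.parallelize_qpoints = orm.Bool(True)
# New in this commit: never run more than four HpBaseWorkChains at the same time.
builder.max_concurrent_base_workchains = orm.Int(4)
submit(builder)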
34 changes: 24 additions & 10 deletions src/aiida_quantumespresso_hp/workflows/hp/parallelize_atoms.py
@@ -2,9 +2,11 @@
"""Work chain to launch a Quantum Espresso hp.x calculation parallelizing over the Hubbard atoms."""
from aiida import orm
from aiida.common import AttributeDict
from aiida.engine import WorkChain
from aiida.engine import WorkChain, while_
from aiida.plugins import CalculationFactory, WorkflowFactory

from aiida_quantumespresso_hp.utils.general import distribute_base_workchains

PwCalculation = CalculationFactory('quantumespresso.pw')
HpCalculation = CalculationFactory('quantumespresso.hp')
HpBaseWorkChain = WorkflowFactory('quantumespresso.hp.base')
@@ -21,12 +23,15 @@ def define(cls, spec):
super().define(spec)
spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir'))
spec.input('parallelize_qpoints', valid_type=orm.Bool, default=lambda: orm.Bool(False))
spec.input('max_concurrent_base_workchains', valid_type=orm.Int, required=False)
spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
help='If `True`, work directories of all called calculation will be cleaned at the end of execution.')
spec.outline(
cls.run_init,
cls.inspect_init,
cls.run_atoms,
while_(cls.should_run_atoms)(
cls.run_atoms,
),
cls.inspect_atoms,
cls.run_final,
cls.inspect_final,
@@ -66,28 +71,37 @@ def inspect_init(self):
self.report(f'initialization work chain {workchain} failed with status {workchain.exit_status}, aborting.')
return self.exit_codes.ERROR_INITIALIZATION_WORKCHAIN_FAILED

def run_atoms(self):
"""Run a separate `HpBaseWorkChain` for each of the defined Hubbard atoms."""
workchain = self.ctx.initialization

output_params = workchain.outputs.parameters.get_dict()
hubbard_sites = output_params['hubbard_sites']
self.ctx.hubbard_sites = list(output_params['hubbard_sites'].items())

def should_run_atoms(self):
"""Return whether there are more atoms to run."""
return len(self.ctx.hubbard_sites) > 0

def run_atoms(self):
"""Run a separate `HpBaseWorkChain` for each of the defined Hubbard atoms."""
parallelize_qpoints = self.inputs.parallelize_qpoints.value
workflow = HpParallelizeQpointsWorkChain if parallelize_qpoints else HpBaseWorkChain

for site_index, site_kind in hubbard_sites.items():
max_concurrent_base_workchains_sites = [-1] * len(self.ctx.hubbard_sites)
if 'max_concurrent_base_workchains' in self.inputs:
max_concurrent_base_workchains_sites = distribute_base_workchains(
len(self.ctx.hubbard_sites), self.inputs.max_concurrent_base_workchains.value
)

for max_concurrent_base_workchains_site in max_concurrent_base_workchains_sites:
site_index, site_kind = self.ctx.hubbard_sites.pop(0)
do_only_key = f'perturb_only_atom({site_index})'
key = f'atom_{site_index}'

inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
inputs.clean_workdir = self.inputs.clean_workdir
inputs.hp.parameters = inputs.hp.parameters.get_dict()
inputs.hp.parameters['INPUTHP'][do_only_key] = True
inputs.hp.parameters = orm.Dict(dict=inputs.hp.parameters)
inputs.hp.parameters = orm.Dict(inputs.hp.parameters)
inputs.metadata.call_link_label = key

if parallelize_qpoints and max_concurrent_base_workchains_site != -1:
inputs.max_concurrent_base_workchains = orm.Int(max_concurrent_base_workchains_site)
node = self.submit(workflow, **inputs)
self.to_context(**{key: node})
name = workflow.__name__
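In other words, the `while_(cls.should_run_atoms)` loop drains `self.ctx.hubbard_sites` in rounds: each `run_atoms` call computes a per-atom budget with `distribute_base_workchains` over the remaining sites, submits one child work chain per budget entry, and pops those sites from the queue; since the children are returned through `to_context`, each round finishes before the next starts. A plain-Python sketch of that control flow (site data are hypothetical):

from aiida_quantumespresso_hp.utils.general import distribute_base_workchains

hubbard_sites = [('1', 'Fe'), ('2', 'Fe'), ('5', 'Ni'), ('6', 'Ni'), ('9', 'O')]  # hypothetical
max_concurrent = 2

while hubbard_sites:                                        # should_run_atoms
    budgets = distribute_base_workchains(len(hubbard_sites), max_concurrent)
    for budget in budgets:                                  # run_atoms: one child per budget entry
        site_index, site_kind = hubbard_sites.pop(0)
        print(f'submit atom_{site_index} ({site_kind}) with q-point budget {budget}')
    # In the work chain the engine now waits for this round's children to finish.
# Five sites and max_concurrent=2 give three rounds of 2, 2 and 1 submissions.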
21 changes: 14 additions & 7 deletions src/aiida_quantumespresso_hp/workflows/hp/parallelize_qpoints.py
@@ -2,7 +2,7 @@
"""Work chain to launch a Quantum Espresso hp.x calculation parallelizing over the Hubbard atoms."""
from aiida import orm
from aiida.common import AttributeDict
from aiida.engine import WorkChain
from aiida.engine import WorkChain, while_
from aiida.plugins import CalculationFactory, WorkflowFactory

from aiida_quantumespresso_hp.utils.general import is_perturb_only_atom
@@ -29,12 +29,15 @@ def define(cls, spec):
# yapf: disable
super().define(spec)
spec.expose_inputs(HpBaseWorkChain, exclude=('only_initialization', 'clean_workdir'))
spec.input('max_concurrent_base_workchains', valid_type=orm.Int, required=False)
spec.input('clean_workdir', valid_type=orm.Bool, default=lambda: orm.Bool(False),
help='If `True`, work directories of all called calculation will be cleaned at the end of execution.')
spec.outline(
cls.run_init,
cls.inspect_init,
cls.run_qpoints,
while_(cls.should_run_qpoints)(
cls.run_qpoints,
),
cls.inspect_qpoints,
cls.run_final,
cls.results
@@ -75,14 +78,18 @@ def inspect_init(self):
self.report(f'initialization work chain {workchain} failed with status {workchain.exit_status}, aborting.')
return self.exit_codes.ERROR_INITIALIZATION_WORKCHAIN_FAILED

def run_qpoints(self):
"""Run a separate `HpBaseWorkChain` for each of the q points."""
workchain = self.ctx.initialization
self.ctx.qpoints = list(range(workchain.outputs.parameters.dict.number_of_qpoints))

number_of_qpoints = workchain.outputs.parameters.dict.number_of_qpoints
def should_run_qpoints(self):
"""Return whether there are more q points to run."""
return len(self.ctx.qpoints) > 0

for qpoint_index in range(number_of_qpoints):
def run_qpoints(self):
"""Run a separate `HpBaseWorkChain` for each of the q points."""
n_base_parallel = self.inputs.max_concurrent_base_workchains.value if 'max_concurrent_base_workchains' in self.inputs else len(self.ctx.qpoints)

for _ in self.ctx.qpoints[:n_base_parallel]:
qpoint_index = self.ctx.qpoints.pop(0)
key = f'qpoint_{qpoint_index + 1}' # to keep consistency with QE
inputs = AttributeDict(self.exposed_inputs(HpBaseWorkChain))
inputs.clean_workdir = self.inputs.clean_workdir
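The q-point work chain follows the same pattern: each `run_qpoints` call takes at most `max_concurrent_base_workchains` entries from the front of `self.ctx.qpoints` (or all of them if the input is absent), and `should_run_qpoints` keeps looping until the queue is empty. A minimal sketch of the slice-and-pop idiom (indices are illustrative):

qpoints = list(range(6))   # hypothetical q-point indices from the initialization run
n_parallel = 4             # stand-in for inputs.max_concurrent_base_workchains.value

while qpoints:                                  # should_run_qpoints
    for _ in qpoints[:n_parallel]:              # run_qpoints: one HpBaseWorkChain per entry
        qpoint_index = qpoints.pop(0)
        print(f'submit qpoint_{qpoint_index + 1}')  # +1 keeps QE's 1-based labelling
# Six q points with n_parallel=4 give two rounds of 4 and 2 submissions.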
13 changes: 13 additions & 0 deletions tests/parsers/fixtures/hp/failed_fermi_shift/aiida.in
@@ -0,0 +1,13 @@
&INPUTHP
conv_thr_chi = 1.0000000000d-06
determine_q_mesh_only = .true.
find_atpert = 3
iverbosity = 2
max_seconds = 3.4200000000d+03
nq1 = 4
nq2 = 4
nq3 = 4
outdir = 'out'
perturb_only_atom(13) = .true.
prefix = 'aiida'
/