Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Adding a Maestro workflow #86

Closed
wants to merge 7 commits into the base branch
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 18 additions & 0 deletions experiments/maestrowf/mpi-only/execute_experiment.tpl
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
#!/bin/bash
#
# Copyright 2023 Lawrence Livermore National Security, LLC and other
# Benchpark Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: Apache-2.0

# Experiment execution template: every {name} token below is a ramble
# template placeholder, substituted when the experiment is rendered.
# This file is not a runnable script until that substitution happens.

# Batch-scheduler directives (node count, rank count, walltime limit).
{batch_nodes}
{batch_ranks}
{batch_timeout}

# Run from the experiment's working directory.
cd {experiment_run_dir}

# Bring the Spack-provided environment into scope.
{spack_setup}

# Per-experiment setup hook (may be empty; see ramble.yaml's
# experiment_setup variable).
{experiment_setup}

# The benchmark command itself.
{command}
66 changes: 66 additions & 0 deletions experiments/maestrowf/mpi-only/ramble.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
# Copyright 2023 Lawrence Livermore National Security, LLC and other
# Benchpark Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: Apache-2.0

# Ramble experiment description for the maestrowf (Maestro-driven AMG2023)
# mpi-only experiment. Structure reconstructed to valid YAML; two fixes:
#   - `applications` was missing its colon (invalid YAML mapping key)
#   - `PyMaestrowf@develop` is not a legal Spack spec (package names are
#     lowercase); the PyPI/Spack package is `py-maestrowf`
ramble:
  include:
  - ./configs/spack.yaml
  - ./configs/variables.yaml

  config:
    deprecated: true
    spack_flags:
      install: '--add --keep-stage'
      concretize: '-U -f'

  applications:
    maestrowf:
      workloads:
        problem1:
          env_vars:
            set:
              OMP_NUM_THREADS: '{omp_num_threads}'
          variables:
            n_ranks: '{processes_per_node} * {n_nodes}'
            p: 2
            px: '{p}'
            py: '{p}'
            pz: '{p}'
            n: ['55', '110']
            nx: '{n}'
            ny: '{n}'
            nz: '{n}'
            experiment_setup: ''
            processes_per_node: ['8', '4']
            n_nodes: ['1', '2']
            threads_per_node_core: ['8', '10', '13']  # TODO: Specify n_threads according to available n_nodes and n_ranks
            omp_num_threads: '{threads_per_node_core} * {n_nodes}'
          experiments:
            maestrowf_mpi_only_problem1_{n_nodes}_{omp_num_threads}_{px}_{py}_{pz}_{nx}_{ny}_{nz}:
              variables:
                env_name: maestrowf-mpi-only
              matrices:
              - size_threads:
                - n                      # TODO: Filter matrix
                - threads_per_node_core  # TODO: Filter matrix

  spack:
    concretized: true
    packages:
      maestrowf:
        spack_spec: py-maestrowf@develop
        compiler: default-compiler
      hypre-omp:
        spack_spec: hypre@2.28.0 +mpi+openmp+mixedint
        compiler: default-compiler
      amg2023-omp:
        spack_spec: amg2023@develop +mpi+openmp
        compiler: default-compiler
    environments:
      amg2023-omp:
        packages:
        - lapack
        - default-mpi
        - hypre-omp
        - amg2023-omp
        - maestrowf
57 changes: 57 additions & 0 deletions repo/maestrowf/application.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
# Copyright 2023 Lawrence Livermore National Security, LLC and other
# Benchpark Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: Apache-2.0

from ramble.appkit import *

import sys

class maestrowf(SpackApplication):
    """Maestro-driven AMG2023 benchmark application for ramble.

    Registers two workloads (``problem1`` and ``problem2``) that run the
    ``maestro`` executable under MPI with a px*py*pz process grid and an
    nx*ny*nz per-process problem size, and extracts the benchmark's
    Figure of Merit from the experiment's stdout log.
    """
    name = "maestrowf"

    tags = ["maestrowf"]

    # Problem 1 invocation; {px}/{py}/{pz} and {nx}/{ny}/{nz} are expanded
    # by ramble from the workload variables declared below.
    executable('p1', 'maestro' +
               ' -P {px} {py} {pz}' +
               ' -n {nx} {ny} {nz}' +
               ' -problem 1' +
               ' -keepT', use_mpi=True)

    # Problem 2 invocation — identical except for the -problem selector.
    executable('p2', 'maestro' +
               ' -P {px} {py} {pz}' +
               ' -n {nx} {ny} {nz}' +
               ' -problem 2' +
               ' -keepT', use_mpi=True)

    workload('problem1', executables=['p1'])
    workload('problem2', executables=['p2'])

    # Process-grid dimensions (number of MPI ranks per axis).
    workload_variable('px', default='2',
                      description='px',
                      workloads=['problem1', 'problem2'])
    workload_variable('py', default='2',
                      description='py',
                      workloads=['problem1', 'problem2'])
    workload_variable('pz', default='2',
                      description='pz',
                      workloads=['problem1', 'problem2'])
    # Per-process problem-size dimensions.
    workload_variable('nx', default='220',
                      description='nx',
                      workloads=['problem1', 'problem2'])
    workload_variable('ny', default='220',
                      description='ny',
                      workloads=['problem1', 'problem2'])
    workload_variable('nz', default='220',
                      description='nz',
                      workloads=['problem1', 'problem2'])

    # FIX: the exponent part of the FOM pattern was `(e^[0-9]*)?`, which can
    # never match C/printf-style scientific notation such as `2.5e+07`
    # (`%e` emits `e`/`E` followed by a signed exponent, never `e^`), so any
    # FOM printed in exponent form was captured with its exponent dropped.
    figure_of_merit('Figure of Merit (FOM)',
                    log_file='{experiment_run_dir}/{experiment_name}.out',
                    fom_regex=r'Figure of Merit \(FOM\):\s+(?P<fom>[0-9]+\.[0-9]*([eE][+-]?[0-9]+)?)',
                    group_name='fom', units='')

    #TODO: Fix the FOM success_criteria(...)
    # Success is currently just "the FOM line appeared in the output".
    success_criteria('pass', mode='string',
                     match=r'Figure of Merit \(FOM\)',
                     file='{experiment_run_dir}/{experiment_name}.out')

    # Evaluate the success
    def evaluate_success(self):
        """Always report success; real validation is deferred (see TODO)."""
        return True