diff --git a/experiments/maestrowf/mpi-only/execute_experiment.tpl b/experiments/maestrowf/mpi-only/execute_experiment.tpl
new file mode 100755
index 000000000..cda7e5bcb
--- /dev/null
+++ b/experiments/maestrowf/mpi-only/execute_experiment.tpl
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Copyright 2023 Lawrence Livermore National Security, LLC and other
+# Benchpark Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+{batch_nodes}
+{batch_ranks}
+{batch_timeout}
+
+cd {experiment_run_dir}
+
+{spack_setup}
+
+{experiment_setup}
+
+{command}
diff --git a/experiments/maestrowf/mpi-only/ramble.yaml b/experiments/maestrowf/mpi-only/ramble.yaml
new file mode 100644
index 000000000..5415950d0
--- /dev/null
+++ b/experiments/maestrowf/mpi-only/ramble.yaml
@@ -0,0 +1,66 @@
+# Copyright 2023 Lawrence Livermore National Security, LLC and other
+# Benchpark Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+ramble:
+  include:
+  - ./configs/spack.yaml
+  - ./configs/variables.yaml
+
+  config:
+    deprecated: true
+    spack_flags:
+      install: '--add --keep-stage'
+      concretize: '-U -f'
+
+  applications:
+    maestrowf:
+      workloads:
+        problem1:
+          env_vars:
+            set:
+              OMP_NUM_THREADS: '{omp_num_threads}'
+          variables:
+            n_ranks: '{processes_per_node} * {n_nodes}'
+            p: 2
+            px: '{p}'
+            py: '{p}'
+            pz: '{p}'
+            n: ['55', '110']
+            nx: '{n}'
+            ny: '{n}'
+            nz: '{n}'
+            experiment_setup: ''
+            processes_per_node: ['8', '4']
+            n_nodes: ['1', '2']
+            threads_per_node_core: ['8', '10', '13'] #TODO: Specify n_threads according to available n_nodes and n_ranks
+            omp_num_threads: '{threads_per_node_core} * {n_nodes}'
+          experiments:
+            maestrowf_mpi_only_problem1_{n_nodes}_{omp_num_threads}_{px}_{py}_{pz}_{nx}_{ny}_{nz}:
+              variables:
+                env_name: maestrowf-mpi-only
+              matrices:
+              - size_threads:
+                - n # TODO: Filter matrix
+                - threads_per_node_core # TODO: Filter matrix
+  spack:
+    concretized: true
+    packages:
+      maestrowf:
+        spack_spec: py-maestrowf@develop
+        compiler: default-compiler
+      hypre-omp:
+        spack_spec: hypre@2.28.0 +mpi+openmp+mixedint
+        compiler: default-compiler
+      amg2023-omp:
+        spack_spec: amg2023@develop +mpi+openmp
+        compiler: default-compiler
+    environments:
+      amg2023-omp:
+        packages:
+        - lapack
+        - default-mpi
+        - hypre-omp
+        - amg2023-omp
+        - maestrowf
diff --git a/repo/maestrowf/application.py b/repo/maestrowf/application.py
new file mode 100644
index 000000000..ff288f799
--- /dev/null
+++ b/repo/maestrowf/application.py
@@ -0,0 +1,57 @@
+# Copyright 2023 Lawrence Livermore National Security, LLC and other
+# Benchpark Project Developers. See the top-level COPYRIGHT file for details.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from ramble.appkit import *
+
+import sys
+
+class maestrowf(SpackApplication):
+    """Maestrowf benchmark"""
+    name = "maestrowf"
+
+    tags = ["maestrowf"]
+
+    executable('p1', 'maestro' +
+               ' -P {px} {py} {pz}' +
+               ' -n {nx} {ny} {nz}' +
+               ' -problem 1' +
+               ' -keepT', use_mpi=True)
+
+    executable('p2', 'maestro' +
+               ' -P {px} {py} {pz}' +
+               ' -n {nx} {ny} {nz}' +
+               ' -problem 2' +
+               ' -keepT', use_mpi=True)
+
+    workload('problem1', executables=['p1'])
+    workload('problem2', executables=['p2'])
+
+    workload_variable('px', default='2',
+                      description='px',
+                      workloads=['problem1', 'problem2'])
+    workload_variable('py', default='2',
+                      description='py',
+                      workloads=['problem1', 'problem2'])
+    workload_variable('pz', default='2',
+                      description='pz',
+                      workloads=['problem1', 'problem2'])
+    workload_variable('nx', default='220',
+                      description='nx',
+                      workloads=['problem1', 'problem2'])
+    workload_variable('ny', default='220',
+                      description='ny',
+                      workloads=['problem1', 'problem2'])
+    workload_variable('nz', default='220',
+                      description='nz',
+                      workloads=['problem1', 'problem2'])
+
+    figure_of_merit('Figure of Merit (FOM)', log_file='{experiment_run_dir}/{experiment_name}.out', fom_regex=r'Figure of Merit \(FOM\):\s+(?P<fom>[0-9]+\.[0-9]*([eE][+-]?[0-9]+)?)', group_name='fom', units='')
+
+    #TODO: Fix the FOM success_criteria(...)
+    success_criteria('pass', mode='string', match=r'Figure of Merit \(FOM\)', file='{experiment_run_dir}/{experiment_name}.out')
+
+    # Evaluate the success
+    def evaluate_success(self):
+        return True