Update to version 4.6.1 #44

Open · wants to merge 5 commits into main

Changes from all commits
2 changes: 1 addition & 1 deletion README.md
@@ -18,5 +18,5 @@ Log files and output will be saved in $HOME/reframe

## Requirements

-- Reframe 3.10.1 installed as a module
+- Reframe 4.6.1 installed as a module
- Python3
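
Note: run.sh (updated below) loads exactly this module, so running reframe -V after module load ReFrame/4.6.1 and checking that it reports 4.6.1 is a quick way to confirm the requirement is met.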
185 changes: 99 additions & 86 deletions config_vsc.py
@@ -2,44 +2,17 @@
import os
from py import builtin


antwerpen_mode_options = [
'--exec-policy=async',
'--output=/apps/antwerpen/reframe/logs/output/',
'--perflogdir=/apps/antwerpen/reframe/logs/',
'--stage=/apps/antwerpen/reframe/logs/stage/',
'--report-file=/apps/antwerpen/reframe/logs/reports/last-$VSC_INSTITUTE_CLUSTER.json',
'--compress-report',
'--nocolor']

perf_logging_format = [
'{"username": "%(osuser)s"',
'"version": "%(version)s"',
'"name": "%(check_name)s"',
'"system": "%(check_system)s"',
'"partition": "%(check_partition)s"',
'"environ": "%(check_environ)s"',
'"nodelist": "%(check_job_nodelist)s"',
'"num_tasks": "%(check_num_tasks)s"',
'"num_cpus_per_task": "%(check_num_cpus_per_task)s"',
'"num_tasks_per_node": "%(check_num_tasks_per_node)s"',
'"modules": "%(check_modules)s"',
'"jobid": "%(check_jobid)s"',
'"perf_var": "%(check_perf_var)s"',
'"perf_value": "%(check_perf_value)s"',
'"unit": "%(check_perf_unit)s"',
'"description": "%(check_descr)s"',
'"job_completion_time": "%(check_job_completion_time)s"',
'"check_result": "%(check_result)s"',
]

logging_format = perf_logging_format + ['"message": "%(message)s"', '"time": "%(asctime)s"}']
perf_logging_format[-1] += '}'
# use 'info' to log to syslog
syslog_level = 'warning'

# To run jobs on the kul cluster, you need to be a member of the following
# vsc group
kul_account_string_tier2 = '-A lpt2_vsc_test_suite'

# To run jobs on the calcua cluster, you need to be a member of the following
# vsc group
calcua_account_string_tier2 = '-A ap_calcua_staff'

# By default, not all installed modules are visible on the genius cluster
genius_modulepath = []
for version in ['2018a', '2019b', '2021a']:
@@ -74,12 +47,12 @@
'launcher': 'local',
},
{
-'name': 'single-node',
+'name': 'default-node',
'scheduler': 'slurm',
'modules': [],
'access': [],
'environs': ['standard'],
-'descr': 'single-node jobs',
+'descr': 'default-node jobs',
'max_jobs': 1,
'launcher': 'local',
},
@@ -111,12 +84,12 @@
'launcher': 'local',
},
{
-'name': 'single-node',
+'name': 'default-node',
'scheduler': 'slurm',
'modules': [],
'access': [hortense_access_flag],
'environs': ['standard'],
-'descr': 'single-node jobs',
+'descr': 'default-node jobs',
'max_jobs': 1,
'launcher': 'local',
},
@@ -153,12 +126,12 @@
'env_vars': [['MODULEPATH', ':'.join(genius_modulepath)]],
},
{
-'name': 'single-node',
+'name': 'default-node',
'scheduler': 'torque',
'modules': [],
'access': [kul_account_string_tier2],
'environs': ['standard'],
-'descr': 'single-node jobs',
+'descr': 'default-node jobs',
'max_jobs': 1,
'launcher': 'local',
'env_vars': [['MODULEPATH', ':'.join(genius_modulepath)]],
@@ -178,7 +151,7 @@
{
'name': 'vaughan',
'descr': 'VSC Tier-2 Vaughan',
-'hostnames': ['login[1-2].vaughan'],
+'hostnames': ['.*vaughan'],
'modules_system': 'lmod',
'partitions': [
{
@@ -188,26 +161,46 @@
'access': [],
'environs': ['standard'],
'descr': 'tests in the local node (no job)',
-'max_jobs': 1,
+'max_jobs': 10,
'launcher': 'local',
},
{
-'name': 'single-node',
+'name': 'default-node',
'scheduler': 'slurm',
'modules': [],
-'access': [],
+'access': [calcua_account_string_tier2],
'environs': ['standard'],
-'descr': 'single-node jobs',
-'max_jobs': 1,
+'descr': 'default-node jobs',
+'max_jobs': 10,
'launcher': 'local',
},
-{
+{
+'name': 'zen2',
+'scheduler': 'slurm',
+'modules': [],
+'access': [calcua_account_string_tier2, '-p zen2'],
+'environs': ['standard'],
+'descr': 'default-node jobs',
+'max_jobs': 10,
+'launcher': 'local',
+},
+{
+'name': 'zen3',
+'scheduler': 'slurm',
+'modules': [],
+'access': [calcua_account_string_tier2, '-p zen3'],
+'environs': ['standard'],
+'descr': 'default-node jobs',
+'max_jobs': 10,
+'launcher': 'local',
+},
+{
'name': 'mpi-job',
'scheduler': 'slurm',
-'access': [],
+'access': [calcua_account_string_tier2],
'environs': ['intel-2021a'],
'descr': 'MPI jobs',
-'max_jobs': 1,
+'max_jobs': 10,
# TODO Here we actually want to set vsc-mympirun, but since
# this is a custom launcher not shipped with ReFrame, we
# can only do this in the test itself after registering the
@@ -217,15 +210,15 @@
{
'name': 'nvidia',
'scheduler': 'slurm',
-'access': ['-p ampere_gpu'],
+'access': [calcua_account_string_tier2, '-p ampere_gpu'],
'environs': ['CUDA', 'standard'],
'descr': 'Nvidia ampere node',
-'max_jobs': 1,
+'max_jobs': 10,
'launcher': 'srun',
'resources': [
{
'name': 'gpu',
-'options': ['--gres=gpu:{num_gpus}'],
+'options': ['--gpus-per-node={num_gpus}'],
},
]
}
@@ -234,7 +227,7 @@
{
'name': 'leibniz',
'descr': 'VSC Tier-2 Leibniz',
-'hostnames': ['login[1-2].leibniz'],
+'hostnames': ['.*leibniz'],
'modules_system': 'lmod',
'partitions': [
{
@@ -244,26 +237,26 @@
'access': [],
'environs': ['standard'],
'descr': 'tests in the local node (no job)',
-'max_jobs': 1,
+'max_jobs': 10,
'launcher': 'local',
},
{
-'name': 'single-node',
+'name': 'default-node',
'scheduler': 'slurm',
'modules': [],
-'access': [],
+'access': [calcua_account_string_tier2],
'environs': ['standard'],
-'descr': 'single-node jobs',
-'max_jobs': 1,
+'descr': 'default-node jobs',
+'max_jobs': 10,
'launcher': 'local',
},
{
'name': 'mpi-job',
'scheduler': 'slurm',
-'access': [],
+'access': [calcua_account_string_tier2],
'environs': ['intel-2021a'],
'descr': 'MPI jobs',
-'max_jobs': 1,
+'max_jobs': 10,
# TODO Here we actually want to set vsc-mympirun, but since
# this is a custom launcher not shipped with ReFrame, we
# can only do this in the test itself after registering the
@@ -273,20 +266,61 @@
{
'name': 'nvidia',
'scheduler': 'slurm',
-'access': ['-p pascal_gpu'],
+'access': [calcua_account_string_tier2, '-p pascal_gpu'],
'environs': ['CUDA', 'standard'],
'descr': 'Nvidia pascal nodes',
-'max_jobs': 2,
+'max_jobs': 5,
'launcher': 'srun',
'resources': [
{
'name': 'gpu',
-'options': ['--gres=gpu:{num_gpus}'],
+'options': ['--gpus-per-node={num_gpus}'],
},
]
}
]
},
+{
+'name': 'breniac',
+'descr': 'VSC Tier-2 Breniac',
+'hostnames': ['.*breniac'],
+'modules_system': 'lmod',
+'partitions': [
+{
+'name': 'local',
+'scheduler': 'local',
+'modules': [],
+'access': [],
+'environs': ['standard'],
+'descr': 'tests in the local node (no job)',
+'max_jobs': 10,
+'launcher': 'local',
+},
+{
+'name': 'default-node',
+'scheduler': 'slurm',
+'modules': [],
+'access': [calcua_account_string_tier2],
+'environs': ['standard'],
+'descr': 'default-node jobs',
+'max_jobs': 10,
+'launcher': 'local',
+},
+{
+'name': 'mpi-job',
+'scheduler': 'slurm',
+'access': [calcua_account_string_tier2],
+'environs': ['intel-2021a'],
+'descr': 'MPI jobs',
+'max_jobs': 10,
+# TODO Here we actually want to set vsc-mympirun, but since
+# this is a custom launcher not shipped with ReFrame, we
+# can only do this in the test itself after registering the
+# vsc-mympirun launcher
+'launcher': 'srun',
+},
+]
+},
],
'environments': [
{
@@ -296,7 +330,7 @@
'ftn': 'mpif90', 'modules': ['foss/2021a'],},
{
'name': 'intel-2021a',
-'modules': ['intel'],
+'modules': ['intel/2021a'],
'cc': 'mpiicc',
'cxx': 'mpiicpc',
'ftn': 'mpiifort',
@@ -311,42 +345,21 @@
],
'general': [
{
-'purge_environment': True,
+'purge_environment': False,
'resolve_module_conflicts': False, # avoid loading the module before submitting the job
'keep_stage_files': True,
}
],
'logging': [
{
'level': 'debug',
'handlers': [
{
'type': 'file',
'name': 'reframe.log',
'level': 'debug',
'format': '[%(asctime)s] %(levelname)s: %(check_name)s: %(message)s', # noqa: E501
'append': False,
},
{
'type': 'stream',
'name': 'stdout',
'level': 'info',
'format': '%(message)s',
},
{
'type': 'file',
'name': 'reframe.out',
'level': 'info',
'format': '%(message)s',
'append': False,
},
],
}
],
'modes': [
{
'name': 'UAstandard',
'options': antwerpen_mode_options,
},
]
}
}
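
The TODO comments in the mpi-job partitions point at ReFrame's launcher registry: vsc-mympirun is a site-specific launcher that ReFrame does not ship, so it has to be registered before a test can select it. A minimal sketch of what that registration might look like under ReFrame 4.x, assuming the registry decorator in reframe.core.backends (the module and class names are illustrative, not part of this PR):

# custom_launchers.py -- hypothetical helper module, not included in this PR
from reframe.core.backends import register_launcher
from reframe.core.launchers import JobLauncher


@register_launcher('mympirun')
class MympirunLauncher(JobLauncher):
    '''Launch MPI programs through vsc-mympirun instead of srun.'''

    def command(self, job):
        # vsc-mympirun derives rank counts and pinning from the Slurm
        # environment, so no explicit -np option is passed here.
        return ['mympirun']

Once such a module is imported, a test can swap launchers in a post-setup hook, e.g. self.job.launcher = getlauncher('mympirun')(), with getlauncher also coming from reframe.core.backends. The reworked 'gpu' resource is consumed the same way from the test side: setting extra_resources = {'gpu': {'num_gpus': 2}} in a test should make ReFrame emit --gpus-per-node=2 on the nvidia partitions.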
4 changes: 2 additions & 2 deletions run.sh
@@ -1,4 +1,4 @@
-module load ReFrame/4.2.0
+module load ReFrame/4.6.1

export RFM_CONFIG_FILES=$(dirname $0)/config_vsc.py
export RFM_CHECK_SEARCH_PATH=$(dirname $0)/tests
@@ -7,5 +7,5 @@ export RFM_PREFIX=$VSC_SCRATCH/reframe
export RFM_CHECK_SEARCH_RECURSIVE=true
export RFM_SAVE_LOG_FILES=true

-reframe --run "$@"
+reframe --keep-stage-files --run "$@"
#rm $(dirname $0)/reframe.out $(dirname $0)/reframe.log
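
Since run.sh forwards its arguments to reframe via "$@", a typical invocation after this update might look like ./run.sh --mode UAstandard --system vaughan:default-node (hypothetical example; the UAstandard mode applies the antwerpen_mode_options defined in config_vsc.py).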