diff --git a/.github/workflows/test_eessi.yml b/.github/workflows/test_eessi.yml index 92b1f71cad..04195dd619 100644 --- a/.github/workflows/test_eessi.yml +++ b/.github/workflows/test_eessi.yml @@ -40,4 +40,33 @@ jobs: export EESSI_OS_TYPE=linux export EESSI_SOFTWARE_SUBDIR=${{matrix.EESSI_SOFTWARE_SUBDIR}} env | grep ^EESSI | sort + echo "just run check_missing_installations.sh (should use eessi-${{matrix.EESSI_VERSION}}.yml)" ./check_missing_installations.sh + + - name: Test check_missing_installations.sh with missing package (GCC/8.3.0) + run: | + source /cvmfs/pilot.eessi-hpc.org/versions/${{matrix.EESSI_VERSION}}/init/bash + module load EasyBuild + eb --version + export EESSI_PREFIX=/cvmfs/pilot.eessi-hpc.org/versions/${{matrix.EESSI_VERSION}} + export EESSI_OS_TYPE=linux + export EESSI_SOFTWARE_SUBDIR=${{matrix.EESSI_SOFTWARE_SUBDIR}} + env | grep ^EESSI | sort + echo "modify eessi-${{matrix.EESSI_VERSION}}.yml by adding a missing package (GCC/8.3.0)" + echo " GCC:" >> eessi-${{matrix.EESSI_VERSION}}.yml + echo " toolchains:" >> eessi-${{matrix.EESSI_VERSION}}.yml + echo " SYSTEM:" >> eessi-${{matrix.EESSI_VERSION}}.yml + echo " versions: '8.3.0'" >> eessi-${{matrix.EESSI_VERSION}}.yml + tail -n 4 eessi-${{matrix.EESSI_VERSION}}.yml + # note, check_missing_installations.sh exits 1 if a package was + # missing, which is intepreted as false (exit code based, not + # boolean logic), hence when the script exits 0 if no package was + # missing it is interpreted as true, thus the test did not capture + # the missing package + if ./check_missing_installations.sh; then + echo "did NOT capture missing package; test FAILED" + exit 1 + else + echo "captured missing package; test PASSED" + exit 0 + fi diff --git a/.github/workflows/test_eessi_container_script.yml b/.github/workflows/test_eessi_container_script.yml new file mode 100644 index 0000000000..929fb22cec --- /dev/null +++ b/.github/workflows/test_eessi_container_script.yml @@ -0,0 +1,135 @@ +# documentation: https://help.github.com/en/articles/workflow-syntax-for-github-actions +name: Tests for eessi_container.sh script +on: [push, pull_request, workflow_dispatch] +permissions: + contents: read # to fetch code (actions/checkout) +jobs: + eessi_container_script: + runs-on: ubuntu-20.04 + strategy: + fail-fast: false + matrix: + SCRIPT_TEST: + - help + - listrepos_default + - listrepos_custom + - run + - shell + - container + - resume + # FIXME disabled because '--access rw' is not working in CI environment + #- readwrite + #- save + steps: + - name: Check out software-layer repository + uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + + - name: install Apptainer + run: | + ./install_apptainer_ubuntu.sh + + - name: Collect info on test environment + run: | + mount + df -h + + - name: Test eessi_container.sh script + run: | + test_cmd="cat /etc/os-release" + out_pattern="Debian GNU/Linux 11" + + if [[ ${{matrix.SCRIPT_TEST}} == 'help' ]]; then + ./eessi_container.sh --help + + # test use of --list-repos without custom repos.cfg + elif [[ ${{matrix.SCRIPT_TEST}} == 'listrepos_default' ]]; then + outfile=out_listrepos.txt + ./eessi_container.sh --verbose --list-repos | tee ${outfile} + grep "EESSI-pilot" ${outfile} + + # test use of --list-repos with custom repos.cfg + elif [[ ${{matrix.SCRIPT_TEST}} == 'listrepos_custom' ]]; then + outfile=out_listrepos.txt + outfile2=out_listrepos_2.txt + mkdir -p ${PWD}/cfg + echo "[EESSI/20AB.CD]" > cfg/repos.cfg + echo "repo_version = 20AB.CD" >> cfg/repos.cfg + echo 
"[EESSI/20HT.TP]" >> cfg/repos.cfg + echo "repo_version = 20HT.TP" >> cfg/repos.cfg + ./eessi_container.sh --verbose --list-repos | tee ${outfile} + grep "EESSI-pilot" ${outfile} + + export EESSI_REPOS_CFG_DIR_OVERRIDE=${PWD}/cfg + ./eessi_container.sh --verbose --list-repos | tee ${outfile2} + grep "[EESSI/2023.02]" ${outfile2} + + # test use of --mode run + elif [[ ${{matrix.SCRIPT_TEST}} == 'run' ]]; then + outfile=out_run.txt + echo "${test_cmd}" > test_script.sh + chmod u+x test_script.sh + export SINGULARITY_BIND="$PWD:/test" + ./eessi_container.sh --verbose --mode run /test/test_script.sh | tee ${outfile} + grep "${out_pattern}" ${outfile} + + # test use of --mode shell + elif [[ ${{matrix.SCRIPT_TEST}} == 'shell' ]]; then + outfile=out_shell.txt + ./eessi_container.sh --verbose --mode shell <<< "${test_cmd}" 2>&1 | tee ${outfile} + grep "${out_pattern}" ${outfile} + + # test use of --container option, using a totally different container; + # cfr. https://github.com/easybuilders/easybuild-containers + elif [[ ${{matrix.SCRIPT_TEST}} == 'container' ]]; then + outfile=out_container.txt + container="docker://ghcr.io/eessi/build-node:debian10" + ./eessi_container.sh --verbose --container ${container} --mode shell <<< "${test_cmd}" 2>&1 | tee ${outfile} + grep "Debian GNU/Linux 10" ${outfile} + + # test use of '--access rw' to get write access in container + elif [[ ${{matrix.SCRIPT_TEST}} == 'readwrite' ]]; then + outfile=out_readwrite.txt + fn="test_${RANDOM}.txt" + echo "touch /cvmfs/pilot.eessi-hpc.org/${fn}" > test_script.sh + chmod u+x test_script.sh + export SINGULARITY_BIND="$PWD:/test" + ./eessi_container.sh --verbose --access rw --mode run /test/test_script.sh > ${outfile} + + tmpdir=$(grep "\-\-resume" ${outfile} | sed "s/.*--resume \([^']*\).*/\1/g") + # note: must use '--access rw' again here, since touched file is in overlay upper dir + ./eessi_container.sh --verbose --resume ${tmpdir} --access rw --mode shell <<< "ls -l /cvmfs/pilot.eessi-hpc.org/${fn}" > ${outfile} + grep "/cvmfs/pilot.eessi-hpc.org/${fn}$" $outfile + + # test use of --resume + elif [[ ${{matrix.SCRIPT_TEST}} == 'resume' ]]; then + outfile=out_resume.txt + ./eessi_container.sh --verbose --mode shell <<< "${test_cmd}" > ${outfile} + + tmpdir=$(grep "\-\-resume" ${outfile} | sed "s/.*--resume \([^']*\).*/\1/g") + rm -f ${outfile} + + # make sure that container image exists + test -f ${tmpdir}/ghcr.io_eessi_build_node_debian11.sif || (echo "Container image not found in ${tmpdir}" >&2 && ls ${tmpdir} && exit 1) + + ./eessi_container.sh --verbose --resume ${tmpdir} --mode shell <<< "${test_cmd}" > ${outfile} + cat ${outfile} + grep "Resuming from previous run using temporary storage at ${tmpdir}" ${outfile} + grep "${out_pattern}" ${outfile} + + # test use of --save (+ --resume) + elif [[ ${{matrix.SCRIPT_TEST}} == 'save' ]]; then + outfile=out_save.txt + fn="test_${RANDOM}.txt" + test_cmd="touch /cvmfs/pilot.eessi-hpc.org/${fn}" + ./eessi_container.sh --verbose --mode shell --access rw --save test-save.tar <<< "${test_cmd}" 2>&1 | tee ${outfile} + rm -f ${outfile} + + ./eessi_container.sh --verbose --mode shell --access rw --resume test-save.tar <<< "ls -l /cvmfs/pilot.eessi-hpc.org/${fn}" > ${outfile} + grep "/cvmfs/pilot.eessi-hpc.org/${fn}$" $outfile + + tar tfv test-save.tar | grep "overlay-upper/${fn}" + + else + echo "Unknown test case: ${{matrix.SCRIPT_TEST}}" >&2 + exit 1 + fi diff --git a/.github/workflows/tests_archdetect.yml b/.github/workflows/tests_archdetect.yml index 45c835a165..46c8e9007d 
100644 --- a/.github/workflows/tests_archdetect.yml +++ b/.github/workflows/tests_archdetect.yml @@ -14,15 +14,16 @@ jobs: - x86_64/amd/zen2/Azure-CentOS7-7V12 - x86_64/amd/zen3/Azure-CentOS7-7V73X - ppc64le/power9le/unknown-power9le - - aarch64/arm/neoverse-n1/Azure-Ubuntu20-Altra - - aarch64/arm/neoverse-n1/AWS-awslinux-graviton2 - - aarch64/arm/neoverse-v1/AWS-awslinux-graviton3 - - aarch64/arm/cortex-a72/debian-rpi4 + - aarch64/neoverse_n1/Azure-Ubuntu20-Altra + - aarch64/neoverse_n1/AWS-awslinux-graviton2 + - aarch64/neoverse_v1/AWS-awslinux-graviton3 + - aarch64/cortex-a72/debian-rpi4 fail-fast: false steps: - name: checkout uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 - + - name: Enable EESSI + uses: eessi/github-action-eessi@58b50fd2eead2162c2b9ac258d4fb60cc9f30503 # v2.0.13 - name: test eessi_archdetect.sh run: | export EESSI_MACHINE_TYPE=${{matrix.proc_cpuinfo}} @@ -35,3 +36,15 @@ jobs: echo "Test for ${{matrix.proc_cpuinfo}} FAILED: $CPU_ARCH" >&2 exit 1 fi + CPU_ARCHES=$(./init/eessi_archdetect.sh -a cpupath) + if [[ $CPU_ARCHES == "$( cat ./tests/archdetect/${{matrix.proc_cpuinfo}}.all.output )" ]]; then + echo "Test for ${{matrix.proc_cpuinfo}} PASSED: $CPU_ARCHES" >&2 + else + echo "Test for ${{matrix.proc_cpuinfo}} FAILED: $CPU_ARCHES" >&2 + exit 1 + fi + # Check all those architectures actually exist + for dir in $(echo "$CPU_ARCHES" | tr ':' '\n'); do + # Search all EESSI versions as we may drop support at some point + ls -d "$EESSI_PREFIX"/../*/software/linux/"$dir" + done diff --git a/.github/workflows/tests_readme.yml b/.github/workflows/tests_readme.yml new file mode 100644 index 0000000000..5c6d0318d4 --- /dev/null +++ b/.github/workflows/tests_readme.yml @@ -0,0 +1,32 @@ +# documentation: https://help.github.com/en/articles/workflow-syntax-for-github-actions +name: Tests for consistency of README.md +on: + push: + paths: + - README.md + - init/eessi_defaults + + pull_request: + branches: + - main + paths: + - README.md + - init/eessi_defaults +permissions: + contents: read # to fetch code (actions/checkout) +jobs: + build: + runs-on: ubuntu-20.04 + steps: + - name: Check out software-layer repository + uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + + - name: verify if README.md is consistent with EESSI_PILOT_VERSION from init/eessi_defaults + run: | + source init/eessi_defaults + grep "${EESSI_PILOT_VERSION}" README.md + + - name: verify if README.md is consistent with EESSI_CVMFS_REPO from init/eessi_defaults + run: | + source init/eessi_defaults + grep "${EESSI_CVMFS_REPO}" README.md diff --git a/.github/workflows/tests_scripts.yml b/.github/workflows/tests_scripts.yml index 9c4975c381..5c0b3893ae 100644 --- a/.github/workflows/tests_scripts.yml +++ b/.github/workflows/tests_scripts.yml @@ -4,9 +4,12 @@ on: push: paths: - build_container.sh + - create_directory_tarballs.sh + - EESSI-pilot-install-software.sh - install_software_layer.sh + - load_easybuild_module.sh - run_in_compat_layer_env.sh - - utils.sh + - scripts/utils.sh - update_lmod_cache.sh pull_request: @@ -14,9 +17,12 @@ on: - main paths: - build_container.sh + - create_directory_tarballs.sh + - EESSI-pilot-install-software.sh - install_software_layer.sh + - load_easybuild_module.sh - run_in_compat_layer_env.sh - - utils.sh + - scripts/utils.sh - update_lmod_cache.sh permissions: contents: read # to fetch code (actions/checkout) @@ -27,18 +33,48 @@ jobs: - name: checkout uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 - # 
see https://github.com/apptainer/singularity/issues/5390#issuecomment-899111181 - name: install Apptainer run: | - sudo apt-get install alien - alien --version - apptainer_rpm=$(curl --silent -L https://dl.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/a/ | grep 'apptainer-[0-9]' | sed 's/.*\(apptainer[0-9._a-z-]*.rpm\).*/\1/g') - curl -OL https://dl.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/a/$apptainer_rpm - sudo alien -d $apptainer_rpm - sudo apt install ./apptainer*.deb - apptainer --version - # also check whether 'singularity' command is still provided by Apptainer installation - singularity --version + ./install_apptainer_ubuntu.sh + + - name: test load_easybuild_module.sh script + run: | + # bind current directory into container as /software-layer + export SINGULARITY_BIND="${PWD}:/software-layer" + + for EB_VERSION in '4.5.0' '4.5.1' '4.7.2'; do + # Create script that uses load_easybuild_module.sh which we can run in compat layer environment + # note: Be careful with single vs double quotes below! + # ${EB_VERSION} should be expanded, so use double quotes; + # For statements using variables that are only defined in the script, like ${EASYBUILD_INSTALLPATH}, + # use single quotes to avoid expansion while creating the script. + test_script="${PWD}/eb-${EB_VERSION}.sh" + echo '#!/bin/bash' > ${test_script} + # both $EB and $TMPDIR environment must be set, required by load_easybuild_module.sh script + echo 'export EB="eb"' >> ${test_script} + echo 'export TMPDIR=$(mktemp -d)' >> ${test_script} + # set up environment to have utility functions in place that load_easybuild_module.sh script relies on, + # along with $EESSI_* environment variables, and Lmod + echo 'ls -l /software-layer/' >> ${test_script} + echo 'source /software-layer/scripts/utils.sh' >> ${test_script} + echo 'source /software-layer/init/eessi_environment_variables' >> ${test_script} + echo 'source ${EPREFIX}/usr/share/Lmod/init/bash' >> ${test_script} + # minimal configuration for EasyBuild so we can test installation aspect of load_easybuild_module.sh script + echo "export EASYBUILD_INSTALLPATH=/tmp/eb-${EB_VERSION}" >> ${test_script} + echo 'module use ${EASYBUILD_INSTALLPATH}/modules/all' >> ${test_script} + echo '' >> ${test_script} + echo "source /software-layer/load_easybuild_module.sh ${EB_VERSION}" >> ${test_script} + echo 'module list' >> ${test_script} + echo 'eb --version' >> ${test_script} + + chmod u+x ${test_script} + + # run wrapper script + capture & check output + out="${PWD}/eb-${EB_VERSION}.out" + ./eessi_container.sh --access rw --mode run --verbose /software-layer/run_in_compat_layer_env.sh /software-layer/eb-${EB_VERSION}.sh 2>&1 | tee ${out} + pattern="^This is EasyBuild ${EB_VERSION} " + grep "${pattern}" ${out} || (echo "Pattern '${pattern}' not found in output!" 
&& exit 1) + done - name: test install_software_layer.sh script run: | @@ -49,3 +85,13 @@ jobs: # force using x86_64/generic, to avoid triggering an installation from scratch sed -i "s@./EESSI-pilot-install-software.sh@\"export EESSI_SOFTWARE_SUBDIR_OVERRIDE='x86_64/generic'; ./EESSI-pilot-install-software.sh\"@g" install_software_layer.sh ./build_container.sh run /tmp/$USER/EESSI /tmp/install_software_layer.sh + + - name: test create_directory_tarballs.sh script + run: | + # scripts need to be copied to /tmp, + # since create_directory_tarballs.sh must be accessible from within build container + cp -a * /tmp/ + cd /tmp + ./build_container.sh run /tmp/$USER/EESSI /tmp/create_directory_tarballs.sh 2021.12 + # check if tarballs have been produced + ls -l *.tar.gz diff --git a/EESSI-pilot-install-software.sh b/EESSI-pilot-install-software.sh index 2c7622b45a..d9bcf20231 100755 --- a/EESSI-pilot-install-software.sh +++ b/EESSI-pilot-install-software.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# Script to install EESSI pilot software stack (version 2021.12) +# Script to install EESSI pilot software stack (version set through init/eessi_defaults) # see example parsing of command line arguments at # https://wiki.bash-hackers.org/scripting/posparams#using_a_while_loop @@ -50,7 +50,7 @@ set -- "${POSITIONAL_ARGS[@]}" TOPDIR=$(dirname $(realpath $0)) -source $TOPDIR/utils.sh +source $TOPDIR/scripts/utils.sh # honor $TMPDIR if it is already defined, use /tmp otherwise if [ -z $TMPDIR ]; then @@ -139,62 +139,8 @@ fi REQ_EB_VERSION='4.5.0' -echo ">> Checking for EasyBuild module..." -ml_av_easybuild_out=$TMPDIR/ml_av_easybuild.out -module avail 2>&1 | grep -i easybuild/${REQ_EB_VERSION} &> ${ml_av_easybuild_out} -if [[ $? -eq 0 ]]; then - echo_green ">> EasyBuild module found!" -else - echo_yellow ">> No EasyBuild module yet, installing it..." - - EB_TMPDIR=${TMPDIR}/ebtmp - echo ">> Temporary installation (in ${EB_TMPDIR})..." - pip_install_out=${TMPDIR}/pip_install.out - pip3 install --prefix $EB_TMPDIR easybuild &> ${pip_install_out} - - echo ">> Final installation in ${EASYBUILD_INSTALLPATH}..." - export PATH=${EB_TMPDIR}/bin:$PATH - export PYTHONPATH=$(ls -d ${EB_TMPDIR}/lib/python*/site-packages):$PYTHONPATH - eb_install_out=${TMPDIR}/eb_install.out - ok_msg="Latest EasyBuild release installed, let's go!" - fail_msg="Installing latest EasyBuild release failed, that's not good... (output: ${eb_install_out})" - eb --install-latest-eb-release &> ${eb_install_out} - check_exit_code $? "${ok_msg}" "${fail_msg}" - - eb --search EasyBuild-${REQ_EB_VERSION}.eb | grep EasyBuild-${REQ_EB_VERSION}.eb > /dev/null - if [[ $? -eq 0 ]]; then - ok_msg="EasyBuild v${REQ_EB_VERSION} installed, alright!" - fail_msg="Installing EasyBuild v${REQ_EB_VERSION}, yikes! (output: ${eb_install_out})" - eb EasyBuild-${REQ_EB_VERSION}.eb >> ${eb_install_out} 2>&1 - check_exit_code $? "${ok_msg}" "${fail_msg}" - fi - - module avail easybuild/${REQ_EB_VERSION} &> ${ml_av_easybuild_out} - if [[ $? -eq 0 ]]; then - echo_green ">> EasyBuild module installed!" - else - fatal_error "EasyBuild/${REQ_EB_VERSION} module failed to install?! (output of 'pip install' in ${pip_install_out}, output of 'eb' in ${eb_install_out}, output of 'ml av easybuild' in ${ml_av_easybuild_out})" - fi -fi - -echo ">> Loading EasyBuild module..." -module load EasyBuild/$REQ_EB_VERSION -eb_show_system_info_out=${TMPDIR}/eb_show_system_info.out -$EB --show-system-info > ${eb_show_system_info_out} -if [[ $? 
-eq 0 ]]; then - echo_green ">> EasyBuild seems to be working!" - $EB --version | grep "${REQ_EB_VERSION}" - if [[ $? -eq 0 ]]; then - echo_green "Found EasyBuild version ${REQ_EB_VERSION}, looking good!" - else - $EB --version - fatal_error "Expected to find EasyBuild version ${REQ_EB_VERSION}, giving up here..." - fi - $EB --show-config -else - cat ${eb_show_system_info_out} - fatal_error "EasyBuild not working?!" -fi +# load EasyBuild module (will be installed if it's not available yet) +source ${TOPDIR}/load_easybuild_module.sh ${REQ_EB_VERSION} echo_green "All set, let's start installing some software in ${EASYBUILD_INSTALLPATH}..." @@ -362,6 +308,12 @@ fail_msg="Installation of WRF failed, that's unexpected..." OMPI_MCA_pml=ucx UCX_TLS=tcp $EB WRF-3.9.1.1-foss-2020a-dmpar.eb -r --include-easyblocks-from-pr 2648 check_exit_code $? "${ok_msg}" "${fail_msg}" +echo ">> Installing R 4.1.0 (better be patient)..." +ok_msg="R installed, wow!" +fail_msg="Installation of R failed, so sad..." +$EB --from-pr 14821 X11-20210518-GCCcore-10.3.0.eb -r && $EB --from-pr 16011 R-4.1.0-foss-2021a.eb --robot --parallel-extensions-install --experimental +check_exit_code $? "${ok_msg}" "${fail_msg}" + echo ">> Installing Nextflow 22.10.1..." ok_msg="Nextflow installed, the work must flow..." fail_msg="Installation of Nextflow failed, that's unexpected..." diff --git a/README.md b/README.md index 4d463b1c5b..daf02eebc2 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,22 @@ Environment set up to use EESSI pilot software stack, have fun! [EESSI pilot 2021.12] $ ``` +### Accessing EESSI via a container + +You need Singularity version 3.7 or newer. Then, simply run + +``` +$ ./eessi_container.sh +``` +Once you get presented the prompt `Singularity>` run the above `source` command. + +If you want to build a package for the software repository, simply add the arguments `--access rw`, e.g., full command would be + +``` +$ ./eessi_container.sh --access rw +``` +Note, not all features/arguments listed via `./eessi_container.sh --help` are implemented. + # License The software in this repository is distributed under the terms of the diff --git a/bot/build.sh b/bot/build.sh new file mode 100755 index 0000000000..c8def2cdd3 --- /dev/null +++ b/bot/build.sh @@ -0,0 +1,199 @@ +#!/usr/bin/env bash +# +# script to build the EESSI software layer. Intended use is that it is called +# by a (batch) job running on a compute node. +# +# This script is part of the EESSI software layer, see +# https://github.com/EESSI/software-layer.git +# +# author: Thomas Roeblitz (@trz42) +# +# license: GPLv2 +# + +# ASSUMPTIONs: +# - working directory has been prepared by the bot with a checkout of a +# pull request (OR by some other means) +# - the working directory contains a directory 'cfg' where the main config +# file 'job.cfg' has been deposited +# - the directory may contain any additional files referenced in job.cfg + +# stop as soon as something fails +set -e + +# source utils.sh and cfg_files.sh +source scripts/utils.sh +source scripts/cfg_files.sh + +# defaults +export JOB_CFG_FILE="${JOB_CFG_FILE_OVERRIDE:=./cfg/job.cfg}" +HOST_ARCH=$(uname -m) + +# check if ${JOB_CFG_FILE} exists +if [[ ! 
-r "${JOB_CFG_FILE}" ]]; then + fatal_error "job config file (JOB_CFG_FILE=${JOB_CFG_FILE}) does not exist or not readable" +fi +echo "bot/build.sh: showing ${JOB_CFG_FILE} from software-layer side" +cat ${JOB_CFG_FILE} + +echo "bot/build.sh: obtaining configuration settings from '${JOB_CFG_FILE}'" +cfg_load ${JOB_CFG_FILE} + +# if http_proxy is defined in ${JOB_CFG_FILE} use it, if not use env var $http_proxy +HTTP_PROXY=$(cfg_get_value "site_config" "http_proxy") +HTTP_PROXY=${HTTP_PROXY:-${http_proxy}} +echo "bot/build.sh: HTTP_PROXY='${HTTP_PROXY}'" + +# if https_proxy is defined in ${JOB_CFG_FILE} use it, if not use env var $https_proxy +HTTPS_PROXY=$(cfg_get_value "site_config" "https_proxy") +HTTPS_PROXY=${HTTPS_PROXY:-${https_proxy}} +echo "bot/build.sh: HTTPS_PROXY='${HTTPS_PROXY}'" + +LOCAL_TMP=$(cfg_get_value "site_config" "local_tmp") +echo "bot/build.sh: LOCAL_TMP='${LOCAL_TMP}'" +# TODO should local_tmp be mandatory? --> then we check here and exit if it is not provided + +SINGULARITY_CACHEDIR=$(cfg_get_value "site_config" "container_cachedir") +echo "bot/build.sh: SINGULARITY_CACHEDIR='${SINGULARITY_CACHEDIR}'" +if [[ ! -z ${SINGULARITY_CACHEDIR} ]]; then + # make sure that separate directories are used for different CPU families + SINGULARITY_CACHEDIR=${SINGULARITY_CACHEDIR}/${HOST_ARCH} + export SINGULARITY_CACHEDIR +fi + +echo -n "setting \$STORAGE by replacing any var in '${LOCAL_TMP}' -> " +# replace any env variable in ${LOCAL_TMP} with its +# current value (e.g., a value that is local to the job) +STORAGE=$(envsubst <<< ${LOCAL_TMP}) +echo "'${STORAGE}'" + +# make sure ${STORAGE} exists +mkdir -p ${STORAGE} + +# make sure the base tmp storage is unique +JOB_STORAGE=$(mktemp --directory --tmpdir=${STORAGE} bot_job_tmp_XXX) +echo "bot/build.sh: created unique base tmp storage directory at ${JOB_STORAGE}" + +# obtain list of modules to be loaded +LOAD_MODULES=$(cfg_get_value "site_config" "load_modules") +echo "bot/build.sh: LOAD_MODULES='${LOAD_MODULES}'" + +# singularity/apptainer settings: CONTAINER, HOME, TMPDIR, BIND +CONTAINER=$(cfg_get_value "repository" "container") +export SINGULARITY_HOME="${PWD}:/eessi_bot_job" +export SINGULARITY_TMPDIR="${PWD}/singularity_tmpdir" +mkdir -p ${SINGULARITY_TMPDIR} + +# load modules if LOAD_MODULES is not empty +if [[ ! 
-z ${LOAD_MODULES} ]]; then + for mod in $(echo ${LOAD_MODULES} | tr ',' '\n') + do + echo "bot/build.sh: loading module '${mod}'" + module load ${mod} + done +else + echo "bot/build.sh: no modules to be loaded" +fi + +# determine repository to be used from entry .repository in ${JOB_CFG_FILE} +REPOSITORY=$(cfg_get_value "repository" "repo_id") +EESSI_REPOS_CFG_DIR_OVERRIDE=$(cfg_get_value "repository" "repos_cfg_dir") +export EESSI_REPOS_CFG_DIR_OVERRIDE=${EESSI_REPOS_CFG_DIR_OVERRIDE:-${PWD}/cfg} +echo "bot/build.sh: EESSI_REPOS_CFG_DIR_OVERRIDE='${EESSI_REPOS_CFG_DIR_OVERRIDE}'" + +# determine pilot version to be used from .repository.repo_version in ${JOB_CFG_FILE} +# here, just set & export EESSI_PILOT_VERSION_OVERRIDE +# next script (eessi_container.sh) makes use of it via sourcing init scripts +# (e.g., init/eessi_defaults or init/minimal_eessi_env) +export EESSI_PILOT_VERSION_OVERRIDE=$(cfg_get_value "repository" "repo_version") +echo "bot/build.sh: EESSI_PILOT_VERSION_OVERRIDE='${EESSI_PILOT_VERSION_OVERRIDE}'" + +# determine CVMFS repo to be used from .repository.repo_name in ${JOB_CFG_FILE} +# here, just set EESSI_CVMFS_REPO_OVERRIDE, a bit further down +# "source init/eessi_defaults" via sourcing init/minimal_eessi_env +export EESSI_CVMFS_REPO_OVERRIDE=$(cfg_get_value "repository" "repo_name") +echo "bot/build.sh: EESSI_CVMFS_REPO_OVERRIDE='${EESSI_CVMFS_REPO_OVERRIDE}'" + +# determine architecture to be used from entry .architecture in ${JOB_CFG_FILE} +# fallbacks: +# - ${CPU_TARGET} handed over from bot +# - left empty to let downstream script(s) determine subdir to be used +EESSI_SOFTWARE_SUBDIR_OVERRIDE=$(cfg_get_value "architecture" "software_subdir") +EESSI_SOFTWARE_SUBDIR_OVERRIDE=${EESSI_SOFTWARE_SUBDIR_OVERRIDE:-${CPU_TARGET}} +export EESSI_SOFTWARE_SUBDIR_OVERRIDE +echo "bot/build.sh: EESSI_SOFTWARE_SUBDIR_OVERRIDE='${EESSI_SOFTWARE_SUBDIR_OVERRIDE}'" + +# get EESSI_OS_TYPE from .architecture.os_type in ${JOB_CFG_FILE} (default: linux) +EESSI_OS_TYPE=$(cfg_get_value "architecture" "os_type") +export EESSI_OS_TYPE=${EESSI_OS_TYPE:-linux} +echo "bot/build.sh: EESSI_OS_TYPE='${EESSI_OS_TYPE}'" + +# prepare arguments to eessi_container.sh common to build and tarball steps +declare -a COMMON_ARGS=() +COMMON_ARGS+=("--verbose") +COMMON_ARGS+=("--access" "rw") +COMMON_ARGS+=("--mode" "run") +[[ ! -z ${CONTAINER} ]] && COMMON_ARGS+=("--container" "${CONTAINER}") +[[ ! -z ${HTTP_PROXY} ]] && COMMON_ARGS+=("--http-proxy" "${HTTP_PROXY}") +[[ ! -z ${HTTPS_PROXY} ]] && COMMON_ARGS+=("--https-proxy" "${HTTPS_PROXY}") +[[ ! 
-z ${REPOSITORY} ]] && COMMON_ARGS+=("--repository" "${REPOSITORY}") + +# make sure to use the same parent dir for storing tarballs of tmp +PREVIOUS_TMP_DIR=${PWD}/previous_tmp + +# prepare directory to store tarball of tmp for build step +TARBALL_TMP_BUILD_STEP_DIR=${PREVIOUS_TMP_DIR}/build_step +mkdir -p ${TARBALL_TMP_BUILD_STEP_DIR} + +# prepare arguments to eessi_container.sh specific to build step +declare -a BUILD_STEP_ARGS=() +BUILD_STEP_ARGS+=("--save" "${TARBALL_TMP_BUILD_STEP_DIR}") +BUILD_STEP_ARGS+=("--storage" "${STORAGE}") + +# prepare arguments to install_software_layer.sh (specific to build step) +GENERIC_OPT= +if [[ ${EESSI_SOFTWARE_SUBDIR_OVERRIDE} =~ .*/generic$ ]]; then + GENERIC_OPT="--generic" +fi + +# create tmp file for output of build step +build_outerr=$(mktemp build.outerr.XXXX) + +echo "Executing command to build software:" +echo "./eessi_container.sh ${COMMON_ARGS[@]} ${BUILD_STEP_ARGS[@]}" +echo " -- ./install_software_layer.sh ${GENERIC_OPT} \"$@\" 2>&1 | tee -a ${build_outerr}" +./eessi_container.sh "${COMMON_ARGS[@]}" "${BUILD_STEP_ARGS[@]}" \ + -- ./install_software_layer.sh ${GENERIC_OPT} "$@" 2>&1 | tee -a ${build_outerr} + +# prepare directory to store tarball of tmp for tarball step +TARBALL_TMP_TARBALL_STEP_DIR=${PREVIOUS_TMP_DIR}/tarball_step +mkdir -p ${TARBALL_TMP_TARBALL_STEP_DIR} + +# create tmp file for output of tarball step +tar_outerr=$(mktemp tar.outerr.XXXX) + +# prepare arguments to eessi_container.sh specific to tarball step +declare -a TARBALL_STEP_ARGS=() +TARBALL_STEP_ARGS+=("--save" "${TARBALL_TMP_TARBALL_STEP_DIR}") + +# determine temporary directory to resume from +BUILD_TMPDIR=$(grep ' as tmp directory ' ${build_outerr} | cut -d ' ' -f 2) +TARBALL_STEP_ARGS+=("--resume" "${BUILD_TMPDIR}") + +timestamp=$(date +%s) +# to set EESSI_PILOT_VERSION we need to source init/eessi_defaults now +source init/eessi_defaults +export TGZ=$(printf "eessi-%s-software-%s-%s-%d.tar.gz" ${EESSI_PILOT_VERSION} ${EESSI_OS_TYPE} ${EESSI_SOFTWARE_SUBDIR_OVERRIDE//\//-} ${timestamp}) + +# value of first parameter to create_tarball.sh - TMP_IN_CONTAINER - needs to be +# synchronised with setting of TMP_IN_CONTAINER in eessi_container.sh +# TODO should we make this a configurable parameter of eessi_container.sh using +# /tmp as default? +TMP_IN_CONTAINER=/tmp +echo "Executing command to create tarball:" +echo "./eessi_container.sh ${COMMON_ARGS[@]} ${TARBALL_STEP_ARGS[@]}" +echo " -- ./create_tarball.sh ${TMP_IN_CONTAINER} ${EESSI_PILOT_VERSION} ${EESSI_SOFTWARE_SUBDIR_OVERRIDE} /eessi_bot_job/${TGZ} 2>&1 | tee -a ${tar_outerr}" +./eessi_container.sh "${COMMON_ARGS[@]}" "${TARBALL_STEP_ARGS[@]}" \ + -- ./create_tarball.sh ${TMP_IN_CONTAINER} ${EESSI_PILOT_VERSION} ${EESSI_SOFTWARE_SUBDIR_OVERRIDE} /eessi_bot_job/${TGZ} 2>&1 | tee -a ${tar_outerr} + +exit 0 diff --git a/bot/check-build.sh b/bot/check-build.sh new file mode 100755 index 0000000000..ec1ca56bba --- /dev/null +++ b/bot/check-build.sh @@ -0,0 +1,492 @@ +#!/bin/bash +# +# Script to check the result of building the EESSI software layer. +# Intended use is that it is called by a (batch) job running on a compute +# node. 
+# +# This script is part of the EESSI software layer, see +# https://github.com/EESSI/software-layer.git +# +# author: Thomas Roeblitz (@trz42) +# +# license: GPLv2 +# + +# result cases + +# - SUCCESS (all of) +# - working directory contains slurm-JOBID.out file +# - working directory contains eessi*tar.gz +# - no message ERROR +# - no message FAILED +# - no message ' required modules missing:' +# - one or more of 'No missing installations' +# - message regarding created tarball +# - FAILED (one of ... implemented as NOT SUCCESS) +# - no slurm-JOBID.out file +# - no tarball +# - message with ERROR +# - message with FAILED +# - message with ' required modules missing:' +# - no message regarding created tarball + +# stop as soon as something fails +# set -e + +TOPDIR=$(dirname $(realpath $0)) + +source ${TOPDIR}/../scripts/utils.sh +source ${TOPDIR}/../scripts/cfg_files.sh + +# defaults +export JOB_CFG_FILE="${JOB_CFG_FILE_OVERRIDE:=./cfg/job.cfg}" + +# check if ${JOB_CFG_FILE} exists +if [[ ! -r "${JOB_CFG_FILE}" ]]; then + echo_red "job config file (JOB_CFG_FILE=${JOB_CFG_FILE}) does not exist or not readable" +else + echo "bot/check-build.sh: showing ${JOB_CFG_FILE} from software-layer side" + cat ${JOB_CFG_FILE} + + echo "bot/check-build.sh: obtaining configuration settings from '${JOB_CFG_FILE}'" + cfg_load ${JOB_CFG_FILE} +fi + +display_help() { + echo "usage: $0 [OPTIONS]" + echo " OPTIONS:" + echo " -h | --help - display this usage information [default: false]" + echo " -v | --verbose - display more information [default: false]" +} + +# set defaults for command line arguments +VERBOSE=0 + +POSITIONAL_ARGS=() + +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + display_help + exit 0 + ;; + -v|--verbose) + VERBOSE=1 + shift 1 + ;; + --) + shift + POSITIONAL_ARGS+=("$@") # save positional args + break + ;; + -*|--*) + fatal_error "Unknown option: $1" "${CMDLINE_ARG_UNKNOWN_EXITCODE}" + ;; + *) # No more options + POSITIONAL_ARGS+=("$1") # save positional arg + shift + ;; + esac +done + +set -- "${POSITIONAL_ARGS[@]}" + +job_dir=${PWD} + +[[ ${VERBOSE} -ne 0 ]] && echo ">> analysing job in directory ${job_dir}" + +job_out="slurm-${SLURM_JOB_ID}.out" +[[ ${VERBOSE} -ne 0 ]] && echo ">> searching for job output file(s) matching '"${job_out}"'" +if [[ -f ${job_out} ]]; then + SLURM=1 + [[ ${VERBOSE} -ne 0 ]] && echo " found slurm output file '"${job_out}"'" +else + SLURM=0 + [[ ${VERBOSE} -ne 0 ]] && echo " Slurm output file '"${job_out}"' NOT found" +fi + +ERROR=-1 +if [[ ${SLURM} -eq 1 ]]; then + GP_error='ERROR: ' + grep_out=$(grep -v "^>> searching for " ${job_dir}/${job_out} | grep "${GP_error}") + [[ $? -eq 0 ]] && ERROR=1 || ERROR=0 + # have to be careful to not add searched for pattern into slurm out file + [[ ${VERBOSE} -ne 0 ]] && echo ">> searching for '"${GP_error}"'" + [[ ${VERBOSE} -ne 0 ]] && echo "${grep_out}" +fi + +FAILED=-1 +if [[ ${SLURM} -eq 1 ]]; then + GP_failed='FAILED: ' + grep_out=$(grep -v "^>> searching for " ${job_dir}/${job_out} | grep "${GP_failed}") + [[ $? -eq 0 ]] && FAILED=1 || FAILED=0 + # have to be careful to not add searched for pattern into slurm out file + [[ ${VERBOSE} -ne 0 ]] && echo ">> searching for '"${GP_failed}"'" + [[ ${VERBOSE} -ne 0 ]] && echo "${grep_out}" +fi + +MISSING=-1 +if [[ ${SLURM} -eq 1 ]]; then + GP_req_missing=' required modules missing:' + grep_out=$(grep -v "^>> searching for " ${job_dir}/${job_out} | grep "${GP_req_missing}") + [[ $? 
-eq 0 ]] && MISSING=1 || MISSING=0 + # have to be careful to not add searched for pattern into slurm out file + [[ ${VERBOSE} -ne 0 ]] && echo ">> searching for '"${GP_req_missing}"'" + [[ ${VERBOSE} -ne 0 ]] && echo "${grep_out}" +fi + +NO_MISSING=-1 +if [[ ${SLURM} -eq 1 ]]; then + GP_no_missing='No missing installations' + grep_out=$(grep -v "^>> searching for " ${job_dir}/${job_out} | grep "${GP_no_missing}") + [[ $? -eq 0 ]] && NO_MISSING=1 || NO_MISSING=0 + # have to be careful to not add searched for pattern into slurm out file + [[ ${VERBOSE} -ne 0 ]] && echo ">> searching for '"${GP_no_missing}"'" + [[ ${VERBOSE} -ne 0 ]] && echo "${grep_out}" +fi + +TGZ=-1 +TARBALL= +if [[ ${SLURM} -eq 1 ]]; then + GP_tgz_created="\.tar\.gz created!" + grep_out=$(grep -v "^>> searching for " ${job_dir}/${job_out} | grep "${GP_tgz_created}" | sort -u) + if [[ $? -eq 0 ]]; then + TGZ=1 + TARBALL=$(echo ${grep_out} | sed -e 's@^.*/\(eessi[^/ ]*\) .*$@\1@') + else + TGZ=0 + fi + # have to be careful to not add searched for pattern into slurm out file + [[ ${VERBOSE} -ne 0 ]] && echo ">> searching for '"${GP_tgz_created}"'" + [[ ${VERBOSE} -ne 0 ]] && echo "${grep_out}" +fi + +[[ ${VERBOSE} -ne 0 ]] && echo "SUMMARY: ${job_dir}/${job_out}" +[[ ${VERBOSE} -ne 0 ]] && echo " : ()" +[[ ${VERBOSE} -ne 0 ]] && echo " ERROR......: $([[ $ERROR -eq 1 ]] && echo 'yes' || echo 'no') (no)" +[[ ${VERBOSE} -ne 0 ]] && echo " FAILED.....: $([[ $FAILED -eq 1 ]] && echo 'yes' || echo 'no') (no)" +[[ ${VERBOSE} -ne 0 ]] && echo " REQ_MISSING: $([[ $MISSING -eq 1 ]] && echo 'yes' || echo 'no') (no)" +[[ ${VERBOSE} -ne 0 ]] && echo " NO_MISSING.: $([[ $NO_MISSING -eq 1 ]] && echo 'yes' || echo 'no') (yes)" +[[ ${VERBOSE} -ne 0 ]] && echo " TGZ_CREATED: $([[ $TGZ -eq 1 ]] && echo 'yes' || echo 'no') (yes)" + +job_result_file=_bot_job${SLURM_JOB_ID}.result + +if [[ ${SLURM} -eq 1 ]] && \ + [[ ${ERROR} -eq 0 ]] && \ + [[ ${FAILED} -eq 0 ]] && \ + [[ ${MISSING} -eq 0 ]] && \ + [[ ${NO_MISSING} -eq 1 ]] && \ + [[ ${TGZ} -eq 1 ]] && \ + [[ ! -z ${TARBALL} ]]; then + # SUCCESS + status="SUCCESS" + summary=":grin: SUCCESS" +else + # FAILURE + status="FAILURE" + summary=":cry: FAILURE" +fi + +### Example details/descriptions +# Note, final string must not contain any line breaks. Below example include +# line breaks for the sake of readability. In case of FAILURE, the structure is +# very similar (incl. information about Artefacts if any was produced), however, +# under Details some lines will be marked with :x: +#
+# :grin: SUCCESS _(click triangle for details)_ +#
+#
_Details_
+#
+# :white_check_mark: job output file slurm-4682.out
+# :white_check_mark: no message matching ERROR:
+# :white_check_mark: no message matching FAILED:
+# :white_check_mark: no message matching required modules missing:
+# :white_check_mark: found message(s) matching No missing installations
+# :white_check_mark: found message matching tar.gz created!
+#
+#
_Artefacts_
+#
+#
+# eessi-2023.06-software-linux-x86_64-generic-1682696567.tar.gz +# size: 234 MiB (245366784 bytes)
+# entries: 1234
+# modules under _2023.06/software/linux/x86_64/generic/modules/all/_
+#
+#           GCC/9.3.0.lua
+# GCC/10.3.0.lua
+# OpenSSL/1.1.lua +#
+# software under _2023.06/software/linux/x86_64/generic/software/_ +#
+#           GCC/9.3.0/
+# CMake/3.20.1-GCCcore-10.3.0/
+# OpenMPI/4.1.1-GCC-10.3.0/ +#
+# other under _2023.06/software/linux/x86_64/generic/_ +#
+#           .lmod/cache/spiderT.lua
+# .lmod/cache/spiderT.luac_5.1
+# .lmod/cache/timestamp +#
+#
+#
+#
+#
+# +#
+# :cry: FAILURE _(click triangle for details)_ +#
+#
_Details_
+#
+# :white_check_mark: job output file slurm-4682.out
+# :x: no message matching ERROR:
+# :white_check_mark: no message matching FAILED:
+# :x: no message matching required modules missing:
+# :white_check_mark: found message(s) matching No missing installations
+# :white_check_mark: found message matching tar.gz created!
+#
+#
_Artefacts_
+#
+# No artefacts were created or found. +#
+#
+#
+### + +# construct and write complete PR comment details: implements third alternative +comment_template="
__SUMMARY_FMT__
__DETAILS_FMT____ARTEFACTS_FMT__
" +comment_summary_fmt="__SUMMARY__ _(click triangle for details)_" +comment_details_fmt="
_Details_
__DETAILS_LIST__
" +comment_success_item_fmt=":white_check_mark: __ITEM__" +comment_failure_item_fmt=":x: __ITEM__" +comment_artefacts_fmt="
_Artefacts_
__ARTEFACTS_LIST__
" +comment_artefact_details_fmt="
__ARTEFACT_SUMMARY____ARTEFACT_DETAILS__
" + +function print_br_item() { + format="${1}" + item="${2}" + echo -n "${format//__ITEM__/${item}}
" +} + +function print_br_item2() { + format="${1}" + item="${2}" + item2="${3}" + format1="${format//__ITEM__/${item}}" + echo -n "${format1//__ITEM2__/${item2}}
" +} + +function print_code_item() { + format="${1}" + item="${2}" + echo -n "${format//__ITEM__/${item}}" +} + +function print_dd_item() { + format="${1}" + item="${2}" + echo -n "
${format//__ITEM__/${item}}
" +} + +function print_list_item() { + format="${1}" + item="${2}" + echo -n "
  • ${format//__ITEM__/${item}}
  • " +} + +function print_pre_item() { + format="${1}" + item="${2}" + echo -n "
    ${format//__ITEM__/${item}}
    " +} + +function success() { + format="${comment_success_item_fmt}" + item="$1" + print_br_item "${format}" "${item}" +} + +function failure() { + format="${comment_failure_item_fmt}" + item="$1" + print_br_item "${format}" "${item}" +} + +function add_detail() { + actual=${1} + expected=${2} + success_msg="${3}" + failure_msg="${4}" + if [[ ${actual} -eq ${expected} ]]; then + success "${success_msg}" + else + failure "${failure_msg}" + fi +} + +echo "[RESULT]" > ${job_result_file} +echo -n "comment_description = " >> ${job_result_file} + +# construct values for placeholders in comment_template: +# - __SUMMARY_FMT__ -> variable $comment_summary +# - __DETAILS_FMT__ -> variable $comment_details +# - __ARTEFACTS_FMT__ -> variable $comment_artefacts + +comment_summary="${comment_summary_fmt/__SUMMARY__/${summary}}" + +# first construct comment_details_list, abbreviated CoDeList +# then use it to set comment_details +CoDeList="" + +success_msg="job output file ${job_out}" +failure_msg="no job output file ${job_out}" +CoDeList=${CoDeList}$(add_detail ${SLURM} 1 "${success_msg}" "${failure_msg}") + +success_msg="no message matching ${GP_error}" +failure_msg="found message matching ${GP_error}" +CoDeList=${CoDeList}$(add_detail ${ERROR} 0 "${success_msg}" "${failure_msg}") + +success_msg="no message matching ${GP_failed}" +failure_msg="found message matching ${GP_failed}" +CoDeList=${CoDeList}$(add_detail ${FAILED} 0 "${success_msg}" "${failure_msg}") + +success_msg="no message matching ${GP_req_missing}" +failure_msg="found message matching ${GP_req_missing}" +CoDeList=${CoDeList}$(add_detail ${MISSING} 0 "${success_msg}" "${failure_msg}") + +success_msg="found message(s) matching ${GP_no_missing}" +failure_msg="no message matching ${GP_no_missing}" +CoDeList=${CoDeList}$(add_detail ${NO_MISSING} 1 "${success_msg}" "${failure_msg}") + +success_msg="found message matching ${GP_tgz_created}" +failure_msg="no message matching ${GP_tgz_created}" +CoDeList=${CoDeList}$(add_detail ${TGZ} 1 "${success_msg}" "${failure_msg}") + +comment_details="${comment_details_fmt/__DETAILS_LIST__/${CoDeList}}" + + +# first construct comment_artefacts_list, abbreviated CoArList +# then use it to set comment_artefacts +CoArList="" + +# TARBALL should only contain a single tarball +if [[ ! -z ${TARBALL} ]]; then + # Example of the detailed information for a tarball. The actual result MUST be a + # single line (no '\n') or it would break the structure of the markdown table + # that holds status updates of a bot job. + # + #
    + #
    + # eessi-2023.06-software-linux-x86_64-generic-1682696567.tar.gz + # size: 234 MiB (245366784 bytes)
    + # entries: 1234
    + # modules under _2023.06/software/linux/x86_64/intel/cascadelake/modules/all/_
    + #
    +    #       GCC/9.3.0.lua
    + # GCC/10.3.0.lua
    + # OpenSSL/1.1.lua + #
    + # software under _2023.06/software/linux/x86_64/intel/cascadelake/software/_ + #
    +    #       GCC/9.3.0/
    + # CMake/3.20.1-GCCcore-10.3.0/
    + # OpenMPI/4.1.1-GCC-10.3.0/ + #
    + # other under _2023.06/software/linux/x86_64/intel/cascadelake/_ + #
    +    #       .lmod/cache/spiderT.lua
    + # .lmod/cache/spiderT.luac_5.1
    + # .lmod/cache/timestamp + #
    + #
    + #
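+    # gather facts about the tarball (its size, the number of entries it contains,
+    # and its directory prefix) that are used below to fill in the artefact details
+    # of the PR comment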
    + size="$(stat --dereference --printf=%s ${TARBALL})" + size_mib=$((${size} >> 20)) + tmpfile=$(mktemp --tmpdir=. tarfiles.XXXX) + tar tf ${TARBALL} > ${tmpfile} + entries=$(cat ${tmpfile} | wc -l) + # determine prefix from job config: VERSION/software/OS_TYPE/CPU_FAMILY/ARCHITECTURE + # e.g., 2023.06/software/linux/x86_64/intel/skylake_avx512 + # cfg/job.cfg contains (only the attributes to be used are shown below): + # [repository] + # repo_version = 2023.06 + # [architecture] + # os_type = linux + # software_subdir = x86_64/intel/skylake_avx512 + repo_version=$(cfg_get_value "repository" "repo_version") + os_type=$(cfg_get_value "architecture" "os_type") + software_subdir=$(cfg_get_value "architecture" "software_subdir") + prefix="${repo_version}/software/${os_type}/${software_subdir}" + + # extract directories/entries from tarball content + modules_entries=$(grep "${prefix}/modules" ${tmpfile}) + software_entries=$(grep "${prefix}/software" ${tmpfile}) + other_entries=$(cat ${tmpfile} | grep -v "${prefix}/modules" | grep -v "${prefix}/software") + other_shortened=$(echo "${other_entries}" | sed -e "s@^.*${prefix}/@@" | sort -u) + modules=$(echo "${modules_entries}" | grep "/all/.*/.*lua$" | sed -e 's@^.*/\([^/]*/[^/]*.lua\)$@\1@' | sort -u) + software_pkgs=$(echo "${software_entries}" | sed -e "s@${prefix}/software/@@" | awk -F/ '{if (NR >= 2) {print $1 "/" $2}}' | sort -u) + + artefact_summary="$(print_code_item '__ITEM__' ${TARBALL})" + CoArList="" + CoArList="${CoArList}$(print_br_item2 'size: __ITEM__ MiB (__ITEM2__ bytes)' ${size_mib} ${size})" + CoArList="${CoArList}$(print_br_item 'entries: __ITEM__' ${entries})" + CoArList="${CoArList}$(print_br_item 'modules under ___ITEM___' ${prefix}/modules/all)" + CoArList="${CoArList}
    "
    +    if [[ ! -z ${modules} ]]; then
    +        while IFS= read -r mod ; do
    +            CoArList="${CoArList}$(print_br_item '__ITEM__' ${mod})"
    +        done <<< "${modules}"
    +    else
    +        CoArList="${CoArList}$(print_br_item '__ITEM__' 'no module files in tarball')"
    +    fi
    +    CoArList="${CoArList}
    " + CoArList="${CoArList}$(print_br_item 'software under ___ITEM___' ${prefix}/software)" + CoArList="${CoArList}
    "
    +    if [[ ! -z ${software_pkgs} ]]; then
    +        while IFS= read -r sw_pkg ; do
    +            CoArList="${CoArList}$(print_br_item '__ITEM__' ${sw_pkg})"
    +        done <<< "${software_pkgs}"
    +    else
    +        CoArList="${CoArList}$(print_br_item '__ITEM__' 'no software packages in tarball')"
    +    fi
    +    CoArList="${CoArList}
    " + CoArList="${CoArList}$(print_br_item 'other under ___ITEM___' ${prefix})" + CoArList="${CoArList}
    "
    +    if [[ ! -z ${other_shortened} ]]; then
    +        while IFS= read -r other ; do
    +            CoArList="${CoArList}$(print_br_item '__ITEM__' ${other})"
    +        done <<< "${other_shortened}"
    +    else
    +        CoArList="${CoArList}$(print_br_item '__ITEM__' 'no other files in tarball')"
    +    fi
    +    CoArList="${CoArList}
    " +else + CoArList="${CoArList}$(print_dd_item 'No artefacts were created or found.' '')" +fi + +comment_artefacts_details="${comment_artefact_details_fmt/__ARTEFACT_SUMMARY__/${artefact_summary}}" +comment_artefacts_details="${comment_artefacts_details/__ARTEFACT_DETAILS__/${CoArList}}" +comment_artefacts="${comment_artefacts_fmt/__ARTEFACTS_LIST__/${comment_artefacts_details}}" + +# now put all pieces together creating comment_details from comment_template +comment_description=${comment_template/__SUMMARY_FMT__/${comment_summary}} +comment_description=${comment_description/__DETAILS_FMT__/${comment_details}} +comment_description=${comment_description/__ARTEFACTS_FMT__/${comment_artefacts}} + +echo "${comment_description}" >> ${job_result_file} + +# add overall result: SUCCESS, FAILURE, UNKNOWN + artefacts +# - this should make use of subsequent steps such as deploying a tarball more +# efficient +echo "status = ${status}" >> ${job_result_file} +echo "artefacts = " >> ${job_result_file} +echo "${TARBALL}" | sed -e 's/^/ /g' >> ${job_result_file} + +# remove tmpfile +if [[ -f ${tmpfile} ]]; then + rm ${tmpfile} +fi + +# exit script with value that reflects overall job result: SUCCESS (0), FAILURE (1) +test "${status}" == "SUCCESS" +exit $? diff --git a/bot/check-result.sh b/bot/check-result.sh new file mode 120000 index 0000000000..02f753db50 --- /dev/null +++ b/bot/check-result.sh @@ -0,0 +1 @@ +check-build.sh \ No newline at end of file diff --git a/build_container.sh b/build_container.sh index bddd3dfffc..23a9e665c9 100755 --- a/build_container.sh +++ b/build_container.sh @@ -1,5 +1,7 @@ #!/bin/bash +base_dir=$(dirname $(realpath $0)) + BUILD_CONTAINER="docker://ghcr.io/eessi/build-node:debian11" if [ $# -lt 2 ]; then @@ -39,9 +41,13 @@ if [ -z $SINGULARITY_HOME ]; then export SINGULARITY_HOME="$EESSI_TMPDIR/home:/home/$USER" fi +source ${base_dir}/init/eessi_defaults +# strip "/cvmfs/" from default setting +repo_name=${EESSI_CVMFS_REPO/\/cvmfs\//} + # set environment variables for fuse mounts in Singularity container -export EESSI_PILOT_READONLY="container:cvmfs2 pilot.eessi-hpc.org /cvmfs_ro/pilot.eessi-hpc.org" -export EESSI_PILOT_WRITABLE_OVERLAY="container:fuse-overlayfs -o lowerdir=/cvmfs_ro/pilot.eessi-hpc.org -o upperdir=$EESSI_TMPDIR/overlay-upper -o workdir=$EESSI_TMPDIR/overlay-work /cvmfs/pilot.eessi-hpc.org" +export EESSI_PILOT_READONLY="container:cvmfs2 ${repo_name} /cvmfs_ro/${repo_name}" +export EESSI_PILOT_WRITABLE_OVERLAY="container:fuse-overlayfs -o lowerdir=/cvmfs_ro/${repo_name} -o upperdir=$EESSI_TMPDIR/overlay-upper -o workdir=$EESSI_TMPDIR/overlay-work ${EESSI_CVMFS_REPO}" # pass $EESSI_SOFTWARE_SUBDIR_OVERRIDE into build container (if set) if [ ! -z ${EESSI_SOFTWARE_SUBDIR_OVERRIDE} ]; then diff --git a/check_missing_installations.sh b/check_missing_installations.sh index 30f9cc6ff7..4a5316c09f 100755 --- a/check_missing_installations.sh +++ b/check_missing_installations.sh @@ -3,6 +3,7 @@ # Script to check for missing installations in EESSI pilot software stack (version 2021.12) # # author: Kenneth Hoste (@boegel) +# author: Thomas Roeblitz (@trz42) # # license: GPLv2 # @@ -16,16 +17,35 @@ fi LOCAL_TMPDIR=$(mktemp -d) -source $TOPDIR/utils.sh +source $TOPDIR/scripts/utils.sh source $TOPDIR/configure_easybuild echo ">> Checking for missing installations in ${EASYBUILD_INSTALLPATH}..." -ok_msg="No missing installations, party time!" -fail_msg="On no, some installations are still missing, how did that happen?!" 
eb_missing_out=$LOCAL_TMPDIR/eb_missing.out # we need to use --from-pr to pull in some easyconfigs that are not available in EasyBuild version being used # PR #16531: Nextflow-22.10.1.eb -${EB:-eb} --from-pr 16531 --easystack eessi-${EESSI_PILOT_VERSION}.yml --experimental --missing | tee ${eb_missing_out} -grep "No missing modules" ${eb_missing_out} > /dev/null -check_exit_code $? "${ok_msg}" "${fail_msg}" +${EB:-eb} --from-pr 16531 --easystack eessi-${EESSI_PILOT_VERSION}.yml --experimental --missing 2>&1 | tee ${eb_missing_out} +exit_code=${PIPESTATUS[0]} + +ok_msg="Command 'eb --missing ...' succeeded, analysing output..." +fail_msg="Command 'eb --missing ...' failed, check log '${eb_missing_out}'" +check_exit_code ${exit_code} "${ok_msg}" "${fail_msg}" + +# the above assesses the installed software for each easyconfig provided in +# the easystack file and then print messages such as +# `No missing modules!` +# or +# `2 out of 3 required modules missing:` +# depending on the result of the assessment. Hence, we need to check if the +# output does not contain any line with ` required modules missing:` + +grep " required modules missing:" ${eb_missing_out} > /dev/null +exit_code=$? + +# if grep returns 1 (` required modules missing:` was NOT found), we set +# MODULES_MISSING to 0, otherwise (it was found or another error) we set it to 1 +[[ ${exit_code} -eq 1 ]] && MODULES_MISSING=0 || MODULES_MISSING=1 +ok_msg="No missing installations, party time!" +fail_msg="On no, some installations are still missing, how did that happen?!" +check_exit_code ${MODULES_MISSING} "${ok_msg}" "${fail_msg}" diff --git a/configure_easybuild b/configure_easybuild index 19b2d7454b..245553f342 100644 --- a/configure_easybuild +++ b/configure_easybuild @@ -13,6 +13,7 @@ export EASYBUILD_ZIP_LOGS=bzip2 export EASYBUILD_RPATH=1 export EASYBUILD_FILTER_ENV_VARS=LD_LIBRARY_PATH +export EASYBUILD_READ_ONLY_INSTALLDIR=1 # assume that eb_hooks.py is located in same directory as this script (configure_easybuild) TOPDIR=$(dirname $(realpath $BASH_SOURCE)) diff --git a/create_directory_tarballs.sh b/create_directory_tarballs.sh new file mode 100755 index 0000000000..70e666f871 --- /dev/null +++ b/create_directory_tarballs.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +SOFTWARE_LAYER_TARBALL_URL=https://github.com/EESSI/software-layer/tarball/main + +set -eo pipefail + +if [ $# -ne 1 ]; then + echo "Usage: $0 " >&2 + exit 1 +fi + +version=$1 + +TOPDIR=$(dirname $(realpath $0)) + +source $TOPDIR/scripts/utils.sh + +# Check if the EESSI version number encoded in the filename +# is valid, i.e. matches the format YYYY.DD +if ! echo "${version}" | egrep -q '^20[0-9][0-9]\.(0[0-9]|1[0-2])$' +then + fatal_error "${version} is not a valid EESSI version." +fi + +# Create tarball of init directory +tartmp=$(mktemp -t -d init.XXXXX) +mkdir "${tartmp}/${version}" +tarname="eessi-${version}-init-$(date +%s).tar.gz" +curl -Ls ${SOFTWARE_LAYER_TARBALL_URL} | tar xzf - -C "${tartmp}/${version}" --strip-components=1 --no-wildcards-match-slash --wildcards '*/init/' +source "${tartmp}/${version}/init/minimal_eessi_env" +if [ "${EESSI_PILOT_VERSION}" != "${version}" ] +then + fatal_error "Specified version ${version} does not match version ${EESSI_PILOT_VERSION} in the init files!" +fi +tar czf "${tarname}" -C "${tartmp}" "${version}" +rm -rf "${tartmp}" + +echo_green "Done! Created tarball ${tarname}." 
+ +# Create tarball of scripts directory +# Version check has already been performed and would have caused script to exit at this point in case of problems +tartmp=$(mktemp -t -d scripts.XXXXX) +mkdir "${tartmp}/${version}" +tarname="eessi-${version}-scripts-$(date +%s).tar.gz" +curl -Ls ${SOFTWARE_LAYER_TARBALL_URL} | tar xzf - -C "${tartmp}/${version}" --strip-components=1 --no-wildcards-match-slash --wildcards '*/scripts/' +tar czf "${tarname}" -C "${tartmp}" "${version}" +rm -rf "${tartmp}" + +echo_green "Done! Created tarball ${tarname}." diff --git a/create_init_tarball.sh b/create_init_tarball.sh deleted file mode 100755 index 3393285a33..0000000000 --- a/create_init_tarball.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/bash - -SOFTWARE_LAYER_TARBALL_URL=https://github.com/EESSI/software-layer/tarball/main - -set -eo pipefail - -function echo_green() { - echo -e "\e[32m$1\e[0m" -} - -function echo_red() { - echo -e "\e[31m$1\e[0m" -} - -function error() { - echo_red "ERROR: $1" >&2 - exit 1 -} - -if [ $# -ne 1 ]; then - echo "Usage: $0 " >&2 - exit 1 -fi - -version=$1 - -# Check if the EESSI version number encoded in the filename -# is valid, i.e. matches the format YYYY.DD -if ! echo "${version}" | egrep -q '^20[0-9][0-9]\.(0[0-9]|1[0-2])$' -then - error "${version} is not a valid EESSI version." -fi - -tartmp=$(mktemp -t -d init.XXXXX) -mkdir "${tartmp}/${version}" -tarname="eessi-${version}-init-$(date +%s).tar.gz" -curl -Ls ${SOFTWARE_LAYER_TARBALL_URL} | tar xzf - -C "${tartmp}/${version}" --strip-components=1 --wildcards */init/ -source "${tartmp}/${version}/init/minimal_eessi_env" -if [ "${EESSI_PILOT_VERSION}" != "${version}" ] -then - error "Specified version ${version} does not match version ${EESSI_PILOT_VERSION} in the init files!" -fi -tar czf "${tarname}" -C "${tartmp}" "${version}" -rm -rf "${tartmp}" - -echo_green "Done! Created tarball ${tarname}." diff --git a/create_tarball.sh b/create_tarball.sh index f2643ccfc5..b6c72b341d 100755 --- a/create_tarball.sh +++ b/create_tarball.sh @@ -2,6 +2,8 @@ set -e +base_dir=$(dirname $(realpath $0)) + if [ $# -ne 4 ]; then echo "ERROR: Usage: $0 " >&2 exit 1 @@ -15,7 +17,8 @@ tmpdir=`mktemp -d` echo ">> tmpdir: $tmpdir" os="linux" -cvmfs_repo="/cvmfs/pilot.eessi-hpc.org" +source ${base_dir}/init/eessi_defaults +cvmfs_repo=${EESSI_CVMFS_REPO} software_dir="${cvmfs_repo}/versions/${pilot_version}/software/${os}/${cpu_arch_subdir}" if [ ! -d ${software_dir} ]; then @@ -35,6 +38,7 @@ cd ${overlay_upper_dir}/versions/ echo ">> Collecting list of files/directories to include in tarball via ${PWD}..." files_list=${tmpdir}/files.list.txt +module_files_list=${tmpdir}/module_files.list.txt if [ -d ${pilot_version}/software/${os}/${cpu_arch_subdir}/.lmod ]; then # include Lmod cache and configuration file (lmodrc.lua), @@ -46,12 +50,31 @@ if [ -d ${pilot_version}/software/${os}/${cpu_arch_subdir}/modules ]; then find ${pilot_version}/software/${os}/${cpu_arch_subdir}/modules -type f | grep -v '/\.wh\.' >> ${files_list} # module symlinks find ${pilot_version}/software/${os}/${cpu_arch_subdir}/modules -type l | grep -v '/\.wh\.' >> ${files_list} + # module files and symlinks + find ${pilot_version}/software/${os}/${cpu_arch_subdir}/modules/all -type f -o -type l \ + | grep -v '/\.wh\.' 
| sed -e 's/.lua$//' | sed -e 's@.*/modules/all/@@g' | sort -u \ + >> ${module_files_list} fi -if [ -d ${pilot_version}/software/${os}/${cpu_arch_subdir}/software ]; then - # installation directories - ls -d ${pilot_version}/software/${os}/${cpu_arch_subdir}/software/*/* | grep -v '/\.wh\.' >> ${files_list} +if [ -d ${pilot_version}/software/${os}/${cpu_arch_subdir}/software -a -r ${module_files_list} ]; then + # installation directories but only those for which module files were created + # Note, we assume that module names (as defined by 'PACKAGE_NAME/VERSION.lua' + # using EasyBuild's standard module naming scheme) match the name of the + # software installation directory (expected to be 'PACKAGE_NAME/VERSION/'). + # If either side changes (module naming scheme or naming of software + # installation directories), the procedure will likely not work. + for package_version in $(cat ${module_files_list}); do + echo "handling ${package_version}" + ls -d ${pilot_version}/software/${os}/${cpu_arch_subdir}/software/${package_version} \ + | grep -v '/\.wh\.' >> ${files_list} + done fi +# add a bit debug output +echo "wrote file list to ${files_list}" +[ -r ${files_list} ] && cat ${files_list} +echo "wrote module file list to ${module_files_list}" +[ -r ${module_files_list} ] && cat ${module_files_list} + topdir=${cvmfs_repo}/versions/ echo ">> Creating tarball ${target_tgz} from ${topdir}..." diff --git a/eb_hooks.py b/eb_hooks.py index df7742f999..2fba925b01 100644 --- a/eb_hooks.py +++ b/eb_hooks.py @@ -3,8 +3,11 @@ import os import re +from easybuild.easyblocks.generic.configuremake import obtain_config_guess from easybuild.tools.build_log import EasyBuildError, print_msg from easybuild.tools.config import build_option, update_build_option +from easybuild.tools.filetools import apply_regex_substitutions, copy_file, which +from easybuild.tools.run import run_cmd from easybuild.tools.systemtools import AARCH64, POWER, X86_64, get_cpu_architecture, get_cpu_features from easybuild.tools.toolchain.compiler import OPTARCH_GENERIC @@ -24,12 +27,11 @@ def get_eessi_envvar(eessi_envvar): def get_rpath_override_dirs(software_name): # determine path to installations in software layer via $EESSI_SOFTWARE_PATH eessi_software_path = get_eessi_envvar('EESSI_SOFTWARE_PATH') - eessi_pilot_version = get_eessi_envvar('EESSI_PILOT_VERSION') # construct the rpath override directory stub rpath_injection_stub = os.path.join( # Make sure we are looking inside the `host_injections` directory - eessi_software_path.replace(eessi_pilot_version, os.path.join('host_injections', eessi_pilot_version), 1), + eessi_software_path.replace('versions', 'host_injections', 1), # Add the subdirectory for the specific software 'rpath_overrides', software_name, @@ -89,6 +91,36 @@ def pre_prepare_hook(self, *args, **kwargs): mpi_family, rpath_override_dirs) +def gcc_postprepare(self, *args, **kwargs): + """ + Post-configure hook for GCCcore: + - copy RPATH wrapper script for linker commands to also have a wrapper in place with system type prefix like 'x86_64-pc-linux-gnu' + """ + if self.name == 'GCCcore': + config_guess = obtain_config_guess() + system_type, _ = run_cmd(config_guess, log_all=True) + cmd_prefix = '%s-' % system_type.strip() + for cmd in ('ld', 'ld.gold', 'ld.bfd'): + wrapper = which(cmd) + self.log.info("Path to %s wrapper: %s" % (cmd, wrapper)) + wrapper_dir = os.path.dirname(wrapper) + prefix_wrapper = os.path.join(wrapper_dir, cmd_prefix + cmd) + copy_file(wrapper, prefix_wrapper) + self.log.info("Path to %s wrapper 
with '%s' prefix: %s" % (cmd, cmd_prefix, which(prefix_wrapper))) + + # we need to tweak the copied wrapper script, so that: + regex_subs = [ + # - CMD in the script is set to the command name without prefix, because EasyBuild's rpath_args.py + # script that is used by the wrapper script only checks for 'ld', 'ld.gold', etc. + # when checking whether or not to use -Wl + ('^CMD=.*', 'CMD=%s' % cmd), + # - the path to the correct actual binary is logged and called + ('/%s ' % cmd, '/%s ' % (cmd_prefix + cmd)), + ] + apply_regex_substitutions(prefix_wrapper, regex_subs) + else: + raise EasyBuildError("GCCcore-specific hook triggered for non-GCCcore easyconfig?!") + def post_prepare_hook(self, *args, **kwargs): """Main post-prepare hook: trigger custom functions.""" @@ -98,6 +130,9 @@ def post_prepare_hook(self, *args, **kwargs): print_msg("Resetting rpath_override_dirs to original value: %s", getattr(self, EESSI_RPATH_OVERRIDE_ATTR)) delattr(self, EESSI_RPATH_OVERRIDE_ATTR) + if self.name in POST_PREPARE_HOOKS: + POST_PREPARE_HOOKS[self.name](self, *args, **kwargs) + def cgal_toolchainopts_precise(ec, eprefix): """Enable 'precise' rather than 'strict' toolchain option for CGAL on POWER.""" @@ -187,6 +222,10 @@ def wrf_preconfigure(self, *args, **kwargs): 'UCX': ucx_eprefix, } +POST_PREPARE_HOOKS = { + 'GCCcore': gcc_postprepare, +} + PRE_CONFIGURE_HOOKS = { 'libfabric': libfabric_disable_psm3_x86_64_generic, 'MetaBAT': metabat_preconfigure, diff --git a/eessi-2021.12.yml b/eessi-2021.12.yml index 8674c5979d..210bbb2845 100644 --- a/eessi-2021.12.yml +++ b/eessi-2021.12.yml @@ -35,6 +35,10 @@ software: toolchains: foss-2020a: versions: ['6.6'] + R: + toolchains: + foss-2021a: + versions: '4.1.0' R-bundle-Bioconductor: toolchains: foss-2020a: diff --git a/eessi_container.sh b/eessi_container.sh new file mode 100755 index 0000000000..48c4653ba9 --- /dev/null +++ b/eessi_container.sh @@ -0,0 +1,587 @@ +#!/bin/bash +# +# unified script to access EESSI in different scenarios: read-only +# for just using EESSI, read & write for building software to be +# added to the software stack +# +# This script is part of the EESSI software layer, see +# https://github.com/EESSI/software-layer.git +# +# author: Thomas Roeblitz (@trz42) +# +# license: GPLv2 +# + +# -e: stop script as soon as any command has non-zero exit code +# -u: treat usage of undefined variables as errors +# FIXME commented out because it's OK (?) if some environment variables are not set (like $SINGULARITY_HOME) +# set -e -u + +# script overview +# -. initial settings & exit codes +# 0. parse args +# 1. check if argument values are valid +# 2. set up host storage/tmp +# 3. set up common vars and directories +# 4. set up vars specific to a scenario +# 5. run container +# 6. save tmp (if requested) + +# -. 
initial settings & exit codes +TOPDIR=$(dirname $(realpath $0)) + +source ${TOPDIR}/scripts/utils.sh +source ${TOPDIR}/scripts/cfg_files.sh + +# exit codes: bitwise shift codes to allow for combination of exit codes +# ANY_ERROR_EXITCODE is sourced from ${TOPDIR}/scripts/utils.sh +CMDLINE_ARG_UNKNOWN_EXITCODE=$((${ANY_ERROR_EXITCODE} << 1)) +ACCESS_UNKNOWN_EXITCODE=$((${ANY_ERROR_EXITCODE} << 2)) +CONTAINER_ERROR_EXITCODE=$((${ANY_ERROR_EXITCODE} << 3)) +HOST_STORAGE_ERROR_EXITCODE=$((${ANY_ERROR_EXITCODE} << 4)) +MODE_UNKNOWN_EXITCODE=$((${ANY_ERROR_EXITCODE} << 5)) +REPOSITORY_ERROR_EXITCODE=$((${ANY_ERROR_EXITCODE} << 6)) +RESUME_ERROR_EXITCODE=$((${ANY_ERROR_EXITCODE} << 7)) +SAVE_ERROR_EXITCODE=$((${ANY_ERROR_EXITCODE} << 8)) +HTTP_PROXY_ERROR_EXITCODE=$((${ANY_ERROR_EXITCODE} << 9)) +HTTPS_PROXY_ERROR_EXITCODE=$((${ANY_ERROR_EXITCODE} << 10)) +RUN_SCRIPT_MISSING_EXITCODE=$((${ANY_ERROR_EXITCODE} << 11)) + +# CernVM-FS settings +CVMFS_VAR_LIB="var-lib-cvmfs" +CVMFS_VAR_RUN="var-run-cvmfs" + +# directory for tmp used inside container +export TMP_IN_CONTAINER=/tmp + +# repository cfg directory and file +# directory: default $PWD or EESSI_REPOS_CFG_DIR_OVERRIDE if set +# file: directory + '/repos.cfg' +export EESSI_REPOS_CFG_DIR="${EESSI_REPOS_CFG_DIR_OVERRIDE:=${PWD}}" +export EESSI_REPOS_CFG_FILE="${EESSI_REPOS_CFG_DIR}/repos.cfg" + + +# 0. parse args +# see example parsing of command line arguments at +# https://wiki.bash-hackers.org/scripting/posparams#using_a_while_loop +# https://stackoverflow.com/questions/192249/how-do-i-parse-command-line-arguments-in-bash + +display_help() { + echo "usage: $0 [OPTIONS] [[--] SCRIPT or COMMAND]" + echo " OPTIONS:" + echo " -a | --access {ro,rw} - ro (read-only), rw (read & write) [default: ro]" + echo " -c | --container IMG - image file or URL defining the container to use" + echo " [default: docker://ghcr.io/eessi/build-node:debian11]" + echo " -h | --help - display this usage information [default: false]" + echo " -g | --storage DIR - directory space on host machine (used for" + echo " temporary data) [default: 1. TMPDIR, 2. /tmp]" + echo " -l | --list-repos - list available repository identifiers [default: false]" + echo " -m | --mode MODE - with MODE==shell (launch interactive shell) or" + echo " MODE==run (run a script or command) [default: shell]" + echo " -r | --repository CFG - configuration file or identifier defining the" + echo " repository to use [default: EESSI-pilot via" + echo " default container, see --container]" + echo " -u | --resume DIR/TGZ - resume a previous run from a directory or tarball," + echo " where DIR points to a previously used tmp directory" + echo " (check for output 'Using DIR as tmp ...' 
of a previous" + echo " run) and TGZ is the path to a tarball which is" + echo " unpacked the tmp dir stored on the local storage space" + echo " (see option --storage above) [default: not set]" + echo " -s | --save DIR/TGZ - save contents of tmp directory to a tarball in" + echo " directory DIR or provided with the fixed full path TGZ" + echo " when a directory is provided, the format of the" + echo " tarball's name will be {REPO_ID}-{TIMESTAMP}.tgz" + echo " [default: not set]" + echo " -v | --verbose - display more information [default: false]" + echo " -x | --http-proxy URL - provides URL for the env variable http_proxy" + echo " [default: not set]; uses env var \$http_proxy if set" + echo " -y | --https-proxy URL - provides URL for the env variable https_proxy" + echo " [default: not set]; uses env var \$https_proxy if set" + echo + echo " If value for --mode is 'run', the SCRIPT/COMMAND provided is executed. If" + echo " arguments to the script/command start with '-' or '--', use the flag terminator" + echo " '--' to let eessi_container.sh stop parsing arguments." +} + +# set defaults for command line arguments +ACCESS="ro" +CONTAINER="docker://ghcr.io/eessi/build-node:debian11" +#DRY_RUN=0 +VERBOSE=0 +STORAGE= +LIST_REPOS=0 +MODE="shell" +REPOSITORY="EESSI-pilot" +RESUME= +SAVE= +HTTP_PROXY=${http_proxy:-} +HTTPS_PROXY=${https_proxy:-} + +POSITIONAL_ARGS=() + +while [[ $# -gt 0 ]]; do + case $1 in + -a|--access) + ACCESS="$2" + shift 2 + ;; + -c|--container) + CONTAINER="$2" + shift 2 + ;; +# -d|--dry-run) +# DRY_RUN=1 +# shift 1 +# ;; + -g|--storage) + STORAGE="$2" + shift 2 + ;; + -h|--help) + display_help + exit 0 + ;; + -l|--list-repos) + LIST_REPOS=1 + shift 1 + ;; + -m|--mode) + MODE="$2" + shift 2 + ;; + -r|--repository) + REPOSITORY="$2" + shift 2 + ;; + -s|--save) + SAVE="$2" + shift 2 + ;; + -u|--resume) + RESUME="$2" + shift 2 + ;; + -v|--verbose) + VERBOSE=1 + shift 1 + ;; + -x|--http-proxy) + HTTP_PROXY="$2" + export http_proxy=${HTTP_PROXY} + shift 2 + ;; + -y|--https-proxy) + HTTPS_PROXY="$2" + export https_proxy=${HTTPS_PROXY} + shift 2 + ;; + --) + shift + POSITIONAL_ARGS+=("$@") # save positional args + break + ;; + -*|--*) + fatal_error "Unknown option: $1" "${CMDLINE_ARG_UNKNOWN_EXITCODE}" + ;; + *) # No more options + POSITIONAL_ARGS+=("$1") # save positional arg + shift + ;; + esac +done + +set -- "${POSITIONAL_ARGS[@]}" + +if [[ ${LIST_REPOS} -eq 1 ]]; then + echo "Listing available repositories with format 'name [source]':" + echo " EESSI-pilot [default]" + if [[ -r ${EESSI_REPOS_CFG_FILE} ]]; then + cfg_load ${EESSI_REPOS_CFG_FILE} + sections=$(cfg_sections) + while IFS= read -r repo_id + do + echo " ${repo_id} [${EESSI_REPOS_CFG_FILE}]" + done <<< "${sections}" + fi + exit 0 +fi + +# 1. check if argument values are valid +# (arg -a|--access) check if ACCESS is supported +if [[ "${ACCESS}" != "ro" && "${ACCESS}" != "rw" ]]; then + fatal_error "unknown access method '${ACCESS}'" "${ACCESS_UNKNOWN_EXITCODE}" +fi + +# TODO (arg -c|--container) check container (is it a file or URL & access those) +# CONTAINER_ERROR_EXITCODE + +# TODO (arg -g|--storage) check if it exists, if user has write permission, +# if it contains no data, etc. +# HOST_STORAGE_ERROR_EXITCODE + +# (arg -m|--mode) check if MODE is known +if [[ "${MODE}" != "shell" && "${MODE}" != "run" ]]; then + fatal_error "unknown execution mode '${MODE}'" "${MODE_UNKNOWN_EXITCODE}" +fi + +# TODO (arg -r|--repository) check if repository is known +# REPOSITORY_ERROR_EXITCODE +if [[ ! 
-z "${REPOSITORY}" && "${REPOSITORY}" != "EESSI-pilot" && ! -r ${EESSI_REPOS_CFG_FILE} ]]; then + fatal_error "arg '--repository ${REPOSITORY}' requires a cfg file at '${EESSI_REPOS_CFG_FILE}'" "${REPOSITORY_ERROR_EXITCODE}" +fi + +# TODO (arg -u|--resume) check if it exists, if user has read permission, +# if it contains data from a previous run +# RESUME_ERROR_EXITCODE + +# TODO (arg -s|--save) check if DIR exists, if user has write permission, +# if TGZ already exists, if user has write permission to directory to which +# TGZ should be written +# SAVE_ERROR_EXITCODE + +# TODO (arg -x|--http-proxy) check if http proxy is accessible +# HTTP_PROXY_ERROR_EXITCODE + +# TODO (arg -y|--https-proxy) check if https proxy is accessible +# HTTPS_PROXY_ERROR_EXITCODE + +# check if a script is provided if mode is 'run' +if [[ "${MODE}" == "run" ]]; then + if [[ $# -eq 0 ]]; then + fatal_error "no command specified to run?!" "${RUN_SCRIPT_MISSING_EXITCODE}" + fi +fi + + +# 2. set up host storage/tmp if necessary +# if session to be resumed from a previous one (--resume ARG) and ARG is a directory +# just reuse ARG, define environment variables accordingly and skip creating a new +# tmp storage +if [[ ! -z ${RESUME} && -d ${RESUME} ]]; then + # resume from directory ${RESUME} + # skip creating a new tmp directory, just set environment variables + echo "Resuming from previous run using temporary storage at ${RESUME}" + EESSI_HOST_STORAGE=${RESUME} +else + # we need a tmp location (and possibly init it with ${RESUME} if it was not + # a directory + + # as location for temporary data use in the following order + # a. command line argument -l|--host-storage + # b. env var TMPDIR + # c. /tmp + # note, we ensure that (a) takes precedence by setting TMPDIR to STORAGE + # if STORAGE is not empty + # note, (b) & (c) are automatically ensured by using 'mktemp -d --tmpdir' to + # create a temporary directory + if [[ ! -z ${STORAGE} ]]; then + export TMPDIR=${STORAGE} + # mktemp fails if TMPDIR does not exist, so let's create it + mkdir -p ${TMPDIR} + fi + if [[ ! -z ${TMPDIR} ]]; then + # TODO check if TMPDIR already exists + # mktemp fails if TMPDIR does not exist, so let's create it + mkdir -p ${TMPDIR} + fi + if [[ -z ${TMPDIR} ]]; then + # mktemp falls back to using /tmp if TMPDIR is empty + # TODO check if /tmp is writable, large enough and usable (different + # features for ro-access and rw-access) + [[ ${VERBOSE} -eq 1 ]] && echo "skipping sanity checks for /tmp" + fi + EESSI_HOST_STORAGE=$(mktemp -d --tmpdir eessi.XXXXXXXXXX) + echo "Using ${EESSI_HOST_STORAGE} as tmp directory (to resume session add '--resume ${EESSI_HOST_STORAGE}')." +fi + +# if ${RESUME} is a file (assume a tgz), unpack it into ${EESSI_HOST_STORAGE} +if [[ ! -z ${RESUME} && -f ${RESUME} ]]; then + tar xf ${RESUME} -C ${EESSI_HOST_STORAGE} + echo "Resuming from previous run using temporary storage ${RESUME} unpacked into ${EESSI_HOST_STORAGE}" +fi + +# 3. 
set up common vars and directories +# directory structure should be: +# ${EESSI_HOST_STORAGE} +# |-singularity_cache +# |-${CVMFS_VAR_LIB} +# |-${CVMFS_VAR_RUN} +# |-overlay-upper +# |-overlay-work +# |-home +# |-repos_cfg + +# tmp dir for EESSI +EESSI_TMPDIR=${EESSI_HOST_STORAGE} +mkdir -p ${EESSI_TMPDIR} +[[ ${VERBOSE} -eq 1 ]] && echo "EESSI_TMPDIR=${EESSI_TMPDIR}" + +# configure Singularity: if SINGULARITY_CACHEDIR is already defined, use that +# a global SINGULARITY_CACHEDIR would ensure that we don't consume +# storage space again and again for the container & also speed-up +# launch times across different sessions +if [[ -z ${SINGULARITY_CACHEDIR} ]]; then + export SINGULARITY_CACHEDIR=${EESSI_TMPDIR}/singularity_cache + mkdir -p ${SINGULARITY_CACHEDIR} +fi +[[ ${VERBOSE} -eq 1 ]] && echo "SINGULARITY_CACHEDIR=${SINGULARITY_CACHEDIR}" + +# if VERBOSE is set to 0 (no arg --verbose), add argument '-q' +if [[ ${VERBOSE} -eq 0 ]]; then + RUN_QUIET='-q' +else + RUN_QUIET='' +fi + +# we try our best to make sure that we retain access to the container image in +# a subsequent session ("best effort" only because pulling or copying operations +# can fail ... in those cases the script may still succeed, but it is not +# guaranteed that we have access to the same container when resuming later on) +# - if CONTAINER references an image in a registry, pull & convert image +# and store it in ${EESSI_TMPDIR} +# + however, only pull image if there is no matching image in ${EESSI_TMPDIR} yet +# - if CONTAINER references an image file, copy it to ${EESSI_TMPDIR} +# + however, only copy it if its base name does not yet exist in ${EESSI_TMPDIR} +# - if the image file created (pulled or copied) or resumed exists in +# ${EESSI_TMPDIR}, let CONTAINER point to it +# + thus subsequent singularity commands in this script would just use the +# image file in EESSI_TMPDIR or the originally given source (some URL or +# path to an image file) +CONTAINER_IMG= +CONTAINER_URL_FMT=".*://(.*)" +if [[ ${CONTAINER} =~ ${CONTAINER_URL_FMT} ]]; then + # replace ':', '-', '/' with '_' in match (everything after ://) and append .sif + CONTAINER_IMG="$(echo ${BASH_REMATCH[1]} | sed 's/[:\/-]/_/g').sif" + # pull container to ${EESSI_TMPDIR} if it is not there yet (i.e. when + # resuming from a previous session) + if [[ ! -x ${EESSI_TMPDIR}/${CONTAINER_IMG} ]]; then + echo "Pulling container image from ${CONTAINER} to ${EESSI_TMPDIR}/${CONTAINER_IMG}" + singularity ${RUN_QUIET} pull ${EESSI_TMPDIR}/${CONTAINER_IMG} ${CONTAINER} + else + echo "Reusing existing container image ${EESSI_TMPDIR}/${CONTAINER_IMG}" + fi +else + # determine file name as basename of CONTAINER + CONTAINER_IMG=$(basename ${CONTAINER}) + # copy image file to ${EESSI_TMPDIR} if it is not there yet (i.e. when + # resuming from a previous session) + if [[ ! -x ${EESSI_TMPDIR}/${CONTAINER_IMG} ]]; then + echo "Copying container image from ${CONTAINER} to ${EESSI_TMPDIR}/${CONTAINER_IMG}" + cp -a ${CONTAINER} ${EESSI_TMPDIR}/. 
+ else + echo "Reusing existing container image ${EESSI_TMPDIR}/${CONTAINER_IMG}" + fi +fi +# let CONTAINER point to the pulled, copied or resumed image file +if [[ -x ${EESSI_TMPDIR}/${CONTAINER_IMG} ]]; then + CONTAINER="${EESSI_TMPDIR}/${CONTAINER_IMG}" +fi +[[ ${VERBOSE} -eq 1 ]] && echo "CONTAINER=${CONTAINER}" + +# set env vars and create directories for CernVM-FS +EESSI_CVMFS_VAR_LIB=${EESSI_TMPDIR}/${CVMFS_VAR_LIB} +EESSI_CVMFS_VAR_RUN=${EESSI_TMPDIR}/${CVMFS_VAR_RUN} +mkdir -p ${EESSI_CVMFS_VAR_LIB} +mkdir -p ${EESSI_CVMFS_VAR_RUN} +[[ ${VERBOSE} -eq 1 ]] && echo "EESSI_CVMFS_VAR_LIB=${EESSI_CVMFS_VAR_LIB}" +[[ ${VERBOSE} -eq 1 ]] && echo "EESSI_CVMFS_VAR_RUN=${EESSI_CVMFS_VAR_RUN}" + +# allow that SINGULARITY_HOME is defined before script is run +if [[ -z ${SINGULARITY_HOME} ]]; then + export SINGULARITY_HOME="${EESSI_TMPDIR}/home:/home/${USER}" + mkdir -p ${EESSI_TMPDIR}/home +fi +[[ ${VERBOSE} -eq 1 ]] && echo "SINGULARITY_HOME=${SINGULARITY_HOME}" + +# define paths to add to SINGULARITY_BIND (added later when all BIND mounts are defined) +BIND_PATHS="${EESSI_CVMFS_VAR_LIB}:/var/lib/cvmfs,${EESSI_CVMFS_VAR_RUN}:/var/run/cvmfs" +# provide a '/tmp' inside the container +BIND_PATHS="${BIND_PATHS},${EESSI_TMPDIR}:${TMP_IN_CONTAINER}" + +[[ ${VERBOSE} -eq 1 ]] && echo "BIND_PATHS=${BIND_PATHS}" + +# set up repository config (always create directory repos_cfg and populate it with info when +# arg -r|--repository is used) +mkdir -p ${EESSI_TMPDIR}/repos_cfg +if [[ "${REPOSITORY}" == "EESSI-pilot" ]]; then + # need to source defaults as late as possible (see other sourcing below) + source ${TOPDIR}/init/eessi_defaults + + # strip "/cvmfs/" from default setting + repo_name=${EESSI_CVMFS_REPO/\/cvmfs\//} +else + # TODO implement more flexible specification of repo cfgs + # REPOSITORY => repo-id OR repo-cfg-file (with a single section) OR + # repo-cfg-file:repo-id (repo-id defined in repo-cfg-file) + # + # for now, assuming repo-id is defined in config file pointed to + # EESSI_REPOS_CFG_FILE, which is to be copied into the working directory + # (could also become part of the software layer to define multiple + # standard EESSI repositories) + cfg_load ${EESSI_REPOS_CFG_FILE} + + # copy repos.cfg to job directory --> makes it easier to inspect the job + cp -a ${EESSI_REPOS_CFG_FILE} ${EESSI_TMPDIR}/repos_cfg/. + + # cfg file should include: repo_name, repo_version, config_bundle, + # map { local_filepath -> container_filepath } + # + # repo_name_domain is the domain part of the repo_name, e.g., + # eessi-hpc.org for pilot.eessi-hpc.org + # + # where config bundle includes the files (-> target location in container) + # - default.local -> /etc/cvmfs/default.local + # contains CVMFS settings, e.g., CVMFS_HTTP_PROXY, CVMFS_QUOTA_LIMIT, ... + # - ${repo_name_domain}.conf -> /etc/cvmfs/domain.d/${repo_name_domain}.conf + # contains CVMFS settings, e.g., CVMFS_SERVER_URL (Stratum 1s), + # CVMFS_KEYS_DIR, CVMFS_USE_GEOAPI, ... 
+ # - ${repo_name_domain}/ -> /etc/cvmfs/keys/${repo_name_domain} + # a directory that contains the public key to access the repository, key + # itself then doesn't need to be BIND mounted + # - ${repo_name_domain}/${repo_name}.pub + # (-> /etc/cvmfs/keys/${repo_name_domain}/${repo_name}.pub + # the public key to access the repository, key itself is BIND mounted + # via directory ${repo_name_domain} + repo_name=$(cfg_get_value ${REPOSITORY} "repo_name") + # derive domain part from repo_name (everything after first '.') + repo_name_domain=${repo_name#*.} + repo_version=$(cfg_get_value ${REPOSITORY} "repo_version") + config_bundle=$(cfg_get_value ${REPOSITORY} "config_bundle") + config_map=$(cfg_get_value ${REPOSITORY} "config_map") + + # convert config_map into associative array cfg_file_map + cfg_init_file_map "${config_map}" + [[ ${VERBOSE} -eq 1 ]] && cfg_print_map + + # use information to set up dir ${EESSI_TMPDIR}/repos_cfg, + # define BIND mounts and override repo name and version + # check if config_bundle exists, if so, unpack it into ${EESSI_TMPDIR}/repos_cfg + # if config_bundle is relative path (no '/' at start) prepend it with + # EESSI_REPOS_CFG_DIR + config_bundle_path= + if [[ ! "${config_bundle}" =~ ^/ ]]; then + config_bundle_path=${EESSI_REPOS_CFG_DIR}/${config_bundle} + else + config_bundle_path=${config_bundle} + fi + + if [[ ! -r ${config_bundle_path} ]]; then + fatal_error "config bundle '${config_bundle_path}' is not readable" ${REPOSITORY_ERROR_EXITCODE} + fi + + # only unpack config_bundle if we're not resuming from a previous run + if [[ -z ${RESUME} ]]; then + tar xf ${config_bundle_path} -C ${EESSI_TMPDIR}/repos_cfg + fi + + for src in "${!cfg_file_map[@]}" + do + target=${cfg_file_map[${src}]} + BIND_PATHS="${BIND_PATHS},${EESSI_TMPDIR}/repos_cfg/${src}:${target}" + done + export EESSI_PILOT_VERSION_OVERRIDE=${repo_version} + export EESSI_CVMFS_REPO_OVERRIDE="/cvmfs/${repo_name}" + # need to source defaults as late as possible (after *_OVERRIDEs) + source ${TOPDIR}/init/eessi_defaults +fi + +# if http_proxy is not empty, we assume that the machine accesses internet +# via a proxy. then we need to add CVMFS_HTTP_PROXY to +# ${EESSI_TMPDIR}/repos_cfg/default.local on host (and possibly add a BIND +# MOUNT if it was not yet in BIND_PATHS) +if [[ ! -z ${http_proxy} ]]; then + # TODO tolerate other formats for proxy URLs, for now assume format is + # http://SOME_HOSTNAME:SOME_PORT/ + [[ ${VERBOSE} -eq 1 ]] && echo "http_proxy='${http_proxy}'" + PROXY_HOST=$(get_host_from_url ${http_proxy}) + [[ ${VERBOSE} -eq 1 ]] && echo "PROXY_HOST='${PROXY_HOST}'" + PROXY_PORT=$(get_port_from_url ${http_proxy}) + [[ ${VERBOSE} -eq 1 ]] && echo "PROXY_PORT='${PROXY_PORT}'" + HTTP_PROXY_IPV4=$(get_ipv4_address ${PROXY_HOST}) + [[ ${VERBOSE} -eq 1 ]] && echo "HTTP_PROXY_IPV4='${HTTP_PROXY_IPV4}'" + echo "CVMFS_HTTP_PROXY=\"${http_proxy}|http://${HTTP_PROXY_IPV4}:${PROXY_PORT}\"" \ + >> ${EESSI_TMPDIR}/repos_cfg/default.local + [[ ${VERBOSE} -eq 1 ]] && echo "contents of default.local" + [[ ${VERBOSE} -eq 1 ]] && cat ${EESSI_TMPDIR}/repos_cfg/default.local + + # if default.local is not BIND mounted into container, add it to BIND_PATHS + if [[ ! ${BIND_PATHS} =~ "${EESSI_TMPDIR}/repos_cfg/default.local:/etc/cvmfs/default.local" ]]; then + export BIND_PATHS="${BIND_PATHS},${EESSI_TMPDIR}/repos_cfg/default.local:/etc/cvmfs/default.local" + fi +fi + +# 4. 
set up vars and dirs specific to a scenario + +declare -a EESSI_FUSE_MOUNTS=() +if [[ "${ACCESS}" == "ro" ]]; then + export EESSI_PILOT_READONLY="container:cvmfs2 ${repo_name} /cvmfs/${repo_name}" + + EESSI_FUSE_MOUNTS+=("--fusemount" "${EESSI_PILOT_READONLY}") + export EESSI_FUSE_MOUNTS +fi + +if [[ "${ACCESS}" == "rw" ]]; then + mkdir -p ${EESSI_TMPDIR}/overlay-upper + mkdir -p ${EESSI_TMPDIR}/overlay-work + + # set environment variables for fuse mounts in Singularity container + export EESSI_PILOT_READONLY="container:cvmfs2 ${repo_name} /cvmfs_ro/${repo_name}" + + EESSI_FUSE_MOUNTS+=("--fusemount" "${EESSI_PILOT_READONLY}") + + EESSI_PILOT_WRITABLE_OVERLAY="container:fuse-overlayfs" + EESSI_PILOT_WRITABLE_OVERLAY+=" -o lowerdir=/cvmfs_ro/${repo_name}" + EESSI_PILOT_WRITABLE_OVERLAY+=" -o upperdir=${TMP_IN_CONTAINER}/overlay-upper" + EESSI_PILOT_WRITABLE_OVERLAY+=" -o workdir=${TMP_IN_CONTAINER}/overlay-work" + EESSI_PILOT_WRITABLE_OVERLAY+=" ${EESSI_CVMFS_REPO}" + export EESSI_PILOT_WRITABLE_OVERLAY + + EESSI_FUSE_MOUNTS+=("--fusemount" "${EESSI_PILOT_WRITABLE_OVERLAY}") + export EESSI_FUSE_MOUNTS +fi + + +# 5. run container +# final settings +if [[ -z ${SINGULARITY_BIND} ]]; then + export SINGULARITY_BIND="${BIND_PATHS}" +else + export SINGULARITY_BIND="${SINGULARITY_BIND},${BIND_PATHS}" +fi +[[ ${VERBOSE} -eq 1 ]] && echo "SINGULARITY_BIND=${SINGULARITY_BIND}" + +# pass $EESSI_SOFTWARE_SUBDIR_OVERRIDE into build container (if set) +if [ ! -z ${EESSI_SOFTWARE_SUBDIR_OVERRIDE} ]; then + export SINGULARITYENV_EESSI_SOFTWARE_SUBDIR_OVERRIDE=${EESSI_SOFTWARE_SUBDIR_OVERRIDE} + # also specify via $APPTAINERENV_* (future proof, cfr. https://apptainer.org/docs/user/latest/singularity_compatibility.html#singularity-environment-variable-compatibility) + export APPTAINERENV_EESSI_SOFTWARE_SUBDIR_OVERRIDE=${EESSI_SOFTWARE_SUBDIR_OVERRIDE} +fi + +echo "Launching container with command (next line):" +echo "singularity ${RUN_QUIET} ${MODE} ${EESSI_FUSE_MOUNTS[@]} ${CONTAINER} $@" +singularity ${RUN_QUIET} ${MODE} "${EESSI_FUSE_MOUNTS[@]}" ${CONTAINER} "$@" +exit_code=$? + +# 6. save tmp if requested (arg -s|--save) +if [[ ! -z ${SAVE} ]]; then + # Note, for now we don't try to be smart and record in any way the OS and + # ARCH which might have been used internally, eg, when software packages + # were built ... we rather keep the script here "stupid" and leave the handling + # of these aspects to where the script is used + if [[ -d ${SAVE} ]]; then + # assume SAVE is name of a directory to which tarball shall be written to + # name format: {REPO_ID}-{TIMESTAMP}.tgz + ts=$(date +%s) + TGZ=${SAVE}/${REPOSITORY}-${ts}.tgz + else + # assume SAVE is the full path to a tarball's name + TGZ=${SAVE} + fi + tar cf ${TGZ} -C ${EESSI_TMPDIR} . + echo "Saved contents of tmp directory '${EESSI_TMPDIR}' to tarball '${TGZ}' (to resume session add '--resume ${TGZ}')" +fi + +# TODO clean up tmp by default? 
only retain if another option provided (--retain-tmp) + +# use exit code of container command +exit ${exit_code} diff --git a/init/arch_specs/eessi_arch_arm.spec b/init/arch_specs/eessi_arch_arm.spec index 92f32a76d8..b5c9275043 100755 --- a/init/arch_specs/eessi_arch_arm.spec +++ b/init/arch_specs/eessi_arch_arm.spec @@ -1,6 +1,6 @@ # ARM CPU architecture specifications # Software path in EESSI | Vendor ID | List of defining CPU features -"aarch64/arm/neoverse-n1" "ARM" "asimd" # Ampere Altra -"aarch64/arm/neoverse-n1" "" "asimd" # AWS Graviton2 -"aarch64/arm/neoverse-v1" "ARM" "asimd svei8mm" -"aarch64/arm/neoverse-v1" "" "asimd svei8mm" # AWS Graviton3 +"aarch64/neoverse_n1" "ARM" "asimd" # Ampere Altra +"aarch64/neoverse_n1" "" "asimd" # AWS Graviton2 +"aarch64/neoverse_v1" "ARM" "asimd svei8mm" +"aarch64/neoverse_v1" "" "asimd svei8mm" # AWS Graviton3 diff --git a/init/eessi_archdetect.sh b/init/eessi_archdetect.sh index 049399c233..7ec3ef7b80 100755 --- a/init/eessi_archdetect.sh +++ b/init/eessi_archdetect.sh @@ -1,8 +1,10 @@ #!/usr/bin/env bash -VERSION="1.0.0" +VERSION="1.1.0" # Logging LOG_LEVEL="INFO" +# Default result type is a best match +CPUPATH_RESULT="best" timestamp () { date "+%Y-%m-%d %H:%M:%S" @@ -116,7 +118,8 @@ cpupath(){ log "DEBUG" "cpupath: CPU flags of host system: '$cpu_flags'" # Default to generic CPU - local best_arch_match="generic" + local best_arch_match="$machine_type/generic" + local all_arch_matches=$best_arch_match # Iterate over the supported CPU specifications to find the best match for host CPU # Order of the specifications matters, the last one to match will be selected @@ -125,22 +128,29 @@ cpupath(){ if [ "${cpu_vendor}x" == "${arch_spec[1]}x" ]; then # each flag in this CPU specification must be found in the list of flags of the host check_allinfirst "${cpu_flags[*]}" ${arch_spec[2]} && best_arch_match=${arch_spec[0]} && \ - log "DEBUG" "cpupath: host CPU best match updated to $best_arch_match" + all_arch_matches="$best_arch_match:$all_arch_matches" && \ + log "DEBUG" "cpupath: host CPU best match updated to $best_arch_match" fi done - log "INFO" "cpupath: best match for host CPU: $best_arch_match" - echo "$best_arch_match" + if [ "allx" == "${CPUPATH_RESULT}x" ]; then + log "INFO" "cpupath: all matches for host CPU: $all_arch_matches" + echo "$all_arch_matches" + else + log "INFO" "cpupath: best match for host CPU: $best_arch_match" + echo "$best_arch_match" + fi } # Parse command line arguments -USAGE="Usage: eessi_archdetect.sh [-h][-d] " +USAGE="Usage: eessi_archdetect.sh [-h][-d][-a] " -while getopts 'hdv' OPTION; do +while getopts 'hdva' OPTION; do case "$OPTION" in h) echo "$USAGE"; exit 0;; d) LOG_LEVEL="DEBUG";; v) echo "eessi_archdetect.sh v$VERSION"; exit 0;; + a) CPUPATH_RESULT="all";; ?) 
echo "$USAGE"; exit 1;; esac done @@ -150,5 +160,5 @@ ARGUMENT=${1:-none} case "$ARGUMENT" in "cpupath") cpupath; exit;; - *) echo "$USAGE"; log "ERROR" "Missing argument";; + *) echo "$USAGE"; log "ERROR" "Missing argument (possible actions: 'cpupath')";; esac diff --git a/init/eessi_defaults b/init/eessi_defaults new file mode 100644 index 0000000000..f482cbc269 --- /dev/null +++ b/init/eessi_defaults @@ -0,0 +1,12 @@ +# define default values for some EESSI_* environment variables +# +# This file is part of the EESSI software layer, +# see https://github.com/EESSI/software-layer +# +# author: Thomas Roeblitz (@trz42) +# +# license: GPLv2 +# + +export EESSI_CVMFS_REPO="${EESSI_CVMFS_REPO_OVERRIDE:=/cvmfs/pilot.eessi-hpc.org}" +export EESSI_PILOT_VERSION="${EESSI_PILOT_VERSION_OVERRIDE:=2021.12}" diff --git a/init/eessi_software_subdir_for_host.py b/init/eessi_software_subdir_for_host.py index b09b4711e4..58e9cfd2e6 100755 --- a/init/eessi_software_subdir_for_host.py +++ b/init/eessi_software_subdir_for_host.py @@ -101,8 +101,6 @@ def find_best_target(eessi_prefix): continue if uarch in KNOWN_CPU_UARCHS: target_uarchs.append(KNOWN_CPU_UARCHS[uarch]) - else: - warning('Ignoring unknown target "%s"' % uarch) host_uarch = KNOWN_CPU_UARCHS[host_cpu_name] compat_target_uarchs = sorted([x for x in target_uarchs if x <= host_uarch]) diff --git a/init/minimal_eessi_env b/init/minimal_eessi_env index 39478d5c4c..b7cb7c5e9e 100644 --- a/init/minimal_eessi_env +++ b/init/minimal_eessi_env @@ -1,7 +1,13 @@ # define minimal EESSI environment, without relying on external scripts +# +# this script is *sourced*, not executed, so can't rely on $0 to determine path to self +# $BASH_SOURCE points to correct path, see also http://mywiki.wooledge.org/BashFAQ/028 +EESSI_INIT_DIR_PATH=$(dirname $(realpath $BASH_SOURCE)) + +# set up defaults: EESSI_CVMFS_REPO, EESSI_PILOT_VERSION +# script takes *_OVERRIDEs into account +source ${EESSI_INIT_DIR_PATH}/eessi_defaults -export EESSI_CVMFS_REPO="/cvmfs/pilot.eessi-hpc.org" -export EESSI_PILOT_VERSION="${EESSI_PILOT_VERSION_OVERRIDE:=2021.12}" export EESSI_PREFIX=$EESSI_CVMFS_REPO/versions/$EESSI_PILOT_VERSION if [[ $(uname -s) == 'Linux' ]]; then diff --git a/init/test.py b/init/test.py index 0ed09abd6f..f10be5e66e 100644 --- a/init/test.py +++ b/init/test.py @@ -60,12 +60,12 @@ def broadwell_host_triple(): prep_tmpdir(tmpdir, ['x86_64/intel/ivybridge']) assert find_best_target(tmpdir) == 'x86_64/intel/ivybridge' - # unknown targets don't cause trouble (only warning) + # unknown targets don't cause trouble prep_tmpdir(tmpdir, ['x86_64/intel/no_such_intel_cpu']) assert find_best_target(tmpdir) == 'x86_64/intel/ivybridge' captured = capsys.readouterr() assert captured.out == '' - assert captured.err == 'WARNING: Ignoring unknown target "no_such_intel_cpu"\n' + assert captured.err == '' # older targets have to no impact on best target (sandybridge < ivybridge) prep_tmpdir(tmpdir, ['x86_64/intel/sandybridge']) diff --git a/install_apptainer_ubuntu.sh b/install_apptainer_ubuntu.sh new file mode 100755 index 0000000000..c35c34cda6 --- /dev/null +++ b/install_apptainer_ubuntu.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -e + +# see https://github.com/apptainer/singularity/issues/5390#issuecomment-899111181 +sudo apt-get install alien +alien --version +apptainer_rpm=$(curl --silent -L https://dl.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/a/ | grep 'apptainer-[0-9]' | sed 's/.*\(apptainer[0-9._a-z-]*.rpm\).*/\1/g') +curl -OL 
https://dl.fedoraproject.org/pub/epel/8/Everything/x86_64/Packages/a/${apptainer_rpm} +sudo alien -d ${apptainer_rpm} +sudo apt install ./apptainer*.deb +apptainer --version +# also check whether 'singularity' command is still provided by Apptainer installation +singularity --version diff --git a/install_software_layer.sh b/install_software_layer.sh index e0b2f785d5..bf3006a4a0 100755 --- a/install_software_layer.sh +++ b/install_software_layer.sh @@ -1,3 +1,4 @@ #!/bin/bash -export EESSI_PILOT_VERSION='2021.12' +base_dir=$(dirname $(realpath $0)) +source ${base_dir}/init/eessi_defaults ./run_in_compat_layer_env.sh ./EESSI-pilot-install-software.sh "$@" diff --git a/load_easybuild_module.sh b/load_easybuild_module.sh new file mode 100755 index 0000000000..4ff2a3c37c --- /dev/null +++ b/load_easybuild_module.sh @@ -0,0 +1,124 @@ +# Script to load the environment module for a specific version of EasyBuild. +# If that module is not available yet, the latest available EasyBuild version will be installed first, +# and used to install the specific EasyBuild version being specified. +# +# This script must be sourced, since it makes changes in the current environment, like loading an EasyBuild module. +# +# This script is part of the EESSI software layer, see +# https://github.com/EESSI/software-layer.git +# +# author: Kenneth Hoste (@boegel, HPC-UGent) +# +# license: GPLv2 +# +# +set -o pipefail + +if [ $# -ne 1 ]; then + echo "Usage: $0 <EasyBuild version>" >&2 + exit 1 +fi + +# don't use $EASYBUILD_VERSION, since that enables always running 'eb --version' +EB_VERSION=${1} + +# make sure that environment variables that we expect to be set are indeed set +if [ -z "${TMPDIR}" ]; then + echo "\$TMPDIR is not set" >&2 + exit 2 +fi + +# ${EB} is used to specify which 'eb' command should be used; +# can potentially be more than just 'eb', for example when using 'eb --optarch=GENERIC' +if [ -z "${EB}" ]; then + echo "\$EB is not set" >&2 + exit 2 +fi + +# make sure that utility functions are defined (cfr. scripts/utils.sh script in EESSI/software-layer repo) +type check_exit_code +if [ $? -ne 0 ]; then + echo "check_exit_code function is not defined" >&2 + exit 3 +fi + +echo ">> Checking for EasyBuild module..." + +ml_av_easybuild_out=${TMPDIR}/ml_av_easybuild.out +module avail 2>&1 | grep -i easybuild/${EB_VERSION} &> ${ml_av_easybuild_out} + +if [[ $? -eq 0 ]]; then + echo_green ">> Module for EasyBuild v${EB_VERSION} found!" +else + echo_yellow ">> No module yet for EasyBuild v${EB_VERSION}, installing it..." + + EB_TMPDIR=${TMPDIR}/ebtmp + echo ">> Temporary installation (in ${EB_TMPDIR})..." + pip_install_out=${TMPDIR}/pip_install.out + pip3 install --prefix ${EB_TMPDIR} easybuild &> ${pip_install_out} + + # keep track of original $PATH and $PYTHONPATH values, so we can restore them + ORIG_PATH=${PATH} + ORIG_PYTHONPATH=${PYTHONPATH} + + echo ">> Final installation in ${EASYBUILD_INSTALLPATH}..." + export PATH=${EB_TMPDIR}/bin:${PATH} + export PYTHONPATH=$(ls -d ${EB_TMPDIR}/lib/python*/site-packages):${PYTHONPATH} + eb_install_out=${TMPDIR}/eb_install.out + ok_msg="Latest EasyBuild release installed, let's go!" + fail_msg="Installing latest EasyBuild release failed, that's not good... (output: ${eb_install_out})" + ${EB} --install-latest-eb-release 2>&1 | tee ${eb_install_out} + check_exit_code $? "${ok_msg}" "${fail_msg}" + + # maybe the module obtained with --install-latest-eb-release is exactly the EasyBuild version we wanted? 
+ module avail 2>&1 | grep -i easybuild/${EB_VERSION} &> ${ml_av_easybuild_out} + if [[ $? -eq 0 ]]; then + echo_green ">> Module for EasyBuild v${EB_VERSION} found!" + else + eb_ec=EasyBuild-${EB_VERSION}.eb + echo_yellow ">> Still no module for EasyBuild v${EB_VERSION}, trying with easyconfig ${eb_ec}..." + ${EB} --search ${eb_ec} | grep ${eb_ec} > /dev/null + if [[ $? -eq 0 ]]; then + echo "Easyconfig ${eb_ec} found for EasyBuild v${EB_VERSION}, so installing it..." + ok_msg="EasyBuild v${EB_VERSION} installed, alright!" + fail_msg="Installing EasyBuild v${EB_VERSION}, yikes! (output: ${eb_install_out})" + ${EB} EasyBuild-${EB_VERSION}.eb 2>&1 | tee -a ${eb_install_out} + check_exit_code $? "${ok_msg}" "${fail_msg}" + else + fatal_error "No easyconfig found for EasyBuild v${EB_VERSION}" + fi + fi + + # restore origin $PATH and $PYTHONPATH values, and clean up environment variables that are no longer needed + export PATH=${ORIG_PATH} + export PYTHONPATH=${ORIG_PYTHONPATH} + unset EB_TMPDIR ORIG_PATH ORIG_PYTHONPATH + + module avail easybuild/${EB_VERSION} &> ${ml_av_easybuild_out} + if [[ $? -eq 0 ]]; then + echo_green ">> EasyBuild/${EB_VERSION} module installed!" + else + fatal_error "EasyBuild/${EB_VERSION} module failed to install?! (output of 'pip install' in ${pip_install_out}, output of 'eb' in ${eb_install_out}, output of 'module avail easybuild' in ${ml_av_easybuild_out})" + fi +fi + +echo ">> Loading EasyBuild v${EB_VERSION} module..." +module load EasyBuild/${EB_VERSION} +eb_show_system_info_out=${TMPDIR}/eb_show_system_info.out +${EB} --show-system-info > ${eb_show_system_info_out} +if [[ $? -eq 0 ]]; then + echo_green ">> EasyBuild seems to be working!" + ${EB} --version | grep "${EB_VERSION}" + if [[ $? -eq 0 ]]; then + echo_green "Found EasyBuild version ${EB_VERSION}, looking good!" + else + ${EB} --version + fatal_error "Expected to find EasyBuild version ${EB_VERSION}, giving up here..." + fi + ${EB} --show-config +else + cat ${eb_show_system_info_out} + fatal_error "EasyBuild not working?!" +fi + +unset EB_VERSION diff --git a/run_in_compat_layer_env.sh b/run_in_compat_layer_env.sh index 561a311588..c70077bf15 100755 --- a/run_in_compat_layer_env.sh +++ b/run_in_compat_layer_env.sh @@ -1,9 +1,13 @@ #!/bin/bash + +base_dir=$(dirname $(realpath $0)) +source ${base_dir}/init/eessi_defaults + if [ -z $EESSI_PILOT_VERSION ]; then echo "ERROR: \$EESSI_PILOT_VERSION must be set!" >&2 exit 1 fi -EESSI_COMPAT_LAYER_DIR="/cvmfs/pilot.eessi-hpc.org/versions/${EESSI_PILOT_VERSION}/compat/linux/$(uname -m)" +EESSI_COMPAT_LAYER_DIR="${EESSI_CVMFS_REPO}/versions/${EESSI_PILOT_VERSION}/compat/linux/$(uname -m)" if [ ! -d ${EESSI_COMPAT_LAYER_DIR} ]; then echo "ERROR: ${EESSI_COMPAT_LAYER_DIR} does not exist!" >&2 exit 1 @@ -13,6 +17,18 @@ INPUT=$(echo "$@") if [ ! -z ${EESSI_SOFTWARE_SUBDIR_OVERRIDE} ]; then INPUT="export EESSI_SOFTWARE_SUBDIR_OVERRIDE=${EESSI_SOFTWARE_SUBDIR_OVERRIDE}; ${INPUT}" fi +if [ ! -z ${EESSI_CVMFS_REPO_OVERRIDE} ]; then + INPUT="export EESSI_CVMFS_REPO_OVERRIDE=${EESSI_CVMFS_REPO_OVERRIDE}; ${INPUT}" +fi +if [ ! -z ${EESSI_PILOT_VERSION_OVERRIDE} ]; then + INPUT="export EESSI_PILOT_VERSION_OVERRIDE=${EESSI_PILOT_VERSION_OVERRIDE}; ${INPUT}" +fi +if [ ! -z ${http_proxy} ]; then + INPUT="export http_proxy=${http_proxy}; ${INPUT}" +fi +if [ ! -z ${https_proxy} ]; then + INPUT="export https_proxy=${https_proxy}; ${INPUT}" +fi -echo "Running '${INPUT}' in EESSI ${EESSI_PILOT_VERSION} compatibility layer environment..." 
+echo "Running '${INPUT}' in EESSI (${EESSI_CVMFS_REPO}) ${EESSI_PILOT_VERSION} compatibility layer environment..." ${EESSI_COMPAT_LAYER_DIR}/startprefix <<< "${INPUT}" diff --git a/scripts/cfg_files.sh b/scripts/cfg_files.sh new file mode 100644 index 0000000000..57ea2f7c03 --- /dev/null +++ b/scripts/cfg_files.sh @@ -0,0 +1,167 @@ +# functions for working with ini/cfg files +# +# This file is part of the EESSI software layer, see +# https://github.com/EESSI/software-layer.git +# +# author: Thomas Roeblitz (@trz42) +# +# license: GPLv2 +# + + +# global variables +# -a -> indexed array +# -A -> associative array +declare -A cfg_repos +declare -A cfg_file_map + + +# functions +function cfg_get_section { + if [[ "$1" =~ ^(\[)(.*)(\])$ ]]; then + echo ${BASH_REMATCH[2]} + else + echo "" + fi +} + +function cfg_get_key_value { + if [[ "$1" =~ ^([^=]+)=([^=]+)$ ]]; then + echo "${BASH_REMATCH[1]}=${BASH_REMATCH[2]}" + else + echo "" + fi +} + +function cfg_load { + local cur_section="" + local cur_key="" + local cur_val="" + IFS= + while read -r line; do + new_section=$(cfg_get_section $line) + # got a new section + if [[ -n "$new_section" ]]; then + cur_section=$new_section + # not a section, try a key value + else + val=$(cfg_get_key_value $line) + # trim leading and trailing spaces as well + cur_key=$(echo $val | cut -f1 -d'=' | cfg_trim_spaces) + cur_val=$(echo $val | cut -f2 -d'=' | cfg_trim_spaces) + if [[ -n "$cur_key" ]]; then + # section + key is the associative in bash array, the field separator is space + cfg_repos[${cur_section} ${cur_key}]=$cur_val + fi + fi + done <$1 +} + +function cfg_print { + for index in "${!cfg_repos[@]}" + do + # split the associative key in to section and key + echo -n "section : $(echo $index | cut -f1 -d ' ');" + echo -n "key : $(echo $index | cut -f2 -d ' ');" + echo "value: ${cfg_repos[$index]}" + done +} + +function cfg_sections { + declare -A sections + for key in "${!cfg_repos[@]}" + do + # extract section from the associative key + section=$(echo $key | cut -f1 -d ' ') + sections[${section}]=1 + done + for repo in "${!sections[@]}" + do + echo "${repo}" + done +} + +function cfg_get_value { + section=$1 + key=$2 + echo "${cfg_repos[$section $key]}" +} + +function cfg_trim_spaces { + # reads from argument $1 or stdin + if [[ $# -gt 0 ]]; then + sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' <<< ${1} + else + sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' < /dev/stdin + fi +} + +function cfg_trim_quotes { + # reads from argument $1 or stdin + if [[ $# -gt 0 ]]; then + sed -e 's/^"*//' -e 's/"*$//' <<< ${1} + else + sed -e 's/^"*//' -e 's/"*$//' < /dev/stdin + fi +} + +function cfg_trim_curly_brackets { + # reads from argument $1 or stdin + if [[ $# -gt 0 ]]; then + sed -e 's/^{*//' -e 's/}*$//' <<< ${1} + else + sed -e 's/^{*//' -e 's/}*$//' < /dev/stdin + fi +} + +function cfg_get_all_sections { + # first field in keys + # 1. get first field in all keys, 2. filter duplicates, 3. 
return them as string + declare -A all_sections + for key in "${!cfg_repos[@]}" + do + section=$(echo "$key" | cut -f1 -d' ') + all_sections[${section}]=1 + done + sections= + for sec_key in "${!all_sections[@]}" + do + sections="${sections} ${sec_key}" + done + echo "${sections}" | cfg_trim_spaces +} + +function cfg_init_file_map { + # strip '{' and '}' from config_map + # split config_map at ',' + # for each item: split at ':' use first as key, second as value + + # reset global variable + cfg_file_map=() + + # expects a string containing the config_map from the cfg file + # trim leading and trailing curly brackets + cm_trimmed=$(cfg_trim_curly_brackets "$1") + + # split into elements along ',' + declare -a cm_mappings + IFS=',' read -r -a cm_mappings <<< "${cm_trimmed}" + + for index in "${!cm_mappings[@]}" + do + # split mapping into key and value + map_key=$(echo ${cm_mappings[index]} | cut -f1 -d':') + map_value=$(echo ${cm_mappings[index]} | cut -f2 -d':') + # trim spaces and double quotes at start and end + tr_key=$(cfg_trim_spaces "${map_key}" | cfg_trim_quotes) + tr_value=$(cfg_trim_spaces "${map_value}" | cfg_trim_quotes) + cfg_file_map[${tr_key}]=${tr_value} + done +} + +function cfg_print_map { + for index in "${!cfg_file_map[@]}" + do + echo "${index} --> ${cfg_file_map[${index}]}" + done +} diff --git a/scripts/utils.sh b/scripts/utils.sh new file mode 100644 index 0000000000..d0da95e87f --- /dev/null +++ b/scripts/utils.sh @@ -0,0 +1,98 @@ +function echo_green() { + echo -e "\e[32m$1\e[0m" +} + +function echo_red() { + echo -e "\e[31m$1\e[0m" +} + +function echo_yellow() { + echo -e "\e[33m$1\e[0m" +} + +ANY_ERROR_EXITCODE=1 +function fatal_error() { + echo_red "ERROR: $1" >&2 + if [[ $# -gt 1 ]]; then + exit $2 + else + exit "${ANY_ERROR_EXITCODE}" + fi +} + +function check_exit_code { + ec=$1 + ok_msg=$2 + fail_msg=$3 + + if [[ $ec -eq 0 ]]; then + echo_green "${ok_msg}" + else + fatal_error "${fail_msg}" + fi +} + +function get_path_for_tool { + tool_name=$1 + tool_envvar_name=$2 + + which_out=$(which ${tool_name} 2>&1) + exit_code=$? + if [[ ${exit_code} -eq 0 ]]; then + echo "INFO: found tool ${tool_name} in PATH (${which_out})" >&2 + echo "${which_out}" + return 0 + fi + if [[ -z "${tool_envvar_name}" ]]; then + msg="no env var holding the full path to tool '${tool_name}' provided" + echo "${msg}" >&2 + return 1 + else + tool_envvar_value=${!tool_envvar_name} + if [[ -x "${tool_envvar_value}" ]]; then + msg="INFO: found tool ${tool_envvar_value} via env var ${tool_envvar_name}" + echo "${msg}" >&2 + echo "${tool_envvar_value}" + return 0 + else + msg="ERROR: tool '${tool_name}' not in PATH\n" + msg+="ERROR: tool '${tool_envvar_value}' via '${tool_envvar_name}' not in PATH" + echo "${msg}" >&2 + echo "" + return 2 + fi + fi +} + +function get_host_from_url { + url=$1 + re="(http|https)://([^/:]+)" + if [[ $url =~ $re ]]; then + echo ${BASH_REMATCH[2]} + return 0 + else + echo "" + return 1 + fi +} + +function get_port_from_url { + url=$1 + re="(http|https)://[^:]+:([0-9]+)" + if [[ $url =~ $re ]]; then + echo ${BASH_REMATCH[2]} + return 0 + else + echo "" + return 1 + fi +} + +function get_ipv4_address { + hname=$1 + hipv4=$(grep ${hname} /etc/hosts | grep -v '^[[:space:]]*#' | cut -d ' ' -f 1) + # TODO try other methods if the one above does not work --> tool that verifies + # what method can be used? 
+ echo "${hipv4}" + return 0 +} diff --git a/tests/archdetect/aarch64/arm/neoverse-n1/AWS-awslinux-graviton2.output b/tests/archdetect/aarch64/arm/neoverse-n1/AWS-awslinux-graviton2.output deleted file mode 100644 index b4dc5e9f1b..0000000000 --- a/tests/archdetect/aarch64/arm/neoverse-n1/AWS-awslinux-graviton2.output +++ /dev/null @@ -1 +0,0 @@ -aarch64/arm/neoverse-n1 diff --git a/tests/archdetect/aarch64/arm/neoverse-n1/Azure-Ubuntu20-Altra.output b/tests/archdetect/aarch64/arm/neoverse-n1/Azure-Ubuntu20-Altra.output deleted file mode 100644 index b4dc5e9f1b..0000000000 --- a/tests/archdetect/aarch64/arm/neoverse-n1/Azure-Ubuntu20-Altra.output +++ /dev/null @@ -1 +0,0 @@ -aarch64/arm/neoverse-n1 diff --git a/tests/archdetect/aarch64/arm/neoverse-v1/AWS-awslinux-graviton3.output b/tests/archdetect/aarch64/arm/neoverse-v1/AWS-awslinux-graviton3.output deleted file mode 100644 index 20db96d01f..0000000000 --- a/tests/archdetect/aarch64/arm/neoverse-v1/AWS-awslinux-graviton3.output +++ /dev/null @@ -1 +0,0 @@ -aarch64/arm/neoverse-v1 diff --git a/tests/archdetect/aarch64/neoverse_n1/AWS-awslinux-graviton2.all.output b/tests/archdetect/aarch64/neoverse_n1/AWS-awslinux-graviton2.all.output new file mode 100644 index 0000000000..340aaa5d02 --- /dev/null +++ b/tests/archdetect/aarch64/neoverse_n1/AWS-awslinux-graviton2.all.output @@ -0,0 +1 @@ +aarch64/neoverse_n1:aarch64/generic diff --git a/tests/archdetect/aarch64/arm/neoverse-n1/AWS-awslinux-graviton2.cpuinfo b/tests/archdetect/aarch64/neoverse_n1/AWS-awslinux-graviton2.cpuinfo similarity index 100% rename from tests/archdetect/aarch64/arm/neoverse-n1/AWS-awslinux-graviton2.cpuinfo rename to tests/archdetect/aarch64/neoverse_n1/AWS-awslinux-graviton2.cpuinfo diff --git a/tests/archdetect/aarch64/neoverse_n1/AWS-awslinux-graviton2.output b/tests/archdetect/aarch64/neoverse_n1/AWS-awslinux-graviton2.output new file mode 100644 index 0000000000..a9bd49c75c --- /dev/null +++ b/tests/archdetect/aarch64/neoverse_n1/AWS-awslinux-graviton2.output @@ -0,0 +1 @@ +aarch64/neoverse_n1 diff --git a/tests/archdetect/aarch64/neoverse_n1/Azure-Ubuntu20-Altra.all.output b/tests/archdetect/aarch64/neoverse_n1/Azure-Ubuntu20-Altra.all.output new file mode 100644 index 0000000000..340aaa5d02 --- /dev/null +++ b/tests/archdetect/aarch64/neoverse_n1/Azure-Ubuntu20-Altra.all.output @@ -0,0 +1 @@ +aarch64/neoverse_n1:aarch64/generic diff --git a/tests/archdetect/aarch64/arm/neoverse-n1/Azure-Ubuntu20-Altra.cpuinfo b/tests/archdetect/aarch64/neoverse_n1/Azure-Ubuntu20-Altra.cpuinfo similarity index 100% rename from tests/archdetect/aarch64/arm/neoverse-n1/Azure-Ubuntu20-Altra.cpuinfo rename to tests/archdetect/aarch64/neoverse_n1/Azure-Ubuntu20-Altra.cpuinfo diff --git a/tests/archdetect/aarch64/neoverse_n1/Azure-Ubuntu20-Altra.output b/tests/archdetect/aarch64/neoverse_n1/Azure-Ubuntu20-Altra.output new file mode 100644 index 0000000000..a9bd49c75c --- /dev/null +++ b/tests/archdetect/aarch64/neoverse_n1/Azure-Ubuntu20-Altra.output @@ -0,0 +1 @@ +aarch64/neoverse_n1 diff --git a/tests/archdetect/aarch64/neoverse_v1/AWS-awslinux-graviton3.all.output b/tests/archdetect/aarch64/neoverse_v1/AWS-awslinux-graviton3.all.output new file mode 100644 index 0000000000..920d5f9996 --- /dev/null +++ b/tests/archdetect/aarch64/neoverse_v1/AWS-awslinux-graviton3.all.output @@ -0,0 +1 @@ +aarch64/neoverse_v1:aarch64/neoverse_n1:aarch64/generic diff --git a/tests/archdetect/aarch64/arm/neoverse-v1/AWS-awslinux-graviton3.cpuinfo 
b/tests/archdetect/aarch64/neoverse_v1/AWS-awslinux-graviton3.cpuinfo similarity index 100% rename from tests/archdetect/aarch64/arm/neoverse-v1/AWS-awslinux-graviton3.cpuinfo rename to tests/archdetect/aarch64/neoverse_v1/AWS-awslinux-graviton3.cpuinfo diff --git a/tests/archdetect/aarch64/neoverse_v1/AWS-awslinux-graviton3.output b/tests/archdetect/aarch64/neoverse_v1/AWS-awslinux-graviton3.output new file mode 100644 index 0000000000..a8e072a9c6 --- /dev/null +++ b/tests/archdetect/aarch64/neoverse_v1/AWS-awslinux-graviton3.output @@ -0,0 +1 @@ +aarch64/neoverse_v1 diff --git a/tests/archdetect/ppc64le/power9le/unknown-power9le.all.output b/tests/archdetect/ppc64le/power9le/unknown-power9le.all.output new file mode 100644 index 0000000000..7ecf79d0a7 --- /dev/null +++ b/tests/archdetect/ppc64le/power9le/unknown-power9le.all.output @@ -0,0 +1 @@ +ppc64le/power9le:ppc64le/generic \ No newline at end of file diff --git a/tests/archdetect/x86_64/amd/zen2/Azure-CentOS7-7V12.all.output b/tests/archdetect/x86_64/amd/zen2/Azure-CentOS7-7V12.all.output new file mode 100644 index 0000000000..180de26f0e --- /dev/null +++ b/tests/archdetect/x86_64/amd/zen2/Azure-CentOS7-7V12.all.output @@ -0,0 +1 @@ +x86_64/amd/zen2:x86_64/generic \ No newline at end of file diff --git a/tests/archdetect/x86_64/amd/zen3/Azure-CentOS7-7V73X.all.output b/tests/archdetect/x86_64/amd/zen3/Azure-CentOS7-7V73X.all.output new file mode 100644 index 0000000000..798a0aa565 --- /dev/null +++ b/tests/archdetect/x86_64/amd/zen3/Azure-CentOS7-7V73X.all.output @@ -0,0 +1 @@ +x86_64/amd/zen3:x86_64/amd/zen2:x86_64/generic \ No newline at end of file diff --git a/tests/archdetect/x86_64/intel/haswell/archspec-linux-E5-2680-v3.all.output b/tests/archdetect/x86_64/intel/haswell/archspec-linux-E5-2680-v3.all.output new file mode 100644 index 0000000000..a047dd42cc --- /dev/null +++ b/tests/archdetect/x86_64/intel/haswell/archspec-linux-E5-2680-v3.all.output @@ -0,0 +1 @@ +x86_64/intel/haswell:x86_64/generic \ No newline at end of file diff --git a/tests/archdetect/x86_64/intel/skylake_avx512/archspec-linux-6132.all.output b/tests/archdetect/x86_64/intel/skylake_avx512/archspec-linux-6132.all.output new file mode 100644 index 0000000000..c9fa524ea6 --- /dev/null +++ b/tests/archdetect/x86_64/intel/skylake_avx512/archspec-linux-6132.all.output @@ -0,0 +1 @@ +x86_64/intel/skylake_avx512:x86_64/intel/haswell:x86_64/generic \ No newline at end of file diff --git a/update_lmod_cache.sh b/update_lmod_cache.sh index 89e2ecbeee..814bb2dae0 100755 --- a/update_lmod_cache.sh +++ b/update_lmod_cache.sh @@ -5,7 +5,7 @@ TOPDIR=$(dirname $(realpath $0)) -source $TOPDIR/utils.sh +source $TOPDIR/scripts/utils.sh if [ $# -ne 2 ]; then echo "Usage: $0 " >&2 diff --git a/utils.sh b/utils.sh deleted file mode 100644 index 0c98c86ec4..0000000000 --- a/utils.sh +++ /dev/null @@ -1,28 +0,0 @@ -function echo_green() { - echo -e "\e[32m$1\e[0m" -} - -function echo_red() { - echo -e "\e[31m$1\e[0m" -} - -function echo_yellow() { - echo -e "\e[33m$1\e[0m" -} - -function fatal_error() { - echo_red "ERROR: $1" >&2 - exit 1 -} - -function check_exit_code { - ec=$1 - ok_msg=$2 - fail_msg=$3 - - if [[ $ec -eq 0 ]]; then - echo_green "${ok_msg}" - else - fatal_error "${fail_msg}" - fi -} diff --git a/versions/2021.06/init/Magic_Castle/bash b/versions/2021.06/init/Magic_Castle/bash new file mode 100644 index 0000000000..5f149c817f --- /dev/null +++ b/versions/2021.06/init/Magic_Castle/bash @@ -0,0 +1,3 @@ 
+/cvmfs/pilot.eessi-hpc.org/versions/2021.06/init/print_deprecation_warning.sh + +source /cvmfs/pilot.eessi-hpc.org/versions/2021.12/init/Magic_Castle/bash diff --git a/versions/2021.06/init/bash b/versions/2021.06/init/bash new file mode 100644 index 0000000000..82a078849a --- /dev/null +++ b/versions/2021.06/init/bash @@ -0,0 +1,3 @@ +/cvmfs/pilot.eessi-hpc.org/versions/2021.06/init/print_deprecation_warning.sh + +source /cvmfs/pilot.eessi-hpc.org/versions/2021.12/init/bash diff --git a/versions/2021.06/init/print_deprecation_warning.sh b/versions/2021.06/init/print_deprecation_warning.sh new file mode 100755 index 0000000000..b721ed2f71 --- /dev/null +++ b/versions/2021.06/init/print_deprecation_warning.sh @@ -0,0 +1,19 @@ +#!/bin/bash +function echo_yellow_stderr() { + echo -e "\e[33m${1}\e[0m" >&2 +} + +echo_yellow_stderr +echo_yellow_stderr "WARNING: Version 2021.06 of the EESSI pilot repository was removed on 16 May 2023." +echo_yellow_stderr +echo_yellow_stderr "Version 2021.12 of the EESSI pilot repository can be used as a drop-in replacement, " +echo_yellow_stderr "so we have prepared your environment to use that instead." +echo_yellow_stderr +echo_yellow_stderr "In the future, please run" +echo_yellow_stderr +echo_yellow_stderr " source /cvmfs/pilot.eessi-hpc.org/latest/init/bash" +echo_yellow_stderr +echo_yellow_stderr "to prepare your environment for using the EESSI pilot repository." +echo_yellow_stderr +echo_yellow_stderr "See also https://eessi.github.io/docs/using_eessi/setting_up_environment ." +echo_yellow_stderr
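
A minimal usage sketch of the new eessi_container.sh script, tying together the --access, --save and --resume options described in its help output; the tarball name my-eessi-session.tgz and the commands fed into the container are illustrative assumptions, and it presumes Apptainer/Singularity is installed and the default build-node container can be pulled.

#!/bin/bash
# sketch: read-only use, then a read-write session whose tmp storage is saved and later resumed

# read-only access to the default EESSI-pilot repository (shell mode, command fed via stdin)
./eessi_container.sh --mode shell <<< "cat /etc/os-release"

# read-write session (CernVM-FS mount + fuse-overlayfs); save the tmp directory to a tarball
./eessi_container.sh --access rw --mode shell --save my-eessi-session.tgz \
    <<< "touch /cvmfs/pilot.eessi-hpc.org/hello.txt"

# resume from the saved tarball in a later session; '--access rw' is needed again because
# the touched file lives in the overlay upper directory, not in the read-only CVMFS layer
./eessi_container.sh --access rw --mode shell --resume my-eessi-session.tgz \
    <<< "ls -l /cvmfs/pilot.eessi-hpc.org/hello.txt"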
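The repos.cfg layout expected by --repository and --list-repos (keys repo_name, repo_version, config_bundle and config_map, read via the helpers in scripts/cfg_files.sh) can be sketched as below; the repository id EESSI/2000.01, the example.org names and the bundle file name are hypothetical, the config bundle itself (default.local, domain config, keys) still has to be prepared separately, and the snippet assumes it is run from the top of a software-layer checkout.

#!/bin/bash
# sketch: write a hypothetical repos.cfg and inspect it with the cfg_* helpers
source scripts/cfg_files.sh

mkdir -p cfg
cat > cfg/repos.cfg << 'EOF'
[EESSI/2000.01]
repo_name = pilot.example.org
repo_version = 2000.01
config_bundle = example-cfg-bundle.tgz
config_map = {"default.local":"/etc/cvmfs/default.local","example.org.conf":"/etc/cvmfs/domain.d/example.org.conf","example.org":"/etc/cvmfs/keys/example.org"}
EOF

cfg_load cfg/repos.cfg                          # fill the cfg_repos associative array
cfg_sections                                    # -> EESSI/2000.01
cfg_get_value "EESSI/2000.01" "repo_name"       # -> pilot.example.org

# turn the config_map string into the cfg_file_map associative array and print it
cfg_init_file_map "$(cfg_get_value 'EESSI/2000.01' 'config_map')"
cfg_print_map                                   # local file --> path inside the container

# make eessi_container.sh pick up this cfg directory and list the extra repository
export EESSI_REPOS_CFG_DIR_OVERRIDE=${PWD}/cfg
./eessi_container.sh --list-repos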