diff --git a/.github/workflows/test-spack.yml b/.github/workflows/test-spack.yml
index f9978bda1e..d4dba0da8f 100644
--- a/.github/workflows/test-spack.yml
+++ b/.github/workflows/test-spack.yml
@@ -19,7 +19,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@v3
         with:
-            path: arbor
+          path: arbor
       - uses: actions/setup-python@v4
         with:
           python-version: ${{ matrix.python-version }}
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 19a2572b07..3f27afaf39 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -136,7 +136,9 @@ if(ARB_GPU STREQUAL "cuda")
     set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
     enable_language(CUDA)
     find_package(CUDAToolkit)
-    set(CMAKE_CUDA_ARCHITECTURES 60 70 80)
+    if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
+        set(CMAKE_CUDA_ARCHITECTURES 60 70 80)
+    endif()
     # We _still_ need this otherwise CUDA symbols will not be exported
     # from libarbor.a leading to linker errors when link external clients.
     # Unit tests are NOT external enough. Re-review this somewhere in the
@@ -144,10 +146,16 @@ if(ARB_GPU STREQUAL "cuda")
     find_package(CUDA ${CUDAToolkit_VERSION_MAJOR} REQUIRED)
 elseif(ARB_GPU STREQUAL "cuda-clang")
     include(FindCUDAToolkit)
+    if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
+        set(CMAKE_CUDA_ARCHITECTURES 60 70 80)
+    endif()
     set(ARB_WITH_CUDA_CLANG TRUE)
     enable_language(CUDA)
 elseif(ARB_GPU STREQUAL "hip")
     set(ARB_WITH_HIP_CLANG TRUE)
+    # Specify the AMD architectures using a (user-provided) list.
+    # Note: CMake-native HIP architectures were introduced with version 3.21.
+    set(ARB_HIP_ARCHITECTURES gfx906 gfx900 CACHE STRING "AMD offload architectures (semicolon separated)")
 endif()
 
 if(ARB_WITH_NVCC OR ARB_WITH_CUDA_CLANG OR ARB_WITH_HIP_CLANG)
@@ -415,11 +423,19 @@ if(ARB_WITH_GPU)
         target_compile_definitions(arbor-private-deps INTERFACE ARB_CUDA)
         target_compile_definitions(arborenv-private-deps INTERFACE ARB_CUDA)
     elseif(ARB_WITH_CUDA_CLANG)
-        set(clang_options_ -DARB_CUDA -xcuda --cuda-gpu-arch=sm_60 --cuda-gpu-arch=sm_70 --cuda-gpu-arch=sm_80 --cuda-path=${CUDA_TOOLKIT_ROOT_DIR})
+        # Transform the CUDA architecture list into clang CUDA flags
+        list(TRANSFORM CMAKE_CUDA_ARCHITECTURES PREPEND "--cuda-gpu-arch=sm_" OUTPUT_VARIABLE TMP)
+        string(REPLACE ";" " " CUDA_ARCH_STR "${TMP}")
+
+        set(clang_options_ -DARB_CUDA -xcuda ${CUDA_ARCH_STR} --cuda-path=${CUDA_TOOLKIT_ROOT_DIR})
         target_compile_options(arbor-private-deps INTERFACE $<$<COMPILE_LANGUAGE:CXX>:${clang_options_}>)
         target_compile_options(arborenv-private-deps INTERFACE $<$<COMPILE_LANGUAGE:CXX>:${clang_options_}>)
     elseif(ARB_WITH_HIP_CLANG)
-        set(clang_options_ -DARB_HIP -xhip --amdgpu-target=gfx906 --amdgpu-target=gfx900)
+        # Transform the HIP architecture list into clang HIP flags
+        list(TRANSFORM ARB_HIP_ARCHITECTURES PREPEND "--offload-arch=" OUTPUT_VARIABLE TMP)
+        string(REPLACE ";" " " HIP_ARCH_STR "${TMP}")
+
+        set(clang_options_ -DARB_HIP -xhip ${HIP_ARCH_STR})
         target_compile_options(arbor-private-deps INTERFACE $<$<COMPILE_LANGUAGE:CXX>:${clang_options_}>)
         target_compile_options(arborenv-private-deps INTERFACE $<$<COMPILE_LANGUAGE:CXX>:${clang_options_}>)
     endif()
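With the guards above, the 60;70;80 default now applies only when the user has not chosen
architectures themselves. Illustrative configure invocations (not part of the patch; gfx90a
is just an example target):

    # CUDA (nvcc or clang): override the default with a single architecture
    cmake .. -DARB_GPU=cuda -DCMAKE_CUDA_ARCHITECTURES=80

    # HIP: the new cache variable takes a semicolon-separated list
    cmake .. -DARB_GPU=hip -DARB_HIP_ARCHITECTURES="gfx906;gfx90a"
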
diff --git a/arbor/CMakeLists.txt b/arbor/CMakeLists.txt
index 170ac34248..c8f1a465a7 100644
--- a/arbor/CMakeLists.txt
+++ b/arbor/CMakeLists.txt
@@ -123,11 +123,11 @@ install(TARGETS arbor-private-headers EXPORT arbor-targets)
 # directory-local.
 add_subdirectory(../mechanisms "${CMAKE_BINARY_DIR}/mechanisms")
-set_source_files_properties(${arbor-builtin-mechanisms} PROPERTIES GENERATED TRUE)
+set_source_files_properties(${arbor-builtin-mechanisms} DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTIES GENERATED TRUE)
 
 if(ARB_WITH_CUDA_CLANG OR ARB_WITH_HIP_CLANG)
-    set_source_files_properties(${arbor_sources} PROPERTIES LANGUAGE CXX)
-    set_source_files_properties(${arbor-builtin-mechanism} PROPERTIES LANGUAGE CXX)
+    set_source_files_properties(${arbor_sources} DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTIES LANGUAGE CXX)
+    set_source_files_properties(${arbor-builtin-mechanisms} DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTIES LANGUAGE CXX)
 endif()
 
 # Library target:
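Forcing LANGUAGE CXX hands the .cu sources to the ordinary C++ compiler, and the -xcuda/-xhip
options set in the top-level CMakeLists then make clang treat them as offload code anyway.
Roughly the compile line this produces (an illustrative sketch; file name, architecture and
CUDA path are placeholders):

    clang++ -xcuda --cuda-gpu-arch=sm_80 --cuda-path=/usr/local/cuda -DARB_CUDA -c mech.cu -o mech.o
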
diff --git a/arbor/backends/gpu/shared_state.hpp b/arbor/backends/gpu/shared_state.hpp
index 1749c5e2fb..606b5959b7 100644
--- a/arbor/backends/gpu/shared_state.hpp
+++ b/arbor/backends/gpu/shared_state.hpp
@@ -139,7 +139,7 @@ struct ARB_ARBOR_API shared_state: shared_state_base
     cable_solver solver;
 
-    static constexpr std::size_t alignment = std::max(array::alignment(), iarray::alignment());
+    static constexpr unsigned alignment = std::max(array::alignment(), iarray::alignment());
 
     arb_size_type n_intdom = 0; // Number of distinct integration domains.
     arb_size_type n_detector = 0; // Max number of detectors on all cells.
diff --git a/arbor/util/pimpl_src.hpp b/arbor/util/pimpl_src.hpp
index 646820a472..b8ca4410a2 100644
--- a/arbor/util/pimpl_src.hpp
+++ b/arbor/util/pimpl_src.hpp
@@ -13,8 +13,9 @@ namespace util {
 template <typename T>
 pimpl<T>::~pimpl() = default;
 
+// ctor is empty instead of defaulted because of hipcc complaints
 template <typename T>
-pimpl<T>::pimpl() noexcept = default;
+pimpl<T>::pimpl() noexcept {}
 
 template <typename T>
 pimpl<T>::pimpl(T* ptr) noexcept : m{ptr} {}
@@ -53,9 +54,4 @@ pimpl<T> make_pimpl(Args&&... args) {
 // In order to avoid linker errors for the constructors and destructor, the pimpl template needs to
 // be instantiated in the source file. This macro helps with this boilerplate code. Note, that it
 // needs to be placed in the default namespace.
-#define ARB_INSTANTIATE_PIMPL(T) \
-namespace arb { \
-namespace util { \
-template struct pimpl<T>; \
-} \
-}
+#define ARB_INSTANTIATE_PIMPL(T) template class ::arb::util::pimpl<T>;
diff --git a/ci/cscs/daint_gpu/Dockerfile b/ci/cscs/daint_gpu/Dockerfile
new file mode 100644
index 0000000000..4951d4da96
--- /dev/null
+++ b/ci/cscs/daint_gpu/Dockerfile
@@ -0,0 +1,27 @@
+ARG BASE_IMG
+FROM $BASE_IMG
+
+ARG NUM_PROCS
+
+COPY . /arbor.src
+
+RUN mkdir -p /arbor.src/build \
+    && cd /arbor.src/build \
+    && cmake .. \
+        -GNinja \
+        -DCMAKE_INSTALL_PREFIX=/arbor.install \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DCMAKE_CXX_FLAGS="-march=haswell" \
+        -DARB_ARCH=none \
+        -DARB_WITH_ASSERTIONS=ON \
+        -DARB_WITH_PROFILING=ON \
+        -DARB_VECTORIZE=ON \
+        -DARB_WITH_PYTHON=ON \
+        -DARB_USE_HWLOC=ON \
+        -DARB_WITH_MPI=ON \
+        -DARB_GPU=cuda \
+        -DCMAKE_CUDA_ARCHITECTURES=60 \
+        -DARB_USE_GPU_RNG=ON \
+    && ninja -j${NUM_PROCS} tests examples pyarb \
+    && ninja install
+
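The split into Dockerfile.base and Dockerfile keeps the slow Spack build out of the per-commit
path: the base image is rebuilt only when Dockerfile.base changes, while this image is rebuilt
for every commit. A local equivalent of what the CI's .container-builder does (illustrative;
the base tag and -j value are placeholders):

    docker build -f ci/cscs/daint_gpu/Dockerfile \
        --build-arg BASE_IMG=arbor_daint_gpu_base \
        --build-arg NUM_PROCS=16 \
        -t arbor_daint_gpu .
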
diff --git a/ci/cscs/daint_gpu/Dockerfile.base b/ci/cscs/daint_gpu/Dockerfile.base
new file mode 100644
index 0000000000..883f7d6ba6
--- /dev/null
+++ b/ci/cscs/daint_gpu/Dockerfile.base
@@ -0,0 +1,50 @@
+FROM docker.io/finkandreas/spack:0.19.2-cuda11.7.1-ubuntu22.04 as builder
+
+ARG NUM_PROCS
+
+RUN spack-install-helper daint-gpu \
+    "git" \
+    "meson" \
+    "ninja" \
+    "cmake" \
+    "valgrind" \
+    "python" \
+    "hwloc" \
+    "boost" \
+    "fmt" \
+    "random123" \
+    "py-mpi4py" \
+    "py-sphinx" \
+    "py-svgwrite" \
+    "nlohmann-json" \
+    "py-pybind11" \
+    "py-numpy" \
+    "py-flake8" \
+    "py-black" \
+    "py-pytest" \
+    "py-seaborn" \
+    "py-pandas" \
+    "pugixml"
+
+# end of builder container, now we are ready to copy necessary files
+
+# copy only relevant parts to the final container
+FROM docker.io/finkandreas/spack:base-cuda11.7.1-ubuntu22.04
+
+# it is important to keep the paths, otherwise your installation is broken
+# all these paths are created with the above `spack-install-helper` invocation
+COPY --from=builder /opt/spack-environment /opt/spack-environment
+COPY --from=builder /opt/software /opt/software
+COPY --from=builder /opt/._view /opt/._view
+COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
+
+# Some boilerplate to get all paths correctly - fix_spack_install is part of the base image
+# and makes sure that all important things are correctly set up
+RUN fix_spack_install
+
+# Finally install software that is needed, e.g. compilers
+# It is also possible to build compilers via spack and let all dependencies be handled by spack
+RUN apt-get -yqq update && apt-get -yqq upgrade \
+    && apt-get -yqq install build-essential gfortran \
+    && rm -rf /var/lib/apt/lists/*
+
diff --git a/ci/cscs/daint_gpu/pipeline.yml b/ci/cscs/daint_gpu/pipeline.yml
new file mode 100644
index 0000000000..aeb2033a29
--- /dev/null
+++ b/ci/cscs/daint_gpu/pipeline.yml
@@ -0,0 +1,67 @@
+include:
+  - remote: 'https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.ci-ext.yml'
+
+stages:
+  - build_base # build stage is running on Kubernetes cluster
+  - build      # build stage is running on Kubernetes cluster
+  - test       # test stage is running on slurm cluster
+
+variables:
+  PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/software/arbor_daint_gpu:$CI_COMMIT_SHORT_SHA
+
+build-base:
+  extends: .container-builder-dynamic-name
+  stage: build_base
+  variables:
+    DOCKERFILE: ci/cscs/daint_gpu/Dockerfile.base
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/base/arbor_daint_gpu_base_image
+    WATCH_FILECHANGES: 'ci/cscs/daint_gpu/Dockerfile.base'
+
+build-arbor:
+  extends: .container-builder
+  stage: build
+  variables:
+    DOCKERFILE: ci/cscs/daint_gpu/Dockerfile
+    DOCKER_BUILD_ARGS: '["BASE_IMG=$BASE_IMAGE"]'
+    GIT_SUBMODULE_STRATEGY: recursive
+
+test-single-node:
+  extends: .container-runner-daint-gpu
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - build/bin/unit-modcc
+    - build/bin/unit-local
+    - build/bin/unit
+    - scripts/run_cpp_examples.sh
+    - python -m venv --system-site-packages /arbor.install
+    - source /arbor.install/bin/activate
+    - python -m unittest discover -v -s python
+    - scripts/run_python_examples.sh
+    - scripts/test_executables.sh
+    - deactivate
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 1
+    SLURM_NTASKS: 1
+    SLURM_CPUS_PER_TASK: 12
+    SLURM_TIMELIMIT: "00:30:00"
+    USE_MPI: "NO"
+
+test-distributed:
+  extends: .container-runner-daint-gpu
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - build/bin/unit-mpi
+    - scripts/run_cpp_examples.sh -d
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 2
+    SLURM_NTASKS: 2
+    SLURM_CPUS_PER_TASK: 12
+    SLURM_TIMELIMIT: "00:30:00"
+    USE_MPI: "YES"
+
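The venv trick in the test jobs works because the image installs Arbor under /arbor.install:
creating a venv on top of that prefix with --system-site-packages makes the installed arbor
module importable while keeping the Spack-provided Python packages visible. A minimal sketch
of the same steps (the import check is illustrative, not part of the CI script):

    python -m venv --system-site-packages /arbor.install
    source /arbor.install/bin/activate
    python -c 'import arbor'
    deactivate
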
diff --git a/ci/cscs/daint_mc/Dockerfile b/ci/cscs/daint_mc/Dockerfile
new file mode 100644
index 0000000000..3841f076ee
--- /dev/null
+++ b/ci/cscs/daint_mc/Dockerfile
@@ -0,0 +1,23 @@
+ARG BASE_IMG
+FROM $BASE_IMG
+
+ARG NUM_PROCS
+
+COPY . /arbor.src
+
+RUN mkdir -p /arbor.src/build \
+    && cd /arbor.src/build \
+    && cmake .. \
+        -GNinja \
+        -DCMAKE_INSTALL_PREFIX=/arbor.install \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DCMAKE_CXX_FLAGS="-march=broadwell" \
+        -DARB_ARCH=none \
+        -DARB_WITH_ASSERTIONS=ON \
+        -DARB_WITH_PROFILING=ON \
+        -DARB_VECTORIZE=ON \
+        -DARB_WITH_PYTHON=ON \
+        -DARB_USE_HWLOC=ON \
+        -DARB_WITH_MPI=ON \
+    && ninja -j${NUM_PROCS} tests examples pyarb \
+    && ninja install
diff --git a/ci/cscs/daint_mc/Dockerfile.base b/ci/cscs/daint_mc/Dockerfile.base
new file mode 100644
index 0000000000..e27545a7c4
--- /dev/null
+++ b/ci/cscs/daint_mc/Dockerfile.base
@@ -0,0 +1,49 @@
+FROM docker.io/finkandreas/spack:0.19.2-ubuntu22.04 as builder
+
+ARG NUM_PROCS
+
+RUN spack-install-helper daint-mc \
+    "git" \
+    "meson" \
+    "ninja" \
+    "cmake" \
+    "valgrind" \
+    "python" \
+    "hwloc" \
+    "boost" \
+    "fmt" \
+    "random123" \
+    "py-mpi4py" \
+    "py-sphinx" \
+    "py-svgwrite" \
+    "nlohmann-json" \
+    "py-pybind11" \
+    "py-numpy" \
+    "py-flake8" \
+    "py-black" \
+    "py-pytest" \
+    "py-seaborn" \
+    "py-pandas" \
+    "pugixml"
+
+# end of builder container, now we are ready to copy necessary files
+
+# copy only relevant parts to the final container
+FROM docker.io/finkandreas/spack:base-ubuntu22.04
+
+# it is important to keep the paths, otherwise your installation is broken
+# all these paths are created with the above `spack-install-helper` invocation
+COPY --from=builder /opt/spack-environment /opt/spack-environment
+COPY --from=builder /opt/software /opt/software
+COPY --from=builder /opt/._view /opt/._view
+COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
+
+# Some boilerplate to get all paths correctly - fix_spack_install is part of the base image
+# and makes sure that all important things are correctly set up
+RUN fix_spack_install
+
+# Finally install software that is needed, e.g. compilers
+# It is also possible to build compilers via spack and let all dependencies be handled by spack
+RUN apt-get -yqq update && apt-get -yqq upgrade \
+    && apt-get -yqq install build-essential gfortran \
+    && rm -rf /var/lib/apt/lists/*
diff --git a/ci/cscs/daint_mc/pipeline.yml b/ci/cscs/daint_mc/pipeline.yml
new file mode 100644
index 0000000000..9ac453ae79
--- /dev/null
+++ b/ci/cscs/daint_mc/pipeline.yml
@@ -0,0 +1,66 @@
+include:
+  - remote: 'https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.ci-ext.yml'
+
+stages:
+  - build_base # build stage is running on Kubernetes cluster
+  - build      # build stage is running on Kubernetes cluster
+  - test       # test stage is running on slurm cluster
+
+variables:
+  PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/software/arbor_daint_mc:$CI_COMMIT_SHORT_SHA
+
+build-base:
+  extends: .container-builder-dynamic-name
+  stage: build_base
+  variables:
+    DOCKERFILE: ci/cscs/daint_mc/Dockerfile.base
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/base/arbor_daint_mc_base_image
+    WATCH_FILECHANGES: 'ci/cscs/daint_mc/Dockerfile.base'
+
+build-arbor:
+  extends: .container-builder
+  stage: build
+  variables:
+    DOCKERFILE: ci/cscs/daint_mc/Dockerfile
+    DOCKER_BUILD_ARGS: '["BASE_IMG=$BASE_IMAGE"]'
+    GIT_SUBMODULE_STRATEGY: recursive
+
+test-single-node:
+  extends: .container-runner-daint-mc
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - build/bin/unit-modcc
+    - build/bin/unit-local
+    - build/bin/unit
+    - scripts/run_cpp_examples.sh
+    - python -m venv --system-site-packages /arbor.install
+    - source /arbor.install/bin/activate
+    - python -m unittest discover -v -s python
+    - scripts/run_python_examples.sh
+    - scripts/test_executables.sh
+    - deactivate
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 1
+    SLURM_NTASKS: 1
+    SLURM_CPUS_PER_TASK: 36
+    SLURM_TIMELIMIT: "00:30:00"
+    USE_MPI: "NO"
+
+test-distributed:
+  extends: .container-runner-daint-mc
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - build/bin/unit-mpi
+    - scripts/run_cpp_examples.sh -d
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 2
+    SLURM_NTASKS: 4
+    SLURM_CPUS_PER_TASK: 18
+    SLURM_TIMELIMIT: "00:30:00"
+    USE_MPI: "YES"
diff --git a/ci/cscs/hohgant_cpu/Dockerfile b/ci/cscs/hohgant_cpu/Dockerfile
new file mode 100644
index 0000000000..0cad50c9c4
--- /dev/null
+++ b/ci/cscs/hohgant_cpu/Dockerfile
@@ -0,0 +1,24 @@
+ARG BASE_IMG
+FROM $BASE_IMG
+
+ARG NUM_PROCS
+
+COPY . /arbor.src
+
+RUN mkdir -p /arbor.src/build \
+    && cd /arbor.src/build \
+    && cmake .. \
+        -GNinja \
+        -DCMAKE_INSTALL_PREFIX=/arbor.install \
+        -DCMAKE_BUILD_TYPE=Release \
+        -DCMAKE_CXX_FLAGS="-march=znver2" \
+        -DARB_ARCH=none \
+        -DARB_WITH_ASSERTIONS=ON \
+        -DARB_WITH_PROFILING=ON \
+        -DARB_VECTORIZE=ON \
+        -DARB_WITH_PYTHON=ON \
+        -DARB_USE_HWLOC=ON \
+        -DARB_WITH_MPI=ON \
+    && ninja -j${NUM_PROCS} tests examples pyarb \
+    && ninja install
+
diff --git a/ci/cscs/hohgant_cpu/Dockerfile.base b/ci/cscs/hohgant_cpu/Dockerfile.base
new file mode 100644
index 0000000000..57bf1419c9
--- /dev/null
+++ b/ci/cscs/hohgant_cpu/Dockerfile.base
@@ -0,0 +1,50 @@
+FROM docker.io/finkandreas/spack:0.19.2-ubuntu22.04 as builder
+
+ARG NUM_PROCS
+
+RUN spack-install-helper hohgant-cpu \
+    "git" \
+    "meson" \
+    "ninja" \
+    "cmake" \
+    "valgrind" \
+    "python" \
+    "hwloc" \
+    "boost" \
+    "fmt" \
+    "random123" \
+    "py-mpi4py" \
+    "py-sphinx" \
+    "py-svgwrite" \
+    "nlohmann-json" \
+    "py-pybind11" \
+    "py-numpy" \
+    "py-flake8" \
+    "py-black" \
+    "py-pytest" \
+    "py-seaborn" \
+    "py-pandas" \
+    "pugixml"
+
+# end of builder container, now we are ready to copy necessary files
+
+# copy only relevant parts to the final container
+FROM docker.io/finkandreas/spack:base-ubuntu22.04
+
+# it is important to keep the paths, otherwise your installation is broken
+# all these paths are created with the above `spack-install-helper` invocation
+COPY --from=builder /opt/spack-environment /opt/spack-environment
+COPY --from=builder /opt/software /opt/software
+COPY --from=builder /opt/._view /opt/._view
+COPY --from=builder /etc/profile.d/z10_spack_environment.sh /etc/profile.d/z10_spack_environment.sh
+
+# Some boilerplate to get all paths correctly - fix_spack_install is part of the base image
+# and makes sure that all important things are correctly set up
+RUN fix_spack_install
+
+# Finally install software that is needed, e.g. compilers
+# It is also possible to build compilers via spack and let all dependencies be handled by spack
+RUN apt-get -yqq update && apt-get -yqq upgrade \
+    && apt-get -yqq install build-essential gfortran \
+    && rm -rf /var/lib/apt/lists/*
+
diff --git a/ci/cscs/hohgant_cpu/pipeline.yml b/ci/cscs/hohgant_cpu/pipeline.yml
new file mode 100644
index 0000000000..50900ad7da
--- /dev/null
+++ b/ci/cscs/hohgant_cpu/pipeline.yml
@@ -0,0 +1,154 @@
+include:
+  - remote: 'https://gitlab.com/cscs-ci/recipes/-/raw/master/templates/v2/.ci-ext.yml'
+
+stages:
+  - build_base # build stage is running on Kubernetes cluster
+  - build      # build stage is running on Kubernetes cluster
+  - test       # test stage is running on slurm cluster
+
+variables:
+  PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/software/arbor_hohgant_cpu:$CI_COMMIT_SHORT_SHA
+
+build-base:
+  extends: .container-builder-dynamic-name
+  stage: build_base
+  variables:
+    DOCKERFILE: ci/cscs/hohgant_cpu/Dockerfile.base
+    PERSIST_IMAGE_NAME: $CSCS_REGISTRY_PATH/base/arbor_hohgant_cpu_base_image
+    WATCH_FILECHANGES: 'ci/cscs/hohgant_cpu/Dockerfile.base'
+
+build-arbor:
+  extends: .container-builder
+  stage: build
+  variables:
+    DOCKERFILE: ci/cscs/hohgant_cpu/Dockerfile
+    DOCKER_BUILD_ARGS: '["BASE_IMG=$BASE_IMAGE"]'
+    GIT_SUBMODULE_STRATEGY: recursive
+
+# Note: hohgant has an MPI issue: it does not allow multiple MPI runs in the same job due to
+# repeated MPI_Init/MPI_Finalize calls. Therefore, we test the examples individually (each in a different job).
+
+test-single-node:
+  extends: .container-runner-hohgant-zen2
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - build/bin/unit-modcc
+    - build/bin/unit-local
+    - build/bin/unit
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 1
+    SLURM_NTASKS: 1
+    SLURM_CPUS_PER_TASK: 128
+    SLURM_TIMELIMIT: "00:10:00"
+    USE_MPI: "NO"
+
+test-python-single-node:
+  extends: .container-runner-hohgant-zen2
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - python -m venv --system-site-packages /arbor.install
+    - source /arbor.install/bin/activate
+    - python -m unittest discover -v -s python
+    - deactivate
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 1
+    SLURM_NTASKS: 1
+    SLURM_CPUS_PER_TASK: 128
+    SLURM_TIMELIMIT: "00:10:00"
+    USE_MPI: "NO"
+
+bench-single-node:
+  extends: .container-runner-hohgant-zen2
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - scripts/run_cpp_examples.sh --with=bench
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 1
+    SLURM_NTASKS: 1
+    SLURM_CPUS_PER_TASK: 128
+    SLURM_TIMELIMIT: "00:10:00"
+    USE_MPI: "NO"
+
+brunel-single-node:
+  extends: .container-runner-hohgant-zen2
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - scripts/run_cpp_examples.sh --with=brunel
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 1
+    SLURM_NTASKS: 1
+    SLURM_CPUS_PER_TASK: 128
+    SLURM_TIMELIMIT: "00:10:00"
+    USE_MPI: "NO"
+
+test-distributed:
+  extends: .container-runner-hohgant-zen2
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - build/bin/unit-mpi
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 2
+    SLURM_NTASKS: 4
+    SLURM_CPUS_PER_TASK: 64
+    SLURM_TIMELIMIT: "00:10:00"
+    USE_MPI: "YES"
+
+bench-distributed:
+  extends: .container-runner-hohgant-zen2
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - scripts/run_cpp_examples.sh --with=bench -d
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 2
+    SLURM_NTASKS: 4
+    SLURM_CPUS_PER_TASK: 64
+    SLURM_TIMELIMIT: "00:10:00"
+    USE_MPI: "YES"
+
+brunel-distributed:
+  extends: .container-runner-hohgant-zen2
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - scripts/run_cpp_examples.sh --with=brunel -d
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 2
+    SLURM_NTASKS: 4
+    SLURM_CPUS_PER_TASK: 64
+    SLURM_TIMELIMIT: "00:10:00"
+    USE_MPI: "YES"
+
+remote-distributed:
+  extends: .container-runner-hohgant-zen2
+  stage: test
+  image: $PERSIST_IMAGE_NAME
+  script:
+    - cd /arbor.src
+    - scripts/run_cpp_examples.sh --with=remote -d
+  variables:
+    SLURM_JOB_PARTITION: normal
+    SLURM_JOB_NUM_NODES: 2
+    SLURM_NTASKS: 4
+    SLURM_CPUS_PER_TASK: 64
+    SLURM_TIMELIMIT: "00:10:00"
+    USE_MPI: "YES"
diff --git a/example/ornstein_uhlenbeck/CMakeLists.txt b/example/ornstein_uhlenbeck/CMakeLists.txt
index e6ed7a8347..7b73732fc1 100644
--- a/example/ornstein_uhlenbeck/CMakeLists.txt
+++ b/example/ornstein_uhlenbeck/CMakeLists.txt
@@ -7,7 +7,7 @@ make_catalogue_lib(
 
 add_executable(ou EXCLUDE_FROM_ALL ou.cpp)
 target_link_libraries(ou PRIVATE catalogue-ornstein_uhlenbeck)
-target_link_libraries(ou PRIVATE arbor arborio)
+target_link_libraries(ou PRIVATE arbor arborio arbor-private-deps)
 if (ARB_USE_BUNDLED_FMT)
     target_include_directories(ou PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../../ext/fmt/include")
     target_compile_definitions(ou PRIVATE FMT_HEADER_ONLY)
diff --git a/mechanisms/BuildModules.cmake b/mechanisms/BuildModules.cmake
index afaaae9d23..bb87c113d9 100644
--- a/mechanisms/BuildModules.cmake
+++ b/mechanisms/BuildModules.cmake
@@ -94,6 +94,10 @@ function("make_catalogue_standalone")
         endif()
     endforeach()
 
+    if(ARB_WITH_CUDA_CLANG OR ARB_WITH_HIP_CLANG)
+        set_source_files_properties(${catalogue_${MK_CAT_NAME}_source} DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTIES LANGUAGE CXX)
+    endif()
+
     add_library(${MK_CAT_NAME}-catalogue SHARED ${catalogue_${MK_CAT_NAME}_source})
     target_compile_definitions(${MK_CAT_NAME}-catalogue PUBLIC STANDALONE=1)
 
@@ -124,7 +128,7 @@ function("make_catalogue_lib")
         VERBOSE ${MK_CAT_VERBOSE}
         ADD_DEPS OFF)
     if(ARB_WITH_CUDA_CLANG OR ARB_WITH_HIP_CLANG)
-        set_source_files_properties(${catalogue-${MK_CAT_NAME}-mechanisms} PROPERTIES LANGUAGE CXX)
+        set_source_files_properties(${catalogue-${MK_CAT_NAME}-mechanisms} DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTIES LANGUAGE CXX)
     endif()
     add_library(catalogue-${MK_CAT_NAME} STATIC EXCLUDE_FROM_ALL ${catalogue-${MK_CAT_NAME}-mechanisms})
     target_link_libraries(catalogue-${MK_CAT_NAME} PRIVATE arbor arbor-private-deps)
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index 8ddbbf6226..de26283cd2 100644
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -97,9 +97,13 @@ file(WRITE "${CMAKE_BINARY_DIR}/install-prefix" "${CMAKE_INSTALL_PREFIX}")
 execute_process(
     COMMAND ${PYTHON_EXECUTABLE} "${PROJECT_SOURCE_DIR}/scripts/where.py"
     INPUT_FILE "${CMAKE_BINARY_DIR}/install-prefix"
-    OUTPUT_VARIABLE ARB_PYTHON_LIB_PATH_DEFAULT
+    OUTPUT_VARIABLE ARB_PYTHON_LIB_PATH_DEFAULT_REL
     OUTPUT_STRIP_TRAILING_WHITESPACE)
 
+# convert to absolute path if needed (could be a relative path if ccmake was used)
+get_filename_component(ARB_PYTHON_LIB_PATH_DEFAULT "${ARB_PYTHON_LIB_PATH_DEFAULT_REL}"
+                       REALPATH BASE_DIR "${CMAKE_BINARY_DIR}")
+
 # Default to installing in that path, override with user specified ARB_PYTHON_LIB_PATH
 set(ARB_PYTHON_LIB_PATH ${ARB_PYTHON_LIB_PATH_DEFAULT} CACHE PATH "path for installing Python module for Arbor.")
 message(STATUS "Python module installation path: ${ARB_PYTHON_LIB_PATH}")
diff --git a/python/test/unit/test_domain_decompositions.py b/python/test/unit/test_domain_decompositions.py
index 28f3cda964..00091fd034 100644
--- a/python/test/unit/test_domain_decompositions.py
+++ b/python/test/unit/test_domain_decompositions.py
@@ -55,7 +55,7 @@ class TestDomain_Decompositions(unittest.TestCase):
     def test_domain_decomposition_homogenous_CPU(self):
         n_cells = 10
         recipe = homo_recipe(n_cells)
-        context = arb.context()
+        context = arb.context(threads=1, gpu_id=None)
         decomp = arb.partition_load_balance(recipe, context)
 
         self.assertEqual(decomp.num_local_cells, n_cells)
@@ -106,7 +106,7 @@ def test_domain_decomposition_homogenous_GPU(self):
     def test_domain_decomposition_heterogenous_CPU(self):
         n_cells = 10
         recipe = hetero_recipe(n_cells)
-        context = arb.context()
+        context = arb.context(threads=1, gpu_id=None)
         decomp = arb.partition_load_balance(recipe, context)
 
         self.assertEqual(decomp.num_local_cells, n_cells)
diff --git a/scripts/run_cpp_examples.sh b/scripts/run_cpp_examples.sh
index b3ba3d4a5e..772b4b7b7a 100755
--- a/scripts/run_cpp_examples.sh
+++ b/scripts/run_cpp_examples.sh
@@ -3,58 +3,156 @@
 
 set -Eeuo pipefail
 
-if [[ "$#" -gt 1 ]]; then
-    echo "usage: run_cpp_examples.sh <prefix>"
-    exit 1
-fi
+# Function to print the script's synopsis
+print_synopsis() {
+    echo "Usage: $(basename "$0") [-d] [--with=example] [prefix]"
+    echo "  -d              Toggle distributed mode (optional). Does not expect result"
+    echo "                  files to be available to every process."
+    echo "  --with=example  Select an example (optional). If omitted, all examples will run."
+    echo "  prefix          Prefix value (optional). Will toggle distributed mode if it"
+    echo "                  starts with mpirun*."
+}
 
-PREFIX="${1:-} `pwd`/build/bin"
+# Default values
+distributed=0
+examples=()
+prefix=""
+PREFIX=" `pwd`/build/bin"
 tag=dev-`git rev-parse --short HEAD`
 out="results/$tag/cpp/"
 ok=0
 
-check () {
-    prog=$1
-    expected="$2 spikes"
-    actual=$(grep -Eo '[0-9]+ spikes' $out/$prog/stdout.txt || echo "N/A")
-    if [ "$expected" == "$actual" ]
-    then
-        echo "   - $prog: OK"
-    else
-        echo "   - $prog: ERROR wrong number of spikes: $expected ./. $actual"
-        ok=1
-    fi
-}
+# List of all examples
+all_examples=(
+    "bench"
+    "brunel"
+    "gap_junctions"
+    "generators"
+    "lfp"
+    "ring"
+    "busyring"
+    "single-cell"
+    "probe-demo v"
+    "plasticity"
+    "ou"
+    "voltage-clamp"
+    "remote"
+)
+
+# Mark examples not suitable for local execution
+skip_local=(
+    "remote"
+)
+
+# Lookup table for expected spike counts (parallel to all_examples; empty means no check)
+expected_outputs=(
+    972
+    6998
+    "30"
+    ""
+    ""
+    94
+    35000
+    ""
+    ""
+    ""
+    ""
+    ""
+    ""
+)
+
+# Function to execute an example
+execute_example() {
+    local example="${1}"
+    local dir=`echo ${example} | tr ' ' '_'`
+    local path="${out}${dir}"
+    echo -n "   - ${example}: "
+
+    # skip examples marked as unsuitable for local execution when not in distributed mode
+    if [[ $distributed == 0 ]]; then
+        for ex in "${skip_local[@]}"; do
+            if [[ $ex == $example ]]; then
+                echo "skipped"
+                return
+            fi
+        done
+    fi
+
+    # run the example and redirect its output
+    mkdir -p ${path}
+    ${PREFIX}/${example} > ${path}/stdout.txt 2> ${path}/stderr.txt
+
+    # get the expected output if it exists and compare it to the actual output
+    local expected=""
+    for i in "${!all_examples[@]}"; do
+        if [[ ${all_examples[$i]} == $example ]]; then
+            expected=${expected_outputs[$i]}
+            break
+        fi
+    done
+    if [[ -n ${expected} ]]; then
+        actual=$(grep -Eo '[0-9]+ spikes' ${path}/stdout.txt || echo "N/A")
+        if [[ $distributed == 1 && "$actual" == "N/A" ]]; then
+            echo "check skipped on remote rank"
+        elif [ "$expected spikes" == "$actual" ]; then
+            echo "OK"
+        else
+            echo "ERROR wrong number of spikes: $expected ./. $actual"
+            ok=1
+        fi
+    else
+        echo "OK (nothing to check)"
+    fi
+}
 
-for ex in bench brunel gap_junctions generators lfp ring busyring single-cell "probe-demo v" plasticity ou voltage-clamp
-do
-    echo "   - $ex"
-    dir=`echo $ex | tr ' ' '_'`
-    mkdir -p $out/$dir
-    cd $out/$dir
-    $PREFIX/$ex > stdout.txt 2> stderr.txt
-    cd -
+# Argument parsing
+while [[ $# -gt 0 ]]; do
+    key="$1"
+
+    case $key in
+        -d)
+            distributed=1
+            shift
+            ;;
+        --with=*)
+            example="${key#*=}"
+            if [[ " ${all_examples[@]} " =~ " $example " ]]; then
+                examples+=("$example")
+            else
+                echo "Error: Invalid example '$example'"
+                print_synopsis
+                exit 1
+            fi
+            shift
+            ;;
+        *)
+            if [[ $key == -* ]]; then
+                echo "Error: Invalid argument '$key'"
+                print_synopsis
+                exit 1
+            fi
+            prefix="$key"
+            shift
+            ;;
+    esac
 done
 
-# MPI only examples
-if [[ $PREFIX = mpirun* ]]
-then
-    for ex in remote
-    do
-        echo "   - $ex"
-        dir=`echo $ex | tr ' ' '_'`
-        mkdir -p $out/$dir
-        cd $out/$dir
-        $PREFIX/$ex > stdout.txt 2> stderr.txt
-        cd -
-    done
+# If --with=example was not used, add all entries from all_examples to examples
+if [[ ${#examples[@]} -eq 0 ]]; then
+    examples=("${all_examples[@]}")
 fi
 
-# Do some sanity checks.
-check brunel 6998
-check bench 972
-check ring 94
-check busyring 35000
-check gap_junctions 30
+# Set distributed to true if prefix starts with mpirun
+if [[ $prefix == mpirun* ]]; then
+    distributed=1
+fi
+
+# Concatenate full prefix
+PREFIX="${prefix}${PREFIX}"
+
+# Execute the selected examples
+for example in "${examples[@]}"; do
+    execute_example "${example}"
+done
 
 exit $ok
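Typical invocations of the reworked script (illustrative; launcher arguments are site-specific):

    # run all examples locally
    scripts/run_cpp_examples.sh

    # run a single example
    scripts/run_cpp_examples.sh --with=ring

    # run under a launcher; a prefix starting with mpirun also toggles distributed mode
    scripts/run_cpp_examples.sh --with=remote "mpirun -n 4"
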
diff --git a/test/unit/mech_private_field_access.cpp b/test/unit/mech_private_field_access.cpp
index a7f46e3c8a..aca471c3e7 100644
--- a/test/unit/mech_private_field_access.cpp
+++ b/test/unit/mech_private_field_access.cpp
@@ -93,7 +93,7 @@ std::vector gpu_mechanism_ion_index(const mechanism* m, const st
 
 arb_value_type gpu_mechanism_global(const mechanism* m, const std::string& key) {
     auto p = global_lookup(m, key);
-    arb_value_type v;
+    arb_value_type v = 0.;
     memory::gpu_memcpy_d2h(p, &v, sizeof(v));
     return v;
 }
diff --git a/test/unit/test_event_stream_gpu.cpp b/test/unit/test_event_stream_gpu.cpp
index 940e954c6f..4b548f91b7 100644
--- a/test/unit/test_event_stream_gpu.cpp
+++ b/test/unit/test_event_stream_gpu.cpp
@@ -11,8 +11,6 @@
 using namespace arb;
 
 namespace {
-    auto evtime = [](deliverable_event e) { return event_time(e); };
-
     constexpr cell_local_size_type mech = 13u;
 
     target_handle handle[4] = {