diff --git a/.github/workflows/smartsim.yaml b/.github/workflows/smartsim.yaml
new file mode 100644
index 00000000..44d33d4f
--- /dev/null
+++ b/.github/workflows/smartsim.yaml
@@ -0,0 +1,60 @@
+name: smartsim
+defaults:
+  run:
+    shell: bash -o pipefail -i {0}
+on:
+  push:
+    branches:
+      - main
+      - 'smartsim*'
+      - 'static_client'
+      - 'releases/**'
+  pull_request:
+    types: [opened, reopened]
+  workflow_dispatch:
+    inputs:
+      debug_enabled:
+        type: boolean
+        description: 'Tmate debugging session'
+        required: false
+        default: false
+
+jobs:
+  build:
+    runs-on: ubuntu-22.04
+
+    container:
+      image: ghcr.io/foamscience/openfoam-smartsim:cpu
+      options: --user openfoam
+      env:
+        CATCH_TIMEOUT: 20
+        FOAM_FOAMUT: "/tmp/foamUT"
+        SSDB: "redis:6379"
+
+    services:
+      redis:
+        image: redislabs/redisai
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Setup tmate session
+        uses: mxschmitt/action-tmate@v3
+        if: ${{ github.event_name == 'workflow_dispatch' && inputs.debug_enabled }}
+        with:
+          detached: true
+
+      - name: Compile and Test
+        run: |
+          cd 2023-01/smartsim
+          source /usr/lib/openfoam/openfoam2212/etc/bashrc
+          mkdir -p $FOAM_USER_LIBBIN
+          ./Allwmake
+          git clone https://github.com/FoamScience/foamUT $FOAM_FOAMUT
+          sed -i 's/mpirun/mpirun --oversubscribe/g' $FOAM_FOAMUT/Alltest
+          ln -s "$PWD"/smartsim_function_object/smartSimFunctionObject "$FOAM_FOAMUT"/smartSimFunctionObject
+          ln -s "$PWD"/smartsim_function_object/tests "$FOAM_FOAMUT"/tests/smartSimFOTests
+          cd $FOAM_FOAMUT || exit 1
+          rm -rf tests/exampleTests
+          ./Alltest "$@"
diff --git a/2023-01/smartsim/Allwmake b/2023-01/smartsim/Allwmake
new file mode 100755
index 00000000..dc34e0fa
--- /dev/null
+++ b/2023-01/smartsim/Allwmake
@@ -0,0 +1,3 @@
+#!/usr/bin/bash
+
+wmake smartsim_function_object/smartSimFunctionObject
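The smartsim workflow above points SmartRedis clients at the RedisAI service container through `SSDB`. A rough local equivalent outside Actions, as a sketch (the container name and port mapping are illustrative, not part of this patch; the image name and `SSDB` convention come from the workflow):

```bash
# Stand-in for the CI "redis" service when running the tests locally
docker run -d --name redisai -p 6379:6379 redislabs/redisai
export SSDB="localhost:6379"   # SmartRedis clients resolve the database address from SSDB
```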
diff --git a/2023-01/smartsim/Dockerfile b/2023-01/smartsim/Dockerfile
new file mode 100644
index 00000000..39ac2c5a
--- /dev/null
+++ b/2023-01/smartsim/Dockerfile
@@ -0,0 +1,34 @@
+# Dockerfile to build SmartSim/SmartRedis-ready images for developing OpenFOAM-related stuff
+#FROM opencfd/openfoam-dev
+# The images from OpenCFD do not play nice with Github, so I use my own ones
+FROM ghcr.io/foamscience/jammy-openfoam:v2212
+LABEL maintainer="Mohammed Elwardi Fadeli "
+
+# Dependencies
+USER root
+RUN apt update && apt install -y python3-dev python3-pip git-core git-lfs cmake unzip vim wget
+RUN apt clean && apt autoremove --yes && rm -rf /var/lib/{cache,log}/
+
+USER openfoam
+
+# Software versions; overriding these has no effect on resulting containers, but they are nice to have
+# as environment variables to remove version confusion
+ENV SMARTREDIS_VERSION="6bfcf1deb0d726df75610fe8a352b9ecb13c44f7"
+ENV SMARTSIM_VERSION="0.5.1"
+
+# Install SmartSim (maybe get the development version for this too?)
+RUN pip3 install -U smartsim[ml]==$SMARTSIM_VERSION
+ENV PATH=$PATH:/home/openfoam/.local/bin
+
+# Build SmartRedis
+ENV FOAM_SMARTREDIS=$HOME/smartredis
+RUN git clone https://github.com/CrayLabs/SmartRedis $FOAM_SMARTREDIS &&\
+    cd $FOAM_SMARTREDIS && git reset --hard $SMARTREDIS_VERSION
+RUN cd $FOAM_SMARTREDIS && smart clobber && smart build --device=cpu && make lib
+
+# Environment variables to use for linking SmartRedis libs to OpenFOAM bins/libs
+ENV SMARTREDIS_INCLUDE=$FOAM_SMARTREDIS/install/include
+ENV SMARTREDIS_LIB=$FOAM_SMARTREDIS/install/lib
+ENV LD_LIBRARY_PATH=$SMARTREDIS_LIB:$LD_LIBRARY_PATH
+RUN find /usr/lib/openfoam -iname DiagonalMatrix.H -exec sudo sed -i 's/DiagonalMatrix/DiagonalMatrix/g' {} \;
+RUN sudo chmod 777 -R /home/openfoam/data
diff --git a/2023-01/smartsim/configure-smartredis.sh b/2023-01/smartsim/configure-smartredis.sh
index 1809c7dd..5c71147e 100755
--- a/2023-01/smartsim/configure-smartredis.sh
+++ b/2023-01/smartsim/configure-smartredis.sh
@@ -3,14 +3,11 @@
 # Define environmental variables for including and linking SmartRedis in
 # OpenFOAM applications and libraries.
 
-echo Warning: make sure you sourced configure-smartredis.sh in its folder.
-# Otherwise, the include files for OpenFOAM application/library compilations will be wrong.
-
 export FOAM_SMARTREDIS=$PWD/smartredis
-export FOAM_SMARTREDIS_INCLUDE=$FOAM_SMARTREDIS/include
-export FOAM_SMARTREDIS_DEP_INCLUDE=$FOAM_SMARTREDIS/install/include
-export FOAM_SMARTREDIS_LIB=$FOAM_SMARTREDIS/install/lib
-export FOAM_SMARTREDIS_BUILD_LIB=$FOAM_SMARTREDIS/build
-export LD_LIBRARY_PATH=$FOAM_SMARTREDIS_BUILD_LIB:$LD_LIBRARY_PATH
+if [ ! -d "$FOAM_SMARTREDIS" ]; then
+  echo "$FOAM_SMARTREDIS does not exist, please source configure-smartredis.sh from its folder"
+fi
+export SMARTREDIS_INCLUDE=$FOAM_SMARTREDIS/install/include
+export SMARTREDIS_LIB=$FOAM_SMARTREDIS/install/lib
-export LD_LIBRARY_PATH=$FOAM_SMARTREDIS_LIB:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=$SMARTREDIS_LIB:$LD_LIBRARY_PATH
 export SSDB="127.0.0.1:8000" # for multinode setup let smartsim do this
diff --git a/2023-01/smartsim/smartredis-simpleFoam/simpleRedisFoam/Make/options b/2023-01/smartsim/smartredis-simpleFoam/simpleRedisFoam/Make/options
index 22295823..b8899baa 100644
--- a/2023-01/smartsim/smartredis-simpleFoam/simpleRedisFoam/Make/options
+++ b/2023-01/smartsim/smartredis-simpleFoam/simpleRedisFoam/Make/options
@@ -7,8 +7,7 @@ EXE_INC = \
     -I$(LIB_SRC)/TurbulenceModels/incompressible/lnInclude \
     -I$(LIB_SRC)/transportModels \
     -I$(LIB_SRC)/transportModels/incompressible/singlePhaseTransportModel \
-    -I$(FOAM_SMARTREDIS_INCLUDE) \
-    -I$(FOAM_SMARTREDIS_DEP_INCLUDE)
+    -I$(SMARTREDIS_INCLUDE)
 
 EXE_LIBS = \
     -lfiniteVolume \
@@ -19,5 +18,5 @@ EXE_LIBS = \
     -lincompressibleTurbulenceModels \
     -lincompressibleTransportModels \
     -latmosphericModels \
-    -L$(FOAM_SMARTREDIS_LIB) -lhiredis -lredis++ \
-    -L$(FOAM_SMARTREDIS_BUILD_LIB) -lsmartredis
+    -L$(SMARTREDIS_LIB) -lhiredis -lredis++ \
+    -lsmartredis
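After the rename, OpenFOAM builds consume just two variables, `SMARTREDIS_INCLUDE` and `SMARTREDIS_LIB`. A minimal build flow under the layout this patch assumes (the solver path comes from the diff above; the rest is a sketch, not part of the patch):

```bash
# Stage SmartRedis, export the two variables, then compile the solver with wmake
source ./configure-smartredis.sh   # sets SMARTREDIS_INCLUDE and SMARTREDIS_LIB
wmake smartredis-simpleFoam/simpleRedisFoam
```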
diff --git a/2023-01/smartsim/smartredis/.github/workflows/build_wheels.yml b/2023-01/smartsim/smartredis/.github/workflows/build_wheels.yml
index 102457df..9dc65799 100644
--- a/2023-01/smartsim/smartredis/.github/workflows/build_wheels.yml
+++ b/2023-01/smartsim/smartredis/.github/workflows/build_wheels.yml
@@ -1,3 +1,31 @@
+#
+# BSD 2-Clause License
+#
+# Copyright (c) 2021-2023, Hewlett Packard Enterprise
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
 name: build_wheels
 
 on: [push, pull_request]
@@ -18,7 +46,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-20.04, macos-10.15]
+        os: [ubuntu-20.04, macos-12]
         gcc_v: [8] # Version of GFortran we want to use.
     env:
       FC: gfortran-${{ matrix.gcc_v }}
@@ -29,7 +57,7 @@ jobs:
       - uses: actions/setup-python@v2
 
       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==1.10.0
+        run: python -m pip install 'cibuildwheel>=2.12.3'
 
       - name: Install GFortran Linux
         if: contains(matrix.os, 'ubuntu')
diff --git a/2023-01/smartsim/smartredis/.github/workflows/docker.yml b/2023-01/smartsim/smartredis/.github/workflows/docker.yml
index 2134075c..b489e2d0 100644
--- a/2023-01/smartsim/smartredis/.github/workflows/docker.yml
+++ b/2023-01/smartsim/smartredis/.github/workflows/docker.yml
@@ -1,7 +1,7 @@
 #
 # BSD 2-Clause License
 #
-# Copyright (c) 2021, Hewlett Packard Enterprise
+# Copyright (c) 2021-2023, Hewlett Packard Enterprise
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
diff --git a/2023-01/smartsim/smartredis/.github/workflows/release.yml b/2023-01/smartsim/smartredis/.github/workflows/release.yml
index c5c7107c..1984b92e 100644
--- a/2023-01/smartsim/smartredis/.github/workflows/release.yml
+++ b/2023-01/smartsim/smartredis/.github/workflows/release.yml
@@ -1,3 +1,31 @@
+#
+# BSD 2-Clause License
+#
+# Copyright (c) 2021-2023, Hewlett Packard Enterprise
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
 name: deploy-release
 
 on:
@@ -20,7 +48,7 @@ jobs:
     strategy:
       fail-fast: false
      matrix:
-        os: [ubuntu-20.04, macos-10.15]
+        os: [ubuntu-20.04, macos-12]
         gcc_v: [8] # Version of GFortran we want to use.
     env:
       FC: gfortran-${{ matrix.gcc_v }}
@@ -31,7 +59,7 @@ jobs:
       - uses: actions/setup-python@v2
 
       - name: Install cibuildwheel
-        run: python -m pip install cibuildwheel==1.10.0
+        run: python -m pip install 'cibuildwheel>=2.12.3'
 
       - name: Install GFortran Linux
         if: contains(matrix.os, 'ubuntu')
diff --git a/2023-01/smartsim/smartredis/.github/workflows/run_post_merge_tests.yml b/2023-01/smartsim/smartredis/.github/workflows/run_post_merge_tests.yml
deleted file mode 100644
index d63e72ef..00000000
--- a/2023-01/smartsim/smartredis/.github/workflows/run_post_merge_tests.yml
+++ /dev/null
@@ -1,146 +0,0 @@
-name: run_post_merge_tests
-
-# This file is for tests that are to be run rarely, once on each merge.
-
-on:
-  push:
-    branches:
-      - master
-      - develop
-
-env:
-  HOMEBREW_NO_ANALYTICS: "ON" # Make Homebrew installation a little quicker
-  HOMEBREW_NO_AUTO_UPDATE: "ON"
-  HOMEBREW_NO_BOTTLE_SOURCE_FALLBACK: "ON"
-  HOMEBREW_NO_GITHUB_API: "ON"
-  HOMEBREW_NO_INSTALL_CLEANUP: "ON"
-  DEBIAN_FRONTEND: "noninteractive" # disable interactive apt installs
-  SSDB: "127.0.0.1:6379"
-  SMARTREDIS_TEST_CLUSTER: False
-
-jobs:
-
-  run_tests:
-    name: Run smartredis tests using ${{ matrix.os }}, Python ${{ matrix.py_v }}, RedisAI ${{ matrix.rai_v }}, and compiler ${{ matrix.compiler }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [ubuntu-20.04] # cannot test on macOS as docker isn't supported on Mac
-        compiler: [intel, 8, 9, 10, 11] # intel compiler, and versions of GNU compiler
-        rai_v: [1.2.4, 1.2.5] # versions of RedisAI
-        py_v: ['3.7.x', '3.8.x', '3.9.x'] # versions of Python
-    env:
-      FC: gfortran-${{ matrix.compiler }}
-      GCC_V: ${{ matrix.compiler }} # used when the compiler is gcc/gfortran
-
-    # Service containers to run with `container-job`
-    services:
-      # Label used to access the service container
-      redis:
-        # Docker Hub image
-        image: redislabs/redisai:${{ matrix.rai_v }}-cpu-bionic
-
-        # Set health checks to wait until redis has started
-        options: >-
-          --health-cmd "redis-cli ping"
-          --health-interval 10s
-          --health-timeout 5s
-          --health-retries 5
-        ports:
-          # map port 6379 on service container to the host
-          - 6379:6379
-
-    steps:
-      # download a copy of SmartRedis before running CI tests
-      - uses: actions/checkout@v2
-
-      - uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.py_v }}
-
-      # Install compilers
-      - name: Install GCC
-        if: "!contains( matrix.compiler, 'intel' )" # if using GNU compiler
-        run: |
-          sudo apt-get update &&
-          sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test &&
-          sudo apt-get update &&
-          sudo apt-get install -y gcc-${GCC_V} gfortran-${GCC_V} g++-${GCC_V} &&
-          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_V} 100 \
-            --slave /usr/bin/gfortran gfortran /usr/bin/gfortran-${GCC_V} \
-            --slave /usr/bin/g++ g++ /usr/bin/g++-${GCC_V}
-
-      - name: Install Intel compiler
-        if: "contains( matrix.compiler, 'intel' )" # if using intel compiler
-        run: |
-          wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
-          sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
-          rm GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
-          echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list &&
-          sudo apt-get update &&
-          sudo apt-get install intel-oneapi-compiler-fortran intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic
-          source /opt/intel/oneapi/setvars.sh &&
-          printenv >> $GITHUB_ENV &&
-          echo "CC=icc" >> $GITHUB_ENV &&
-          echo "CXX=icpc" >> $GITHUB_ENV &&
-          echo "FC=ifort" >> $GITHUB_ENV
-
-      # Set up standalone Redis environment
-      - name: Install Cmake Linux
-        if: contains(matrix.os, 'ubuntu')
-        run: sudo apt-get install cmake
-
-      - name: Build SmartRedis python and install
-        run: python -m pip install -e .[dev]
-
-      # CI tests that should be run with standalone Redis go here
-      # (none at present)
-
-      # Set up clustered Redis environment
-      - name: Install docker and redis-server
-        run: |
-          sudo apt-get update && sudo apt-get -y install curl gnupg lsb-release software-properties-common ca-certificates && \
-          # Add latest redis to apt sources
-          echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list && \
-          curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \
-          # Add latest docker to apt sources
-          echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null && \
-          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg && \
-          sudo apt-get update && \
-          sudo apt-get -y install iputils-ping docker-ce docker-ce-cli containerd.io redis-tools=6:6.2.5-1rl1~focal1 redis-server=6:6.2.5-1rl1~focal1
-
-      - name: Copy redisai from docker image
-        run: |
-          docker create --name redisai --rm redislabs/redisai:${{ matrix.rai_v }}-cpu-bionic && \
-          docker cp redisai:/usr/lib/redis/modules/redisai.so $HOME &&
-          sudo mkdir -p /usr/lib/redis/modules/ &&
-          sudo docker cp redisai:/usr/lib/redis/modules/backends/ /usr/lib/redis/modules/
-
-      - name: Setup local redis servers
-        run: |
-          redis-server --port 7000 --daemonize yes --cluster-enabled yes --cluster-config-file 7000.conf --protected-mode no --loadmodule $HOME/redisai.so TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so &
-          redis-server --port 7001 --daemonize yes --cluster-enabled yes --cluster-config-file 7001.conf --protected-mode no --loadmodule $HOME/redisai.so TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so &
-          redis-server --port 7002 --daemonize yes --cluster-enabled yes --cluster-config-file 7002.conf --protected-mode no --loadmodule $HOME/redisai.so TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so
-
-      - name: Overwrite redis cluster env vars
-        run: |
-          echo "SSDB=127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002" >> $GITHUB_ENV &&
-          echo "SMARTREDIS_TEST_CLUSTER=True" >> $GITHUB_ENV
-
-      - name: Start redis cluster
-        run: redis-cli --cluster create $(echo $SSDB | tr "," " ") --cluster-yes
-
-      # CI tests that should be run with clustered Redis go here
-      - name: Install MPI for parallel examples -- for GCC, we can just use the std install
-        if: contains(matrix.os, 'ubuntu') && !contains(matrix.compiler, 'intel')
-        run: sudo apt-get install -y mpich
-
-      - name: Build and run examples # Only with GNU compiler as that's what mpich is built for
-        if: contains(matrix.os, 'ubuntu') && !contains(matrix.compiler, 'intel')
-        run: |
-          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/install/lib &&
-          export SR_LOG_FILE=smartredis_examples_log.txt &&
-          export SR_LOG_LEVEL=INFO &&
-          make test-examples
-
diff --git a/2023-01/smartsim/smartredis/.github/workflows/run_static_and_examples.yml b/2023-01/smartsim/smartredis/.github/workflows/run_static_and_examples.yml
new file mode 100644
index 00000000..5687f773
--- /dev/null
+++ b/2023-01/smartsim/smartredis/.github/workflows/run_static_and_examples.yml
@@ -0,0 +1,153 @@
+#
+# BSD 2-Clause License
+#
+# Copyright (c) 2021-2023, Hewlett Packard Enterprise
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+name: run_static_and_examples
+
+on:
+  push:
+    branches:
+      - master
+      - develop
+  pull_request:
+    branches:
+      - master
+      - develop
+env:
+  HOMEBREW_NO_ANALYTICS: "ON" # Make Homebrew installation a little quicker
+  HOMEBREW_NO_AUTO_UPDATE: "ON"
+  HOMEBREW_NO_BOTTLE_SOURCE_FALLBACK: "ON"
+  HOMEBREW_NO_GITHUB_API: "ON"
+  HOMEBREW_NO_INSTALL_CLEANUP: "ON"
+  DEBIAN_FRONTEND: "noninteractive" # disable interactive apt installs
+
+jobs:
+
+  run_tests:
+    name: Run smartredis tests using ${{ matrix.os }}, Python ${{ matrix.py_v }}, RedisAI ${{ matrix.rai_v }}, and compiler ${{ matrix.compiler }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-20.04] # cannot test on macOS as docker isn't supported on Mac
+        compiler: [intel, 8, 9, 10, 11] # intel compiler, and versions of GNU compiler
+        rai_v: [1.2.7] # version(s) of RedisAI
+        py_v: ['3.7.x', '3.8.x', '3.9.x', '3.10.x'] # versions of Python
+    env:
+      FC: gfortran-${{ matrix.compiler }}
+      GCC_V: ${{ matrix.compiler }} # used when the compiler is gcc/gfortran
+
+    steps:
+      # download a copy of SmartRedis before running CI tests
+      - uses: actions/checkout@v3
+
+      # Setup python within the container
+      - uses: actions/setup-python@v4
+        with:
+          python-version: ${{ matrix.py_v }}
+
+      # Install compilers (Intel or GCC)
+      - name: Install GCC
+        if: "!contains( matrix.compiler, 'intel' )" # if using GNU compiler
+        run: |
+          sudo apt-get -y update &&
+          sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test &&
+          sudo apt-get -y update &&
+          sudo apt-get -y install gcc-${GCC_V} gfortran-${GCC_V} g++-${GCC_V} &&
+          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_V} 100 \
+            --slave /usr/bin/gfortran gfortran /usr/bin/gfortran-${GCC_V} \
+            --slave /usr/bin/g++ g++ /usr/bin/g++-${GCC_V} \
+            --slave /usr/bin/gcov gcov /usr/bin/gcov-${GCC_V} &&
+          echo "CC=gcc" >> $GITHUB_ENV &&
+          echo "CXX=g++" >> $GITHUB_ENV &&
+          echo "FC=gfortran" >> $GITHUB_ENV
+        # Note CC and CXX need to be set; otherwise, some Ubuntu images default to
+        # a Debian-flavored compiler
+
+      - name: Install Intel compiler
+        if: "contains( matrix.compiler, 'intel' )" # if using intel compiler
+        run: |
+          wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
+          sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
+          rm GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
+          echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list &&
+          sudo apt-get update -y &&
+          sudo apt-get install -y intel-oneapi-compiler-fortran intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic
+          source /opt/intel/oneapi/setvars.sh &&
+          printenv >> $GITHUB_ENV &&
+          echo "CC=icx" >> $GITHUB_ENV &&
+          echo "CXX=icpx" >> $GITHUB_ENV &&
+          echo "FC=ifort" >> $GITHUB_ENV
+
+      # Install additional dependencies
+      - name: Install Cmake Linux
+        if: contains(matrix.os, 'ubuntu')
+        run: sudo apt-get -y install cmake
+
+      - name: Build SmartRedis python and install
+        run: python -m pip install -e .[dev,xarray]
+
+      - name: Build and install test dependencies
+        run: make lcov && make catch2
+
+      - name: Install docker, redis-server, and RedisAI
+        run: |
+          sudo apt-get -y update && sudo apt-get -y install curl gnupg lsb-release software-properties-common ca-certificates && \
+          # Add latest redis to apt sources
+          sudo echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list && \
+          sudo curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \
+          # Add latest docker to apt sources
+          sudo mkdir -m 0755 -p /etc/apt/keyrings &&
+          sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg &&
+          echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
+            $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null &&
+          # Install and update
+          sudo apt-get -y update && \
+          sudo apt-get -y install iputils-ping docker-ce docker-ce-cli containerd.io redis-tools=6:6.2.5-1rl1~focal1 redis-server=6:6.2.5-1rl1~focal1 &&
+          CONTAINER_NAME="redisai_$RANDOM" &&
+          sudo docker create --name $CONTAINER_NAME --rm redislabs/redisai:${{ matrix.rai_v }}-cpu-bionic && \
+          sudo mkdir -p /usr/lib/redis/modules/ &&
+          sudo docker cp $CONTAINER_NAME:/usr/lib/redis/modules/redisai.so /usr/lib/redis/modules &&
+          sudo docker cp $CONTAINER_NAME:/usr/lib/redis/modules/backends/ /usr/lib/redis/modules/ &&
+          echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/install/lib" >> $GITHUB_ENV &&
+          echo "REDISAI_CPU_INSTALL_PATH=/usr/lib/redis/modules/" >> $GITHUB_ENV &&
+          echo "SR_CICD_EXECUTION=True" >> $GITHUB_ENV &&
+          echo "REDISAI_MODULES=/usr/lib/redis/modules/redisai.so \
+            TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so \
+            TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so" >> $GITHUB_ENV
+
+      # Run the examples
+      - name: Install MPI for parallel examples -- for GCC, we can just use the std install
+        if: contains(matrix.os, 'ubuntu') && !contains(matrix.compiler, 'intel')
+        run: sudo apt-get install -y mpich
+      - name: Test static build -- GNU compilers
+        if: "!contains( matrix.compiler, 'intel' )" # if using GNU compiler
+        run: make test-examples SR_FORTRAN=ON SR_PYTHON=ON \
+          SR_TEST_PORT=7000 SR_TEST_REDISAI_VER=v${{ matrix.rai_v }}
+      - name: Test static build -- intel compilers
+        if: contains(matrix.compiler, 'intel')
+        run: make build-example-serial SR_FORTRAN=ON
diff --git a/2023-01/smartsim/smartredis/.github/workflows/run_tests.yml b/2023-01/smartsim/smartredis/.github/workflows/run_tests.yml
index 0de02fc5..b527cde0 100644
--- a/2023-01/smartsim/smartredis/.github/workflows/run_tests.yml
+++ b/2023-01/smartsim/smartredis/.github/workflows/run_tests.yml
@@ -1,3 +1,31 @@
+#
+# BSD 2-Clause License
+#
+# Copyright (c) 2021-2023, Hewlett Packard Enterprise
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
 name: run_tests
 
 # This file is for tests that are to be run frequently, with each push to a PR.
@@ -19,8 +47,8 @@ env:
   HOMEBREW_NO_GITHUB_API: "ON"
   HOMEBREW_NO_INSTALL_CLEANUP: "ON"
   DEBIAN_FRONTEND: "noninteractive" # disable interactive apt installs
-  SSDB: "127.0.0.1:6379"
-  SMARTREDIS_TEST_CLUSTER: False
+  SR_LOG_FILE: "smartredis_cicd_tests_log.txt"
+  SR_LOG_LEVEL: "INFO"
 
 jobs:
 
@@ -31,49 +59,46 @@ jobs:
       fail-fast: false
       matrix:
         os: [ubuntu-20.04] # cannot test on macOS as docker isn't supported on Mac
+        rai_v: [1.2.7] # versions of RedisAI
+        py_v: ['3.7.x', '3.8.x', '3.9.x', '3.10.x'] # versions of Python
         compiler: [intel, 8, 9, 10, 11] # intel compiler, and versions of GNU compiler
-        rai_v: [1.2.4, 1.2.5] # versions of RedisAI
-        py_v: ['3.7.x', '3.8.x', '3.9.x'] # versions of Python
     env:
       FC: gfortran-${{ matrix.compiler }}
       GCC_V: ${{ matrix.compiler }} # used when the compiler is gcc/gfortran
 
-    # Service containers to run with `container-job`
-    services:
-      # Label used to access the service container
-      redis:
-        # Docker Hub image
-        image: redislabs/redisai:${{ matrix.rai_v }}-cpu-bionic
-
-        # Set health checks to wait until redis has started
-        options: >-
-          --health-cmd "redis-cli ping"
-          --health-interval 10s
-          --health-timeout 5s
-          --health-retries 5
-        ports:
-          # map port 6379 on service container to the host
-          - 6379:6379
-
     steps:
       # download a copy of SmartRedis before running CI tests
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
 
-      - uses: actions/setup-python@v2
+      # Setup python within the container
+      - uses: actions/setup-python@v4
         with:
          python-version: ${{ matrix.py_v }}
 
-      # Install compilers
+      # Free up some disk space
+      - name: Free disk space
+        run: |
+          sudo rm -rf /usr/share/dotnet &&
+          sudo rm -rf /opt/ghc &&
+          sudo rm -rf "/usr/local/share/boost"
+
+      # Install compilers (Intel or GCC)
       - name: Install GCC
         if: "!contains( matrix.compiler, 'intel' )" # if using GNU compiler
         run: |
-          sudo apt-get update &&
+          sudo apt-get -y update &&
           sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test &&
-          sudo apt-get update &&
-          sudo apt-get install -y gcc-${GCC_V} gfortran-${GCC_V} g++-${GCC_V} &&
+          sudo apt-get -y update &&
+          sudo apt-get -y install gcc-${GCC_V} gfortran-${GCC_V} g++-${GCC_V} &&
           sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_V} 100 \
             --slave /usr/bin/gfortran gfortran /usr/bin/gfortran-${GCC_V} \
-            --slave /usr/bin/g++ g++ /usr/bin/g++-${GCC_V}
+            --slave /usr/bin/g++ g++ /usr/bin/g++-${GCC_V} \
+            --slave /usr/bin/gcov gcov /usr/bin/gcov-${GCC_V} &&
+          echo "CC=gcc" >> $GITHUB_ENV &&
+          echo "CXX=g++" >> $GITHUB_ENV &&
+          echo "FC=gfortran" >> $GITHUB_ENV
+        # Note CC and CXX need to be set; otherwise, some Ubuntu images default to
+        # a Debian-flavored compiler
 
       - name: Install Intel compiler
         if: "contains( matrix.compiler, 'intel' )" # if using intel compiler
@@ -82,90 +107,98 @@ jobs:
           sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
           rm GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
           echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list &&
-          sudo apt-get update &&
-          sudo apt-get install intel-oneapi-compiler-fortran intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic
+          sudo apt-get update -y &&
+          sudo apt-get install -y intel-oneapi-compiler-fortran intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic
           source /opt/intel/oneapi/setvars.sh &&
           printenv >> $GITHUB_ENV &&
-          echo "CC=icc" >> $GITHUB_ENV &&
-          echo "CXX=icpc" >> $GITHUB_ENV &&
+          echo "CC=icx" >> $GITHUB_ENV &&
+          echo "CXX=icpx" >> $GITHUB_ENV &&
           echo "FC=ifort" >> $GITHUB_ENV
 
-      # Set up standalone Redis environment
+      # Set up perl environment for LCOV
+      - uses: actions/checkout@v3
+      - name: Setup perl
+        uses: shogo82148/actions-setup-perl@v1
+        with:
+          perl-version: '5.30'
+          install-modules: Memory::Process
+
+      # Install additional perl Modules
+      - name: Add perl modules
+        run: |
+          sudo apt install libcapture-tiny-perl && \
+          sudo apt install libdatetime-perl && \
+          sudo apt install libdevel-cover-perl && \
+          sudo apt install libdigest-md5-perl && \
+          sudo apt install libfile-spec-perl && \
+          sudo apt install libjson-xs-perl && \
+          sudo apt install libtime-hires-perl
+
+      # Install additional dependencies
       - name: Install Cmake Linux
         if: contains(matrix.os, 'ubuntu')
-        run: sudo apt-get install cmake
+        run: sudo apt-get -y install cmake
 
       - name: Build SmartRedis python and install
         run: python -m pip install -e .[dev,xarray]
 
-      # CI tests that should be run with standalone Redis go here
-      - name: Build and run tests
-        run: |
-          mkdir -p ./third-party &&
-          cd ./third-party &&
-          bash ../build-scripts/build-lcov.sh &&
-          bash ../build-scripts/build-catch.sh &&
-          cd ../ &&
-          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/install/lib &&
-          export SR_LOG_FILE=smartredis_cicd_tests_log.txt &&
-          export SR_LOG_LEVEL=INFO &&
-          make test-verbose
-
-      - name: Run Python coverage tests
-        run: python -m pytest --cov=./src/python/module/smartredis/ --cov-report=xml --cov-append -vv ./tests/python/
-
-      - name: Run C++ coverage tests # unit tests already built
-        run: bash ./build-scripts/build_cpp_cov.sh
-
-      - name: Upload Python coverage to Codecov
-        uses: codecov/codecov-action@v2
-        with:
-          files: ./coverage.xml
-
-      - name: Upload C++ coverage to Codecov
-        uses: codecov/codecov-action@v2
-        with:
-          files: ./tests/cpp/unit-tests/build/CMakeFiles/cpp_unit_tests.dir/coverage.info
+      - name: Build and install test dependencies
+        run: make lcov && make catch2
 
-      # Set up clustered Redis environment
-      - name: Install docker and redis-server
+      - name: Install docker, redis-server, and RedisAI
         run: |
-          sudo apt-get update && sudo apt-get -y install curl gnupg lsb-release software-properties-common ca-certificates && \
+          sudo apt-get -y update && sudo apt-get -y install curl gnupg lsb-release software-properties-common ca-certificates && \
           # Add latest redis to apt sources
-          echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list && \
-          curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \
+          sudo echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list && \
+          sudo curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \
           # Add latest docker to apt sources
-          echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null && \
-          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg && \
-          sudo apt-get update && \
-          sudo apt-get -y install iputils-ping docker-ce docker-ce-cli containerd.io redis-tools=6:6.2.5-1rl1~focal1 redis-server=6:6.2.5-1rl1~focal1
-
-      - name: Copy redisai from docker image
-        run: |
-          docker create --name redisai --rm redislabs/redisai:${{ matrix.rai_v }}-cpu-bionic && \
-          docker cp redisai:/usr/lib/redis/modules/redisai.so $HOME &&
+          sudo mkdir -m 0755 -p /etc/apt/keyrings &&
+          sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg &&
+          echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
+            $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null &&
+          # Install and update
+          sudo apt-get -y update && \
+          sudo apt-get -y install iputils-ping docker-ce docker-ce-cli containerd.io redis-tools=6:6.2.5-1rl1~focal1 redis-server=6:6.2.5-1rl1~focal1 &&
+          CONTAINER_NAME="redisai_$RANDOM" &&
+          sudo docker create --name $CONTAINER_NAME --rm redislabs/redisai:${{ matrix.rai_v }}-cpu-bionic && \
           sudo mkdir -p /usr/lib/redis/modules/ &&
-          sudo docker cp redisai:/usr/lib/redis/modules/backends/ /usr/lib/redis/modules/
+          sudo docker cp $CONTAINER_NAME:/usr/lib/redis/modules/redisai.so /usr/lib/redis/modules &&
+          sudo docker cp $CONTAINER_NAME:/usr/lib/redis/modules/backends/ /usr/lib/redis/modules/ &&
+          echo "LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/install/lib" >> $GITHUB_ENV &&
+          echo "REDISAI_CPU_INSTALL_PATH=/usr/lib/redis/modules/" >> $GITHUB_ENV &&
+          echo "SR_CICD_EXECUTION=True" >> $GITHUB_ENV &&
+          echo "REDISAI_MODULES=/usr/lib/redis/modules/redisai.so \
+            TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so \
+            TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so" >> $GITHUB_ENV
+
+      # Check the static build by compiling and linking examples
+      # Only supported on Linux; static builds are not currently available for MacOS
+      - name: Install MPI for parallel examples -- for GCC, we can just use the std install
+        if: contains(matrix.os, 'ubuntu') && !contains(matrix.compiler, 'intel')
+        run: sudo apt-get install -y mpich
+      - name: Test static build -- GNU compilers
+        if: contains(matrix.os, 'ubuntu') && !contains(matrix.compiler, 'intel')
+        run: make build-examples SR_LINK=Static SR_FORTRAN=ON
+      - name: Test static build -- intel compilers
+        if: contains(matrix.os, 'ubuntu') && contains(matrix.compiler, 'intel')
+        run: make build-example-serial SR_LINK=Static SR_FORTRAN=ON
 
-      - name: Setup local redis servers
+      # Run the tests using various DB deployments
+      - name: Run tests
         run: |
-          redis-server --port 7000 --daemonize yes --cluster-enabled yes --cluster-config-file 7000.conf --protected-mode no --save "" --loadmodule $HOME/redisai.so TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so &
-          redis-server --port 7001 --daemonize yes --cluster-enabled yes --cluster-config-file 7001.conf --protected-mode no --save "" --loadmodule $HOME/redisai.so TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so &
-          redis-server --port 7002 --daemonize yes --cluster-enabled yes --cluster-config-file 7002.conf --protected-mode no --save "" --loadmodule $HOME/redisai.so TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so
+          make test-verbose-with-coverage \
+            COV_FLAGS="--cov=./src/python/module/smartredis/ --cov-report=xml --cov-append" \
+            SR_FORTRAN=ON SR_PYTHON=ON SR_TEST_REDIS_MODE=All SR_TEST_PORT=7000 \
+            SR_TEST_REDISAI_VER=v${{ matrix.rai_v }}
 
-      - name: Overwrite redis cluster env vars
-        run: |
-          echo "SSDB=127.0.0.1:7000,127.0.0.1:7001,127.0.0.1:7002" >> $GITHUB_ENV &&
-          echo "SMARTREDIS_TEST_CLUSTER=True" >> $GITHUB_ENV
-
-      - name: Start redis cluster
-        run: redis-cli --cluster create $(echo $SSDB | tr "," " ") --cluster-yes
+      # Process and upload code coverage (Python was collected during pytest)
+      - name: Collect coverage from C/C++/Fortran testers
+        run: third-party/lcov/install/bin/lcov --ignore-errors gcov,mismatch --keep-going -c -d build/Coverage/CMakeFiles -o coverage.info
 
-      # CI tests that should be run with clustered Redis go here
-      - name: Run testing with redis cluster
-        run: |
-          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/install/lib &&
-          export SR_LOG_FILE=smartredis_cicd_tests_log.txt &&
-          export SR_LOG_LEVEL=INFO &&
-          make test-verbose
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v3
+        with:
+          files: ./coverage.xml, ./coverage.info
+
+      - name: Run Pylint
+        run: make check-lint
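The next section deletes the dedicated Unix-domain-socket workflow. For reference, the UDS mode it exercised boils down to the following sketch, distilled from the deleted steps (the RedisAI `--loadmodule` flags are omitted here for brevity):

```bash
# Redis listening on a Unix domain socket instead of TCP (from the deleted UDS workflow)
mkdir -p /tmp && touch /tmp/redis.sock && chmod 777 /tmp/redis.sock
redis-server --unixsocket /tmp/redis.sock --unixsocketperm 777 --port 0 \
             --bind 127.0.0.1 --daemonize yes --protected-mode no
export SSDB="unix:///tmp/redis.sock"   # clients pick up the socket via SSDB
```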
diff --git a/2023-01/smartsim/smartredis/.github/workflows/run_tests_uds.yml b/2023-01/smartsim/smartredis/.github/workflows/run_tests_uds.yml
deleted file mode 100644
index 2119a5d4..00000000
--- a/2023-01/smartsim/smartredis/.github/workflows/run_tests_uds.yml
+++ /dev/null
@@ -1,151 +0,0 @@
-name: run_tests
-
-# This file is for tests that are to be run frequently, with each push to a PR.
-
-on:
-  push:
-    branches:
-      - develop
-  pull_request:
-    branches:
-      - develop
-
-env:
-  HOMEBREW_NO_ANALYTICS: "ON" # Make Homebrew installation a little quicker
-  HOMEBREW_NO_AUTO_UPDATE: "ON"
-  HOMEBREW_NO_BOTTLE_SOURCE_FALLBACK: "ON"
-  HOMEBREW_NO_GITHUB_API: "ON"
-  HOMEBREW_NO_INSTALL_CLEANUP: "ON"
-  DEBIAN_FRONTEND: "noninteractive" # disable interactive apt installs
-  SSDB: "127.0.0.1:6379"
-  SMARTREDIS_TEST_CLUSTER: False
-
-jobs:
-
-  run_tests:
-    name: Run smartredis UDS tests using ${{ matrix.os }}, Python ${{ matrix.py_v }}, RedisAI ${{ matrix.rai_v }}, and compiler ${{ matrix.compiler }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      fail-fast: false
-      matrix:
-        os: [ubuntu-20.04] # cannot test on macOS as docker isn't supported on Mac
-        compiler: [intel, 10] # intel compiler, and versions of GNU compiler
-        rai_v: [1.2.5] # versions of RedisAI
-        py_v: ['3.9.x'] # versions of Python
-    env:
-      FC: gfortran-${{ matrix.compiler }}
-      GCC_V: ${{ matrix.compiler }} # used when the compiler is gcc/gfortran
-
-    # Service containers to run with `container-job`
-    services:
-      # Label used to access the service container
-      redis:
-        # Docker Hub image
-        image: redislabs/redisai:${{ matrix.rai_v }}-cpu-bionic
-
-        # Set health checks to wait until redis has started
-        options: >-
-          --health-cmd "redis-cli ping"
-          --health-interval 10s
-          --health-timeout 5s
-          --health-retries 5
-        ports:
-          # map port 6379 on service container to the host
-          - 6379:6379
-
-    steps:
-      # download a copy of SmartRedis before running CI tests
-      - uses: actions/checkout@v2
-
-      - uses: actions/setup-python@v2
-        with:
-          python-version: ${{ matrix.py_v }}
-
-      # Install compilers
-      - name: Install GCC
-        if: "!contains( matrix.compiler, 'intel' )" # if using GNU compiler
-        run: |
-          sudo apt-get update &&
-          sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test &&
-          sudo apt-get update &&
-          sudo apt-get install -y gcc-${GCC_V} gfortran-${GCC_V} g++-${GCC_V} &&
-          sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-${GCC_V} 100 \
-            --slave /usr/bin/gfortran gfortran /usr/bin/gfortran-${GCC_V} \
-            --slave /usr/bin/g++ g++ /usr/bin/g++-${GCC_V}
-
-      - name: Install Intel compiler
-        if: "contains( matrix.compiler, 'intel' )" # if using intel compiler
-        run: |
-          wget https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
-          sudo apt-key add GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
-          rm GPG-PUB-KEY-INTEL-SW-PRODUCTS-2023.PUB &&
-          echo "deb https://apt.repos.intel.com/oneapi all main" | sudo tee /etc/apt/sources.list.d/oneAPI.list &&
-          sudo apt-get update &&
-          sudo apt-get install intel-oneapi-compiler-fortran intel-oneapi-compiler-dpcpp-cpp-and-cpp-classic
-          source /opt/intel/oneapi/setvars.sh &&
-          printenv >> $GITHUB_ENV &&
-          echo "CC=icc" >> $GITHUB_ENV &&
-          echo "CXX=icpc" >> $GITHUB_ENV &&
-          echo "FC=ifort" >> $GITHUB_ENV
-
-      # Set up standalone Redis environment
-      - name: Install Cmake Linux
-        if: contains(matrix.os, 'ubuntu')
-        run: sudo apt-get install cmake
-
-      - name: Build SmartRedis python and install
-        run: python -m pip install -e .[dev]
-
-      # CI tests that should be run with standalone Redis go here
-      - name: Build and run tests
-        run: |
-          mkdir -p ./third-party &&
-          cd ./third-party &&
-          bash ../build-scripts/build-lcov.sh &&
-          bash ../build-scripts/build-catch.sh &&
-          cd ../ &&
-          echo "make test-verbose"
-
-      # Set up UDS Redis environment
-      - name: Install docker and redis-server
-        run: |
-          sudo apt-get update && sudo apt-get -y install curl gnupg lsb-release software-properties-common ca-certificates && \
-          # Add latest redis to apt sources
-          echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list && \
-          curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg && \
-          # Add latest docker to apt sources
-          echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null && \
-          curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg && \
-          sudo apt-get update && \
-          sudo apt-get -y install iputils-ping docker-ce docker-ce-cli containerd.io redis-tools=6:6.2.5-1rl1~focal1 redis-server=6:6.2.5-1rl1~focal1
-
-      - name: Copy redisai from docker image
-        run: |
-          docker create --name redisai --rm redislabs/redisai:${{ matrix.rai_v }}-cpu-bionic && \
-          docker cp redisai:/usr/lib/redis/modules/redisai.so $HOME &&
-          sudo mkdir -p /usr/lib/redis/modules/ &&
-          sudo docker cp redisai:/usr/lib/redis/modules/backends/ /usr/lib/redis/modules/
-
-      - name: Setup local redis server for Unix domain socket
-        run: |
-          mkdir -p /tmp && \
-          touch /tmp/redis.sock && \
-          chmod 777 /tmp/redis.sock && \
-          redis-server --unixsocket /tmp/redis.sock --unixsocketperm 777 --port 0 --bind 127.0.0.1 --daemonize yes --protected-mode no --loadmodule $HOME/redisai.so TF /usr/lib/redis/modules/backends/redisai_tensorflow/redisai_tensorflow.so TORCH /usr/lib/redis/modules/backends/redisai_torch/redisai_torch.so
-
-      - name: Overwrite redis env vars
-        run: |
-          echo "SSDB=unix:///tmp/redis.sock" >> $GITHUB_ENV &&
-          echo "SMARTREDIS_TEST_CLUSTER=False" >> $GITHUB_ENV
-
-#      - name: Start redis (***WS*** Do I need to do anything special to launch?)
-#        run: redis-cli --cluster create $(echo $SSDB | tr "," " ") --cluster-yes
-
-      # CI tests that should be run with UDS Redis go here
-      - name: Run testing with UDS redis
-        run: |
-          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PWD/install/lib &&
-          export SR_LOG_FILE=smartredis_uds_tests_log.txt &&
-          export SR_LOG_LEVEL=INFO &&
-          make test-verbose
-
diff --git a/2023-01/smartsim/smartredis/.pylintrc b/2023-01/smartsim/smartredis/.pylintrc
index 50b124b6..46cd8d0a 100644
--- a/2023-01/smartsim/smartredis/.pylintrc
+++ b/2023-01/smartsim/smartredis/.pylintrc
@@ -1,3 +1,29 @@
+# BSD 2-Clause License
+#
+# Copyright (c) 2021-2023, Hewlett Packard Enterprise
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+#    list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+#    this list of conditions and the following disclaimer in the documentation
+#    and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
 [MASTER]
 
 # Use multiple processes to speed up Pylint.
@@ -10,19 +36,16 @@ persistent=yes
 # user-friendly hints instead of false-positive error messages.
 suggestion-mode=yes
 
+# Ignore problematic extensions
+extension-pkg-whitelist=pydantic,
+
+# Dirs where we do not care about style
+# ignore-paths=
+
 [MESSAGES CONTROL]
 
-disable=print-statement,
-        parameter-unpacking,
-        unpacking-in-except,
-        old-raise-syntax,
-        backtick,
-        long-suffix,
-        old-ne-operator,
-        old-octal-literal,
-        import-star-module-level,
-        non-ascii-bytes-literal,
+disable=logging-fstring-interpolation,
         raw-checker-failed,
         bad-inline-option,
         locally-disabled,
@@ -31,69 +54,6 @@ disable=print-statement,
         useless-suppression,
         deprecated-pragma,
         use-symbolic-message-instead,
-        apply-builtin,
-        basestring-builtin,
-        buffer-builtin,
-        cmp-builtin,
-        coerce-builtin,
-        execfile-builtin,
-        file-builtin,
-        long-builtin,
-        raw_input-builtin,
-        reduce-builtin,
-        standarderror-builtin,
-        unicode-builtin,
-        xrange-builtin,
-        coerce-method,
-        delslice-method,
-        getslice-method,
-        setslice-method,
-        no-absolute-import,
-        old-division,
-        dict-iter-method,
-        dict-view-method,
-        next-method-called,
-        metaclass-assignment,
-        indexing-exception,
-        raising-string,
-        reload-builtin,
-        oct-method,
-        hex-method,
-        nonzero-method,
-        cmp-method,
-        input-builtin,
-        round-builtin,
-        intern-builtin,
-        unichr-builtin,
-        map-builtin-not-iterating,
-        zip-builtin-not-iterating,
-        range-builtin-not-iterating,
-        filter-builtin-not-iterating,
-        using-cmp-argument,
-        eq-without-hash,
-        div-method,
-        idiv-method,
-        rdiv-method,
-        exception-message-attribute,
-        invalid-str-codec,
-        sys-max-int,
-        bad-python3-import,
-        deprecated-string-function,
-        deprecated-str-translate-call,
-        deprecated-itertools-function,
-        deprecated-types-field,
-        next-method-defined,
-        dict-items-not-iterating,
-        dict-keys-not-iterating,
-        dict-values-not-iterating,
-        deprecated-operator-function,
-        deprecated-urllib-function,
-        xreadlines-attribute,
-        deprecated-sys-function,
-        exception-escape,
-        comprehension-escape
-        bad-continuation,
-        invalid-name,
         too-many-instance-attributes,
         too-many-arguments,
         unused-argument,
@@ -103,18 +63,26 @@ disable=print-statement,
         missing-function-docstring,
         too-many-branches,
         too-many-nested-blocks,
-        no-self-use,
         no-else-break,
         broad-except,
-        pointless-string-statement
+        pointless-string-statement,
+        too-few-public-methods,
+        fixme, # TODO: Enable after reaching a release
+        broad-exception-raised, # TODO: Enable after reaching a MVP
 
 enable=useless-object-inheritance,
        unused-variable,
       unused-import,
-       undefined-variable
+       unused-argument,
+       undefined-variable,
        not-callable,
        arguments-differ,
-       redefined-outer-name
+       redefined-outer-name,
+       bare-except,
+
+load-plugins=pylint.extensions.no_self_use,
+             pylint.extensions.eq_without_hash,
+             pylint.extensions.broad_try_clause,
 
 
 [REPORTS]
@@ -138,7 +106,7 @@ logging-format-style=new
 
 # Logging modules to check that the string format arguments are in logging
 # function parameter format.
-logging-modules=logging
+logging-modules=logging,
 
 
 [VARIABLES]
@@ -153,11 +121,11 @@ allow-global-unused-variables=yes
 # List of strings which can identify a callback function by name. A callback
 # name must start or end with one of those strings.
 callbacks=cb_,
-          _cb
+          _cb,
 
 # A regular expression matching the name of dummy variables (i.e. expected to
 # not be used).
-dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)
 
 # Argument names that match this expression will be ignored. Default to name
 # with leading underscore.
@@ -168,7 +136,10 @@ init-import=no
 
 # List of qualified module names which can have objects that can redefine
 # builtins.
-redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
+redefining-builtins-modules=six.moves,
+                            past.builtins,
+                            future.builtins,
+                            builtins,io
 
 
 [FORMAT]
@@ -187,18 +158,11 @@ indent-after-paren=4
 indent-string='    '
 
 # Maximum number of characters on a single line.
-max-line-length=160
+max-line-length=88
 
 # Maximum number of lines in a module.
 max-module-lines=1000
 
-# List of optional constructs for which whitespace checking is disabled. `dict-
-# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
-# `trailing-comma` allows a space between comma and closing bracket: (a, ).
-# `empty-line` allows space-only lines.
-no-space-check=trailing-comma,
-               dict-separator
-
 # Allow the body of a class to be on the same line as the declaration if body
 # contains single statement.
 single-line-class-stmt=no
@@ -213,15 +177,14 @@ single-line-if-stmt=no
 # Naming style matching correct argument names.
 argument-naming-style=snake_case
 
-# Regular expression matching correct argument names. Overrides argument-
-# naming-style.
-#argument-rgx=
+# Regular expression matching correct argument names. Overrides argument-naming-style.
+# Same as `argument-naming-style=snake_case` but allow for two letter args names
+# argument-rgx=([^\W\dA-Z][^\WA-Z]{1,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$
 
 # Naming style matching correct attribute names.
 attr-naming-style=snake_case
 
-# Regular expression matching correct attribute names. Overrides attr-naming-
-# style.
+# Regular expression matching correct attribute names. Overrides attr-naming-style.
 #attr-rgx=
 
 # Bad variable names which should always be refused, separated by a comma.
@@ -230,7 +193,7 @@ bad-names=foo,
           baz,
           toto,
           tutu,
-          tata
+          tata,
 
 # Naming style matching correct class attribute names.
 class-attribute-naming-style=any
@@ -249,9 +212,14 @@ function-naming-style=snake_case
 good-names=i,
            j,
           k,
+           v,
+           x,
+           e,
            ex,
-           Run,
-           _
+           db,
+           ar,
+           _,
+           fn,
 
 # Include a hint for the correct naming format with invalid-name.
 include-naming-hint=no
@@ -263,7 +231,8 @@ inlinevar-naming-style=any
 method-naming-style=snake_case
 
 # Naming style matching correct module names.
-module-naming-style=snake_case
+# usually snake_case
+module-naming-style=any
 
 # List of decorators that produce properties, such as abc.abstractproperty. Add
 # to this list to register other decorators that produce valid properties.
@@ -273,6 +242,9 @@ property-classes=abc.abstractproperty
 # Naming style matching correct variable names.
 variable-naming-style=snake_case
 
+# Same as `variable-naming-style=snake_case` but allow for two letter vars
+# variable-rgx=([^\W\dA-Z][^\WA-Z]{1,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$
+
 
 [STRING]
@@ -321,6 +293,8 @@ known-third-party=enchant
 # Couples of modules and preferred modules, separated by a comma.
 preferred-modules=
 
+ignored-modules=smartredis.smartredisPy
+
 
 [CLASSES]
@@ -328,7 +302,7 @@ preferred-modules=
 defining-attr-methods=__init__,
                       __new__,
                       setUp,
-                      __post_init__
+                      __post_init__,
 
 # List of member names, which should be excluded from the protected access
 # warning.
@@ -336,10 +310,34 @@ exclude-protected=_asdict,
                   _fields,
                   _replace,
                   _source,
-                  _make
+                  _make,
 
 # List of valid names for the first argument in a class method.
 valid-classmethod-first-arg=cls
 
 # List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=cls
+valid-metaclass-classmethod-first-arg=mcs
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=9
+
+# Maximum number of locals for function / method body
+max-locals=19
+
+# Maximum number of return / yield for function / method body
+max-returns=11
+
+# Maximum number of branch for function / method body
+max-branches=20
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of statements in a try-block
+max-try-statements=7
+
+# Maximum level of inheritance, bumping up to account for db mixins
+max-parents=25
diff --git a/2023-01/smartsim/smartredis/CMakeLists.txt b/2023-01/smartsim/smartredis/CMakeLists.txt
index 4629fabe..1ff6d1aa 100644
--- a/2023-01/smartsim/smartredis/CMakeLists.txt
+++ b/2023-01/smartsim/smartredis/CMakeLists.txt
@@ -24,86 +24,171 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-project(SmartRedis)
+# Enable setting version in the project statement
+if (POLICY CMP0048)
+  cmake_policy(SET CMP0048 NEW)
+endif (POLICY CMP0048)
 
+# Project definition for the SmartRedis project
 cmake_minimum_required(VERSION 3.13)
+project(SmartRedis VERSION "0.4.2")
 
-option(BUILD_PYTHON "Build the python module" ON)
-option(BUILD_FORTRAN "Build the fortran client library" OFF)
+# Configure options for the SmartRedis project
+option(SR_PYTHON "Build the python module" OFF)
+option(SR_FORTRAN "Build the fortran client library" OFF)
+option(SR_PEDANTIC "Build with pickiest compiler settings" OFF)
 
-set(CMAKE_BUILD_TYPE RELEASE)
 set(CMAKE_CXX_STANDARD 17)
 set(CMAKE_INSTALL_PREFIX ${CMAKE_SOURCE_DIR}/install)
 set(CMAKE_CXX_VISIBILITY_PRESET default)
 set(THREADS_PREFER_PTHREAD_FLAG ON)
+set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR})
+include(smartredis_defs)
 
-if (BUILD_FORTRAN)
+# If we want to use Fortran, we have to tell CMake to use it
+if (SR_FORTRAN)
   enable_language(Fortran)
 endif()
 
-if (WERROR)
-  if (CMAKE_COMPILER_IS_GNUCC OR CMAKE_COMPILER_IS_GNUCXX)
+# For now, we only support Pedantic on the main library build.
+# If/when we fine-tune the examples and test cases, move this block
+# to smartredis_defs.cmake
+if (SR_PEDANTIC)
+  if((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND (CMAKE_C_COMPILER_ID STREQUAL "GNU"))
     add_compile_options(-Wall -Werror)
   else()
-    message(WARNING "WERROR was specified, but the CMAKE compiler is not GCC")
+    message(WARNING "SR_PEDANTIC was specified, but the CMAKE compiler is not GCC")
+  endif()
+  if(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
+    set(CMAKE_Fortran_FLAGS "${CMAKE_Fortran_FLAGS} -Wno-maybe-uninitialized")
   endif()
 endif()
 
-find_library(REDISPP redis++ PATHS ${CMAKE_SOURCE_DIR}/install/lib NO_DEFAULT_PATH REQUIRED)
-find_library(HIREDIS hiredis PATHS ${CMAKE_SOURCE_DIR}/install/lib NO_DEFAULT_PATH REQUIRED)
+# Bring in third-party libraries needed for the SmartRedis library
+find_library(REDISPP redis++
+  PATHS ${CMAKE_SOURCE_DIR}/install/lib NO_DEFAULT_PATH
+  REQUIRED STATIC
+)
+find_library(HIREDIS hiredis
+  PATHS ${CMAKE_SOURCE_DIR}/install/lib NO_DEFAULT_PATH
+  REQUIRED STATIC
+)
 find_package(Threads REQUIRED)
-
 set(EXT_CLIENT_LIBRARIES ${REDISPP} ${HIREDIS})
 
+# Define source code that goes into the SmartRedis library
 set(CLIENT_SRC
     src/c/c_client.cpp
+    src/c/c_configoptions.cpp
     src/c/c_dataset.cpp
     src/c/c_error.cpp
     src/c/c_logcontext.cpp
     src/c/c_logger.cpp
     src/cpp/address.cpp
+    src/cpp/addressallcommand.cpp
+    src/cpp/addressanycommand.cpp
+    src/cpp/addressatcommand.cpp
     src/cpp/client.cpp
-    src/cpp/dataset.cpp
+    src/cpp/clusterinfocommand.cpp
     src/cpp/command.cpp
-    src/cpp/keyedcommand.cpp
-    src/cpp/nonkeyedcommand.cpp
-    src/cpp/multikeycommand.cpp
-    src/cpp/singlekeycommand.cpp
+    src/cpp/commandlist.cpp
+    src/cpp/commandreply.cpp
     src/cpp/compoundcommand.cpp
-    src/cpp/addressatcommand.cpp
-    src/cpp/addressanycommand.cpp
-    src/cpp/addressallcommand.cpp
-    src/cpp/clusterinfocommand.cpp
+    src/cpp/configoptions.cpp
+    src/cpp/dataset.cpp
     src/cpp/dbinfocommand.cpp
+    src/cpp/dbnode.cpp
     src/cpp/gettensorcommand.cpp
-    src/cpp/commandlist.cpp
+    src/cpp/keyedcommand.cpp
+    src/cpp/logger.cpp
     src/cpp/metadata.cpp
-    src/cpp/tensorbase.cpp
-    src/cpp/tensorpack.cpp
-    src/cpp/dbnode.cpp
-    src/cpp/commandreply.cpp
-    src/cpp/redisserver.cpp
-    src/cpp/rediscluster.cpp
-    src/cpp/redis.cpp
     src/cpp/metadatafield.cpp
-    src/cpp/stringfield.cpp
+    src/cpp/multikeycommand.cpp
+    src/cpp/nonkeyedcommand.cpp
     src/cpp/pipelinereply.cpp
+    src/cpp/redis.cpp
+    src/cpp/rediscluster.cpp
+    src/cpp/redisserver.cpp
+    src/cpp/singlekeycommand.cpp
+    src/cpp/srobject.cpp
+    src/cpp/stringfield.cpp
+    src/cpp/tensorbase.cpp
+    src/cpp/tensorpack.cpp
     src/cpp/threadpool.cpp
     src/cpp/utility.cpp
-    src/cpp/logger.cpp
-    src/cpp/srobject.cpp
 )
 
+# Define include directories for header files
 include_directories(SYSTEM
     include
     install/include
 )
 
-if (BUILD_FORTRAN)
+# Build the main SmartRedis library
+if (STATIC_BUILD)
+  set(TEMP_LIB_NAME sr${SR_STATIC_NAME_SUFFIX}-static)
+  set(TEMP_LIB_FULLNAME lib${TEMP_LIB_NAME}.a)
+  set(SR_LIB_INSTALL_PATH ${CMAKE_SOURCE_DIR}/include/lib)
+  add_library(smartredis ${SMARTREDIS_LINK_MODE} ${CLIENT_SRC})
+  set_target_properties(smartredis PROPERTIES
+    SUFFIX ${SMARTREDIS_LINK_LIBRARY_SUFFIX}
+  )
+  set_target_properties(smartredis PROPERTIES
+    OUTPUT_NAME ${TEMP_LIB_NAME}
+  )
+  target_link_libraries(smartredis PUBLIC ${EXT_CLIENT_LIBRARIES} PRIVATE Threads::Threads)
+
+  # Merge SmartRedis static library with dependencies
+  # . Create a sacrificial dummy file so we have source to make a target against
+  set(DUMMY_FILE ${CMAKE_BINARY_DIR}/merge-dummy.cpp)
+  file(
+    WRITE ${DUMMY_FILE}
+    "// Life, the universe, and everything!\nstatic int sr_magic_number = 42;\n"
+  )
+  add_library(${SMARTREDIS_LIB} STATIC ${DUMMY_FILE})
+  set(STATIC_LIB_COMPONENTS ${SR_LIB_INSTALL_PATH}/${TEMP_LIB_FULLNAME} ${EXT_CLIENT_LIBRARIES})
+
+  # . Archive the static libraries together into a thin library
+  add_custom_command(
+    TARGET smartredis
+    POST_BUILD
+    COMMAND rm -f lib${SMARTREDIS_LIB}.a && mkdir -p ${SR_LIB_INSTALL_PATH} && cp ${CMAKE_BINARY_DIR}/${TEMP_LIB_FULLNAME} ${SR_LIB_INSTALL_PATH} && ar crT lib${SMARTREDIS_LIB}.a ${STATIC_LIB_COMPONENTS}
+    VERBATIM
+    COMMENT "Bundling static libraries together as lib${SMARTREDIS_LIB}.a"
+  )
+
+  # Install static library
+  install(TARGETS ${SMARTREDIS_LIB}
+    LIBRARY DESTINATION lib)
+else () # Shared library build
+  add_library(smartredis ${SMARTREDIS_LINK_MODE} ${CLIENT_SRC})
+  set_target_properties(smartredis PROPERTIES
+    SUFFIX ${SMARTREDIS_LINK_LIBRARY_SUFFIX}
+  )
+  set_target_properties(smartredis PROPERTIES
+    OUTPUT_NAME ${SMARTREDIS_LIB}
+  )
+  target_link_libraries(smartredis PUBLIC ${EXT_CLIENT_LIBRARIES} PRIVATE Threads::Threads)
+
+  # Install dynamic library
+  install(TARGETS smartredis
+    LIBRARY DESTINATION lib)
+endif()
+
+# Install SmartRedis header files
+install(DIRECTORY "${CMAKE_SOURCE_DIR}/include/"
+        DESTINATION "include"
+        FILES_MATCHING
+        PATTERN "*.h" PATTERN "*.tcc" PATTERN "*.inc"
+)
+
+# Build the Fortran library
+if (SR_FORTRAN)
   set(FORTRAN_SRC
+    src/fortran/errors.F90
     src/fortran/client.F90
+    src/fortran/configoptions.F90
     src/fortran/dataset.F90
-    src/fortran/errors.F90
     src/fortran/fortran_c_interop.F90
     src/fortran/logcontext.F90
     src/fortran/logger.F90
@@ -112,48 +197,37 @@ if (BUILD_FORTRAN)
   # Note the following has to be before ANY add_library command)
   set(CMAKE_Fortran_MODULE_DIRECTORY "${CMAKE_INSTALL_PREFIX}/include")
   # Fortran library
-  add_library(smartredis-fortran SHARED ${FORTRAN_SRC})
-  set_target_properties(smartredis-fortran PROPERTIES SUFFIX ".so")
+  add_library(smartredis-fortran ${SMARTREDIS_LINK_MODE} ${FORTRAN_SRC})
+  set_target_properties(smartredis-fortran PROPERTIES
+    SUFFIX ${SMARTREDIS_LINK_LIBRARY_SUFFIX}
+  )
+  set_target_properties(smartredis-fortran PROPERTIES
+    OUTPUT_NAME ${SMARTREDIS_FORTRAN_LIB}
+  )
   target_link_libraries(smartredis-fortran PUBLIC smartredis ${EXT_CLIENT_LIBRARIES})
   # Install dynamic library and headers
   install(TARGETS smartredis-fortran
     LIBRARY DESTINATION lib)
 endif()
 
-
-# Build dynamic library
-add_library(smartredis SHARED ${CLIENT_SRC})
-set_target_properties(smartredis PROPERTIES SUFFIX ".so")
-target_link_libraries(smartredis PUBLIC ${EXT_CLIENT_LIBRARIES} PRIVATE Threads::Threads)
-
-install(DIRECTORY "${CMAKE_SOURCE_DIR}/include/"
-    DESTINATION "include"
-    FILES_MATCHING
-    PATTERN "*.h" PATTERN "*.tcc" PATTERN "*.inc"
-)
-
-# Install dynamic library and headers
-install(TARGETS smartredis
-    LIBRARY DESTINATION lib)
-
-if(BUILD_PYTHON)
+# Build the Python library for SmartRedis
+if(SR_PYTHON)
   message("-- Python client build enabled")
   add_subdirectory(${CMAKE_SOURCE_DIR}/third-party/pybind
                    ${CMAKE_SOURCE_DIR}/third-party/pybind/build)
-  add_library(smartredis_static STATIC ${CLIENT_SRC})
-
   pybind11_add_module(smartredisPy
-    src/python/src/pysrobject.cpp
-    src/python/src/pylogcontext.cpp
     src/python/src/pyclient.cpp
+    src/python/src/pyconfigoptions.cpp
     src/python/src/pydataset.cpp
+    src/python/src/pylogcontext.cpp
+    src/python/src/pysrobject.cpp
     ${CLIENT_SRC}
    src/python/bindings/bind.cpp)
   target_link_libraries(smartredisPy PUBLIC ${EXT_CLIENT_LIBRARIES})
-  install(TARGETS smartredisPy
-    LIBRARY DESTINATION lib)
+  install(TARGETS smartredisPy LIBRARY DESTINATION lib)
+  install(TARGETS smartredisPy LIBRARY DESTINATION ../src/python/module/smartredis)
 else()
   message("-- Skipping Python client build")
 endif()
src/python/src/pylogcontext.cpp
+ src/python/src/pysrobject.cpp
${CLIENT_SRC}
src/python/bindings/bind.cpp)
target_link_libraries(smartredisPy PUBLIC ${EXT_CLIENT_LIBRARIES})
- install(TARGETS smartredisPy
- LIBRARY DESTINATION lib)
+ install(TARGETS smartredisPy LIBRARY DESTINATION lib)
+ install(TARGETS smartredisPy LIBRARY DESTINATION ../src/python/module/smartredis)
else()
message("-- Skipping Python client build")
endif()
diff --git a/2023-01/smartsim/smartredis/CONTRIBUTING.md b/2023-01/smartsim/smartredis/CONTRIBUTING.md
new file mode 100644
index 00000000..e4976aab
--- /dev/null
+++ b/2023-01/smartsim/smartredis/CONTRIBUTING.md
@@ -0,0 +1,3 @@
+SmartRedis and SmartSim share the same contributor guidelines. Please refer to
+[CONTRIBUTING.rst](https://github.com/CrayLabs/SmartSim/blob/develop/CONTRIBUTING.rst)
+in the SmartSim repo or at [CrayLabs](https://www.craylabs.org/docs/contributing.html).
\ No newline at end of file
diff --git a/2023-01/smartsim/smartredis/MANIFEST.in b/2023-01/smartsim/smartredis/MANIFEST.in
index 5fb01f15..496dc070 100644
--- a/2023-01/smartsim/smartredis/MANIFEST.in
+++ b/2023-01/smartsim/smartredis/MANIFEST.in
@@ -2,6 +2,7 @@ include requirements.txt
include requirements-dev.txt
include Makefile
include CMakeLists.txt
+include smartredis_defs.cmake
graft src
graft build-scripts
graft include
diff --git a/2023-01/smartsim/smartredis/Makefile b/2023-01/smartsim/smartredis/Makefile
index 534cbefb..b15d14ad 100644
--- a/2023-01/smartsim/smartredis/Makefile
+++ b/2023-01/smartsim/smartredis/Makefile
@@ -1,5 +1,67 @@
-
+# BSD 2-Clause License
+#
+# Copyright (c) 2021-2023, Hewlett Packard Enterprise
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
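For orientation, the Makefile that follows ultimately drives the CMake setup shown above. As a minimal sketch, assuming the `SR_*` options declared in the CMakeLists.txt above (and in `smartredis_defs.cmake`, which is not reproduced here), the equivalent manual invocation would look roughly like this, with the values shown being the Makefile defaults rather than the only choices:

```sh
# Configure, build, and install SmartRedis out of source.
# CMAKE_INSTALL_PREFIX defaults to ./install per the CMakeLists.txt above.
cmake -S . -B build/Release \
      -DSR_BUILD=Release -DSR_LINK=Shared \
      -DSR_PEDANTIC=OFF -DSR_FORTRAN=OFF -DSR_PYTHON=OFF
cmake --build build/Release -- -j "$(nproc)"
cmake --install build/Release
```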
+
+# General settings
MAKEFLAGS += --no-print-directory
+SHELL:=/bin/bash
+CWD := $(shell pwd)
+
+# Params for third-party software
+HIREDIS_URL := https://github.com/redis/hiredis.git
+HIREDIS_VER := v1.2.0
+RPP_URL := https://github.com/sewenew/redis-plus-plus.git
+RPP_VER := 1.3.10
+PYBIND_URL := https://github.com/pybind/pybind11.git
+PYBIND_VER := v2.11.1
+REDIS_URL := https://github.com/redis/redis.git
+REDIS_VER := 6.0.8
+REDISAI_URL := https://github.com/RedisAI/RedisAI.git
+# REDISAI_VER is controlled instead by SR_TEST_REDISAI_VER below
+CATCH2_URL := https://github.com/catchorg/Catch2.git
+CATCH2_VER := v2.13.6
+LCOV_URL := https://github.com/linux-test-project/lcov.git
+LCOV_VER := v2.0
+
+# Build variables
+NPROC := $(shell nproc 2>/dev/null || python -c "import multiprocessing as mp; print (mp.cpu_count())" 2>/dev/null || echo 4)
+SR_BUILD := Release
+SR_LINK := Shared
+SR_PEDANTIC := OFF
+SR_FORTRAN := OFF
+SR_PYTHON := OFF
+
+# Test variables
+COV_FLAGS :=
+SR_TEST_REDIS_MODE := Clustered
+SR_TEST_UDS_FILE := /tmp/redis.sock
+SR_TEST_PORT := 6379
+SR_TEST_NODES := 3
+SR_TEST_REDISAI_VER := v1.2.7
+SR_TEST_DEVICE := cpu
+SR_TEST_PYTEST_FLAGS := -vv -s

# Do not remove this block. It is used by the 'help' rule when
# constructing the help output.
@@ -13,109 +75,157 @@ help:
@grep "^# help\:" Makefile | grep -v grep | sed 's/\# help\: //' | sed 's/\# help\://'

# help:
-# help: Build
-# help: -------
+# help: Build variables
+# help: ---------------
+# help:
+# help: These variables affect the way that the SmartRedis library is built. Each
+# help: has several options; the first listed is the default. Use by appending
+# help: the variable name and setting after the make target, e.g.
+# help: make lib SR_BUILD=Debug SR_LINK=Static SR_FORTRAN=ON
+# help:
+# help: SR_BUILD {Release, Debug, Coverage} -- optimization level for the build
+# help: SR_LINK {Shared, Static} -- linkage for the SmartRedis library
+# help: SR_PEDANTIC {OFF, ON} -- GNU only; enable pickiest compiler settings
+# help: SR_FORTRAN {OFF, ON} -- Enable/disable build of Fortran library
+# help: SR_PYTHON {OFF, ON} -- Enable/disable build of Python library
+# help:
+# help: Test variables
+# help: --------------
+# help:
+# help: These variables affect the way that the SmartRedis library is tested. Each
+# help: has several options; the first listed is the default. Use by appending
+# help: the variable name and setting after the make target, e.g.
+# help: make test SR_BUILD=Debug SR_LINK=Static SR_FORTRAN=ON
+# help:
+# help: SR_TEST_REDIS_MODE {Clustered, Standalone} -- type of Redis backend launched for tests
+# help: SR_TEST_PORT (Default: 6379) -- first port for Redis server(s)
+# help: SR_TEST_NODES (Default: 3) -- Number of shards to instantiate for a clustered Redis database
+# help: SR_TEST_REDISAI_VER {v1.2.7, v1.2.5} -- version of RedisAI to use for tests
+# help: SR_TEST_DEVICE {cpu, gpu} -- device type to test on. Warning, this variable is CASE SENSITIVE!
+# help: SR_TEST_PYTEST_FLAGS (Default: "-vv -s") -- Verbosity flags to use with pytest
+
+# help:
+# help: Build targets
+# help: -------------

# help: deps - Make SmartRedis dependencies
.PHONY: deps
-deps: SHELL:=/bin/bash
+deps: hiredis
+deps: redis-plus-plus
+deps: pybind
deps:
- @bash ./build-scripts/build_deps.sh

# help: lib - Build SmartRedis C/C++/Python clients into a dynamic library
.PHONY: lib
-lib: SHELL:=/bin/bash
lib: deps
- @bash ./build-scripts/build_lib.sh $(LIB_BUILD_ARGS)
+lib:
+ @cmake -S .
-B build/$(SR_BUILD) -DSR_BUILD=$(SR_BUILD) -DSR_LINK=$(SR_LINK) \ + -DSR_PEDANTIC=$(SR_PEDANTIC) -DSR_FORTRAN=$(SR_FORTRAN) -DSR_PYTHON=$(SR_PYTHON) + @cmake --build build/$(SR_BUILD) -- -j $(NPROC) + @cmake --install build/$(SR_BUILD) # help: lib-with-fortran - Build SmartRedis C/C++/Python and Fortran clients into a dynamic library .PHONY: lib-with-fortran -lib-with-fortran: SHELL:=/bin/bash -lib-with-fortran: deps - @bash ./build-scripts/build_lib.sh $(LIB_BUILD_ARGS) -DBUILD_FORTRAN=ON +lib-with-fortran: SR_FORTRAN=ON +lib-with-fortran: lib # help: test-lib - Build SmartRedis clients into a dynamic library with least permissive compiler settings .PHONY: test-lib -test-lib: SHELL:=/bin/bash -test-lib: LIB_BUILD_ARGS="-DWERROR=ON" +test-lib: SR_PEDANTIC=ON test-lib: lib # help: test-lib-with-fortran - Build SmartRedis clients into a dynamic library with least permissive compiler settings .PHONY: test-lib-with-fortran -test-lib-with-fortran: SHELL:=/bin/bash -test-lib-with-fortran: LIB_BUILD_ARGS="-DWERROR=ON" +test-lib-with-fortran: SR_PEDANTIC=ON test-lib-with-fortran: lib-with-fortran # help: test-deps - Make SmartRedis testing dependencies .PHONY: test-deps -test-deps: SHELL:=/bin/bash -test-deps: - @bash ./build-scripts/build_test_deps.sh +test-deps: redis +test-deps: redisAI +test-deps: catch2 +test-deps: lcov # help: test-deps-gpu - Make SmartRedis GPU testing dependencies -.PHONY: test-deps -test-deps-gpu: SHELL:=/bin/bash -test-deps-gpu: - @bash ./build-scripts/build_test_deps.sh gpu - +.PHONY: test-deps-gpu +test-deps-gpu: SR_TEST_DEVICE=gpu +test-deps-gpu: test-deps # help: build-tests - build all tests (C, C++, Fortran) .PHONY: build-tests -build-tests: test-lib-with-fortran - ./build-scripts/build_cpp_tests.sh - ./build-scripts/build_cpp_unit_tests.sh - ./build-scripts/build_c_tests.sh - ./build-scripts/build_fortran_tests.sh +build-tests: test-deps +build-tests: test-lib + @cmake -S tests -B build/$(SR_BUILD)/tests/$(SR_LINK) \ + -DSR_BUILD=$(SR_BUILD) -DSR_LINK=$(SR_LINK) -DSR_FORTRAN=$(SR_FORTRAN) + @cmake --build build/$(SR_BUILD)/tests/$(SR_LINK) -- -j $(NPROC) # help: build-test-cpp - build the C++ tests .PHONY: build-test-cpp +build-test-cpp: test-deps build-test-cpp: test-lib - ./build-scripts/build_cpp_tests.sh - ./build-scripts/build_cpp_unit_tests.sh + @cmake -S tests/cpp -B build/$(SR_BUILD)/tests/$(SR_LINK)/cpp \ + -DSR_BUILD=$(SR_BUILD) -DSR_LINK=$(SR_LINK) + @cmake --build build/$(SR_BUILD)/tests/$(SR_LINK)/cpp -- -j $(NPROC) # help: build-unit-test-cpp - build the C++ unit tests .PHONY: build-unit-test-cpp +build-unit-test-cpp: test-deps build-unit-test-cpp: test-lib - ./build-scripts/build_cpp_unit_tests.sh + @cmake -S tests/cpp/unit-tests -B build/$(SR_BUILD)/tests/$(SR_LINK)/cpp/unit-tests \ + -DSR_BUILD=$(SR_BUILD) -DSR_LINK=$(SR_LINK) + @cmake --build build/$(SR_BUILD)/tests/$(SR_LINK)/cpp/unit-tests -- -j $(NPROC) # help: build-test-c - build the C tests .PHONY: build-test-c +build-test-c: test-deps build-test-c: test-lib - ./build-scripts/build_c_tests.sh + @cmake -S tests/c -B build/$(SR_BUILD)/tests/$(SR_LINK)/c \ + -DSR_BUILD=$(SR_BUILD) -DSR_LINK=$(SR_LINK) + @cmake --build build/$(SR_BUILD)/tests/$(SR_LINK)/c -- -j $(NPROC) # help: build-test-fortran - build the Fortran tests .PHONY: build-test-fortran -build-test-fortran: test-lib-with-fortran - ./build-scripts/build_fortran_tests.sh +build-test-fortran: test-deps +build-test-fortran: SR_FORTRAN=ON +build-test-fortran: test-lib + @cmake -S tests/fortran -B 
build/$(SR_BUILD)/tests/$(SR_LINK)/fortran \
+ -DSR_BUILD=$(SR_BUILD) -DSR_LINK=$(SR_LINK)
+ @cmake --build build/$(SR_BUILD)/tests/$(SR_LINK)/fortran -- -j $(NPROC)

# help: build-examples - build all examples (serial, parallel)
.PHONY: build-examples
-build-examples: lib-with-fortran
- ./build-scripts/build_serial_examples.sh
- ./build-scripts/build_parallel_examples.sh
+build-examples: lib
+ @cmake -S examples -B build/$(SR_BUILD)/examples/$(SR_LINK) -DSR_BUILD=$(SR_BUILD) \
+ -DSR_LINK=$(SR_LINK) -DSR_FORTRAN=$(SR_FORTRAN)
+ @cmake --build build/$(SR_BUILD)/examples/$(SR_LINK) -- -j $(NPROC)
+
# help: build-example-serial - build serial examples
.PHONY: build-example-serial
build-example-serial: lib
- ./build-scripts/build_serial_examples.sh
+ @cmake -S examples/serial -B build/$(SR_BUILD)/examples/$(SR_LINK)/serial \
+ -DSR_BUILD=$(SR_BUILD) -DSR_LINK=$(SR_LINK) -DSR_FORTRAN=$(SR_FORTRAN)
+ @cmake --build build/$(SR_BUILD)/examples/$(SR_LINK)/serial

# help: build-example-parallel - build parallel examples (requires MPI)
.PHONY: build-example-parallel
build-example-parallel: lib
- ./build-scripts/build_parallel_examples.sh
+ @cmake -S examples/parallel -B build/$(SR_BUILD)/examples/$(SR_LINK)/parallel \
+ -DSR_BUILD=$(SR_BUILD) -DSR_LINK=$(SR_LINK) -DSR_FORTRAN=$(SR_FORTRAN)
+ @cmake --build build/$(SR_BUILD)/examples/$(SR_LINK)/parallel

# help: clean-deps - remove third-party deps
.PHONY: clean-deps
clean-deps:
- @rm -rf ./third-party
+ @rm -rf ./third-party ./install/lib/libhiredis.a ./install/lib/libredis++.a

-# help: clean - remove builds, pyc files, .gitignore rules
+# help: clean - remove builds, pyc files
.PHONY: clean
clean:
@git clean -X -f -d
@@ -127,8 +237,8 @@ clobber: clean clean-deps

# help:
-# help: Style
-# help: -------
+# help: Style targets
+# help: -------------

# help: style - Sort imports and format with black
.PHONY: style
@@ -167,12 +277,12 @@ check-sort-imports:

# help: check-lint - run static analysis checks
.PHONY: check-lint
check-lint:
- @pylint --rcfile=.pylintrc ./src/python/module/smartredis ./tests/python
+ @pylint --rcfile=.pylintrc ./src/python/module/smartredis


# help:
-# help: Documentation
-# help: -------
+# help: Documentation targets
+# help: ---------------------

# help: docs - generate project documentation
.PHONY: docs
@@ -186,64 +296,314 @@ cov:
@echo if data was present, coverage report is in htmlcov/index.html

# help:
-# help: Test
-# help: -------
+# help: Test targets
+# help: ------------
+
+# Build Pytest flags to skip various subsets of the tests
+ifeq ($(SR_PYTHON),OFF)
+SKIP_PYTHON = --ignore ./tests/python
+endif
+ifeq ($(SR_FORTRAN),OFF)
+SKIP_FORTRAN = --ignore ./tests/fortran
+endif
+SKIP_DOCKER := --ignore ./tests/docker
+
+# Build SSDB string for clustered database
+SSDB_STRING := 127.0.0.1:$(SR_TEST_PORT)
+PORT_RANGE := $(shell seq `expr $(SR_TEST_PORT) + 1` 1 `expr $(SR_TEST_PORT) + $(SR_TEST_NODES) - 1`)
+SSDB_STRING += $(foreach P,$(PORT_RANGE),",127.0.0.1:$(P)")
+SSDB_STRING := $(shell echo $(SSDB_STRING) | tr -d " ")
+
+# Run test cases with a freshly instantiated standalone Redis server
+# Parameters:
+# 1: the test directory in which to run tests
+define run_smartredis_tests_with_standalone_server
+ echo "Launching standalone Redis server" && \
+ export SR_TEST_DEVICE=$(SR_TEST_DEVICE) SR_DB_TYPE=Standalone && \
+ export SMARTREDIS_TEST_CLUSTER=False SMARTREDIS_TEST_DEVICE=$(SR_TEST_DEVICE) && \
+ export SSDB=127.0.0.1:$(SR_TEST_PORT) && \
+ python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes 1 \
+ --rai
$(SR_TEST_REDISAI_VER) --device $(SR_TEST_DEVICE) && \ + echo "Running standalone tests" && \ + PYTHONFAULTHANDLER=1 python -m pytest $(SR_TEST_PYTEST_FLAGS) $(COV_FLAGS) \ + $(SKIP_DOCKER) $(SKIP_PYTHON) $(SKIP_FORTRAN) \ + --build $(SR_BUILD) --link $(SR_LINK) \ + --sr_fortran $(SR_FORTRAN) $(1) ; \ + (testresult=$$?; \ + echo "Shutting down standalone Redis server" && \ + python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes 1 --stop && \ + test $$testresult -eq 0 || echo "Standalone tests failed"; exit $$testresult) && \ + echo "Standalone tests complete" +endef + +# Run test cases with a freshly instantiated clustered Redis server +# Parameters: +# 1: the test directory in which to run tests +define run_smartredis_tests_with_clustered_server + echo "Launching clustered Redis server" && \ + export SR_TEST_DEVICE=$(SR_TEST_DEVICE) SR_DB_TYPE=Clustered && \ + export SMARTREDIS_TEST_CLUSTER=True SMARTREDIS_TEST_DEVICE=$(SR_TEST_DEVICE) && \ + export SSDB=$(SSDB_STRING) && \ + python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes $(SR_TEST_NODES) \ + --rai $(SR_TEST_REDISAI_VER) --device $(SR_TEST_DEVICE) && \ + echo "Running clustered tests" && \ + PYTHONFAULTHANDLER=1 python -m pytest $(SR_TEST_PYTEST_FLAGS) $(COV_FLAGS) \ + $(SKIP_DOCKER) $(SKIP_PYTHON) $(SKIP_FORTRAN) \ + --build $(SR_BUILD) --link $(SR_LINK) \ + --sr_fortran $(SR_FORTRAN) $(1) ; \ + (testresult=$$?; \ + echo "Shutting down clustered Redis server" && \ + python utils/launch_redis.py --port $(SR_TEST_PORT) \ + --nodes $(SR_TEST_NODES) --stop; \ + test $$testresult -eq 0 || echo "Clustered tests failed"; exit $$testresult) && \ + echo "Clustered tests complete" +endef + +# Run test cases with a freshly instantiated standalone Redis server +# connected via a Unix Domain Socket +# Parameters: +# 1: the test directory in which to run tests +define run_smartredis_tests_with_uds_server + echo "Launching standalone Redis server with Unix Domain Socket support" + export SR_TEST_DEVICE=$(SR_TEST_DEVICE) SR_DB_TYPE=Standalone && \ + export SMARTREDIS_TEST_CLUSTER=False SMARTREDIS_TEST_DEVICE=$(SR_TEST_DEVICE) && \ + export SSDB=unix://$(SR_TEST_UDS_FILE) && \ + python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes 1 \ + --rai $(SR_TEST_REDISAI_VER) --device $(SR_TEST_DEVICE) \ + --udsport $(SR_TEST_UDS_FILE) && \ + echo "Running standalone tests with Unix Domain Socket connection" && \ + PYTHONFAULTHANDLER=1 python -m pytest $(SR_TEST_PYTEST_FLAGS) $(COV_FLAGS) \ + $(SKIP_DOCKER) $(SKIP_PYTHON) $(SKIP_FORTRAN) \ + --build $(SR_BUILD) --link $(SR_LINK) \ + --sr_fortran $(SR_FORTRAN) $(1) ; \ + (testresult=$$?; \ + echo "Shutting down standalone Redis server with Unix Domain Socket support" && \ + python utils/launch_redis.py --port $(SR_TEST_PORT) --nodes 1 \ + --udsport $(SR_TEST_UDS_FILE) --stop; \ + test $$testresult -eq 0 || echo "UDS tests failed"; exit $$testresult) && \ + echo "UDS tests complete" +endef + +# Run test cases with freshly instantiated Redis servers +# Parameters: +# 1: the test directory in which to run tests +define run_smartredis_tests_with_server + $(if $(or $(filter $(SR_TEST_REDIS_MODE),Standalone), + $(filter $(SR_TEST_REDIS_MODE),All)), + $(call run_smartredis_tests_with_standalone_server,$(1)) + ) + $(if $(or $(filter $(SR_TEST_REDIS_MODE),Clustered), + $(filter $(SR_TEST_REDIS_MODE),All)), + $(call run_smartredis_tests_with_clustered_server,$(1)) + ) + $(if $(or $(filter $(SR_TEST_REDIS_MODE),UDS), + $(filter $(SR_TEST_REDIS_MODE),All)), + $(if $(filter-out $(shell uname 
-s),Darwin),
+ $(call run_smartredis_tests_with_uds_server,$(1)),
+ @echo "Skipping: Unix Domain Socket is not supported on MacOS"
+ )
+ )
+endef

# help: test - Build and run all tests (C, C++, Fortran, Python)
.PHONY: test
+test: test-deps
test: build-tests
+test: SR_TEST_PYTEST_FLAGS := -vv
test:
- @PYTHONFAULTHANDLER=1 python -m pytest --ignore ./tests/docker -vv ./tests
-
+ @$(call run_smartredis_tests_with_server,./tests)

# help: test-verbose - Build and run all tests [verbosely]
.PHONY: test-verbose
+test-verbose: test-deps
test-verbose: build-tests
+test-verbose: SR_TEST_PYTEST_FLAGS := -vv -s
test-verbose:
- @PYTHONFAULTHANDLER=1 python -m pytest --ignore ./tests/docker -vv -s ./tests
+ @$(call run_smartredis_tests_with_server,./tests)
+
+# help: test-verbose-with-coverage - Build and run all tests [verbose-with-coverage]
+.PHONY: test-verbose-with-coverage
+test-verbose-with-coverage: SR_BUILD := Coverage
+test-verbose-with-coverage: test-deps
+test-verbose-with-coverage: build-tests
+test-verbose-with-coverage: SR_TEST_PYTEST_FLAGS := -vv -s
+test-verbose-with-coverage:
+ @$(call run_smartredis_tests_with_server,./tests)

# help: test-c - Build and run all C tests
.PHONY: test-c
test-c: build-test-c
+test-c: SR_TEST_PYTEST_FLAGS := -vv -s
test-c:
- @python -m pytest -vv -s ./tests/c/
+ @$(call run_smartredis_tests_with_server,./tests/c)

# help: test-cpp - Build and run all C++ tests
.PHONY: test-cpp
test-cpp: build-test-cpp
test-cpp: build-unit-test-cpp
+test-cpp: SR_TEST_PYTEST_FLAGS := -vv -s
test-cpp:
- @python -m pytest -vv -s ./tests/cpp/
+ @$(call run_smartredis_tests_with_server,./tests/cpp)

# help: unit-test-cpp - Build and run unit tests for C++
.PHONY: unit-test-cpp
unit-test-cpp: build-unit-test-cpp
+unit-test-cpp: SR_TEST_PYTEST_FLAGS := -vv -s
unit-test-cpp:
- @python -m pytest -vv -s ./tests/cpp/unit-tests/
+ @$(call run_smartredis_tests_with_server,./tests/cpp/unit-tests)

# help: test-py - run python tests
.PHONY: test-py
+test-py: test-deps
+test-py: SR_PYTHON := ON
+test-py: lib
+test-py: SR_TEST_PYTEST_FLAGS := -vv
test-py:
- @PYTHONFAULTHANDLER=1 python -m pytest -vv ./tests/python/
+ @$(call run_smartredis_tests_with_server,./tests/python)

# help: test-fortran - run fortran tests
.PHONY: test-fortran
+test-fortran: SR_FORTRAN := ON
test-fortran: build-test-fortran
- @python -m pytest -vv ./tests/fortran/
+test-fortran: SR_TEST_PYTEST_FLAGS := -vv -s
+test-fortran:
+ @$(call run_smartredis_tests_with_server,./tests/fortran)

# help: testpy-cov - run python tests with coverage
.PHONY: testpy-cov
+testpy-cov: test-deps
+testpy-cov: SR_PYTHON := ON
+testpy-cov: SR_TEST_PYTEST_FLAGS := -vv
+testpy-cov: COV_FLAGS := --cov=./src/python/module/smartredis/
testpy-cov:
- @PYTHONFAULTHANDLER=1 python -m pytest --cov=./src/python/module/smartredis/ -vv ./tests/python/
-
-# help: testcpp-cov - run cpp unit tests with coverage
-.PHONY: testcpp-cov
-testcpp-cov: unit-test-cpp
- ./build-scripts/build_cpp_cov.sh
+ @$(call run_smartredis_tests_with_server,./tests/python)

# help: test-examples - Build and run all examples
.PHONY: test-examples
+test-examples: test-deps
test-examples: build-examples
+test-examples: SR_TEST_PYTEST_FLAGS := -vv -s
test-examples:
- @python -m pytest -vv -s ./examples
-
+ @$(call run_smartredis_tests_with_server,./examples)
+
+
+############################################################################
+# hidden build targets for third-party software
+
+# Hiredis (hidden build target)
+.PHONY: hiredis
+hiredis: install/lib/libhiredis.a
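+# Each hidden dependency target pairs a .PHONY alias (e.g. hiredis) with the
+# artifact it produces; since the file target below has no prerequisites, its
+# recipe runs only when the installed artifact is missing, so repeated
+# `make deps` invocations are cheap no-ops.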
+install/lib/libhiredis.a: + @rm -rf third-party/hiredis + @mkdir -p third-party + @cd third-party && \ + git clone $(HIREDIS_URL) hiredis --branch $(HIREDIS_VER) --depth=1 + @cd third-party/hiredis && \ + LIBRARY_PATH=lib CC=gcc CXX=g++ make PREFIX="../../install" static -j $(NPROC) && \ + LIBRARY_PATH=lib CC=gcc CXX=g++ make PREFIX="../../install" install && \ + rm -f ../../install/lib/libhiredis*.so* && \ + rm -f ../../install/lib/libhiredis*.dylib* && \ + echo "Finished installing Hiredis" + +# Redis-plus-plus (hidden build target) +.PHONY: redis-plus-plus +redis-plus-plus: install/lib/libredis++.a +install/lib/libredis++.a: + @rm -rf third-party/redis-plus-plus + @mkdir -p third-party + @cd third-party && \ + git clone $(RPP_URL) redis-plus-plus --branch $(RPP_VER) --depth=1 + @cd third-party/redis-plus-plus && \ + mkdir -p compile && \ + cd compile && \ + (cmake -DCMAKE_BUILD_TYPE=Release -DREDIS_PLUS_PLUS_BUILD_TEST=OFF \ + -DREDIS_PLUS_PLUS_BUILD_SHARED=OFF -DCMAKE_PREFIX_PATH="../../../install/lib/" \ + -DCMAKE_INSTALL_PREFIX="../../../install" -DCMAKE_CXX_STANDARD=17 \ + -DCMAKE_INSTALL_LIBDIR="lib" -DCMAKE_C_COMPILER=gcc -DCMAKE_CXX_COMPILER=g++ .. )&& \ + CC=gcc CXX=g++ make -j $(NPROC) && \ + CC=gcc CXX=g++ make install && \ + echo "Finished installing Redis-plus-plus" + +# Pybind11 (hidden build target) +.PHONY: pybind +pybind: third-party/pybind/include/pybind11/pybind11.h +third-party/pybind/include/pybind11/pybind11.h: + @mkdir -p third-party + @cd third-party && \ + git clone $(PYBIND_URL) pybind --branch $(PYBIND_VER) --depth=1 + @mkdir -p third-party/pybind/build && \ + echo "Finished installing Pybind11" + +# Redis (hidden test target) +.PHONY: redis +redis: third-party/redis/src/redis-server +third-party/redis/src/redis-server: + @mkdir -p third-party + @cd third-party && \ + git clone $(REDIS_URL) redis --branch $(REDIS_VER) --depth=1 + @cd third-party/redis && \ + CC=gcc CXX=g++ make MALLOC=libc -j $(NPROC) && \ + echo "Finished installing redis" + +# cudann-check (hidden test target) +# checks cuda dependencies for GPU build +.PHONY: cudann-check +cudann-check: +ifeq ($(SR_TEST_DEVICE),gpu) +ifndef CUDA_HOME + $(error ERROR: CUDA_HOME is not set) +endif +ifndef CUDNN_INCLUDE_DIR + $(error ERROR: CUDNN_INCLUDE_DIR is not set) +endif +ifeq (,$(wildcard $(CUDNN_INCLUDE_DIR)/cudnn.h)) + $(error ERROR: could not find cudnn.h at $(CUDNN_INCLUDE_DIR)) +endif +ifndef CUDNN_LIBRARY + $(error ERROR: CUDNN_LIBRARY is not set) +endif +ifeq (,$(wildcard $(CUDNN_LIBRARY)/libcudnn.so)) + $(error ERROR: could not find libcudnn.so at $(CUDNN_LIBRARY)) +endif +endif + +# RedisAI (hidden test target) +.PHONY: redisAI +redisAI: cudann-check +redisAI: third-party/RedisAI/$(SR_TEST_REDISAI_VER)/install-$(SR_TEST_DEVICE)/redisai.so +third-party/RedisAI/$(SR_TEST_REDISAI_VER)/install-$(SR_TEST_DEVICE)/redisai.so: + @echo in third-party/RedisAI/$(SR_TEST_REDISAI_VER)/install-$(SR_TEST_DEVICE)/redisai.so: + $(eval DEVICE_IS_GPU := $(shell test $(SR_TEST_DEVICE) == "cpu"; echo $$?)) + @mkdir -p third-party + @cd third-party && \ + rm -rf RedisAI/$(SR_TEST_REDISAI_VER) && \ + GIT_LFS_SKIP_SMUDGE=1 git clone --recursive $(REDISAI_URL) RedisAI/$(SR_TEST_REDISAI_VER) \ + --branch $(SR_TEST_REDISAI_VER) --depth=1 + -@cd third-party/RedisAI/$(SR_TEST_REDISAI_VER) && \ + CC=gcc CXX=g++ WITH_PT=1 WITH_TF=1 WITH_TFLITE=0 WITH_ORT=0 bash get_deps.sh \ + $(SR_TEST_DEVICE) && \ + CC=gcc CXX=g++ GPU=$(DEVICE_IS_GPU) WITH_PT=1 WITH_TF=1 WITH_TFLITE=0 WITH_ORT=0 \ + WITH_UNIT_TESTS=0 make -j $(NPROC) -C 
opt clean build && \ + echo "Finished installing RedisAI" + +# Catch2 (hidden test target) +.PHONY: catch2 +catch2: third-party/catch/single_include/catch2/catch.hpp +third-party/catch/single_include/catch2/catch.hpp: + @mkdir -p third-party + @cd third-party && \ + git clone $(CATCH2_URL) catch --branch $(CATCH2_VER) --depth=1 + @echo "Finished installing Catch2" + +# LCOV (hidden test target) +.PHONY: lcov +lcov: third-party/lcov/install/bin/lcov +third-party/lcov/install/bin/lcov: + @echo Installing LCOV + @mkdir -p third-party + @cd third-party && \ + git clone $(LCOV_URL) lcov --branch $(LCOV_VER) --depth=1 + @cd third-party/lcov && \ + mkdir -p install && \ + CC=gcc CXX=g++ make PREFIX=$(CWD)/third-party/lcov/install/ install && \ + echo "Finished installing LCOV" diff --git a/2023-01/smartsim/smartredis/README.md b/2023-01/smartsim/smartredis/README.md index c693ec08..9094a7da 100644 --- a/2023-01/smartsim/smartredis/README.md +++ b/2023-01/smartsim/smartredis/README.md @@ -32,12 +32,12 @@ RedisAI capabilities and include additional features for high performance computing (HPC) applications. SmartRedis provides clients in the following languages: -| Language | Version/Standard | -|------------|:-----------------:| -| Python | 3.7, 3.8, 3.9 | -| C++ | C++17 | -| C | C99 | -| Fortran | Fortran 2018 | +| Language | Version/Standard | +|------------|:----------------------------------------------:| +| Python | 3.7, 3.8, 3.9, 3.10 | +| C++ | C++17 | +| C | C99 | +| Fortran | Fortran 2018 (GNU/Intel), 2003 (PGI/Nvidia) | SmartRedis is used in the [SmartSim library](https://github.com/CrayLabs/SmartSim). SmartSim makes it easier to use common Machine Learning (ML) libraries like @@ -51,43 +51,48 @@ SmartSim in any Python, C++, C, or Fortran project. ## Using SmartRedis SmartRedis installation instructions are currently hosted as part of the -[SmartSim library installation instructions](https://www.craylabs.org/docs/installation.html#smartredis) +[SmartSim library installation instructions](https://www.craylabs.org/docs/installation_instructions/basic.html#smartredis) Additionally, detailed [API documents](https://www.craylabs.org/docs/api/smartredis_api.html) are also available as part of the SmartSim documentation. ## Dependencies -SmartRedis utilizes the following libraries. +SmartRedis utilizes the following libraries: - [NumPy](https://github.com/numpy/numpy) - - [Hiredis](https://github.com/redis/hiredis) 1.0.0 - - [Redis-plus-plus](https://github.com/sewenew/redis-plus-plus) 1.2.3 + - [Hiredis](https://github.com/redis/hiredis) 1.1.0 + - [Redis-plus-plus](https://github.com/sewenew/redis-plus-plus) 1.3.5 ## Publications The following are public presentations or publications using SmartRedis - [Collaboration with NCAR - CGD Seminar](https://www.youtube.com/watch?v=2e-5j427AS0) - - [Using Machine Learning in HPC Simulations - paper (pre-print)](https://arxiv.org/abs/2104.09355) + - [Using Machine Learning in HPC Simulations - paper](https://www.sciencedirect.com/science/article/pii/S1877750322001065) + - [Relexi — A scalable open source reinforcement learning framework for high-performance computing - paper](https://www.sciencedirect.com/science/article/pii/S2665963822001063) ## Cite -Please use the following citation when referencing SmartSim, SmartRedis, or any SmartSim related work. 
+Please use the following citation when referencing SmartSim, SmartRedis, or any SmartSim related work: - -Partee et al., “Using Machine Learning at Scale in HPC Simulations with SmartSim: -An Application to Ocean Climate Modeling,” arXiv:2104.09355, Apr. 2021, -[Online]. Available: http://arxiv.org/abs/2104.09355. + Partee et al., "Using Machine Learning at scale in numerical simulations with SmartSim: + An application to ocean climate modeling", + Journal of Computational Science, Volume 62, 2022, 101707, ISSN 1877-7503. + Open Access: https://doi.org/10.1016/j.jocs.2022.101707. ### bibtex - ```latex - @misc{partee2021using, - title={Using Machine Learning at Scale in HPC Simulations with SmartSim: An Application to Ocean Climate Modeling}, - author={Sam Partee and Matthew Ellis and Alessandro Rigazzi and Scott Bachman and Gustavo Marques and Andrew Shao and Benjamin Robbins}, - year={2021}, - eprint={2104.09355}, - archivePrefix={arXiv}, - primaryClass={cs.CE} - } - ``` + @article{PARTEE2022101707, + title = {Using Machine Learning at scale in numerical simulations with SmartSim: + An application to ocean climate modeling}, + journal = {Journal of Computational Science}, + volume = {62}, + pages = {101707}, + year = {2022}, + issn = {1877-7503}, + doi = {https://doi.org/10.1016/j.jocs.2022.101707}, + url = {https://www.sciencedirect.com/science/article/pii/S1877750322001065}, + author = {Sam Partee and Matthew Ellis and Alessandro Rigazzi and Andrew E. Shao + and Scott Bachman and Gustavo Marques and Benjamin Robbins}, + keywords = {Deep learning, Numerical simulation, Climate modeling, High performance computing, SmartSim}, + } diff --git a/2023-01/smartsim/smartredis/archive/README.txt b/2023-01/smartsim/smartredis/archive/README.txt new file mode 100644 index 00000000..9a82e2ca --- /dev/null +++ b/2023-01/smartsim/smartredis/archive/README.txt @@ -0,0 +1,8 @@ +This folder contains files that are not actively being used but are +being kept around as they contain information that may be of use +some day. + +Files: + . build-keydb.sh: bash script to build the KeyDB alternative to Redis + . slurm_cluster.py: Python script to set up a Redis cluster under Slurm + \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/utils/create_cluster/slurm_cluster.py b/2023-01/smartsim/smartredis/archive/slurm_cluster.py similarity index 100% rename from 2023-01/smartsim/smartredis/utils/create_cluster/slurm_cluster.py rename to 2023-01/smartsim/smartredis/archive/slurm_cluster.py diff --git a/2023-01/smartsim/smartredis/build-scripts/build-catch.sh b/2023-01/smartsim/smartredis/build-scripts/build-catch.sh deleted file mode 100755 index 897f6f17..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build-catch.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash - -# Install Catch -if [[ -f ./catch/single_include/catch2/catch.hpp ]]; then - echo "Catch has already been download and installed" -else - echo "Installing Catch" - if [[ ! 
-d "./catch" ]]; then - git clone https://github.com/catchorg/Catch2.git catch --branch v2.13.6 --depth=1 - else - echo "Catch downloaded" - fi - echo "Finished installing Catch" -fi diff --git a/2023-01/smartsim/smartredis/build-scripts/build-keydb.sh b/2023-01/smartsim/smartredis/build-scripts/build-keydb.sh deleted file mode 100755 index 10ca7db2..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build-keydb.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# Install Keydb -found_keydb=$(which keydb-server > /dev/null 2<&1) -if [[ -x "$found_keydb" ]] ; then - echo "KeyDB is installed" -else - if [[ -d "./KeyDB" ]] ; then - echo "KeyDB has already been downloaded" - export PATH="$(pwd)/KeyDB/src:${PATH}" - echo "Added KeyDB to PATH" - else - echo "Installing KeyDB" - git clone https://github.com/JohnSully/KeyDB.git --branch v5.3.3 --depth=1 - cd KeyDB/ - CC=gcc CXX=g++ make -j 2 - cd .. - export PATH="$(pwd)/KeyDB/src:${PATH}" - echo "Finished installing KeyDB" - fi -fi diff --git a/2023-01/smartsim/smartredis/build-scripts/build-lcov.sh b/2023-01/smartsim/smartredis/build-scripts/build-lcov.sh deleted file mode 100755 index 957f78d8..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build-lcov.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# Install LCOV -if [[ -f ./lcov/install/usr/local/bin/lcov ]]; then - echo "LCOV has already been download and installed" -else - echo "Installing LCOV" - if [[ ! -d "./lcov" ]]; then - git clone https://github.com/linux-test-project/lcov.git --branch v1.15 --depth=1 lcov - else - echo "LCOV downloaded" - fi - cd lcov - mkdir install - echo "Building LCOV v1.15" - CC=gcc CXX=g++ DESTDIR="install/" make install - cd ../ -fi \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/build-scripts/build-redis.sh b/2023-01/smartsim/smartredis/build-scripts/build-redis.sh deleted file mode 100755 index 6ac92284..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build-redis.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -# get the number of processors -NPROC=$(python -c "import multiprocessing as mp; print(mp.cpu_count())") - -#Install Redis -if [[ -f ./redis/src/redis-server ]]; then - echo "Redis has already been downloaded and installed" -else - if [[ ! -d "./redis" ]]; then - git clone https://github.com/redis/redis.git redis - cd redis - git checkout tags/6.0.8 - cd .. - else - echo "Redis downloaded" - fi - cd redis - echo "Building redis 6.0.8" - CC=gcc CXX=g++ make MALLOC=libc -j $NPROC - echo "Finished installing redis" - cd ../ -fi diff --git a/2023-01/smartsim/smartredis/build-scripts/build-redisai-cpu.sh b/2023-01/smartsim/smartredis/build-scripts/build-redisai-cpu.sh deleted file mode 100755 index b5ee3a4a..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build-redisai-cpu.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# get the number of processors -NPROC=$(python -c "import multiprocessing as mp; print(mp.cpu_count())") - -#Install RedisAI -if [[ -f ./RedisAI/install-cpu/redisai.so ]]; then - echo "RedisAI CPU has already been downloaded and installed" -else - if [[ ! -d "./RedisAI" ]]; then - GIT_LFS_SKIP_SMUDGE=1 git clone --recursive https://github.com/RedisAI/RedisAI.git --branch v1.2.3 --depth=1 RedisAI - cd RedisAI - cd .. 
- else - echo "RedisAI downloaded" - fi - cd RedisAI - echo "Downloading RedisAI CPU dependencies" - CC=gcc CXX=g++ WITH_PT=1 WITH_TF=1 WITH_TFLITE=0 WITH_ORT=0 bash get_deps.sh cpu - echo "Building RedisAI" - CC=gcc CXX=g++ GPU=0 WITH_PT=1 WITH_TF=1 WITH_TFLITE=0 WITH_ORT=0 WITH_UNIT_TESTS=0 make -j $NPROC -C opt clean build - - if [ -f "./install-cpu/redisai.so" ]; then - echo "Finished installing RedisAI" - cd ../ - else - echo "ERROR: RedisAI failed to build" - exit 1 - fi -fi diff --git a/2023-01/smartsim/smartredis/build-scripts/build-redisai-gpu.sh b/2023-01/smartsim/smartredis/build-scripts/build-redisai-gpu.sh deleted file mode 100755 index ea1c6187..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build-redisai-gpu.sh +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/bash - -# get the number of processors -NPROC=$(python -c "import multiprocessing as mp; print(mp.cpu_count())") - -#Install RedisAI -if [[ -f ./RedisAI/install-gpu/redisai.so ]]; then - echo "RedisAI GPU has already been downloaded and installed" -else - - # check for cudnn includes - if [ -z "$CUDA_HOME" ]; then - echo "ERROR: CUDA_HOME is not set" - exit 1 - else - echo "Found CUDA_HOME: $CUDA_HOME" - fi - - # check for cudnn includes - if [ -z "$CUDNN_INCLUDE_DIR" ]; then - echo "ERROR: CUDNN_INCLUDE_DIR is not set" - exit 1 - else - echo "Found CUDNN_INCLUDE_DIR: $CUDNN_INCLUDE_DIR " - if [ -f "$CUDNN_INCLUDE_DIR/cudnn.h" ]; then - echo "Found cudnn.h at $CUDNN_INCLUDE_DIR" - else - echo "ERROR: could not find cudnn.h at $CUDNN_INCLUDE_DIR" - exit 1 - fi - fi - - # check for cudnn library - if [ -z "$CUDNN_LIBRARY" ]; then - echo "ERROR: CUDNN_LIBRARY is not set" - exit 1 - else - echo "Found CUDNN_LIBRARY: $CUDNN_LIBRARY" - if [ -f "$CUDNN_LIBRARY/libcudnn.so" ]; then - echo "Found libcudnn.so at $CUDNN_LIBRARY" - else - echo "ERROR: could not find libcudnn.so at $CUDNN_LIBRARY" - exit 1 - fi - fi - - - if [[ ! -d "./RedisAI" ]]; then - GIT_LFS_SKIP_SMUDGE=1 git clone --recursive https://github.com/RedisAI/RedisAI.git --branch v1.2.3 --depth=1 RedisAI - cd RedisAI - cd .. - else - echo "RedisAI downloaded" - fi - cd RedisAI - echo "Downloading RedisAI CPU dependencies" - CC=gcc CXX=g++ WITH_PT=1 WITH_TF=1 WITH_TFLITE=0 WITH_ORT=0 bash get_deps.sh gpu - echo "Building RedisAI" - CC=gcc CXX=g++ GPU=1 WITH_PT=1 WITH_TF=1 WITH_TFLITE=0 WITH_ORT=0 WITH_UNIT_TESTS=0 make -j $NPROC -C opt clean build - - if [ -f "./install-gpu/redisai.so" ]; then - echo "Finished installing RedisAI" - cd ../ - else - echo "ERROR: RedisAI failed to build" - exit 1 - fi -fi - diff --git a/2023-01/smartsim/smartredis/build-scripts/build_c_tests.sh b/2023-01/smartsim/smartredis/build-scripts/build_c_tests.sh deleted file mode 100755 index effe2e9f..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_c_tests.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -CMAKE=$(which cmake) - -cd ./tests/c/ - -# setup build dirs -mkdir build -cd ./build - -# TODO add platform dependent build step here -$CMAKE .. - -if [ $? != 0 ]; then - echo "ERROR: cmake for C tests failed" - cd .. - exit 1 -fi - -make -j 4 - -if [ $? != 0 ]; then - echo "ERROR: failed to make C tests" - cd .. 
- exit 1 -fi - -cd ../ - -echo \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/build-scripts/build_cpp_cov.sh b/2023-01/smartsim/smartredis/build-scripts/build_cpp_cov.sh deleted file mode 100755 index a4e3e8a2..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_cpp_cov.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -BASEDIR=$(pwd) -CPP_COV_DIR=$BASEDIR/tests/cpp/unit-tests/build/CMakeFiles/cpp_unit_tests.dir -LCOV_EXEC=$BASEDIR/third-party/lcov/install/usr/local/bin/lcov -GEN_HTML_EXEC=$BASEDIR/third-party/lcov/install/usr/local/bin/genhtml - - - -cd ./CMakeFiles/cpp_unit_tests.dir - -EXCLUDE_LIST="" -ENV_VAR=$(echo "$SMARTREDIS_TEST_CLUSTER" | tr '[:upper:]' '[:lower:]') -case $ENV_VAR in - "false") - EXCLUDE_LIST="*rediscluster.cpp*" - ;; - "true") - EXCLUDE_LIST="*redis.cpp*" - ;; -esac - -$LCOV_EXEC --capture --directory $CPP_COV_DIR --output-file $CPP_COV_DIR/coverage.info --exclude "$EXCLUDE_LIST" --include "$BASEDIR/src*" --include "$BASEDIR/include*" -$GEN_HTML_EXEC $CPP_COV_DIR/coverage.info --output-directory $CPP_COV_DIR/../../htmlcov - -echo "Coverage information available in file: $CPP_COV_DIR/../../htmlcov/index.html" - -cd $BASEDIR \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/build-scripts/build_cpp_tests.sh b/2023-01/smartsim/smartredis/build-scripts/build_cpp_tests.sh deleted file mode 100755 index 79e7931a..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_cpp_tests.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -CMAKE=$(which cmake) - -cd ./tests/cpp/ - -# setup build dirs -mkdir build -cd ./build - -# TODO add platform dependent build step here -$CMAKE .. - -if [ $? != 0 ]; then - echo "ERROR: cmake for CPP tests failed" - cd .. - exit 1 -fi - -make -j 4 - -if [ $? != 0 ]; then - echo "ERROR: failed to make CPP tests" - cd .. - exit 1 -fi - -cd ../ - -echo diff --git a/2023-01/smartsim/smartredis/build-scripts/build_cpp_unit_tests.sh b/2023-01/smartsim/smartredis/build-scripts/build_cpp_unit_tests.sh deleted file mode 100755 index e835c8ab..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_cpp_unit_tests.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -# get the number of processors -NPROC=$(python -c "import multiprocessing as mp; print(mp.cpu_count())") - -CMAKE=$(which cmake) - -BASEDIR=$(pwd) - -cd ./tests/cpp/unit-tests - -if [[ -d "./build" ]]; then - echo "Removing previous cpp unit test build directory" - rm -rf ./build -fi - -# setup build dirs -mkdir build -cd ./build - -# TODO add platform dependent build step here -$CMAKE .. - -if [ $? != 0 ]; then - echo "ERROR: cmake for CPP tests failed" - cd .. - exit 1 -fi - -make -j$NPROC - -if [ $? != 0 ]; then - echo "ERROR: failed to make CPP tests" - cd .. - exit 1 -fi - -cd $BASEDIR \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/build-scripts/build_deps.sh b/2023-01/smartsim/smartredis/build-scripts/build_deps.sh deleted file mode 100755 index a81b3c07..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_deps.sh +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash - -if [[ ! -d "./third-party" ]]; then - mkdir ./third-party -fi - -if [[ ! -d "./install" ]]; then - mkdir ./install -fi - -cd ./third-party - -# get the number of processors -NPROC=$(python -c "import multiprocessing as mp; print(mp.cpu_count())") -# get python installed cmake -CMAKE=$(which cmake) - -# Install Hiredis -if ls ../install/lib/libhiredis.a 1>/dev/null 2>&1; then - echo "Hiredis has already been installed" -else - if [[ ! 
-d "./hiredis" ]]; then - git clone https://github.com/redis/hiredis.git hiredis --branch v1.0.0 --depth=1 - echo "Hiredis downloaded" - fi - cd hiredis - - LIBRARY_PATH=lib CC=gcc CXX=g++ make PREFIX="$(pwd)/../../install" static -j $NPROC - LIBRARY_PATH=lib CC=gcc CXX=g++ make PREFIX="$(pwd)/../../install" install - cd ../ - # delete shared libraries - rm ../install/lib/*.so - rm ../install/lib/*.dylib - echo "Finished installing Hiredis" -fi - - -#Install Redis-plus-plus -if ls ../install/lib/libredis++.a 1>/dev/null 2>&1; then - echo "Redis-plus-plus has already been installed" -else - if [[ ! -d "./redis-plus-plus" ]]; then - git clone https://github.com/sewenew/redis-plus-plus.git redis-plus-plus --branch 1.3.2 --depth=1 - echo "Redis-plus-plus downloaded" - fi - cd redis-plus-plus - #ex -s -c '2i|SET_PROPERTY(GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS TRUE)' -c x CMakeLists.txt - mkdir compile - cd compile - - $CMAKE -DCMAKE_BUILD_TYPE=Release -DREDIS_PLUS_PLUS_BUILD_TEST=OFF -DREDIS_PLUS_PLUS_BUILD_SHARED=OFF -DCMAKE_PREFIX_PATH="$(pwd)../../../install/lib/" -DCMAKE_INSTALL_PREFIX="$(pwd)/../../../install" -DCMAKE_CXX_STANDARD=17 .. - CC=gcc CXX=g++ make -j $NPROC - CC=gcc CXX=g++ make install - cd ../../ - echo "Finished installing Redis-plus-plus" -fi - -# Install Pybind11 -if [[ -d "./pybind" ]]; then - echo "PyBind11 has already been downloaded and installed" -else - git clone https://github.com/pybind/pybind11.git pybind --branch v2.6.2 --depth=1 - cd pybind - mkdir build - cd .. - echo "PyBind11 downloaded" -fi - - -cd ../ diff --git a/2023-01/smartsim/smartredis/build-scripts/build_fortran_tests.sh b/2023-01/smartsim/smartredis/build-scripts/build_fortran_tests.sh deleted file mode 100755 index 2de51ae9..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_fortran_tests.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -CMAKE=$(which cmake) - -cd ./tests/fortran/ - -# setup build dirs -mkdir build -cd ./build - -DO_FORTRAN="yes" - -if [ "$(uname)" == "Darwin" ]; then - DO_FORTRAN="yes" -fi - -if [[ $DO_FORTRAN == "yes" ]]; then - # TODO add platform dependent build step here - $CMAKE .. - - if [ $? != 0 ]; then - echo "ERROR: cmake for Fortran tests failed" - cd .. - exit 1 - fi - - make - - if [ $? != 0 ]; then - echo "ERROR: failed to make Fortran tests" - cd .. - exit 1 - fi - - cd ../ - echo "Fortran tests built" -else - echo "Skipping Fortran test build" -fi - diff --git a/2023-01/smartsim/smartredis/build-scripts/build_lib.sh b/2023-01/smartsim/smartredis/build-scripts/build_lib.sh deleted file mode 100755 index 5ac209c2..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_lib.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -# get the number of processors -NPROC=$(python -c "import multiprocessing as mp; print(mp.cpu_count())") - -CMAKE=$(which cmake) - -# Any command line arguments are assumed to be CMake arguments -CMAKE_ARGS=$@ - -# Remove existing module -if [ -f ./src/python/module/smartredis/smartredisPy.*.so ]; then - echo "Removing existing module installation" - rm ./src/python/module/smartredis/smartredisPy.*.so -fi - -if [[ -d "./build" ]]; then - echo "Removing previous build directory" - rm -rf ./build -fi - -# make a new build directory and invoke cmake -mkdir build -cd build -$CMAKE .. 
$CMAKE_ARGS -make -j $NPROC -make install - -if [ -f ./../install/lib/libsmartredis.so ]; then - echo "Finished building and installing libsmartredis" -else - echo "ERROR: libsmartredis failed to build and install" - exit 1 -fi - -if [ -f ./smartredisPy.*.so ]; then - echo "Finished building smartredisPy module" - # move python module to module directory - cp smartredisPy.*.so ../src/python/module/smartredis/ - cd ../ -else - echo "ERROR: smartredisPy module failed to compile" - exit 1 -fi diff --git a/2023-01/smartsim/smartredis/build-scripts/build_parallel_examples.sh b/2023-01/smartsim/smartredis/build-scripts/build_parallel_examples.sh deleted file mode 100755 index ab8230ef..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_parallel_examples.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -CMAKE=$(which cmake) - -cd ./examples/parallel/fortran/ - -DO_FORTRAN="yes" - -if [ "$(uname)" == "Darwin" ]; then - DO_FORTRAN="no" -fi - -if [[ $DO_FORTRAN == "yes" ]]; then - - # setup build dirs - mkdir build - cd ./build - - # TODO add platform dependent build step here - $CMAKE .. - - if [ $? != 0 ]; then - echo "ERROR: cmake for parallel Fortran examples failed" - cd .. - exit 1 - fi - - make - - if [ $? != 0 ]; then - echo "ERROR: failed to make Fortran parallel examples" - cd .. - exit 1 - fi - - cd ../ - - echo "Fortran parallel examples built" -else - echo "Skipping Fortran parallel example build" -fi - -cd ../cpp/ - -# setup build dirs -mkdir build -cd ./build - -# TODO add platform dependent build step here -$CMAKE .. - -if [ $? != 0 ]; then - echo "ERROR: cmake for CPP parallel examples failed" - cd .. - exit 1 -fi - -make -j 4 - -if [ $? != 0 ]; then - echo "ERROR: failed to make CPP parallel examples" - cd .. - exit 1 -fi diff --git a/2023-01/smartsim/smartredis/build-scripts/build_serial_examples.sh b/2023-01/smartsim/smartredis/build-scripts/build_serial_examples.sh deleted file mode 100755 index df17efb4..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_serial_examples.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/bash - -CMAKE=$(which cmake) - -cd ./examples/serial/c/ - -# setup build dirs -mkdir build -cd ./build - -# TODO add platform dependent build step here -$CMAKE .. - -if [ $? != 0 ]; then - echo "ERROR: cmake for C serial examples failed" - cd .. - exit 1 -fi - -make -j 4 - -if [ $? != 0 ]; then - echo "ERROR: failed to make C serial examples" - cd .. - exit 1 -fi - -cd ../ - -echo - -cd ../fortran - - -DO_FORTRAN="yes" - -if [ "$(uname)" == "Darwin" ]; then - DO_FORTRAN="yes" -fi - -if [[ $DO_FORTRAN == "yes" ]]; then - - # setup build dirs - mkdir build - cd ./build - - # TODO add platform dependent build step here - $CMAKE .. - - if [ $? != 0 ]; then - echo "ERROR: cmake for parallel Fortran examples failed" - cd .. - exit 1 - fi - - make - - if [ $? != 0 ]; then - echo "ERROR: failed to make Fortran parallel examples" - cd .. - exit 1 - fi - - cd ../ - - echo "Fortran parallel examples built" -else - echo "Skipping Fortran parallel example build" -fi - -cd ../cpp/ - -# setup build dirs -mkdir build -cd ./build - -# TODO add platform dependent build step here -$CMAKE .. - -if [ $? != 0 ]; then - echo "ERROR: cmake for CPP parallel examples failed" - cd .. - exit 1 -fi - -make -j 4 - -if [ $? != 0 ]; then - echo "ERROR: failed to make CPP parallel examples" - cd .. 
- exit 1 -fi diff --git a/2023-01/smartsim/smartredis/build-scripts/build_test_deps.sh b/2023-01/smartsim/smartredis/build-scripts/build_test_deps.sh deleted file mode 100755 index ac846e63..00000000 --- a/2023-01/smartsim/smartredis/build-scripts/build_test_deps.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash - -# set variables for RedisAI -RAI_BUILD_TYPE=${1:-"cpu"} -RAI_PT=${2:-1} -RAI_TF=${3:-1} -RAI_TFL=${4:-0} -RAI_ONNX=${5:-0} - -# Make third-party directory if it doesn't exist -if [[ ! -d "./third-party" ]]; then - mkdir ./third-party -fi -cd ./third-party - -# build redis -../build-scripts/build-redis.sh -if [ $? != 0 ]; then - echo "ERROR: Redis failed to build" - cd .. - exit 1 -fi - - -if [[ $RAI_BUILD_TYPE == "gpu" ]]; then - echo "Building RedisAI for GPU..." - ../build-scripts/build-redisai-gpu.sh $RAI_PT $RAI_TF $RAI_TFL $RAI_ONNX - if [ $? != 0 ]; then - echo "ERROR: RedisAI GPU failed to build" - cd .. - exit 1 - fi -else - echo "Building RedisAI for CPU..." - ../build-scripts/build-redisai-cpu.sh $RAI_PT $RAI_TF $RAI_TFL $RAI_ONNX - if [ $? != 0 ]; then - echo "ERROR: RedisAI CPU failed to build" - cd .. - exit 1 - fi -fi - - -# build catch -../build-scripts/build-catch.sh -if [ $? != 0 ]; then - echo "ERROR: Catch failed to build" - cd .. - exit 1 -fi - -# build LCOV -../build-scripts/build-lcov.sh -if [ $? != 0 ]; then - echo "ERROR: Catch failed to build" - cd .. - exit 1 -fi - -cd ../ diff --git a/2023-01/smartsim/smartredis/codecov.yml b/2023-01/smartsim/smartredis/codecov.yml index 5f079a63..8a197f41 100644 --- a/2023-01/smartsim/smartredis/codecov.yml +++ b/2023-01/smartsim/smartredis/codecov.yml @@ -2,9 +2,13 @@ codecov: require_ci_to_pass: yes coverage: - precision: 2 - round: down - range: "70...100" + status: + project: + default: + target: 0% + patch: + default: + target: 0% parsers: gcov: @@ -17,6 +21,6 @@ parsers: comment: layout: "reach,diff,flags,files" behavior: default # update comment if already exists, otherwise post new - require_changes: no - require_base: no + require_changes: yes + require_base: yes require_head: yes diff --git a/2023-01/smartsim/smartredis/conftest.py b/2023-01/smartsim/smartredis/conftest.py index 8b4eca3a..a379dbdf 100644 --- a/2023-01/smartsim/smartredis/conftest.py +++ b/2023-01/smartsim/smartredis/conftest.py @@ -53,10 +53,6 @@ np.uint64, ] -@pytest.fixture -def use_cluster(): - return os.getenv('SMARTREDIS_TEST_CLUSTER', "").lower() == 'true' - @pytest.fixture def mock_data(): return MockTestData @@ -136,3 +132,24 @@ def create_torch_cnn(filepath=None): torch.jit.save(module, buffer) str_model = buffer.getvalue() return str_model + +# Add a build type option to pytest command lines +def pytest_addoption(parser): + parser.addoption("--build", action="store", default="Release") + parser.addoption("--link", action="store", default="Shared") + parser.addoption("--sr_fortran", action="store", default="OFF") + +# Fixture to retrieve the build type setting +@pytest.fixture(scope="session") +def build(request): + return request.config.getoption("--build") + +# Fixture to retrieve the link type setting +@pytest.fixture(scope="session") +def link(request): + return request.config.getoption("--link") + +# Fixture to retrieve the Fortran enablement setting +@pytest.fixture(scope="session") +def sr_fortran(request): + return request.config.getoption("--sr_fortran") diff --git a/2023-01/smartsim/smartredis/doc/advanced_topics.rst b/2023-01/smartsim/smartredis/doc/advanced_topics.rst new file mode 100644 index 00000000..a84d090a --- 
/dev/null
+++ b/2023-01/smartsim/smartredis/doc/advanced_topics.rst
@@ -0,0 +1,185 @@
+***************
+Advanced Topics
+***************
+
+This page of documentation is reserved for advanced topics
+that may not be needed for all users.
+
+.. _advanced-topics-dataset-aggregation:
+
+Dataset Aggregation
+===================
+
+In addition to the ability to work with individual datasets, SmartRedis lets
+users manage lists of datasets and work with them collectively. This is
+particularly useful for producer/consumer relationships.
+
+The DataSet Aggregation API manages references to multiple ``DataSet`` objects
+on one or more database nodes through an interface referred to as
+``aggregation lists``.
+An ``aggregation list`` in SmartRedis stores references to
+``DataSet`` objects that are stored in the database. The SmartRedis client
+in one application can append ``DataSet`` objects to the ``aggregation list``;
+subsequently, ``SmartRedis`` clients in the same application or a different
+application can retrieve some or all of the ``DataSet`` objects referenced
+in that ``aggregation list``.
+
+Appending to a DataSet aggregation list
+---------------------------------------
+
+The C++ client function to append a reference to a ``DataSet`` to an
+aggregation list is shown below:
+
+.. code-block:: cpp
+
+ // C++ aggregation list append interface
+ void append_to_list(const std::string& list_name,
+ const DataSet& dataset);
+
+NOTE: The ``DataSet`` must have already been added to the database via
+the ``put_dataset()`` method in the SmartRedis ``Client``.
+
+The above function will append a reference to the provided ``DataSet`` to the
+``aggregation list``, which can be referenced in all user-facing functions
+by the provided list name. Note that a list can be appended to by
+any client in the same or a different application. Implicitly, all
+``DataSets``, when appended, are added to the end of the list. If the list does not
+already exist, it is automatically created.
+
+Retrieving the DataSets in an aggregation list
+----------------------------------------------
+
+To retrieve the ``DataSets`` referenced in an ``aggregation list``,
+the SmartRedis ``Client`` provides an API function that
+returns an iterable object containing all of the ``DataSets``
+appended to the ``aggregation list``. For example, the C++ client
+function to retrieve the entire ``aggregation list`` contents is shown below:
+
+.. code-block:: cpp
+
+ // C++ aggregation list retrieval interface
+ std::vector<DataSet> get_datasets_from_list(const std::string& list_name);
+
+It is also possible to retrieve a subset of the DataSets within an aggregation
+list:
+
+.. code-block:: cpp
+
+ // C++ aggregation list subset retrieval interface
+ std::vector<DataSet> get_dataset_list_range(const std::string& list_name,
+ const int start_index,
+ const int end_index);
+
+The start_index and end_index may be specified as negative numbers.
+In this case, the offset is from the most recently appended DataSet.
+For example, an offset of -1 is the last element in the list, and -3 is the
+antepenultimate DataSet.
+
+An application can determine how many DataSets are in an aggregation list
+via the following API:
+
+.. code-block:: cpp
+
+ // C++ aggregation list length query
+ int get_list_length(const std::string& list_name);
+
+
+Synchronization
+---------------
+
+A SmartRedis Client can use the following APIs to block until a
+predicate is matched on the length of the list:
+
code-block:: cpp
+
+    // Block until the list reaches a specific length
+    bool poll_list_length(const std::string& name, int list_length,
+                          int poll_frequency_ms, int num_tries);
+
+    // Block until the list reaches or exceeds a specific length
+    bool poll_list_length_gte(const std::string& name, int list_length,
+                              int poll_frequency_ms, int num_tries);
+
+    // Block until the list is no longer than a specific length
+    bool poll_list_length_lte(const std::string& name, int list_length,
+                              int poll_frequency_ms, int num_tries);
+
+Other operations
+----------------
+
+Finally, aggregation lists may be copied, renamed, or deleted. Note
+that there is no synchronization support for these operations; performing
+these operations when multiple applications are accessing the list may
+lead to race conditions:
+
+.. code-block:: cpp
+
+    // Copy an aggregation list
+    void copy_list(const std::string& src_name,
+                   const std::string& dest_name);
+
+    // Rename an aggregation list
+    void rename_list(const std::string& src_name,
+                     const std::string& dest_name);
+
+    // Delete an aggregation list
+    void delete_list(const std::string& list_name);
+
+.. _advanced-topics-multiple-databases:
+
+Multiple Database Support
+=========================
+
+SmartRedis offers clients the ability to interact with multiple databases
+concurrently. Each Client represents a connection to a specific database,
+but an application with multiple clients can have each one connected to a
+different database.
+
+Differentiating databases via environment variables
+---------------------------------------------------
+
+In order to differentiate the databases that clients connect to, SmartRedis
+relies on differentiation in the environment variables that the client uses
+to initialize itself. Of primary importance here are the SSDB and SR_DB_TYPE
+variables, but all environment variables (other than SR_LOG_LEVEL and
+SR_LOG_FILE, which are shared for all databases) are duplicated in order to
+represent additional databases.
+
+This duplication is done via suffixing: an underscore and the identifier for
+the database are suffixed to the base variable names to derive a set of
+environment variables specific to each database. For example, SSDB_INPUT
+and SR_DB_TYPE_INPUT reflect a database named ``INPUT``.
+
+For backward compatibility, the default database is anonymous and thus its
+environment variables use neither an underscore nor a database name. This
+behavior exactly matches earlier releases of SmartRedis.
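To make the suffix scheme concrete, a hypothetical two-database environment could be configured as follows (the addresses are illustrative; the ``Clustered``/``Standalone`` values mirror those used elsewhere in this documentation):

.. code-block:: bash

    # Default (anonymous) database: base variable names, no suffix
    export SSDB="127.0.0.1:6379"
    export SR_DB_TYPE="Standalone"

    # Additional database named INPUT: each variable gains an _INPUT suffix
    export SSDB_INPUT="127.0.0.1:6380"
    export SR_DB_TYPE_INPUT="Clustered"

+Instantiating Clients for named databases
+-----------------------------------------
+
+Beginning with version 0.5.0 of SmartRedis, users can initialize Clients
+using a new construction method that accepts a ConfigOptions object as
+an input parameter. In turn, the ConfigOptions object can be constructed
+via the ConfigOptions create_from_environment() factory method, which
+accepts the suffix to be applied to environment variables when looking
+them up (or an empty string, to indicate that the default names should be
+used, as for an anonymous database). Depending on the programming language
+for the SmartRedis client, variously None, NULL, or skipping the
+ConfigOptions parameter altogether also implicitly requests an anonymous
+database.
+
+For example, to create a Client for a database named ``INPUT`` in C++,
+one would write the following code:
+
+.. 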
code-block:: cpp
+
+    // Create a ConfigOptions object
+    auto co = ConfigOptions::create_from_environment("INPUT");
+
+    // Pass it to the Client constructor along with an identifier for logging
+    Client* input_client = new Client(co, "input_client");
+
+Note that with the Client constructor that accepts a ConfigOptions object,
+there is no parameter for whether the database is clustered or not. This is
+because the type of database is now read in from the SR_DB_TYPE environment
+variable (with optional {_suffix}).
\ No newline at end of file
diff --git a/2023-01/smartsim/smartredis/doc/changelog.rst b/2023-01/smartsim/smartredis/doc/changelog.rst
index 39a4d9c0..1be07cd3 100644
--- a/2023-01/smartsim/smartredis/doc/changelog.rst
+++ b/2023-01/smartsim/smartredis/doc/changelog.rst
@@ -4,15 +4,225 @@ Changelog

Development branch
------------------

-To be released at some future date
+To be released at some future point in time

-Note
+Description
+
+- Added test cases for all Client construction parameter combinations
+- Centralized dependency tracking to setup.cfg
+- Improved robustness of Python client construction
+- Updated Client and Dataset documentation
+- Expanded list of allowed characters in the SSDB address
+- Added coverage to SmartRedis Python API functions
+- Improved responsiveness of library when attempting connection to a missing backend database
+- Moved testing of examples to on-commit testing in CI/CD pipeline
+- Added name retrieval function to the DataSet object
+- Updated RedisAI version used in post-commit check-in testing in Github pipeline
+- Allow strings in Python interface for Client.run_script, Client.run_script_multigpu
+- Improved support for model execution batching
+- Added support for model chunking
+- Updated the third-party RedisAI component
+- Updated the third-party lcov component
+- Added link to contributing guidelines
+- Added support for multiple backend databases via a new Client constructor that accepts a ConfigOptions object
+
+Detailed Notes
+
+- Added test cases for all Client construction parameter combinations (PR422_)
+- Merged dependency lists from requirements.txt and requirements-dev.txt into setup.cfg to have only one set of dependencies going forward (PR420_)
+- Improved robustness of Python client construction by adding detection of invalid kwargs (PR419_), (PR421_)
+- Updated the Client and Dataset API documentation to clarify which interacts with the backend db (PR416_)
+- The SSDB address can now include '-' and '_' as special characters in the name. This gives users more options for naming the UDS socket file (PR415_)
+- Added tests to increase Python code coverage
+- Employed a Redis++ ConnectionsObject in the connection process to establish a TCP timeout of 100ms during connection attempts (PR413_)
+- Moved testing of examples to on-commit testing in CI/CD pipeline (PR412_)
+- Added a function to the DataSet class and added a test
+- Updated RedisAI version used in post-commit check-in testing in Github pipeline to a version that supports fetch of model chunking size (PR408_)
+- Allow users to pass single keys for the inputs and outputs parameters as a string for Python run_script and run_script_multigpu
+- Exposed access to the Redis.AI MINBATCHTIMEOUT parameter, which limits the delay in model execution when trying to accumulate multiple executions in a batch (PR406_)
+- Models will now be automatically chunked when sent to/received from the backend database.
This allows use of models greater than 511MB in size. (PR404_)
+- Updated from RedisAI v1.2.3 (test target)/v1.2.4 and v1.2.5 (CI/CD pipeline) to v1.2.7 (PR402_)
+- Updated lcov from version 1.15 to 2.0 (PR396_)
+- Created a CONTRIBUTIONS.md file that points to the contribution guidelines for both SmartSim and SmartRedis (PR395_)
+- Migrated to ConfigOptions-based Client construction, adding multiple database support (PR353_)
+
+.. _PR422: https://github.com/CrayLabs/SmartRedis/pull/422
+.. _PR421: https://github.com/CrayLabs/SmartRedis/pull/421
+.. _PR420: https://github.com/CrayLabs/SmartRedis/pull/420
+.. _PR419: https://github.com/CrayLabs/SmartRedis/pull/419
+.. _PR416: https://github.com/CrayLabs/SmartRedis/pull/416
+.. _PR415: https://github.com/CrayLabs/SmartRedis/pull/415
+.. _PR414: https://github.com/CrayLabs/SmartRedis/pull/414
+.. _PR413: https://github.com/CrayLabs/SmartRedis/pull/413
+.. _PR412: https://github.com/CrayLabs/SmartRedis/pull/412
+.. _PR411: https://github.com/CrayLabs/SmartRedis/pull/411
+.. _PR408: https://github.com/CrayLabs/SmartRedis/pull/408
+.. _PR407: https://github.com/CrayLabs/SmartRedis/pull/407
+.. _PR406: https://github.com/CrayLabs/SmartRedis/pull/406
+.. _PR404: https://github.com/CrayLabs/SmartRedis/pull/404
+.. _PR402: https://github.com/CrayLabs/SmartRedis/pull/402
+.. _PR396: https://github.com/CrayLabs/SmartRedis/pull/396
+.. _PR395: https://github.com/CrayLabs/SmartRedis/pull/395
+.. _PR353: https://github.com/CrayLabs/SmartRedis/pull/353
+
+0.4.2
+-----
+
+Released on September 13, 2023
+
+Description
+
+- Reduced number of suppressed lint errors
+- Expanded documentation of aggregation lists
+- Updated third-party software dependencies to current versions
+- Updated post-merge tests in CI/CD to work with new test system
+- Enabled static builds of SmartRedis
+- Improved robustness of test runs
+- Fixed installation link
+- Updated supported languages documentation
+- Removed obsolete files
+- Added pylint to CI/CD pipeline and mitigated existing errors
+- Improved clustered Redis initialization
+
+Detailed Notes
+
+- Refactor factory for ConfigOptions to avoid using a protected member outside an instance (PR393_)
+- Added a new advanced topics documentation page with a section on aggregation lists (PR390_)
+- Updated pybind (2.10.3 => 2.11.1), hiredis (1.1.0 => 1.2.0), and redis++ (1.3.5 => 1.3.10) dependencies to current versions (PR389_)
+- Post-merge tests in CI/CD have been updated to interface cleanly with the new test system that was deployed in the previous release (PR388_)
+- Static builds of SmartRedis can now work with Linux platforms. Fortran is tested with GNU, PGI, Intel compilers (PR386_)
+- Preserve the shell output of test runs while making sure that server shutdown happens unconditionally (PR381_)
+- Fix incorrect link to installation documentation (PR380_)
+- Update language support matrix in documentation to reflect updates from the last release (PR379_)
+- Fix typo causing startup failure in utility script for unit tests (PR378_)
+- Update pylint configuration and version, mitigate most errors, execute in CI/CD pipeline (PR371_, PR382_)
+- Deleted obsolete build and testing files that are no longer needed with the new build and test system (PR366_)
+- Reuse existing Redis connection when mapping the Redis cluster (PR364_)
+
+.. _PR393: https://github.com/CrayLabs/SmartRedis/pull/393
+.. _PR390: https://github.com/CrayLabs/SmartRedis/pull/390
+.. _PR389: https://github.com/CrayLabs/SmartRedis/pull/389
+.. _PR388: https://github.com/CrayLabs/SmartRedis/pull/388
+.. _PR386: https://github.com/CrayLabs/SmartRedis/pull/386
+.. _PR382: https://github.com/CrayLabs/SmartRedis/pull/382
+.. _PR381: https://github.com/CrayLabs/SmartRedis/pull/381
+.. _PR380: https://github.com/CrayLabs/SmartRedis/pull/380
+.. _PR379: https://github.com/CrayLabs/SmartRedis/pull/379
+.. _PR378: https://github.com/CrayLabs/SmartRedis/pull/378
+.. _PR371: https://github.com/CrayLabs/SmartRedis/pull/371
+.. _PR366: https://github.com/CrayLabs/SmartRedis/pull/366
+.. _PR364: https://github.com/CrayLabs/SmartRedis/pull/364
+
+
+0.4.1
+-----
+
+Released on July 5, 2023
+
+Description
+
+This release revamps the build and test systems for SmartRedis, improves
+compatibility with different Fortran compilers, and lays the groundwork for future
+support for interacting with multiple concurrent backend databases:
+
+- Documentation improvements
+- Improved compatibility of type hints with third-party software
+- Added type hints to the Python interface layer
+- Added support for Python 3.10
+- Updated setup.py to work with the new build system
+- Removed an unneeded method from the Python SRObject class
+- Fixed a memory leak in the C layer
+- Revamped the SmartRedis test system
+- Removed debug output in the pybind layer
+- Updated Hiredis version to 1.1.0
+- Enabled parallel builds for the SmartRedis examples
+- Added experimental support for the Nvidia toolchain
+- Major revamp of build and test systems for SmartRedis
+- Refactored Fortran methods to return default logical kind
+- Updated CI/CD tests to use a modern version of MacOS
+- Fixed the spelling of the Dataset destructor's C interface (now DeallocateDataSet)
+- Updated Redis++ version to 1.3.8
+- Refactored third-party software dependency installation
+- Added a pip-install target to Makefile to automate this process going forward (note: this was later removed)
+- Added infrastructure for multiDB support
+
+Detailed Notes
+
+- Assorted updates and clarifications to the documentation (PR367_)
+- Turn `ParamSpec` usage into forward references to not require `typing-extensions` at runtime (PR365_)
+- Added type hints to the Python interface layer (PR361_)
+- List Python 3.10 support and loosen PyTorch requirement to allow for versions that support Python 3.10 (PR360_)
+- Streamlined setup.py to simplify Python install (PR359_)
+- Remove from_pybind() from Python SRObject class as it's not needed and didn't work properly anyway (PR358_)
+- Fixed a memory leak in the C layer when calling get_string_option() (PR357_)
+- Major revamp to simplify use of SmartRedis test system, automating most test processes (PR356_)
+- Remove debug output in pybind layer associated with put_dataset (PR352_)
+- Updated to the latest version of Hiredis (1.1.0) (PR351_)
+- Enable parallel build for the SmartRedis examples by moving utility Fortran code into a small static library (PR349_)
+- For the NVidia toolchain only: Replaces the assumed rank feature of F2018 used in the Fortran client with assumed shape arrays, making it possible to compile SmartRedis with the Nvidia toolchain. (PR346_)
+- Rework the build and test system to improve maintainability of the library. There have been several significant changes, including that Python and Fortran clients are no longer built by default and that there are Make variables that customize the build process. Please review the build documentation and ``make help`` to see all that has changed.
(PR341_)
+- Many Fortran routines were returning logical kind = c_bool, which turns out not to be the same as the default logical kind of most Fortran compilers. These have now been refactored so that users need not import `iso_c_binding` in their own applications (PR340_)
+- Update MacOS version in CI/CD tests from 10.15 to 12.0 (PR339_)
+- Correct the spelling of the C DataSet destruction interface from DeallocateeDataSet to DeallocateDataSet (PR338_)
+- Updated the version of Redis++ to v1.3.8 to pull in a change that ensures the redis++.pc file properly points to the generated libraries (PR334_)
+- Third-party software dependency installation is now handled in the Makefile instead of separate scripts
+- New pip-install target in Makefile will be a dependency of the lib target going forward so that users don't have to manually pip install SmartRedis in the future (PR330_)
+- Added ConfigOptions class and API, which will form the backbone of multiDB support (PR303_)
+
+.. _PR367: https://github.com/CrayLabs/SmartRedis/pull/367
+.. _PR365: https://github.com/CrayLabs/SmartRedis/pull/365
+.. _PR361: https://github.com/CrayLabs/SmartRedis/pull/361
+.. _PR360: https://github.com/CrayLabs/SmartRedis/pull/360
+.. _PR359: https://github.com/CrayLabs/SmartRedis/pull/359
+.. _PR358: https://github.com/CrayLabs/SmartRedis/pull/358
+.. _PR357: https://github.com/CrayLabs/SmartRedis/pull/357
+.. _PR356: https://github.com/CrayLabs/SmartRedis/pull/356
+.. _PR352: https://github.com/CrayLabs/SmartRedis/pull/352
+.. _PR351: https://github.com/CrayLabs/SmartRedis/pull/351
+.. _PR349: https://github.com/CrayLabs/SmartRedis/pull/349
+.. _PR346: https://github.com/CrayLabs/SmartRedis/pull/346
+.. _PR341: https://github.com/CrayLabs/SmartRedis/pull/341
+.. _PR340: https://github.com/CrayLabs/SmartRedis/pull/340
+.. _PR339: https://github.com/CrayLabs/SmartRedis/pull/339
+.. _PR338: https://github.com/CrayLabs/SmartRedis/pull/338
+.. _PR334: https://github.com/CrayLabs/SmartRedis/pull/334
+.. _PR331: https://github.com/CrayLabs/SmartRedis/pull/331
+.. _PR330: https://github.com/CrayLabs/SmartRedis/pull/330
+.. _PR303: https://github.com/CrayLabs/SmartRedis/pull/303
+
+0.4.0
+-----
-This section details changes made in the development branch that have not yet been applied to a released version of the SmartRedis library.
+Released on April 12, 2023

Description

+This release provides a variety of features to improve usability and debugging
+of the SmartRedis library, notably including Unix domain socket support, logging,
+the ability to print a textual representation of a string or dataset, dataset
+inspection, documentation updates, fixes to the multi-GPU support, and much more:
+
+- Prepared 0.4.0 release
+- Disabled codecov CI tests
+- Improved error message in to_string methods in C interface
+- Streamlined PyBind interface layer
+- Updated Python API documentation
+- Streamlined C interface layer
+- Improved performance of get, put, and copy dataset methods
+- Fixed a bug which prevented multi-GPU model set in some cases
+- Streamlined pipelined execution of tasks for backend database
+- Enhanced code coverage to include all 4 languages supported by SmartRedis
+- Fixed a bug which resulted in wrong key prefixing when retrieving aggregation lists in ensembles
+- Corrected assorted API documentation errors and omissions
+- Improved documentation of exception handling in Redis server classes
+- Improved error handling for setting of scripts and models
+- Added support to inspect the dimensions of a tensor via get_tensor_dims()
+- Split dataset prefixing control from use_tensor_ensemble_prefix() to use_dataset_ensemble_prefix()
+- Updated to the latest version of redis-plus-plus
+- Updated to the latest version of PyBind
- Change documentation theme to sphinx_book_theme and fix doc strings
+- Added print capability for Client and DataSet
- Add support for inspection of tensors and metadata inside datasets
- Add support for user-directed logging for Python clients, using Client, Dataset, or LogContext logging methods
- Add support for user-directed logging for C and Fortran clients without a Client or Dataset context
@@ -27,7 +237,26 @@ Detailed Notes
+- Update docs and version numbers in preparation for version 0.4.0. Clean up duplicate marking of numpy dependency (PR321_)
+- Remove codecov thresholds to avoid commits being marked as 'failed' due to coverage variance (PR317_)
+- Corrected the error message in to_string methods in C interface to not overwrite the returned error message and to name the function (PR320_)
+- Streamlined PyBind interface layer to reduce repetitive boilerplate code (PR315_)
+- Updated Python API summary table to include new methods (PR313_)
+- Streamlined C interface layer to reduce repetitive boilerplate code (PR312_)
+- Leveraged Redis pipelining to improve performance of get, put, and copy dataset methods (PR311_)
+- Redis::set_model_multigpu() will now upload the correct model to all GPUs (PR310_)
+- RedisCluster::_run_pipeline() will no longer unconditionally apply a retry wait before returning (PR309_)
+- Expand code coverage to all four languages and make the CI/CD more efficient (PR308_)
+- An internal flag was set incorrectly, resulting in wrong key prefixing when accessing (retrieving or querying) lists created in ensembles (PR306_)
+- Corrected a variety of Doxygen errors and omissions in the API documentation (PR305_)
+- Added throw documentation for exception handling in redis.h, redisserver.h, rediscluster.h (PR301_)
+- Added error handling for a rare edge condition when setting scripts and models (PR300_)
+- Added support to inspect the dimensions of a tensor via new get_tensor_dims() method (PR299_)
+- The use_tensor_ensemble_prefix() API method no longer controls whether datasets are prefixed. A new API method, use_dataset_ensemble_prefix(), now manages this.
(PR298_)
+- Updated from redis-plus-plus v1.3.2 to v1.3.5 (PR296_)
+- Updated from PyBind v2.6.2 to v2.10.3 (PR295_)
- Change documentation theme to sphinx_book_theme to match SmartSim documentation theme and fix Python API doc string errors (PR294_)
+- Added print capability for Client and DataSet to give detailed diagnostic information for debugging (PR293_)
- Added support for retrieval of names and types of tensors and metadata inside datasets (PR291_)
- Added support for user-directed logging for Python clients via {Client, Dataset, LogContext}.{log_data, log_warning, log_error} methods (PR289_)
- Added support for user-directed logging without a Client or Dataset context to C and Fortran clients via _string() methods (PR288_)
@@ -44,7 +273,26 @@ Detailed Notes
- Implemented support for Unix Domain Sockets, including refactorization of server address code, test cases, and check-in tests. (PR252_)
- A new make target `make lib-with-fortran` now compiles the Fortran client and dataset into its own library which applications can link against (PR245_)
+.. _PR321: https://github.com/CrayLabs/SmartRedis/pull/321
+.. _PR317: https://github.com/CrayLabs/SmartRedis/pull/317
+.. _PR320: https://github.com/CrayLabs/SmartRedis/pull/320
+.. _PR315: https://github.com/CrayLabs/SmartRedis/pull/315
+.. _PR313: https://github.com/CrayLabs/SmartRedis/pull/313
+.. _PR312: https://github.com/CrayLabs/SmartRedis/pull/312
+.. _PR311: https://github.com/CrayLabs/SmartRedis/pull/311
+.. _PR310: https://github.com/CrayLabs/SmartRedis/pull/310
+.. _PR309: https://github.com/CrayLabs/SmartRedis/pull/309
+.. _PR308: https://github.com/CrayLabs/SmartRedis/pull/308
+.. _PR306: https://github.com/CrayLabs/SmartRedis/pull/306
+.. _PR305: https://github.com/CrayLabs/SmartRedis/pull/305
+.. _PR301: https://github.com/CrayLabs/SmartRedis/pull/301
+.. _PR300: https://github.com/CrayLabs/SmartRedis/pull/300
+.. _PR299: https://github.com/CrayLabs/SmartRedis/pull/299
+.. _PR298: https://github.com/CrayLabs/SmartRedis/pull/298
+.. _PR296: https://github.com/CrayLabs/SmartRedis/pull/296
+.. _PR295: https://github.com/CrayLabs/SmartRedis/pull/295
.. _PR294: https://github.com/CrayLabs/SmartRedis/pull/294
+.. _PR293: https://github.com/CrayLabs/SmartRedis/pull/293
.. _PR291: https://github.com/CrayLabs/SmartRedis/pull/291
.. _PR289: https://github.com/CrayLabs/SmartRedis/pull/289
.. _PR288: https://github.com/CrayLabs/SmartRedis/pull/288
@@ -277,4 +525,4 @@ Released on April 1, 2021

Description

-- Initial 0.1.0 release of SmartRedis
+- Initial 0.1.0 release of SmartRedis
\ No newline at end of file
diff --git a/2023-01/smartsim/smartredis/doc/clients/c-plus.rst b/2023-01/smartsim/smartredis/doc/clients/c-plus.rst
index 4ed83147..67844c8d 100644
--- a/2023-01/smartsim/smartredis/doc/clients/c-plus.rst
+++ b/2023-01/smartsim/smartredis/doc/clients/c-plus.rst
@@ -1,10 +1,26 @@
-***
-C++
-***
+********
+C++ APIs
+********
+
+The following page provides a comprehensive overview of the SmartRedis C++
+Client and Dataset APIs.
+Further explanation and details of each are presented below.

Client API
==========

+The Client API is purpose-built for interaction with the backend database,
+which extends the capabilities of the Redis in-memory data store.
+It's important to note that the SmartRedis Client API is the exclusive
+means for altering, transmitting, and receiving data within the backend
+database. 
More specifically, the Client API is responsible for both
+creating and modifying data structures, which encompass :ref:`Models <data-structures-model>`,
+:ref:`Scripts <data-structures-script>`, and :ref:`Tensors <data-structures-tensor>`.
+It also handles the transmission and reception of
+the aforementioned data structures in addition to the :ref:`Dataset <data-structures-dataset>`
+data structure. Creating and modifying the ``DataSet`` object
+is confined to local operation by the DataSet API.
+
.. doxygenclass:: SmartRedis::Client
   :project: cpp_client
   :members:
@@ -14,6 +30,13 @@
Dataset API
===========

+The C++ DataSet API enables a user to manage a group of tensors
+and associated metadata within a data structure called a ``DataSet`` object.
+The DataSet API operates independently of the database and solely
+maintains the dataset object in memory. The actual interaction with the Redis database,
+where a snapshot of the DataSet object is sent, is handled by the Client API. For more
+information on the ``DataSet`` object, click :ref:`here <data-structures-dataset>`.
+
.. doxygenclass:: SmartRedis::DataSet
   :project: cpp_client
   :members:
diff --git a/2023-01/smartsim/smartredis/doc/clients/c.rst b/2023-01/smartsim/smartredis/doc/clients/c.rst
index 354e8d6c..91b3ebb2 100644
--- a/2023-01/smartsim/smartredis/doc/clients/c.rst
+++ b/2023-01/smartsim/smartredis/doc/clients/c.rst
@@ -1,11 +1,26 @@
+*******
+C APIs
+*******
-***
- C
-***
+
+The following page provides a comprehensive overview of the SmartRedis C
+Client and Dataset APIs.
+Further explanation and details of each are presented below.

Client API
==========

+The Client API is purpose-built for interaction with the backend database,
+which extends the capabilities of the Redis in-memory data store.
+It's important to note that the SmartRedis Client API is the exclusive
+means for altering, transmitting, and receiving data within the backend
+database. More specifically, the Client API is responsible for both
+creating and modifying data structures, which encompass :ref:`Models <data-structures-model>`,
+:ref:`Scripts <data-structures-script>`, and :ref:`Tensors <data-structures-tensor>`.
+It also handles the transmission and reception of
+the aforementioned data structures in addition to the :ref:`Dataset <data-structures-dataset>`
+data structure. Creating and modifying the ``DataSet`` object
+is confined to local operation by the DataSet API.
+
.. doxygenfile:: c_client.h
   :project: c_client
@@ -13,6 +28,13 @@
Dataset API
===========

+The C DataSet API enables a user to manage a group of tensors
+and associated metadata within a data structure called a ``DataSet`` object.
+The DataSet API operates independently of the database and solely
+maintains the dataset object in memory. The actual interaction with the Redis database,
+where a snapshot of the DataSet object is sent, is handled by the Client API. For more
+information on the ``DataSet`` object, click :ref:`here <data-structures-dataset>`.
+
.. doxygenfile:: c_dataset.h
   :project: c_client
diff --git a/2023-01/smartsim/smartredis/doc/clients/fortran.rst b/2023-01/smartsim/smartredis/doc/clients/fortran.rst
index c44f3486..0a31c2a3 100644
--- a/2023-01/smartsim/smartredis/doc/clients/fortran.rst
+++ b/2023-01/smartsim/smartredis/doc/clients/fortran.rst
@@ -1,11 +1,26 @@
+************
+Fortran APIs
+************
-*******
-Fortran
-*******
+
+The following page provides a comprehensive overview of the SmartRedis Fortran
+Client and Dataset APIs.
+Further explanation and details of each are presented below.

Client API
==========

+The Client API is purpose-built for interaction with the backend database,
+which extends the capabilities of the Redis in-memory data store. 
+It's important to note that the SmartRedis Client API is the exclusive
+means for altering, transmitting, and receiving data within the backend
+database. More specifically, the Client API is responsible for both
+creating and modifying data structures, which encompass :ref:`Models <data-structures-model>`,
+:ref:`Scripts <data-structures-script>`, and :ref:`Tensors <data-structures-tensor>`.
+It also handles the transmission and reception of
+the aforementioned data structures in addition to the :ref:`Dataset <data-structures-dataset>`
+data structure. Creating and modifying the ``DataSet`` object
+is confined to local operation by the DataSet API.
+
The following are overloaded interfaces which support
32/64-bit ``real`` and 8, 16, 32, and 64-bit ``integer`` tensors
@@ -17,6 +32,13 @@
Dataset API
===========

+The Fortran DataSet API enables a user to manage a group of tensors
+and associated metadata within a data structure called a ``DataSet`` object.
+The DataSet API operates independently of the database and solely
+maintains the dataset object in memory. The actual interaction with the Redis database,
+where a snapshot of the DataSet object is sent, is handled by the Client API. For more
+information on the ``DataSet`` object, click :ref:`here <data-structures-dataset>`.
+
The following are overloaded interfaces which support
32/64-bit ``real`` and 8, 16, 32, and 64-bit ``integer`` tensors
@@ -105,9 +127,9 @@ Fortran compilers need to support the following features

* Object-oriented programming support (Fortran 2003)
* Fortran-C interoperability, ``iso_c_binding`` (Fortran 2003)
-* Assumed rank (``dimension(..)``) arrays (Fortran 2018)

-These language features are supported by Intel 19, GNU 9, and Cray 8.6 and later versions.
+These language features are supported by Intel 19, GNU 9, and Cray 8.6 and later versions. Nvidia compilers
+have been shown to work, but their support should be considered fragile for now.

.. _unsupported_smartredis_features:
diff --git a/2023-01/smartsim/smartredis/doc/clients/python.rst b/2023-01/smartsim/smartredis/doc/clients/python.rst
index c6664880..7b446664 100644
--- a/2023-01/smartsim/smartredis/doc/clients/python.rst
+++ b/2023-01/smartsim/smartredis/doc/clients/python.rst
@@ -1,15 +1,36 @@
-******
-Python
-******
+***********
+Python APIs
+***********
+
+The following page provides a comprehensive overview of the SmartRedis Python
+Client, DataSet and Logging APIs.
+Further explanation and details of each are presented below.

Client API
==========

+The Client API is purpose-built for interaction with the backend database,
+which extends the capabilities of the Redis in-memory data store.
+It's important to note that the SmartRedis Client API is the exclusive
+means for altering, transmitting, and receiving data within the backend
+database. More specifically, the Client API is responsible for both
+creating and modifying data structures, which encompass
+:ref:`Models <data-structures-model>`, :ref:`Scripts <data-structures-script>`,
+and :ref:`Tensors <data-structures-tensor>`.
+It also handles the transmission and reception of the aforementioned data
+structures in addition to the :ref:`Dataset <data-structures-dataset>` data
+structure. Creating and modifying the ``DataSet`` object is confined to local
+operation by the DataSet API.
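For orientation before the method tables below, here is a minimal usage sketch of the Python Client (assuming a backend database reachable via the SSDB environment variable; the tensor name is illustrative):

.. code-block:: python

    # Minimal sketch: store a tensor in the backend database and read it back.
    import numpy as np
    from smartredis import Client

    client = Client(cluster=False)   # database address taken from $SSDB
    client.put_tensor("my_tensor", np.random.rand(2, 3))
    retrieved = client.get_tensor("my_tensor")

+
+Client Class Method Overview
+----------------------------
+
.. currentmodule:: smartredis

.. 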
autosummary::

    Client.__init__
+   Client.__str__
    Client.put_tensor
    Client.get_tensor
    Client.delete_tensor
@@ -25,21 +46,34 @@ Client API
    Client.dataset_exists
    Client.poll_dataset
    Client.set_function
+   Client.set_function_multigpu
    Client.set_script
+   Client.set_script_multigpu
    Client.set_script_from_file
+   Client.set_script_from_file_multigpu
    Client.get_script
    Client.run_script
+   Client.run_script_multigpu
+   Client.delete_script
+   Client.delete_script_multigpu
    Client.set_model
+   Client.set_model_multigpu
    Client.set_model_from_file
+   Client.set_model_from_file_multigpu
    Client.get_model
    Client.run_model
+   Client.run_model_multigpu
+   Client.delete_model
+   Client.delete_model_multigpu
    Client.model_exists
    Client.poll_model
    Client.key_exists
    Client.poll_key
    Client.set_data_source
-   Client.use_model_ensemble_prefix
    Client.use_tensor_ensemble_prefix
+   Client.use_dataset_ensemble_prefix
+   Client.use_model_ensemble_prefix
+   Client.use_list_ensemble_prefix
    Client.get_db_node_info
    Client.get_db_cluster_info
    Client.get_ai_info
@@ -47,6 +81,19 @@ Client API
    Client.config_get
    Client.config_set
    Client.save
+   Client.append_to_list
+   Client.delete_list
+   Client.copy_list
+   Client.rename_list
+   Client.get_list_length
+   Client.poll_list_length
+   Client.poll_list_length_gte
+   Client.poll_list_length_lte
+   Client.get_datasets_from_list
+   Client.get_dataset_list_range
+
+Client Class Method Detailed View
+---------------------------------

.. autoclass:: Client
   :members:
@@ -56,19 +103,88 @@
DataSet API
===========

+The Python DataSet API enables a user to manage a group of tensors
+and associated metadata within a data structure called a ``DataSet`` object.
+The DataSet API operates independently of the database and solely
+maintains the dataset object **in memory**. The actual interaction with the Redis database,
+where a snapshot of the DataSet object is sent, is handled by the Client API. For more
+information on the ``DataSet`` object, click :ref:`here <data-structures-dataset>`.
+
+Dataset Class Method Overview
+-----------------------------
+
.. currentmodule:: smartredis

.. autosummary::

    Dataset.__init__
+   Dataset.__str__
    Dataset.add_tensor
    Dataset.get_tensor
    Dataset.add_meta_scalar
    Dataset.get_meta_scalars
    Dataset.add_meta_string
    Dataset.get_meta_strings
+   Dataset.get_metadata_field_names
+   Dataset.get_metadata_field_type
+   Dataset.get_name
+   Dataset.get_tensor_type
+   Dataset.get_tensor_names
+   Dataset.get_tensor_dims
+
+Dataset Class Method Detailed View
+----------------------------------

.. autoclass:: Dataset
   :members:
   :show-inheritance:

+Logging API
+===========
+
+The SmartRedis logging functionality is split across multiple classes as well
+as three standalone methods. All logging requires a context, which is the text
+that is attached to a log message so that when reading the log, one can tell
+which part of their program generated the log message. A context can be a
+Client object, a DataSet object, or a LogContext object (which only contains the
+string identifying the context), or it can be a simple text string. The three
+classes all derive from the SRObject class, which contains logging methods.
+Three standalone methods support logging against a string context.
+
+Logging Functionality Overview
+------------------------------
+
+.. currentmodule:: smartredis
+
+.. autosummary::
+
+   SRObject.log_data
+   SRObject.log_warning
+   SRObject.log_error
+   LogContext.__init__
+   log_data
+   log_warning
+   log_error
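A minimal sketch of the logging flow described above (the context name is illustrative, and the ``LLInfo`` level constant is assumed from the logging-level naming of the SmartRedis bindings; consult the detailed views below for exact signatures):

.. code-block:: python

    # Minimal sketch: log against a LogContext object and against a plain string.
    from smartredis import LogContext, LLInfo, log_data

    ctx = LogContext("my_app")                  # context object for log attribution
    ctx.log_data(LLInfo, "checkpoint reached")  # SRObject-derived logging method
    log_data("my_app_setup", LLInfo, "standalone logging call")

+LogContext Class Method Detailed View
+-------------------------------------
+
+.. 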
autoclass:: LogContext
+   :members:
+   :show-inheritance:
+
+SRObject Class Method Detailed View
+-----------------------------------
+
+.. autoclass:: SRObject
+   :members:
+   :show-inheritance:
+
+Standalone Logging Method Detailed View
+---------------------------------------
+
+.. autofunction:: log_data
+
+.. autofunction:: log_warning
+
+.. autofunction:: log_error
diff --git a/2023-01/smartsim/smartredis/doc/data_structures.rst b/2023-01/smartsim/smartredis/doc/data_structures.rst
index e10fd9d9..bd4441e8 100644
--- a/2023-01/smartsim/smartredis/doc/data_structures.rst
+++ b/2023-01/smartsim/smartredis/doc/data_structures.rst
@@ -2,32 +2,33 @@
Data Structures
***************

-RedisAI defines three new data structures to be
-used in redis databases: tensor, model, and script.
-In addition, SmartRedis defines an additional data
-structure ``DataSet``. In this section, the SmartRedis
-API for interacting with these data structures
-will be described, and when applicable,
-comments on performance and best practices will be made.
-
-In general, concepts and capabilities will be
-demonstrated for the Python and C++ API.
-The C and Fortran function signatures closely
-resemble the C++ API, and as a result,
-they are not discussed in detail in the interest
-of brevity. For more detailed explanations of the C
-and Fortran API, refer to the documentation pages for those
-clients.
-
-
-.. _data_structures_tensor:
+SmartSim defines three primary data structures designed for use within backend databases:
+
+* ``Tensor`` : represents an n-dimensional array of values.
+* ``Model`` : represents a computational ML model for one of the supported backend frameworks.
+* ``Script`` : represents a TorchScript program.
+
+In addition, SmartRedis defines a data
+structure named ``DataSet`` that enables a user to manage a group of tensors
+and associated metadata in-memory. In this section, we will provide an explanation
+of the SmartRedis API used to interact with these four data structures,
+along with relevant insights on performance and best practices.
+
+We illustrate concepts and capabilities of the Python
+and C++ SmartRedis APIs. The C and Fortran function signatures closely
+mirror the C++ API, and for brevity, we won't delve
+into them. For a full discussion of the C and Fortran APIs,
+please refer to their respective documentation pages.
+
+
+.. _data-structures-tensor:

Tensor
======

An n-dimensional tensor is used by RedisAI to store
and manipulate numerical data. SmartRedis provides functions to
-put a key and tensor pair into the Redis database and retrieve
+put a key and tensor pair into the backend database and retrieve
a tensor associated with a key from the database.

.. note::
@@ -88,7 +89,7 @@ Retrieving
----------

The C++, C, and Fortran clients provide two methods for retrieving
-tensors from the Redis database. The first method is referred to
+tensors from the backend database. The first method is referred to
as *unpacking* a tensor. When a tensor is retrieved via
``unpack_tensor()``, the memory space to store the retrieved
tensor data is provided by the user. This has the advantage
@@ -172,21 +173,22 @@
Note that all of the client ``get_tensor()`` functions will
internally modify the provided tensor name if the client is
being used with SmartSim ensemble capabilities.

-.. _data_structures_dataset:
+.. 
_data-structures-dataset:

Dataset
=======

-In many situations, a ``Client`` might be tasked with sending a
-group of tensors and metadata which are closely related and
-naturally grouped into a collection for future retrieval.
-The ``DataSet`` object stages these items so that they can be
-more efficiently placed in the redis database and can later be
-retrieved with the name given to the ``DataSet``.
+When dealing with multi-modal data or complex data sets,
+one may have different types of tensors (e.g., images, text embeddings,
+numerical data) and metadata for each data point. Grouping them into a
+collection represents each data point as a cohesive unit.
+The ``DataSet`` data structure provides this functionality to stage tensors and metadata
+in memory via the ``DataSet API``. After the creation of a
+``DataSet`` object, the grouped data can be efficiently stored in the backend database
+by the ``Client API`` and subsequently retrieved using the assigned ``DataSet`` name.
+In the upcoming sections, we outline the process of building, sending, and retrieving a ``DataSet``.

Listed below are the supported tensor and metadata types.
-In the following sections, building, sending, and retrieving
-a ``DataSet`` will be described.

.. list-table:: Supported Data Types
   :widths: 25 25 25
@@ -230,36 +232,37 @@
   -
   - X

-Sending
--------
+Build and Send a DataSet
+------------------------

-When building a ``DataSet`` to be stored in the database,
-a user can add any combination of tensors and metadata.
-To add a tensor to the ``DataSet``, the user simply uses
-the ``DataSet.add_tensor()`` function defined in
-each language. The ``DataSet.add_tensor()`` parameters are the same
-as ``Client.put_tensor()``, and as a result, details of the function
-signatures will not be reiterated here.
+When building a ``DataSet`` object in memory,
+a user can group various combinations of tensors and metadata that
+conform to the supported data types in the table above. To illustrate,
+tensors can be inserted into a ``DataSet`` object via the ``Dataset.add_tensor()`` method.
+The SmartRedis DataSet API functions
+are available in C, C++, Python, and Fortran. The ``DataSet.add_tensor()`` function
+operates independently of the database and solely
+maintains the dataset object. Storing the dataset in the backend
+database is done via the Client API ``put_dataset()`` method.

.. note::

-    ``DataSet.add_tensor()`` copies the tensor data
-    provided by the user to eliminate errors from user-provided
-    data being cleared or deallocated. This additional memory
-    will be freed when the DataSet
-    object is destroyed.
+    The ``DataSet.add_tensor()`` function copies user-provided
+    tensor data; this prevents potential issues arising from the user's
+    data being cleared or deallocated. Any additional memory allocated
+    for this purpose will be released when the DataSet object is destroyed.

-Metadata can be added to the ``DataSet`` with the
+Metadata can be added to an in-memory ``DataSet`` object with the
``DataSet.add_meta_scalar()`` and ``DataSet.add_meta_string()``
-functions. As the aforementioned function names suggest,
-there are separate functions to add metadata that is a scalar
-(e.g. double) and a string. For both functions, the first
-function input is the name of the metadata field. 
This field
-name is an internal ``DataSet`` identifier for the metadata
-value(s) that is used for future retrieval, and because it
-is an internal identifier, the user does not have to worry
-about any key conflicts in the database (i.e. multiple ``DataSet``
-can have the same metadata field names). To clarify these
-and future descriptions, the C++ interface for adding
+functions. Methods exist for adding scalar metadata (e.g., double) and string metadata.
+For both functions, the first input
+parameter is the name of the metadata field.
+The field name serves as an internal identifier within the ``DataSet``
+for grouped metadata values. It's used to retrieve metadata in the future.
+Since it's an internal identifier, users don't need to be concerned
+about conflicts with keys in the database. In other words, multiple
+``DataSet`` objects can use the same metadata field names without causing
+issues because these names are managed within the ``DataSet`` and won't
+interfere with external database keys. The C++ interface for adding
metadata is shown below:

.. code-block:: cpp
@@ -277,100 +280,69 @@ metadata is shown below:

When adding a scalar or string metadata value, the value
is copied by the ``DataSet``, and as a result, the user
does not need to ensure that the metadata values provided
-are still in memory after they have been added.
+are still in memory. In other words,
+the ``DataSet`` handles the memory management of these metadata values,
+and you don't need to retain or manage the original copies separately
+once they have been included in the ``DataSet`` object.
Additionally, multiple metadata values can be added to a
-single field, and the default behavior is to append the value to the
-existing field. In this way, the ``DataSet`` metadata supports
-one-dimensional arrays, but the entries in the array must be added
-iteratively by the user. Also, note that in the above C++ example,
+single field name, and the default behavior is to append the value to the
+field name (creating the field if not already present). This behavior allows the ``DataSet`` metadata
+to function like one-dimensional arrays.
+
+Also, note that in the above C++ example,
the metadata scalar type must be specified with a
-``SRMetaDataType`` enum value, and similar
+``SRMetaDataType`` enum value; similar
requirements exist for C and Fortran ``DataSet`` implementations.

Finally, the ``DataSet`` object is sent to the database
using the ``Client.put_dataset()`` function, which is uniform across all clients.
+To emphasize once more, all interactions with the backend database are handled by
+the Client API, not the DataSet API.
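A minimal Python sketch of the build-and-send flow just described (the tensor, metadata, and dataset names are illustrative, and a backend database is assumed to be reachable via SSDB):

.. code-block:: python

    # Minimal sketch: stage tensors and metadata in an in-memory DataSet,
    # then store it with the Client; only the Client touches the database.
    import numpy as np
    from smartredis import Client, Dataset

    ds = Dataset("experiment_0")
    ds.add_tensor("velocity", np.random.rand(10, 3))
    ds.add_meta_scalar("timestep", 100)
    ds.add_meta_string("units", "m/s")

    client = Client(cluster=False)
    client.put_dataset(ds)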
-The functions for retrieving tensors from ``DataSet`` +The functions for retrieving tensors from an in-memory ``DataSet`` object are identical to the functions provided by ``Client``, and the same return values and memory management -paradigm is followed. As a result, please refer to +paradigm is followed. As a result, please refer to the previous section for details on tensor retrieve function calls. -There are two functions for retrieving metadata: -``get_meta_scalars()`` and ``get_meta_strings()``. -As the names suggest, the first function -is used for retrieving numerical metadata values, -and the second is for retrieving metadata string -values. The metadata retrieval function prototypes +There are four functions for retrieving metadata information from a ``DataSet`` object in-memory: +``get_meta_scalars()``, ``get_meta_strings()``, ``get_metadata_field_names()`` +and ``get_metadata_field_type()``. As the names suggest, the ``get_meta_scalars()`` function +is used for retrieving numerical metadata values, while the ``get_meta_strings()`` function +is for retrieving metadata string values. The ``get_metadata_field_names()`` function +retrieves a list of all metadata field names in the ``DataSet`` object. Lastly, +the ``get_metadata_field_type()`` function returns the type (scalar or string) of the metadata +attached to the specified field name. The metadata retrieval function prototypes vary across the clients based on programming language constraints, and as a result, please refer to the ``DataSet`` API documentation -for a description of input parameters and memory management. It is +for a description of input parameters and memory management. It is important to note, however, that all functions require the name of the -metadata field to be retrieved, and this name is the same name that +metadata field to be retrieved. This name is the same name that was used when constructing the metadata field with ``add_meta_scalar()`` and ``add_meta_string()`` functions. Aggregating ----------- -An API is provided to aggregate multiple ``DataSet`` objects that -are stored on one or more database nodes. This is accomplished -through an interface referred to as ``aggregation lists``. -An ``aggregation list`` in SmartRedis stores references to -``DataSet`` objects that are stored in the database. ``DataSet`` -objects can be appended to the ``aggregation list`` and then -``SmartRedis`` clients in the same application or a different application -can retrieve all or some of the ``DataSet`` objects referenced in that -``aggregation list``. - -For example, the C++ client function to append a ``DataSet`` to an -aggregation list is shown below: - -.. code-block:: cpp - - # C++ aggregation list append interface - void append_to_list(const std::string& list_name, - const DataSet& dataset); - -The above function will append the provided ``DataSet`` to the -``aggregation list``, which can be referenced in all user-facing functions -by the provided list name. Note that a list can be appended by -any client in the same or different application. Additionally, all -appends are performed at the end of the list, and if the list does not -already exist, it is automatically created. - -For retrieval of ``aggregation list`` contents, -the SmartRedis ``Client`` method provides an API function that -will return an iterable container with all of the ``DataSet`` objects -that were appended to the ``aggregation list``. For example, the C++ client -function to retrieve the ``aggregation list`` contents is shown below: - -.. 
code-block:: cpp
-
-    # C++ aggregation list retrieval interface
-    std::vector get_datasets_from_list(const std::string& list_name);
-
-Additional functions are provided to retrieve only a portion of the
-``aggregation list`` contents, copy an ``aggregation list``, rename
-an ``aggregation list` and retrieve ``aggregation list`` length.
-A blocking method to poll the ``aggregation list`` length is also
-provided as a means to wait for list completion before performing
-another task in the same application or a separate application.
+SmartRedis also supports an advanced API for working with aggregate
+lists of DataSets; details may be found
+:ref:`here <advanced-topics-dataset-aggregation>`.

+.. _data-structures-model:

Model
=====
@@ -382,8 +354,8 @@
RedisAI supports PyTorch, TensorFlow, TensorFlow Lite, and
ONNX backends, and specifying the backend to be used is done
through the ``Client`` function calls.

-Sending
--------
+Build and Send a Model
+----------------------

A model is placed in the database through the ``Client.set_model()``
function. While data types may differ, the function parameters
@@ -399,6 +371,7 @@ are uniform across all SmartRedis clients, and as an example, the C++
                   const std::string& device,
                   int batch_size = 0,
                   int min_batch_size = 0,
+                  int min_batch_timeout = 0,
                   const std::string& tag = "",
                   const std::vector<std::string>& inputs
                     = std::vector<std::string>(),
@@ -412,14 +385,14 @@ documentation or the RedisAI documentation for a
description of each parameter.

.. note::

-    With a Redis cluster configuration, ``Client.set_model()``
+    With a clustered Redis backend configuration, ``Client.set_model()``
    will distribute a copy of the model to each database node in
    the cluster. As a result, the model that has been
    placed in the cluster with ``Client.set_model()``
    will not be addressable directly with the Redis CLI because
    of key manipulation that is required to accomplish
    this distribution. Despite the internal key
-    manipulation, models in a Redis cluster that have been
+    manipulation, models in a clustered Redis backend that have been
    set through the SmartRedis ``Client`` can be accessed
    and run through the SmartRedis ``Client`` API
    using the name provided to ``set_model()``. The user
@@ -441,7 +414,7 @@
A model can be retrieved from the database using the
type varies between languages, only the model name that
was used with ``Client.set_model()`` is needed to reference
the model in the database. Note that
-in a Redis cluster configuration, only one copy of the
+in a clustered Redis backend configuration, only one copy of the
model is returned to the user.

.. note::
@@ -456,7 +429,7 @@ Executing

A model can be executed using the ``Client.run_model()`` function.
The only required inputs to execute a model are the model name,
a list of input tensor names, and a list of output tensor names.
-If using a Redis cluster configuration, a copy of the model
+If using a clustered Redis backend configuration, a copy of the model
referenced by the provided name will be chosen based on data locality.
It is worth noting that the names of input and output tensors
will be altered with ensemble member identifications if the SmartSim
@@ -504,13 +477,15 @@ via ``Client.set_model_multigpu()``.
    it must have been set via ``Client.set_model_multigpu()``. The
    ``first_gpu`` and ``num_gpus`` parameters must be constant across both calls.
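As a minimal sketch of the set/run workflow above, using the Python client (a serialized TorchScript model in ``model.pt`` is assumed, and all names, shapes, and the backend choice are illustrative):

.. code-block:: python

    # Minimal sketch: upload a model, run it on a stored input tensor,
    # and fetch the output tensor.
    import numpy as np
    from smartredis import Client

    client = Client(cluster=False)
    client.set_model_from_file("my_cnn", "model.pt", "TORCH", device="CPU")
    client.put_tensor("input", np.random.rand(1, 1, 28, 28).astype(np.float32))
    client.run_model("my_cnn", inputs=["input"], outputs=["output"])
    prediction = client.get_tensor("output")

+.. _data-structures-script:
+
Script
======

Data processing is an essential step
in most machine learning workflows.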
For this reason, RedisAI provides the ability to evaluate PyTorch programs using the hardware -co-located with the Redis database (either CPU or GPU). +co-located with the backend database (either CPU or GPU). The SmartRedis ``Client`` provides functions for users to place a script in the database, retrieve a script from the database, and run a script. @@ -533,14 +508,14 @@ need to be provided by the user. const std::string_view& script); .. note:: - With a Redis cluster configuration, ``Client.set_script()`` + With a clustered Redis backend configuration, ``Client.set_script()`` will distribute a copy of the script to each database node in the cluster. As a result, the script that has been placed in the cluster with ``Client.set_script()`` will not be addressable directly with the Redis CLI because of key manipulation that is required to accomplish this distribution. Despite the internal key - manipulation, scripts in a Redis cluster that have been + manipulation, scripts in a clustered Redis backend that have been set through the SmartRedis ``Client`` can be accessed and run through the SmartRedis ``Client`` API using the name provided to ``set_script()``. The user @@ -562,7 +537,7 @@ A script can be retrieved from the database using the type varies between languages, only the script name that was used with ``Client.set_script()`` is needed to reference the script in the database. Note that -in a Redis cluster configuration, only one copy of the +in a clustered Redis backend configuration, only one copy of the script is returned to the user. .. note:: @@ -578,7 +553,7 @@ A script can be executed using the ``Client.run_script()`` function. The only required inputs to execute a script are the script name, the name of the function in the script to execute, a list of input tensor names, and a list of output tensor names. -If using a Redis cluster configuration, a copy of the script +If using a clustered Redis backend configuration, a copy of the script referenced by the provided name will be chosen based on data locality. It is worth noting that the names of input and output tensors will be altered with ensemble member identifications if the SmartSim @@ -623,4 +598,4 @@ via ``Client.set_script_multigpu()``. In order for a script to be executed via ``Client.run_script_multigpu()``, or deleted via ``Client.delete_script_multigpu()``, it must have been set via ``Client.set_script_multigpu()``. The - ``first_gpu`` and ``num_gpus`` parameters must be constant across both calls. + ``first_gpu`` and ``num_gpus`` parameters must be constant across both calls. \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/doc/dataset_conversions.rst b/2023-01/smartsim/smartredis/doc/dataset_conversions.rst index c8e4a201..a04f8da0 100644 --- a/2023-01/smartsim/smartredis/doc/dataset_conversions.rst +++ b/2023-01/smartsim/smartredis/doc/dataset_conversions.rst @@ -2,22 +2,22 @@ DataSet Conversions ******************* -Dataset conversion refers to a multi-step workflow where a user can create a SmartRedis dataset +Dataset conversion refers to a multi-step workflow where a user can create a SmartRedis dataset which can then be retrieved on the Python side, and then transformed into a {Dataformat} object. 
-An Xarray Dataset conversion, in this case, creates a SmartRedis dataset that is prepared to be converted
+An Xarray Dataset conversion, in this case, creates a SmartRedis dataset that is prepared to be converted
into an Xarray DataArray object, and then performs the conversion into the Xarray DataArray object.

-Dataset conversion to Xarray within the DatasetConversion class is two-step process:
-#. where additional metadata is added to an existing Dataset to add the possibility of conversion and
-#. a transformation method which ingests a Dataset and returns an object in the native dataformat.
+Dataset conversion to Xarray within the DatasetConversion class is a two-step process:
+#. additional metadata is added to an existing Dataset to enable conversion, and
+#. a transformation method ingests a Dataset and returns an object in the native data format.

-Xarray DataSet Conversions
+Xarray DataSet Conversions
==========================

-The Xarray Dataset format conversion methods follow the format of the two step process, where the
+The Xarray Dataset format conversion methods follow the format of the two-step process, where the
``add_metadata_for_xarray()`` method performs the task of adding additional metadata to an existing Dataset,
-allowing the ``transform_to_xarray()`` method to identify which fields should be used in the construction of
-the specific data format.
+allowing the ``transform_to_xarray()`` method to identify which fields should be used in the construction of
+the specific data format.

.. code-block:: python

    // add_metadata_for_xarray interface
    add_metadata_for_xarray(dataset, data_names, dim_names, coord_names, attr_names)

    // transform_to_xarray interface
    transform_to_xarray(dataset)

-Separating the adding of the metadata and the transformation into the appropriate data format minimizes
-the SmartRedis interference with the existing dataset.
+Separating the adding of the metadata and the transformation into the appropriate data format minimizes
+SmartRedis interference with the existing dataset.

.. note::

-    The ``add_metadata_for_xarray()``and ``transform_to_xarray()`` methods support adding multiple tensors into
-    SmartRedis datasets and storing their common metadata. If multiple data items are present with common metadata
-    then multiple xarrays will be built. Support for multiple data items with differing metadata is not yet supported.
+    The ``add_metadata_for_xarray()`` and ``transform_to_xarray()`` methods support adding multiple tensors into
+    SmartRedis datasets and storing their common metadata. If multiple data items are present with common metadata,
+    then multiple xarrays will be built. Multiple data items with differing metadata are not yet supported.

add_metadata_for_xarray
-----------------------

-The ``add_metadata_for_xarray()`` method supports attaching data and metadata to a tensor within a SmartRedis dataset,
-preparing the SmartRedis Dataset for transformation. The ``add_metadata_for_xarray()`` method should not interfere with the
+The ``add_metadata_for_xarray()`` method supports attaching data and metadata to a tensor within a SmartRedis dataset,
+preparing the SmartRedis Dataset for transformation. The ``add_metadata_for_xarray()`` method should not interfere with the
existing Dataset API to extract and manipulate data. We expect users to construct the datasets themselves
using the Dataset API before calling the ``add_metadata_for_xarray()`` method.
-Only the field names will be being passed into ``add_metadata_for_xarray()``, so the actual structure of the dataset and any of the metadata will -not be affected after calling the method. +Only the field names will be passed into ``add_metadata_for_xarray()``, so the actual structure of the dataset and any of the metadata will +not be affected after calling the method. Below is an example of the creation of a SmartRedis Dataset and the addition of tensor data and metadata done by the user: .. code-block:: python ds1 = Dataset("ds-1d") - dataset.add_tensor("1ddata",data1d) - dataset.add_tensor("x",longitude_1d) - dataset.add_meta_string("x_coord_units",'degrees E') - dataset.add_meta_string("x_coord_longname",'Longitude') - dataset.add_meta_string("units",'m/s') - dataset.add_meta_string("longname",'velocity') - dataset.add_meta_string("convention",'CF1.5') - dataset.add_meta_string("dim_data_x","x") - -Below is an example of the ``add_metadata_for_xarray()`` method calls to pass in field names of data and -metadata of the created SmartRedis Dataset under the appropriate parameter names for the creation of + ds1.add_tensor("1ddata", data1d) + ds1.add_tensor("x", longitude_1d) + ds1.add_meta_string("x_coord_units", "degrees E") + ds1.add_meta_string("x_coord_longname", "Longitude") + ds1.add_meta_string("units", "m/s") + ds1.add_meta_string("longname", "velocity") + ds1.add_meta_string("convention", "CF1.5") + ds1.add_meta_string("dim_data_x", "x") + +Below is an example of the ``add_metadata_for_xarray()`` method calls that pass in the field names of the data and +metadata of the created SmartRedis Dataset under the appropriate parameter names for the creation of the tensor data variable for the Xarray object and the coordinate data variable for the Xarray object: .. code-block:: python # Calling method add_metadata_for_xarray on the created dataset DatasetConverter.add_metadata_for_xarray( - ds1, + ds1, data_names=["1ddata"], dim_names=["dim_data_x"], coord_names=["x"], - attr_names=["units","longname","convention"] + attr_names=["units", "longname", "convention"] ) # Calling method add_metadata_for_xarray for longitude coordinate DatasetConverter.add_metadata_for_xarray( ds1, - data_names=["x"], + data_names=["x"], dim_names=["dim_data_x"], - attr_names=["x_coord_units","x_coord_longname"] + attr_names=["x_coord_units", "x_coord_longname"] ) -The ``add_metadata_for_xarray()`` method has the ability to define the coordinates of each dimension of a tensor added to the dataset -(e.g. the actual x, y, z values of every element of a 3D tensor or vector of timestamps for a 1D timeseries) +The ``add_metadata_for_xarray()`` method can define the coordinates of each dimension of a tensor added to the dataset +(e.g. the actual x, y, z values of every element of a 3D tensor, or a vector of timestamps for a 1D timeseries). If the user would like to add data variables as coordinates to their Xarray DataArray, the data name of the data variable must match the coordinate name being specified in the ``add_metadata_for_xarray()`` parameters, and the method will recognize the appropriately named data variable -as a coordianate variable to be added to the Xarray DataArray. +as a coordinate variable to be added to the Xarray DataArray. -The ability to extract data (metadata,tensors, etc.)
by their original field names remains intact after any call to +``add_metadata_for_xarray()``. The ``add_metadata_for_xarray()`` method uses metadata names that are reserved for its exclusive use: @@ -102,28 +102,28 @@ The ``add_metadata_for_xarray()`` method uses metadata names that are reserved b "_xarray_data_name" "_xarray_dim_name" - "_xarray_coord_name" - "_xarray_attr_name" + "_xarray_coord_name" + "_xarray_attr_name" -.. note:: +.. note:: - Calling the ``add_metadata_for_xarray()`` method to add the reserved metadata names is necessary for the ``transform_to_xarray()`` method - to read the metadata names and unpack the data for the data format conversion. + Calling the ``add_metadata_for_xarray()`` method to add the reserved metadata names is necessary for the ``transform_to_xarray()`` method + to read the metadata names and unpack the data for the data format conversion. transform_to_xarray ------------------- The ``transform_to_xarray()`` method converts a SmartRedis dataset into a dictionary, with the names of the Xarray DataArrays as the keys and the values -as the actual converted Xarray DataArrays. +as the actual converted Xarray DataArrays. -The transform to xarray method will retrieve the field names store in the Dataset under these metadata names -for populating the native xarray conversion to DataArray method. +The ``transform_to_xarray()`` method will retrieve the field names stored in the Dataset under these metadata names +to populate the native xarray DataArray constructor. .. code-block:: python xarray_ret = DatasetConverter.transform_to_xarray(ds1) -An example of the returned dictionary of the ``transform_to_xarray()`` method: +An example of the returned dictionary of the ``transform_to_xarray()`` method: .. code-block:: python diff --git a/2023-01/smartsim/smartredis/doc/developer/testing.rst b/2023-01/smartsim/smartredis/doc/developer/testing.rst index bea69d1e..d0b899a0 100644 --- a/2023-01/smartsim/smartredis/doc/developer/testing.rst +++ b/2023-01/smartsim/smartredis/doc/developer/testing.rst @@ -8,7 +8,7 @@ Quick instructions To run the tests, assuming that all requirements have been installed: 1. Activate your environment with SmartSim and SmartRedis installed -2. Modify `SMARTREDIS_TEST_CLUSTER` (`True` or `False` and +2. Modify `SR_DB_TYPE` (`Clustered` or `Standalone`) and `SMARTREDIS_TEST_DEVICE` (`gpu` or `cpu`) as necessary in `setup_test_env.sh`. 3. `source setup_test_env.sh` diff --git a/2023-01/smartsim/smartredis/doc/examples/cpp_api_examples.rst b/2023-01/smartsim/smartredis/doc/examples/cpp_api_examples.rst index cdaa8fd7..a24efa00 100644 --- a/2023-01/smartsim/smartredis/doc/examples/cpp_api_examples.rst +++ b/2023-01/smartsim/smartredis/doc/examples/cpp_api_examples.rst @@ -17,8 +17,8 @@ SmartRedis ``DataSet`` API is also provided. .. note:: The C++ API examples are written - to connect to a Redis cluster database. Update the - ``Client`` constructor call to connect to a Redis non-cluster database. + to connect to a clustered backend database. Update the + ``Client`` constructor call to connect to a non-clustered backend database. Tensors ======= @@ -33,7 +33,10 @@ SmartRedis C++ client API. DataSets ======== -The C++ client can store and retrieve tensors and metadata in datasets. +The C++ ``Client`` API stores and retrieves datasets from the backend database. The C++ +``DataSet`` API can store and retrieve tensors and metadata from an in-memory ``DataSet`` object.
+To reiterate, the actual interaction with the backend database, +where a snapshot of the ``DataSet`` object is sent, is handled by the Client API. For further information about datasets, please refer to the :ref:`Dataset section of the Data Structures documentation page `. @@ -97,4 +100,4 @@ source code is also shown. .. literalinclude:: ../../examples/common/mnist_data/data_processing_script.txt :linenos: :language: Python - :lines: 15-20 + :lines: 15-20 \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/doc/examples/fortran_api_examples.rst b/2023-01/smartsim/smartredis/doc/examples/fortran_api_examples.rst index e2553754..9011db3c 100644 --- a/2023-01/smartsim/smartredis/doc/examples/fortran_api_examples.rst +++ b/2023-01/smartsim/smartredis/doc/examples/fortran_api_examples.rst @@ -13,19 +13,19 @@ SmartRedis ``DataSet`` API is also provided. .. note:: The Fortran API examples rely on the ``SSDB`` environment - variable being set to the address and port of the Redis database. + variable being set to the address and port of the backend database. .. note:: The Fortran API examples are written - to connect to a Redis cluster database. Update the - ``Client`` constructor call to connect to a non-cluster Redis instance. + to connect to a clustered backend database. Update the + ``Client`` constructor call to connect to a non-clustered backend instance. Tensors ======= The SmartRedis Fortran client is used to communicate between -a Fortran client and the Redis database. In this example, +a Fortran client and the backend database. In this example, the client will be used to send an array to the database and then unpack the data into another Fortran array. @@ -107,9 +107,13 @@ into a different array. Datasets ======== -The following code snippet shows how to use the Fortran -Client to store and retrieve dataset tensors and -dataset metadata scalars. +The Fortran ``Client`` API stores and retrieves datasets from the backend database. The Fortran +``DataSet`` API can store and retrieve tensors and metadata from an in-memory ``DataSet`` object. +To reiterate, the actual interaction with the backend database, +where a snapshot of the ``DataSet`` object is sent, is handled by the Client API. + +The code below shows how to store and retrieve tensors and metadata +that belong to a ``DataSet``. .. literalinclude:: ../../examples/serial/fortran/smartredis_dataset.F90 :linenos: @@ -300,14 +304,14 @@ constructed by including a suffix based on MPI tasks. The subroutine, in place of an actual simulation, next generates an array of random numbers and puts this array -into the Redis database. +into the backend database. .. code-block:: Fortran call random_number(array) call client%put_tensor(in_key, array, shape(array)) -The Redis database can now be called to run preprocessing +The backend database can now be called to run preprocessing scripts on these data. .. code-block:: Fortran @@ -382,4 +386,4 @@ Python Pre-Processing: .. literalinclude:: ../../examples/common/mnist_data/data_processing_script.txt :linenos: :language: Python - :lines: 15-20 + :lines: 15-20 \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/doc/examples/python_api_examples.rst b/2023-01/smartsim/smartredis/doc/examples/python_api_examples.rst index c1c154ed..1adf575a 100644 --- a/2023-01/smartsim/smartredis/doc/examples/python_api_examples.rst +++ b/2023-01/smartsim/smartredis/doc/examples/python_api_examples.rst @@ -18,13 +18,13 @@ SmartRedis ``DataSet`` API is also provided. ..
note:: The Python API examples are written - to connect to a Redis cluster database. Update the - ``Client`` constructor call to connect to a Redis non-cluster database. + to connect to a clustered backend database. Update the + ``Client`` constructor call to connect to a non-clustered backend database. Tensors ======= The Python client has the ability to send and receive tensors from -the Redis database. The tensors are stored in the Redis database +the backend database. The tensors are stored in the backend database as RedisAI data structures. Additionally, Python client API functions involving tensor data are compatible with Numpy arrays and do not require any other data types. @@ -37,7 +37,10 @@ and do not require any other data types. Datasets ======== -The Python client can store and retrieve tensors and metadata in datasets. +The Python ``Client`` API stores and retrieves datasets from the backend database. The Python +``DataSet`` API can store and retrieve tensors and metadata from an in-memory ``DataSet`` object. +To reiterate, the actual interaction with the backend database, +where a snapshot of the ``DataSet`` object is sent, is handled by the Client API. For further information about datasets, please refer to the :ref:`Dataset section of the Data Structures documentation page `. @@ -92,4 +95,4 @@ looks like this: .. literalinclude:: ../../examples/serial/python/data_processing_script.txt :language: python - :linenos: + :linenos: \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/doc/index.rst b/2023-01/smartsim/smartredis/doc/index.rst index ce76bbf2..2a5f5ed9 100644 --- a/2023-01/smartsim/smartredis/doc/index.rst +++ b/2023-01/smartsim/smartredis/doc/index.rst @@ -13,6 +13,7 @@ installation testing runtime + advanced_topics .. toctree:: :maxdepth: 2 diff --git a/2023-01/smartsim/smartredis/doc/install/docker.rst b/2023-01/smartsim/smartredis/doc/install/docker.rst index 38914c47..11113a93 100644 --- a/2023-01/smartsim/smartredis/doc/install/docker.rst +++ b/2023-01/smartsim/smartredis/doc/install/docker.rst @@ -67,9 +67,8 @@ for a containerized application. # C++ containerized CMake file - project(DockerTester) - cmake_minimum_required(VERSION 3.13) + project(DockerTester) set(CMAKE_CXX_STANDARD 17) @@ -111,9 +110,8 @@ for a containerized application. # C containerized CMake file - project(DockerTester) - cmake_minimum_required(VERSION 3.13) + project(DockerTester) set(CMAKE_CXX_STANDARD 17) @@ -151,15 +149,14 @@ for a containerized application. **Fortran** -In addition to the SmartRedis dynamic -library installed in ``/usr/local/lib/`` and library -header files installed in ``/usr/local/include/smartredis/``, -the Fortran source files needed to compile a Fortran application -with SmartRedis are installed in -``/usr/local/src/SmartRedis/src/fortran/``. +The SmartRedis and SmartRedis-Fortran dynamic +libraries needed to compile a Fortran application +with SmartRedis are installed in ``/usr/local/lib/`` +and the library header files are installed in +``/usr/local/include/smartredis/``. An example CMake file that builds a Fortran application -using the ``smartredis`` image can be found +using the ``smartredis`` images can be found at ``./tests/docker/fortran/CMakeLists.txt``. This CMake file is also shown below along with an example Docker file that invokes the CMake files @@ -169,33 +166,38 @@ for a containerized application.
# Fortran containerized CMake file - project(DockerTester) - cmake_minimum_required(VERSION 3.13) + project(DockerTesterFortran) enable_language(Fortran) + # Configure the build set(CMAKE_CXX_STANDARD 17) - set(CMAKE_C_STANDARD 99) - - set(ftn_client_src - /usr/local/src/SmartRedis/src/fortran/fortran_c_interop.F90 - /usr/local/src/SmartRedis/src/fortran/dataset.F90 - /usr/local/src/SmartRedis/src/fortran/client.F90 + SET(CMAKE_C_STANDARD 99) + set(CMAKE_BUILD_TYPE Debug) + + # Locate dependencies + find_library(SR_LIB smartredis REQUIRED) + find_library(SR_FTN_LIB smartredis-fortran REQUIRED) + set(SMARTREDIS_LIBRARIES + ${SR_LIB} + ${SR_FTN_LIB} ) - find_library(SR_LIB smartredis) - + # Define include directories for header files include_directories(SYSTEM /usr/local/include/smartredis ) - add_executable(docker_test + # Build the test + add_executable(docker_test_fortran test_docker.F90 - ${ftn_client_src} ) + set_target_properties(docker_test_fortran PROPERTIES + OUTPUT_NAME docker_test + ) + target_link_libraries(docker_test_fortran ${SMARTREDIS_LIBRARIES} pthread) - target_link_libraries(docker_test ${SR_LIB} pthread) .. code-block:: docker diff --git a/2023-01/smartsim/smartredis/doc/install/lib.rst b/2023-01/smartsim/smartredis/doc/install/lib.rst index 4b480dc6..dd316f16 100644 --- a/2023-01/smartsim/smartredis/doc/install/lib.rst +++ b/2023-01/smartsim/smartredis/doc/install/lib.rst @@ -9,10 +9,10 @@ The release tarball can also be used instead of cloning the git repository, but the preferred method is a repository clone. The ``Makefile`` included in the top level of the SmartRedis repository has two -main targets: ``lib`` which will create a dynamic library for C, C++, and Python -clients and ``lib-with-fortran`` which will also additionally build a library -for Fortran applications. ``make help`` will list additional targets that are -used for SmartRedis development. +main targets: ``lib``, which will create a dynamic library for C, C++, and +(optionally) Fortran and Python clients; and ``lib-with-fortran``, which +unconditionally builds the Fortran library as well. ``make help`` will list +additional targets that are used for SmartRedis development. .. code-block:: bash @@ -25,6 +25,87 @@ installed in ``SmartRedis/install/include/``. The library installation can be used to easily include SmartRedis capabilities in C++, C, and Fortran applications. +Customizing the library build +----------------------------- + +By default, the SmartRedis library is built as a shared library. For some +applications, however, it is preferable to link to a statically compiled +library. This can be done easily with the command: + +.. code-block:: bash + + cd SmartRedis + # Static build + make lib SR_LINK=Static + # Shared build + make lib SR_LINK=Shared # or skip the SR_LINK variable as this is the default + +When linked statically, the SmartRedis library will have a ``.a`` file extension; when +linked dynamically, it will have a ``.so`` file extension. + +It is also possible to adjust compilation settings for the SmartRedis library. +By default, the library compiles in an optimized build (``Release``), but debug builds +with full symbols (``Debug``) can be created, as can builds instrumented +for code coverage metrics (``Coverage``; this build type is only available with GNU +compilers). Similar to configuring a link type, selecting the build mode can be done +via a variable supplied to make: + +..
code-block:: bash + + cd SmartRedis + # Release build + make lib SR_BUILD=Release # or skip the SR_BUILD variable as this is the default + # Debug build + make lib SR_BUILD=Debug + # Code coverage build + make lib SR_BUILD=Coverage + +The name of the library produced for a Debug mode build is ``smartredis-debug``. +The name of the library produced for a Coverage mode build is ``smartredis-coverage``. +The name of the library produced for a Release mode build is ``smartredis``. +In each case, the file extension is dependent on the link type, ``.so`` or ``.a``. +All libraries will be located in the ``install/lib`` folder. + +Finally, it is possible to build SmartRedis to include Python and/or Fortran support +(both are omitted by default): + +.. code-block:: bash + + cd SmartRedis + # Build support for Python + make lib SR_PYTHON=ON + # Build support for Fortran + make lib SR_FORTRAN=ON # equivalent to make lib-with-fortran + # Build support for Python and Fortran + make lib SR_PYTHON=ON SR_FORTRAN=ON # or make lib-with-fortran SR_PYTHON=ON + +The build mode, link type, and Fortran/Python support settings are fully orthogonal; +any combination of them is supported. For example, a statically linked debug build +with Python support may be achieved via the following command: + +.. code-block:: bash + + cd SmartRedis + make lib SR_LINK=Static SR_BUILD=Debug SR_PYTHON=ON + +The ``SR_LINK``, ``SR_BUILD``, ``SR_PYTHON``, and ``SR_FORTRAN`` variables are fully supported for all +test and build targets in the Makefile. + +Fortran support is built as a secondary library. +The name of the Fortran library produced for a Debug mode build is ``smartredis-fortran-debug``. +The name of the library produced for a Coverage mode build is ``smartredis-fortran-coverage``. +The name of the library produced for a Release mode build is ``smartredis-fortran``. +As with the main library, the file extension is dependent on the link type, ``.so`` or ``.a``. +All libraries will be located in the ``install/lib`` folder. + + +Additional make variables are described in the ``help`` make target: + +.. code-block:: bash + + cd SmartRedis + make help + Linking instructions using compiler flags ----------------------------------------- @@ -36,18 +117,19 @@ following flags should be included for the preprocessor -I/path/to/smartredis/install/include The linking flags will differ slightly depending on whether the Fortran client library needs -to be included. If so, be sure that you ran ``make lib-with-fortran`` and -include the SmartRedis fortran library in the following flags +to be included. If so, be sure that you ran ``make lib-with-fortran`` (or ``make +lib SR_FORTRAN=ON``) and include the SmartRedis Fortran library via the following flags: .. code-block:: text - -L/path/to/smartredis/install/lib -lhiredis -lredis++ -lsmartredis [-lsmartredis-fortran] + -L/path/to/smartredis/install/lib -lsmartredis [-lsmartredis-fortran] .. note:: Fortran applications need to link in both ``smartredis-fortran`` and ``smartredis`` libraries whereas C/C++ applications require only - ``smartredis`` + ``smartredis``. For debug or coverage builds, use the appropriate alternate + libraries as described previously. Linking instructions for CMake-based build systems @@ -59,9 +141,8 @@ with SmartRedis. To build a Fortran client, uncomment the lines after the ..
code-block:: text - project(Example) - cmake_minimum_required(VERSION 3.13) + project(Example) set(CMAKE_CXX_STANDARD 17) diff --git a/2023-01/smartsim/smartredis/doc/installation.rst b/2023-01/smartsim/smartredis/doc/installation.rst index 4bfab0e2..b01a4ae2 100644 --- a/2023-01/smartsim/smartredis/doc/installation.rst +++ b/2023-01/smartsim/smartredis/doc/installation.rst @@ -8,7 +8,8 @@ can be compiled as a library that is linked with an application at compile time. For Python, the clients can be used just like any other pip library. -Before installation, it is recommended to use an OS and compiler that are known to be reliable with SmartRedis. +Before installation, it is recommended to use an OS and compiler that are +known to be reliable with SmartRedis. SmartRedis is tested with the following operating systems on a daily basis: @@ -31,11 +32,11 @@ SmartRedis is tested with the following compilers on a daily basis: * - Compilers (tested daily) * - GNU (GCC/GFortran) - * - Intel (icc/icpc/ifort) + * - Intel oneAPI (icx/icpx/ifort) * - Apple Clang - -SmartRedis has been tested with the following compiler in the past, but on a less regular basis as the compilers listed above: +SmartRedis has been tested with the following compilers in the past, but on +a less regular basis than the compilers listed above: .. list-table:: :widths: 50 @@ -44,9 +45,11 @@ SmartRedis has been tested with the following compiler in the past, but on a les * - Compilers (irregularly tested in the past) * - Cray Clang + * - Intel Classic (icc/icpc) -SmartRedis has been used with the following compilers in the past, but they have not been tested. We do not imply that these compilers work for certain: +SmartRedis has been used with the following compilers in the past, but they have +not been tested. We make no claim that these compilers will work: .. list-table:: :widths: 50 diff --git a/2023-01/smartsim/smartredis/doc/overview.rst b/2023-01/smartsim/smartredis/doc/overview.rst index 25723d41..a17cbdd3 100644 --- a/2023-01/smartsim/smartredis/doc/overview.rst +++ b/2023-01/smartsim/smartredis/doc/overview.rst @@ -48,10 +48,10 @@ below summarizes the language standards for each client. * - Language - Version/Standard * - Python - - 3.7, 3.8, 3.9 + - 3.7, 3.8, 3.9, 3.10 * - C++ - C++17 * - C - C99 * - Fortran - - Fortran 2018 + - Fortran 2018 (GNU/Intel), 2003 (PGI/Nvidia) diff --git a/2023-01/smartsim/smartredis/doc/runtime.rst b/2023-01/smartsim/smartredis/doc/runtime.rst index 896faf23..8cc66895 100644 --- a/2023-01/smartsim/smartredis/doc/runtime.rst +++ b/2023-01/smartsim/smartredis/doc/runtime.rst @@ -10,8 +10,8 @@ and to retrieve the correct information from the database. In the following sections, these requirements will be described. -Setting Redis Database Location -=============================== +Setting Redis Database Location and Type +======================================== The C++, C, and Fortran clients retrieve the Redis database location from the @@ -37,10 +37,22 @@ at three different addresses, each using port ``6379``: export SSDB="10.128.0.153:6379,10.128.0.154:6379,10.128.0.155:6379" -The Python client relies on ``SSDB`` to determine database -location. However, the Python ``Client`` constructor also allows -for the database location to be set as an input parameter. In -this case, it sets SSDB from the input parameter. + +There are two types of Redis databases that can be used by the +SmartRedis library.
A ``Clustered`` database, such as the one in +the previous example, is replicated across multiple shards. +By way of comparison, a ``Standalone`` database only has a single +shard that services all traffic; this is the form used when a +colocated database or a standard deployment with a non-sharded +database is requested. + +The ``SR_DB_TYPE`` environment variable informs the SmartRedis +library which form is in use. Below is an example of setting +``SR_DB_TYPE`` for a Redis cluster: + +.. code-block:: bash + + export SR_DB_TYPE="Clustered" Logging Environment Variables ============================= @@ -69,7 +81,7 @@ though this can happen only if the variables to set up logging are in place. If this parameter is not set, a default logging level of ``INFO`` will be adopted. The runtime impact of log levels NONE or INFO should be minimal on -client performance; however, seting the log level to DEBUG may cause some +client performance; however, setting the log level to DEBUG may cause some degradation. Ensemble Environment Variables @@ -129,15 +141,22 @@ The functions for changing this default behavior are: void use_tensor_ensemble_prefix(bool use_prefix); + void use_dataset_ensemble_prefix(bool use_prefix); + void use_model_ensemble_prefix(bool use_prefix); .. note:: The function ``Client.use_tensor_ensemble_prefix()`` controls - object prefixing for objects stored with ``Client.put_tensor()`` - and all ``DataSet`` components added via ``DataSet.add_tensor()``, - ``DataSet.add_meta_scalar()``, and ``DataSet.add_meta_string()``. + object prefixing for objects stored with ``Client.put_tensor()``. + +.. note:: + + The function ``Client.use_dataset_ensemble_prefix()`` controls + object prefixing for ``DataSet`` components added via + ``DataSet.add_tensor()``, ``DataSet.add_meta_scalar()``, and + ``DataSet.add_meta_string()``. .. note:: diff --git a/2023-01/smartsim/smartredis/doc/testing.rst b/2023-01/smartsim/smartredis/doc/testing.rst index 9cc0bdfb..357b2bcd 100644 --- a/2023-01/smartsim/smartredis/doc/testing.rst +++ b/2023-01/smartsim/smartredis/doc/testing.rst @@ -2,108 +2,67 @@ Testing ******* -The following will demonstrate how to build and run the tests for -each of the SmartSim clients. -Building the Tests -================== -Before building the tests, it is assumed that the base dependencies -for SmartRedis described in the installation instructions have already -been executed. -To build the tests, you first need to install the dependencies for -testing. To download SmartRedis related testing dependencies, run -the following: -.. code-block:: bash - make test-deps -If you wish to run tests on GPU hardware, run the following command: +To build and run all tests on the local host, run the following command in the top +level of the smartredis repository: .. code-block:: bash - make test-deps-gpu - -.. note:: - - The test suite is currently written to be run on CPU hardware to - test model and script executions. Testing on GPU hardware - currently requires modifications to the test suite. + make test .. note:: - The tests require - - GCC > 5 - - CMake > 3 + The tests require: + - GCC >= 5 + - CMake >= 3.13 Since these are usually provided by the system, we do not install them - for the user + for the user. -After installing dependencies and setting up your testing environment with -``setup_test_env.sh``, all tests can be built with the following command: +You can also run tests for individual clients as follows: ..
code-block:: bash - ./setup_test_env.sh - make build-tests + make test-c # run C tests + make test-fortran # run Fortran tests. Implicitly, SR_FORTRAN=ON + make test-cpp # run all C++ tests + make unit-test-cpp # run unit tests for C++ + make test-py # run Python tests. Implicitly, SR_PYTHON=ON + make testpy-cov # run Python tests with coverage. Implicitly, SR_PYTHON=ON SR_BUILD=COVERAGE + make testcpp-cpv # run C++ unit tests with coverage. Implicitly, SR_BUILD=COVERAGE -Starting Redis -============== +Customizing the Tests +===================== -Before running the tests, users will have to spin up a Redis -cluster instance and set the ``SSDB`` environment variable. +Several Make variables can adjust the manner in which tests are run: + - SR_BUILD: change the way that the SmartRedis library is built. (supported: Release, Debug, Coverage; default for testing is Debug) + - SR_FORTRAN: enable Fortran language build and testing (default is OFF) + - SR_PYTHON: enable Python language build and testing (default is OFF) + - SR_TEST_PORT: change the base port for Redis communication (default is 6379) + - SR_TEST_NODES: change the number of Redis shards used for testing (default is 3) + - SR_TEST_REDIS_MODE: change the type(s) of Redis servers used for testing (supported: Clustered, Standalone, UDS; default is Clustered) + - SR_TEST_REDISAI_VER: change the version of RedisAI used for testing (default is v1.2.3; the parameter corresponds to the RedisAI GitHub branch name for the release) + - SR_TEST_DEVICE: change the type of device to test against (supported: cpu, gpu; default is cpu) + - SR_TEST_PYTEST_FLAGS: tweak flags sent to pytest when executing tests (default is -vv -s) -To spin up a local Redis cluster, use the script -in ``utils/create_cluster`` as follows: +These variables are all orthogonal. For example, to run tests for all languages against +a standalone Redis server, execute the following command: .. code-block:: bash - cd /smartredis # navigate to the top level dir of smartredis - conda activate env # activate python env with SmartRedis requirements - source setup_test_env.sh # Setup smartredis environment - cd utils/create_cluster - python local_cluster.py # spin up Redis cluster locally - export SSDB="127.0.0.1:6379,127.0.0.1:6380,127.0.0.1:6381" # Set database location + make test SR_FORTRAN=ON SR_PYTHON=ON SR_TEST_REDIS_MODE=Standalone - # run the tests (described below) - - cd utils/create_cluster - python local_cluster.py --stop # stop the Redis cluster -A similar script ``utils/create_cluster/slurm_cluster.py`` -assists with launching a Redis cluster for testing on -Slurm managed machines. This script has only been tested -on a Cray XC, and it may not be portable to all machines. -Running the Tests -================= -.. note:: - If you are running the tests in a new terminal from the - one used to build the tests and run the Redis cluster, - remember to load your python environment with SmartRedis - dependencies, source the ``setup_test_env.sh`` file, - and set the ``SSDB`` environment variable. -To build and run all tests, run the following command in the top level of the smartredis repository. +Similarly, it is possible to run the tests against each type of Redis server in sequence +(all tests against a standalone Redis server, then all tests against a clustered server, +then all tests against a standalone server with a Unix domain socket connection) via the +following command: ..
code-block:: bash - make test + make test SR_FORTRAN=ON SR_PYTHON=ON SR_TEST_REDIS_MODE=All -You can also run tests for individual clients as follows: +.. note:: -.. code-block:: bash + Unix domain socket connections are not supported on MacOS. If the SmartRedis test + system detects that it is running on MacOS, it will automatically skip UDS testing. - make test-c # run C tests - make test-fortran # run Fortran tests - make test-cpp # run all C++ tests - make unit-test-cpp # run unit tests for C++ - make test-py # run Python tests - make testpy-cov # run python tests with coverage - make testcpp-cpv # run cpp unit tests with coverage diff --git a/2023-01/smartsim/smartredis/examples/CMakeLists.txt b/2023-01/smartsim/smartredis/examples/CMakeLists.txt new file mode 100644 index 00000000..0d58cd50 --- /dev/null +++ b/2023-01/smartsim/smartredis/examples/CMakeLists.txt @@ -0,0 +1,40 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Project definition for the SmartRedis-Examples project +cmake_minimum_required(VERSION 3.13) +project(SmartRedis-Examples) + +# Enable language support for the examples +enable_language(C) +enable_language(CXX) +if (SR_FORTRAN) + enable_language(Fortran) +endif() + +# Bring in subdirectories +add_subdirectory(parallel) +add_subdirectory(serial) diff --git a/2023-01/smartsim/smartredis/examples/parallel/CMakeLists.txt b/2023-01/smartsim/smartredis/examples/parallel/CMakeLists.txt new file mode 100644 index 00000000..29312968 --- /dev/null +++ b/2023-01/smartsim/smartredis/examples/parallel/CMakeLists.txt @@ -0,0 +1,41 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Project definition for the SmartRedis-Examples-Parallel project +cmake_minimum_required(VERSION 3.13) +project(SmartRedis-Examples-Parallel) + +# Enable language support for the examples +enable_language(CXX) +if (SR_FORTRAN) + enable_language(Fortran) +endif() + +# Bring in subdirectories +add_subdirectory(cpp) +if (SR_FORTRAN) + add_subdirectory(fortran) +endif() diff --git a/2023-01/smartsim/smartredis/examples/parallel/cpp/CMakeLists.txt b/2023-01/smartsim/smartredis/examples/parallel/cpp/CMakeLists.txt index 2dba75e3..17bee87b 100644 --- a/2023-01/smartsim/smartredis/examples/parallel/cpp/CMakeLists.txt +++ b/2023-01/smartsim/smartredis/examples/parallel/cpp/CMakeLists.txt @@ -24,45 +24,68 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -project(CppClientExamples) - +# Project definition for the SmartRedis-Examples-Parallel-Cpp project cmake_minimum_required(VERSION 3.13) +project(SmartRedis-Examples-Parallel-Cpp) + +# Enable language support for the examples +enable_language(CXX) -set(CMAKE_BUILD_TYPE Release) +# Configure the build set(CMAKE_CXX_STANDARD 17) +set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/../../..") +include(smartredis_defs) + +# Assume by default that users should link against the +# install directory in this repository +if(NOT DEFINED SMARTREDIS_INSTALL_PATH) + set(SMARTREDIS_INSTALL_PATH "../../../install/") +endif() +# Locate dependencies find_package(MPI) +find_library(SR_LIB ${SMARTREDIS_LIB} + PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH + REQUIRED + ${SMARTREDIS_LINK_MODE} +) -find_library(SR_LIB smartredis PATHS ../../../install/lib NO_DEFAULT_PATH REQUIRED) +# Select libraries for build +if (STATIC_BUILD) + # Static builds have an extra dependency on the Pthreads library + find_package(Threads REQUIRED) + set(SMARTREDIS_LIBRARIES + ${SR_LIB} + Threads::Threads + ) +else() + # Shared builds only need the SmartRedis library + set(SMARTREDIS_LIBRARIES ${SR_LIB}) +endif() +# Define include directories for header files include_directories(SYSTEM /usr/local/include ${MPI_INCLUDE_PATH} - ../../../install/include -) - -set(ftn_client_src - ../../../src/fortran/fortran_c_interop.F90 - ../../../src/fortran/dataset.F90 - ../../../src/fortran/client.F90 + ${SMARTREDIS_INSTALL_PATH}/include ) -# Build executables - -add_executable(smartredis_put_get_3D - smartredis_put_get_3D.cpp - ${ftn_client_src} -) -target_link_libraries(smartredis_put_get_3D - MPI::MPI_CXX - ${SR_LIB} +# Define all the examples to be built +list(APPEND EXECUTABLES + smartredis_put_get_3D + smartredis_mnist ) -add_executable(smartredis_mnist - smartredis_mnist.cpp - ${ftn_client_src} -) 
-target_link_libraries(smartredis_mnist - MPI::MPI_CXX - ${SR_LIB} -) +# Build the examples +foreach(EXECUTABLE ${EXECUTABLES}) + add_executable(${EXECUTABLE}_cpp_parallel + ${EXECUTABLE}.cpp + ) + set_target_properties(${EXECUTABLE}_cpp_parallel PROPERTIES + OUTPUT_NAME ${EXECUTABLE} + ) + target_link_libraries(${EXECUTABLE}_cpp_parallel + MPI::MPI_CXX + ${SMARTREDIS_LIBRARIES} + ) +endforeach() diff --git a/2023-01/smartsim/smartredis/examples/parallel/cpp/smartredis_mnist.cpp b/2023-01/smartsim/smartredis/examples/parallel/cpp/smartredis_mnist.cpp index 05266e00..97a8d24f 100644 --- a/2023-01/smartsim/smartredis/examples/parallel/cpp/smartredis_mnist.cpp +++ b/2023-01/smartsim/smartredis/examples/parallel/cpp/smartredis_mnist.cpp @@ -43,7 +43,7 @@ void run_mnist(const std::string& model_name, // Load the mnist image from a file using MPI rank 0 if (rank == 0) { - std::string image_file = "../../../common/mnist_data/one.raw"; + std::string image_file = "../../common/mnist_data/one.raw"; std::ifstream fin(image_file, std::ios::binary); std::ostringstream ostream; ostream << fin.rdbuf(); @@ -104,8 +104,7 @@ int main(int argc, char* argv[]) { logger_name += std::to_string(rank); // Initialize a Client object - bool cluster_mode = true; // Set to false if not using a clustered database - SmartRedis::Client client(cluster_mode, logger_name); + SmartRedis::Client client(logger_name); // Set the model and script that will be used by all ranks // from MPI rank 0. @@ -113,16 +112,14 @@ int main(int argc, char* argv[]) { // Build model key, file name, and then set model // from file using client API std::string model_key = "mnist_model"; - std::string model_file = "../../../"\ - "common/mnist_data/mnist_cnn.pt"; + std::string model_file = "../../common/mnist_data/mnist_cnn.pt"; client.set_model_from_file(model_key, model_file, "TORCH", "CPU", 20); // Build script key, file name, and then set script // from file using client API std::string script_key = "mnist_script"; - std::string script_file = "../../../common/mnist_data/" - "data_processing_script.txt"; + std::string script_file = "../../common/mnist_data/data_processing_script.txt"; client.set_script_from_file(script_key, "CPU", script_file); // Get model and script to illustrate client API diff --git a/2023-01/smartsim/smartredis/examples/parallel/cpp/smartredis_put_get_3D.cpp b/2023-01/smartsim/smartredis/examples/parallel/cpp/smartredis_put_get_3D.cpp index 370a3a24..fbdc86ee 100644 --- a/2023-01/smartsim/smartredis/examples/parallel/cpp/smartredis_put_get_3D.cpp +++ b/2023-01/smartsim/smartredis/examples/parallel/cpp/smartredis_put_get_3D.cpp @@ -57,8 +57,7 @@ int main(int argc, char* argv[]) { logger_name += std::to_string(rank); // Initialize a SmartRedis client - bool cluster_mode = true; // Set to false if not using a clustered database - SmartRedis::Client client(cluster_mode, logger_name); + SmartRedis::Client client(logger_name); // Put the tensor in the database std::string key = "3d_tensor_" + std::to_string(rank); diff --git a/2023-01/smartsim/smartredis/examples/parallel/fortran/CMakeLists.txt b/2023-01/smartsim/smartredis/examples/parallel/fortran/CMakeLists.txt index 8cf05c2b..5d985024 100644 --- a/2023-01/smartsim/smartredis/examples/parallel/fortran/CMakeLists.txt +++ b/2023-01/smartsim/smartredis/examples/parallel/fortran/CMakeLists.txt @@ -24,58 +24,114 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-project(FortranClientExamples) - +# Project definition for the SmartRedis-Examples-Parallel-Fortran project cmake_minimum_required(VERSION 3.13) +project(SmartRedis-Examples-Parallel-Fortran) +# Enable language support for the examples enable_language(Fortran) +enable_language(CXX) -set(CMAKE_VERBOSE_MAKEFILE ON) -set(CMAKE_BUILD_TYPE Debug) +# Configure the build set(CMAKE_CXX_STANDARD 17) -set(CMAKE_C_STANDARD 99) +set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/../../..") +include(smartredis_defs) # Assume by default that users should link against the install directory in this repository if(NOT DEFINED SMARTREDIS_INSTALL_PATH) set(SMARTREDIS_INSTALL_PATH "../../../install/") endif() -# Specify all pre-processor and library dependencies +# Locate dependencies +# . MPI find_package(MPI REQUIRED) IF(NOT MPI_Fortran_FOUND) message(FATAL_ERROR "Could not find Fortran MPI components") endif() -find_library(SMARTREDIS_LIBRARY smartredis PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH REQUIRED) -find_library(SMARTREDIS_FORTRAN_LIBRARY smartredis-fortran PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH REQUIRED) -find_library(HIREDIS hiredis PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH REQUIRED) -find_library(REDISPP redis++ PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH REQUIRED) -set(SMARTREDIS_LIBRARIES - ${SMARTREDIS_LIBRARY} - ${SMARTREDIS_FORTRAN_LIBRARY} - ${HIREDIS} - ${REDISPP} +# . Main SmartRedis Library (C/C++ based) +add_library(smartredis-main ${SMARTREDIS_LINK_MODE} IMPORTED) +find_library(SR_LIB ${SMARTREDIS_LIB} + PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH + REQUIRED + ${SMARTREDIS_LINK_MODE} +) +set_target_properties(smartredis-main PROPERTIES + IMPORTED_LOCATION ${SR_LIB} +) +# . SmartRedis Fortran Library (Fortran based) +add_library(smartredis-fortran ${SMARTREDIS_LINK_MODE} IMPORTED) +find_library(SR_FTN_LIB ${SMARTREDIS_FORTRAN_LIB} + PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH + REQUIRED + ${SMARTREDIS_LINK_MODE} +) +set_target_properties(smartredis-fortran PROPERTIES + IMPORTED_LOCATION ${SR_FTN_LIB} ) + +# Select libraries for build +if (STATIC_BUILD) + # The CMake "preferred" approach only seems to work with the GNU + # compiler. 
We will streamline this in the future + if(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU") + # Mark that SmartRedis requires the C++ linker + set_target_properties(smartredis-main PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" + ) + set_target_properties(smartredis-fortran PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES "FORTRAN" + ) + else() # Tested with PGI, Intel + # For other compilers, don't set languages so that CMake will use the Fortran linker (default) + + # Add the stdc++ linker flag + set(CMAKE_EXE_LINKER_FLAGS "-lstdc++ ${CMAKE_EXE_LINKER_FLAGS}") + endif() + + # Static builds have an extra dependency on the Pthreads library + # The order of libraries here is crucial to get dependencies covered + find_package(Threads REQUIRED) + set(SMARTREDIS_LIBRARIES + smartredis-fortran + smartredis-main + Threads::Threads + ) +else() + # Shared builds only need the SmartRedis libraries + set(SMARTREDIS_LIBRARIES + smartredis-fortran + smartredis-main + ) +endif() + +# Define include directories for header files include_directories(SYSTEM /usr/local/include ${MPI_INCLUDE_PATH} ${SMARTREDIS_INSTALL_PATH}/include ) -# Define all the tests to be built +# Stuff the example_utils into a library to enable building the examples in parallel +add_library(example_utils STATIC example_utils.F90) + +# Define all the examples to be built list(APPEND EXECUTABLES smartredis_dataset smartredis_mnist smartredis_put_get_3D ) +# Build the examples foreach(EXECUTABLE ${EXECUTABLES}) - - add_executable(${EXECUTABLE} + add_executable(${EXECUTABLE}_fortran_parallel ${EXECUTABLE}.F90 - example_utils.F90 ) - target_link_libraries(${EXECUTABLE} + set_target_properties(${EXECUTABLE}_fortran_parallel PROPERTIES + OUTPUT_NAME ${EXECUTABLE} + ) + target_link_libraries(${EXECUTABLE}_fortran_parallel ${SMARTREDIS_LIBRARIES} MPI::MPI_Fortran + example_utils ) endforeach() diff --git a/2023-01/smartsim/smartredis/examples/parallel/fortran/example_utils.F90 b/2023-01/smartsim/smartredis/examples/parallel/fortran/example_utils.F90 index 23e5397e..4e999eba 100644 --- a/2023-01/smartsim/smartredis/examples/parallel/fortran/example_utils.F90 +++ b/2023-01/smartsim/smartredis/examples/parallel/fortran/example_utils.F90 @@ -44,15 +44,15 @@ end function irand logical function use_cluster() - character(len=16) :: smartredis_test_cluster + character(len=16) :: server_type - call get_environment_variable('SMARTREDIS_TEST_CLUSTER', smartredis_test_cluster) - smartredis_test_cluster = to_lower(smartredis_test_cluster) - if (len_trim(smartredis_test_cluster)>0) then - select case (smartredis_test_cluster) - case ('true') + call get_environment_variable('SR_DB_TYPE', server_type) + server_type = to_lower(server_type) + if (len_trim(server_type)>0) then + select case (server_type) + case ('clustered') use_cluster = .true. - case ('false') + case ('standalone') use_cluster = .false. case default use_cluster = .false. diff --git a/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_dataset.F90 b/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_dataset.F90 index b16534f4..7b8da6a8 100644 --- a/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_dataset.F90 +++ b/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_dataset.F90 @@ -85,7 +85,7 @@ program main if (result .ne. SRNoError) error stop 'dataset%get_meta_scalars failed' ! Initialize a client - result = client%initialize(.true., "smartredis_dataset") ! Change .false. to .true. 
if not using a clustered database + result = client%initialize("smartredis_dataset") if (result .ne. SRNoError) error stop 'client%initialize failed' ! Send the dataset to the database via the client diff --git a/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_mnist.F90 b/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_mnist.F90 index 780c2415..90771a98 100644 --- a/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_mnist.F90 +++ b/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_mnist.F90 @@ -35,9 +35,9 @@ program mnist_example #include "enum_fortran.inc" character(len=*), parameter :: model_key = "mnist_model" - character(len=*), parameter :: model_file = "../../../common/mnist_data/mnist_cnn.pt" + character(len=*), parameter :: model_file = "../../common/mnist_data/mnist_cnn.pt" character(len=*), parameter :: script_key = "mnist_script" - character(len=*), parameter :: script_file = "../../../common/mnist_data/data_processing_script.txt" + character(len=*), parameter :: script_file = "../../common/mnist_data/data_processing_script.txt" type(client_type) :: client integer :: err_code, pe_id, result @@ -51,7 +51,7 @@ program mnist_example write(key_suffix, "(A,I1.1)") "_",pe_id ! Initialize a client - result = client%initialize(.true., "smartredis_mnist") ! Change .false. to .true. if not using a clustered database + result = client%initialize("smartredis_mnist") if (result .ne. SRNoError) error stop 'client%initialize failed' ! Set up model and script for the computation diff --git a/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_put_get_3D.F90 b/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_put_get_3D.F90 index e1f30898..cc26dd2b 100644 --- a/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_put_get_3D.F90 +++ b/2023-01/smartsim/smartredis/examples/parallel/fortran/smartredis_put_get_3D.F90 @@ -57,7 +57,7 @@ program main call random_number(recv_array_real_64) ! Initialize a client - result = client%initialize(.true., "smartredis_put_get_3D") ! Change .false. to .true. if not using a clustered database + result = client%initialize("smartredis_put_get_3D") if (result .ne. SRNoError) error stop 'client%initialize failed' ! Add a tensor to the database and verify that we can retrieve it diff --git a/2023-01/smartsim/smartredis/examples/serial/CMakeLists.txt b/2023-01/smartsim/smartredis/examples/serial/CMakeLists.txt new file mode 100644 index 00000000..6e62c20a --- /dev/null +++ b/2023-01/smartsim/smartredis/examples/serial/CMakeLists.txt @@ -0,0 +1,43 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Project definition for the SmartRedis-Examples-Serial project +cmake_minimum_required(VERSION 3.13) +project(SmartRedis-Examples-Serial) + +# Enable language support for the examples +enable_language(C) +enable_language(CXX) +if (SR_FORTRAN) + enable_language(Fortran) +endif() + +# Bring in subdirectories +add_subdirectory(c) +add_subdirectory(cpp) +if (SR_FORTRAN) + add_subdirectory(fortran) +endif() diff --git a/2023-01/smartsim/smartredis/examples/serial/c/CMakeLists.txt b/2023-01/smartsim/smartredis/examples/serial/c/CMakeLists.txt index 4184cfec..116f5355 100644 --- a/2023-01/smartsim/smartredis/examples/serial/c/CMakeLists.txt +++ b/2023-01/smartsim/smartredis/examples/serial/c/CMakeLists.txt @@ -24,35 +24,79 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -project(CClientExamples) - +# Project definition for the SmartRedis-Examples-Serial-C project cmake_minimum_required(VERSION 3.13) +project(SmartRedis-Examples-Serial-C) + +# Enable language support for the examples +enable_language(C) +enable_language(CXX) -set(CMAKE_BUILD_TYPE RELEASE) +# Configure the build set(CMAKE_CXX_STANDARD 17) SET(CMAKE_C_STANDARD 99) +set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/../../..") +include(smartredis_defs) -find_library(SR_LIB smartredis PATHS ../../../install/lib - NO_DEFAULT_PATH REQUIRED) +# Assume by default that users should link against the +# install directory in this repository +if(NOT DEFINED SMARTREDIS_INSTALL_PATH) + set(SMARTREDIS_INSTALL_PATH "../../../install/") +endif() -include_directories(SYSTEM - /usr/local/include - ../../../install/include +# Locate dependencies +add_library(smartredis-main ${SMARTREDIS_LINK_MODE} IMPORTED) +find_library(SR_LIB ${SMARTREDIS_LIB} + PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH + REQUIRED + ${SMARTREDIS_LINK_MODE} ) - -add_executable(example_put_unpack_1D - example_put_unpack_1D.c +set_target_properties(smartredis-main PROPERTIES + IMPORTED_LOCATION ${SR_LIB} ) -target_link_libraries(example_put_unpack_1D - ${SR_LIB} -) +# Select libraries for build +if (STATIC_BUILD) + # Mark that SmartRedis requires the C++ linker + set_target_properties(smartredis-main PROPERTIES + IMPORTED_LOCATION ${SR_LIB} + IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" + ) + + # Static builds have an extra dependency on the Pthreads library + find_package(Threads REQUIRED) + set(SMARTREDIS_LIBRARIES + smartredis-main + Threads::Threads + ) +else() + # Shared builds only need the SmartRedis library + set(SMARTREDIS_LIBRARIES + smartredis-main + ) +endif() -add_executable(example_put_get_3D - example_put_get_3D.c +# Define include directories for header files +include_directories(SYSTEM + /usr/local/include + ${SMARTREDIS_INSTALL_PATH}/include ) -target_link_libraries(example_put_get_3D - ${SR_LIB} +# Define all the examples to be built +list(APPEND EXECUTABLES + example_put_unpack_1D + example_put_get_3D ) +# Build the examples 
+foreach(EXECUTABLE ${EXECUTABLES}) + add_executable(${EXECUTABLE}_c_serial + ${EXECUTABLE}.c + ) + set_target_properties(${EXECUTABLE}_c_serial PROPERTIES + OUTPUT_NAME ${EXECUTABLE} + ) + target_link_libraries(${EXECUTABLE}_c_serial + ${SMARTREDIS_LIBRARIES} + ) +endforeach() diff --git a/2023-01/smartsim/smartredis/examples/serial/c/example_put_get_3D.c b/2023-01/smartsim/smartredis/examples/serial/c/example_put_get_3D.c index e27d9d03..ea66f0d3 100644 --- a/2023-01/smartsim/smartredis/examples/serial/c/example_put_get_3D.c +++ b/2023-01/smartsim/smartredis/examples/serial/c/example_put_get_3D.c @@ -47,8 +47,7 @@ int main(int argc, char* argv[]) { dims[2] = 3; void* client = NULL; - bool cluster_mode = true; // Set to false if not using a clustered database - if (SRNoError != SmartRedisCClient(cluster_mode, logger_name, cid_len, &client)) { + if (SRNoError != SimpleCreateClient(logger_name, cid_len, &client)) { printf("Client initialization failed!\n"); exit(-1); } @@ -66,7 +65,7 @@ int main(int argc, char* argv[]) { for(size_t i=0; i tensor_2(n_values, 0); for(size_t i=0; i img(n_values, 0); // Load the MNIST image from a file - std::string image_file = "../../../common/mnist_data/one.raw"; + std::string image_file = "../../common/mnist_data/one.raw"; std::ifstream fin(image_file, std::ios::binary); std::ostringstream ostream; ostream << fin.rdbuf(); @@ -74,25 +74,20 @@ void run_mnist(const std::string& model_name, int main(int argc, char* argv[]) { - // Initialize a client for setting the model and script. - // In general, Client objects should be reused, but for this - // small example, Client objects are not reused. - bool cluster_mode = true; // Set to false if not using a clustered database - SmartRedis::Client client(cluster_mode, __FILE__); + // Initialize a client for setting the model and script + SmartRedis::Client client(__FILE__); // Build model key, file name, and then set model // from file using client API std::string model_key = "mnist_model"; - std::string model_file = "../../../"\ - "common/mnist_data/mnist_cnn.pt"; - client.set_model_from_file(model_key, model_file, - "TORCH", "CPU", 20); + std::string model_file = "../../common/mnist_data/mnist_cnn.pt"; + client.set_model_from_file( + model_key, model_file, "TORCH", "CPU", 20); // Build script key, file name, and then set script // from file using client API std::string script_key = "mnist_script"; - std::string script_file = "../../../common/mnist_data/" - "data_processing_script.txt"; + std::string script_file = "../../common/mnist_data/data_processing_script.txt"; client.set_script_from_file(script_key, "CPU", script_file); // Get model and script to illustrate client API diff --git a/2023-01/smartsim/smartredis/examples/serial/cpp/smartredis_model.cpp b/2023-01/smartsim/smartredis/examples/serial/cpp/smartredis_model.cpp index 0c520f21..a76925e5 100644 --- a/2023-01/smartsim/smartredis/examples/serial/cpp/smartredis_model.cpp +++ b/2023-01/smartsim/smartredis/examples/serial/cpp/smartredis_model.cpp @@ -37,7 +37,7 @@ int main(int argc, char* argv[]) { std::vector img(n_values, 0); // Load the mnist image from a file - std::string image_file = "../../../common/mnist_data/one.raw"; + std::string image_file = "../../common/mnist_data/one.raw"; std::ifstream fin(image_file, std::ios::binary); std::ostringstream ostream; ostream << fin.rdbuf(); @@ -47,17 +47,16 @@ int main(int argc, char* argv[]) { std::memcpy(img.data(), tmp.data(), img.size()*sizeof(float)); // Initialize a SmartRedis client to connect to the Redis 
database - bool cluster_mode = true; // Set to false if not using a clustered database - SmartRedis::Client client(cluster_mode, __FILE__); + SmartRedis::Client client(__FILE__); // Use the client to set a model in the database from a file std::string model_key = "mnist_model"; - std::string model_file = "../../../common/mnist_data/mnist_cnn.pt"; + std::string model_file = "../../common/mnist_data/mnist_cnn.pt"; client.set_model_from_file(model_key, model_file, "TORCH", "CPU", 20); // Use the client to set a script from the database form a file std::string script_key = "mnist_script"; - std::string script_file = "../../../common/mnist_data/data_processing_script.txt"; + std::string script_file = "../../common/mnist_data/data_processing_script.txt"; client.set_script_from_file(script_key, "CPU", script_file); // Declare keys that we will use in forthcoming client commands diff --git a/2023-01/smartsim/smartredis/examples/serial/cpp/smartredis_put_get_3D.cpp b/2023-01/smartsim/smartredis/examples/serial/cpp/smartredis_put_get_3D.cpp index c751cd5b..cc7cbfe8 100644 --- a/2023-01/smartsim/smartredis/examples/serial/cpp/smartredis_put_get_3D.cpp +++ b/2023-01/smartsim/smartredis/examples/serial/cpp/smartredis_put_get_3D.cpp @@ -44,11 +44,10 @@ int main(int argc, char* argv[]) { size_t n_values = dim1 * dim2 * dim3; std::vector input_tensor(n_values, 0); for(size_t i=0; i& inputs = std::vector(), @@ -383,6 +416,7 @@ class Client : public SRObject * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -398,6 +432,7 @@ class Client : public SRObject int num_gpus, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector& inputs = std::vector(), @@ -423,6 +458,7 @@ class Client : public SRObject * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for information purposes * \param inputs One or more names of model input nodes * (TF models only). For other models, provide an @@ -438,6 +474,7 @@ class Client : public SRObject const std::string& device, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector& inputs = std::vector(), @@ -462,6 +499,7 @@ class Client : public SRObject * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -477,6 +515,7 @@ class Client : public SRObject int num_gpus, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector& inputs = std::vector(), @@ -712,7 +751,7 @@ class Client : public SRObject * The first_gpu and num_gpus parameters must match those used * when the model was stored. 
* \param name The name associated with the model - * \param first_cpu the first GPU (zero-based) to use with the model + * \param first_gpu the first GPU (zero-based) to use with the model * \param num_gpus the number of gpus for which the model was stored * \throw SmartRedis::Exception if model deletion fails */ @@ -739,7 +778,7 @@ class Client : public SRObject * The first_gpu and num_gpus parameters must match those used * when the script was stored. * \param name The name associated with the script - * \param first_cpu the first GPU (zero-based) to use with the script + * \param first_gpu the first GPU (zero-based) to use with the script * \param num_gpus the number of gpus for which the script was stored * \throw SmartRedis::Exception if script deletion fails */ @@ -783,7 +822,7 @@ class Client : public SRObject * \details The dataset key used to check for existence * may be formed by applying a prefix to the supplied * name. See set_data_source() - * and use_tensor_ensemble_prefix() for more details. + * and use_dataset_ensemble_prefix() for more details. * \param name The dataset name to be checked in the database * \returns Returns true if the dataset exists in the database * \throw SmartRedis::Exception if dataset exists command fails @@ -830,7 +869,7 @@ class Client : public SRObject * \details The dataset key used to check for existence * may be formed by applying a prefix to the supplied * name. See set_data_source() - * and use_tensor_ensemble_prefix() for more details. + * and use_dataset_ensemble_prefix() for more details. * \param name The dataset name to be checked in the database * \param poll_frequency_ms The time delay between checks, * in milliseconds @@ -886,26 +925,45 @@ class Client : public SRObject void set_data_source(std::string source_id); /*! - * \brief Control whether names of tensor and dataset keys are + * \brief Control whether names of tensor keys are * prefixed (e.g. in an ensemble) when forming database keys. * \details This function can be used to avoid key collisions in an * ensemble by prepending the string value from the - * environment variable SSKEYIN to tensor and dataset names. + * environment variable SSKEYIN to tensor names. * Prefixes will only be used if they were previously set * through the environment variables SSKEYOUT and SSKEYIN. * Keys of entities created before this function is called * will not be retroactively prefixed. - * By default, the client prefixes tensor and dataset keys + * By default, the client prefixes tensor keys * with the first prefix specified with the SSKEYIN * and SSKEYOUT environment variables. * - * \param use_prefix If set to true, all future operations - * on tensors and datasets will use - * a prefix, if available. + * \param use_prefix If set to true, all future operations on tensors + * will use a prefix, if available. * \throw SmartRedis::Exception for failed activation of tensor prefixing */ void use_tensor_ensemble_prefix(bool use_prefix); + /*! + * \brief Control whether names of dataset keys are + * prefixed (e.g. in an ensemble) when forming database keys. + * \details This function can be used to avoid key collisions in an + * ensemble by prepending the string value from the + * environment variable SSKEYIN to dataset names. + * Prefixes will only be used if they were previously set + * through the environment variables SSKEYOUT and SSKEYIN. + * Keys of entities created before this function is called + * will not be retroactively prefixed. 
+ * By default, the client prefixes dataset keys + * with the first prefix specified with the SSKEYIN + * and SSKEYOUT environment variables. + * + * \param use_prefix If set to true, all future operations on datasets + * will use a prefix, if available. + * \throw SmartRedis::Exception for failed activation of dataset prefixing + */ + void use_dataset_ensemble_prefix(bool use_prefix); + /*! * \brief Control whether model and script keys are * prefixed (e.g. in an ensemble) when forming database keys. @@ -938,9 +996,9 @@ * prefixed. By default, the client prefixes aggregation * list keys with the first prefix specified with the SSKEYIN * and SSKEYOUT environment variables. Note that - * use_tensor_ensemble_prefix() controls prefixing + * use_dataset_ensemble_prefix() controls prefixing * for the entities in the aggregation list, and - * use_tensor_ensemble_prefix() should be given the + * use_dataset_ensemble_prefix() should be given the * same value that was used during the initial * setting of the DataSet into the database. * \param use_prefix If set to true, all future operations @@ -1131,10 +1189,10 @@ * \brief Rename an aggregation list * \details The old and new aggregation list key used to find and * relocate the list may be formed by applying prefixes to - * the supplied old_name and new_name. See set_data_source() + * the supplied src_name and dest_name. See set_data_source() * and use_list_ensemble_prefix() for more details. - * \param old_name The old list name - * \param new_name The new list name + * \param src_name The initial list name + * \param dest_name The target list name * \throw SmartRedis::Exception if the command fails */ void rename_list(const std::string& src_name, @@ -1250,6 +1308,27 @@ const int start_index, const int end_index); + /*! + * \brief Reconfigure the chunking size that Redis uses for model + * serialization, replication, and the model_get command. + * \details This method triggers the AI.CONFIG method in the Redis + * database to change the model chunking size. + * + * NOTE: The default size of 511MB should be fine for most + * applications, so it is expected to be very rare that a + * client calls this method. It is not necessary to call + * this method for a model to be chunked. + * \param chunk_size The new chunk size in bytes + * \throw SmartRedis::Exception if the command fails. + */ + void set_model_chunk_size(int chunk_size); + + /*! + * \brief Create a string representation of the client + * \returns A string containing client details + */ + std::string to_string() const; + protected: /*! @@ -1279,7 +1358,7 @@ class Client : public SRObject */ inline CommandReply _run(AddressAtCommand& cmd) { - return this->_redis_server->run(cmd); + return _redis_server->run(cmd); } /*! @@ -1289,7 +1368,7 @@ class Client : public SRObject */ inline CommandReply _run(AddressAnyCommand& cmd) { - return this->_redis_server->run(cmd); + return _redis_server->run(cmd); } /*! @@ -1299,7 +1378,7 @@ class Client : public SRObject */ inline CommandReply _run(SingleKeyCommand& cmd) { - return this->_redis_server->run(cmd); + return _redis_server->run(cmd); } /*! @@ -1309,7 +1388,7 @@ class Client : public SRObject */ inline CommandReply _run(MultiKeyCommand& cmd) { - return this->_redis_server->run(cmd); + return _redis_server->run(cmd); } /*!
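A minimal usage sketch of the client additions documented above (the tensor/dataset prefix split, set_model_chunk_size(), and the to_string()/stream support); it assumes an already-connected client, and the names and sizes are illustrative:

    #include <iostream>
    #include "client.h"

    // Sketch: exercise the new public conveniences on an existing client.
    void tune_client(SmartRedis::Client& client)
    {
        client.use_tensor_ensemble_prefix(true);       // prefix tensor keys via SSKEYIN/SSKEYOUT
        client.use_dataset_ensemble_prefix(false);     // leave dataset keys unprefixed
        client.set_model_chunk_size(64 * 1024 * 1024); // 64 MB chunks instead of the 511 MB default
        std::cout << client << std::endl;              // operator<< delegates to Client::to_string()
    }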
@@ -1319,7 +1398,7 @@ class Client : public SRObject */ inline CommandReply _run(CompoundCommand& cmd) { - return this->_redis_server->run(cmd); + return _redis_server->run(cmd); } /*! @@ -1329,14 +1408,14 @@ class Client : public SRObject */ inline std::vector<CommandReply> _run(CommandList& cmd_list) { - return this->_redis_server->run(cmd_list); + return _redis_server->run(cmd_list); } /*! * \brief Set the prefixes that are used for set and get methods - * using SSKEYIN and SSKEYOUT environment variables. + * using SSKEYIN and SSKEYOUT config settings */ - void _set_prefixes_from_env(); + void _get_prefix_settings(); /*! * \brief Get the key prefix for placement methods @@ -1376,7 +1455,7 @@ class Client : public SRObject /*! * \brief Execute the command to retrieve a subset of a DataSet * aggregation list - * \param name The name of the dataset aggregation list + * \param list_name The name of the dataset aggregation list * \param start_index The starting index of the range * (inclusive, starting at zero) * \param end_index The ending index of the range @@ -1390,20 +1469,21 @@ const int start_index, const int end_index); + + // Add a retrieved tensor to a dataset /*! - * \brief Retrieve a tensor and add it to the dataset object - * \param dataset The dataset which will be augmented with the - * retrieved tensor - * \param name The name (not key) of the tensor to retrieve and add + * \brief Add a tensor retrieved via get_tensor() to a dataset + * \param dataset The dataset which will receive the tensor + * \param name The name by which the tensor shall be added * to the dataset - * \param key The key (not name) of the tensor to retrieve and add - * to the dataset - * \throw SmartRedis::Exception if retrieval or addition - * of tensor fails + * \param tensor_data get_tensor command reply containing + * tensor data + * \throw SmartRedis::Exception if addition of tensor fails */ - inline void _get_and_add_dataset_tensor(DataSet& dataset, - const std::string& name, - const std::string& key); + inline void _add_dataset_tensor( + DataSet& dataset, + const std::string& name, + CommandReply tensor_data); /*! * \brief Retrieve the tensor from the DataSet and return @@ -1468,6 +1548,12 @@ */ bool _use_tensor_prefix; + /*! + * \brief Flag determining whether prefixes should be used + * for dataset keys. + */ + bool _use_dataset_prefix; + /*! * \brief Flag determining whether prefixes should be used * for model and script keys. @@ -1480,6 +1566,11 @@ */ bool _use_list_prefix; + /*! + * \brief Our configuration options, used to access runtime settings + */ + ConfigOptions* _cfgopts; + /*! * \brief Build full formatted key of a tensor, based on * current prefix settings. @@ -1651,8 +1742,28 @@ int poll_frequency_ms, int num_tries, std::function comp_func); + /*! + * \brief Initialize a connection to the back-end database + * \throw SmartRedis::Exception if the connection fails + */ + void _establish_server_connection(); + }; +/*!
+* \brief Serialize a client +* \param stream The stream onto which to serialize the client +* \param client The client to serialize +* \returns The output stream, for chaining +*/ +inline +std::ostream& operator<<(std::ostream& stream, const Client& client) +{ + stream << client.to_string(); + return stream; +} + + } // namespace SmartRedis #endif // __cplusplus diff --git a/2023-01/smartsim/smartredis/include/command.h b/2023-01/smartsim/smartredis/include/command.h index a612cbbb..4456f421 100644 --- a/2023-01/smartsim/smartredis/include/command.h +++ b/2023-01/smartsim/smartredis/include/command.h @@ -53,8 +53,16 @@ class RedisServer; class Keyfield: public std::string { public: - Keyfield(std::string s) : _s(s) {}; - std::string _s; + /*! + * \brief Keyfield constructor + * \param s The field name for this key field + */ + Keyfield(std::string s) : _s(s) {}; + + /*! + * \brief The name of this key field + */ + std::string _s; }; /*! @@ -139,6 +147,21 @@ class Command return *this; } + /*! + * \brief Add a vector of string_views to the command. + * \details The string values are copied to the command. + * To add a vector of keys, use the add_keys() + * method. + * \param fields The strings to add to the command + * \returns The command object, for chaining. + */ + virtual Command& operator<<(const std::vector<std::string_view>& fields) { + for (size_t i = 0; i < fields.size(); i++) { + add_field_ptr(fields[i]); + } + return *this; + } + /*! * \brief Add a vector of strings to the command. * \details The string values are copied to the command. @@ -286,24 +309,22 @@ * from a vector of strings. * \details The string key field values are copied to the * Command. - * \param fields The key fields to add to the Command - * \param is_key Boolean indicating if the all - * of the fields are Command keys + * \param keyfields The key fields to add to the Command */ - void add_keys(const std::vector<Keyfield>& fields); + void add_keys(const std::vector<Keyfield>& keyfields); /*! * \brief Add key fields to the Command * from a vector of type T * \details The key field values are copied to the - * Command. The type T must be convertable + * Command. The type T must be convertible * to a string via std::to_string. * \tparam T Any type that can be converted * to a string via std::to_string. * \param keyfields The key fields to add to the Command */ template <class T> - void add_keys(const std::vector<T>& fields); + void add_keys(const std::vector<T>& keyfields); /*! * \brief Return true if the Command has keys @@ -386,6 +407,8 @@ * \brief Replace a field in a command * \param new_field The string to swap in * \param pos The location to swap + * \param is_key True IFF the field supplied in new_field + * is a key field */ void set_field_at(std::string new_field, size_t pos, diff --git a/2023-01/smartsim/smartredis/include/commandreply.h b/2023-01/smartsim/smartredis/include/commandreply.h index a8667183..68e9bac9 100644 --- a/2023-01/smartsim/smartredis/include/commandreply.h +++ b/2023-01/smartsim/smartredis/include/commandreply.h @@ -267,6 +267,14 @@ class CommandReply { */ std::string redis_reply_type(); + /*! + * \brief Determine whether the response is an array + * \returns true iff the response is of type REDIS_REPLY_ARRAY + */ + bool is_array() { + return _reply->type == REDIS_REPLY_ARRAY; + } +
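The fluent Command-building API and the new CommandReply::is_array() guard above combine roughly as follows. This is an illustrative sketch of internal plumbing: the SingleKeyCommand subclass, the key name, and the `server` handle are assumptions, not taken from this diff:

    #include <string>
    #include <vector>
    #include "command.h"
    #include "commandreply.h"

    // Sketch: compose a keyed RedisAI command, run it, and guard
    // array-specific processing with is_array().
    void fetch_tensor_meta(SmartRedis::RedisServer& server)
    {
        SmartRedis::SingleKeyCommand cmd;                // assumed Command subclass
        cmd << "AI.TENSORGET";                           // command-name field
        cmd.add_keys(std::vector<SmartRedis::Keyfield>{
            SmartRedis::Keyfield("my_tensor")});         // the key field
        cmd << std::vector<std::string>{"META", "BLOB"}; // plain string fields
        SmartRedis::CommandReply reply = server.run(cmd);
        if (reply.is_array()) {
            // walk the array elements of the reply here
        }
    }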
/*! * \brief Print the reply structure of the CommandReply */ diff --git a/2023-01/smartsim/smartredis/include/configoptions.h b/2023-01/smartsim/smartredis/include/configoptions.h new file mode 100644 index 00000000..175359b1 --- /dev/null +++ b/2023-01/smartsim/smartredis/include/configoptions.h @@ -0,0 +1,321 @@ +/* + * BSD 2-Clause License + * + * Copyright (c) 2021-2023, Hewlett Packard Enterprise + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef SMARTREDIS_CONFIGOPTIONS_H +#define SMARTREDIS_CONFIGOPTIONS_H + +#ifndef __cplusplus +#error C users should include c_configoptions.h, not configoptions.h +#endif + +#ifdef __cplusplus +#include <string> +#include <memory> // for unique_ptr +#include <unordered_map> +#include "srobject.h" +#include "sr_enums.h" +#include "srexception.h" + +///@file + +namespace SmartRedis { + +/*! +* \brief Configuration source enumeration +*/ +enum cfgSrc { + cs_envt, // Configuration data is coming from environment variables +}; + + +/*! +* \brief The ConfigOptions class consolidates access to configuration options +* used in SmartRedis +*/ +class ConfigOptions +{ + private: + /*! + * \brief ConfigOptions constructor. Do not use! To instantiate a + * ConfigOptions object, use one of the factory methods below + * \param source The selected source for config data + * \param string The string associated with the source + */ + ConfigOptions(cfgSrc source, const std::string& string); + + public: + + /*! + * \brief ConfigOptions copy constructor + * \param cfgopts The ConfigOptions to copy + */ + ConfigOptions(const ConfigOptions& cfgopts) = default; + + /*! + * \brief ConfigOptions copy assignment operator + * \param cfgopts The ConfigOptions to copy and assign + */ + ConfigOptions& operator=(const ConfigOptions& cfgopts) = default; + + /*! + * \brief ConfigOptions move constructor + * \param cfgopts The ConfigOptions to move + */ + ConfigOptions(ConfigOptions&& cfgopts) = default; + + /*! + * \brief ConfigOptions move assignment operator + * \param cfgopts The ConfigOptions to move and assign + */ + ConfigOptions& operator=(ConfigOptions&& cfgopts) = default; + + /*! + * \brief ConfigOptions destructor + */ + virtual ~ConfigOptions(); + + /*!
+ * \brief Deep copy a ConfigOptions object + * \returns The cloned object + * \throw std::bad_alloc on allocation failure + */ + ConfigOptions* clone(); + + ///////////////////////////////////////////////////////////// + // Factory construction methods + + /*! + * \brief Instantiate ConfigOptions, getting selections from + * environment variables. If \p db_suffix is non-empty, + * then "{db_suffix}_" will be prepended to the name of + * each environment variable that is read. + * \param db_suffix The suffix to use with environment variables, + * or an empty string to disable suffixing + * \returns The constructed ConfigOptions object + * \throw SmartRedis::Exception if db_suffix contains invalid + * characters + */ + static std::unique_ptr<ConfigOptions> create_from_environment( + const std::string& db_suffix); + + /*! + * \brief Instantiate ConfigOptions, getting selections from + * environment variables. If \p db_suffix is non-empty, + * then "{db_suffix}_" will be prepended to the name of + * each environment variable that is read. + * \param db_suffix The suffix to use with environment variables, + * or an empty string to disable suffixing + * \returns The constructed ConfigOptions object + * \throw SmartRedis::Exception if db_suffix contains invalid + * characters + */ + static std::unique_ptr<ConfigOptions> create_from_environment( + const char* db_suffix); + + ///////////////////////////////////////////////////////////// + // Option access + + /*! + * \brief Retrieve the value of a numeric configuration option + * from the selected source + * \param option_name The name of the configuration option to retrieve + * \returns The value of the selected option + * \throw Throws SRKeyException if the option was not set in the + * selected source + */ + int64_t get_integer_option(const std::string& option_name); + + /*! + * \brief Retrieve the value of a string configuration option + * from the selected source + * \param option_name The name of the configuration option to retrieve + * \returns The value of the selected option + * \throw Throws SRKeyException if the option was not set in the + * selected source + */ + std::string get_string_option(const std::string& option_name); + + /*! + * \brief Resolve the value of a string configuration option + * from the selected source, selecting a default value + * if not configured + * \param option_name The name of the configuration option to retrieve + * \param default_value The baseline value of the configuration + * option to be returned if a value was not set in the + * selected source + * \returns The value of the selected option. Returns + * \p default_value if the option was not set in the + * selected source + */ + std::string _resolve_string_option( + const std::string& option_name, const std::string& default_value); + + /*! + * \brief Resolve the value of a numeric configuration option + * from the selected source, selecting a default value + * if not configured + * \param option_name The name of the configuration option to retrieve + * \param default_value The baseline value of the configuration + * option to be returned if a value was not set in the + * selected source + * \returns The value of the selected option. Returns + * \p default_value if the option was not set in the + * selected source + */ + int64_t _resolve_integer_option( + const std::string& option_name, int64_t default_value); + + /*!
+ * \brief Check whether a configuration option is set in the + * selected source + * \param option_name The name of the configuration option to check + * \returns True IFF the target option is defined in the selected + * source + */ + bool is_configured(const std::string& option_name); + + /*! + * \brief Retrieve the logging context + * \returns The log context associated with this object + */ + SRObject* _get_log_context() { + if (_log_context == NULL) { + throw SRRuntimeException( + "Attempt to _get_log_context() before context was set!"); + } + return _log_context; + } + + /*! + * \brief Store the logging context + * \param log_context The context to associate with logging + */ + void _set_log_context(SRObject* log_context) { + _log_context = log_context; + } + + /*! + * \brief Clear a configuration option from the cache + * \param option_name The name of the option to clear + */ + void _clear_option_from_cache(const std::string& option_name); + + /*! + * \brief Stash a string buffer so we can delete it on cleanup + * \param buf The buffer to store + */ + void _add_string_buffer(char* buf) { + _string_buffer_stash.push_back(buf); + } + + ///////////////////////////////////////////////////////////// + // Option overrides + + /*! + * \brief Override the value of a numeric configuration option + * in the selected source + * \details Overrides are specific to an instance of the + * ConfigOptions class. An instance that references + * the same source will not be affected by an override to + * a different ConfigOptions instance + * \param option_name The name of the configuration option to override + * \param value The value to store for the configuration option + */ + void override_integer_option( + const std::string& option_name, int64_t value); + + /*! + * \brief Override the value of a string configuration option + * in the selected source + * \details Overrides are specific to an instance of the + * ConfigOptions class. An instance that references + * the same source will not be affected by an override to + * a different ConfigOptions instance + * \param option_name The name of the configuration option to override + * \param value The value to store for the configuration option + */ + void override_string_option( + const std::string& option_name, const std::string& value); + + private: + + /*! + * \brief Process option data from a fixed source + * \throw SmartRedis::Exception: sources other than environment + * variables are not implemented yet + */ + void _populate_options(); + + /*! + * \brief Apply a suffix to an option name if the source is environment + * variables and the suffix is nonempty + * \param option_name The name of the option to suffix + */ + std::string _suffixed(const std::string& option_name); + + /*! + * \brief Integer option map + */ + std::unordered_map<std::string, int64_t> _int_options; + + /*! + * \brief String option map + */ + std::unordered_map<std::string, std::string> _string_options; + + /*! + * \brief Configuration source + */ + cfgSrc _source; + + /*! + * \brief Configuration string. Meaning is specific to the source + */ + std::string _string; + + /*! + * \brief Lazy evaluation (do we read in all options at once or only + * on demand) + */ + bool _lazy; + + /*! + * \brief Logging context + */ + SRObject* _log_context; + + /*!
+ * \brief Stash of string buffers to free at cleanup time + */ + std::vector<char*> _string_buffer_stash; +}; + +} // namespace SmartRedis + +#endif +#endif // SMARTREDIS_CONFIGOPTIONS_H diff --git a/2023-01/smartsim/smartredis/include/dataset.h b/2023-01/smartsim/smartredis/include/dataset.h index 69878761..0f72471f 100644 --- a/2023-01/smartsim/smartredis/include/dataset.h +++ b/2023-01/smartsim/smartredis/include/dataset.h @@ -44,7 +44,6 @@ namespace SmartRedis { -///@file /*! * \brief The DataSet class aggregates tensors * and metadata into a nested data structure @@ -241,7 +240,8 @@ * metadata field name * \throw SmartRedis::Exception if metadata retrieval fails */ - std::vector<std::string> get_meta_strings(const std::string& name); + std::vector<std::string> get_meta_strings( + const std::string& name) const; /*! * \brief Retrieve metadata string field values from the DataSet. @@ -266,7 +266,7 @@ * \param field_name The name of the field to check * \returns True iff the DataSet contains the field */ - bool has_field(const std::string& field_name); + bool has_field(const std::string& field_name) const; /*! @@ -297,7 +297,7 @@ * \returns The name of the tensors in the DataSet * \throw SmartRedis::Exception if metadata retrieval fails */ - std::vector<std::string> get_tensor_names(); + std::vector<std::string> get_tensor_names() const; /*! * \brief Retrieve tensor names from the DataSet. @@ -318,13 +318,22 @@ * \returns The data type for the tensor * \throw SmartRedis::Exception if tensor name retrieval fails */ - SRTensorType get_tensor_type(const std::string& name); + SRTensorType get_tensor_type(const std::string& name) const; + + /*! + * \brief Retrieve the dimensions of a Tensor in the DataSet + * \param name The name of the tensor + * \returns A vector of the tensor's dimensions + * \throw SmartRedis::Exception if tensor name retrieval fails + */ + const std::vector<size_t> get_tensor_dims( + const std::string& name) const; /*! * \brief Retrieve the names of all metadata fields in the DataSet + \returns A vector of metadata field names */ - std::vector<std::string> get_metadata_field_names(); + std::vector<std::string> get_metadata_field_names() const; /*! * \brief Retrieve metadata field names from the DataSet. @@ -346,7 +355,13 @@ * \returns The data type for the metadata field * \throw SmartRedis::Exception if metadata field name retrieval fails */ - SRMetaDataType get_metadata_field_type(const std::string& name); + SRMetaDataType get_metadata_field_type(const std::string& name) const; + + /*! + * \brief Create a string representation of the DataSet + * \returns A string containing DataSet details + */ + std::string to_string() const; friend class Client; friend class PyDataset; @@ -475,6 +490,19 @@ }; +/*! +* \brief Serialize a dataset +* \param stream The stream onto which to serialize the dataset +* \param dataset The dataset to serialize +* \returns The output stream, for chaining +*/ +inline +std::ostream& operator<<(std::ostream& stream, const DataSet& dataset) +{ + stream << dataset.to_string(); + return stream; +} + } // namespace SmartRedis #endif diff --git a/2023-01/smartsim/smartredis/include/logger.h b/2023-01/smartsim/smartredis/include/logger.h index fb5e0a5a..d87ef469 100644 --- a/2023-01/smartsim/smartredis/include/logger.h +++ b/2023-01/smartsim/smartredis/include/logger.h @@ -253,6 +253,7 @@ class FunctionLogger { public:
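The const accessors and stream operator added to DataSet above support a read-only inspection pattern along these lines; a sketch assuming a connected client and an illustrative dataset name (the size_t element type of get_tensor_dims() is inferred from the surrounding declarations):

    #include <iostream>
    #include <string>
    #include <vector>
    #include "client.h"

    // Sketch: enumerate the tensors in a stored DataSet and print their ranks.
    void describe_dataset(SmartRedis::Client& client)
    {
        SmartRedis::DataSet ds = client.get_dataset("simulation_state");
        for (const std::string& name : ds.get_tensor_names()) {
            const std::vector<size_t> dims = ds.get_tensor_dims(name);
            std::cout << name << ": rank " << dims.size() << std::endl;
        }
        std::cout << ds << std::endl; // operator<< delegates to DataSet::to_string()
    }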
/*! * \brief Logger constructor + * \param context The context to which to log function behavior * \param function_name The name of the function to track */ FunctionLogger(const SRObject* context, const char* function_name) diff --git a/2023-01/smartsim/smartredis/include/metadata.h b/2023-01/smartsim/smartredis/include/metadata.h index 676f530e..cd8b77ac 100644 --- a/2023-01/smartsim/smartredis/include/metadata.h +++ b/2023-01/smartsim/smartredis/include/metadata.h @@ -181,7 +181,8 @@ class MetaData * \param name The name of the string field to retrieve * \returns A vector of the strings in the field */ - std::vector<std::string> get_string_values(const std::string& name); + std::vector<std::string> get_string_values( + const std::string& name) const; /*! * \brief Get metadata string field using a c-style @@ -214,7 +215,7 @@ class MetaData * \returns Boolean indicating if the DataSet has * the field. */ - bool has_field(const std::string& field_name); + bool has_field(const std::string& field_name) const; /*! * \brief This function clears all entries in a @@ -238,14 +239,15 @@ class MetaData * \param name The name of the field to check * \throw KeyException if the name is not present */ - SRMetaDataType get_field_type(const std::string& name); + SRMetaDataType get_field_type(const std::string& name) const; /*! * \brief Retrieve a vector of metadata field names * \param skip_internal Omit internal items (such as .tensor_names) * from the results */ - std::vector<std::string> get_field_names(bool skip_internal = false); + std::vector<std::string> get_field_names( + bool skip_internal = false) const; /*! * \brief Get metadata field names using a c-style diff --git a/2023-01/smartsim/smartredis/include/metadatafield.h b/2023-01/smartsim/smartredis/include/metadatafield.h index e9032e33..1a8ddcbd 100644 --- a/2023-01/smartsim/smartredis/include/metadatafield.h +++ b/2023-01/smartsim/smartredis/include/metadatafield.h @@ -67,19 +67,19 @@ class MetadataField { * \brief Retrieve the MetadataField name * \returns MetadataField name */ - std::string name(); + std::string name() const; /*! * \brief Retrieve the Metadatafield type * \returns MetadataField type */ - SRMetaDataType type(); + SRMetaDataType type() const; /*! * \brief Retrieve the number of values in the field * \returns The number of values */ - virtual size_t size() = 0; + virtual size_t size() const = 0; /*! * \brief Clear the values in the field diff --git a/2023-01/smartsim/smartredis/include/pyclient.h b/2023-01/smartsim/smartredis/include/pyclient.h index 00f44f20..deb01f57 100644 --- a/2023-01/smartsim/smartredis/include/pyclient.h +++ b/2023-01/smartsim/smartredis/include/pyclient.h @@ -39,6 +39,7 @@ #include "client.h" #include "pydataset.h" #include "pysrobject.h" +#include "pyconfigoptions.h" ///@file @@ -56,7 +57,24 @@ class PyClient : public PySRObject public: /*! - * \brief PyClient constructor + * \brief Simple constructor that uses default environment variables + * to locate configuration settings + * \param logger_name Identifier for the current client + */ + PyClient(const std::string& logger_name); + + /*! + * \brief Constructor that uses a ConfigOptions object + * to locate configuration settings + * \param config_options The ConfigOptions object to use + * \param logger_name Identifier for the current client + */ + PyClient( + PyConfigOptions& config_options, + const std::string& logger_name); +
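The ConfigOptions-based construction route exposed to Python above follows the factory pattern from configoptions.h; a sketch of the equivalent C++ flow, assuming the core Client offers a matching ConfigOptions-based constructor (not shown in this diff), with an illustrative suffix, option name, and address:

    #include "client.h"
    #include "configoptions.h"

    int main()
    {
        // Read SSDB etc. with a per-instance suffix (suffix name illustrative).
        auto opts = SmartRedis::ConfigOptions::create_from_environment("worker0");
        opts->override_string_option("SSDB", "127.0.0.1:6379"); // pin the server address
        // Assumed C++ counterpart of the PyClient constructor shown above.
        SmartRedis::Client client(opts.get(), "worker0_logger");
        return 0;
    }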
+ /*! + * \brief PyClient constructor (deprecated) * \param cluster Flag to indicate if a database cluster * is being used * \param logger_name Identifier for the current client @@ -285,6 +303,7 @@ * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -299,6 +318,7 @@ const std::string& device, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector<std::string>& inputs = std::vector<std::string>(), @@ -317,6 +337,7 @@ * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -332,6 +353,7 @@ int num_gpus, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector<std::string>& inputs = std::vector<std::string>(), @@ -350,6 +372,7 @@ * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -364,6 +387,7 @@ const std::string& device, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector<std::string>& inputs = std::vector<std::string>(), @@ -382,6 +406,7 @@ * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -397,6 +422,7 @@ int num_gpus, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector<std::string>& inputs = std::vector<std::string>(), @@ -447,7 +473,7 @@ /*! * \brief Remove a model from the database * \param name The name associated with the model - * \param first_cpu the first GPU (zero-based) to use with the model + * \param first_gpu the first GPU (zero-based) to use with the model * \param num_gpus the number of gpus for which the model was stored * \throw RuntimeException for all client errors */ @@ -464,7 +490,7 @@ * \brief Remove a script from the database that was stored * for use with multiple GPUs * \param name The name associated with the script - * \param first_cpu the first GPU (zero-based) to use with the script + * \param first_gpu the first GPU (zero-based) to use with the script * \param num_gpus the number of gpus for which the script was stored * \throw RuntimeException for all client errors */ @@ -618,9 +644,9 @@ * prefixed.
By default, the client prefixes aggregation * list keys with the first prefix specified with the SSKEYIN * and SSKEYOUT environment variables. Note that - * use_tensor_ensemble_prefix() controls prefixing + * use_dataset_ensemble_prefix() controls prefixing * for the entities in the aggregation list, and - * use_tensor_ensemble_prefix() should be given the + * use_dataset_ensemble_prefix() should be given the * same value that was used during the initial * setting of the DataSet into the database. * \param use_prefix If set to true, all future operations @@ -630,21 +656,35 @@ void use_list_ensemble_prefix(bool use_prefix); /*! - * \brief Set whether names of tensors or datasets should be - * prefixed (e.g. in an ensemble) to form database keys. + * \brief Set whether names of tensors should be prefixed (e.g. + * in an ensemble) to form database keys. * Prefixes will only be used if they were previously set through * the environment variables SSKEYOUT and SSKEYIN. * Keys formed before this function is called will not be affected. - * By default, the client prefixes tensor and dataset keys - * with the first prefix specified with the SSKEYIN - * and SSKEYOUT environment variables. + * By default, the client prefixes tensor keys with the first + * prefix specified with the SSKEYIN and SSKEYOUT environment + * variables. * - * \param use_prefix If set to true, all future operations - * on tensors and datasets will add - * a prefix to the entity names, if available. + * \param use_prefix If set to true, all future operations on tensors will + * add a prefix to the entity names, if available. */ void use_tensor_ensemble_prefix(bool use_prefix); + /*! + * \brief Set whether names of datasets should be prefixed (e.g. + * in an ensemble) to form database keys. + * Prefixes will only be used if they were previously set through + * the environment variables SSKEYOUT and SSKEYIN. + * Keys formed before this function is called will not be affected. + * By default, the client prefixes dataset keys with the first + * prefix specified with the SSKEYIN and SSKEYOUT environment + * variables. + * + * \param use_prefix If set to true, all future operations on datasets will + * add a prefix to the entity names, if available. + */ + void use_dataset_ensemble_prefix(bool use_prefix); + /*! * \brief Returns information about the given database nodes * \param addresses The addresses of the database nodes. Each address is @@ -790,12 +830,12 @@ /*! * \brief Rename an aggregation list - * \details The old and new aggregation list key used to find and + * \details The initial and target aggregation list key used to find and * relocate the list may be formed by applying prefixes to - * the supplied old_name and new_name. See set_data_source() + * the supplied src_name and dest_name. See set_data_source() * and use_list_ensemble_prefix() for more details. - * \param old_name The old list name - * \param new_name The new list name + * \param src_name The initial list name + * \param dest_name The target list name * \throw SmartRedis::Exception if the command fails */ void rename_list(const std::string& src_name, @@ -911,6 +951,28 @@ const int start_index, const int end_index);
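A sketch of the aggregation-list workflow these wrappers expose, using the renamed src_name/dest_name semantics; it assumes a connected client and a populated DataSet, and the list names are illustrative:

    #include "client.h"

    // Sketch: store a dataset, aggregate it into a list, then rename the list.
    void archive_step(SmartRedis::Client& client, SmartRedis::DataSet& step)
    {
        client.use_dataset_ensemble_prefix(true); // match how the datasets were first stored
        client.use_list_ensemble_prefix(true);
        client.put_dataset(step);
        client.append_to_list("timesteps", step); // aggregation-list API
        client.rename_list("timesteps", "timesteps_done");
    }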
+ /*! + * \brief Reconfigure the chunking size that Redis uses for model + * serialization, replication, and the model_get command. + * \details This method triggers the AI.CONFIG method in the Redis + * database to change the model chunking size. + * + * NOTE: The default size of 511MB should be fine for most + * applications, so it is expected to be very rare that a + * client calls this method. It is not necessary to call + * this method for a model to be chunked. + * \param chunk_size The new chunk size in bytes + * \throw SmartRedis::Exception if the command fails. + */ + void set_model_chunk_size(int chunk_size); + + /*! + * \brief Create a string representation of the Client + * \returns A string representation of the Client + */ + std::string to_string(); + + private: /*! diff --git a/2023-01/smartsim/smartredis/include/pyconfigoptions.h b/2023-01/smartsim/smartredis/include/pyconfigoptions.h new file mode 100644 index 00000000..0eb887a3 --- /dev/null +++ b/2023-01/smartsim/smartredis/include/pyconfigoptions.h @@ -0,0 +1,168 @@ +/* + * BSD 2-Clause License + * + * Copyright (c) 2021-2023, Hewlett Packard Enterprise + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef SMARTREDIS_PYCONFIGOPTIONS_H +#define SMARTREDIS_PYCONFIGOPTIONS_H + + +#include +#include +#include +#include +#include "configoptions.h" +#include "pysrobject.h" + +///@file + +namespace py = pybind11; + + +namespace SmartRedis { + + +/*! +* \brief The PyConfigOptions class is a wrapper around the + C++ ConfigOptions class. +*/ +class PyConfigOptions +{ + public: + + /*! + * \brief PyConfigOptions constructor + */ + PyConfigOptions(); + + /*! + * \brief PyConfigOptions constructor from a + * SmartRedis::ConfigOptions object + * \param configoptions A SmartRedis::ConfigOptions object + * allocated on the heap. The SmartRedis + * ConfigOptions will be deleted upon + * PyConfigOptions deletion. + */ + PyConfigOptions(ConfigOptions* configoptions); + + /*! + * \brief PyConfigOptions destructor + */ + virtual ~PyConfigOptions(); + + /*! + * \brief Retrieve a pointer to the underlying + * SmartRedis::ConfigOptions object + * \returns ConfigOptions pointer within PyConfigOptions + */ + ConfigOptions* get(); + + ///////////////////////////////////////////////////////////// + // Factory construction methods + + /*!
+ * \brief Instantiate ConfigOptions, getting selections from + * environment variables. If \p db_suffix is non-empty, + * then "_{db_suffix}" will be appended to the name of + * each environment variable that is read. + * \param db_suffix The suffix to use with environment variables, + * or an empty string to disable suffixing + * \returns The constructed ConfigOptions object + * \throw SmartRedis::Exception if db_suffix contains invalid + * characters + */ + static PyConfigOptions* create_from_environment( + const std::string& db_suffix); + + ///////////////////////////////////////////////////////////// + // Option access + + /*! + * \brief Retrieve the value of a numeric configuration option + * from the selected source + * \param option_name The name of the configuration option to retrieve + * \returns The value of the selected option + * \throw SmartRedis::Exception if the option was not set in the + * selected source + */ + int64_t get_integer_option(const std::string& option_name); + + /*! + * \brief Retrieve the value of a string configuration option + * from the selected source + * \param option_name The name of the configuration option to retrieve + * \returns The value of the selected option + * \throw SmartRedis::Exception if the option was not set in the + * selected source + */ + std::string get_string_option(const std::string& option_name); + + /*! + * \brief Check whether a configuration option is set in the + * selected source + * \param option_name The name of the configuration option to check + * \returns True IFF the target option is defined in the selected + * source + */ + bool is_configured(const std::string& option_name); + + ///////////////////////////////////////////////////////////// + // Option overrides + + /*! + * \brief Override the value of a numeric configuration option + * in the selected source + * \details Overrides are specific to an instance of the + * ConfigOptions class. An instance that references + * the same source will not be affected by an override to + * a different ConfigOptions instance + * \param option_name The name of the configuration option to override + * \param value The value to store for the configuration option + */ + void override_integer_option( + const std::string& option_name, int64_t value); + + /*! + * \brief Override the value of a string configuration option + * in the selected source + * \details Overrides are specific to an instance of the + * ConfigOptions class. An instance that references + * the same source will not be affected by an override to + * a different ConfigOptions instance + * \param option_name The name of the configuration option to override + * \param value The value to store for the configuration option + */ + void override_string_option( + const std::string& option_name, const std::string& value); + + private: + + ConfigOptions* _configoptions; +}; + +} // namespace SmartRedis + +#endif // SMARTREDIS_PYCONFIGOPTIONS_H diff --git a/2023-01/smartsim/smartredis/include/pydataset.h b/2023-01/smartsim/smartredis/include/pydataset.h index e583b717..99231e10 100644 --- a/2023-01/smartsim/smartredis/include/pydataset.h +++ b/2023-01/smartsim/smartredis/include/pydataset.h @@ -144,6 +144,14 @@ class PyDataset : public PySRObject */ std::string get_tensor_type(const std::string& name); + /*!
+ * \brief Retrieve the dimensions of a Tensor in the DataSet + * \param name The name of the tensor + * \returns A list of the tensor's dimensions + * \throw SmartRedis::Exception if tensor name retrieval fails + */ + py::list get_tensor_dims(const std::string& name); + /*! * \brief Retrieve the names of all metadata fields in the DataSet * \returns A vector of metadata field names @@ -168,7 +176,13 @@ * SmartRedis::DataSet object * \returns DataSet pointer within PyDataset */ - DataSet* get(); + DataSet* get() { return _dataset; } + + /*! + * \brief Create a string representation of the DataSet + * \returns A string representation of the DataSet + */ + std::string to_string(); private: diff --git a/2023-01/smartsim/smartredis/include/pylogcontext.h b/2023-01/smartsim/smartredis/include/pylogcontext.h index 152d440c..1113d96e 100644 --- a/2023-01/smartsim/smartredis/include/pylogcontext.h +++ b/2023-01/smartsim/smartredis/include/pylogcontext.h @@ -76,7 +76,7 @@ class PyLogContext * SmartRedis::LogContext object * \returns LogContext pointer within PyLogContext */ - LogContext* get(); + LogContext* get() { return _logcontext; } private: diff --git a/2023-01/smartsim/smartredis/include/pysrobject.h b/2023-01/smartsim/smartredis/include/pysrobject.h index 04a22fd6..cfc56ff0 100644 --- a/2023-01/smartsim/smartredis/include/pysrobject.h +++ b/2023-01/smartsim/smartredis/include/pysrobject.h @@ -57,11 +57,11 @@ class PySRObject /*! * \brief PySRObject constructor from a * SmartRedis::SRObject object - * \param logcontext A SmartRedis::SRObject pointer to - * a SmartRedis::SRObject allocated on - * the heap. The SmartRedis::SRObject - * will be deleted upton PySRObject - * deletion. + * \param srobject A SmartRedis::SRObject pointer to + * a SmartRedis::SRObject allocated on + * the heap. The SmartRedis::SRObject + * will be deleted upon PySRObject + * deletion. */ PySRObject(SRObject* srobject); @@ -75,7 +75,7 @@ * SmartRedis::SRObject object + \returns SRObject pointer within PySRObject */ - SRObject* get(); + SRObject* get() { return _srobject; } /*! * \brief Conditionally log data if the logging level is high enough diff --git a/2023-01/smartsim/smartredis/include/redis.h b/2023-01/smartsim/smartredis/include/redis.h index e137d5b3..8233c45f 100644 --- a/2023-01/smartsim/smartredis/include/redis.h +++ b/2023-01/smartsim/smartredis/include/redis.h @@ -35,7 +35,7 @@ namespace SmartRedis { -class SRObject; +class ConfigOptions; /*! * \brief The Redis class executes RedisServer @@ -46,18 +46,19 @@ class Redis : public RedisServer public: /*! * \brief Redis constructor. - * \param context The owning context + * \param cfgopts Our source for configuration options */ - Redis(const SRObject* context); + Redis(ConfigOptions* cfgopts); /*! * \brief Redis constructor. * Uses address provided to constructor instead * of environment variables. - * \param context The owning context + * \param cfgopts Our source for configuration options * \param addr_spec The TCP or UDS server address + * \throw SmartRedis::Exception if connection fails */ - Redis(const SRObject* context, std::string addr_spec); + Redis(ConfigOptions* cfgopts, std::string addr_spec); /*! * \brief Redis copy constructor is not allowed @@ -91,50 +92,49 @@ class Redis : public RedisServer /*!
* \brief Run a SingleKeyCommand on the server * \param cmd The SingleKeyCommand to run - * \returns The CommandReply from the - * command execution + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(SingleKeyCommand& cmd); /*! * \brief Run a MultiKeyCommand on the server * \param cmd The MultiKeyCommand to run - * \returns The CommandReply from the - * command execution + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(MultiKeyCommand& cmd); /*! * \brief Run a CompoundCommand on the server * \param cmd The CompoundCommand to run - * \returns The CommandReply from the - * command execution + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(CompoundCommand& cmd); /*! * \brief Run an AddressAtCommand on the server * \param cmd The AddressAtCommand command to run - * \returns The CommandReply from the - * command execution + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(AddressAtCommand& cmd); /*! * \brief Run an AddressAnyCommand on the server * \param cmd The AddressAnyCommand to run - * \returns The CommandReply from the - * command execution + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(AddressAnyCommand& cmd); /*! * \brief Run a non-keyed Command that * addresses every db node on the server - * \param cmd The non-keyed Command that - * addresses any db node - * \returns The CommandReply from the - * command execution + * \param cmd The non-keyed Command that addresses any db node + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(AddressAllCommand& cmd); @@ -142,11 +142,11 @@ * \brief Run multiple single-key or single-hash slot * Command on the server. Each Command in the * CommandList is run sequentially. - * \param cmd The CommandList containing multiple - * single-key or single-hash - * slot Comand to run + * \param cmd The CommandList containing multiple single-key or + * single-hash slot Command to run * \returns A list of CommandReply for each Command * in the CommandList + * \throw SmartRedis::Exception if command execution fails */ virtual std::vector<CommandReply> run(CommandList& cmd); @@ -157,12 +157,12 @@ * by shard, and executed in groups by shard. * Commands are not guaranteed to be executed * in any sequence or ordering. - * \param cmd The CommandList containing multiple - * single-key or single-hash - * slot Command to run + * \param cmd_list The CommandList containing multiple single-key + * or single-hash slot Commands to run * \returns A list of CommandReply for each Command * in the CommandList. The order of the result * matches the order of the input CommandList. + * \throw SmartRedis::Exception if command execution fails */ virtual PipelineReply run_via_unordered_pipelines(CommandList& cmd_list); @@ -174,6 +174,7 @@ * be used.
* \param key The key to check * \returns True if the key exists, otherwise False + * \throw SmartRedis::Exception if existence check fails */ virtual bool key_exists(const std::string& key); @@ -182,6 +183,7 @@ * \param key The key containing the field * \param field The field in the key to check * \returns True if the hash field exists, otherwise False + * \throw SmartRedis::Exception if existence check fails */ virtual bool hash_field_exists(const std::string& key, const std::string& field); @@ -190,6 +192,7 @@ * \brief Check if a model or script key exists in the database * \param key The key to check * \returns True if the key exists, otherwise False + * \throw SmartRedis::Exception if existence check fails */ virtual bool model_key_exists(const std::string& key); @@ -203,8 +206,8 @@ /*! * \brief Put a Tensor on the server * \param tensor The Tensor to put on the server - * \returns The CommandReply from the put tensor - * command execution + * \returns The CommandReply from the put tensor command execution + * \throw SmartRedis::Exception if tensor storage fails */ virtual CommandReply put_tensor(TensorBase& tensor); @@ -213,15 +216,25 @@ * \param key The name of the tensor to retrieve * \returns The CommandReply from the get tensor server * command execution + * \throw SmartRedis::Exception if tensor retrieval fails */ virtual CommandReply get_tensor(const std::string& key); + /*! + * \brief Get a list of Tensors from the server + * \param keys The keys of the tensors to retrieve + * \returns The PipelineReply from executing the get tensor commands + * \throw SmartRedis::Exception if tensor retrieval fails + */ + virtual PipelineReply get_tensors( + const std::vector<std::string>& keys); + /*!
* \brief Rename a tensor in the database * \param key The original key for the tensor * \param new_key The new key for the tensor - * \returns The CommandReply from executing the RENAME - * command + * \returns The CommandReply from executing the RENAME command + * \throw SmartRedis::Exception if tensor rename fails */ virtual CommandReply rename_tensor(const std::string& key, const std::string& new_key); @@ -231,6 +244,7 @@ * \param key The database key for the tensor * \returns The CommandReply from delete command * executed on the server + * \throw SmartRedis::Exception if tensor removal fails */ virtual CommandReply delete_tensor(const std::string& key); @@ -239,8 +253,8 @@ * the destination key * \param src_key The source key for the tensor copy * \param dest_key The destination key for the tensor copy - * \returns The CommandReply from executing the COPY - * command + * \returns The CommandReply from executing the COPY command + * \throw SmartRedis::Exception if tensor copy fails */ virtual CommandReply copy_tensor(const std::string& src_key, const std::string& dest_key); @@ -252,6 +266,7 @@ * \param dest Vector of destination keys * \returns The CommandReply from the last put command * associated with the tensor copy + * \throw SmartRedis::Exception if tensor copy fails */ virtual CommandReply copy_tensors(const std::vector<std::string>& src, const std::vector<std::string>& dest); @@ -261,28 +276,29 @@ * \brief Set a model from std::string_view buffer in the * database for future execution * \param key The key to associate with the model - * \param model The model as a continuous buffer string_view + * \param model The model as a sequence of buffer string_view chunks * \param backend The name of the backend * (TF, TFLITE, TORCH, ONNX) * \param device The name of the device for execution * (e.g.
CPU or GPU) * \param batch_size The batch size for model execution - * \param min_batch_size The minimum batch size for model - * execution - * \param tag A tag to attach to the model for - * information purposes + * \param min_batch_size The minimum batch size for model execution + * \param min_batch_timeout Max time (ms) to wait for min batch size + * \param tag A tag to attach to the model for information purposes * \param inputs One or more names of model input nodes * (TF models only) * \param outputs One or more names of model output nodes * (TF models only) * \returns The CommandReply from the set_model Command + * \throw RuntimeException for all client errors */ virtual CommandReply set_model(const std::string& key, - std::string_view model, + const std::vector& model, const std::string& backend, const std::string& device, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector& inputs = std::vector(), @@ -293,16 +309,15 @@ class Redis : public RedisServer * \brief Set a model from std::string_view buffer in the * database for future execution in a multi-GPU system * \param name The name to associate with the model - * \param model The model as a continuous buffer string_view + * \param model The model as a sequence of buffer string_view chunks * \param backend The name of the backend * (TF, TFLITE, TORCH, ONNX) * \param first_gpu The first GPU to use with this model * \param num_gpus The number of GPUs to use with this model * \param batch_size The batch size for model execution - * \param min_batch_size The minimum batch size for model - * execution - * \param tag A tag to attach to the model for - * information purposes + * \param min_batch_size The minimum batch size for model execution + * \param min_batch_timeout Max time (ms) to wait for min batch size + * \param tag A tag to attach to the model for information purposes * \param inputs One or more names of model input nodes * (TF models only) * \param outputs One or more names of model output nodes @@ -310,12 +325,13 @@ class Redis : public RedisServer * \throw RuntimeException for all client errors */ virtual void set_model_multigpu(const std::string& name, - const std::string_view& model, + const std::vector& model, const std::string& backend, int first_gpu, int num_gpus, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector& inputs = std::vector(), @@ -330,6 +346,7 @@ class Redis : public RedisServer * (e.g. CPU or GPU) * \param script The script source in a std::string_view * \returns The CommandReply from set_script Command + * \throw RuntimeException for all client errors */ virtual CommandReply set_script(const std::string& key, const std::string& device, @@ -346,19 +363,18 @@ class Redis : public RedisServer */ virtual void set_script_multigpu(const std::string& name, const std::string_view& script, - int first_cpu, + int first_gpu, int num_gpus); /*! 
* \brief Run a model in the database using the * specified input and output tensors * \param key The key associated with the model - * \param inputs The keys of inputs tensors to use - * in the model + * \param inputs The keys of inputs tensors to use in the model * \param outputs The keys of output tensors that * will be used to save model results - * \returns The CommandReply from the run model server - * Command + * \returns The CommandReply from the run model server Command + * \throw RuntimeException for all client errors */ virtual CommandReply run_model(const std::string& key, std::vector inputs, @@ -389,12 +405,11 @@ class Redis : public RedisServer * specified input and output tensors * \param key The key associated with the script * \param function The name of the function in the script to run - * \param inputs The keys of inputs tensors to use - * in the script + * \param inputs The keys of inputs tensors to use in the script * \param outputs The keys of output tensors that * will be used to save script results - * \returns The CommandReply from script run Command - * execution + * \returns The CommandReply from script run Command execution + * \throw RuntimeException for all client errors */ virtual CommandReply run_script(const std::string& key, const std::string& function, @@ -435,7 +450,7 @@ class Redis : public RedisServer * \brief Remove a model from the database that was stored * for use with multiple GPUs * \param name The name associated with the model - * \param first_cpu the first GPU (zero-based) to use with the model + * \param first_gpu the first GPU (zero-based) to use with the model * \param num_gpus the number of gpus for which the model was stored * \throw SmartRedis::Exception if model deletion fails */ @@ -454,7 +469,7 @@ class Redis : public RedisServer * \brief Remove a script from the database that was stored * for use with multiple GPUs * \param name The name associated with the script - * \param first_cpu the first GPU (zero-based) to use with the script + * \param first_gpu the first GPU (zero-based) to use with the script * \param num_gpus the number of gpus for which the script was stored * \throw SmartRedis::Exception if script deletion fails */ @@ -466,6 +481,7 @@ class Redis : public RedisServer * \param key The key associated with the model * \returns The CommandReply that contains the result * of the get model execution on the server + * \throw SmartRedis::Exception if model retrieval fails */ virtual CommandReply get_model(const std::string& key); @@ -474,6 +490,7 @@ class Redis : public RedisServer * \param key The key associated with the script * \returns The CommandReply that contains the result * of the get script execution on the server + * \throw SmartRedis::Exception if script retrieval fails */ virtual CommandReply get_script(const std::string& key); @@ -485,12 +502,48 @@ class Redis : public RedisServer * with the model or script should be reset. * \returns The CommandReply that contains the result * of the AI.INFO execution on the server + * \throw SmartRedis::Exception if info retrieval fails */ virtual CommandReply get_model_script_ai_info(const std::string& address, const std::string& key, const bool reset_stat); + /*! + * \brief Retrieve the current model chunk size + * \returns The size in bytes for model chunking + */ + virtual int get_model_chunk_size(); + + /*! + * \brief Reconfigure the chunking size that Redis uses for model + * serialization, replication, and the model_get command. 
+ * \details This method triggers the AI.CONFIG method in the Redis + * database to change the model chunking size. + * + * NOTE: The default size of 511MB should be fine for most + * applications, so it is expected to be very rare that a + * client calls this method. It is not necessary to call + * this method for a model to be chunked. + * \param chunk_size The new chunk size in bytes + * \throw SmartRedis::Exception if the command fails. + */ + virtual void set_model_chunk_size(int chunk_size); + + /*! + * \brief Run a CommandList via a Pipeline + * \param cmdlist The list of commands to run + * \returns The PipelineReply with the result of command execution + * \throw SmartRedis::Exception if execution fails + */ + PipelineReply run_in_pipeline(CommandList& cmdlist); + + /*! + * \brief Create a string representation of the Redis connection + * \returns A string representation of the Redis connection + */ + virtual std::string to_string() const; + private: /*! @@ -501,8 +554,8 @@ class Redis : public RedisServer /*! * \brief Run a Command on the server * \param cmd The Command to run - * \returns The CommandReply from the - * command execution + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ inline CommandReply _run(const Command& cmd); @@ -515,8 +568,18 @@ class Redis : public RedisServer /*! * \brief Connect to the server at the address and port * \param db_address The server address + * \throw SmartRedis::Exception if connection fails */ inline void _connect(SRAddress& db_address); + + /*! + * \brief Pipeline execute a series of commands + * \param cmds The commands to execute + * \returns Pipeline reply from the command execution + * \throw SmartRedis::Exception if command execution fails + */ + PipelineReply _run_pipeline(std::vector& cmds); + }; } // namespace SmartRedis diff --git a/2023-01/smartsim/smartredis/include/rediscluster.h b/2023-01/smartsim/smartredis/include/rediscluster.h index 018af377..1d818ea2 100644 --- a/2023-01/smartsim/smartredis/include/rediscluster.h +++ b/2023-01/smartsim/smartredis/include/rediscluster.h @@ -55,18 +55,18 @@ class RedisCluster : public RedisServer /*! * \brief RedisCluster constructor. - * \param context The owning context + * \param cfgopts Our source for configuration options */ - RedisCluster(const SRObject* context); + RedisCluster(ConfigOptions* cfgopts); /*! - * \brief RedisCluster constructor. - * Uses address provided to constructor instead - * of environment variables. - * \param context The owning context + * \brief RedisCluster constructor. Uses address provided to + * constructor instead of environment variables. + * \param cfgopts Our source for configuration options * \param address_spec The TCP or UDS address of the server + * \throw SmartRedis::Exception if connection fails */ - RedisCluster(const SRObject* context, std::string address_spec); + RedisCluster(ConfigOptions* cfgopts, std::string address_spec); /*! * \brief RedisCluster copy constructor is not allowed @@ -99,55 +99,52 @@ class RedisCluster : public RedisServer /*! * \brief Run a single-key Command on the server - * \param cmd The single-key Comand to run - * \returns The CommandReply from the - * command execution + * \param cmd The single-key Command to run + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(SingleKeyCommand& cmd); /*!
* \brief Run a multi-key Command on the server - * \param cmd The multi-key Comand to run - * \returns The CommandReply from the - * command execution + * \param cmd The multi-key Command to run + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(MultiKeyCommand& cmd); /*! * \brief Run a compound Command on the server - * \param cmd The compound Comand to run - * \returns The CommandReply from the - * command execution + * \param cmd The compound Command to run + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(CompoundCommand& cmd); /*! * \brief Run a non-keyed Command that * addresses the given db node on the server - * \param cmd The non-keyed Command that - * addresses the given db node - * \returns The CommandReply from the - * command execution + * \param cmd The non-keyed Command that addresses the given db node + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(AddressAtCommand& cmd); /*! * \brief Run a non-keyed Command that * addresses any db node on the server - * \param cmd The non-keyed Command that - * addresses any db node - * \returns The CommandReply from the - * command execution + * \param cmd The non-keyed Command that addresses any db node + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(AddressAnyCommand& cmd); /*! * \brief Run a non-keyed Command that * addresses every db node on the server - * \param cmd The non-keyed Command that - * addresses any db node - * \returns The CommandReply from the - * command execution + * \param cmd The non-keyed Command that addresses all db nodes + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(AddressAllCommand& cmd); @@ -155,11 +152,11 @@ class RedisCluster : public RedisServer * \brief Run multiple single-key or single-hash slot * Command on the server. Each Command in the * CommandList is run sequentially. - * \param cmd The CommandList containing multiple - * single-key or single-hash - * slot Command to run + * \param cmd The CommandList containing multiple single-key or + * single-hash slot Command to run * \returns A list of CommandReply for each Command * in the CommandList + * \throw SmartRedis::Exception if command execution fails */ virtual std::vector run(CommandList& cmd); @@ -170,23 +167,23 @@ class RedisCluster : public RedisServer * by shard, and executed in groups by shard. * Commands are not guaranteed to be executed * in any sequence or ordering. - * \param cmd The CommandList containing multiple - * single-key or single-hash - * slot Command to run + * \param cmd_list The CommandList containing multiple single-key + * or single-hash slot Commands to run * \returns A list of CommandReply for each Command * in the CommandList. The order of the result * matches the order of the input CommandList. + * \throw SmartRedis::Exception if command execution fails */ virtual PipelineReply run_via_unordered_pipelines(CommandList& cmd_list); /*! - * \brief Check if a key exists in the database. This - * function does not work for models and scripts. - * For models and scripts, model_key_exists should - * be used. + * \brief Check if a key exists in the database. 
This function does + not work for models and scripts. For models and scripts, + model_key_exists should be used. * \param key The key to check * \returns True if the key exists, otherwise False + * \throw SmartRedis::Exception if existence check fails */ virtual bool key_exists(const std::string& key); @@ -195,6 +192,7 @@ class RedisCluster : public RedisServer * \param key The key containing the field * \param field The field in the key to check * \returns True if the hash field exists, otherwise False + * \throw SmartRedis::Exception if existence check fails */ virtual bool hash_field_exists(const std::string& key, const std::string& field); @@ -203,6 +201,7 @@ class RedisCluster : public RedisServer * \brief Check if a model or script key exists in the database * \param key The key to check * \returns True if the key exists, otherwise False + * \throw SmartRedis::Exception if existence check fails */ virtual bool model_key_exists(const std::string& key); @@ -218,6 +217,7 @@ class RedisCluster : public RedisServer * \param tensor The Tensor to put on the server * \returns The CommandReply from the put tensor * command execution + * \throw SmartRedis::Exception if tensor storage fails */ virtual CommandReply put_tensor(TensorBase& tensor); @@ -226,10 +226,21 @@ class RedisCluster : public RedisServer * \param key The name of the tensor to retrieve * \returns The CommandReply from the get tensor server * command execution + * \throw SmartRedis::Exception if tensor retrieval fails */ virtual CommandReply get_tensor(const std::string& key); /*! + * \brief Get a list of Tensors from the server. All tensors + must be on the same node + * \param keys The keys of the tensors to retrieve + * \returns The PipelineReply from executing the get tensor commands + * \throw SmartRedis::Exception if tensor retrieval fails + */ + virtual PipelineReply get_tensors( + const std::vector<std::string>& keys); + + /*! * \brief Rename a tensor in the database * \param key The original key for the tensor * \param new_key The new key for the tensor @@ -237,6 +248,7 @@ class RedisCluster : public RedisServer * execution in the renaming of the tensor. * In the case of RedisCluster, this is * the reply for the final delete_tensor call. + * \throw SmartRedis::Exception if tensor rename fails */ virtual CommandReply rename_tensor(const std::string& key, const std::string& new_key); @@ -246,6 +258,7 @@ class RedisCluster : public RedisServer * \param key The database key for the tensor * \returns The CommandReply from delete command * executed on the server + * \throw SmartRedis::Exception if tensor removal fails */ virtual CommandReply delete_tensor(const std::string& key); @@ -258,6 +271,7 @@ class RedisCluster : public RedisServer * execution in the copying of the tensor. * In the case of RedisCluster, this is * the CommandReply from a put_tensor commands. + * \throw SmartRedis::Exception if tensor copy fails */ virtual CommandReply copy_tensor(const std::string& src_key, const std::string& dest_key); @@ -271,6 +285,7 @@ class RedisCluster : public RedisServer * execution in the copying of the tensor. * Different implementations may have different * sequences of commands.
+ * \throw SmartRedis::Exception if tensor copy fails */ virtual CommandReply copy_tensors(const std::vector& src, const std::vector& dest); @@ -279,7 +294,7 @@ class RedisCluster : public RedisServer * \brief Set a model from std::string_view buffer in the * database for future execution * \param key The key to associate with the model - * \param model The model as a continuous buffer string_view + * \param model The model as a sequence of buffer string_view chunks * \param backend The name of the backend * (TF, TFLITE, TORCH, ONNX) * \param device The name of the device for execution @@ -287,6 +302,7 @@ class RedisCluster : public RedisServer * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -294,13 +310,15 @@ class RedisCluster : public RedisServer * \param outputs One or more names of model output nodes * (TF models only) * \returns The CommandReply from the set_model Command + * \throw RuntimeException for all client errors */ virtual CommandReply set_model(const std::string& key, - std::string_view model, + const std::vector& model, const std::string& backend, const std::string& device, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector& inputs = std::vector(), @@ -311,7 +329,7 @@ class RedisCluster : public RedisServer * \brief Set a model from std::string_view buffer in the * database for future execution in a multi-GPU system * \param name The name to associate with the model - * \param model The model as a continuous buffer string_view + * \param model The model as a sequence of buffer string_view chunks * \param backend The name of the backend * (TF, TFLITE, TORCH, ONNX) * \param first_gpu The first GPU to use with this model @@ -319,6 +337,7 @@ class RedisCluster : public RedisServer * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -328,12 +347,13 @@ class RedisCluster : public RedisServer * \throw RuntimeException for all client errors */ virtual void set_model_multigpu(const std::string& name, - const std::string_view& model, + const std::vector& model, const std::string& backend, int first_gpu, int num_gpus, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector& inputs = std::vector(), @@ -348,6 +368,7 @@ class RedisCluster : public RedisServer * (e.g. 
CPU or GPU) * \param script The script source in a std::string_view * \returns The CommandReply from set_script Command + * \throw RuntimeException for all client errors */ virtual CommandReply set_script(const std::string& key, const std::string& device, @@ -371,12 +392,11 @@ class RedisCluster : public RedisServer * \brief Run a model in the database using the * specified input and output tensors * \param key The key associated with the model - * \param inputs The keys of inputs tensors to use - * in the model + * \param inputs The keys of inputs tensors to use in the model * \param outputs The keys of output tensors that * will be used to save model results - * \returns The CommandReply from the run model server - * Command + * \returns The CommandReply from the run model server Command + * \throw RuntimeException for all client errors */ virtual CommandReply run_model(const std::string& key, std::vector inputs, @@ -411,8 +431,8 @@ class RedisCluster : public RedisServer * in the script * \param outputs The keys of output tensors that * will be used to save script results - * \returns The CommandReply from script run Command - * execution + * \returns The CommandReply from script run Command execution + * \throw RuntimeException for all client errors */ virtual CommandReply run_script(const std::string& key, const std::string& function, @@ -453,7 +473,7 @@ class RedisCluster : public RedisServer * \brief Remove a model from the database that was stored * for use with multiple GPUs * \param name The name associated with the model - * \param first_cpu the first GPU (zero-based) to use with the model + * \param first_gpu the first GPU (zero-based) to use with the model * \param num_gpus the number of gpus for which the model was stored * \throw SmartRedis::Exception if model deletion fails */ @@ -472,7 +492,7 @@ class RedisCluster : public RedisServer * \brief Remove a script from the database that was stored * for use with multiple GPUs * \param name The name associated with the script - * \param first_cpu the first GPU (zero-based) to use with the script + * \param first_gpu the first GPU (zero-based) to use with the script * \param num_gpus the number of gpus for which the script was stored * \throw SmartRedis::Exception if script deletion fails */ @@ -484,6 +504,7 @@ class RedisCluster : public RedisServer * \param key The key associated with the model * \returns The CommandReply that contains the result * of the get model execution on the server + * \throw SmartRedis::Exception if model retrieval fails */ virtual CommandReply get_model(const std::string& key); @@ -492,6 +513,7 @@ class RedisCluster : public RedisServer * \param key The key associated with the script * \returns The CommandReply that contains the result * of the get script execution on the server + * \throw SmartRedis::Exception if script retrieval fails */ virtual CommandReply get_script(const std::string& key); @@ -503,11 +525,32 @@ class RedisCluster : public RedisServer * with the model or script should be reset. * \returns The CommandReply that contains the result * of the AI.INFO execution on the server + * \throw SmartRedis::Exception if info retrieval fails */ virtual CommandReply get_model_script_ai_info(const std::string& address, const std::string& key, const bool reset_stat); + /*! + * \brief Retrieve the current model chunk size + * \returns The size in bytes for model chunking + */ + virtual int get_model_chunk_size(); + + /*! + * \brief Run a CommandList via a Pipeline. 
+ All commands must go to the same shard + * \param cmdlist The list of commands to run + * \returns The PipelineReply with the result of command execution + * \throw SmartRedis::Exception if execution fails + */ + PipelineReply run_in_pipeline(CommandList& cmdlist); + + /*! + * \brief Create a string representation of the Redis connection + * \returns A string representation of the Redis connection + */ + virtual std::string to_string() const; protected: @@ -542,22 +585,21 @@ class RedisCluster : public RedisServer * \param cmd The command to run on the server * \param db_prefix The prefix of the db node the * command addresses - * \returns The CommandReply from the - * command execution + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ inline CommandReply _run(const Command& cmd, std::string db_prefix); /*! * \brief Connect to the cluster at the address and port - * \param address_port A string formatted as - * tcp:://address:port - * for redis connection + * \param db_address The server address + * \throw SmartRedis::Exception if connection fails */ inline void _connect(SRAddress& db_address); /*! - * \brief Map the RedisCluster via the CLUSTER SLOTS - * command. + * \brief Map the RedisCluster via the CLUSTER SLOTS command. + * \throw SmartRedis::Exception if the command fails */ inline void _map_cluster(); @@ -678,6 +720,7 @@ class RedisCluster : public RedisServer * keys use the same hash slot * \param keys The keys to be deleted * \returns A vector of updated names + * \throw SmartRedis::Exception if deletion fails */ void _delete_keys(std::vector<std::string> keys); @@ -688,6 +731,7 @@ class RedisCluster : public RedisServer * in the model * \param outputs The keys of output tensors that * will be used to save model results + * \throw SmartRedis::Exception if execution fails */ void __run_model_dagrun(const std::string& key, std::vector<std::string> inputs, @@ -706,6 +750,21 @@ class RedisCluster : public RedisServer std::vector<std::string>& inputs, std::vector<std::string>& outputs); + /*! + * \brief Reconfigure the chunking size that Redis uses for model + * serialization, replication, and the model_get command. + * \details This method triggers the AI.CONFIG method in the Redis + * database to change the model chunking size. + * + * NOTE: The default size of 511MB should be fine for most + * applications, so it is expected to be very rare that a + * client calls this method. It is not necessary to call + * this method for a model to be chunked. + * \param chunk_size The new chunk size in bytes + * \throw SmartRedis::Exception if the command fails. + */ + virtual void set_model_chunk_size(int chunk_size); + /*! * \brief Execute a pipeline for the provided commands. * The provided commands MUST be executable on a single @@ -718,6 +777,7 @@ class RedisCluster : public RedisServer * \return A PipelineReply for the provided commands. The * PipelineReply will be in the same order as the provided * Command vector.
+ * \throw SmartRedis::Exception if pipelined execution fails */ PipelineReply _run_pipeline(std::vector& cmds, std::string& shard_prefix); diff --git a/2023-01/smartsim/smartredis/include/redisserver.h b/2023-01/smartsim/smartredis/include/redisserver.h index 2a64f029..76a7b7c5 100644 --- a/2023-01/smartsim/smartredis/include/redisserver.h +++ b/2023-01/smartsim/smartredis/include/redisserver.h @@ -33,7 +33,6 @@ #include #include #include -#include #include "command.h" #include "commandreply.h" @@ -59,7 +58,7 @@ namespace SmartRedis { -class SRObject; +class ConfigOptions; /*! * \brief Abstract class that defines interface for @@ -71,9 +70,10 @@ class RedisServer { /*! * \brief Default constructor - * \param context The owning context + * \param cfgopts Our source for configuration options + * \throw SmartRedis::Exception if connection fails */ - RedisServer(const SRObject* context); + RedisServer(ConfigOptions* cfgopts); /*! * \brief Destructor @@ -82,55 +82,52 @@ class RedisServer { /*! * \brief Run a single-key Command on the server - * \param cmd The single-key Comand to run - * \returns The CommandReply from the - * command execution + * \param cmd The single-key Command to run + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(SingleKeyCommand& cmd) = 0; /*! * \brief Run a multi-key Command on the server - * \param cmd The multi-key Comand to run - * \returns The CommandReply from the - * command execution + * \param cmd The multi-key Command to run + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(MultiKeyCommand& cmd) = 0; /*! * \brief Run a compound Command on the server - * \param cmd The compound Comand to run - * \returns The CommandReply from the - * command execution + * \param cmd The compound Command to run + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(CompoundCommand& cmd) = 0; /*! * \brief Run a non-keyed Command that * addresses the given db node on the server - * \param cmd The non-keyed Command that - * addresses the given db node - * \returns The CommandReply from the - * command execution + * \param cmd The non-keyed Command that addresses the given db node + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(AddressAtCommand& cmd) = 0; /*! * \brief Run a non-keyed Command that * addresses any db node on the server - * \param cmd The non-keyed Command that - * addresses any db node - * \returns The CommandReply from the - * command execution + * \param cmd The non-keyed Command that addresses any db node + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(AddressAnyCommand& cmd) = 0; /*! 
* \brief Run a non-keyed Command that * addresses every db node on the server - * \param cmd The non-keyed Command that - * addresses any db node - * \returns The CommandReply from the - * command execution + * \param cmd The non-keyed Command that addresses all db nodes + * \returns The CommandReply from the command execution + * \throw SmartRedis::Exception if command execution fails */ virtual CommandReply run(AddressAllCommand& cmd) = 0; @@ -138,11 +135,10 @@ class RedisServer { * \brief Run multiple single-key or single-hash slot * Command on the server. Each Command in the * CommandList is run sequentially. - * \param cmd The CommandList containing multiple - * single-key or single-hash - * slot Comand to run - * \returns A list of CommandReply for each Command - * in the CommandList + * \param cmd The CommandList containing multiple single-key or + * single-hash slot Command to run + * \returns A list of CommandReply for each Command in the CommandList + * \throw SmartRedis::Exception if command execution fails */ virtual std::vector<CommandReply> run(CommandList& cmd) = 0; @@ -153,12 +149,12 @@ class RedisServer { * by shard, and executed in groups by shard. * Commands are not guaranteed to be executed * in any sequence or ordering. - * \param cmd The CommandList containing multiple - * single-key or single-hash - * slot Command to run + * \param cmd_list The CommandList containing multiple single-key + * or single-hash slot Command to run * \returns A list of CommandReply for each Command * in the CommandList. The order of the result * matches the order of the input CommandList. + * \throw SmartRedis::Exception if command execution fails */ virtual PipelineReply run_via_unordered_pipelines(CommandList& cmd_list) = 0; @@ -167,6 +163,7 @@ class RedisServer { * \brief Check if a key exists in the database * \param key The key to check * \returns True if the key exists, otherwise False + * \throw SmartRedis::Exception if existence check fails */ virtual bool key_exists(const std::string& key) = 0; @@ -175,6 +172,7 @@ class RedisServer { * \param key The key containing the field * \param field The field in the key to check * \returns True if the hash field exists, otherwise False + * \throw SmartRedis::Exception if existence check fails */ virtual bool hash_field_exists(const std::string& key, const std::string& field) = 0; @@ -183,6 +181,7 @@ class RedisServer { * \brief Check if a model or script exists in the database * \param key The script or model key * \return True if the model or script exists + * \throw SmartRedis::Exception if existence check fails */ virtual bool model_key_exists(const std::string& key) = 0; @@ -198,6 +197,7 @@ class RedisServer { * \param tensor The Tensor to put on the server * \returns The CommandReply from the put tensor * command execution + * \throw SmartRedis::Exception if tensor storage fails */ virtual CommandReply put_tensor(TensorBase& tensor) = 0; @@ -206,9 +206,20 @@ class RedisServer { * \param key The name of the tensor to retrieve * \returns The CommandReply from the get tensor server * command execution + * \throw SmartRedis::Exception if tensor retrieval fails */ virtual CommandReply get_tensor(const std::string& key) = 0; + /*! + * \brief Get a list of Tensors from the server.
For clustered + * servers, all tensors must be on the same node + * \param keys The keys of the tensor to retrieve + * \returns The PipelineReply from executing the get tensor commands + * \throw SmartRedis::Exception if tensor retrieval fails + */ + virtual PipelineReply get_tensors( + const std::vector& keys) = 0; + /*! * \brief Rename a tensor in the database * \param key The original key for the tensor @@ -217,6 +228,7 @@ class RedisServer { * execution in the renaming of the tensor. * Different implementations may have different * sequences of commands. + * \throw SmartRedis::Exception if tensor rename fails */ virtual CommandReply rename_tensor(const std::string& key, const std::string& new_key) @@ -227,6 +239,7 @@ class RedisServer { * \param key The database key for the tensor * \returns The CommandReply from delete command * executed on the server + * \throw SmartRedis::Exception if tensor removal fails */ virtual CommandReply delete_tensor(const std::string& key) = 0; @@ -239,6 +252,7 @@ class RedisServer { * execution in the copying of the tensor. * Different implementations may have different * sequences of commands. + * \throw SmartRedis::Exception if tensor copy fails */ virtual CommandReply copy_tensor(const std::string& src_key, const std::string& dest_key) @@ -253,6 +267,7 @@ class RedisServer { * execution in the copying of the tensor. * Different implementations may have different * sequences of commands. + * \throw SmartRedis::Exception if tensor copy fails */ virtual CommandReply copy_tensors(const std::vector& src, const std::vector& dest @@ -262,7 +277,7 @@ class RedisServer { * \brief Set a model from std::string_view buffer in the * database for future execution * \param key The key to associate with the model - * \param model The model as a continuous buffer string_view + * \param model The model as a sequence of buffer string_view chunks * \param backend The name of the backend * (TF, TFLITE, TORCH, ONNX) * \param device The name of the device for execution @@ -270,6 +285,7 @@ class RedisServer { * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -277,13 +293,15 @@ class RedisServer { * \param outputs One or more names of model output nodes * (TF models only) * \returns The CommandReply from the set_model Command + * \throw RuntimeException for all client errors */ virtual CommandReply set_model(const std::string& key, - std::string_view model, + const std::vector& model, const std::string& backend, const std::string& device, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector& inputs = std::vector(), @@ -295,7 +313,7 @@ class RedisServer { * \brief Set a model from std::string_view buffer in the * database for future execution in a multi-GPU system * \param name The name to associate with the model - * \param model The model as a continuous buffer string_view + * \param model The model as a sequence of buffer string_view chunks * \param backend The name of the backend * (TF, TFLITE, TORCH, ONNX) * \param first_gpu The first GPU to use with this model @@ -303,6 +321,7 @@ class RedisServer { * \param batch_size The batch size for model execution * \param min_batch_size The minimum batch size for model * execution + * \param 
min_batch_timeout Max time (ms) to wait for min batch size * \param tag A tag to attach to the model for * information purposes * \param inputs One or more names of model input nodes @@ -312,12 +331,13 @@ class RedisServer { * \throw RuntimeException for all client errors */ virtual void set_model_multigpu(const std::string& name, - const std::string_view& model, + const std::vector& model, const std::string& backend, int first_gpu, int num_gpus, int batch_size = 0, int min_batch_size = 0, + int min_batch_timeout = 0, const std::string& tag = "", const std::vector& inputs = std::vector(), @@ -332,6 +352,7 @@ class RedisServer { * (e.g. CPU or GPU) * \param script The script source in a std::string_view * \returns The CommandReply from set_script Command + * \throw RuntimeException for all client errors */ virtual CommandReply set_script(const std::string& key, const std::string& device, @@ -363,6 +384,7 @@ class RedisServer { * execution in the model run execution. * Different implementations may have different * sequences of commands. + * \throw RuntimeException for all client errors */ virtual CommandReply run_model(const std::string& key, std::vector inputs, @@ -401,6 +423,7 @@ class RedisServer { * execution in the script run execution. * Different implementations may have different * sequences of commands. + * \throw RuntimeException for all client errors */ virtual CommandReply run_script(const std::string& key, const std::string& function, @@ -442,7 +465,7 @@ class RedisServer { * \brief Remove a model from the database that was stored * for use with multiple GPUs * \param name The name associated with the model - * \param first_cpu the first GPU (zero-based) to use with the model + * \param first_gpu the first GPU (zero-based) to use with the model * \param num_gpus the number of gpus for which the model was stored * \throw SmartRedis::Exception if model deletion fails */ @@ -461,7 +484,7 @@ class RedisServer { * \brief Remove a script from the database that was stored * for use with multiple GPUs * \param name The name associated with the script - * \param first_cpu the first GPU (zero-based) to use with the script + * \param first_gpu the first GPU (zero-based) to use with the script * \param num_gpus the number of gpus for which the script was stored * \throw SmartRedis::Exception if script deletion fails */ @@ -473,6 +496,7 @@ class RedisServer { * \param key The key associated with the model * \returns The CommandReply that contains the result * of the get model execution on the server + * \throw SmartRedis::Exception if model retrieval fails */ virtual CommandReply get_model(const std::string& key) = 0; @@ -481,6 +505,7 @@ class RedisServer { * \param key The key associated with the script * \returns The CommandReply that contains the result * of the get script execution on the server + * \throw SmartRedis::Exception if script retrieval fails */ virtual CommandReply get_script(const std::string& key) = 0; @@ -492,12 +517,57 @@ class RedisServer { * with the model or script should be reset. * \returns The CommandReply that contains the result * of the AI.INFO execution on the server + * \throw SmartRedis::Exception if info retrieval fails */ virtual CommandReply get_model_script_ai_info(const std::string& address, const std::string& key, const bool reset_stat) = 0; + /*! + * \brief Retrieve the current model chunk size + * \returns The size in bytes for model chunking + */ + virtual int get_model_chunk_size() = 0; + + /*! 
+ * \brief Reconfigure the chunking size that Redis uses for model + * serialization, replication, and the model_get command. + * \details This method triggers the AI.CONFIG method in the Redis + * database to change the model chunking size. + * + * NOTE: The default size of 511MB should be fine for most + * applications, so it is expected to be very rare that a + * client calls this method. It is not necessary to call + * this method for a model to be chunked. + * \param chunk_size The new chunk size in bytes + * \throw SmartRedis::Exception if the command fails. + */ + virtual void set_model_chunk_size(int chunk_size) = 0; + + /*! + * \brief Store the current model chunk size + * \param chunk_size The updated model chunk size + */ + virtual void store_model_chunk_size(int chunk_size) { + _model_chunk_size = chunk_size; + } + + /*! + * \brief Run a CommandList via a Pipeline. For clustered databases + * all commands must go to the same shard + * \param cmdlist The list of commands to run + * \returns The PipelineReply with the result of command execution + * \throw SmartRedis::Exception if execution fails + */ + virtual PipelineReply run_in_pipeline(CommandList& cmdlist) = 0; + + /*! + * \brief Create a string representation of the Redis connection + * \returns A string representation of the Redis connection + */ + virtual std::string to_string() const = 0; + protected: /*! @@ -530,6 +600,17 @@ class RedisServer { */ int _command_attempts; + /*! + * \brief The chunk size into which models need to be broken for + * transfer to Redis + */ + int _model_chunk_size; + + /*! + * \brief Default socket timeout (milliseconds) + */ + static constexpr int _DEFAULT_SOCKET_TIMEOUT = 250; + /*! * \brief Default value of connection timeout (seconds) */ @@ -562,7 +643,12 @@ class RedisServer { static constexpr int _DEFAULT_THREAD_COUNT = 4; /*! - * \brief The owning context + * \brief Our source for configuration options + */ + ConfigOptions* _cfgopts; + + /*! + * \brief Our logging context */ const SRObject* _context; @@ -586,13 +672,18 @@ class RedisServer { */ ThreadPool *_tp; - /* + /*! * \brief Indicates whether the server was connected to * via a Unix domain socket (true) or TCP connection * (false) */ bool _is_domain_socket; + /*! + * \brief Default model chunk size + */ + static constexpr int _UNKNOWN_MODEL_CHUNK_SIZE = -1; + /*! * \brief Environment variable for connection timeout */ diff --git a/2023-01/smartsim/smartredis/include/scalarfield.h b/2023-01/smartsim/smartredis/include/scalarfield.h index 98b7d66e..45a6392e 100644 --- a/2023-01/smartsim/smartredis/include/scalarfield.h +++ b/2023-01/smartsim/smartredis/include/scalarfield.h @@ -123,7 +123,7 @@ class ScalarField : public MetadataField { * \brief Retrieve the number of values in the field * \returns The number of values */ - virtual size_t size(); + virtual size_t size() const; /*!
* \brief Clear the values in the field diff --git a/2023-01/smartsim/smartredis/include/scalarfield.tcc b/2023-01/smartsim/smartredis/include/scalarfield.tcc index 67421306..9de0ba87 100644 --- a/2023-01/smartsim/smartredis/include/scalarfield.tcc +++ b/2023-01/smartsim/smartredis/include/scalarfield.tcc @@ -75,7 +75,7 @@ void ScalarField::append(const void* value) // Retrieve the number of values in a scalar template -size_t ScalarField::size() +size_t ScalarField::size() const { return _vals.size(); } diff --git a/2023-01/smartsim/smartredis/include/srobject.h b/2023-01/smartsim/smartredis/include/srobject.h index e66d6221..d88e4814 100644 --- a/2023-01/smartsim/smartredis/include/srobject.h +++ b/2023-01/smartsim/smartredis/include/srobject.h @@ -101,7 +101,7 @@ class SRObject return _lname; } - private: + protected: /*! * \brief The name prefix log entries with diff --git a/2023-01/smartsim/smartredis/include/stringfield.h b/2023-01/smartsim/smartredis/include/stringfield.h index 15f47da6..a4f15e9b 100644 --- a/2023-01/smartsim/smartredis/include/stringfield.h +++ b/2023-01/smartsim/smartredis/include/stringfield.h @@ -117,7 +117,7 @@ class StringField : public MetadataField { * \brief Retrieve the number of values in the field * \returns The number of values */ - virtual size_t size(); + virtual size_t size() const; /*! * \brief Clear the values in the field diff --git a/2023-01/smartsim/smartredis/include/tensorbase.h b/2023-01/smartsim/smartredis/include/tensorbase.h index 8e755bcc..7bea067b 100644 --- a/2023-01/smartsim/smartredis/include/tensorbase.h +++ b/2023-01/smartsim/smartredis/include/tensorbase.h @@ -151,13 +151,13 @@ class TensorBase{ * \brief Retrieve the name of the TensorBase * \returns The name of the TensorBase */ - std::string name(); + std::string name() const; /*! * \brief Retrieve the type of the TensorBase * \returns The type of the TensorBase */ - SRTensorType type(); + SRTensorType type() const; /*! * \brief Retrieve a string representation of @@ -170,13 +170,13 @@ class TensorBase{ * \brief Retrieve the dimensions of the TensorBase * \returns TensorBase dimensions */ - std::vector dims(); + std::vector dims() const; /*! * \brief Retrieve number of values in the TensorBase * \returns The number values in the TensorBase */ - size_t num_values(); + size_t num_values() const; /*! * \brief Retrieve a pointer to the TensorBase data diff --git a/2023-01/smartsim/smartredis/include/tensorpack.h b/2023-01/smartsim/smartredis/include/tensorpack.h index e4522bd1..c4f72942 100644 --- a/2023-01/smartsim/smartredis/include/tensorpack.h +++ b/2023-01/smartsim/smartredis/include/tensorpack.h @@ -133,7 +133,7 @@ class TensorPack * \param name The name used to reference the tensor * \returns A pointer to the TensorBase object */ - TensorBase* get_tensor(const std::string& name); + TensorBase* get_tensor(const std::string& name) const; /*! * \brief Return a pointer to the tensor data memory space @@ -149,7 +149,7 @@ class TensorPack * \returns True if the name corresponds to a tensor * in the TensorPack, otherwise False. */ - bool tensor_exists(const std::string& name); + bool tensor_exists(const std::string& name) const; /*! 
* \brief Returns an iterator pointing to the diff --git a/2023-01/smartsim/smartredis/include/utility.h b/2023-01/smartsim/smartredis/include/utility.h index a19576e4..76529bf3 100644 --- a/2023-01/smartsim/smartredis/include/utility.h +++ b/2023-01/smartsim/smartredis/include/utility.h @@ -31,24 +31,39 @@ #include #include +#include "sr_enums.h" ///@file namespace SmartRedis { +/*! +* \brief Flag to skip warnings when retrieving configuration options +* and the requested option is not present +*/ +const int flag_suppress_warning = 1; + +/*! +* \brief Flag to emit a KeyException when retrieving configuration options +* and the requested option is not present +*/ +const int throw_on_absent = 2; + /*! * \brief Initialize an integer from configuration, such as an * environment variable * \param value Receives the configuration value * \param cfg_key The key to query for the configuration variable * \param default_value Default if configuration key is not set -* \param suppress_warning Do not issue a warning if the variable -* is not set +* \param flags flag_suppress_warning = Do not issue a warning if the +* variable is not set; throw_on_absent = throw KeyException +* if value not set. The value zero means that no flags are set +* \throw KeyException if value not set and throw_on_absent is set */ void get_config_integer(int& value, const std::string& cfg_key, const int default_value, - bool suppress_warning = false); + int flags = 0); /*! * \brief Initialize an string from configuration, such as an @@ -56,13 +71,28 @@ void get_config_integer(int& value, * \param value Receives the configuration value * \param cfg_key The key to query for the configuration variable * \param default_value Default if configuration key is not set -* \param suppress_warning Do not issue a warning if the variable -* is not set +* \param flags flag_suppress_warning = Do not issue a warning if the +* variable is not set; throw_on_absent = throw KeyException +* if value not set. The value zero means that no flags are set +* \throw KeyException if value not set and throw_on_absent is set */ void get_config_string(std::string& value, const std::string& cfg_key, const std::string& default_value, - bool suppress_warning = false); + int flags = 0); + +/*! +* \brief Create a string representation of a tensor type +* \param ttype The tensor type to put in string form +*/ +std::string to_string(SRTensorType ttype); + +/*!
+* \brief Create a string representation of a metadata field type +* \param mdtype The metadata field type to put in string form +*/ +std::string to_string(SRMetaDataType mdtype); + } // namespace SmartRedis diff --git a/2023-01/smartsim/smartredis/pyproject.toml b/2023-01/smartsim/smartredis/pyproject.toml index 3ebd8574..0d0dbf02 100644 --- a/2023-01/smartsim/smartredis/pyproject.toml +++ b/2023-01/smartsim/smartredis/pyproject.toml @@ -6,7 +6,8 @@ requires = ["setuptools>=42", build-backend = "setuptools.build_meta" [tool.black] -target-version = ['py37', 'py38'] +line-length = 88 +target-version = ['py37', 'py38', 'py39', 'py310'] exclude = ''' ( | \.egg @@ -26,3 +27,29 @@ exclude = ''' [tool.pytest.ini_options] log_cli = true log_cli_level = "debug" + +[tool.mypy] +namespace_packages = true +files = [ + "src/python/module/smartredis/" +] +plugins = [] +ignore_errors=false + +# Strict fn defs +disallow_untyped_calls = true +disallow_untyped_defs = true +disallow_incomplete_defs = true +disallow_untyped_decorators = true + +# Safety/Upgrading Mypy +warn_unused_ignores = true +# warn_redundant_casts = true # not a per-module setting? + +[[tool.mypy.overrides]] +# Ignore packages that are not used or not typed +module = [ + "smartredis.smartredisPy", +] +ignore_missing_imports = true +ignore_errors = true \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/requirements-dev.txt b/2023-01/smartsim/smartredis/requirements-dev.txt deleted file mode 100644 index 4dcea5c8..00000000 --- a/2023-01/smartsim/smartredis/requirements-dev.txt +++ /dev/null @@ -1,12 +0,0 @@ -numpy>=1.18.2 -sphinx==3.1.1 -sphinx-book-theme==0.2.0 -pytest>=6.0.0 -pytest-cov==2.10.1 -black==20.8b1 -isort==5.6.4 -pylint==2.6.0 -breathe==4.26.0 -torch==1.7.1 -sphinx-fortran==1.1.1 -jinja2==3.0.3 diff --git a/2023-01/smartsim/smartredis/requirements.txt b/2023-01/smartsim/smartredis/requirements.txt index ad21d06a..bb27ece7 100644 --- a/2023-01/smartsim/smartredis/requirements.txt +++ b/2023-01/smartsim/smartredis/requirements.txt @@ -1 +1,3 @@ -numpy>=1.18.2 +# requirements.txt +# Dependencies are maintained in setup.cfg +-e . 
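The flag-based configuration getters added to utility.h above replace the old boolean suppress_warning parameter: callers now combine behaviors by OR-ing integer flags, and passing zero keeps the previous default behavior. A minimal usage sketch, assuming only the declarations shown in the diff (the SR_CONN_TIMEOUT key name is illustrative; SSDB is the address variable used elsewhere in this repository):

    #include <string>
    #include "utility.h" // get_config_integer/get_config_string as declared above

    void read_client_config()
    {
        int timeout = 0;
        // Quietly fall back to the default when the variable is unset
        SmartRedis::get_config_integer(timeout, "SR_CONN_TIMEOUT", 100,
                                       SmartRedis::flag_suppress_warning);

        std::string address;
        // Flags combine with bitwise OR; with throw_on_absent set, a missing
        // key raises a KeyException instead of silently using the default
        SmartRedis::get_config_string(address, "SSDB", "127.0.0.1:6379",
            SmartRedis::flag_suppress_warning | SmartRedis::throw_on_absent);
    }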
diff --git a/2023-01/smartsim/smartredis/setup.cfg b/2023-01/smartsim/smartredis/setup.cfg index abf560d3..c27151f4 100644 --- a/2023-01/smartsim/smartredis/setup.cfg +++ b/2023-01/smartsim/smartredis/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = smartredis -version = 0.3.1 +version = 0.4.2 description = RedisAI clients for SmartSim long_description = file: README.md long_description_content_type=text/markdown @@ -20,6 +20,7 @@ classifiers = Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 + Programming Language :: Python :: 3.10 License :: OSI Approved :: BSD License Intended Audience :: Science/Research Topic :: Scientific/Engineering @@ -29,30 +30,30 @@ package_dir= =src/python/module packages=find: setup_requires = - setuptools>=39.2 + setuptools>=42 include_package_data = True install_requires = numpy>=1.18.2 - xarray>=0.14.1 - -python_requires = >=3.7 +python_requires = >=3.7,<3.11 [options.extras_require] dev = - numpy>=1.18.2 pytest>=6.0.0 pytest-cov==2.10.1 - black==20.8b1 + black==23.3.0 isort==5.6.4 - pylint==2.6.0 - torch==1.7.1 + pylint>=2.10.0 + torch>=1.7.1 + mypy>=1.4.0 + jinja2==3.0.3 doc= sphinx==3.1.1 sphinx-fortran==1.1.1 sphinx_rtd_theme>=0.5.0 - breathe==4.25.1 + sphinx-book-theme==0.2.0 + breathe==4.26.0 xarray= xarray>=0.14.1 diff --git a/2023-01/smartsim/smartredis/setup.py b/2023-01/smartsim/smartredis/setup.py index dd19c6c0..90493eef 100644 --- a/2023-01/smartsim/smartredis/setup.py +++ b/2023-01/smartsim/smartredis/setup.py @@ -33,7 +33,6 @@ from setuptools import setup, Extension, find_packages from setuptools.command.build_ext import build_ext -from distutils.version import LooseVersion # get number of processors NPROC = mp.cpu_count() @@ -50,59 +49,72 @@ def cmake(self): cmake_cmd = shutil.which("cmake") return cmake_cmd + @property + def make(self): + """Find and use installed make""" + make_cmd = shutil.which("make") + return make_cmd + def run(self): + # Validate dependencies + check_prereq("cmake") check_prereq("make") check_prereq("gcc") check_prereq("g++") + # Set up parameters + source_directory = Path(__file__).parent.resolve() build_directory = Path(self.build_temp).resolve() - cmake_args = [ - '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + str(build_directory), - '-DPYTHON_EXECUTABLE=' + sys.executable - ] - cfg = 'Debug' if self.debug else 'Release' - build_args = ['--config', cfg] - build_args += ['--', f'-j{str(NPROC)}'] - self.build_args = build_args - - cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] - # setup build environment + # Setup build environment env = os.environ.copy() env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format( env.get('CXXFLAGS', ''), self.distribution.get_version()) - # make tmp dir - if not build_directory.is_dir(): - os.makedirs(self.build_temp) - - print('-'*10, 'Building C dependencies', '-'*40) - make_cmd = shutil.which("make") - setup_path = Path(os.path.abspath(os.path.dirname(__file__))).resolve() - - # build dependencies - subprocess.check_call([f"{make_cmd} deps"], - cwd=setup_path, - shell=True) - - # run cmake prep step - print('-'*10, 'Running CMake prepare', '-'*40) - subprocess.check_call([self.cmake, setup_path] + cmake_args, - cwd=build_directory, - env=env) - - - print('-'*10, 'Building extensions', '-'*40) - cmake_cmd = [self.cmake, '--build', '.'] + self.build_args - subprocess.check_call(cmake_cmd, - cwd=build_directory) - - shutil.copytree(setup_path.joinpath("install"), - build_directory.joinpath("install")) + # Build dependencies + print('-'*10,
'Building third-party dependencies', '-'*40) + subprocess.check_call( + [self.make, "deps"], + cwd=source_directory, + shell=False + ) + + # Run CMake config step + print('-'*10, 'Configuring build', '-'*40) + config_args = [ + '-S.', + f'-B{str(build_directory)}', + '-DSR_BUILD=' + cfg, + '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + str(build_directory), + '-DPYTHON_EXECUTABLE=' + sys.executable, + '-DSR_PYTHON=ON', + ] + subprocess.check_call( + [self.cmake] + config_args, + cwd=source_directory, + env=env + ) + + # Run CMake build step + print('-'*10, 'Building library', '-'*40) + build_args = [ + '--build', + str(build_directory), + '--', + f'-j{str(NPROC)}' + ] + subprocess.check_call( + [self.cmake] + build_args, + cwd=build_directory, + env=env + ) # Move from build temp to final position + # (Note that we skip the CMake install step because + # we configured the library to be built directly into the + # build directory) for ext in self.extensions: self.move_output(ext) @@ -116,23 +128,13 @@ def move_output(self, ext): self.copy_file(source_path, dest_path) # check that certain dependencies are installed -# TODO: Check versions for compatible versions def check_prereq(command): try: - out = subprocess.check_output([command, '--version']) + _ = subprocess.check_output([command, '--version']) except OSError: raise RuntimeError( f"{command} must be installed to build SmartRedis") -# update existing env var -def update_env_var(var, new): - try: - value = os.environ[var] - value = ":".join((value, str(new))) - return value - except KeyError: - return new - ext_modules = [ CMakeExtension('smartredisPy'), ] diff --git a/2023-01/smartsim/smartredis/smartredis_defs.cmake b/2023-01/smartsim/smartredis/smartredis_defs.cmake new file mode 100644 index 00000000..3825db81 --- /dev/null +++ b/2023-01/smartsim/smartredis/smartredis_defs.cmake @@ -0,0 +1,87 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +# Set defaults if variables are undefined +if(NOT DEFINED SR_BUILD) + set(SR_BUILD "Release") +endif() +if(NOT DEFINED SR_LINK) + set(SR_LINK "Shared") +endif() + +# Configure the CMake build based on the SR_BUILD selection +string(TOLOWER "${SR_BUILD}" srbuild_lowercase) +if(srbuild_lowercase STREQUAL "release") + set(CMAKE_BUILD_TYPE RELEASE) + set(SRLIB_NAME_SUFFIX "") + set(SR_STATIC_NAME_SUFFIX "-r") +elseif(srbuild_lowercase STREQUAL "debug") + set(CMAKE_BUILD_TYPE DEBUG) + set(SRLIB_NAME_SUFFIX "-debug") + set(SR_STATIC_NAME_SUFFIX "-d") +elseif(srbuild_lowercase STREQUAL "coverage") + set(CMAKE_BUILD_TYPE DEBUG) + set(SRLIB_NAME_SUFFIX "-coverage") + set(SR_STATIC_NAME_SUFFIX "-c") + if((CMAKE_CXX_COMPILER_ID STREQUAL "GNU") AND (CMAKE_C_COMPILER_ID STREQUAL "GNU")) + add_compile_options(--coverage) + add_link_options(--coverage) + else() + message(WARNING "A coverage build was specified, but the CMAKE compiler is not GCC") + endif() +else() + message(FATAL_ERROR "Unrecognized build type (${SR_BUILD}) specified in SR_BUILD") +endif() + +# Configure CMake linkage on the SR_LINK selection +string(TOLOWER "${SR_LINK}" srlink_lowercase) +if(srlink_lowercase STREQUAL "static") + set(SMARTREDIS_LINK_MODE STATIC) + set(SMARTREDIS_LINK_LIBRARY_SUFFIX .a) + set(SMARTREDIS_LINK_SUFFIX "-static") + set(CMAKE_POSITION_INDEPENDENT_CODE ON) + set(STATIC_BUILD TRUE) + if(APPLE) + message(FATAL_ERROR "Static builds are not supported on MacOS") + endif(APPLE) +elseif(srlink_lowercase STREQUAL "shared") + set(SMARTREDIS_LINK_MODE SHARED) + set(SMARTREDIS_LINK_LIBRARY_SUFFIX .so) + set(SMARTREDIS_LINK_SUFFIX "") + set(STATIC_BUILD FALSE) +else() + message(FATAL_ERROR "Unrecognized link type (${SR_LINK}) specified in SR_LINK") +endif() + +# Identify the SmartRedis library names based on the build and link +set(SMARTREDIS_LIB smartredis${SRLIB_NAME_SUFFIX}${SMARTREDIS_LINK_SUFFIX}) +set(SMARTREDIS_LIB_FULLNAME + libsmartredis${SRLIB_NAME_SUFFIX}${SMARTREDIS_LINK_SUFFIX}${SMARTREDIS_LINK_LIBRARY_SUFFIX} +) +set(SMARTREDIS_FORTRAN_LIB smartredis-fortran${SRLIB_NAME_SUFFIX}${SMARTREDIS_LINK_SUFFIX}) +set(SMARTREDIS_FORTRAN_LIB_FULLNAME + libsmartredis-fortran${SRLIB_NAME_SUFFIX}${SMARTREDIS_LINK_SUFFIX}${SMARTREDIS_LINK_LIBRARY_SUFFIX} +) diff --git a/2023-01/smartsim/smartredis/src/c/c_client.cpp b/2023-01/smartsim/smartredis/src/c/c_client.cpp index ecd014f6..38e22665 100644 --- a/2023-01/smartsim/smartredis/src/c/c_client.cpp +++ b/2023-01/smartsim/smartredis/src/c/c_client.cpp @@ -35,104 +35,139 @@ using namespace SmartRedis; -// Return a pointer to a new Client. -// The caller is responsible for deleting the client via DeleteClient(). -extern "C" -SRError SmartRedisCClient( +// Decorator to standardize exception handling in C Client API methods +template +auto c_client_api(T&& client_api_func, const char* name) +{ + // we create a closure below + auto decorated = [name, client_api_func = + std::forward(client_api_func)](auto&&... args) + { + SRError result = SRNoError; + try { + client_api_func(std::forward(args)...); + } + catch (const Exception& e) { + SRSetLastError(e); + result = e.to_error_code(); + } + catch (...) 
{ + std::string msg( + "A non-standard exception was encountered while executing "); + msg += name; + SRSetLastError(SRInternalException(msg)); + result = SRInternalError; + } + return result; + }; + return decorated; +} + +// Macro to invoke the decorator with a lambda function +#define MAKE_CLIENT_API(stuff)\ + c_client_api([&] { stuff }, __func__)() + +// Create a simple Client +SRError SimpleCreateClient( + const char* logger_name, + const size_t logger_name_length, + void** new_client) +{ + return MAKE_CLIENT_API({ + // Sanity check params + SR_CHECK_PARAMS(new_client != NULL && logger_name != NULL); + + std::string _logger_name(logger_name, logger_name_length); + try { + *new_client = NULL; + Client* s = new Client(_logger_name); + *new_client = reinterpret_cast(s); + } + catch (const std::bad_alloc& e) { + throw SRBadAllocException("client allocation"); + } + }); +} + +// Create a Client that uses a ConfigOptions object +SRError CreateClient( + void* config_options, + const char* logger_name, + const size_t logger_name_length, + void** new_client) +{ + return MAKE_CLIENT_API({ + // Sanity check params + SR_CHECK_PARAMS( + config_options != NULL && new_client != NULL && logger_name != NULL); + + ConfigOptions* cfgopts = reinterpret_cast(config_options); + + std::string _logger_name(logger_name, logger_name_length); + try { + *new_client = NULL; + Client* s = new Client(cfgopts, _logger_name); + *new_client = reinterpret_cast(s); + } + catch (const std::bad_alloc& e) { + throw SRBadAllocException("client allocation"); + } + }); +} + +// Return a pointer to a new Client (deprecated) +extern "C" SRError SmartRedisCClient( bool cluster, const char* logger_name, const size_t logger_name_length, void** new_client) { - SRError result = SRNoError; - try { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(new_client != NULL && logger_name != NULL); std::string _logger_name(logger_name, logger_name_length); - Client* s = new Client(cluster, _logger_name); - *new_client = reinterpret_cast(s); - } - catch (const std::bad_alloc& e) { - *new_client = NULL; - SRSetLastError(SRBadAllocException("client allocation")); - result = SRBadAllocError; - } - catch (const Exception& e) { - *new_client = NULL; - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - *new_client = NULL; - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + try { + *new_client = NULL; + Client* s = new Client(cluster, _logger_name); + *new_client = reinterpret_cast(s); + } + catch (const std::bad_alloc& e) { + throw SRBadAllocException("client allocation"); + } + }); } -// Free the memory associated with the c client. -extern "C" -SRError DeleteCClient(void** c_client) +// Free the memory associated with the c client +extern "C" SRError DeleteCClient(void** c_client) { - SRError result = SRNoError; - - try { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL); delete reinterpret_cast(*c_client); *c_client = NULL; - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Put a dataset into the database. 
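The conversion above funnels every C API body through a single exception-to-error-code path instead of repeating the same try/catch blocks in each function. Below is a minimal, self-contained sketch of the same closure-decorator idea; the Error enum and std::runtime_error are stand-ins for SmartRedis's SRError/Exception machinery, which the sketch does not reproduce.

    #include <iostream>
    #include <stdexcept>
    #include <utility>

    enum Error { NoError, KnownError, InternalError };  // stand-ins for SRError

    // Wrap any callable so that thrown exceptions become error codes,
    // mirroring the c_client_api decorator in the diff above.
    template <typename F>
    auto decorate(F&& body, const char* name)
    {
        return [name, body = std::forward<F>(body)](auto&&... args) {
            Error result = NoError;
            try {
                body(std::forward<decltype(args)>(args)...);
            }
            catch (const std::exception& e) {   // the "known" exception family
                std::cerr << name << ": " << e.what() << '\n';
                result = KnownError;
            }
            catch (...) {                       // anything non-standard
                std::cerr << name << ": non-standard exception\n";
                result = InternalError;
            }
            return result;
        };
    }

    // Immediately invoked, like MAKE_CLIENT_API(stuff) above
    #define MAKE_API(stuff) decorate([&] { stuff }, __func__)()

    Error do_work(int x)
    {
        return MAKE_API({
            if (x < 0) throw std::runtime_error("negative input");
        });
    }

    int main() { return do_work(-1) == KnownError ? 0 : 1; }

Note how the client constructors in the diff re-throw std::bad_alloc as SRBadAllocException inside the wrapped body, so allocation failures flow through the same decorator path as every other Exception.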
-extern "C" -SRError put_dataset(void* c_client, void* dataset) +// Put a dataset into the database +extern "C" SRError put_dataset(void* c_client, void* dataset) { - SRError result = SRNoError; - - try { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && dataset != NULL); Client* s = reinterpret_cast(c_client); DataSet* d = reinterpret_cast(dataset); - s->put_dataset(*d); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Return a pointer to a new dataset. The user is -// responsible for deleting the dataset via DeallocateeDataSet() -extern "C" -SRError get_dataset(void* c_client, const char* name, - const size_t name_length, void **dataset) +// Return a pointer to a new dataset +extern "C" SRError get_dataset( + void* c_client, const char* name, const size_t name_length, void **dataset) { - SRError result = SRNoError; - - try { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && dataset != NULL); @@ -145,30 +180,18 @@ SRError get_dataset(void* c_client, const char* name, *dataset = reinterpret_cast(d); } catch (const std::bad_alloc& e) { *dataset = NULL; - throw SRBadAllocException("client allocation"); + throw SRBadAllocException("dataset allocation"); } - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Rename a dataset in the database. -extern "C" -SRError rename_dataset(void* c_client, const char* old_name, - const size_t old_name_length, const char* new_name, - const size_t new_name_length) +// Rename a dataset in the database +extern "C" SRError rename_dataset( + void* c_client, const char* old_name, + const size_t old_name_length, const char* new_name, + const size_t new_name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && old_name != NULL && new_name != NULL); @@ -177,29 +200,16 @@ SRError rename_dataset(void* c_client, const char* old_name, std::string new_name_str(new_name, new_name_length); s->rename_dataset(name_str, new_name_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } - // Copy a dataset from the src_name to the dest_name -extern "C" -SRError copy_dataset(void* c_client, const char* src_name, - const size_t src_name_length, const char* dest_name, - const size_t dest_name_length) +extern "C" SRError copy_dataset( + void* c_client, const char* src_name, + const size_t src_name_length, const char* dest_name, + const size_t dest_name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && src_name != NULL && dest_name != NULL); @@ -208,59 +218,35 @@ SRError copy_dataset(void* c_client, const char* src_name, std::string dest_name_str(dest_name, dest_name_length); s->copy_dataset(src_name_str, dest_name_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Delete a dataset (all metadata and tensors) from the database. -extern "C" -SRError delete_dataset(void* c_client, const char* name, const size_t name_length) +// Delete a dataset (all metadata and tensors) from the database +extern "C" SRError delete_dataset( + void* c_client, const char* name, const size_t name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL); Client* s = reinterpret_cast(c_client); std::string dataset_name(name, name_length); s->delete_dataset(dataset_name); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Put a tensor of a specified type into the database -extern "C" -SRError put_tensor(void* c_client, - const char* name, - const size_t name_length, - void* data, - const size_t* dims, - const size_t n_dims, - const SRTensorType type, - const SRMemoryLayout mem_layout) +extern "C" SRError put_tensor( + void* c_client, + const char* name, + const size_t name_length, + void* data, + const size_t* dims, + const size_t n_dims, + const SRTensorType type, + const SRMemoryLayout mem_layout) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && data != NULL && dims != NULL); @@ -272,33 +258,21 @@ SRError put_tensor(void* c_client, dims_vec.assign(dims, dims + n_dims); s->put_tensor(name_str, data, dims_vec, type, mem_layout); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Get a tensor of a specified type from the database -extern "C" -SRError get_tensor(void* c_client, - const char* name, - const size_t name_length, - void** result, - size_t** dims, - size_t* n_dims, - SRTensorType* type, - const SRMemoryLayout mem_layout) +extern "C" SRError get_tensor( + void* c_client, + const char* name, + const size_t name_length, + void** result, + size_t** dims, + size_t* n_dims, + SRTensorType* type, + const SRMemoryLayout mem_layout) { - SRError outcome = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && result != NULL && dims != NULL && n_dims != NULL); @@ -307,34 +281,22 @@ SRError get_tensor(void* c_client, std::string name_str(name, name_length); s->get_tensor(name_str, *result, *dims, *n_dims, *type, mem_layout); - } - catch (const Exception& e) { - SRSetLastError(e); - outcome = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - outcome = SRInternalError; - } - - return outcome; + }); } // Get a tensor of a specified type from the database -// and put the values into the user provided memory space. 
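From the caller's side, the contract is unchanged by this refactor: each function still returns an SRError and records details for later retrieval. The sketch below exercises the signatures shown above; it assumes the SmartRedis C header c_client.h and the library's usual enum spellings (SRTensorTypeDouble, SRMemLayoutContiguous), none of which are part of this diff.

    // Sketch only: create a client, store a tensor, clean up.
    #include "c_client.h"   // assumed SmartRedis C header
    #include <cstring>

    int main()
    {
        void* client = nullptr;
        const char* logger = "example";
        if (SimpleCreateClient(logger, std::strlen(logger), &client) != SRNoError)
            return 1;

        double data[4] = {1.0, 2.0, 3.0, 4.0};
        size_t dims[1] = {4};
        const char* key = "my_tensor";
        SRError err = put_tensor(client, key, std::strlen(key),
                                 data, dims, 1,
                                 SRTensorTypeDouble, SRMemLayoutContiguous);

        DeleteCClient(&client);
        return err == SRNoError ? 0 : 1;
    }

The deprecated SmartRedisCClient(bool cluster, ...) entry point is kept above for existing callers; new code would go through SimpleCreateClient or the ConfigOptions-based CreateClient.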
-extern "C" -SRError unpack_tensor(void* c_client, - const char* name, - const size_t name_length, - void* result, - const size_t* dims, - const size_t n_dims, - const SRTensorType type, - const SRMemoryLayout mem_layout) +// and put the values into the user provided memory space +extern "C" SRError unpack_tensor( + void* c_client, + const char* name, + const size_t name_length, + void* result, + const size_t* dims, + const size_t n_dims, + const SRTensorType type, + const SRMemoryLayout mem_layout) { - SRError outcome = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && result != NULL && dims != NULL); @@ -346,28 +308,18 @@ SRError unpack_tensor(void* c_client, dims_vec.assign(dims, dims + n_dims); s->unpack_tensor(name_str, result, dims_vec, type, mem_layout); - } - catch (const Exception& e) { - SRSetLastError(e); - outcome = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - outcome = SRInternalError; - } - - return outcome; + }); } // Rename a tensor from old_name to new_name -extern "C" -SRError rename_tensor(void* c_client, - const char* old_name, const size_t old_name_length, - const char* new_name, const size_t new_name_length) +extern "C" SRError rename_tensor( + void* c_client, + const char* old_name, + const size_t old_name_length, + const char* new_name, + const size_t new_name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && old_name != NULL && new_name != NULL); @@ -376,27 +328,14 @@ SRError rename_tensor(void* c_client, std::string new_name_str(new_name, new_name_length); s->rename_tensor(old_name_str, new_name_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Delete a tensor from the database. -extern "C" -SRError delete_tensor(void* c_client, const char* name, - const size_t name_length) +// Delete a tensor from the database +extern "C" SRError delete_tensor( + void* c_client, const char* name, const size_t name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL); @@ -404,30 +343,18 @@ SRError delete_tensor(void* c_client, const char* name, std::string name_str(name, name_length); s->delete_tensor(name_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Copy a tensor from src_name to dest_name. 
-extern "C" -SRError copy_tensor(void* c_client, - const char* src_name, - const size_t src_name_length, - const char* dest_name, - const size_t dest_name_length) +// Copy a tensor from src_name to dest_name +extern "C" SRError copy_tensor( + void* c_client, + const char* src_name, + const size_t src_name_length, + const char* dest_name, + const size_t dest_name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && src_name != NULL && dest_name != NULL); @@ -436,20 +363,11 @@ SRError copy_tensor(void* c_client, std::string dest_str(dest_name, dest_name_length); s->copy_tensor(src_str, dest_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -bool CompareCaseInsensitive(const char* a,const char* b) { +// Perform a case insensitive compare fo two strings +static bool _compareCaseInsensitive(const char* a,const char* b) { while (*a != '\0' && *b != '\0') { // Check current character if (toupper(*a) != toupper(*b)) @@ -467,14 +385,15 @@ bool CompareCaseInsensitive(const char* a,const char* b) { // Return True if the backend is TF or TFLITE bool _isTensorFlow(const char* backend) { - return CompareCaseInsensitive(backend, "TF") || CompareCaseInsensitive(backend, "TFLITE"); + return _compareCaseInsensitive(backend, "TF") || + _compareCaseInsensitive(backend, "TFLITE"); } // Check the parameters common to all set_model functions -void _check_params_set_model(void* c_client, - const char* name, const char* backend, - const char** inputs, const size_t* input_lengths, const size_t n_inputs, - const char** outputs, const size_t* output_lengths, const size_t n_outputs) +void _check_params_set_model( + void* c_client, const char* name, const char* backend, + const char** inputs, const size_t* input_lengths, const size_t n_inputs, + const char** outputs, const size_t* output_lengths, const size_t n_outputs) { // Sanity check params. Tag is strictly optional, and inputs/outputs are // mandatory IFF backend is TensorFlow (TF or TFLITE) @@ -483,13 +402,14 @@ void _check_params_set_model(void* c_client, if (_isTensorFlow(backend)) { if (inputs == NULL || input_lengths == NULL || outputs == NULL || output_lengths == NULL) { - throw SRParameterException("Inputs and outputs are required with TensorFlow"); + throw SRParameterException( + "Inputs and outputs are required with TensorFlow"); } } // For the inputs and outputs arrays, a single empty string is ok (this means - // that the array should be skipped) but if more than one entry is present, the - // strings must be nonzero length + // that the array should be skipped) but if more than one entry is present, + // the strings must be nonzero length if (_isTensorFlow(backend)) { if (n_inputs != 1 && input_lengths[0] != 0) { for (size_t i = 0; i < n_inputs; i++){ @@ -510,23 +430,21 @@ void _check_params_set_model(void* c_client, } } -// Set a model stored in a binary file. 
-extern "C" -SRError set_model_from_file(void* c_client, - const char* name, const size_t name_length, - const char* model_file, const size_t model_file_length, - const char* backend, const size_t backend_length, - const char* device, const size_t device_length, - const int batch_size, const int min_batch_size, - const char* tag, const size_t tag_length, - const char** inputs, const size_t* input_lengths, - const size_t n_inputs, - const char** outputs, const size_t* output_lengths, - const size_t n_outputs) +// Set a model stored in a binary file +extern "C" SRError set_model_from_file( + void* c_client, + const char* name, const size_t name_length, + const char* model_file, const size_t model_file_length, + const char* backend, const size_t backend_length, + const char* device, const size_t device_length, + const int batch_size, + const int min_batch_size, + const int min_batch_timeout, + const char* tag, const size_t tag_length, + const char** inputs, const size_t* input_lengths, const size_t n_inputs, + const char** outputs, const size_t* output_lengths, const size_t n_outputs) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params. Tag is strictly optional, and inputs/outputs are // mandatory IFF backend is TensorFlow (TF or TFLITE) _check_params_set_model(c_client, name, backend, inputs, input_lengths, n_inputs, @@ -560,35 +478,26 @@ SRError set_model_from_file(void* c_client, } s->set_model_from_file(name_str, model_file_str, backend_str, device_str, - batch_size, min_batch_size, tag_str, input_vec, - output_vec); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } + batch_size, min_batch_size, min_batch_timeout, + tag_str, input_vec, output_vec); + }); +} - return result; -} -extern "C" -SRError set_model_from_file_multigpu(void* c_client, - const char* name, const size_t name_length, - const char* model_file, const size_t model_file_length, - const char* backend, const size_t backend_length, - const int first_gpu, const int num_gpus, - const int batch_size, const int min_batch_size, - const char* tag, const size_t tag_length, - const char** inputs, const size_t* input_lengths, - const size_t n_inputs, const char** outputs, - const size_t* output_lengths, const size_t n_outputs) +// Set a model stored in a binary file for use with multiple GPUs +extern "C" SRError set_model_from_file_multigpu( + void* c_client, + const char* name, const size_t name_length, + const char* model_file, const size_t model_file_length, + const char* backend, const size_t backend_length, + const int first_gpu, const int num_gpus, + const int batch_size, const int min_batch_size, + const int min_batch_timeout, + const char* tag, const size_t tag_length, + const char** inputs, const size_t* input_lengths, + const size_t n_inputs, const char** outputs, + const size_t* output_lengths, const size_t n_outputs) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params. 
Tag is strictly optional, and inputs/outputs are // mandatory IFF backend is TensorFlow (TF or TFLITE) _check_params_set_model(c_client, name, backend, inputs, input_lengths, n_inputs, @@ -621,38 +530,27 @@ SRError set_model_from_file_multigpu(void* c_client, } s->set_model_from_file_multigpu(name_str, model_file_str, backend_str, first_gpu, - num_gpus, batch_size, min_batch_size, tag_str, - input_vec, output_vec); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + num_gpus, batch_size, min_batch_size, min_batch_timeout, + tag_str, input_vec, output_vec); + }); } // Set a model stored in a buffer c-string. -extern "C" -SRError set_model(void* c_client, - const char* name, const size_t name_length, - const char* model, const size_t model_length, - const char* backend, const size_t backend_length, - const char* device, const size_t device_length, - const int batch_size, const int min_batch_size, - const char* tag, const size_t tag_length, - const char** inputs, const size_t* input_lengths, - const size_t n_inputs, - const char** outputs, const size_t* output_lengths, - const size_t n_outputs) +extern "C" SRError set_model( + void* c_client, + const char* name, const size_t name_length, + const char* model, const size_t model_length, + const char* backend, const size_t backend_length, + const char* device, const size_t device_length, + const int batch_size, const int min_batch_size, + const int min_batch_timeout, + const char* tag, const size_t tag_length, + const char** inputs, const size_t* input_lengths, + const size_t n_inputs, + const char** outputs, const size_t* output_lengths, + const size_t n_outputs) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params. Tag is strictly optional, and inputs/outputs are // mandatory IFF backend is TensorFlow (TF or TFLITE) _check_params_set_model(c_client, name, backend, inputs, input_lengths, n_inputs, @@ -686,38 +584,27 @@ SRError set_model(void* c_client, } s->set_model(name_str, model_str, backend_str, device_str, - batch_size, min_batch_size, tag_str, input_vec, - output_vec); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + batch_size, min_batch_size, min_batch_timeout, + tag_str, input_vec, output_vec); + }); } -// Set a model stored in a buffer c-string. 
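Every set_model* entry point in this block gains a min_batch_timeout parameter immediately after min_batch_size and forwards it to the C++ Client, so existing C callers must be updated for the new prototypes. A hypothetical call site (all names and values below are placeholders; per _check_params_set_model above, non-TensorFlow backends may pass NULL inputs/outputs):

    // Placeholder values throughout; assumes a client as in the earlier sketches.
    SRError store_model(void* client)
    {
        const char* name = "my_model";
        const char* file = "model.pt";
        const char* backend = "TORCH";
        const char* device = "CPU";
        return set_model_from_file(
            client,
            name, std::strlen(name),
            file, std::strlen(file),
            backend, std::strlen(backend),
            device, std::strlen(device),
            /*batch_size=*/0,
            /*min_batch_size=*/0,
            /*min_batch_timeout=*/0,   // new parameter in this change
            /*tag=*/"", /*tag_length=*/0,
            /*inputs=*/nullptr, /*input_lengths=*/nullptr, /*n_inputs=*/0,
            /*outputs=*/nullptr, /*output_lengths=*/nullptr, /*n_outputs=*/0);
    }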
-extern "C" -SRError set_model_multigpu(void* c_client, - const char* name, const size_t name_length, - const char* model, const size_t model_length, - const char* backend, const size_t backend_length, - const int first_gpu, const int num_gpus, - const int batch_size, const int min_batch_size, - const char* tag, const size_t tag_length, - const char** inputs, const size_t* input_lengths, - const size_t n_inputs, - const char** outputs, const size_t* output_lengths, - const size_t n_outputs) +// Set a model stored in a buffer c-string +extern "C" SRError set_model_multigpu( + void* c_client, + const char* name, const size_t name_length, + const char* model, const size_t model_length, + const char* backend, const size_t backend_length, + const int first_gpu, const int num_gpus, + const int batch_size, const int min_batch_size, + const int min_batch_timeout, + const char* tag, const size_t tag_length, + const char** inputs, const size_t* input_lengths, + const size_t n_inputs, + const char** outputs, const size_t* output_lengths, + const size_t n_outputs) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params. Tag is strictly optional, and inputs/outputs are // mandatory IFF backend is TensorFlow (TF or TFLITE) _check_params_set_model(c_client, name, backend, inputs, input_lengths, n_inputs, @@ -750,33 +637,18 @@ SRError set_model_multigpu(void* c_client, } s->set_model_multigpu(name_str, model_str, backend_str, first_gpu, num_gpus, - batch_size, min_batch_size, tag_str, input_vec, - output_vec); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + batch_size, min_batch_size, min_batch_timeout, + tag_str, input_vec, output_vec); + }); } - // Retrieve the model and model length from the database -extern "C" -SRError get_model(void* c_client, - const char* name, - const size_t name_length, - size_t* model_length, - const char** model) +extern "C" SRError get_model( + void* c_client, + const char* name, const size_t name_length, + size_t* model_length, const char** model) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && model_length != NULL && model != NULL); @@ -787,32 +659,17 @@ SRError get_model(void* c_client, *model_length = model_str_view.size(); *model = model_str_view.data(); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Put a script in the database that is stored in a file. 
-extern "C" -SRError set_script_from_file(void* c_client, - const char* name, - const size_t name_length, - const char* device, - const size_t device_length, - const char* script_file, - const size_t script_file_length) +extern "C" SRError set_script_from_file( + void* c_client, + const char* name, const size_t name_length, + const char* device, const size_t device_length, + const char* script_file, const size_t script_file_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && device != NULL && script_file != NULL); @@ -823,32 +680,18 @@ SRError set_script_from_file(void* c_client, std::string script_file_str(script_file, script_file_length); s->set_script_from_file(name_str, device_str, script_file_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Put a script in the database that is stored in a file in a multi-GPU system -extern "C" -SRError set_script_from_file_multigpu(void* c_client, - const char* name, - const size_t name_length, - const char* script_file, - const size_t script_file_length, - const int first_gpu, - const int num_gpus) +extern "C" SRError set_script_from_file_multigpu( + void* c_client, + const char* name, const size_t name_length, + const char* script_file, const size_t script_file_length, + const int first_gpu, + const int num_gpus) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && script_file != NULL); @@ -857,32 +700,17 @@ SRError set_script_from_file_multigpu(void* c_client, std::string script_file_str(script_file, script_file_length); s->set_script_from_file_multigpu(name_str, script_file_str, first_gpu, num_gpus); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Put a script in the database that is stored in a string. -extern "C" -SRError set_script(void* c_client, - const char* name, - const size_t name_length, - const char* device, - const size_t device_length, - const char* script, - const size_t script_length) +extern "C" SRError set_script( + void* c_client, + const char* name, const size_t name_length, + const char* device, const size_t device_length, + const char* script, const size_t script_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && device != NULL && script != NULL); @@ -894,32 +722,18 @@ SRError set_script(void* c_client, std::string script_str(script, script_length); s->set_script(name_str, device_str, script_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Put a script in the database that is stored in a string in a multi-GPU system -extern "C" -SRError set_script_multigpu(void* c_client, - const char* name, - const size_t name_length, - const char* script, - const size_t script_length, - const int first_gpu, - const int num_gpus) +extern "C" SRError set_script_multigpu( + void* c_client, + const char* name, const size_t name_length, + const char* script, const size_t script_length, + const int first_gpu, + const int num_gpus) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && script != NULL); @@ -929,32 +743,16 @@ SRError set_script_multigpu(void* c_client, std::string script_str(script, script_length); s->set_script_multigpu(name_str, script_str, first_gpu, num_gpus); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } - - // Retrieve the script stored in the database -extern "C" -SRError get_script(void* c_client, - const char* name, - const size_t name_length, - const char** script, - size_t* script_length) +extern "C" SRError get_script( + void* c_client, + const char* name, const size_t name_length, + const char** script, size_t* script_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && script != NULL && script_length != NULL); @@ -965,69 +763,49 @@ SRError get_script(void* c_client, (*script) = script_str_view.data(); (*script_length) = script_str_view.size(); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -void _check_params_run_script(void* c_client, - const char* name, - const char* function, - const char** inputs, - const size_t* input_lengths, - const size_t n_inputs, - const char** outputs, - const size_t* output_lengths, - const size_t n_outputs) +// Validate parameters for running scripts +void _check_params_run_script( + void* c_client, + const char* name, + const char* function, + const char** inputs, const size_t* input_lengths, const size_t n_inputs, + const char** outputs, const size_t* output_lengths, const size_t n_outputs) { - // Sanity check params - SR_CHECK_PARAMS(c_client != NULL && name != NULL && function != NULL && - inputs != NULL && input_lengths != NULL && - outputs != NULL && output_lengths != NULL); - - // Inputs and outputs are mandatory for run_script - for (size_t i = 0; i < n_inputs; i++){ - if (inputs[i] == NULL || input_lengths[i] == 0) { - throw SRParameterException( - "inputs[" + std::to_string(i) + "] is NULL or empty"); - } + // Sanity check params + SR_CHECK_PARAMS(c_client != NULL && name != NULL && function != NULL && + inputs != NULL && input_lengths != NULL && + outputs != NULL && output_lengths != NULL); + + // Inputs and outputs are mandatory for run_script + for (size_t i = 0; i < n_inputs; i++){ + if (inputs[i] == NULL || input_lengths[i] == 0) { + throw SRParameterException( + "inputs[" + std::to_string(i) + "] is NULL or empty"); } - for (size_t i = 0; i < n_outputs; i++) { - if (outputs[i] == NULL || output_lengths[i] == 0) { - throw SRParameterException( - "outputs[" + std::to_string(i) + "] is NULL or empty"); - } + } + for (size_t i = 0; i < n_outputs; i++) { + if (outputs[i] == NULL || output_lengths[i] == 0) { + throw SRParameterException( + "outputs[" + std::to_string(i) + "] is NULL or empty"); } + } } // Run a script function in the database -extern "C" -SRError run_script(void* c_client, - const char* name, - const size_t name_length, - const char* function, - const size_t function_length, - const char** inputs, - const size_t* input_lengths, - const size_t n_inputs, - const char** outputs, - const size_t* output_lengths, - const size_t n_outputs) +extern "C" SRError run_script( + void* c_client, + const char* name, const size_t name_length, + const char* function, const size_t function_length, + const char** inputs, const size_t* input_lengths, const size_t n_inputs, + const char** outputs, const size_t* output_lengths, const size_t n_outputs) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ _check_params_run_script(c_client, name, function, - inputs, input_lengths, n_inputs, - outputs, output_lengths, n_outputs); + inputs, input_lengths, n_inputs, + outputs, output_lengths, n_outputs); std::string name_str(name, name_length); std::string function_str(function, function_length); @@ -1047,42 +825,24 @@ SRError run_script(void* c_client, Client* s = reinterpret_cast(c_client); s->run_script(name_str, function_str, input_vec, output_vec); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
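    /*
     * Illustration of the marshaling step run_script (and run_model below)
     * performs: each C-style triplet (const char** names, const size_t* lengths,
     * size_t n) is converted into a std::vector<std::string> before calling the
     * C++ Client, along the lines of:
     *
     *     std::vector<std::string> input_vec;
     *     for (size_t i = 0; i < n_inputs; i++)
     *         input_vec.emplace_back(inputs[i], input_lengths[i]);
     *
     * The loop body itself is elided from the hunk above, so this is an
     * assumption about the implementation, shown for orientation only.
     */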
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Run a script function in the database in a multi-GPU system -extern "C" -SRError run_script_multigpu(void* c_client, - const char* name, - const size_t name_length, - const char* function, - const size_t function_length, - const char** inputs, - const size_t* input_lengths, - const size_t n_inputs, - const char** outputs, - const size_t* output_lengths, - const size_t n_outputs, - const int offset, - const int first_gpu, - const int num_gpus) +extern "C" SRError run_script_multigpu( + void* c_client, + const char* name, const size_t name_length, + const char* function, const size_t function_length, + const char** inputs, const size_t* input_lengths, const size_t n_inputs, + const char** outputs, const size_t* output_lengths, const size_t n_outputs, + const int offset, + const int first_gpu, + const int num_gpus) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ _check_params_run_script(c_client, name, function, - inputs, input_lengths, n_inputs, - outputs, output_lengths, n_outputs); + inputs, input_lengths, n_inputs, + outputs, output_lengths, n_outputs); std::string name_str(name, name_length); std::string function_str(function, function_length); @@ -1102,66 +862,47 @@ SRError run_script_multigpu(void* c_client, Client* s = reinterpret_cast(c_client); s->run_script_multigpu(name_str, function_str, input_vec, output_vec, - offset, first_gpu, num_gpus); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + offset, first_gpu, num_gpus); + }); } -void _check_params_run_model(void* c_client, - const char* name, - const char** inputs, - const size_t* input_lengths, - const size_t n_inputs, - const char** outputs, - const size_t* output_lengths, - const size_t n_outputs) +// Validate the parameters for running models +void _check_params_run_model( + void* c_client, + const char* name, + const char** inputs, const size_t* input_lengths, const size_t n_inputs, + const char** outputs, const size_t* output_lengths, const size_t n_outputs) { - // Sanity check params - SR_CHECK_PARAMS(c_client != NULL && name != NULL && - inputs != NULL && input_lengths != NULL && - outputs != NULL && output_lengths != NULL); - - // Inputs and outputs are mandatory for run_script - for (size_t i = 0; i < n_inputs; i++){ - if (inputs[i] == NULL || input_lengths[i] == 0) { - throw SRParameterException( - "inputs[" + std::to_string(i) + "] is NULL or empty"); - } + // Sanity check params + SR_CHECK_PARAMS(c_client != NULL && name != NULL && + inputs != NULL && input_lengths != NULL && + outputs != NULL && output_lengths != NULL); + + // Inputs and outputs are mandatory for run_script + for (size_t i = 0; i < n_inputs; i++){ + if (inputs[i] == NULL || input_lengths[i] == 0) { + throw SRParameterException( + "inputs[" + std::to_string(i) + "] is NULL or empty"); } - for (size_t i = 0; i < n_outputs; i++) { - if (outputs[i] == NULL || output_lengths[i] == 0) { - throw SRParameterException( - "outputs[" + std::to_string(i) + "] is NULL or empty"); - } + } + for (size_t i = 0; i < n_outputs; i++) { + if (outputs[i] == NULL || output_lengths[i] == 0) { + throw SRParameterException( + "outputs[" + std::to_string(i) + "] is NULL or empty"); } + } } // Run a model in the database -extern "C" -SRError 
run_model(void* c_client, - const char* name, - const size_t name_length, - const char** inputs, - const size_t* input_lengths, - const size_t n_inputs, - const char** outputs, - const size_t* output_lengths, - const size_t n_outputs) +extern "C" SRError run_model( + void* c_client, + const char* name, const size_t name_length, + const char** inputs, const size_t* input_lengths, const size_t n_inputs, + const char** outputs, const size_t* output_lengths, const size_t n_outputs) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ _check_params_run_model(c_client, name, inputs, input_lengths, n_inputs, - outputs, output_lengths, n_outputs); + outputs, output_lengths, n_outputs); std::string name_str(name, name_length); std::vector input_vec; @@ -1180,39 +921,22 @@ SRError run_model(void* c_client, Client* s = reinterpret_cast(c_client); s->run_model(name_str, input_vec, output_vec); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Run a model in the database -extern "C" -SRError run_model_multigpu(void* c_client, - const char* name, - const size_t name_length, - const char** inputs, - const size_t* input_lengths, - const size_t n_inputs, - const char** outputs, - const size_t* output_lengths, - const size_t n_outputs, - const int offset, - const int first_gpu, - const int num_gpus) +// Run a model in the database for multiple GPUs +extern "C" SRError run_model_multigpu( + void* c_client, + const char* name, const size_t name_length, + const char** inputs, const size_t* input_lengths, const size_t n_inputs, + const char** outputs, const size_t* output_lengths, const size_t n_outputs, + const int offset, + const int first_gpu, + const int num_gpus) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ _check_params_run_model(c_client, name, inputs, input_lengths, n_inputs, - outputs, output_lengths, n_outputs); + outputs, output_lengths, n_outputs); std::string name_str(name, name_length); std::vector input_vec; @@ -1231,144 +955,77 @@ SRError run_model_multigpu(void* c_client, Client* s = reinterpret_cast(c_client); s->run_model_multigpu(name_str, input_vec, output_vec, offset, - first_gpu, num_gpus); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + first_gpu, num_gpus); + }); } // Remove a model from the database -extern "C" -SRError delete_model(void* c_client, - const char* name, - const size_t name_length) +extern "C" SRError delete_model( + void* c_client, const char* name, const size_t name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL); std::string name_str(name, name_length); Client* s = reinterpret_cast(c_client); s->delete_model(name_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Remove a model from the database on a system with multiple GPUs -extern "C" -SRError delete_model_multigpu(void* c_client, - const char* name, - const size_t name_length, - const int first_gpu, - const int num_gpus) +extern "C" SRError delete_model_multigpu( + void* c_client, + const char* name, const size_t name_length, + const int first_gpu, + const int num_gpus) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL); std::string name_str(name, name_length); Client* s = reinterpret_cast(c_client); s->delete_model_multigpu(name_str, first_gpu, num_gpus); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Remove a script from the database -extern "C" -SRError delete_script(void* c_client, - const char* name, - const size_t name_length) +extern "C" SRError delete_script( + void* c_client, const char* name, const size_t name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL); std::string name_str(name, name_length); Client* s = reinterpret_cast(c_client); s->delete_script(name_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Remove a script from the database in a system with multiple GPUs -extern "C" -SRError delete_script_multigpu(void* c_client, - const char* name, - const size_t name_length, - const int first_gpu, - const int num_gpus) +extern "C" SRError delete_script_multigpu( + void* c_client, + const char* name, const size_t name_length, + const int first_gpu, + const int num_gpus) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL); std::string name_str(name, name_length); Client* s = reinterpret_cast(c_client); s->delete_script_multigpu(name_str, first_gpu, num_gpus); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Check whether a key exists in the database -extern "C" -SRError key_exists(void* c_client, const char* key, const size_t key_length, - bool* exists) +extern "C" SRError key_exists( + void* c_client, const char* key, const size_t key_length, bool* exists) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && key != NULL && exists != NULL); @@ -1376,27 +1033,14 @@ SRError key_exists(void* c_client, const char* key, const size_t key_length, std::string key_str(key, key_length); *exists = s->key_exists(key_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Check whether a model exists in the database -extern "C" -SRError model_exists(void* c_client, const char* name, const size_t name_length, - bool* exists) +extern "C" SRError model_exists( + void* c_client, const char* name, const size_t name_length, bool* exists) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && exists != NULL); @@ -1404,27 +1048,14 @@ SRError model_exists(void* c_client, const char* name, const size_t name_length, std::string name_str(name, name_length); *exists = s->model_exists(name_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Check whether a tensor exists in the database -extern "C" -SRError tensor_exists(void* c_client, const char* name, const size_t name_length, - bool* exists) +extern "C" SRError tensor_exists( + void* c_client, const char* name, const size_t name_length, bool* exists) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && exists != NULL); @@ -1432,27 +1063,14 @@ SRError tensor_exists(void* c_client, const char* name, const size_t name_length std::string name_str(name, name_length); *exists = s->tensor_exists(name_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Delay until a dataset exists in the database -extern "C" -SRError dataset_exists(void* c_client, const char* name, const size_t name_length, - bool* exists) +// Check whether a dataset exists in the database +extern "C" SRError dataset_exists( + void* c_client, const char* name, const size_t name_length, bool* exists) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && exists != NULL); @@ -1460,31 +1078,18 @@ SRError dataset_exists(void* c_client, const char* name, const size_t name_lengt std::string name_str(name, name_length); *exists = s->dataset_exists(name_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Delay until a key exists in the database -extern "C" -SRError poll_key(void* c_client, - const char* key, - const size_t key_length, - const int poll_frequency_ms, - const int num_tries, - bool* exists) +extern "C" SRError poll_key( + void* c_client, + const char* key, const size_t key_length, + const int poll_frequency_ms, + const int num_tries, + bool* exists) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && key != NULL && exists != NULL); @@ -1492,31 +1097,18 @@ SRError poll_key(void* c_client, std::string key_str(key, key_length); *exists = s->poll_key(key_str, poll_frequency_ms, num_tries); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Delay until a model exists in the database -extern "C" -SRError poll_model(void* c_client, - const char* name, - const size_t name_length, - const int poll_frequency_ms, - const int num_tries, - bool* exists) +extern "C" SRError poll_model( + void* c_client, + const char* name, const size_t name_length, + const int poll_frequency_ms, + const int num_tries, + bool* exists) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && exists != NULL); @@ -1524,31 +1116,18 @@ SRError poll_model(void* c_client, std::string name_str(name, name_length); *exists = s->poll_model(name_str, poll_frequency_ms, num_tries); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Delay until a tensor exists in the database -extern "C" -SRError poll_tensor(void* c_client, - const char* name, - const size_t name_length, - const int poll_frequency_ms, - const int num_tries, - bool* exists) +extern "C" SRError poll_tensor( + void* c_client, + const char* name, const size_t name_length, + const int poll_frequency_ms, + const int num_tries, + bool* exists) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && exists != NULL); @@ -1556,31 +1135,18 @@ SRError poll_tensor(void* c_client, std::string name_str(name, name_length); *exists = s->poll_tensor(name_str, poll_frequency_ms, num_tries); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Delay until a dataset exists in the database -extern "C" -SRError poll_dataset(void* c_client, - const char* name, - const size_t name_length, - const int poll_frequency_ms, - const int num_tries, - bool* exists) +extern "C" SRError poll_dataset( + void* c_client, + const char* name, const size_t name_length, + const int poll_frequency_ms, + const int num_tries, + bool* exists) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && exists != NULL); @@ -1588,28 +1154,14 @@ SRError poll_dataset(void* c_client, std::string name_str(name, name_length); *exists = s->poll_dataset(name_str, poll_frequency_ms, num_tries); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Establish a data source -extern "C" -SRError set_data_source(void* c_client, - const char* source_id, - const size_t source_id_length) +extern "C" SRError set_data_source( + void* c_client, const char* source_id, const size_t source_id_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && source_id != NULL); @@ -1617,102 +1169,64 @@ SRError set_data_source(void* c_client, std::string source_id_str(source_id, source_id_length); s->set_data_source(source_id_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Control whether a model ensemble prefix is used -extern "C" -SRError use_model_ensemble_prefix(void* c_client, bool use_prefix) +extern "C" SRError use_model_ensemble_prefix(void* c_client, bool use_prefix) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL); Client* s = reinterpret_cast(c_client); s->use_model_ensemble_prefix(use_prefix); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Control whether a tensor ensemble prefix is used -extern "C" -SRError use_tensor_ensemble_prefix(void* c_client, bool use_prefix) +extern "C" SRError use_tensor_ensemble_prefix(void* c_client, bool use_prefix) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL); Client* s = reinterpret_cast(c_client); s->use_tensor_ensemble_prefix(use_prefix); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } + }); +} + +// Control whether a dataset ensemble prefix is used +extern "C" SRError use_dataset_ensemble_prefix(void* c_client, bool use_prefix) +{ + return MAKE_CLIENT_API({ + // Sanity check params + SR_CHECK_PARAMS(c_client != NULL); - return result; + Client* s = reinterpret_cast(c_client); + s->use_dataset_ensemble_prefix(use_prefix); + }); } // Control whether aggregation lists are prefixed -extern "C" -SRError use_list_ensemble_prefix(void* c_client, bool use_prefix) +extern "C" SRError use_list_ensemble_prefix(void* c_client, bool use_prefix) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL); Client* s = reinterpret_cast(c_client); s->use_list_ensemble_prefix(use_prefix); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Append a dataset to the aggregation list -extern "C" -SRError append_to_list(void* c_client, const char* list_name, - const size_t list_name_length, const void* dataset) +extern "C" SRError append_to_list( + void* c_client, + const char* list_name, const size_t list_name_length, + const void* dataset) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && list_name != NULL && dataset != NULL); @@ -1721,27 +1235,14 @@ SRError append_to_list(void* c_client, const char* list_name, std::string lname(list_name, list_name_length); s->append_to_list(lname, *d); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Delete an aggregation list -extern "C" -SRError delete_list(void* c_client, const char* list_name, - const size_t list_name_length) +extern "C" SRError delete_list( + void* c_client, const char* list_name, const size_t list_name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && list_name != NULL); @@ -1749,28 +1250,16 @@ SRError delete_list(void* c_client, const char* list_name, std::string lname(list_name, list_name_length); s->delete_list(lname); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Copy an aggregation list -extern "C" -SRError copy_list(void* c_client, - const char* src_name, const size_t src_name_length, - const char* dest_name, const size_t dest_name_length) +extern "C" SRError copy_list( + void* c_client, + const char* src_name, const size_t src_name_length, + const char* dest_name, const size_t dest_name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && src_name != NULL && dest_name != NULL); @@ -1779,28 +1268,16 @@ SRError copy_list(void* c_client, std::string dname(dest_name, dest_name_length); s->copy_list(sname, dname); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Rename an aggregation list -extern "C" -SRError rename_list(void* c_client, - const char* src_name, const size_t src_name_length, - const char* dest_name, const size_t dest_name_length) +extern "C" SRError rename_list( + void* c_client, + const char* src_name, const size_t src_name_length, + const char* dest_name, const size_t dest_name_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && src_name != NULL && dest_name != NULL); @@ -1809,27 +1286,16 @@ SRError rename_list(void* c_client, std::string dname(dest_name, dest_name_length); s->rename_list(sname, dname); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Get the number of entries in the list -extern "C" -SRError get_list_length(void* c_client, const char* list_name, - const size_t list_name_length, int* result_length) +extern "C" SRError get_list_length( + void* c_client, + const char* list_name, const size_t list_name_length, + int* result_length) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && list_name != NULL); @@ -1837,29 +1303,19 @@ SRError get_list_length(void* c_client, const char* list_name, std::string lname(list_name, list_name_length); *result_length = s->get_list_length(lname); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Poll list length until length is equal to the provided length -extern "C" -SRError poll_list_length(void* c_client, const char* name, - const size_t name_length, int list_length, - int poll_frequency_ms, int num_tries, - bool* poll_result) +// Poll until list length is equal to the provided length +extern "C" SRError poll_list_length( + void* c_client, + const char* name, const size_t name_length, + int list_length, + int poll_frequency_ms, + int num_tries, + bool* poll_result) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && poll_result != NULL); @@ -1868,29 +1324,19 @@ SRError poll_list_length(void* c_client, const char* name, *poll_result = s->poll_list_length( lname, list_length, poll_frequency_ms, num_tries); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Poll list length until length is greater than or equal to the provided length -extern "C" -SRError poll_list_length_gte(void* c_client, const char* name, - const size_t name_length, int list_length, - int poll_frequency_ms, int num_tries, - bool* poll_result) +// Poll until list length is greater than or equal to the provided length +extern "C" SRError poll_list_length_gte( + void* c_client, + const char* name, const size_t name_length, + int list_length, + int poll_frequency_ms, + int num_tries, + bool* poll_result) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && poll_result != NULL); @@ -1899,29 +1345,19 @@ SRError poll_list_length_gte(void* c_client, const char* name, *poll_result = s->poll_list_length_gte( lname, list_length, poll_frequency_ms, num_tries); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
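The poll variants share one contract: the length is re-checked every `poll_frequency_ms` milliseconds, at most `num_tries` times, so the worst-case wait is roughly their product. A timeout is reported through `poll_result`, not as an error code. A sketch, assuming an existing `client` handle:

```cpp
// Sketch: wait for a list to reach exactly 8 entries, checking every
// 100 ms for at most 50 tries (about 5 s worst case).
#include <string.h>
#include "c_client.h" // assumed header name

bool wait_for_results(void* client)
{
    bool found = false;
    SRError err = poll_list_length(client, "results", strlen("results"),
                                   8,    /* target length */
                                   100,  /* poll_frequency_ms */
                                   50,   /* num_tries */
                                   &found);
    // A timeout is not an error: err stays SRNoError and found stays false
    return err == SRNoError && found;
}
```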
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Poll list length until length is less than or equal to the provided length -extern "C" -SRError poll_list_length_lte(void* c_client, const char* name, - const size_t name_length, int list_length, - int poll_frequency_ms, int num_tries, - bool* poll_result) +extern "C" SRError poll_list_length_lte( + void* c_client, + const char* name, const size_t name_length, + int list_length, + int poll_frequency_ms, + int num_tries, + bool* poll_result) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && name != NULL && poll_result != NULL); @@ -1930,28 +1366,17 @@ SRError poll_list_length_lte(void* c_client, const char* name, *poll_result = s->poll_list_length_lte( lname, list_length, poll_frequency_ms, num_tries); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Get datasets from an aggregation list -extern "C" -SRError get_datasets_from_list(void* c_client, const char* list_name, - const size_t list_name_length, - void*** datasets, size_t* num_datasets) +extern "C" SRError get_datasets_from_list( + void* c_client, + const char* list_name, const size_t list_name_length, + void*** datasets, + size_t* num_datasets) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && list_name != NULL && datasets != NULL && num_datasets != NULL); @@ -1970,29 +1395,19 @@ SRError get_datasets_from_list(void* c_client, const char* list_name, *datasets = (void**)alloc; } *num_datasets = ndatasets; - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Get a range of datasets (by index) from an aggregation list -extern "C" -SRError get_dataset_list_range(void* c_client, const char* list_name, - const size_t list_name_length, - const int start_index, const int end_index, - void*** datasets, size_t* num_datasets) +extern "C" SRError get_dataset_list_range( + void* c_client, + const char* list_name, const size_t list_name_length, + const int start_index, + const int end_index, + void*** datasets, + size_t* num_datasets) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && list_name != NULL && datasets != NULL && num_datasets != NULL); @@ -2012,29 +1427,19 @@ SRError get_dataset_list_range(void* c_client, const char* list_name, *datasets = (void**)alloc; } *num_datasets = ndatasets; - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
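Since each returned element above is a freshly `new`-ed DataSet behind a `void*`, the caller owns every handle in the output array. Ownership of the array block itself is not shown in this hunk, so the sketch below frees only the individual DataSet handles, using the `DeallocateDataSet` function from this same diff.

```cpp
// Sketch: consume every dataset on a list, then release the handles.
#include <string.h>
#include "c_client.h"  // assumed header name
#include "c_dataset.h" // assumed header name, for DeallocateDataSet

void drain_list(void* client)
{
    void** dsets = NULL;
    size_t n = 0;
    if (get_datasets_from_list(client, "results", strlen("results"),
                               &dsets, &n) != SRNoError)
        return;

    for (size_t i = 0; i < n; i++) {
        // ... read tensors/metadata from dsets[i] here ...
        DeallocateDataSet(&dsets[i]);
    }
}
```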
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } + // Get a range of datasets (by index) from an aggregation list into an // already allocated vector of datasets -extern "C" -SRError _get_dataset_list_range_allocated(void* c_client, const char* list_name, - const size_t list_name_length, - const int start_index, const int end_index, - void** datasets) +extern "C" SRError _get_dataset_list_range_allocated( + void* c_client, + const char* list_name, const size_t list_name_length, + const int start_index, + const int end_index, + void** datasets) { - SRError result = SRNoError; - try - { + return MAKE_CLIENT_API({ // Sanity check params SR_CHECK_PARAMS(c_client != NULL && list_name != NULL && datasets != NULL); @@ -2045,10 +1450,9 @@ SRError _get_dataset_list_range_allocated(void* c_client, const char* list_name, std::vector result_datasets = s->get_dataset_list_range( lname, start_index, end_index); size_t num_datasets = result_datasets.size(); - if ( num_datasets != (size_t) (end_index-start_index+1)) { - SRSetLastError(SRInternalException( - "Returned dataset list is not equal to the requested range" - )); + if (num_datasets != (size_t)(end_index - start_index + 1)) { + throw SRInternalException( + "Returned dataset list is not equal to the requested range"); } if (num_datasets > 0) { @@ -2056,15 +1460,30 @@ SRError _get_dataset_list_range_allocated(void* c_client, const char* list_name, datasets[i] = (void*)(new DataSet(std::move(result_datasets[i]))); } } + }); +} + +// Retrieve a string representation of the client +const char* client_to_string(void* c_client) +{ + static std::string result; + try + { + // Sanity check params + SR_CHECK_PARAMS(c_client != NULL); + + Client* s = reinterpret_cast(c_client); + result = s->to_string(); } catch (const Exception& e) { SRSetLastError(e); - result = e.to_error_code(); + result = e.what(); } catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; + result = "A non-standard exception was encountered while executing "; + result += __func__; + SRSetLastError(SRInternalException(result)); } - return result; + return result.c_str(); } diff --git a/2023-01/smartsim/smartredis/src/c/c_configoptions.cpp b/2023-01/smartsim/smartredis/src/c/c_configoptions.cpp new file mode 100644 index 00000000..e7b3083d --- /dev/null +++ b/2023-01/smartsim/smartredis/src/c/c_configoptions.cpp @@ -0,0 +1,205 @@ +/* + * BSD 2-Clause License + * + * Copyright (c) 2021-2023, Hewlett Packard Enterprise + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+#include <cstring>
+#include "srassert.h"
+#include "srexception.h"
+#include "configoptions.h"
+
+using namespace SmartRedis;
+
+// Decorator to standardize exception handling in C ConfigOptions API methods
+template <typename T>
+auto c_cfgopt_api(T&& cfgopt_api_func, const char* name)
+{
+    // we create a closure below
+    auto decorated = [name, cfgopt_api_func =
+        std::forward<T>(cfgopt_api_func)](auto&&... args)
+    {
+        SRError result = SRNoError;
+        try {
+            cfgopt_api_func(std::forward<decltype(args)>(args)...);
+        }
+        catch (const Exception& e) {
+            SRSetLastError(e);
+            result = e.to_error_code();
+        }
+        catch (...) {
+            std::string msg(
+                "A non-standard exception was encountered while executing ");
+            msg += name;
+            SRSetLastError(SRInternalException(msg));
+            result = SRInternalError;
+        }
+        return result;
+    };
+    return decorated;
+}
+
+// Macro to invoke the decorator with a lambda function
+#define MAKE_CFGOPT_API(stuff)\
+    c_cfgopt_api([&] { stuff }, __func__)()
+
+// Instantiate ConfigOptions from environment variables
+extern "C"
+SRError create_configoptions_from_environment(
+    const char* db_suffix,
+    const size_t db_suffix_length,
+    void** new_configoptions)
+{
+    return MAKE_CFGOPT_API({
+        try {
+            // Sanity check params
+            SR_CHECK_PARAMS(db_suffix != NULL && new_configoptions != NULL);
+
+            std::string db_suffix_str(db_suffix, db_suffix_length);
+
+            auto cfgOpts = ConfigOptions::create_from_environment(db_suffix_str);
+            ConfigOptions* pCfgOpts = cfgOpts.release();
+            *new_configoptions = reinterpret_cast<void*>(pCfgOpts);
+        }
+        catch (const std::bad_alloc& e) {
+            throw SRBadAllocException("config options allocation");
+        }
+    });
+}
+
+// Retrieve the value of a numeric configuration option
+extern "C"
+SRError get_integer_option(
+    void* c_cfgopts,
+    const char* option_name,
+    size_t option_name_len,
+    int64_t* option_result)
+{
+    return MAKE_CFGOPT_API({
+        // Sanity check params
+        SR_CHECK_PARAMS(c_cfgopts != NULL && option_name != NULL &&
+                        option_name_len > 0 && option_result != NULL);
+
+        std::string option_name_str(option_name, option_name_len);
+        ConfigOptions* co = reinterpret_cast<ConfigOptions*>(c_cfgopts);
+
+        *option_result = co->get_integer_option(option_name_str);
+    });
+}
+
+// Retrieve the value of a string configuration option
+extern "C"
+SRError get_string_option(
+    void* c_cfgopts,
+    const char* option_name,
+    size_t option_name_len,
+    char** option_result,
+    size_t* option_result_len)
+{
+    return MAKE_CFGOPT_API({
+        // Sanity check params
+        SR_CHECK_PARAMS(c_cfgopts != NULL && option_name != NULL &&
+                        option_name_len > 0 && option_result != NULL &&
+                        option_result_len != NULL);
+
+        std::string option_name_str(option_name, option_name_len);
+        ConfigOptions* co = reinterpret_cast<ConfigOptions*>(c_cfgopts);
+
+        // Set up an empty string as the result in case something goes wrong
+        *option_result = NULL;
+        *option_result_len = 0;
+
+        std::string option_result_str = co->get_string_option(option_name_str);
+
+        *option_result_len = option_result_str.length();
+        *option_result = new char[*option_result_len +
1]; + strncpy(*option_result, option_result_str.c_str(), *option_result_len); + + // Save the pointer to this buffer so we can clean it up later + co->_add_string_buffer(*option_result); + }); +} + +// Check whether a configuration option is set +extern "C" +SRError is_configured( + void* c_cfgopts, + const char* option_name, + size_t option_name_len, + bool* cfg_result) +{ + return MAKE_CFGOPT_API({ + // Sanity check params + SR_CHECK_PARAMS(c_cfgopts != NULL && option_name != NULL && cfg_result != NULL); + + std::string option_name_str(option_name, option_name_len); + ConfigOptions* co = reinterpret_cast(c_cfgopts); + + *cfg_result = co->is_configured(option_name_str); + }); +} + +// Override the value of a numeric configuration option +extern "C" +SRError override_integer_option( + void* c_cfgopts, + const char* option_name, + size_t option_name_len, + int64_t value) +{ + return MAKE_CFGOPT_API({ + // Sanity check params + SR_CHECK_PARAMS(c_cfgopts != NULL && option_name != NULL && + option_name_len > 0); + + std::string option_name_str(option_name, option_name_len); + ConfigOptions* co = reinterpret_cast(c_cfgopts); + + co->override_integer_option(option_name_str, value); + }); +} + +// Override the value of a string configuration option +extern "C" +SRError override_string_option( + void* c_cfgopts, + const char* option_name, + size_t option_name_len, + const char* value, + size_t value_len) +{ + return MAKE_CFGOPT_API({ + // Sanity check params + SR_CHECK_PARAMS(c_cfgopts != NULL && option_name != NULL && + option_name_len > 0 && value != NULL); + + std::string option_name_str(option_name, option_name_len); + std::string value_str(value, value_len); + ConfigOptions* co = reinterpret_cast(c_cfgopts); + + co->override_string_option(option_name_str, value_str); + }); +} diff --git a/2023-01/smartsim/smartredis/src/c/c_dataset.cpp b/2023-01/smartsim/smartredis/src/c/c_dataset.cpp index 0a97a183..0f0ef5f3 100644 --- a/2023-01/smartsim/smartredis/src/c/c_dataset.cpp +++ b/2023-01/smartsim/smartredis/src/c/c_dataset.cpp @@ -32,79 +32,82 @@ using namespace SmartRedis; -// Create a new DataSet. -// The user is responsible for deallocating the DataSet via DeallocateeDataSet() -extern "C" -SRError CDataSet(const char* name, const size_t name_length, void** new_dataset) +// Decorator to standardize exception handling in C Dataset API methods +template +auto c_dataset_api(T&& dataset_api_func, const char* name) { - SRError result = SRNoError; - try { + // we create a closure below + auto decorated = [name, dataset_api_func = + std::forward(dataset_api_func)](auto&&... args) + { + SRError result = SRNoError; + try { + dataset_api_func(std::forward(args)...); + } + catch (const Exception& e) { + SRSetLastError(e); + result = e.to_error_code(); + } + catch (...) 
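Stepping back to the ConfigOptions wrappers above: the suffix passed at creation time is appended to option names on lookup (see `_suffixed` in the C++ implementation later in this diff), and string buffers returned by `get_string_option` stay owned by the ConfigOptions object via `_add_string_buffer`. A sketch; the option names are illustrative only:

```cpp
// Sketch: with suffix "WORKER", option "SSDB" resolves from the
// environment variable SSDB_WORKER; the lookup fails if it is unset.
#include <string.h>
#include <stdint.h>
#include "c_configoptions.h" // assumed header name

void read_options(void)
{
    void* opts = NULL;
    if (create_configoptions_from_environment("WORKER", strlen("WORKER"),
                                              &opts) != SRNoError)
        return;

    // Overrides land in the cache and shadow the environment
    override_integer_option(opts, "threads", strlen("threads"), 4);

    char* address = NULL;
    size_t address_len = 0;
    get_string_option(opts, "SSDB", strlen("SSDB"),
                      &address, &address_len);
    // `address` stays owned by the ConfigOptions object (it is stashed
    // via _add_string_buffer above); do not free it here.
}
```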
{ + std::string msg( + "A non-standard exception was encountered while executing "); + msg += name; + SRSetLastError(SRInternalException(msg)); + result = SRInternalError; + } + return result; + }; + return decorated; +} + +// Macro to invoke the decorator with a lambda function +#define MAKE_DATASET_API(stuff)\ + c_dataset_api([&] { stuff }, __func__)() + + +// Create a new DataSet +extern "C" SRError CDataSet( + const char* name, const size_t name_length, void** new_dataset) +{ + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(name != NULL && new_dataset != NULL); std::string name_str(name, name_length); - DataSet* dataset = new DataSet(name_str); - *new_dataset = reinterpret_cast(dataset); - } - catch (const std::bad_alloc& e) { - *new_dataset = NULL; - SRSetLastError(SRBadAllocException("dataset allocation")); - result = SRBadAllocError; - } - catch (const Exception& e) { - *new_dataset = NULL; - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - *new_dataset = NULL; - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + try { + *new_dataset = NULL; + DataSet* dataset = new DataSet(name_str); + *new_dataset = reinterpret_cast(dataset); + } + catch (const std::bad_alloc& e) { + SRSetLastError(SRBadAllocException("dataset allocation")); + } + }); } // Deallocate a DataSet -extern "C" -SRError DeallocateeDataSet(void** dataset) +extern "C" SRError DeallocateDataSet(void** dataset) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL); DataSet* d = reinterpret_cast(*dataset); delete d; *dataset = NULL; - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Add a tensor to the dataset. -extern "C" -SRError add_tensor(void* dataset, - const char* tensor_name, - const size_t tensor_name_length, - void* data, - const size_t* dims, - const size_t n_dims, - const SRTensorType type, - const SRMemoryLayout mem_layout) +// Add a tensor to the dataset +extern "C" SRError add_tensor( + void* dataset, + const char* tensor_name, const size_t tensor_name_length, + void* data, + const size_t* dims, const size_t n_dims, + const SRTensorType type, + const SRMemoryLayout mem_layout) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && dims != NULL); @@ -115,30 +118,17 @@ SRError add_tensor(void* dataset, dims_vec.assign(dims, dims + n_dims); d->add_tensor(tensor_name_str, data, dims_vec, type, mem_layout); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
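The create/use/free cycle for the DataSet C API above, as a compact sketch. The tensor-type and layout enum values are assumed from SmartRedis's `sr_enums.h` and are not defined in this diff:

```cpp
// Sketch: build a dataset holding one 2x3 double tensor, then free it.
#include <string.h>
#include "c_dataset.h" // assumed header name

void build_dataset(void)
{
    double data[2][3] = {{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}};
    size_t dims[2] = {2, 3};

    void* dset = NULL;
    if (CDataSet("sample", strlen("sample"), &dset) != SRNoError)
        return;

    // Enum names assumed from sr_enums.h; a 2D C array is contiguous
    add_tensor(dset, "u", strlen("u"), (void*)data, dims, 2,
               SRTensorTypeDouble, SRMemLayoutContiguous);

    // ... normally the dataset would be put to the database here ...
    DeallocateDataSet(&dset);
}
```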
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Add a meta data value to the named meta data field -extern "C" -SRError add_meta_scalar(void* dataset, - const char* name, - const size_t name_length, - const void* data, - const SRMetaDataType type) +extern "C" SRError add_meta_scalar( + void* dataset, + const char* name, const size_t name_length, + const void* data, + const SRMetaDataType type) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && name != NULL && data != NULL); @@ -146,30 +136,16 @@ SRError add_meta_scalar(void* dataset, std::string name_str(name, name_length); d->add_meta_scalar(name_str, data, type); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Add a meta data value to the named meta data field -extern "C" -SRError add_meta_string(void* dataset, - const char* name, - const size_t name_length, - const char* data, - const size_t data_length) +extern "C" SRError add_meta_string( + void* dataset, + const char* name, const size_t name_length, + const char* data, const size_t data_length) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && name != NULL && data != NULL); @@ -178,36 +154,19 @@ SRError add_meta_string(void* dataset, std::string data_str(data, data_length); d->add_meta_string(name_str, data_str); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Get a tensor of a specified type from the database. -// This function may allocate new memory for the tensor. -// This memory will be deleted when the user deletes the -// DataSet object. -extern "C" -SRError get_dataset_tensor(void* dataset, - const char* name, - const size_t name_length, - void** data, - size_t** dims, - size_t* n_dims, - SRTensorType* type, - const SRMemoryLayout mem_layout) +// Get a tensor of a specified type from the database +extern "C" SRError get_dataset_tensor( + void* dataset, + const char* name, const size_t name_length, + void** data, + size_t** dims, size_t* n_dims, + SRTensorType* type, + const SRMemoryLayout mem_layout) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && name != NULL && data != NULL && dims != NULL && n_dims != NULL && type != NULL); @@ -217,33 +176,19 @@ SRError get_dataset_tensor(void* dataset, *type = SRTensorTypeInvalid; d->get_tensor(name_str, *data, *dims, *n_dims, *type, mem_layout); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Copy the tensor data buffer into the provided memory space (data). 
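`add_meta_scalar` copies one value of the declared type from the given address, while `add_meta_string` copies an explicit character range, so neither requires NUL-terminated input. A sketch continuing the `dset` handle from the previous example (the metadata-type enum name is assumed):

```cpp
// Sketch: attach run metadata to a dataset handle `dset`.
#include <string.h>
#include "c_dataset.h" // assumed header name

void tag_dataset(void* dset)
{
    double timestep = 0.25;
    add_meta_scalar(dset, "dt", strlen("dt"), &timestep,
                    SRMetadataTypeDouble); // enum name assumed

    const char* solver = "simpleFoam";
    add_meta_string(dset, "solver", strlen("solver"),
                    solver, strlen(solver));
}
```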
-extern "C" -SRError unpack_dataset_tensor(void* dataset, - const char* name, - const size_t name_length, - void* data, - const size_t* dims, - const size_t n_dims, - const SRTensorType type, - const SRMemoryLayout mem_layout) +extern "C" SRError unpack_dataset_tensor( + void* dataset, + const char* name, const size_t name_length, + void* data, + const size_t* dims, const size_t n_dims, + const SRTensorType type, + const SRMemoryLayout mem_layout) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && name != NULL && dims != NULL); @@ -254,32 +199,18 @@ SRError unpack_dataset_tensor(void* dataset, dims_vec.assign(dims, dims + n_dims); d->unpack_tensor(name_str, data, dims_vec, type, mem_layout); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Get a meta data field. This method may allocate memory that is cleared when -// the user deletes the DataSet object -extern "C" -SRError get_meta_scalars(void* dataset, - const char* name, - const size_t name_length, - size_t* length, - SRMetaDataType* type, - void** scalar_data) +// Get a meta data field +extern "C" SRError get_meta_scalars( + void* dataset, + const char* name, const size_t name_length, + size_t* length, + SRMetaDataType* type, + void** scalar_data) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && name != NULL && length != NULL && type != NULL && scalar_data != NULL); @@ -291,33 +222,18 @@ SRError get_meta_scalars(void* dataset, d->get_meta_scalars(key_str, data, *length, *type); *scalar_data = data; - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } -// Get a meta data string field. This method may -// allocate memory that is cleared when the user -// deletes the DataSet object. -extern "C" -SRError get_meta_strings(void* dataset, - const char* name, - const size_t name_length, - char*** data, - size_t* n_strings, - size_t** lengths) +// Get a meta data string field +extern "C" SRError get_meta_strings( + void* dataset, + const char* name, const size_t name_length, + char*** data, + size_t* n_strings, + size_t** lengths) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && name != NULL && data != NULL && n_strings != NULL && lengths != NULL); @@ -325,54 +241,28 @@ SRError get_meta_strings(void* dataset, DataSet* d = reinterpret_cast(dataset); std::string key_str(name, name_length); d->get_meta_strings(key_str, *data, *n_strings, *lengths); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Retrieve the names of tensors in the DataSet -extern "C" -SRError get_tensor_names( +extern "C" SRError get_tensor_names( void* dataset, char*** data, size_t* n_strings, size_t** lengths) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && data != NULL && n_strings != NULL && lengths != NULL); DataSet* d = reinterpret_cast(dataset); d->get_tensor_names(*data, *n_strings, *lengths); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Retrieve the data type of a Tensor in the DataSet -extern "C" -SRError get_tensor_type( +extern "C" SRError get_tensor_type( void* dataset, const char* name, size_t name_len, SRTensorType* ttype) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && ttype != NULL); @@ -380,54 +270,60 @@ SRError get_tensor_type( std::string tensor_name(name, name_len); SRTensorType result = d->get_tensor_type(tensor_name); *ttype = result; - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } + }); +} + +// Retrieve the dimensions of a Tensor in the DataSet +extern "C" SRError get_tensor_dims( + void* dataset, + const char* name, size_t name_len, + size_t** dims, + size_t *ndims) +{ + return MAKE_DATASET_API({ + // Sanity check params + SR_CHECK_PARAMS(dataset != NULL && dims != NULL && ndims != NULL); - return result; + DataSet* d = reinterpret_cast(dataset); + std::string tensor_name(name, name_len); + auto result = d->get_tensor_dims(tensor_name); + size_t num_dims = result.size(); + + // Make sure they gave us a big enough buffer + if (*ndims < num_dims) { + *ndims = num_dims; + throw SRBadAllocException( + "Insufficient space in buffer for tensor dimensions"); + } + + // Give them the results + *ndims = num_dims; + int i = 0; + for (auto it = result.cbegin(); it != result.cend(); ++it, ++i) { + (*dims)[i] = *it; + } + }); } // Retrieve the names of all metadata fields in the DataSet -extern "C" -SRError get_metadata_field_names( +extern "C" SRError get_metadata_field_names( void* dataset, char*** data, size_t* n_strings, size_t** lengths) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && data != NULL && n_strings != NULL && lengths != NULL); DataSet* d = reinterpret_cast(dataset); d->get_metadata_field_names(*data, *n_strings, *lengths); - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) 
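The new `get_tensor_dims` uses an in/out capacity protocol: on entry `*ndims` is the caller's buffer capacity; if that is too small, the call fails but writes back the required count so the caller can retry. A sketch, assuming an existing `dset` handle:

```cpp
// Sketch: the in/out capacity protocol of get_tensor_dims.
#include <string.h>
#include "c_dataset.h" // assumed header name

void query_dims(void* dset)
{
    size_t dims_buf[8];       // caller-chosen capacity
    size_t* dims = dims_buf;
    size_t ndims = 8;         // in: capacity; out: actual rank

    SRError err = get_tensor_dims(dset, "u", strlen("u"), &dims, &ndims);
    if (err != SRNoError) {
        // On the buffer-too-small path, ndims now holds the required
        // count, so the caller can retry with a larger buffer.
    }
}
```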
{ - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } // Retrieve the data type of a metadata field in the DataSet -extern "C" -SRError get_metadata_field_type( +extern "C" SRError get_metadata_field_type( void* dataset, const char* name, size_t name_len, SRMetaDataType* mdtype) { - SRError result = SRNoError; - try - { + return MAKE_DATASET_API({ // Sanity check params SR_CHECK_PARAMS(dataset != NULL && mdtype != NULL); @@ -435,15 +331,30 @@ SRError get_metadata_field_type( std::string mdf_name(name, name_len); SRMetaDataType result = d->get_metadata_field_type(mdf_name); *mdtype = result; + }); +} + +// Retrieve a string representation of the dataset +const char* dataset_to_string(void* dataset) +{ + static std::string result; + try + { + // Sanity check params + SR_CHECK_PARAMS(dataset != NULL); + + DataSet* d = reinterpret_cast(dataset); + result = d->to_string(); } catch (const Exception& e) { SRSetLastError(e); - result = e.to_error_code(); + result = e.what(); } catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; + result = "A non-standard exception was encountered while executing "; + result += __func__; + SRSetLastError(SRInternalException(result)); } - return result; + return result.c_str(); } diff --git a/2023-01/smartsim/smartredis/src/c/c_logcontext.cpp b/2023-01/smartsim/smartredis/src/c/c_logcontext.cpp index 71089e4b..7d4334e3 100644 --- a/2023-01/smartsim/smartredis/src/c/c_logcontext.cpp +++ b/2023-01/smartsim/smartredis/src/c/c_logcontext.cpp @@ -32,64 +32,68 @@ using namespace SmartRedis; -// Create a new LogContext. -// The user is responsible for deallocating the LogContext -// via DeallocateeLogContext() -extern "C" -SRError SmartRedisCLogContext( - const char* context, - const size_t context_length, - void** new_logcontext) +// Decorator to standardize exception handling in C LogContext API methods +template +auto c_logcontext_api(T&& logcontext_api_func, const char* name) { - SRError result = SRNoError; - try { + // we create a closure below + auto decorated = [name, logcontext_api_func + = std::forward(logcontext_api_func)](auto&&... args) + { + SRError result = SRNoError; + try { + logcontext_api_func(std::forward(args)...); + } + catch (const Exception& e) { + SRSetLastError(e); + result = e.to_error_code(); + } + catch (...) { + std::string msg( + "A non-standard exception was encountered while executing "); + msg += name; + SRSetLastError(SRInternalException(msg)); + result = SRInternalError; + } + return result; + }; + return decorated; +} + +// Macro to invoke the decorator with a lambda function +#define MAKE_LOGCONTEXT_API(stuff)\ + c_logcontext_api([&] { stuff }, __func__)() + + +// Create a new LogContext +extern "C" SRError SmartRedisCLogContext( + const char* context, const size_t context_length, void** new_logcontext) +{ + return MAKE_LOGCONTEXT_API({ // Sanity check params SR_CHECK_PARAMS(context != NULL && new_logcontext != NULL); - std::string context_str(context, context_length); - LogContext* logcontext = new LogContext(context_str); - *new_logcontext = reinterpret_cast(logcontext); - } - catch (const std::bad_alloc& e) { - *new_logcontext = NULL; - SRSetLastError(SRBadAllocException("logcontext allocation")); - result = SRBadAllocError; - } - catch (const Exception& e) { - *new_logcontext = NULL; - result = e.to_error_code(); - } - catch (...) 
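Note that `dataset_to_string` (like `client_to_string` earlier) returns a pointer into a function-local `static std::string`, so the result is only valid until the next call and is not safe to share across threads. Minimal usage sketch:

```cpp
// Sketch: the returned pointer aliases a static buffer inside
// dataset_to_string, so use or copy it before the next call.
#include <stdio.h>
#include "c_dataset.h" // assumed header name

void show(void* dset)
{
    printf("%s\n", dataset_to_string(dset));
}
```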
{ - *new_logcontext = NULL; - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + try { + std::string context_str(context, context_length); + *new_logcontext = NULL; + LogContext* logcontext = new LogContext(context_str); + *new_logcontext = reinterpret_cast(logcontext); + } + catch (const std::bad_alloc& e) { + SRSetLastError(SRBadAllocException("logcontext allocation")); + } + }); } // Deallocate a LogContext -extern "C" -SRError DeallocateLogContext(void** logcontext) +extern "C" SRError DeallocateLogContext(void** logcontext) { - SRError result = SRNoError; - try - { + return MAKE_LOGCONTEXT_API({ // Sanity check params SR_CHECK_PARAMS(logcontext != NULL); LogContext* lc = reinterpret_cast(*logcontext); delete lc; *logcontext = NULL; - } - catch (const Exception& e) { - SRSetLastError(e); - result = e.to_error_code(); - } - catch (...) { - SRSetLastError(SRInternalException("Unknown exception occurred")); - result = SRInternalError; - } - - return result; + }); } diff --git a/2023-01/smartsim/smartredis/src/c/c_logger.cpp b/2023-01/smartsim/smartredis/src/c/c_logger.cpp index fbe1744e..7324943a 100644 --- a/2023-01/smartsim/smartredis/src/c/c_logger.cpp +++ b/2023-01/smartsim/smartredis/src/c/c_logger.cpp @@ -43,6 +43,38 @@ using namespace SmartRedis; + +// Decorator to standardize exception handling in C Logger API methods +template +auto c_logger_api(T&& logger_api_func, const char* name) +{ + // we create a closure below + auto decorated = [name, logger_api_func + = std::forward(logger_api_func)](auto&&... args) + { + try { + logger_api_func(std::forward(args)...); + } + catch (Exception& e) { + std::cout << "Logging failure: " << e.where() + << ": " << e.what() << std::endl; + } + catch (...) 
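Unlike the client and dataset wrappers, the logger decorator below swallows everything: failures are printed to stdout rather than surfaced as error codes, which keeps the `*_noexcept` entry points safe to call from contexts that must not throw. A sketch of the string-context variant that follows (the `LLInfo` level name is assumed from `sr_enums.h`):

```cpp
// Sketch: fire-and-forget logging from C; failures are printed, never thrown.
#include <string.h>
#include "c_logger.h" // assumed header name

void report(void)
{
    const char* who = "smartSimFunctionObject";
    const char* msg = "tensor write complete";
    log_data_noexcept_string(who, strlen(who),
                             LLInfo, // level name assumed from sr_enums.h
                             msg, strlen(msg));
}
```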
{ + std::string msg( + "Internal error: A non-standard exception was encountered "); + msg += "while executing "; + msg += name; + std::cout << msg << std::endl; + } + }; + return decorated; +} + +// Macro to invoke the decorator with a lambda function +#define MAKE_LOGGER_API(stuff)\ + c_logger_api([&] { stuff }, __func__)() + + // Conditionally log data if the logging level is high enough // (exception-free variant) extern "C" void log_data_noexcept( @@ -51,17 +83,13 @@ extern "C" void log_data_noexcept( const char* data, size_t data_len) { - try { - SR_CHECK_PARAMS(context != NULL && data != NULL); - const SRObject* temp_context = - reinterpret_cast(context); - std::string strData(data, data_len); - temp_context->log_data(level, strData); - } - catch (Exception& e) { - std::cout << "Logging failure: " << e.where() - << ": " << e.what() << std::endl; - } + MAKE_LOGGER_API({ + SR_CHECK_PARAMS(context != NULL && data != NULL); + const SRObject* temp_context = + reinterpret_cast(context); + std::string strData(data, data_len); + temp_context->log_data(level, strData); + }); } // Conditionally log a warning if the logging level is high enough @@ -72,17 +100,13 @@ extern "C" void log_warning_noexcept( const char* data, size_t data_len) { - try { - SR_CHECK_PARAMS(context != NULL && data != NULL); - const SRObject* temp_context = - reinterpret_cast(context); - std::string strData(data, data_len); - temp_context->log_warning(level, strData); - } - catch (Exception& e) { - std::cout << "Logging failure: " << e.where() - << ": " << e.what() << std::endl; - } + MAKE_LOGGER_API({ + SR_CHECK_PARAMS(context != NULL && data != NULL); + const SRObject* temp_context = + reinterpret_cast(context); + std::string strData(data, data_len); + temp_context->log_warning(level, strData); + }); } // Conditionally log an error if the logging level is high enough @@ -93,79 +117,59 @@ extern "C" void log_error_noexcept( const char* data, size_t data_len) { - try { - SR_CHECK_PARAMS(context != NULL && data != NULL); - const SRObject* temp_context = - reinterpret_cast(context); - std::string strData(data, data_len); - temp_context->log_error(level, strData); - } - catch (Exception& e) { - std::cout << "Logging failure: " << e.where() - << ": " << e.what() << std::endl; - } + MAKE_LOGGER_API({ + SR_CHECK_PARAMS(context != NULL && data != NULL); + const SRObject* temp_context = + reinterpret_cast(context); + std::string strData(data, data_len); + temp_context->log_error(level, strData); + }); } - // Conditionally log data if the logging level is high enough // (exception-free variant) extern "C" void log_data_noexcept_string( - const char* context, - size_t context_len, + const char* context, size_t context_len, SRLoggingLevel level, const char* data, size_t data_len) { - try { - SR_CHECK_PARAMS(context != NULL && data != NULL); - std::string temp_context(context, context_len); - std::string strData(data, data_len); - log_data(temp_context, level, strData); - } - catch (Exception& e) { - std::cout << "Logging failure: " << e.where() - << ": " << e.what() << std::endl; - } + MAKE_LOGGER_API({ + SR_CHECK_PARAMS(context != NULL && data != NULL); + std::string temp_context(context, context_len); + std::string strData(data, data_len); + log_data(temp_context, level, strData); + }); } // Conditionally log a warning if the logging level is high enough // (exception-free variant) extern "C" void log_warning_noexcept_string( - const char* context, - size_t context_len, + const char* context, size_t context_len, SRLoggingLevel 
level, const char* data, size_t data_len) { - try { - SR_CHECK_PARAMS(context != NULL && data != NULL); - std::string temp_context(context, context_len); - std::string strData(data, data_len); - log_warning(temp_context, level, strData); - } - catch (Exception& e) { - std::cout << "Logging failure: " << e.where() - << ": " << e.what() << std::endl; - } + MAKE_LOGGER_API({ + SR_CHECK_PARAMS(context != NULL && data != NULL); + std::string temp_context(context, context_len); + std::string strData(data, data_len); + log_warning(temp_context, level, strData); + }); } // Conditionally log an error if the logging level is high enough // (exception-free variant) extern "C" void log_error_noexcept_string( - const char* context, - size_t context_len, + const char* context, size_t context_len, SRLoggingLevel level, const char* data, size_t data_len) { - try { - SR_CHECK_PARAMS(context != NULL && data != NULL); - std::string temp_context(context, context_len); - std::string strData(data, data_len); - log_error(temp_context, level, strData); - } - catch (Exception& e) { - std::cout << "Logging failure: " << e.where() - << ": " << e.what() << std::endl; - } + MAKE_LOGGER_API({ + SR_CHECK_PARAMS(context != NULL && data != NULL); + std::string temp_context(context, context_len); + std::string strData(data, data_len); + log_error(temp_context, level, strData); + }); } diff --git a/2023-01/smartsim/smartredis/src/cpp/client.cpp b/2023-01/smartsim/smartredis/src/cpp/client.cpp index e0fa7cc7..86b8dc58 100644 --- a/2023-01/smartsim/smartredis/src/cpp/client.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/client.cpp @@ -27,33 +27,108 @@ */ #include +#include +#include +#include +#include +#include +#include #include "client.h" #include "srexception.h" #include "logger.h" #include "utility.h" +#include "configoptions.h" using namespace SmartRedis; -// Constructor +// Simple Client constructor +Client::Client(const char* logger_name) + : SRObject(logger_name) +{ + // Create our ConfigOptions object (default: no suffixing) + auto cfgopts = ConfigOptions::create_from_environment(""); + _cfgopts = cfgopts.release(); + _cfgopts->_set_log_context(this); + + // Log that a new client has been instantiated + log_data(LLDebug, "New client created"); + + // Establish our server connection + _establish_server_connection(); +} + +// Constructor with config options +Client::Client(ConfigOptions* cfgopts, const std::string& logger_name) + : SRObject(logger_name), _cfgopts(cfgopts->clone()) +{ + // Log that a new client has been instantiated + _cfgopts->_set_log_context(this); + log_data(LLDebug, "New client created"); + + // Establish our server connection + _establish_server_connection(); +} + +// Initialize a connection to the back-end database +void Client::_establish_server_connection() +{ + // See what type of connection the user wants + std::string server_type = _cfgopts->_resolve_string_option( + "SR_DB_TYPE", "Clustered"); + std::transform(server_type.begin(), server_type.end(), server_type.begin(), + [](unsigned char c){ return std::tolower(c); }); + + // Set up Redis server connection + // A std::bad_alloc exception on the initializer will be caught + // by the call to new for the client + if (server_type == "clustered") { + log_data(LLDeveloper, "Instantiating clustered Redis connection"); + _redis_cluster = new RedisCluster(_cfgopts); + _redis = NULL; + _redis_server = _redis_cluster; + } + else { // Standalone or Colocated + log_data(LLDeveloper, "Instantiating standalone Redis connection"); + _redis_cluster = 
NULL; + _redis = new Redis(_cfgopts); + _redis_server = _redis; + } + log_data(LLDeveloper, "Redis connection established"); + + // Initialize key prefixing + _get_prefix_settings(); + _use_tensor_prefix = true; + _use_dataset_prefix = true; + _use_model_prefix = false; + _use_list_prefix = true; +} + +// Constructor (deprecated) Client::Client(bool cluster, const std::string& logger_name) : SRObject(logger_name) { // Log that a new client has been instantiated log_data(LLDebug, "New client created"); + // Create our ConfigOptions object (default = no suffixing) + auto cfgopts = ConfigOptions::create_from_environment(""); + _cfgopts = cfgopts.release(); + _cfgopts->_set_log_context(this); + // Set up Redis server connection // A std::bad_alloc exception on the initializer will be caught // by the call to new for the client - _redis_cluster = (cluster ? new RedisCluster(this) : NULL); - _redis = (cluster ? NULL : new Redis(this)); + _redis_cluster = (cluster ? new RedisCluster(_cfgopts) : NULL); + _redis = (cluster ? NULL : new Redis(_cfgopts)); if (cluster) _redis_server = _redis_cluster; else _redis_server = _redis; // Initialize key prefixing - _set_prefixes_from_env(); + _get_prefix_settings(); _use_tensor_prefix = true; + _use_dataset_prefix = true; _use_model_prefix = false; _use_list_prefix = true; } @@ -72,6 +147,8 @@ Client::~Client() _redis = NULL; } _redis_server = NULL; + delete _cfgopts; + _cfgopts = NULL; // Log Client destruction log_data(LLDebug, "Client destroyed"); @@ -87,7 +164,7 @@ void Client::put_dataset(DataSet& dataset) _append_dataset_metadata_commands(cmds, dataset); _append_dataset_tensor_commands(cmds, dataset); _append_dataset_ack_command(cmds, dataset); - _run(cmds); + _redis_server->run_in_pipeline(cmds); } // Retrieve a DataSet object from the database @@ -108,15 +185,25 @@ DataSet Client::get_dataset(const std::string& name) DataSet dataset(name); _unpack_dataset_metadata(dataset, reply); + // Build the tensor keys std::vector tensor_names = dataset.get_tensor_names(); - - // Retrieve DataSet tensors and fill the DataSet object - for(size_t i = 0; i < tensor_names.size(); i++) { - // Build the tensor key - std::string tensor_key = - _build_dataset_tensor_key(name, tensor_names[i], true); - // Retrieve tensor and add it to the dataset - _get_and_add_dataset_tensor(dataset, tensor_names[i], tensor_key); + if (tensor_names.size() == 0) + return dataset; // If no tensors, we're done + std::vector tensor_keys; + std::transform( + tensor_names.cbegin(), + tensor_names.cend(), + std::back_inserter(tensor_keys), + [this, name](std::string s){ + return _build_dataset_tensor_key(name, s, true); + }); + + // Retrieve DataSet tensors + PipelineReply tensors = _redis_server->get_tensors(tensor_keys); + + // Put them into the dataset + for (size_t i = 0; i < tensor_names.size(); i++) { + _add_dataset_tensor(dataset, tensor_names[i], tensors[i]); } return dataset; @@ -166,7 +253,7 @@ void Client::copy_dataset(const std::string& src_name, CommandList put_meta_cmds; _append_dataset_metadata_commands(put_meta_cmds, dataset); _append_dataset_ack_command(put_meta_cmds, dataset); - (void)_run(put_meta_cmds); + (void)_redis_server->run_in_pipeline(put_meta_cmds); } // Delete a DataSet from the database. 
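In practice the new constructors above give two entry points: let the Client resolve everything (including `SR_DB_TYPE`, compared case-insensitively with a default of "Clustered") from the environment, or pass a ConfigOptions explicitly. The Client clones the options object, so the caller keeps ownership. A sketch; it assumes a reachable database:

```cpp
// Sketch: the two non-deprecated Client constructors (C++ API).
#include "client.h"
#include "configoptions.h"

using namespace SmartRedis;

void make_clients()
{
    // 1) Let the Client resolve SR_DB_TYPE etc. from the environment
    Client from_env("worker_log");

    // 2) Force a standalone connection regardless of the environment.
    //    The Client clones the options, so `opts` stays caller-owned.
    auto opts = ConfigOptions::create_from_environment("");
    opts->override_string_option("SR_DB_TYPE", "Standalone");
    Client standalone(opts.get(), "worker_log");
}
```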
@@ -486,6 +573,7 @@ void Client::set_model_from_file(const std::string& name, const std::string& device, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) @@ -506,7 +594,7 @@ void Client::set_model_from_file(const std::string& name, std::string_view model(tmp.data(), tmp.length()); set_model(name, model, backend, device, batch_size, - min_batch_size, tag, inputs, outputs); + min_batch_size, min_batch_timeout, tag, inputs, outputs); } // Set a model from file in the database for future execution in a multi-GPU system @@ -517,6 +605,7 @@ void Client::set_model_from_file_multigpu(const std::string& name, int num_gpus, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) @@ -537,8 +626,42 @@ void Client::set_model_from_file_multigpu(const std::string& name, std::string_view model(tmp.data(), tmp.length()); set_model_multigpu(name, model, backend, first_gpu, num_gpus, batch_size, - min_batch_size, tag, inputs, outputs); + min_batch_size, min_batch_timeout, tag, inputs, outputs); +} + +// Validate batch settings for the set_model calls +inline void __check_batch_settings( + int batch_size, int min_batch_size, int min_batch_timeout) +{ + // Throw a usage exception if batch_size is zero but one of the other + // parameters is non-zero + if (batch_size == 0 && (min_batch_size > 0 || min_batch_timeout > 0)) { + throw SRRuntimeException( + "batch_size must be non-zero if min_batch_size or " + "min_batch_timeout is used; otherwise batching will " + "not be performed." + ); + } + + // Throw a usage exception if min_batch_timeout is nonzero and + // min_batch_size is zero. (batch_size also has to be non-zero, but + // this was caught in the previous clause.) + if (min_batch_timeout > 0 && min_batch_size == 0) { + throw SRRuntimeException( + "min_batch_size must be non-zero if min_batch_timeout " + "is used; otherwise the min_batch_timeout parameter is ignored." + ); + } + + // Issue a warning if min_batch_size is non-zero but min_batch_timeout is zero + if (min_batch_size > 0 && min_batch_timeout == 0) { + std::cerr << "WARNING: min_batch_timeout was not set when a non-zero " + << "min_batch_size was selected. " << std::endl + << "Setting a small value (~10ms) for min_batch_timeout " + << "may improve performance" << std::endl; + } } + // Set a model from a string buffer in the database for future execution void Client::set_model(const std::string& name, const std::string_view& model, @@ -546,6 +669,7 @@ void Client::set_model(const std::string& name, const std::string& device, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) @@ -590,10 +714,29 @@ void Client::set_model(const std::string& name, throw SRRuntimeException(device + " is not a valid device."); } + __check_batch_settings(batch_size, min_batch_size, min_batch_timeout); + + // Split model into chunks + size_t offset = 0; + std::vector model_segments; + size_t chunk_size = _redis_server->get_model_chunk_size(); + size_t remaining = model.length(); + for (offset = 0; offset < model.length(); offset += chunk_size) { + size_t this_chunk_size = remaining > chunk_size ? 
chunk_size : remaining;
+        std::string_view chunk(model.data() + offset, this_chunk_size);
+        model_segments.push_back(chunk);
+        remaining -= this_chunk_size;
+    }
+
     std::string key = _build_model_key(name, false);
-    _redis_server->set_model(key, model, backend, device,
-                             batch_size, min_batch_size,
-                             tag, inputs, outputs);
+    auto response = _redis_server->set_model(
+        key, model_segments, backend, device,
+        batch_size, min_batch_size, min_batch_timeout,
+        tag, inputs, outputs);
+    if (response.has_error()) {
+        throw SRInternalException(
+            "An unknown error occurred while setting the model");
+    }
 }
 
 void Client::set_model_multigpu(const std::string& name,
@@ -603,6 +746,7 @@ void Client::set_model_multigpu(const std::string& name,
                                 int num_gpus,
                                 int batch_size,
                                 int min_batch_size,
+                                int min_batch_timeout,
                                 const std::string& tag,
                                 const std::vector<std::string>& inputs,
                                 const std::vector<std::string>& outputs)
@@ -644,10 +788,24 @@ void Client::set_model_multigpu(const std::string& name,
         throw SRParameterException(backend + " is not a valid backend.");
     }
 
+    __check_batch_settings(batch_size, min_batch_size, min_batch_timeout);
+
+    // Split model into chunks
+    size_t offset = 0;
+    std::vector<std::string_view> model_segments;
+    size_t chunk_size = _redis_server->get_model_chunk_size();
+    size_t remaining = model.length();
+    for (offset = 0; offset < model.length(); offset += chunk_size) {
+        size_t this_chunk_size = remaining > chunk_size ? chunk_size : remaining;
+        std::string_view chunk(model.data() + offset, this_chunk_size);
+        model_segments.push_back(chunk);
+        remaining -= this_chunk_size;
+    }
+
     std::string key = _build_model_key(name, false);
     _redis_server->set_model_multigpu(
-        key, model, backend, first_gpu, num_gpus,
-        batch_size, min_batch_size,
+        key, model_segments, backend, first_gpu, num_gpus,
+        batch_size, min_batch_size, min_batch_timeout,
         tag, inputs, outputs);
 }
 
@@ -658,16 +816,36 @@ std::string_view Client::get_model(const std::string& name)
     // Track calls to this API function
     LOG_API_FUNCTION();
 
+    // Get the model from the server
     std::string get_key = _build_model_key(name, true);
     CommandReply reply = _redis_server->get_model(get_key);
     if (reply.has_error())
         throw SRRuntimeException("failed to get model from server");
 
-    char* model = _model_queries.allocate(reply.str_len());
+    // In most cases, the reply will be a single string
+    // consisting of the serialized model
+    if (!reply.is_array()) {
+        char* model = _model_queries.allocate(reply.str_len());
+        if (model == NULL)
+            throw SRBadAllocException("model query");
+        std::memcpy(model, reply.str(), reply.str_len());
+        return std::string_view(model, reply.str_len());
+    }
+
+    // Otherwise, we need to concatenate the segments together
+    size_t model_length = 0;
+    size_t offset = 0;
+    for (size_t i = 0; i < reply.n_elements(); i++) {
+        model_length += reply[i].str_len();
+    }
+    char* model = _model_queries.allocate(model_length);
     if (model == NULL)
         throw SRBadAllocException("model query");
-    std::memcpy(model, reply.str(), reply.str_len());
-    return std::string_view(model, reply.str_len());
+    for (size_t i = 0; i < reply.n_elements(); i++) {
+        std::memcpy(model + offset, reply[i].str(), reply[i].str_len());
+        offset += reply[i].str_len();
+    }
+    return std::string_view(model, model_length);
 }
 
 // Set a script from file in the database for future execution
@@ -730,7 +907,11 @@ void Client::set_script(const std::string& name,
     }
 
     std::string key = _build_model_key(name, false);
-    _redis_server->set_script(key, device, script);
+    auto response = _redis_server->set_script(key, device, script);
+    if
(response.has_error()) { + throw SRInternalException( + "An unknown error occurred while setting the script"); + } } // Set a script in the database for future execution in a multi-GPU system @@ -1094,7 +1275,7 @@ void Client::use_list_ensemble_prefix(bool use_prefix) } -// Set whether names of tensor and dataset entities should be prefixed +// Set whether names of tensor entities should be prefixed // (e.g. in an ensemble) to form database keys. Prefixes will only be used // if they were previously set through the environment variables SSKEYOUT // and SSKEYIN. Keys of entities created before this function is called @@ -1109,6 +1290,21 @@ void Client::use_tensor_ensemble_prefix(bool use_prefix) _use_tensor_prefix = use_prefix; } +// Set whether names of dataset entities should be prefixed +// (e.g. in an ensemble) to form database keys. Prefixes will only be used +// if they were previously set through the environment variables SSKEYOUT +// and SSKEYIN. Keys of entities created before this function is called +// will not be affected. By default, the client prefixes dataset +// keys with the first prefix specified with the SSKEYIN and SSKEYOUT +// environment variables. +void Client::use_dataset_ensemble_prefix(bool use_prefix) +{ + // Track calls to this API function + LOG_API_FUNCTION(); + + _use_dataset_prefix = use_prefix; +} + // Returns information about the given database node parsed_reply_nested_map Client::get_db_node_info(std::string address) { @@ -1397,7 +1593,7 @@ void Client::copy_list(const std::string& src_name, copy_cmd.add_field_ptr(reply[i].str(), reply[i].str_len()); } - CommandReply copy_reply = this->_run(copy_cmd); + CommandReply copy_reply = _run(copy_cmd); if (reply.has_error() > 0) throw SRRuntimeException("Dataset aggregation list copy " @@ -1436,7 +1632,7 @@ int Client::get_list_length(const std::string& list_name) LOG_API_FUNCTION(); // Build the list key - std::string list_key = _build_list_key(list_name, false); + std::string list_key = _build_list_key(list_name, true); // Build the command SingleKeyCommand cmd; @@ -1547,20 +1743,20 @@ std::vector Client::get_dataset_list_range(const std::string& list_name } // Set the prefixes that are used for set and get methods using SSKEYIN -// and SSKEYOUT environment variables. 
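The rules enforced by `__check_batch_settings` above reduce to a small truth table, spelled out in the sketch below. It is illustrative only, since the helper is file-local to client.cpp:

```cpp
// Sketch: the truth table enforced by __check_batch_settings
// (arguments are batch_size, min_batch_size, min_batch_timeout).
#include "srexception.h"

using namespace SmartRedis;

void demo_batch_rules()
{
    __check_batch_settings(0, 0, 0);    // OK: batching disabled entirely
    __check_batch_settings(32, 8, 10);  // OK: fully specified batching
    __check_batch_settings(32, 8, 0);   // OK, but warns that a small
                                        // min_batch_timeout may help

    try {
        __check_batch_settings(0, 8, 0);    // rejected: batching requested
    }                                       // with batch_size == 0
    catch (const SRRuntimeException&) {}

    try {
        __check_batch_settings(32, 0, 10);  // rejected: timeout without
    }                                       // min_batch_size
    catch (const SRRuntimeException&) {}
}
```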
-void Client::_set_prefixes_from_env() +// and SSKEYOUT configuration settings +void Client::_get_prefix_settings() { // Establish set prefix - std::string put_key_prefix; - get_config_string(put_key_prefix, "SSKEYOUT", ""); + std::string put_key_prefix = _cfgopts->_resolve_string_option( + "SSKEYOUT", ""); if (put_key_prefix.length() > 0) _put_key_prefix = put_key_prefix; else _put_key_prefix.clear(); // Establish get prefix(es) - std::string get_key_prefixes; - get_config_string(get_key_prefixes, "SSKEYIN", ""); + std::string get_key_prefixes = _cfgopts->_resolve_string_option( + "SSKEYIN", ""); if (get_key_prefixes.length() > 0) { const char* a = get_key_prefixes.c_str(); const char* b = a; @@ -1627,18 +1823,16 @@ inline CommandReply Client::_get_dataset_metadata(const std::string& name) return _run(cmd); } -// Retrieve a tensor and add it to the dataset -inline void Client::_get_and_add_dataset_tensor(DataSet& dataset, - const std::string& name, - const std::string& key) +// Add a retrieved tensor to a dataset +inline void Client::_add_dataset_tensor( + DataSet& dataset, + const std::string& name, + CommandReply tensor_data) { - // Run tensor retrieval command - CommandReply reply = _redis_server->get_tensor(key); - // Extract tensor properties from command reply - std::vector reply_dims = GetTensorCommand::get_dims(reply); - std::string_view blob = GetTensorCommand::get_data_blob(reply); - SRTensorType type = GetTensorCommand::get_data_type(reply); + std::vector reply_dims = GetTensorCommand::get_dims(tensor_data); + std::string_view blob = GetTensorCommand::get_data_blob(tensor_data); + SRTensorType type = GetTensorCommand::get_data_type(tensor_data); // Add tensor to the dataset dataset._add_to_tensorpack(name, (void*)blob.data(), reply_dims, @@ -1792,7 +1986,7 @@ Client::_get_dataset_list_range(const std::string& list_name, inline std::string Client::_build_tensor_key(const std::string& key, const bool on_db) { - std::string prefix; + std::string prefix(""); if (_use_tensor_prefix) prefix = on_db ? _get_prefix() : _put_prefix(); @@ -1804,7 +1998,7 @@ inline std::string Client::_build_tensor_key(const std::string& key, inline std::string Client::_build_model_key(const std::string& key, const bool on_db) { - std::string prefix; + std::string prefix(""); if (_use_model_prefix) prefix = on_db ? _get_prefix() : _put_prefix(); @@ -1815,8 +2009,8 @@ inline std::string Client::_build_model_key(const std::string& key, inline std::string Client::_build_dataset_key(const std::string& dataset_name, const bool on_db) { - std::string prefix; - if (_use_tensor_prefix) + std::string prefix(""); + if (_use_dataset_prefix) prefix = on_db ? 
_get_prefix() : _put_prefix(); return prefix + "{" + dataset_name + "}"; @@ -2079,4 +2273,36 @@ bool Client::_poll_list_length(const std::string& name, int list_length, } return false; -} \ No newline at end of file +} + +// Reconfigure the model chunk size for the database +void Client::set_model_chunk_size(int chunk_size) +{ + // Track calls to this API function + LOG_API_FUNCTION(); + + // Build the command + AddressAnyCommand cmd; + cmd << "AI.CONFIG" << "MODEL_CHUNK_SIZE" << std::to_string(chunk_size); + std::cout << cmd.to_string() << std::endl; + + // Run it + CommandReply reply = _run(cmd); + if (reply.has_error() > 0) + throw SRRuntimeException("AI.CONFIG MODEL_CHUNK_SIZE command failed"); + + // Remember the new chunk size + _redis_server->store_model_chunk_size(chunk_size); +} + +// Create a string representation of the client +std::string Client::to_string() const +{ + // Track calls to this API function + LOG_API_FUNCTION(); + + std::string result; + result = "Client (" + _lname + "):\n"; + result += _redis_server->to_string(); + return result; +} diff --git a/2023-01/smartsim/smartredis/src/cpp/configoptions.cpp b/2023-01/smartsim/smartredis/src/cpp/configoptions.cpp new file mode 100644 index 00000000..753248dd --- /dev/null +++ b/2023-01/smartsim/smartredis/src/cpp/configoptions.cpp @@ -0,0 +1,238 @@ +/* + * BSD 2-Clause License + * + * Copyright (c) 2021-2023, Hewlett Packard Enterprise + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include "configoptions.h" +#include "srexception.h" +#include "logger.h" +#include "utility.h" + +using namespace SmartRedis; + +// ConfigOptions constructor +ConfigOptions::ConfigOptions( + cfgSrc source, + const std::string& string) + : _source(source), _string(string), _lazy(source == cs_envt), + _log_context(NULL) +{ + // Read in options if needed + if (!_lazy) { + _populate_options(); + } +} + +// Deep copy a ConfigOptions object +ConfigOptions* ConfigOptions::clone() +{ + ConfigOptions* result = new ConfigOptions(_source, _string); + result->_log_context = _log_context; + result->_int_options = _int_options; + result->_string_options = _string_options; + return result; +} + +// ConfigOptions destructor +ConfigOptions::~ConfigOptions() +{ + // Nuke each string from our stash + auto nuke = [](char* buf) { delete buf; }; + std::for_each(_string_buffer_stash.begin(), _string_buffer_stash.end(), nuke); + _string_buffer_stash.clear(); +} + +// Instantiate ConfigOptions, getting selections from environment variables +std::unique_ptr ConfigOptions::create_from_environment( + const std::string& db_suffix) +{ + // NOTE: We can't use std::make_unique<> here because our constructor + // is private + return std::unique_ptr( + new ConfigOptions(cs_envt, db_suffix)); +} + +// Instantiate ConfigOptions, getting selections from environment variables +std::unique_ptr ConfigOptions::create_from_environment( + const char* db_suffix) +{ + std::string str_suffix(db_suffix != NULL ? db_suffix : ""); + return create_from_environment(str_suffix); +} + +// Retrieve the value of a numeric configuration option +int64_t ConfigOptions::get_integer_option(const std::string& option_name) +{ + // If we already have the value, return it + auto search = _int_options.find(option_name); + if (search != _int_options.end()) + return _int_options[option_name]; + + // If we're doing lazy evaluation of option names, fetch the value + int64_t default_value = -1; + int64_t result = default_value; + if (_lazy) { + int temp = 0; + get_config_integer( + temp, _suffixed(option_name), default_value, throw_on_absent); + result = (int64_t)temp; + } + + // Store the final value before we exit + _int_options.insert({option_name, result}); + return result; +} + +// Retrieve the value of a string configuration option +std::string ConfigOptions::get_string_option(const std::string& option_name) +{ + // If we already have the value, return it + auto search = _string_options.find(option_name); + if (search != _string_options.end()) + return _string_options[option_name]; + + // If we're doing lazy evaluation of option names, fetch the value + std::string default_value(""); + std::string result(default_value); + if (_lazy) { + get_config_string( + result, _suffixed(option_name), default_value, throw_on_absent); + } + + // Store the final value before we exit + _string_options.insert({option_name, result}); + return result; +} + +// Resolve the value of a numeric configuration option +int64_t ConfigOptions::_resolve_integer_option( + const std::string& option_name, int64_t default_value) +{ + // If we already have the value, return it + auto search = _int_options.find(option_name); + if (search != _int_options.end()) + return _int_options[option_name]; + + // If we're doing lazy evaluation of option names, fetch the value + int64_t result = default_value; + if (_lazy) { + int temp = 0; + get_config_integer(temp, _suffixed(option_name), default_value); + result = (int64_t)temp; + } + + // Store the final value 
before we exit + _int_options.insert({option_name, result}); + return result; +} + +// Resolve the value of a string configuration option +std::string ConfigOptions::_resolve_string_option( + const std::string& option_name, const std::string& default_value) +{ + // If we already have the value, return it + auto search = _string_options.find(option_name); + if (search != _string_options.end()) + return _string_options[option_name]; + + // If we're doing lazy evaluation of option names, fetch the value + std::string result(default_value); + if (_lazy) { + get_config_string(result, _suffixed(option_name), default_value); + } + + // Store the final value before we exit + _string_options.insert({option_name, result}); + return result; +} + +// Check whether a configuration option is set in the selected source +bool ConfigOptions::is_configured(const std::string& option_name) +{ + // Check each map in turn + if (_int_options.find(option_name) != _int_options.end()) + return true; + if (_string_options.find(option_name) != _string_options.end()) + return true; + + // Check to see if the value is available and we just haven't + // seen it yet + if (_lazy) { + std::string suffixed = _suffixed(option_name); + char* environment_string = std::getenv(suffixed.c_str()); + return NULL != environment_string; + } + + // Not found + return false; +} + +// Override the value of a numeric configuration option +void ConfigOptions::override_integer_option( + const std::string& option_name, int64_t value) +{ + _int_options.insert_or_assign(option_name, value); +} + +// Override the value of a string configuration option +void ConfigOptions::override_string_option( + const std::string& option_name, const std::string& value) +{ + _string_options.insert_or_assign(option_name, value); +} + +// Process option data from a fixed source +void ConfigOptions::_populate_options() +{ + throw SRRuntimeException( + "Sources other than environment variables " + "are not currently supported" + ); +} + +// Apply a suffix to a option_name if the source is environment +// variables and the suffix is nonempty +std::string ConfigOptions::_suffixed(const std::string& option_name) +{ + // Sanity check + if ("" == option_name) { + throw SRKeyException( + "Invalid empty environment variable name detected"); + } + std::string result(option_name); + if (_source == cs_envt && _string != "") + result = option_name + + "_" + _string; + return result; +} + +// Clear a configuration option from the cache +void ConfigOptions::_clear_option_from_cache(const std::string& option_name) +{ + _int_options.erase(option_name); + _string_options.erase(option_name); +} diff --git a/2023-01/smartsim/smartredis/src/cpp/dataset.cpp b/2023-01/smartsim/smartredis/src/cpp/dataset.cpp index 31e03e56..1a71b9da 100644 --- a/2023-01/smartsim/smartredis/src/cpp/dataset.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/dataset.cpp @@ -30,6 +30,7 @@ #include "dataset.h" #include "srexception.h" #include "logger.h" +#include "utility.h" using namespace SmartRedis; @@ -188,7 +189,7 @@ void DataSet::get_meta_strings(const std::string& name, } // Check if the DataSet has a field -bool DataSet::has_field(const std::string& field_name) +bool DataSet::has_field(const std::string& field_name) const { // Track calls to this API function LOG_API_FUNCTION(); @@ -206,7 +207,7 @@ void DataSet::clear_field(const std::string& field_name) } // Retrieve the names of the tensors in the DataSet -std::vector DataSet::get_tensor_names() +std::vector DataSet::get_tensor_names() const { // 
Track calls to this API function LOG_API_FUNCTION(); @@ -240,7 +241,8 @@ void DataSet::get_tensor_names( // Get the strings in a metadata string field. Because standard C++ // containers are used, memory management is handled by the returned // std::vector. -std::vector DataSet::get_meta_strings(const std::string& name) +std::vector DataSet::get_meta_strings( + const std::string& name) const { // Track calls to this API function LOG_API_FUNCTION(); @@ -249,7 +251,7 @@ std::vector DataSet::get_meta_strings(const std::string& name) } // Get the Tensor type of the Tensor -SRTensorType DataSet::get_tensor_type(const std::string& name) +SRTensorType DataSet::get_tensor_type(const std::string& name) const { // Track calls to this API function LOG_API_FUNCTION(); @@ -265,8 +267,26 @@ SRTensorType DataSet::get_tensor_type(const std::string& name) return tensor->type(); } +// Retrieve the dimensions of a Tensor in the DataSet +const std::vector DataSet::get_tensor_dims( + const std::string& name) const +{ + // Track calls to this API function + LOG_API_FUNCTION(); + + // Get the tensor + auto tensor = _tensorpack.get_tensor(name); + if (tensor == NULL) { + throw SRKeyException( + "No tensor named " + name + " is in the dataset"); + } + + // Return its dimensions + return tensor->dims(); +} + // Retrieve the names of all metadata fields in the DataSet -std::vector DataSet::get_metadata_field_names() +std::vector DataSet::get_metadata_field_names() const { // Track calls to this API function LOG_API_FUNCTION(); @@ -285,7 +305,8 @@ void DataSet::get_metadata_field_names( } // Retrieve the data type of a metadata field in the DataSet -SRMetaDataType DataSet::get_metadata_field_type(const std::string& name) +SRMetaDataType DataSet::get_metadata_field_type( + const std::string& name) const { // Track calls to this API function LOG_API_FUNCTION(); @@ -367,3 +388,51 @@ TensorBase* DataSet::_get_tensorbase_obj(const std::string& name) _enforce_tensor_exists(name); return _tensorpack.get_tensor(name)->clone(); } + +// Create a string representation of the DataSet +std::string DataSet::to_string() const +{ + std::string result; + result = "DataSet (" + _lname + "):\n"; + + // Tensors + result += "Tensors:\n"; + auto it = _tensorpack.tensor_cbegin(); + int ntensors = 0; + for ( ; it != _tensorpack.tensor_cend(); ++it) { + ntensors++; + result += " " + (*it)->name() + ":\n"; + result += " type: " + ::to_string((*it)->type()) + "\n"; + auto dims = (*it)->dims(); + result += " dimensions: ["; + size_t ndims = dims.size(); + for (auto itdims = dims.cbegin(); itdims != dims.cend(); ++itdims) { + result += std::to_string(*itdims); + if (--ndims > 0) + result += ", "; + } + result += "]\n"; + result += " elements: " + std::to_string((*it)->num_values()) + "\n"; + } + if (ntensors == 0) { + result += " none\n"; + } + + // Metadata + result += "Metadata:\n"; + auto mdnames = get_metadata_field_names(); + int nmetadata = 0; + for (auto itmd = mdnames.cbegin(); itmd != mdnames.cend(); ++itmd) { + nmetadata++; + result += " " + (*itmd) + ":\n"; + result += " type: " + + ::to_string(get_metadata_field_type(*itmd)) + "\n"; + } + if (nmetadata == 0) { + result += " none\n"; + } + + // Done + return result; +} + diff --git a/2023-01/smartsim/smartredis/src/cpp/logger.cpp b/2023-01/smartsim/smartredis/src/cpp/logger.cpp index 6e8be1c7..1c7cfdda 100644 --- a/2023-01/smartsim/smartredis/src/cpp/logger.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/logger.cpp @@ -59,7 +59,7 @@ void Logger::configure_logging() _initialized = true; 
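// NOTE: the two hunks below replace the old trailing bool argument
// (suppress_warning = true) with named bit flags, which documents intent at
// the call site. A minimal sketch of what the flag definitions presumably
// look like (only the names flag_suppress_warning and throw_on_absent appear
// in this patch; the exact enum values are an assumption):
//
//     enum cfg_flags {
//         flag_suppress_warning = 1 << 0, // no warning when a key is unset
//         throw_on_absent       = 1 << 1  // throw SRKeyException when unset
//     };
//
// The utility.cpp hunks later in this patch then test the individual bits:
//     bool suppress_warning = 0 != (flags & flag_suppress_warning);
//     bool keyerror_on_absent = 0 != (flags & throw_on_absent);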
// Get the logfile - get_config_string(_logfile, "SR_LOG_FILE", "", true); + get_config_string(_logfile, "SR_LOG_FILE", "", flag_suppress_warning); std::string requestedLogfile(_logfile); bool missingLogFile = _logfile.length() == 0; @@ -77,7 +77,7 @@ void Logger::configure_logging() // Get the logging level std::string level; - get_config_string(level, "SR_LOG_LEVEL", "", true); + get_config_string(level, "SR_LOG_LEVEL", "", flag_suppress_warning); bool missingLogLevel = level.length() == 0; bool badLogLevel = false; if (level.length() > 0) { diff --git a/2023-01/smartsim/smartredis/src/cpp/metadata.cpp b/2023-01/smartsim/smartredis/src/cpp/metadata.cpp index a8aff3e6..7bb8c62a 100644 --- a/2023-01/smartsim/smartredis/src/cpp/metadata.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/metadata.cpp @@ -235,20 +235,21 @@ void MetaData::get_scalar_values(const std::string& name, } // Retrieve the type of a metadata field -SRMetaDataType MetaData::get_field_type(const std::string& name) +SRMetaDataType MetaData::get_field_type(const std::string& name) const { - // Make sure the field exists - if (_field_map[name] == NULL) { + try { + // Return the type. If it doesn't exist, the exception below will + // be thrown + return _field_map.at(name)->type(); + } + catch (std::out_of_range& e) { throw SRKeyException( "The metadata field " + name + " does not exist."); } - - // Return the type - return _field_map[name]->type(); } // Retrieve a vector of metadata field names -std::vector MetaData::get_field_names(bool skip_internal) +std::vector MetaData::get_field_names(bool skip_internal) const { std::vector fieldnames; fieldnames.reserve(_field_map.size()); @@ -332,11 +333,14 @@ void MetaData::get_string_values(const std::string& name, // Get metadata values string field std::vector -MetaData::get_string_values(const std::string& name) +MetaData::get_string_values(const std::string& name) const { // Get the field - MetadataField* mdf = _field_map[name]; - if (mdf == NULL) { + const MetadataField* mdf = NULL; + try { + mdf = _field_map.at(name); + } + catch (std::out_of_range& e) { throw SRRuntimeException("The metadata field " + name + " does not exist."); } @@ -352,7 +356,7 @@ MetaData::get_string_values(const std::string& name) } // This function checks if the DataSet has a field -bool MetaData::has_field(const std::string& field_name) +bool MetaData::has_field(const std::string& field_name) const { return (_field_map.count(field_name) > 0); } @@ -467,10 +471,11 @@ void MetaData::_create_string_field(const std::string& field_name) // Allocate new memory to hold metadata field values and return these values // via the c-ptr reference being pointed to the newly allocated memory template -void MetaData::_get_numeric_field_values(const std::string& name, - void*& data, - size_t& n_values, - SharedMemoryList& mem_list) +void MetaData::_get_numeric_field_values( + const std::string& name, + void*& data, + size_t& n_values, + SharedMemoryList& mem_list) { // Make sure the field exists MetadataField* mdf = _field_map[name]; diff --git a/2023-01/smartsim/smartredis/src/cpp/metadatafield.cpp b/2023-01/smartsim/smartredis/src/cpp/metadatafield.cpp index b8c7dbb7..b62da03c 100644 --- a/2023-01/smartsim/smartredis/src/cpp/metadatafield.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/metadatafield.cpp @@ -38,13 +38,13 @@ MetadataField::MetadataField(const std::string& name, SRMetaDataType type) } // Retrieve the MetadataField name -std::string MetadataField::name() +std::string MetadataField::name() const { return 
_name; } // Retrieve the MetadataField name -SRMetaDataType MetadataField::type() +SRMetaDataType MetadataField::type() const { return _type; } diff --git a/2023-01/smartsim/smartredis/src/cpp/redis.cpp b/2023-01/smartsim/smartredis/src/cpp/redis.cpp index 69e5af93..9b493406 100644 --- a/2023-01/smartsim/smartredis/src/cpp/redis.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/redis.cpp @@ -26,6 +26,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include #include "redis.h" #include "srexception.h" #include "utility.h" @@ -34,8 +35,8 @@ using namespace SmartRedis; // Redis constructor. -Redis::Redis(const SRObject* context) - : RedisServer(context) +Redis::Redis(ConfigOptions* cfgopts) + : RedisServer(cfgopts) { SRAddress db_address(_get_ssdb()); // Remember whether it's a unix domain socket for later @@ -45,8 +46,8 @@ Redis::Redis(const SRObject* context) } // Redis constructor. Uses address provided to constructor instead of environment variables -Redis::Redis(const SRObject* context, std::string addr_spec) - : RedisServer(context) +Redis::Redis(ConfigOptions* cfgopts, std::string addr_spec) + : RedisServer(cfgopts) { SRAddress db_address(addr_spec); _add_to_address_map(db_address); @@ -79,10 +80,13 @@ CommandReply Redis::run(CompoundCommand& cmd){ // Run an address-at Command on the server CommandReply Redis::run(AddressAtCommand& cmd){ - if (!is_addressable(cmd.get_address())) - throw SRRuntimeException("The provided address does not match "\ - "the address used to initialize the "\ - "non-cluster client connection."); + if (!is_addressable(cmd.get_address())) { + std::string msg("The provided address does not match "\ + "the address used to initialize the "\ + "non-cluster client connection."); + msg += " Received: " + cmd.get_address().to_string(); + throw SRRuntimeException(msg); + } return this->_run(cmd); } @@ -192,6 +196,22 @@ CommandReply Redis::get_tensor(const std::string& key) return run(cmd); } +// Get a list of Tensor from the server +PipelineReply Redis::get_tensors(const std::vector& keys) +{ + // Build up the commands to get the tensors + CommandList cmdlist; // This just holds the memory + std::vector cmds; + for (auto it = keys.begin(); it != keys.end(); ++it) { + GetTensorCommand* cmd = cmdlist.add_command(); + (*cmd) << "AI.TENSORGET" << Keyfield(*it) << "META" << "BLOB"; + cmds.push_back(cmd); + } + + // Run them via pipeline + return _run_pipeline(cmds); +} + // Rename a tensor in the database CommandReply Redis::rename_tensor(const std::string& key, const std::string& new_key) @@ -277,11 +297,12 @@ CommandReply Redis::copy_tensors(const std::vector& src, // Set a model from std::string_view buffer in the database for future execution CommandReply Redis::set_model(const std::string& model_name, - std::string_view model, + const std::vector& model, const std::string& backend, const std::string& device, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs @@ -301,6 +322,9 @@ CommandReply Redis::set_model(const std::string& model_name, if (min_batch_size > 0) { cmd << "MINBATCHSIZE" << std::to_string(min_batch_size); } + if (min_batch_timeout > 0) { + cmd << "MINBATCHTIMEOUT" << std::to_string(min_batch_timeout); + } if (inputs.size() > 0) { cmd << "INPUTS" << std::to_string(inputs.size()) << inputs; } @@ -316,12 +340,13 @@ CommandReply Redis::set_model(const std::string& model_name, // Set a model from std::string_view buffer in the // database for 
future execution in a multi-GPU system void Redis::set_model_multigpu(const std::string& name, - const std::string_view& model, + const std::vector& model, const std::string& backend, int first_gpu, int num_gpus, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) @@ -332,7 +357,8 @@ void Redis::set_model_multigpu(const std::string& name, std::string device = "GPU:" + std::to_string(i); std::string model_key = name + "." + device; result = set_model( - name, model_key, backend, device, batch_size, min_batch_size, tag, inputs, outputs); + model_key, model, backend, device, batch_size, min_batch_size, min_batch_timeout, + tag, inputs, outputs); if (result.has_error() > 0) { throw SRRuntimeException("Failed to set model for GPU " + std::to_string(i)); } @@ -340,7 +366,8 @@ void Redis::set_model_multigpu(const std::string& name, // Add a version for get_model to find result = set_model( - name, model, backend, "GPU", batch_size, min_batch_size, tag, inputs, outputs); + name, model, backend, "GPU", batch_size, min_batch_size, min_batch_timeout, + tag, inputs, outputs); if (result.has_error() > 0) { throw SRRuntimeException("Failed to set general model"); } @@ -564,7 +591,7 @@ CommandReply Redis::get_model_script_ai_info(const std::string& address, "non-cluster client connection."); } - //Build the Command + // Build the Command cmd.set_exec_address(db_address); cmd << "AI.INFO" << Keyfield(key); @@ -576,6 +603,48 @@ CommandReply Redis::get_model_script_ai_info(const std::string& address, return run(cmd); } +// Retrieve the current model chunk size +int Redis::get_model_chunk_size() +{ + // If we've already set a chunk size, just return it + if (_model_chunk_size != _UNKNOWN_MODEL_CHUNK_SIZE) + return _model_chunk_size; + + // Build the command + AddressAnyCommand cmd; + cmd << "AI.CONFIG" << "GET" << "MODEL_CHUNK_SIZE"; + + CommandReply reply = _run(cmd); + if (reply.has_error() > 0) + throw SRRuntimeException("AI.CONFIG GET MODEL_CHUNK_SIZE command failed"); + + if (reply.redis_reply_type() != "REDIS_REPLY_INTEGER") + throw SRRuntimeException("An unexpected type was returned " + "for the model chunk size."); + + int chunk_size = reply.integer(); + + if (chunk_size < 0) + throw SRRuntimeException("An invalid, negative value was " + "returned for the model chunk size."); + + return chunk_size; +} + +// Reconfigure the model chunk size for the database +void Redis::set_model_chunk_size(int chunk_size) +{ + AddressAnyCommand cmd; + cmd << "AI.CONFIG" << "MODEL_CHUNK_SIZE" << std::to_string(chunk_size); + + CommandReply reply = _run(cmd); + if (reply.has_error() > 0) + throw SRRuntimeException("AI.CONFIG MODEL_CHUNK_SIZE command failed"); + + // Store the new model chunk size for later + _model_chunk_size = chunk_size; +} + inline CommandReply Redis::_run(const Command& cmd) { for (int i = 1; i <= _command_attempts; i++) { @@ -653,10 +722,27 @@ inline void Redis::_add_to_address_map(SRAddress& db_address) inline void Redis::_connect(SRAddress& db_address) { + // Build a connections object for this connection + // No need to repeat the build on each connection attempt + // so we do it outside the loop + sw::redis::ConnectionOptions connectOpts; + if (db_address._is_tcp) { + connectOpts.host = db_address._tcp_host; + connectOpts.port = db_address._tcp_port; + connectOpts.type = sw::redis::ConnectionType::TCP; + } + else { + connectOpts.path = db_address._uds_file; + connectOpts.type = 
sw::redis::ConnectionType::UNIX; + } + connectOpts.socket_timeout = std::chrono::milliseconds( + _DEFAULT_SOCKET_TIMEOUT); + + // Connect for (int i = 1; i <= _connection_attempts; i++) { try { // Try to create the sw::redis::Redis object - _redis = new sw::redis::Redis(db_address.to_string(true)); + _redis = new sw::redis::Redis(connectOpts); // Attempt to have the sw::redis::Redis object // make a connection using the PING command @@ -724,3 +810,98 @@ inline void Redis::_connect(SRAddress& db_address) throw SRTimeoutException(std::string("Connection attempt failed after ") + std::to_string(_connection_attempts) + "tries"); } + +// Run a CommandList via a Pipeline +PipelineReply Redis::run_in_pipeline(CommandList& cmdlist) +{ + // Convert from CommandList to vector + std::vector cmds; + for (auto it = cmdlist.begin(); it != cmdlist.end(); ++it) { + cmds.push_back(*it); + } + + // Run the commands + return _run_pipeline(cmds); +} + +// Build and run unordered pipeline +PipelineReply Redis::_run_pipeline(std::vector& cmds) +{ + PipelineReply reply; + for (int i = 1; i <= _command_attempts; i++) { + try { + // Get pipeline object for shard (no new connection) + auto pipeline = _redis->pipeline(false); + + // Loop over all commands and add to the pipeline + for (size_t j = 0; j < cmds.size(); j++) { + // Add the commands to the pipeline + pipeline.command(cmds[j]->cbegin(), cmds[j]->cend()); + } + + // Execute the pipeline + reply = pipeline.exec(); + + // Check the replies + if (reply.has_error()) { + throw SRRuntimeException("Redis failed to execute the pipeline"); + } + + // If we get here, it all worked + return reply; + } + catch (SmartRedis::Exception& e) { + // Exception is already prepared, just propagate it + throw; + } + catch (sw::redis::IoError &e) { + // For an error from Redis, retry unless we're out of chances + if (i == _command_attempts) { + throw SRDatabaseException( + std::string("Redis IO error when executing the pipeline: ") + + e.what()); + } + // else, Fall through for a retry + } + catch (sw::redis::ClosedError &e) { + // For an error from Redis, retry unless we're out of chances + if (i == _command_attempts) { + throw SRDatabaseException( + std::string("Redis Closed error when executing the "\ + "pipeline: ") + e.what()); + } + // else, Fall through for a retry + } + catch (sw::redis::Error &e) { + // For other errors from Redis, report them immediately + throw SRRuntimeException( + std::string("Redis error when executing the pipeline: ") + + e.what()); + } + catch (std::exception& e) { + // Should never hit this, so bail immediately if we do + throw SRInternalException( + std::string("Unexpected exception executing the pipeline: ") + + e.what()); + } + catch (...) 
{ + // Should never hit this, so bail immediately if we do + throw SRInternalException( + "Non-standard exception encountered executing the pipeline"); + } + + // Sleep before the next attempt + std::this_thread::sleep_for(std::chrono::milliseconds(_command_interval)); + } + + // If we get here, we've run out of retry attempts + throw SRTimeoutException("Unable to execute pipeline"); +} + +// Create a string representation of the Redis connection +std::string Redis::to_string() const +{ + std::string result("Non-clustered Redis connection:\n"); + result += RedisServer::to_string(); + return result; +} diff --git a/2023-01/smartsim/smartredis/src/cpp/rediscluster.cpp b/2023-01/smartsim/smartredis/src/cpp/rediscluster.cpp index 0fe3c88f..f68ecbe3 100644 --- a/2023-01/smartsim/smartredis/src/cpp/rediscluster.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/rediscluster.cpp @@ -26,23 +26,26 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#include #include "rediscluster.h" #include "nonkeyedcommand.h" #include "keyedcommand.h" #include "srexception.h" #include "utility.h" #include "srobject.h" +#include "configoptions.h" using namespace SmartRedis; // RedisCluster constructor -RedisCluster::RedisCluster(const SRObject* context) - : RedisServer(context) +RedisCluster::RedisCluster(ConfigOptions* cfgopts) + : RedisServer(cfgopts) { SRAddress db_address(_get_ssdb()); if (!db_address._is_tcp) { throw SRRuntimeException("Unix Domain Socket is not supported with clustered Redis"); } + _is_domain_socket = false; _connect(db_address); _map_cluster(); if (_address_node_map.count(db_address.to_string()) > 0) @@ -55,8 +58,8 @@ RedisCluster::RedisCluster(const SRObject* context) // RedisCluster constructor. Uses address provided to constructor instead of // environment variables -RedisCluster::RedisCluster(const SRObject* context, std::string address_spec) - : RedisServer(context) +RedisCluster::RedisCluster(ConfigOptions* cfgopts, std::string address_spec) + : RedisServer(cfgopts) { SRAddress db_address(address_spec); _connect(db_address); @@ -385,6 +388,26 @@ CommandReply RedisCluster::get_tensor(const std::string& key) return run(cmd); } +// Get a list of Tensor from the server +PipelineReply RedisCluster::get_tensors(const std::vector& keys) +{ + // Build up the commands to get the tensors + CommandList cmdlist; // This just holds the memory + std::vector cmds; + for (auto it = keys.begin(); it != keys.end(); ++it) { + GetTensorCommand* cmd = cmdlist.add_command(); + (*cmd) << "AI.TENSORGET" << Keyfield(*it) << "META" << "BLOB"; + cmds.push_back(cmd); + } + + // Get the shard index for the first key + size_t db_index = _get_db_node_index(keys[0]); + std::string shard_prefix = _db_nodes[db_index].prefix; + + // Run them via pipeline + return _run_pipeline(cmds, shard_prefix); +} + // Rename a tensor in the database CommandReply RedisCluster::rename_tensor(const std::string& key, const std::string& new_key) @@ -484,11 +507,12 @@ CommandReply RedisCluster::copy_tensors(const std::vector& src, // Set a model from a string buffer in the database for future execution CommandReply RedisCluster::set_model(const std::string& model_name, - std::string_view model, + const std::vector& model, const std::string& backend, const std::string& device, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) @@ -509,6 +533,9 @@ CommandReply RedisCluster::set_model(const std::string& model_name, if 
(min_batch_size > 0) { cmd << "MINBATCHSIZE" << std::to_string(min_batch_size); } + if (min_batch_timeout > 0) { + cmd << "MINBATCHTIMEOUT" << std::to_string(min_batch_timeout); + } if ( inputs.size() > 0) { cmd << "INPUTS" << std::to_string(inputs.size()) << inputs; } @@ -530,12 +557,13 @@ CommandReply RedisCluster::set_model(const std::string& model_name, // Set a model from std::string_view buffer in the // database for future execution in a multi-GPU system void RedisCluster::set_model_multigpu(const std::string& name, - const std::string_view& model, + const std::vector& model, const std::string& backend, int first_gpu, int num_gpus, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) @@ -549,7 +577,7 @@ void RedisCluster::set_model_multigpu(const std::string& name, // Store it CommandReply result = set_model( model_key, model, backend, device, batch_size, min_batch_size, - tag, inputs, outputs); + min_batch_timeout, tag, inputs, outputs); if (result.has_error() > 0) { throw SRRuntimeException("Failed to set model for " + device); } @@ -558,7 +586,7 @@ void RedisCluster::set_model_multigpu(const std::string& name, // Add a version for get_model to find CommandReply result = set_model( name, model, backend, "GPU", batch_size, min_batch_size, - tag, inputs, outputs); + min_batch_timeout, tag, inputs, outputs); if (result.has_error() > 0) { throw SRRuntimeException("Failed to set general model"); } @@ -919,6 +947,57 @@ CommandReply RedisCluster::get_model_script_ai_info(const std::string& address, return run(cmd); } +// Retrieve the current model chunk size +int RedisCluster::get_model_chunk_size() +{ + // If we've already set a chunk size, just return it + if (_model_chunk_size != _UNKNOWN_MODEL_CHUNK_SIZE) + return _model_chunk_size; + + // Build the command + AddressAnyCommand cmd; + cmd << "AI.CONFIG" << "GET" << "MODEL_CHUNK_SIZE"; + + CommandReply reply = run(cmd); + if (reply.has_error() > 0) + throw SRRuntimeException("AI.CONFIG GET MODEL_CHUNK_SIZE command failed"); + + if (reply.redis_reply_type() != "REDIS_REPLY_INTEGER") + throw SRRuntimeException("An unexpected type was returned " + "for the model chunk size."); + + int chunk_size = reply.integer(); + + if (chunk_size < 0) + throw SRRuntimeException("An invalid, negative value was " + "returned for the model chunk size."); + + return chunk_size; +} + +// Reconfigure the model chunk size for the database +void RedisCluster::set_model_chunk_size(int chunk_size) +{ + // Repeat for each server node: + auto node = _db_nodes.cbegin(); + for ( ; node != _db_nodes.cend(); node++) { + // Pick a node for the command + AddressAtCommand cmd; + cmd.set_exec_address(node->address); + // Build the command + cmd << "AI.CONFIG" << "MODEL_CHUNK_SIZE" << std::to_string(chunk_size); + + // Run it + CommandReply reply = run(cmd); + if (reply.has_error() > 0) { + throw SRRuntimeException("set_model_chunk_size failed for node " + node->name); + } + } + + // Store the new model chunk size for later + _model_chunk_size = chunk_size; + } + inline CommandReply RedisCluster::_run(const Command& cmd, std::string db_prefix) { std::string_view sv_prefix(db_prefix.data(), db_prefix.size()); @@ -996,19 +1075,43 @@ inline CommandReply RedisCluster::_run(const Command& cmd, std::string db_prefix // Connect to the cluster at the address and port inline void RedisCluster::_connect(SRAddress& db_address) { + // Build a connections object for this connection + // No need to 
repeat the build on each connection attempt + // so we do it outside the loop + sw::redis::ConnectionOptions connectOpts; + if (db_address._is_tcp) { + connectOpts.host = db_address._tcp_host; + connectOpts.port = db_address._tcp_port; + connectOpts.type = sw::redis::ConnectionType::TCP; + } + else { + throw SRInternalException( + "RedisCluster encountered a UDS request in _connect()"); + } + connectOpts.socket_timeout = std::chrono::milliseconds( + _DEFAULT_SOCKET_TIMEOUT); + + // Connect + std::string msg; for (int i = 1; i <= _connection_attempts; i++) { + msg = "Connection attempt " + std::to_string(i) + " of " + + std::to_string(_connection_attempts); + _cfgopts->_get_log_context()->log_data(LLDeveloper, msg); + try { // Attempt the connection - _redis_cluster = new sw::redis::RedisCluster(db_address.to_string(true)); - return; + _redis_cluster = new sw::redis::RedisCluster(connectOpts); return; } catch (std::bad_alloc& e) { // On a memory error, bail immediately _redis_cluster = NULL; + _cfgopts->_get_log_context()->log_data(LLDeveloper, "Memory error"); throw SRBadAllocException("RedisCluster connection"); } catch (sw::redis::Error& e) { // For an error from Redis, retry unless we're out of chances + msg = "redis error: "; msg += e.what(); + _cfgopts->_get_log_context()->log_data(LLDeveloper, msg); _redis_cluster = NULL; std::string message("Unable to connect to backend database: "); message += e.what(); @@ -1022,6 +1125,8 @@ inline void RedisCluster::_connect(SRAddress& db_address) } catch (std::exception& e) { // Should never hit this, so bail immediately if we do + msg = "std::exception: "; msg += e.what(); + _cfgopts->_get_log_context()->log_data(LLDeveloper, msg); _redis_cluster = NULL; throw SRInternalException( std::string("Unexpected exception while connecting: ") + @@ -1029,6 +1134,7 @@ inline void RedisCluster::_connect(SRAddress& db_address) } catch (...) 
{ // Should never hit this, so bail immediately if we do + _cfgopts->_get_log_context()->log_data(LLDeveloper, "unknown exception"); _redis_cluster = NULL; throw SRInternalException( "A non-standard exception was encountered during client "\ @@ -1058,8 +1164,7 @@ inline void RedisCluster::_map_cluster() cmd << "CLUSTER" << "SLOTS"; // Run it - CommandReply reply(_redis_cluster-> - command(cmd.begin(), cmd.end())); + CommandReply reply = run(cmd); if (reply.has_error() > 0) { throw SRRuntimeException("CLUSTER SLOTS command failed"); } @@ -1343,10 +1448,30 @@ DBNode* RedisCluster::_get_model_script_db(const std::string& name, return db; } +// Run a CommandList via a Pipeline +PipelineReply RedisCluster::run_in_pipeline(CommandList& cmdlist) +{ + // Convert from CommandList to vector and grab the shard along + // the way + std::vector cmds; + std::string shard_prefix = _db_nodes[0].prefix; + bool shard_found = false; + for (auto it = cmdlist.begin(); it != cmdlist.end(); ++it) { + cmds.push_back(*it); + if (!shard_found && (*it)->has_keys()) { + shard_prefix = _get_db_node_prefix(*(*it)); + shard_found = true; + } + } + + // Run the commands + return _run_pipeline(cmds, shard_prefix); +} + // Build and run unordered pipeline -PipelineReply -RedisCluster::_run_pipeline(std::vector& cmds, - std::string& shard_prefix) +PipelineReply RedisCluster::_run_pipeline( + std::vector& cmds, + std::string& shard_prefix) { PipelineReply reply; for (int i = 1; i <= _command_attempts; i++) { @@ -1368,6 +1493,9 @@ RedisCluster::_run_pipeline(std::vector& cmds, if (reply.has_error()) { throw SRRuntimeException("Redis failed to execute the pipeline"); } + + // If we get here, it all worked + return reply; } catch (SmartRedis::Exception& e) { // Exception is already prepared, just propagate it @@ -1411,14 +1539,16 @@ RedisCluster::_run_pipeline(std::vector& cmds, // Sleep before the next attempt std::this_thread::sleep_for(std::chrono::milliseconds(_command_interval)); - - // Return the reply - return reply; } // If we get here, we've run out of retry attempts throw SRTimeoutException("Unable to execute pipeline"); +} - // Return the reply - return reply; -} \ No newline at end of file +// Create a string representation of the Redis connection +std::string RedisCluster::to_string() const +{ + std::string result("Clustered Redis connection:\n"); + result += RedisServer::to_string(); + return result; +} diff --git a/2023-01/smartsim/smartredis/src/cpp/redisserver.cpp b/2023-01/smartsim/smartredis/src/cpp/redisserver.cpp index 6806a1f3..a08dfeae 100644 --- a/2023-01/smartsim/smartredis/src/cpp/redisserver.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/redisserver.cpp @@ -27,27 +27,30 @@ */ #include +#include #include "redisserver.h" #include "srexception.h" #include "utility.h" #include "srobject.h" +#include "configoptions.h" using namespace SmartRedis; // RedisServer constructor -RedisServer::RedisServer(const SRObject* context) - : _context(context), _gen(_rd()) +RedisServer::RedisServer(ConfigOptions* cfgopts) + : _cfgopts(cfgopts), _context(cfgopts->_get_log_context()), + _gen(_rd()) { - get_config_integer(_connection_timeout, _CONN_TIMEOUT_ENV_VAR, - _DEFAULT_CONN_TIMEOUT); - get_config_integer(_connection_interval, _CONN_INTERVAL_ENV_VAR, - _DEFAULT_CONN_INTERVAL); - get_config_integer(_command_timeout, _CMD_TIMEOUT_ENV_VAR, - _DEFAULT_CMD_TIMEOUT); - get_config_integer(_command_interval, _CMD_INTERVAL_ENV_VAR, - _DEFAULT_CMD_INTERVAL); - get_config_integer(_thread_count, _TP_THREAD_COUNT, - 
_DEFAULT_THREAD_COUNT); + _connection_timeout = _cfgopts->_resolve_integer_option( + _CONN_TIMEOUT_ENV_VAR, _DEFAULT_CONN_TIMEOUT); + _connection_interval = _cfgopts->_resolve_integer_option( + _CONN_INTERVAL_ENV_VAR, _DEFAULT_CONN_INTERVAL); + _command_timeout = _cfgopts->_resolve_integer_option( + _CMD_TIMEOUT_ENV_VAR, _DEFAULT_CMD_TIMEOUT); + _command_interval = _cfgopts->_resolve_integer_option( + _CMD_INTERVAL_ENV_VAR, _DEFAULT_CMD_INTERVAL); + _thread_count = _cfgopts->_resolve_integer_option( + _TP_THREAD_COUNT, _DEFAULT_THREAD_COUNT); _check_runtime_variables(); @@ -58,6 +61,7 @@ RedisServer::RedisServer(const SRObject* context) _command_interval + 1; _tp = new ThreadPool(_context, _thread_count); + _model_chunk_size = _UNKNOWN_MODEL_CHUNK_SIZE; } // RedisServer destructor @@ -74,8 +78,7 @@ RedisServer::~RedisServer() SRAddress RedisServer::_get_ssdb() { // Retrieve the environment variable - std::string db_spec; - get_config_string(db_spec, "SSDB", ""); + std::string db_spec = _cfgopts->_resolve_string_option("SSDB", ""); if (db_spec.length() == 0) throw SRRuntimeException("The environment variable SSDB "\ "must be set to use the client."); @@ -101,16 +104,27 @@ SRAddress RedisServer::_get_ssdb() address_choices.push_back(addr_spec); } + std::string msg = "Found " + std::to_string(address_choices.size()) + " addresses:"; + _cfgopts->_get_log_context()->log_data(LLDeveloper, msg); + for (size_t i = 0; i < address_choices.size(); i++) { + _cfgopts->_get_log_context()->log_data( + LLDeveloper, "\t" + address_choices[i].to_string()); + } + // Pick an entry from the list at random std::uniform_int_distribution<> distrib(0, address_choices.size() - 1); - return address_choices[distrib(_gen)]; + auto choice = address_choices[distrib(_gen)]; + _cfgopts->_get_log_context()->log_data( + LLDeveloper, "Picked: " + choice.to_string()); + return choice; } // Check that the SSDB environment variable value does not have any errors void RedisServer::_check_ssdb_string(const std::string& env_str) { + std::string allowed_specials = ".:,/_-"; for (size_t i = 0; i < env_str.size(); i++) { char c = env_str[i]; - if (!isalnum(c) && c != '.' && c != ':' && c != ',' && c != '/') { + if (!isalnum(c) && (allowed_specials.find(c) == std::string::npos)) { throw SRRuntimeException("The provided SSDB value, " + env_str + " is invalid because of character " + c); } @@ -153,4 +167,43 @@ inline void RedisServer::_check_runtime_variables() " must be less than " + std::to_string(INT_MAX / 1000)); } -} \ No newline at end of file +} + +// Create a string representation of the Redis connection +std::string RedisServer::to_string() const +{ + std::string result; + + // Shards + result += " Redis shards at:\n"; + auto it = _address_node_map.begin(); + for ( ; it != _address_node_map.end(); it++) { + result += " " + it->first + "\n"; + } + + // Protocol + result += " Protocol: "; + result += _is_domain_socket ? 
"Unix Domain Socket" : "TCP"; + result += "\n"; + + // Parameters + result += " Command parameters:\n"; + result += " Retry attempts: " + + std::to_string(_command_attempts) + "\n"; + result += " Retry interval (ms): " + + std::to_string(_command_interval) + "\n"; + result += " Attempt timeout (ms): " + + std::to_string(_command_timeout) + "\n"; + result += " Connection parameters:\n"; + result += " Retry attempts: " + + std::to_string(_connection_attempts) + "\n"; + result += " Retry interval (ms): " + + std::to_string(_connection_interval) + "\n"; + result += " Attempt timeout (ms): " + + std::to_string(_connection_timeout) + "\n"; + + // Threadpool + result += " Threadpool: " + std::to_string(_thread_count) + " threads\n"; + + return result; +} diff --git a/2023-01/smartsim/smartredis/src/cpp/stringfield.cpp b/2023-01/smartsim/smartredis/src/cpp/stringfield.cpp index b21c3594..fcb9c587 100644 --- a/2023-01/smartsim/smartredis/src/cpp/stringfield.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/stringfield.cpp @@ -66,7 +66,7 @@ void StringField::append(const std::string& value) } // Retrieve the number of values in the field -size_t StringField::size() +size_t StringField::size() const { return _vals.size(); } diff --git a/2023-01/smartsim/smartredis/src/cpp/tensorbase.cpp b/2023-01/smartsim/smartredis/src/cpp/tensorbase.cpp index c841a44e..2ffd4391 100644 --- a/2023-01/smartsim/smartredis/src/cpp/tensorbase.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/tensorbase.cpp @@ -139,13 +139,13 @@ TensorBase& TensorBase::operator=(TensorBase&& tb) } // Retrieve the tensor name. -std::string TensorBase::name() +std::string TensorBase::name() const { return _name; } // Retrieve the tensor type. -SRTensorType TensorBase::type() +SRTensorType TensorBase::type() const { return _type; } @@ -157,13 +157,13 @@ std::string TensorBase::type_str() } // Retrieve the tensor dims. -std::vector TensorBase::dims() +std::vector TensorBase::dims() const { return _dims; } // Retrieve the total number of values in the tensor. -size_t TensorBase::num_values() +size_t TensorBase::num_values() const { if (_dims.size() == 0) throw SRRuntimeException("Invalid dimensionality for tensor detected"); diff --git a/2023-01/smartsim/smartredis/src/cpp/tensorpack.cpp b/2023-01/smartsim/smartredis/src/cpp/tensorpack.cpp index 6e0919b5..85c670b8 100644 --- a/2023-01/smartsim/smartredis/src/cpp/tensorpack.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/tensorpack.cpp @@ -129,7 +129,7 @@ void TensorPack::add_tensor(TensorBase* tensor) } // Return a TensorBase pointer based on name. -TensorBase* TensorPack::get_tensor(const std::string& name) +TensorBase* TensorPack::get_tensor(const std::string& name) const { return _tensorbase_inventory.at(name); } @@ -144,7 +144,7 @@ void* TensorPack::get_tensor_data(const std::string& name) } // Check whether a tensor with a given name exists in the TensorPack -bool TensorPack::tensor_exists(const std::string& name) +bool TensorPack::tensor_exists(const std::string& name) const { return (_tensorbase_inventory.count(name) > 0); } diff --git a/2023-01/smartsim/smartredis/src/cpp/utility.cpp b/2023-01/smartsim/smartredis/src/cpp/utility.cpp index 68cbb328..f13b4604 100644 --- a/2023-01/smartsim/smartredis/src/cpp/utility.cpp +++ b/2023-01/smartsim/smartredis/src/cpp/utility.cpp @@ -27,6 +27,8 @@ */ #include +#include +#include #include #include #include @@ -37,21 +39,16 @@ namespace SmartRedis { -/*! 
-* \brief Initialize an integer from configuration, such as an -* environment variable -* \param value Receives the configuration value -* \param cfg_key The key to query for the configuration variable -* \param default_value Default if configuration key is not set -* \param suppress_warning Do not issue a warning if the variable -* is not set -*/ +// Initialize an integer from an environment variable void get_config_integer(int& value, const std::string& cfg_key, const int default_value, - bool suppress_warning /*= false*/) + int flags /* = 0 */) { - value = default_value; + bool suppress_warning = 0 != (flags & flag_suppress_warning); + bool keyerror_on_absent = 0 != (flags & throw_on_absent); + + int result = default_value; std::string message = "Getting value for " + cfg_key; log_data("SmartRedis Library", LLDebug, message); @@ -59,10 +56,16 @@ void get_config_integer(int& value, message = "Retrieved value \""; message += cfg_val == NULL ? "" : cfg_val; message += "\""; - if (NULL == cfg_val) + if ((NULL == cfg_val) && !keyerror_on_absent) message += ". Using default value of " + std::to_string(default_value); log_data("SmartRedis Library", LLDebug, message); + if ((cfg_val == NULL) && keyerror_on_absent) { + std::string msg("No value found for key "); + msg += cfg_key; + throw SRKeyException(msg); + } + if (cfg_val != NULL && std::strlen(cfg_val) > 0) { // Enforce that all characters are digits because std::stoi // will truncate a string like "10xy" to 10. @@ -75,7 +78,7 @@ void get_config_integer(int& value, } try { - value = std::stoi(cfg_val); + result = std::stoi(cfg_val); } catch (std::invalid_argument& e) { throw SRParameterException("The value of " + cfg_key + " could "\ @@ -101,25 +104,21 @@ void get_config_integer(int& value, ); } + value = result; message = "Exiting with value \"" + std::to_string(value) + "\""; log_data("SmartRedis Library", LLDebug, message); } -/*! -* \brief Initialize an string from configuration, such as an -* environment variable -* \param value Receives the configuration value -* \param cfg_key The key to query for the configuration variable -* \param default_value Default if configuration key is not set -* \param suppress_warning Do not issue a warning if the variable -* is not set -*/ +// Initialize a string from an environment variable void get_config_string(std::string& value, const std::string& cfg_key, const std::string& default_value, - bool suppress_warning /*= false*/) + int flags /* = 0 */) { - value = default_value; + bool suppress_warning = 0 != (flags & flag_suppress_warning); + bool keyerror_on_absent = 0 != (flags & throw_on_absent); + + std::string result = default_value; std::string message = "Getting value for " + cfg_key; log_data("SmartRedis Library", LLDebug, message); @@ -127,12 +126,18 @@ void get_config_string(std::string& value, message = "Retrieved value \""; message += cfg_val == NULL ? "" : cfg_val; message += "\""; - if (NULL == cfg_val) - message += ". Using default value of \"" + default_value + "\""; + if ((NULL == cfg_val) && !keyerror_on_absent) + message += ". 
Using default value of " + default_value; log_data("SmartRedis Library", LLDebug, message); + if ((cfg_val == NULL) && keyerror_on_absent) { + std::string msg("No value found for key "); + msg += cfg_key; + throw SRKeyException(msg); + } + if (cfg_val != NULL && std::strlen(cfg_val) > 0) - value = cfg_val; + result = cfg_val; else if (!suppress_warning) { log_warning( "SmartRedis Library", @@ -141,8 +146,61 @@ void get_config_string(std::string& value, ); } + value = result; message = "Exiting with value \"" + value + "\""; log_data("SmartRedis Library", LLDebug, message); } -} // namespace SmartRedis { +// Create a string representation of a tensor type +std::string to_string(SRTensorType ttype) +{ + switch (ttype) { + case SRTensorTypeDouble: + return "double"; + case SRTensorTypeFloat: + return "float"; + case SRTensorTypeInt8: + return "8 bit signed integer"; + case SRTensorTypeInt16: + return "16 bit signed integer"; + case SRTensorTypeInt32: + return "32 bit signed integer"; + case SRTensorTypeInt64: + return "64 bit signed integer"; + case SRTensorTypeUint8: + return "8 bit unsigned integer"; + case SRTensorTypeUint16: + return "16 bit unsigned integer"; + case SRTensorTypeInvalid: + // Fall through + default: + return "Invalid tensor type"; + } +} + +// Create a string representation of a metadata field type +std::string to_string(SRMetaDataType mdtype) +{ + switch (mdtype) { + case SRMetadataTypeDouble: + return "double"; + case SRMetadataTypeFloat: + return "float"; + case SRMetadataTypeInt32: + return "32 bit signed integer"; + case SRMetadataTypeInt64: + return "64 bit signed integer"; + case SRMetadataTypeUint32: + return "32 bit unsigned integer"; + case SRMetadataTypeUint64: + return "64 bit unsigned integer"; + case SRMetadataTypeString: + return "string"; + case SRMetadataTypeInvalid: + // Fall through + default: + return "Invalid metadata type"; + } +} + +} // namespace SmartRedis diff --git a/2023-01/smartsim/smartredis/src/fortran/client.F90 b/2023-01/smartsim/smartredis/src/fortran/client.F90 index 9882639c..a7b16f08 100644 --- a/2023-01/smartsim/smartredis/src/fortran/client.F90 +++ b/2023-01/smartsim/smartredis/src/fortran/client.F90 @@ -24,6 +24,15 @@ ! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +! Note the below macros are here to allow compilation with Nvidia drivers +! While assumed size should be sufficient, this does not seem to work with +! Intel and GNU (however those have support for assumed rank) +#ifdef __NVCOMPILER +#define DIM_RANK_SPEC dimension(*) +#else +#define DIM_RANK_SPEC dimension(..) +#endif + module smartredis_client use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int @@ -33,8 +42,10 @@ module smartredis_client use, intrinsic :: iso_fortran_env, only: stderr => error_unit use smartredis_dataset, only : dataset_type +use smartredis_configoptions, only : configoptions_type use fortran_c_interop, only : convert_char_array_to_c, enum_kind, C_MAX_STRING + implicit none; private #include "enum_fortran.inc" @@ -47,6 +58,7 @@ module smartredis_client #include "client/client_dataset_interfaces.inc" #include "client/ensemble_interfaces.inc" #include "client/aggregation_interfaces.inc" +#include "errors/errors_interfaces.inc" public :: enum_kind !< The kind of integer equivalent to a C enum. According to C an Fortran !! 
standards this should be c_int, but is renamed here to ensure that @@ -58,12 +70,13 @@ module smartredis_client type, public :: client_type private - logical(kind=c_bool) :: cluster = .false. !< True if a database cluster is being used - type(c_ptr) :: client_ptr = c_null_ptr !< Pointer to the initialized SmartRedisClient - logical :: is_initialized = .false. !< True if client is initialized + type(c_ptr) :: client_ptr = c_null_ptr !< Pointer to the initialized SmartRedisClient + logical :: is_initialized = .false. !< True if client is initialized contains ! Public procedures + !> Initializes a new instance of the SmartRedis client + generic :: initialize => initialize_client_deprecated, initialize_client_simple, initialize_client_cfgopts !> Puts a tensor into the database (overloaded) generic :: put_tensor => put_tensor_i8, put_tensor_i16, put_tensor_i32, put_tensor_i64, & put_tensor_float, put_tensor_double @@ -73,8 +86,6 @@ module smartredis_client !> Decode a response code from an API function procedure :: SR_error_parser - !> Initializes a new instance of the SmartRedis client - procedure :: initialize => initialize_client !> Check if a SmartRedis client has been initialized procedure :: isinitialized !> Destructs a new instance of the SmartRedis client @@ -152,6 +163,8 @@ module smartredis_client !> If true, preprend the ensemble id for tensor-related keys procedure :: use_tensor_ensemble_prefix + !> If true, preprend the ensemble id for dataset-related keys + procedure :: use_dataset_ensemble_prefix !> If true, preprend the ensemble id for model-related keys procedure :: use_model_ensemble_prefix !> If true, preprend the ensemble id for dataset list-related keys @@ -179,8 +192,15 @@ module smartredis_client procedure :: get_datasets_from_list !> Retrieve vector of datasets from the list over a given range procedure :: get_datasets_from_list_range + !> Retrieve a string representation of the client + procedure :: to_string + !> Print a string representation of the client + procedure :: print_client ! Private procedures + procedure, private :: initialize_client_deprecated + procedure, private :: initialize_client_simple + procedure, private :: initialize_client_cfgopts procedure, private :: put_tensor_i8 procedure, private :: put_tensor_i16 procedure, private :: put_tensor_i32 @@ -234,10 +254,33 @@ function SR_error_parser(self, response_code) result(is_error) end function SR_error_parser !> Initializes a new instance of a SmartRedis client -function initialize_client(self, cluster, logger_name) - integer(kind=enum_kind) :: initialize_client - class(client_type), intent(inout) :: self !< Receives the initialized client - logical, optional, intent(in ) :: cluster !< If true, client uses a database cluster (Default: .false.) +function initialize_client_simple(self, logger_name) + integer(kind=enum_kind) :: initialize_client_simple + class(client_type), intent(inout) :: self !< Receives the initialized client + character(len=*), optional, intent(in ) :: logger_name !< Identifier for the current client + + ! Local variables + character(kind=c_char, len=:), allocatable :: c_logger_name + integer(kind=c_size_t) :: logger_name_length + + if (present(logger_name)) then + c_logger_name = logger_name + else + c_logger_name = 'default' + endif + logger_name_length = len_trim(c_logger_name) + + initialize_client_simple = c_simple_constructor( & + c_logger_name, logger_name_length, self%client_ptr) + self%is_initialized = initialize_client_simple .eq. 
SRNoError + if (allocated(c_logger_name)) deallocate(c_logger_name) +end function initialize_client_simple + +!> Initializes a new instance of a SmartRedis client +function initialize_client_cfgopts(self, cfgopts, logger_name) + integer(kind=enum_kind) :: initialize_client_cfgopts + class(client_type), intent(inout) :: self !< Receives the initialized client + type(configoptions_type), intent(in ) :: cfgopts !< Source for configuration settings character(len=*), optional, intent(in ) :: logger_name !< Identifier for the current client ! Local variables @@ -251,11 +294,37 @@ function initialize_client(self, cluster, logger_name) endif logger_name_length = len_trim(c_logger_name) - if (present(cluster)) self%cluster = cluster - initialize_client = c_constructor(self%cluster, c_logger_name, logger_name_length, self%client_ptr) - self%is_initialized = initialize_client .eq. SRNoError + initialize_client_cfgopts = c_constructor( & + cfgopts%get_c_pointer(), c_logger_name, logger_name_length, self%client_ptr) + self%is_initialized = initialize_client_cfgopts .eq. SRNoError if (allocated(c_logger_name)) deallocate(c_logger_name) -end function initialize_client +end function initialize_client_cfgopts + +!> Initializes a new instance of a SmartRedis client (deprecated) +function initialize_client_deprecated(self, cluster, logger_name) + integer(kind=enum_kind) :: initialize_client_deprecated + class(client_type), intent(inout) :: self !< Receives the initialized client + logical, intent(in ) :: cluster !< If true, client uses a database cluster (Default: .false.) + character(len=*), optional, intent(in ) :: logger_name !< Identifier for the current client + + ! Local variables + character(kind=c_char, len=:), allocatable :: c_logger_name + integer(kind=c_size_t) :: logger_name_length + logical(kind=c_bool) :: c_cluster + + if (present(logger_name)) then + c_logger_name = logger_name + else + c_logger_name = 'default' + endif + logger_name_length = len_trim(c_logger_name) + c_cluster = cluster + + initialize_client_deprecated = c_deprecated_constructor( & + c_cluster, c_logger_name, logger_name_length, self%client_ptr) + self%is_initialized = initialize_client_deprecated .eq. SRNoError + if (allocated(c_logger_name)) deallocate(c_logger_name) +end function initialize_client_deprecated !> Check whether the client has been initialized logical function isinitialized(this) @@ -283,67 +352,75 @@ end function get_c_pointer function key_exists(self, key, exists) class(client_type), intent(in) :: self !< The client character(len=*), intent(in) :: key !< The key to check - logical(kind=c_bool), intent(out) :: exists !< Receives whether the key exists + logical, intent(out) :: exists !< Receives whether the key exists integer(kind=enum_kind) :: key_exists ! 
Local variables character(kind=c_char, len=len_trim(key)) :: c_key integer(kind=c_size_t) :: c_key_length + logical(kind=c_bool) :: c_exists c_key = trim(key) c_key_length = len_trim(key) - key_exists = key_exists_c(self%client_ptr, c_key, c_key_length, exists) + key_exists = key_exists_c(self%client_ptr, c_key, c_key_length, c_exists) + exists = c_exists end function key_exists !> Check if the specified model exists in the database function model_exists(self, model_name, exists) result(code) class(client_type), intent(in) :: self !< The client character(len=*), intent(in) :: model_name !< The model to check - logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists + logical, intent(out) :: exists !< Receives whether the model exists integer(kind=enum_kind) :: code ! Local variables character(kind=c_char, len=len_trim(model_name)) :: c_model_name integer(kind=c_size_t) :: c_model_name_length + logical(kind=c_bool) :: c_exists c_model_name = trim(model_name) c_model_name_length = len_trim(model_name) - code = model_exists_c(self%client_ptr, c_model_name, c_model_name_length, exists) + code = model_exists_c(self%client_ptr, c_model_name, c_model_name_length, c_exists) + exists = c_exists end function model_exists !> Check if the specified tensor exists in the database function tensor_exists(self, tensor_name, exists) result(code) class(client_type), intent(in) :: self !< The client character(len=*), intent(in) :: tensor_name !< The tensor to check - logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists + logical, intent(out) :: exists !< Receives whether the tensor exists integer(kind=enum_kind) :: code ! Local variables character(kind=c_char, len=len_trim(tensor_name)) :: c_tensor_name integer(kind=c_size_t) :: c_tensor_name_length + logical(kind=c_bool) :: c_exists c_tensor_name = trim(tensor_name) c_tensor_name_length = len_trim(tensor_name) - code = tensor_exists_c(self%client_ptr, c_tensor_name, c_tensor_name_length, exists) + code = tensor_exists_c(self%client_ptr, c_tensor_name, c_tensor_name_length, c_exists) + exists = c_exists end function tensor_exists !> Check if the specified dataset exists in the database function dataset_exists(this, dataset_name, exists) result(code) class(client_type), intent(in) :: this !< The client character(len=*), intent(in) :: dataset_name !< The dataset to check - logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists + logical, intent(out) :: exists !< Receives whether the dataset exists integer(kind=enum_kind) :: code character(kind=c_char, len=len_trim(dataset_name)) :: c_dataset_name integer(kind=c_size_t) :: c_dataset_name_length + logical(kind=c_bool) :: c_exists c_dataset_name = trim(dataset_name) c_dataset_name_length = len_trim(dataset_name) - code = dataset_exists_c(this%client_ptr, c_dataset_name, c_dataset_name_length, exists) + code = dataset_exists_c(this%client_ptr, c_dataset_name, c_dataset_name_length, c_exists) + exists = c_exists end function dataset_exists !> Repeatedly poll the database until the tensor exists or the number of tries is exceeded @@ -352,20 +429,22 @@ function poll_tensor(self, tensor_name, poll_frequency_ms, num_tries, exists) re character(len=*), intent(in) :: tensor_name !< name in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent(out) :: exists !< Receives 
whether the tensor exists + logical, intent(out) :: exists !< Receives whether the tensor exists integer(kind=enum_kind) :: code ! Local variables character(kind=c_char,len=len_trim(tensor_name)) :: c_tensor_name integer(kind=c_size_t) :: c_tensor_name_length integer(kind=c_int) :: c_poll_frequency, c_num_tries + logical(kind=c_bool) :: c_exists c_tensor_name = trim(tensor_name) c_tensor_name_length = len_trim(tensor_name) c_num_tries = num_tries c_poll_frequency = poll_frequency_ms - code = poll_tensor_c(self%client_ptr, c_tensor_name, c_tensor_name_length, c_poll_frequency, c_num_tries, exists) + code = poll_tensor_c(self%client_ptr, c_tensor_name, c_tensor_name_length, c_poll_frequency, c_num_tries, c_exists) + exists = c_exists end function poll_tensor !> Repeatedly poll the database until the dataset exists or the number of tries is exceeded @@ -375,19 +454,21 @@ function poll_dataset(self, dataset_name, poll_frequency_ms, num_tries, exists) character(len=*), intent(in) :: dataset_name !< Name in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent(out) :: exists !< Receives whether the tensor exists + logical, intent(out) :: exists !< Receives whether the dataset exists ! Local variables character(kind=c_char,len=len_trim(dataset_name)) :: c_dataset_name integer(kind=c_size_t) :: c_dataset_name_length integer(kind=c_int) :: c_poll_frequency, c_num_tries + logical(kind=c_bool) :: c_exists c_dataset_name = trim(dataset_name) c_dataset_name_length = len_trim(dataset_name) c_num_tries = num_tries c_poll_frequency = poll_frequency_ms - poll_dataset = poll_dataset_c(self%client_ptr, c_dataset_name, c_dataset_name_length, c_poll_frequency, c_num_tries, exists) + poll_dataset = poll_dataset_c(self%client_ptr, c_dataset_name, c_dataset_name_length, c_poll_frequency, c_num_tries, c_exists) + exists = c_exists end function poll_dataset !> Repeatedly poll the database until the model exists or the number of tries is exceeded @@ -396,20 +477,22 @@ function poll_model(self, model_name, poll_frequency_ms, num_tries, exists) resu character(len=*), intent(in) :: model_name !< Name in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent(out) :: exists !< Receives whether the model exists + logical, intent(out) :: exists !< Receives whether the model exists integer(kind=enum_kind) :: code !
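The same logical-to-c_bool shimming runs through all the existence and polling wrappers, so calling code can stay in default Fortran kinds throughout. A sketch, with the type-bound names assumed and error handling elided (the poll arguments are the interval in milliseconds and the maximum number of tries):

    logical :: found
    integer(kind=enum_kind) :: code

    code = client%tensor_exists("velocity", found)   ! one-shot check
    if (.not. found) then
       ! retry every 250 ms, at most 40 times
       code = client%poll_tensor("velocity", 250, 40, found)
    end if

!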
Local variables character(kind=c_char,len=len_trim(model_name)) :: c_model_name integer(kind=c_size_t) :: c_model_name_length integer(kind=c_int) :: c_poll_frequency, c_num_tries + logical(kind=c_bool) :: c_exists c_model_name = trim(model_name) c_model_name_length = len_trim(model_name) c_num_tries = num_tries c_poll_frequency = poll_frequency_ms - code = poll_model_c(self%client_ptr, c_model_name, c_model_name_length, c_poll_frequency, c_num_tries, exists) + code = poll_model_c(self%client_ptr, c_model_name, c_model_name_length, c_poll_frequency, c_num_tries, c_exists) + exists = c_exists end function poll_model !> Repeatedly poll the database until the key exists or the number of tries is exceeded @@ -418,25 +501,27 @@ function poll_key(self, key, poll_frequency_ms, num_tries, exists) result(code) character(len=*), intent(in) :: key !< Key in the database to poll integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent(out) :: exists !< Receives whether the key exists + logical, intent(out) :: exists !< Receives whether the key exists integer(kind=enum_kind) :: code ! Local variables character(kind=c_char, len=len_trim(key)) :: c_key integer(kind=c_size_t) :: c_key_length integer(kind=c_int) :: c_poll_frequency, c_num_tries + logical(kind=c_bool) :: c_exists c_key = trim(key) c_key_length = len_trim(key) c_num_tries = num_tries c_poll_frequency = poll_frequency_ms - code = poll_key_c(self%client_ptr, c_key, c_key_length, c_poll_frequency, c_num_tries, exists) + code = poll_key_c(self%client_ptr, c_key, c_key_length, c_poll_frequency, c_num_tries, c_exists) + exists = c_exists end function poll_key !> Put a tensor whose Fortran type is the equivalent 'int8' C-type function put_tensor_i8(self, name, data, dims) result(code) - integer(kind=c_int8_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=c_int8_t), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(client_type), intent(in) :: self !< Fortran SmartRedis client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -452,7 +537,7 @@ end function put_tensor_i8 !> Put a tensor whose Fortran type is the equivalent 'int16' C-type function put_tensor_i16(self, name, data, dims) result(code) - integer(kind=c_int16_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=c_int16_t), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(client_type), intent(in) :: self !< Fortran SmartRedis client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -468,7 +553,7 @@ end function put_tensor_i16 !> Put a tensor whose Fortran type is the equivalent 'int32' C-type function put_tensor_i32(self, name, data, dims) result(code) - integer(kind=c_int32_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=c_int32_t), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(client_type), intent(in) :: self !< Fortran SmartRedis client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -484,7 +569,7 @@ end function put_tensor_i32 !> Put a tensor whose Fortran type is 
the equivalent 'int64' C-type function put_tensor_i64(self, name, data, dims) result(code) - integer(kind=c_int64_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=c_int64_t), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(client_type), intent(in) :: self !< Fortran SmartRedis client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -500,7 +585,7 @@ end function put_tensor_i64 !> Put a tensor whose Fortran type is the equivalent 'float' C-type function put_tensor_float(self, name, data, dims) result(code) - real(kind=c_float), dimension(..), target, intent(in) :: data !< Data to be sent + real(kind=c_float), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(client_type), intent(in) :: self !< Fortran SmartRedis client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -516,7 +601,7 @@ end function put_tensor_float !> Put a tensor whose Fortran type is the equivalent 'double' C-type function put_tensor_double(self, name, data, dims) result(code) - real(kind=c_double), dimension(..), target, intent(in) :: data !< Data to be sent + real(kind=c_double), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(client_type), intent(in) :: self !< Fortran SmartRedis client character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -532,7 +617,7 @@ end function put_tensor_double !> Put a tensor whose Fortran type is the equivalent 'int8' C-type function unpack_tensor_i8(self, name, result, dims) result(code) - integer(kind=c_int8_t), dimension(..), target, intent(out) :: result !< Data to be sent + integer(kind=c_int8_t), DIM_RANK_SPEC, target, intent(out) :: result !< Data to be sent class(client_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -548,7 +633,7 @@ end function unpack_tensor_i8 !> Put a tensor whose Fortran type is the equivalent 'int16' C-type function unpack_tensor_i16(self, name, result, dims) result(code) - integer(kind=c_int16_t), dimension(..), target, intent(out) :: result !< Data to be sent + integer(kind=c_int16_t), DIM_RANK_SPEC, target, intent(out) :: result !< Data to be sent class(client_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -564,7 +649,7 @@ end function unpack_tensor_i16 !> Put a tensor whose Fortran type is the equivalent 'int32' C-type function unpack_tensor_i32(self, name, result, dims) result(code) - integer(kind=c_int32_t), dimension(..), target, intent(out) :: result !< Data to be sent + integer(kind=c_int32_t), DIM_RANK_SPEC, target, intent(out) :: result !< Data to be sent class(client_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -580,7 +665,7 @@ end function unpack_tensor_i32 !> Put a tensor whose Fortran type is the 
equivalent 'int64' C-type function unpack_tensor_i64(self, name, result, dims) result(code) - integer(kind=c_int64_t), dimension(..), target, intent(out) :: result !< Data to be sent + integer(kind=c_int64_t), DIM_RANK_SPEC, target, intent(out) :: result !< Data to be sent class(client_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -596,7 +681,7 @@ end function unpack_tensor_i64 !> Put a tensor whose Fortran type is the equivalent 'float' C-type function unpack_tensor_float(self, name, result, dims) result(code) - real(kind=c_float), dimension(..), target, intent(out) :: result !< Data to be sent + real(kind=c_float), DIM_RANK_SPEC, target, intent(out) :: result !< Data to be sent class(client_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -612,7 +697,7 @@ end function unpack_tensor_float !> Put a tensor whose Fortran type is the equivalent 'double' C-type function unpack_tensor_double(self, name, result, dims) result(code) - real(kind=c_double), dimension(..), target, intent(out) :: result !< Data to be sent + real(kind=c_double), DIM_RANK_SPEC, target, intent(out) :: result !< Data to be sent class(client_type), intent(in) :: self !< Pointer to the initialized client character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -713,8 +798,8 @@ function get_model(self, name, model) result(code) end function get_model !> Load the machine learning model from a file and set the configuration -function set_model_from_file(self, name, model_file, backend, device, batch_size, min_batch_size, tag, & - inputs, outputs) result(code) +function set_model_from_file(self, name, model_file, backend, device, batch_size, min_batch_size, & + min_batch_timeout, tag, inputs, outputs) result(code) class(client_type), intent(in) :: self !< An initialized SmartRedis client character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), intent(in) :: model_file !< The file storing the model @@ -722,6 +807,7 @@ function set_model_from_file(self, name, model_file, backend, device, batch_size character(len=*), intent(in) :: device !< The name of the device (CPU, GPU, GPU:0, GPU:1...) integer, optional, intent(in) :: batch_size !< The batch size for model execution integer, optional, intent(in) :: min_batch_size !< The minimum batch size for model execution + integer, optional, intent(in) :: min_batch_timeout !< Max time (ms) to wait for min batch size character(len=*), optional, intent(in) :: tag !< A tag to attach to the model for !! 
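Because every put/unpack specific now declares its data argument with DIM_RANK_SPEC, one generic call site serves any array rank (assumed rank dimension(..) on GNU/Intel, assumed size dimension(*) under NVHPC, as the macro introduced in dataset.F90 later in this patch explains). A round-trip sketch, generic binding names assumed and error handling elided:

    use iso_c_binding, only : c_double
    real(kind=c_double), dimension(32, 64) :: field, copy
    integer(kind=enum_kind) :: code

    code = client%put_tensor("pressure", field, shape(field))
    code = client%unpack_tensor("pressure", copy, shape(copy))

!!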
information purposes character(len=*), dimension(:), optional, intent(in) :: inputs !< One or more names of model input nodes (TF @@ -742,7 +828,7 @@ function set_model_from_file(self, name, model_file, backend, device, batch_size integer(c_size_t), dimension(:), allocatable, target :: input_lengths, output_lengths integer(kind=c_size_t) :: name_length, model_file_length, backend_length, device_length, tag_length, n_inputs, & n_outputs - integer(kind=c_int) :: c_batch_size, c_min_batch_size + integer(kind=c_int) :: c_batch_size, c_min_batch_size, c_min_batch_timeout type(c_ptr) :: inputs_ptr, input_lengths_ptr, outputs_ptr, output_lengths_ptr type(c_ptr), dimension(:), allocatable :: ptrs_to_inputs, ptrs_to_outputs @@ -751,6 +837,8 @@ function set_model_from_file(self, name, model_file, backend, device, batch_size if (present(batch_size)) c_batch_size = batch_size c_min_batch_size = 0 if (present(min_batch_size)) c_min_batch_size = min_batch_size + c_min_batch_timeout = 0 + if (present(min_batch_timeout)) c_min_batch_timeout = min_batch_timeout if (present(tag)) then allocate(character(kind=c_char, len=len_trim(tag)) :: c_tag) c_tag = tag @@ -795,8 +883,8 @@ function set_model_from_file(self, name, model_file, backend, device, batch_size code = set_model_from_file_c(self%client_ptr, c_name, name_length, c_model_file, model_file_length, & c_backend, backend_length, c_device, device_length, c_batch_size, c_min_batch_size, & - c_tag, tag_length, inputs_ptr, input_lengths_ptr, n_inputs, outputs_ptr, & - output_lengths_ptr, n_outputs) + c_min_batch_timeout, c_tag, tag_length, inputs_ptr, input_lengths_ptr, n_inputs, & + outputs_ptr, output_lengths_ptr, n_outputs) if (allocated(c_inputs)) deallocate(c_inputs) if (allocated(input_lengths)) deallocate(input_lengths) if (allocated(ptrs_to_inputs)) deallocate(ptrs_to_inputs) @@ -807,7 +895,7 @@ end function set_model_from_file !> Load the machine learning model from a file and set the configuration for use in multi-GPU systems function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu, num_gpus, batch_size, min_batch_size, & - tag, inputs, outputs) result(code) + min_batch_timeout, tag, inputs, outputs) result(code) class(client_type), intent(in) :: self !< An initialized SmartRedis client character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), intent(in) :: model_file !< The file storing the model @@ -816,6 +904,7 @@ function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu integer, intent(in) :: num_gpus !< The number of GPUs to use with the model integer, optional, intent(in) :: batch_size !< The batch size for model execution integer, optional, intent(in) :: min_batch_size !< The minimum batch size for model execution + integer, optional, intent(in) :: min_batch_timeout !< Max time (ms) to wait for min batch size character(len=*), optional, intent(in) :: tag !< A tag to attach to the model for !! 
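The new min_batch_timeout argument is plumbed through every set_model variant: it caps, in milliseconds, how long the backend waits for min_batch_size requests to accumulate before executing anyway. A call sketch for the file-based variant (model name, file path, and values are placeholders; optional arguments are passed by keyword; error handling elided):

    code = client%set_model_from_file("resnet", "model/resnet50.pt", "TORCH", "CPU", &
                                      batch_size=16, min_batch_size=4,               &
                                      min_batch_timeout=100)

!!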
information purposes character(len=*), dimension(:), optional, intent(in) :: inputs !< One or more names of model input nodes (TF @@ -835,7 +924,7 @@ function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu integer(c_size_t), dimension(:), allocatable, target :: input_lengths, output_lengths integer(kind=c_size_t) :: name_length, model_file_length, backend_length, tag_length, n_inputs, & n_outputs - integer(kind=c_int) :: c_batch_size, c_min_batch_size, c_first_gpu, c_num_gpus + integer(kind=c_int) :: c_batch_size, c_min_batch_size, c_min_batch_timeout, c_first_gpu, c_num_gpus type(c_ptr) :: inputs_ptr, input_lengths_ptr, outputs_ptr, output_lengths_ptr type(c_ptr), dimension(:), allocatable :: ptrs_to_inputs, ptrs_to_outputs @@ -844,6 +933,8 @@ function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu if (present(batch_size)) c_batch_size = batch_size c_min_batch_size = 0 if (present(min_batch_size)) c_min_batch_size = min_batch_size + c_min_batch_timeout = 0 + if (present(min_batch_timeout)) c_min_batch_timeout = min_batch_timeout if (present(tag)) then allocate(character(kind=c_char, len=len_trim(tag)) :: c_tag) c_tag = tag @@ -889,8 +980,8 @@ function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu code = set_model_from_file_multigpu_c(self%client_ptr, c_name, name_length, c_model_file, model_file_length, & c_backend, backend_length, c_first_gpu, c_num_gpus, c_batch_size, c_min_batch_size, & - c_tag, tag_length, inputs_ptr, input_lengths_ptr, n_inputs, outputs_ptr, & - output_lengths_ptr, n_outputs) + c_min_batch_timeout, c_tag, tag_length, inputs_ptr, input_lengths_ptr, n_inputs, & + outputs_ptr, output_lengths_ptr, n_outputs) if (allocated(c_inputs)) deallocate(c_inputs) if (allocated(input_lengths)) deallocate(input_lengths) @@ -901,8 +992,8 @@ function set_model_from_file_multigpu(self, name, model_file, backend, first_gpu end function set_model_from_file_multigpu !> Establish a model to run -function set_model(self, name, model, backend, device, batch_size, min_batch_size, tag, & - inputs, outputs) result(code) +function set_model(self, name, model, backend, device, batch_size, min_batch_size, min_batch_timeout, & + tag, inputs, outputs) result(code) class(client_type), intent(in) :: self !< An initialized SmartRedis client character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), intent(in) :: model !< The binary representation of the model @@ -910,6 +1001,7 @@ function set_model(self, name, model, backend, device, batch_size, min_batch_siz character(len=*), intent(in) :: device !< The name of the device (CPU, GPU, GPU:0, GPU:1...) 
integer, intent(in) :: batch_size !< The batch size for model execution integer, intent(in) :: min_batch_size !< The minimum batch size for model execution + integer, intent(in) :: min_batch_timeout !< Max time (ms) to wait for min batch size character(len=*), intent(in) :: tag !< A tag to attach to the model for information purposes character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) @@ -927,7 +1019,7 @@ function set_model(self, name, model, backend, device, batch_size, min_batch_siz integer(c_size_t), dimension(:), allocatable, target :: input_lengths, output_lengths integer(kind=c_size_t) :: name_length, model_length, backend_length, device_length, tag_length, n_inputs, & n_outputs - integer(kind=c_int) :: c_batch_size, c_min_batch_size + integer(kind=c_int) :: c_batch_size, c_min_batch_size, c_min_batch_timeout type(c_ptr) :: inputs_ptr, input_lengths_ptr, outputs_ptr, output_lengths_ptr type(c_ptr), dimension(:), allocatable :: ptrs_to_inputs, ptrs_to_outputs @@ -951,12 +1043,13 @@ function set_model(self, name, model, backend, device, batch_size, min_batch_siz output_lengths_ptr, n_outputs) if (code /= SRNoError) return - ! Cast the batch sizes to C integers + ! Cast the batch params to C integers c_batch_size = batch_size c_min_batch_size = min_batch_size + c_min_batch_timeout = min_batch_timeout code = set_model_c(self%client_ptr, c_name, name_length, c_model, model_length, c_backend, backend_length, & - c_device, device_length, batch_size, min_batch_size, c_tag, tag_length, & + c_device, device_length, batch_size, min_batch_size, c_min_batch_timeout, c_tag, tag_length, & inputs_ptr, input_lengths_ptr, n_inputs, outputs_ptr, output_lengths_ptr, n_outputs) if (allocated(c_inputs)) deallocate(c_inputs) @@ -968,8 +1061,8 @@ function set_model(self, name, model, backend, device, batch_size, min_batch_siz end function set_model !> Set a model from a byte string to run on a system with multiple GPUs -function set_model_multigpu(self, name, model, backend, first_gpu, num_gpus, batch_size, min_batch_size, tag, & - inputs, outputs) result(code) +function set_model_multigpu(self, name, model, backend, first_gpu, num_gpus, batch_size, min_batch_size, & + min_batch_timeout, tag, inputs, outputs) result(code) class(client_type), intent(in) :: self !< An initialized SmartRedis client character(len=*), intent(in) :: name !< The name to use to place the model character(len=*), intent(in) :: model !< The binary representation of the model @@ -978,6 +1071,7 @@ function set_model_multigpu(self, name, model, backend, first_gpu, num_gpus, bat integer, intent(in) :: num_gpus !< The number of GPUs to use with the model integer, intent(in) :: batch_size !< The batch size for model execution integer, intent(in) :: min_batch_size !< The minimum batch size for model execution + integer, intent(in) :: min_batch_timeout !< Max time (ms) to wait for min batch size character(len=*), intent(in) :: tag !< A tag to attach to the model for information purposes character(len=*), dimension(:), intent(in) :: inputs !< One or more names of model input nodes (TF models) character(len=*), dimension(:), intent(in) :: outputs !< One or more names of model output nodes (TF models) @@ -993,7 +1087,7 @@ function set_model_multigpu(self, name, model, backend, first_gpu, num_gpus, bat integer(c_size_t), dimension(:), allocatable, target :: input_lengths, 
output_lengths integer(kind=c_size_t) :: name_length, model_length, backend_length, tag_length, n_inputs, n_outputs - integer(kind=c_int) :: c_batch_size, c_min_batch_size, c_first_gpu, c_num_gpus + integer(kind=c_int) :: c_batch_size, c_min_batch_size, c_min_batch_timeout, c_first_gpu, c_num_gpus type(c_ptr) :: inputs_ptr, input_lengths_ptr, outputs_ptr, output_lengths_ptr type(c_ptr), dimension(:), allocatable :: ptrs_to_inputs, ptrs_to_outputs @@ -1015,14 +1109,15 @@ function set_model_multigpu(self, name, model, backend, first_gpu, num_gpus, bat output_lengths_ptr, n_outputs) if (code /= SRNoError) return - ! Cast the batch sizes to C integers + ! Cast the batch params to C integers c_batch_size = batch_size c_min_batch_size = min_batch_size + c_min_batch_timeout = min_batch_timeout c_first_gpu = first_gpu c_num_gpus = num_gpus code = set_model_multigpu_c(self%client_ptr, c_name, name_length, c_model, model_length, c_backend, backend_length, & - c_first_gpu, c_num_gpus, c_batch_size, c_min_batch_size, c_tag, tag_length, & + c_first_gpu, c_num_gpus, c_batch_size, c_min_batch_size, c_min_batch_timeout, c_tag, tag_length, & inputs_ptr, input_lengths_ptr, n_inputs, outputs_ptr, output_lengths_ptr, n_outputs) if (allocated(c_inputs)) deallocate(c_inputs) @@ -1540,10 +1635,10 @@ function use_model_ensemble_prefix(self, use_prefix) result(code) end function use_model_ensemble_prefix -!> Set whether names of tensor and dataset entities should be prefixed (e.g. in an ensemble) to form database keys. +!> Set whether names of tensor entities should be prefixed (e.g. in an ensemble) to form database keys. !! Prefixes will only be used if they were previously set through the environment variables SSKEYOUT and SSKEYIN. !! Keys of entities created before client function is called will not be affected. By default, the client prefixes -!! tensor and dataset keys with the first prefix specified with the SSKEYIN and SSKEYOUT environment variables. +!! tensor keys with the first prefix specified with the SSKEYIN and SSKEYOUT environment variables. function use_tensor_ensemble_prefix(self, use_prefix) result(code) class(client_type), intent(in) :: self !< An initialized SmartRedis client logical, intent(in) :: use_prefix !< The prefix setting @@ -1552,6 +1647,18 @@ function use_tensor_ensemble_prefix(self, use_prefix) result(code) code = use_tensor_ensemble_prefix_c(self%client_ptr, logical(use_prefix,kind=c_bool)) end function use_tensor_ensemble_prefix +!> Set whether names of dataset entities should be prefixed (e.g. in an ensemble) to form database keys. +!! Prefixes will only be used if they were previously set through the environment variables SSKEYOUT and SSKEYIN. +!! Keys of entities created before client function is called will not be affected. By default, the client prefixes +!! dataset keys with the first prefix specified with the SSKEYIN and SSKEYOUT environment variables. 
+function use_dataset_ensemble_prefix(self, use_prefix) result(code) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + logical, intent(in) :: use_prefix !< The prefix setting + integer(kind=enum_kind) :: code + + code = use_dataset_ensemble_prefix_c(self%client_ptr, logical(use_prefix,kind=c_bool)) +end function use_dataset_ensemble_prefix + !> Control whether aggregation lists are prefixed function use_list_ensemble_prefix(self, use_prefix) result(code) class(client_type), intent(in) :: self !< An initialized SmartRedis client @@ -1659,13 +1766,14 @@ function poll_list_length(self, list_name, list_length, poll_frequency_ms, num_t integer, intent(in ) :: list_length !< The desired length of the list integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in ) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent( out) :: poll_result !< True if the list is the requested length, False if not after num_tries. + logical, intent( out) :: poll_result !< True if the list is the requested length, False if not after num_tries. integer(kind=enum_kind) :: code ! Local variables character(kind=c_char, len=len_trim(list_name)) :: list_name_c integer(kind=c_size_t) :: list_name_length integer(kind=c_int) :: c_poll_frequency, c_num_tries, c_list_length + logical(kind=c_bool) :: c_poll_result list_name_c = trim(list_name) list_name_length = len_trim(list_name) @@ -1674,7 +1782,8 @@ function poll_list_length(self, list_name, list_length, poll_frequency_ms, num_t c_list_length = list_length code = poll_list_length_c(self%client_ptr, list_name_c, list_name_length, & - c_list_length, c_poll_frequency, c_num_tries, poll_result) + c_list_length, c_poll_frequency, c_num_tries, c_poll_result) + poll_result = c_poll_result end function poll_list_length !> Get the length of the aggregation list @@ -1684,13 +1793,14 @@ function poll_list_length_gte(self, list_name, list_length, poll_frequency_ms, n integer, intent(in ) :: list_length !< The desired length of the list integer, intent(in ) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in ) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent( out) :: poll_result !< True if the list is the requested length, False if not after num_tries. + logical, intent( out) :: poll_result !< True if the list is at least the requested length, False if not after num_tries. integer(kind=enum_kind) :: code !
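With dataset prefixing split out of use_tensor_ensemble_prefix, ensemble members can now opt tensors and datasets in or out of key prefixing independently; both still honor the SSKEYIN and SSKEYOUT environment variables. For instance (type-bound names assumed, error handling elided):

    code = client%use_tensor_ensemble_prefix(.true.)
    code = client%use_dataset_ensemble_prefix(.true.)

!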
Local variables character(kind=c_char, len=len_trim(list_name)) :: list_name_c integer(kind=c_size_t) :: list_name_length integer(kind=c_int) :: c_poll_frequency, c_num_tries, c_list_length + logical(kind=c_bool) :: c_poll_result list_name_c = trim(list_name) list_name_length = len_trim(list_name) @@ -1699,7 +1809,8 @@ function poll_list_length_gte(self, list_name, list_length, poll_frequency_ms, n c_list_length = list_length code = poll_list_length_gte_c(self%client_ptr, list_name_c, list_name_length, & - c_list_length, c_poll_frequency, c_num_tries, poll_result) + c_list_length, c_poll_frequency, c_num_tries, c_poll_result) + poll_result = c_poll_result end function poll_list_length_gte !> Get the length of the aggregation list @@ -1709,13 +1820,14 @@ function poll_list_length_lte(self, list_name, list_length, poll_frequency_ms, n integer, intent(in) :: list_length !< The desired length of the list integer, intent(in) :: poll_frequency_ms !< Frequency at which to poll the database (ms) integer, intent(in) :: num_tries !< Number of times to poll the database before failing - logical(kind=c_bool), intent(out) :: poll_result !< True if the list is the requested length, False if not after num_tries. + logical, intent(out) :: poll_result !< True if the list is at most the requested length, False if not after num_tries. integer(kind=enum_kind) :: code ! Local variables character(kind=c_char, len=len_trim(list_name)) :: list_name_c integer(kind=c_size_t) :: list_name_length integer(kind=c_int) :: c_poll_frequency, c_num_tries, c_list_length + logical(kind=c_bool) :: c_poll_result list_name_c = trim(list_name) list_name_length = len_trim(list_name) @@ -1724,7 +1836,8 @@ function poll_list_length_lte(self, list_name, list_length, poll_frequency_ms, n c_list_length = list_length code = poll_list_length_lte_c(self%client_ptr, list_name_c, list_name_length, & - c_list_length, c_poll_frequency, c_num_tries, c_poll_result) + poll_result = c_poll_result end function poll_list_length_lte !> Get datasets from an aggregation list. Note that this will deallocate an existing list. @@ -1816,5 +1929,44 @@ function get_datasets_from_list_range(self, list_name, start_index, end_index, d deallocate(dataset_ptrs) end function get_datasets_from_list_range +!> Retrieve a string representation of the client +function to_string(self) + character(kind=c_char, len=:), allocatable :: to_string !< Text version of client + class(client_type), intent(in) :: self !< An initialized SmartRedis client + + type(c_ptr) :: c_cli_str + integer(kind=c_size_t) :: c_cli_str_len + + ! Get the string representation of the client from C + c_cli_str = client_to_string_c(self%client_ptr) + c_cli_str_len = c_strlen(c_cli_str) + to_string = make_str(c_cli_str, c_cli_str_len) +end function to_string + +!> Convert a pointer view of a string to a Fortran string +function make_str(strptr, str_len) + character(kind=c_char, len=:), allocatable :: make_str + type(c_ptr), intent(in), value :: strptr + integer(kind=c_size_t) :: str_len + + character(len=str_len, kind=c_char), pointer :: ptrview + call c_f_pointer(strptr, ptrview) + make_str = ptrview +end function make_str + +!> Print a string representation of the client +subroutine print_client(self, unit) + class(client_type), intent(in) :: self !< An initialized SmartRedis client + integer, optional, intent(in) :: unit !< Unit to which to print the client + + !
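The list-polling wrappers follow the same conversion pattern; for instance, to block until an aggregation list has accumulated at least eight datasets (names assumed, error handling elided):

    logical :: ready
    code = client%poll_list_length_gte("batches", 8, 500, 20, ready)  ! 500 ms interval, 20 tries

!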
Determine which unit to write to + integer :: target_unit + target_unit = STDERR + if (present(unit)) target_unit = unit + + ! Write the client description to the target unit + write(target_unit,*) to_string(self) +end subroutine print_client + end module smartredis_client diff --git a/2023-01/smartsim/smartredis/src/fortran/client/client_interfaces.inc b/2023-01/smartsim/smartredis/src/fortran/client/client_interfaces.inc index 76bf2acd..6e707dff 100644 --- a/2023-01/smartsim/smartredis/src/fortran/client/client_interfaces.inc +++ b/2023-01/smartsim/smartredis/src/fortran/client/client_interfaces.inc @@ -25,18 +25,43 @@ ! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. interface - function c_constructor(cluster, logger_name, logger_name_length, new_client) bind(c, name="SmartRedisCClient") + function c_simple_constructor(logger_name, logger_name_length, new_client) & + bind(c, name="SimpleCreateClient") use iso_c_binding, only : c_ptr, c_bool, c_char, c_size_t import :: enum_kind - integer(kind=enum_kind) :: c_constructor - logical(kind=c_bool), value :: cluster !< True if a database cluster is being used + integer(kind=enum_kind) :: c_simple_constructor character(kind=c_char) :: logger_name(*) integer(kind=c_size_t), value :: logger_name_length + type(c_ptr) :: new_client !< Receives the newly constructed client + end function c_simple_constructor +end interface +interface + function c_constructor(cfgopts, logger_name, logger_name_length, new_client) & + bind(c, name="CreateClient") + use iso_c_binding, only : c_ptr, c_bool, c_char, c_size_t + import :: enum_kind + integer(kind=enum_kind) :: c_constructor + type(c_ptr), value :: cfgopts + character(kind=c_char) :: logger_name(*) + integer(kind=c_size_t), value :: logger_name_length type(c_ptr) :: new_client !< Receives the newly constructed client end function c_constructor end interface +interface + function c_deprecated_constructor(cluster, logger_name, logger_name_length, new_client) & + bind(c, name="SmartRedisCClient") + use iso_c_binding, only : c_ptr, c_bool, c_char, c_size_t + import :: enum_kind + integer(kind=enum_kind) :: c_deprecated_constructor + logical(kind=c_bool), value :: cluster !< True if a database cluster is being used + character(kind=c_char) :: logger_name(*) + integer(kind=c_size_t), value :: logger_name_length + type(c_ptr) :: new_client !< Receives the newly constructed client + end function c_deprecated_constructor +end interface + interface function c_destructor(client) bind(c, name="DeleteCClient") use iso_c_binding, only : c_ptr @@ -152,3 +177,11 @@ interface logical(kind=c_bool) :: exists end function poll_key_c end interface + +interface + function client_to_string_c(client) bind(c, name="client_to_string") + use iso_c_binding, only : c_ptr, c_char + type(c_ptr) :: client_to_string_c + type(c_ptr), value :: client + end function client_to_string_c +end interface diff --git a/2023-01/smartsim/smartredis/src/fortran/client/ensemble_interfaces.inc b/2023-01/smartsim/smartredis/src/fortran/client/ensemble_interfaces.inc index e05b9fbc..04f54ec1 100644 --- a/2023-01/smartsim/smartredis/src/fortran/client/ensemble_interfaces.inc +++ b/2023-01/smartsim/smartredis/src/fortran/client/ensemble_interfaces.inc @@ -25,7 +25,8 @@ ! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
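The to_string/print_client pair rounds out client introspection; make_str above is the usual c_f_pointer idiom for adopting a C-owned buffer as a fixed-length Fortran string before copying it into an allocatable result. Typical use, with the type-bound names assumed:

    call client%print_client()              ! defaults to stderr
    write(*,'(A)') client%to_string()       ! or capture the text directly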
interface - function set_data_source_c( client, source_id, source_id_length ) bind(c, name="set_data_source") + function set_data_source_c( client, source_id, source_id_length ) & + bind(c, name="set_data_source") use iso_c_binding, only : c_ptr, c_char, c_bool, c_size_t import :: enum_kind integer(kind=enum_kind) :: set_data_source_c @@ -36,7 +37,8 @@ interface end interface interface - function use_model_ensemble_prefix_c( client, use_prefix) bind(c, name="use_model_ensemble_prefix") + function use_model_ensemble_prefix_c( client, use_prefix) & + bind(c, name="use_model_ensemble_prefix") use iso_c_binding, only : c_ptr, c_bool import :: enum_kind integer(kind=enum_kind) :: use_model_ensemble_prefix_c @@ -46,11 +48,23 @@ interface end interface interface - function use_tensor_ensemble_prefix_c( client, use_prefix) bind(c, name="use_tensor_ensemble_prefix") + function use_tensor_ensemble_prefix_c( client, use_prefix) & + bind(c, name="use_tensor_ensemble_prefix") use iso_c_binding, only : c_ptr, c_bool import :: enum_kind integer(kind=enum_kind) :: use_tensor_ensemble_prefix_c type(c_ptr), value :: client logical(kind=c_bool), value :: use_prefix end function use_tensor_ensemble_prefix_c +end interface + +interface + function use_dataset_ensemble_prefix_c( client, use_prefix) & + bind(c, name="use_dataset_ensemble_prefix") + use iso_c_binding, only : c_ptr, c_bool + import :: enum_kind + integer(kind=enum_kind) :: use_dataset_ensemble_prefix_c + type(c_ptr), value :: client + logical(kind=c_bool), value :: use_prefix + end function use_dataset_ensemble_prefix_c end interface \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/src/fortran/client/model_interfaces.inc b/2023-01/smartsim/smartredis/src/fortran/client/model_interfaces.inc index d3836bc1..ef7d4666 100644 --- a/2023-01/smartsim/smartredis/src/fortran/client/model_interfaces.inc +++ b/2023-01/smartsim/smartredis/src/fortran/client/model_interfaces.inc @@ -40,8 +40,9 @@ end interface interface function set_model_from_file_c( c_client, key, key_length, model_file, model_file_length, & - backend, backend_length, device, device_length, batch_size, min_batch_size, tag, tag_length, & - inputs, input_lengths, n_inputs, outputs, output_lengths, n_outputs ) bind(c, name="set_model_from_file") + backend, backend_length, device, device_length, batch_size, min_batch_size, min_batch_timeout, & + tag, tag_length, inputs, input_lengths, n_inputs, outputs, output_lengths, n_outputs ) & + bind(c, name="set_model_from_file") use iso_c_binding, only : c_ptr, c_size_t, c_int, c_char import :: enum_kind integer(kind=enum_kind) :: set_model_from_file_c @@ -59,6 +60,7 @@ interface !! null terminating character integer(kind=c_int), value, intent(in) :: batch_size !< The batch size for model execution integer(kind=c_int), value, intent(in) :: min_batch_size !< The minimum batch size for model execution + integer(kind=c_int), value, intent(in) :: min_batch_timeout !< Max time (ms) to wait for min batch size character(kind=c_char), intent(in) :: tag(*) !< A tag to attach to the model for information !! 
purposes integer(kind=c_size_t), value, intent(in) :: tag_length !< The length of the tag c-string, excluding null @@ -77,8 +79,9 @@ end interface interface function set_model_from_file_multigpu_c( c_client, key, key_length, model_file, model_file_length, & - backend, backend_length, first_gpu, num_gpus, batch_size, min_batch_size, tag, tag_length, & - inputs, input_lengths, n_inputs, outputs, output_lengths, n_outputs ) bind(c, name="set_model_from_file_multigpu") + backend, backend_length, first_gpu, num_gpus, batch_size, min_batch_size, min_batch_timeout, & + tag, tag_length, inputs, input_lengths, n_inputs, outputs, output_lengths, n_outputs) & + bind(c, name="set_model_from_file_multigpu") use iso_c_binding, only : c_ptr, c_size_t, c_int, c_char import :: enum_kind integer(kind=enum_kind) :: set_model_from_file_multigpu_c @@ -96,6 +99,7 @@ interface !! null terminating character integer(kind=c_int), value, intent(in) :: batch_size !< The batch size for model execution integer(kind=c_int), value, intent(in) :: min_batch_size !< The minimum batch size for model execution + integer(kind=c_int), value, intent(in) :: min_batch_timeout !< Max time (ms) to wait for min batch size character(kind=c_char), intent(in) :: tag(*) !< A tag to attach to the model for information !! purposes integer(kind=c_size_t), value, intent(in) :: tag_length !< The length of the tag c-string, excluding null @@ -114,8 +118,9 @@ end interface interface function set_model_c( c_client, key, key_length, model, model_length, & - backend, backend_length, device, device_length, batch_size, min_batch_size, tag, tag_length, & - inputs, input_lengths, n_inputs, outputs, output_lengths, n_outputs ) bind(c, name="set_model") + backend, backend_length, device, device_length, batch_size, min_batch_size, min_batch_timeout, & + tag, tag_length, inputs, input_lengths, n_inputs, outputs, output_lengths, n_outputs) & + bind(c, name="set_model") use iso_c_binding, only : c_ptr, c_size_t, c_int, c_char import :: enum_kind integer(kind=enum_kind) :: set_model_c @@ -133,6 +138,7 @@ interface !! null terminating character integer(kind=c_int), value, intent(in) :: batch_size !< The batch size for model execution integer(kind=c_int), value, intent(in) :: min_batch_size !< The minimum batch size for model execution + integer(kind=c_int), value, intent(in) :: min_batch_timeout !< Max time (ms) to wait for min batch size character(kind=c_char), intent(in) :: tag(*) !< A tag to attach to the model for information !! purposes integer(kind=c_size_t), value, intent(in) :: tag_length !< The length of the tag c-string, excluding null @@ -151,8 +157,9 @@ end interface interface function set_model_multigpu_c( c_client, key, key_length, model, model_length, & - backend, backend_length, first_gpu, num_gpus, batch_size, min_batch_size, tag, tag_length, & - inputs, input_lengths, n_inputs, outputs, output_lengths, n_outputs ) bind(c, name="set_model_multigpu") + backend, backend_length, first_gpu, num_gpus, batch_size, min_batch_size, min_batch_timeout, & + tag, tag_length, inputs, input_lengths, n_inputs, outputs, output_lengths, n_outputs) & + bind(c, name="set_model_multigpu") use iso_c_binding, only : c_ptr, c_size_t, c_int, c_char import :: enum_kind integer(kind=enum_kind) :: set_model_multigpu_c @@ -170,6 +177,7 @@ interface !! 
null terminating character integer(kind=c_int), value, intent(in) :: batch_size !< The batch size for model execution integer(kind=c_int), value, intent(in) :: min_batch_size !< The minimum batch size for model execution + integer(kind=c_int), value, intent(in) :: min_batch_timeout !< Max time (ms) to wait for min batch size character(kind=c_char), intent(in) :: tag(*) !< A tag to attach to the model for information !! purposes integer(kind=c_size_t), value, intent(in) :: tag_length !< The length of the tag c-string, excluding null diff --git a/2023-01/smartsim/smartredis/src/fortran/configoptions.F90 b/2023-01/smartsim/smartredis/src/fortran/configoptions.F90 new file mode 100644 index 00000000..7907fc65 --- /dev/null +++ b/2023-01/smartsim/smartredis/src/fortran/configoptions.F90 @@ -0,0 +1,219 @@ +! BSD 2-Clause License +! +! Copyright (c) 2021-2023, Hewlett Packard Enterprise +! All rights reserved. +! +! Redistribution and use in source and binary forms, with or without +! modification, are permitted provided that the following conditions are met: +! +! 1. Redistributions of source code must retain the above copyright notice, this +! list of conditions and the following disclaimer. +! +! 2. Redistributions in binary form must reproduce the above copyright notice, +! this list of conditions and the following disclaimer in the documentation +! and/or other materials provided with the distribution. +! +! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +! DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +! FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +module smartredis_configoptions + +use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int +use iso_c_binding, only : c_int8_t, c_int16_t, c_int32_t, c_int64_t, c_float, c_double, c_size_t +use iso_c_binding, only : c_loc, c_f_pointer + +use, intrinsic :: iso_fortran_env, only: stderr => error_unit + +use fortran_c_interop, only : convert_char_array_to_c, enum_kind, C_MAX_STRING + +implicit none; private + +include 'enum_fortran.inc' +include 'configoptions/configoptions_interfaces.inc' + +public :: enum_kind !< The kind of integer equivalent to a C enum. According to C an Fortran + !! standards this should be c_int, but is renamed here to ensure that + !! users do not have to import the iso_c_binding module into their + !! programs + +!> Contains multiple tensors and metadata used to describe an entire set of data +type, public :: configoptions_type + type(c_ptr) :: configoptions_ptr !< A pointer to the initialized dataset object + + contains + + !> Access the raw C pointer for the configoptions + procedure :: get_c_pointer + + ! Factory methods + !> Instantiate ConfigOptions, getting selections from environment variables + procedure :: create_configoptions_from_environment + + ! 
Option access + !> Retrieve the value of a numeric configuration option + procedure :: get_integer_option + !> Retrieve the value of a string configuration option + procedure :: get_string_option + !> Check whether a configuration option is set + procedure :: is_configured + + ! Option value overrides + !> Override the value of a numeric configuration option + procedure :: override_integer_option + !> Override the value of a string configuration option + procedure :: override_string_option + +end type configoptions_type + +contains + + +!> Access the raw C pointer for the ConfigOptions +function get_c_pointer(self) + type(c_ptr) :: get_c_pointer + class(configoptions_type), intent(in) :: self + get_c_pointer = self%configoptions_ptr +end function get_c_pointer + +!> Instantiate ConfigOptions, getting selections from environment variables +function create_configoptions_from_environment(self, db_suffix) result(code) + class(configoptions_type), intent(inout) :: self !< Receives the configoptions + character(len=*), intent(in) :: db_suffix !< Suffix to apply to environment + !! variables; empty string for none + integer(kind=enum_kind) :: code !< Result of the operation + + ! Local variables + integer(kind=c_size_t) :: db_suffix_length + character(kind=c_char, len=len_trim(db_suffix)) :: c_db_suffix + + db_suffix_length = len_trim(db_suffix) + c_db_suffix = trim(db_suffix) + + code = create_configoptions_from_environment_c( & + c_db_suffix, db_suffix_length, self%configoptions_ptr) +end function create_configoptions_from_environment + +!> Retrieve the value of a numeric configuration option +function get_integer_option(self, option_name, result) result(code) + class(configoptions_type), intent(in) :: self !< The configoptions + character(len=*), intent(in) :: option_name !< The name of the configuration + !! option to retrieve + integer(kind=c_int64_t), intent(inout) :: result !< Receives value of option + integer(kind=enum_kind) :: code + + ! Local variables + character(kind=c_char, len=len_trim(option_name)) :: c_option_name + integer(kind=c_size_t) :: c_option_name_length + + c_option_name = trim(option_name) + c_option_name_length = len_trim(option_name) + + code = get_integer_option_c( & + self%configoptions_ptr, c_option_name, c_option_name_length, result) +end function get_integer_option + +!> Retrieve the value of a string configuration option +function get_string_option(self, option_name, result) result(code) + class(configoptions_type), intent(in) :: self !< The configoptions + character(len=*), intent(in) :: option_name !< The name of the configuration + !! option to retrieve + character(len=:), allocatable, intent(out) :: result !< Receives value of option + integer(kind=enum_kind) :: code + + ! Local variables + character(kind=c_char, len=len_trim(option_name)) :: c_option_name + integer(kind=c_size_t) :: c_option_name_length + integer(kind=c_size_t) :: c_result_length, i + character(kind=c_char), dimension(:), pointer :: f_result_ptr + type(c_ptr) :: c_result_ptr + + c_option_name = trim(option_name) + c_option_name_length = len_trim(option_name) + + code = get_string_option_c( & + self%configoptions_ptr, c_option_name, c_option_name_length, & + c_result_ptr, c_result_length) + + ! Translate the string result if we got a valid one + if (code .eq. 
SRNoError) then + call c_f_pointer(c_result_ptr, f_result_ptr, [ c_result_length ]) + + ALLOCATE(character(len=c_result_length) :: result) + do i = 1, c_result_length + result(i:i) = f_result_ptr(i) + enddo + endif +end function get_string_option + +!> Check whether a configuration option is set +function is_configured(self, option_name, result) result(code) + class(configoptions_type), intent(in) :: self !< The configoptions + character(len=*), intent(in) :: option_name !< The name of the configuration + !! option to check + logical, intent(inout) :: result !< Receives value of option + integer(kind=enum_kind) :: code + + ! Local variables + character(kind=c_char, len=len_trim(option_name)) :: c_option_name + integer(kind=c_size_t) :: c_option_name_length + logical(kind=c_bool) :: c_result + + c_option_name = trim(option_name) + c_option_name_length = len_trim(option_name) + + code = is_configured_c( & + self%configoptions_ptr, c_option_name, c_option_name_length, c_result) + result = c_result +end function is_configured + +!> Override the value of a numeric configuration option +function override_integer_option(self, option_name, value) result(code) + class(configoptions_type), intent(in) :: self !< The configoptions + character(len=*), intent(in) :: option_name !< The name of the configuration + !! option to override + integer(kind=c_int64_t), intent(in) :: value !< The value to store for the option + integer(kind=enum_kind) :: code + + ! Local variables + character(kind=c_char, len=len_trim(option_name)) :: c_option_name + integer(kind=c_size_t) :: c_option_name_length + + c_option_name = trim(option_name) + c_option_name_length = len_trim(option_name) + + code = override_integer_option_c( & + self%configoptions_ptr, c_option_name, c_option_name_length, value) +end function override_integer_option + +!> Override the value of a string configuration option +function override_string_option(self, option_name, value) result(code) + class(configoptions_type), intent(in) :: self !< The configoptions + character(len=*), intent(in) :: option_name !< The name of the configuration + !! option to override + character(len=*), intent(in) :: value !< The value to store for the option + integer(kind=enum_kind) :: code + + ! Local variables + character(kind=c_char, len=len_trim(option_name)) :: c_option_name + character(kind=c_char, len=len_trim(value)) :: c_value + integer(kind=c_size_t) :: c_option_name_length, c_value_length + + c_option_name = trim(option_name) + c_option_name_length = len_trim(option_name) + c_value = trim(value) + c_value_length = len_trim(value) + + code = override_string_option_c( & + self%configoptions_ptr, c_option_name, c_option_name_length, & + c_value, c_value_length) +end function override_string_option + +end module smartredis_configoptions diff --git a/2023-01/smartsim/smartredis/src/fortran/configoptions/configoptions_interfaces.inc b/2023-01/smartsim/smartredis/src/fortran/configoptions/configoptions_interfaces.inc new file mode 100644 index 00000000..87b87e1e --- /dev/null +++ b/2023-01/smartsim/smartredis/src/fortran/configoptions/configoptions_interfaces.inc @@ -0,0 +1,102 @@ +! BSD 2-Clause License +! +! Copyright (c) 2021-2023, Hewlett Packard Enterprise +! All rights reserved. +! +! Redistribution and use in source and binary forms, with or without +! modification, are permitted provided that the following conditions are met: +! +! 1. Redistributions of source code must retain the above copyright notice, this +! list of conditions and the following disclaimer. 
+! +! 2. Redistributions in binary form must reproduce the above copyright notice, +! this list of conditions and the following disclaimer in the documentation +! and/or other materials provided with the distribution. +! +! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +! DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +! FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +interface + + function create_configoptions_from_environment_c( & + db_suffix, db_suffix_len, configoptions) & + bind(c, name="create_configoptions_from_environment") + use iso_c_binding, only : c_ptr, c_char, c_size_t + import :: enum_kind + integer(kind=enum_kind) :: create_configoptions_from_environment_c + character(kind=c_char) :: db_suffix(*) !< Suffix to add to environment vars + integer(kind=c_size_t), value :: db_suffix_len !< How many characters in db_suffix + type(c_ptr) :: configoptions !< Receives the constructed configoptions + end function create_configoptions_from_environment_c + + function get_integer_option_c(cfgopts, option_name, option_name_len, result) & + bind(c, name="get_integer_option") + use iso_c_binding, only : c_ptr, c_char, c_size_t, c_int64_t + import :: enum_kind + type(c_ptr), value :: cfgopts + integer(kind=enum_kind) :: get_integer_option_c + character(kind=c_char) :: option_name(*) !< The name of the configuration + !! option to retrieve + integer(kind=c_size_t), value :: option_name_len !< The length of the option_name string + integer(kind=c_int64_t) :: result !< Receives the integer result + end function get_integer_option_c + + function get_string_option_c(cfgopts, option_name, option_name_len, result, result_len) & + bind(c, name="get_string_option") + use iso_c_binding, only : c_ptr, c_char, c_size_t + import :: enum_kind + type(c_ptr), value :: cfgopts + integer(kind=enum_kind) :: get_string_option_c + character(kind=c_char) :: option_name(*) !< The name of the configuration + !! option to retrieve + integer(kind=c_size_t), value :: option_name_len !< The length of the option_name string + type(c_ptr) :: result !< Receives the string result + integer(kind=c_size_t) :: result_len !< The length of the result string + end function get_string_option_c + + function is_configured_c(cfgopts, option_name, option_name_len, result) & + bind(c, name="is_configured") + use iso_c_binding, only : c_ptr, c_char, c_size_t, c_bool + import :: enum_kind + type(c_ptr), value :: cfgopts + integer(kind=enum_kind) :: is_configured_c + character(kind=c_char) :: option_name(*) !< The name of the configuration + !! 
option to check + integer(kind=c_size_t), value :: option_name_len !< The length of the option_name string + logical(kind=c_bool) :: result !< Receives the boolean result + end function is_configured_c + + function override_integer_option_c(cfgopts, option_name, option_name_len, value) & + bind(c, name="override_integer_option") + use iso_c_binding, only : c_ptr, c_char, c_size_t, c_int64_t + import :: enum_kind + type(c_ptr), value :: cfgopts + integer(kind=enum_kind) :: override_integer_option_c + character(kind=c_char) :: option_name(*) !< The name of the configuration + !! option to override + integer(kind=c_size_t), value :: option_name_len !< The length of the option_name string + integer(kind=c_int64_t), value :: value !< The value to store for the option + end function override_integer_option_c + + function override_string_option_c(cfgopts, option_name, option_name_len, value, value_len) & + bind(c, name="override_string_option") + use iso_c_binding, only : c_ptr, c_char, c_size_t + import :: enum_kind + type(c_ptr), value :: cfgopts + integer(kind=enum_kind) :: override_string_option_c + character(kind=c_char) :: option_name(*) !< The name of the configuration + !! option to override + integer(kind=c_size_t), value :: option_name_len !< The length of the option_name string + character(kind=c_char) :: value(*) !< The value to store for the option + integer(kind=c_size_t), value :: value_len !< The length of the value string + end function override_string_option_c + +end interface diff --git a/2023-01/smartsim/smartredis/src/fortran/dataset.F90 b/2023-01/smartsim/smartredis/src/fortran/dataset.F90 index e36b7210..4565e076 100644 --- a/2023-01/smartsim/smartredis/src/fortran/dataset.F90 +++ b/2023-01/smartsim/smartredis/src/fortran/dataset.F90 @@ -24,11 +24,23 @@ ! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +! Note the below macros are here to allow compilation with Nvidia drivers +! While assumed size should be sufficient, this does not seem to work with +! Intel and GNU (however those have support for assumed rank) +#ifdef __NVCOMPILER +#define DIM_RANK_SPEC dimension(*) +#else +#define DIM_RANK_SPEC dimension(..) +#endif + module smartredis_dataset -use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int +use iso_c_binding, only : c_ptr, c_char, c_int use iso_c_binding, only : c_int8_t, c_int16_t, c_int32_t, c_int64_t, c_float, c_double, c_size_t use iso_c_binding, only : c_loc, c_f_pointer + +use, intrinsic :: iso_fortran_env, only: stderr => error_unit + use fortran_c_interop, only : enum_kind implicit none; private @@ -38,6 +50,7 @@ module smartredis_dataset include 'dataset/tensor_interfaces.inc' include 'dataset/unpack_dataset_tensor_interfaces.inc' include 'dataset/metadata_interfaces.inc' +#include "errors/errors_interfaces.inc" public :: enum_kind !< The kind of integer equivalent to a C enum. According to C an Fortran !! standards this should be c_int, but is renamed here to ensure that @@ -52,7 +65,7 @@ module smartredis_dataset !> Initialize a new dataset with a given name procedure :: initialize => initialize_dataset - !> Access the raw C pointer for the client + !> Access the raw C pointer for the dataset procedure :: get_c_pointer ! Metadata procedures @@ -82,6 +95,12 @@ module smartredis_dataset !> procedure :: get_tensor_names ! 
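Together, these bindings let an application inspect and adjust its database configuration at runtime after constructing a ConfigOptions object. A sketch, where the option names "THREADS" and "LOG_LEVEL" are purely hypothetical and error handling is elided:

    use iso_c_binding, only : c_int64_t
    integer(kind=c_int64_t) :: n_threads
    logical                 :: have_threads

    code = cfgopts%is_configured("THREADS", have_threads)
    if (have_threads) code = cfgopts%get_integer_option("THREADS", n_threads)
    code = cfgopts%override_string_option("LOG_LEVEL", "quiet")

!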
Not supported currently !> Retrieve the type for a tensor procedure :: get_tensor_type + !> Retrieve the dimensions for a tensor + procedure :: get_tensor_dims + !> Retrieve a string representation of the dataset + procedure :: to_string + !> Print a string representation of the dataset + procedure :: print_dataset ! Private procedures procedure, private :: add_tensor_i8 @@ -134,7 +153,7 @@ end function get_c_pointer !> Add a tensor to a dataset whose Fortran type is the equivalent 'int8' C-type function add_tensor_i8(self, name, data, dims) result(code) - integer(kind=c_int8_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=c_int8_t), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(dataset_type), intent(in) :: self !< Fortran SmartRedis dataset character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -150,7 +169,7 @@ end function add_tensor_i8 !> Add a tensor to a dataset whose Fortran type is the equivalent 'int16' C-type function add_tensor_i16(self, name, data, dims) result(code) - integer(kind=c_int16_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=c_int16_t), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(dataset_type), intent(in) :: self !< Fortran SmartRedis dataset character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -166,7 +185,7 @@ end function add_tensor_i16 !> Add a tensor to a dataset whose Fortran type is the equivalent 'int32' C-type function add_tensor_i32(self, name, data, dims) result(code) - integer(kind=c_int32_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=c_int32_t), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(dataset_type), intent(in) :: self !< Fortran SmartRedis dataset character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -182,7 +201,7 @@ end function add_tensor_i32 !> Add a tensor to a dataset whose Fortran type is the equivalent 'int64' C-type function add_tensor_i64(self, name, data, dims) result(code) - integer(kind=c_int64_t), dimension(..), target, intent(in) :: data !< Data to be sent + integer(kind=c_int64_t), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(dataset_type), intent(in) :: self !< Fortran SmartRedis dataset character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -198,7 +217,7 @@ end function add_tensor_i64 !> Add a tensor to a dataset whose Fortran type is the equivalent 'float' C-type function add_tensor_float(self, name, data, dims) result(code) - real(kind=c_float), dimension(..), target, intent(in) :: data !< Data to be sent + real(kind=c_float), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(dataset_type), intent(in) :: self !< Fortran SmartRedis dataset character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -214,7 +233,7 @@ end function add_tensor_float !> Add a tensor to a dataset whose Fortran type is the equivalent 'double' C-type function add_tensor_double(self, name, data, dims) result(code) - 
real(kind=c_double), dimension(..), target, intent(in) :: data !< Data to be sent + real(kind=c_double), DIM_RANK_SPEC, target, intent(in) :: data !< Data to be sent class(dataset_type), intent(in) :: self !< Fortran SmartRedis dataset character(len=*), intent(in) :: name !< The unique name used to store in the database integer, dimension(:), intent(in) :: dims !< The length of each dimension @@ -231,7 +250,7 @@ end function add_tensor_double !> Unpack a tensor into already allocated memory whose Fortran type is the equivalent 'int8' C-type function unpack_dataset_tensor_i8(self, name, result, dims) result(code) - integer(kind=c_int8_t), dimension(..), target, intent(out) :: result !< Array to be populated with data + integer(kind=c_int8_t), DIM_RANK_SPEC, target, intent(out) :: result !< Array to be populated with data class(dataset_type), intent(in) :: self !< Pointer to the initialized dataset character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -247,7 +266,7 @@ end function unpack_dataset_tensor_i8 !> Unpack a tensor into already allocated memory whose Fortran type is the equivalent 'int16' C-type function unpack_dataset_tensor_i16(self, name, result, dims) result(code) - integer(kind=c_int16_t), dimension(..), target, intent(out) :: result !< Array to be populated with data + integer(kind=c_int16_t), DIM_RANK_SPEC, target, intent(out) :: result !< Array to be populated with data class(dataset_type), intent(in) :: self !< Pointer to the initialized dataset character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -263,7 +282,7 @@ end function unpack_dataset_tensor_i16 !> Unpack a tensor into already allocated memory whose Fortran type is the equivalent 'int32' C-type function unpack_dataset_tensor_i32(self, name, result, dims) result(code) - integer(kind=c_int32_t), dimension(..), target, intent(out) :: result !< Array to be populated with data + integer(kind=c_int32_t), DIM_RANK_SPEC, target, intent(out) :: result !< Array to be populated with data class(dataset_type), intent(in) :: self !< Pointer to the initialized dataset character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -279,7 +298,7 @@ end function unpack_dataset_tensor_i32 !> Unpack a tensor into already allocated memory whose Fortran type is the equivalent 'int64' C-type function unpack_dataset_tensor_i64(self, name, result, dims) result(code) - integer(kind=c_int64_t), dimension(..), target, intent(out) :: result !< Array to be populated with data + integer(kind=c_int64_t), DIM_RANK_SPEC, target, intent(out) :: result !< Array to be populated with data class(dataset_type), intent(in) :: self !< Pointer to the initialized dataset character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -295,7 +314,7 @@ end function unpack_dataset_tensor_i64 !> Unpack a tensor into already allocated memory whose Fortran type is the equivalent 'float' C-type function unpack_dataset_tensor_float(self, name, result, dims) result(code) - real(kind=c_float), dimension(..), target, intent(out) :: result !< Array to be populated with data + real(kind=c_float), DIM_RANK_SPEC, target, intent(out) :: result !< Array 
to be populated with data class(dataset_type), intent(in) :: self !< Pointer to the initialized dataset character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -311,7 +330,7 @@ end function unpack_dataset_tensor_float !> Unpack a tensor into already allocated memory whose Fortran type is the equivalent 'double' C-type function unpack_dataset_tensor_double(self, name, result, dims) result(code) - real(kind=c_double), dimension(..), target, intent(out) :: result !< Array to be populated with data + real(kind=c_double), DIM_RANK_SPEC, target, intent(out) :: result !< Array to be populated with data class(dataset_type), intent(in) :: self !< Pointer to the initialized dataset character(len=*), intent(in) :: name !< The name to use to place the tensor integer, dimension(:), intent(in) :: dims !< Length along each dimension of the tensor @@ -470,4 +489,75 @@ function get_tensor_type(self, name, ttype) result(code) code = get_tensor_type_c(self%dataset_ptr, c_name, name_length, ttype) end function get_tensor_type + +!> Retrieve the dimensions for a tensor into a supplied buffer, or receive the +!! number of dimensions if the supplied buffer is too small +function get_tensor_dims(self, name, dims, dims_length) result(code) + class(dataset_type), intent(in) :: self !< The dataset + character(len=*), intent(in) :: name !< The name of the tensor + integer, dimension(:), target, intent(inout) :: dims !< Receives the tensor dimensions + integer, intent(inout) :: dims_length !< Receives the number of tensor dimensions + integer(kind=enum_kind) :: code !< Result of the operation + + ! local variables + character(kind=c_char, len=len_trim(name)) :: c_name + integer(kind=c_size_t) :: name_length + type(c_ptr) :: dims_ptr + integer(kind=c_size_t), dimension(size(dims)), target :: c_dims + integer(kind=c_size_t) :: c_dims_length + + c_name = trim(name) + name_length = len_trim(c_name) + + if (dims_length .gt. size(dims)) then + error stop 'dims_length .gt. size(dims) in call to get_tensor_dims' + end if + dims_ptr = c_loc(c_dims) + c_dims_length = dims_length + + code = get_tensor_dims_c(self%dataset_ptr, c_name, name_length, dims_ptr, c_dims_length) + dims = int(c_dims, kind(dims)) + dims_length = int(c_dims_length, kind(dims_length)) +end function get_tensor_dims + + +!> Retrieve a string representation of the dataset +function to_string(self) + character(kind=c_char, len=:), allocatable :: to_string !< Text version of dataset + class(dataset_type), intent(in) :: self !< The dataset + + type(c_ptr) :: c_ds_str + integer(kind=c_size_t) :: c_ds_str_len + + ! Get the string representation of the dataset from C + c_ds_str = dataset_to_string_c(self%dataset_ptr) + c_ds_str_len = c_strlen(c_ds_str) + to_string = make_str(c_ds_str, c_ds_str_len) +end function to_string + +!> Convert a pointer view of a string to a Fortran string +function make_str(strptr, str_len) + character(kind=c_char, len=:), allocatable :: make_str + type(c_ptr), intent(in), value :: strptr + integer(kind=c_size_t) :: str_len + + character(len=str_len, kind=c_char), pointer :: ptrview + call c_f_pointer(strptr, ptrview) + make_str = ptrview +end function make_str + +!> Print a string representation of the dataset +subroutine print_dataset(self, unit) + class(dataset_type), intent(in) :: self !< The dataset + integer, optional, intent(in) :: unit !< Unit to which to print the dataset + + ! 
Determine which unit to write to + integer :: target_unit + target_unit = STDERR + if (present(unit)) target_unit = unit + + ! Write the error to the target unit + write(target_unit,*) to_string(self) +end subroutine print_dataset + end module smartredis_dataset diff --git a/2023-01/smartsim/smartredis/src/fortran/dataset/dataset_interfaces.inc b/2023-01/smartsim/smartredis/src/fortran/dataset/dataset_interfaces.inc index 7302ec89..9a121d08 100644 --- a/2023-01/smartsim/smartredis/src/fortran/dataset/dataset_interfaces.inc +++ b/2023-01/smartsim/smartredis/src/fortran/dataset/dataset_interfaces.inc @@ -34,3 +34,11 @@ interface type(c_ptr) :: dataset !< Receives the constructed dataset end function dataset_constructor end interface + +interface + function dataset_to_string_c(client) bind(c, name="dataset_to_string") + use iso_c_binding, only : c_ptr, c_char + type(c_ptr) :: dataset_to_string_c + type(c_ptr), value :: client + end function dataset_to_string_c +end interface diff --git a/2023-01/smartsim/smartredis/src/fortran/dataset/tensor_interfaces.inc b/2023-01/smartsim/smartredis/src/fortran/dataset/tensor_interfaces.inc index 737bef37..43ca80c1 100644 --- a/2023-01/smartsim/smartredis/src/fortran/dataset/tensor_interfaces.inc +++ b/2023-01/smartsim/smartredis/src/fortran/dataset/tensor_interfaces.inc @@ -43,7 +43,7 @@ interface end interface interface - function get_tensor_type_c( dataset, name, name_length, ttype ) & + function get_tensor_type_c(dataset, name, name_length, ttype) & bind(c, name="get_tensor_type") use iso_c_binding, only : c_ptr, c_size_t, c_char import :: enum_kind @@ -55,3 +55,18 @@ interface integer(kind=enum_kind), intent(out) :: ttype !< Receives the tensor type end function get_tensor_type_c end interface + +interface + function get_tensor_dims_c(dataset, name, name_length, dims, dims_length) & + bind(c, name="get_tensor_dims") + use iso_c_binding, only : c_ptr, c_size_t, c_char + import :: enum_kind + integer(kind=enum_kind) :: get_tensor_dims_c + type(c_ptr), value, intent(in) :: dataset !< A c_ptr to the dataset object + character(kind=c_char), intent(in) :: name(*) !< The name of the tensor + integer(kind=c_size_t), value, intent(in) :: name_length !< The length of the name c-string, + !! 
excluding null terminating character + type(c_ptr), intent(in) :: dims !< Receives the tensor dimensions + integer(kind=c_size_t), intent(inout) :: dims_length !< Receives the tensor dimensions + end function get_tensor_dims_c +end interface diff --git a/2023-01/smartsim/smartredis/src/python/bindings/bind.cpp b/2023-01/smartsim/smartredis/src/python/bindings/bind.cpp index f5385a7c..f5d1304c 100644 --- a/2023-01/smartsim/smartredis/src/python/bindings/bind.cpp +++ b/2023-01/smartsim/smartredis/src/python/bindings/bind.cpp @@ -31,103 +31,129 @@ #include "pydataset.h" #include "pylogcontext.h" #include "srexception.h" +#include "pyconfigoptions.h" #include "logger.h" -//#include "srobject.h" -//#include "logcontext.h" using namespace SmartRedis; namespace py = pybind11; PYBIND11_MODULE(smartredisPy, m) { +#define CLASS_METHOD(class, name) def(#name, &class::name) + m.doc() = "smartredis client"; // optional module docstring // Python SRObject class + #define SROBJECT_METHOD(name) CLASS_METHOD(PySRObject, name) py::class_(m, "PySRObject") .def(py::init()) - .def("log_data", &PySRObject::log_data) - .def("log_warning", &PySRObject::log_warning) - .def("log_error", &PySRObject::log_error); + .SROBJECT_METHOD(log_data) + .SROBJECT_METHOD(log_warning) + .SROBJECT_METHOD(log_error); // Python LogContext class py::class_(m, "PyLogContext") .def(py::init()); // Python client class + #define CLIENT_METHOD(name) CLASS_METHOD(PyClient, name) py::class_(m, "PyClient") .def(py::init()) - .def("put_tensor", &PyClient::put_tensor) - .def("get_tensor", &PyClient::get_tensor) - .def("delete_tensor", &PyClient::delete_tensor) - .def("copy_tensor", &PyClient::copy_tensor) - .def("rename_tensor", &PyClient::rename_tensor) - .def("put_dataset", &PyClient::put_dataset) - .def("get_dataset", &PyClient::get_dataset) - .def("delete_dataset", &PyClient::delete_dataset) - .def("copy_dataset", &PyClient::copy_dataset) - .def("rename_dataset", &PyClient::rename_dataset) - .def("set_script_from_file", &PyClient::set_script_from_file) - .def("set_script_from_file_multigpu", &PyClient::set_script_from_file_multigpu) - .def("set_script", &PyClient::set_script) - .def("set_script_multigpu", &PyClient::set_script_multigpu) - .def("get_script", &PyClient::get_script) - .def("run_script", &PyClient::run_script) - .def("run_script_multigpu", &PyClient::run_script_multigpu) - .def("delete_script", &PyClient::delete_script) - .def("delete_script_multigpu", &PyClient::delete_script_multigpu) - .def("set_model", &PyClient::set_model) - .def("set_model_multigpu", &PyClient::set_model_multigpu) - .def("set_model_from_file", &PyClient::set_model_from_file) - .def("set_model_from_file_multigpu", &PyClient::set_model_from_file_multigpu) - .def("get_model", &PyClient::get_model) - .def("run_model", &PyClient::run_model) - .def("run_model_multigpu", &PyClient::run_model_multigpu) - .def("delete_model", &PyClient::delete_model) - .def("delete_model_multigpu", &PyClient::delete_model_multigpu) - .def("key_exists", &PyClient::key_exists) - .def("poll_key", &PyClient::poll_key) - .def("model_exists", &PyClient::model_exists) - .def("tensor_exists", &PyClient::tensor_exists) - .def("dataset_exists", &PyClient::dataset_exists) - .def("poll_model", &PyClient::poll_model) - .def("poll_tensor", &PyClient::poll_tensor) - .def("poll_dataset", &PyClient::poll_dataset) - .def("set_data_source", &PyClient::set_data_source) - .def("use_tensor_ensemble_prefix", &PyClient::use_tensor_ensemble_prefix) - .def("use_model_ensemble_prefix", 
&PyClient::use_model_ensemble_prefix) - .def("use_list_ensemble_prefix", &PyClient::use_list_ensemble_prefix) - .def("get_db_node_info", &PyClient::get_db_node_info) - .def("get_db_cluster_info", &PyClient::get_db_cluster_info) - .def("get_ai_info", &PyClient::get_ai_info) - .def("flush_db", &PyClient::flush_db) - .def("config_set", &PyClient::config_set) - .def("config_get", &PyClient::config_get) - .def("save", &PyClient::save) - .def("append_to_list", &PyClient::append_to_list) - .def("delete_list", &PyClient::delete_list) - .def("copy_list", &PyClient::copy_list) - .def("rename_list", &PyClient::rename_list) - .def("get_list_length", &PyClient::get_list_length) - .def("poll_list_length", &PyClient::poll_list_length) - .def("poll_list_length_gte", &PyClient::poll_list_length_gte) - .def("poll_list_length_lte", &PyClient::poll_list_length_lte) - .def("get_datasets_from_list", &PyClient::get_datasets_from_list) - .def("get_dataset_list_range", &PyClient::get_dataset_list_range); + .def(py::init()) + .def(py::init()) + .CLIENT_METHOD(put_tensor) + .CLIENT_METHOD(get_tensor) + .CLIENT_METHOD(delete_tensor) + .CLIENT_METHOD(copy_tensor) + .CLIENT_METHOD(rename_tensor) + .CLIENT_METHOD(put_dataset) + .CLIENT_METHOD(get_dataset) + .CLIENT_METHOD(delete_dataset) + .CLIENT_METHOD(copy_dataset) + .CLIENT_METHOD(rename_dataset) + .CLIENT_METHOD(set_script_from_file) + .CLIENT_METHOD(set_script_from_file_multigpu) + .CLIENT_METHOD(set_script) + .CLIENT_METHOD(set_script_multigpu) + .CLIENT_METHOD(get_script) + .CLIENT_METHOD(run_script) + .CLIENT_METHOD(run_script_multigpu) + .CLIENT_METHOD(delete_script) + .CLIENT_METHOD(delete_script_multigpu) + .CLIENT_METHOD(set_model) + .CLIENT_METHOD(set_model_multigpu) + .CLIENT_METHOD(set_model_from_file) + .CLIENT_METHOD(set_model_from_file_multigpu) + .CLIENT_METHOD(get_model) + .CLIENT_METHOD(run_model) + .CLIENT_METHOD(run_model_multigpu) + .CLIENT_METHOD(delete_model) + .CLIENT_METHOD(delete_model_multigpu) + .CLIENT_METHOD(key_exists) + .CLIENT_METHOD(poll_key) + .CLIENT_METHOD(model_exists) + .CLIENT_METHOD(tensor_exists) + .CLIENT_METHOD(dataset_exists) + .CLIENT_METHOD(poll_model) + .CLIENT_METHOD(poll_tensor) + .CLIENT_METHOD(poll_dataset) + .CLIENT_METHOD(set_data_source) + .CLIENT_METHOD(use_tensor_ensemble_prefix) + .CLIENT_METHOD(use_dataset_ensemble_prefix) + .CLIENT_METHOD(use_model_ensemble_prefix) + .CLIENT_METHOD(use_list_ensemble_prefix) + .CLIENT_METHOD(get_db_node_info) + .CLIENT_METHOD(get_db_cluster_info) + .CLIENT_METHOD(get_ai_info) + .CLIENT_METHOD(flush_db) + .CLIENT_METHOD(config_set) + .CLIENT_METHOD(config_get) + .CLIENT_METHOD(save) + .CLIENT_METHOD(append_to_list) + .CLIENT_METHOD(delete_list) + .CLIENT_METHOD(copy_list) + .CLIENT_METHOD(rename_list) + .CLIENT_METHOD(get_list_length) + .CLIENT_METHOD(poll_list_length) + .CLIENT_METHOD(poll_list_length_gte) + .CLIENT_METHOD(poll_list_length_lte) + .CLIENT_METHOD(get_datasets_from_list) + .CLIENT_METHOD(get_dataset_list_range) + .CLIENT_METHOD(set_model_chunk_size) + .CLIENT_METHOD(to_string) + ; // Python Dataset class + #define DATASET_METHOD(name) CLASS_METHOD(PyDataset, name) py::class_(m, "PyDataset") .def(py::init()) - .def("add_tensor", &PyDataset::add_tensor) - .def("get_tensor", &PyDataset::get_tensor) - .def("add_meta_scalar", &PyDataset::add_meta_scalar) - .def("add_meta_string", &PyDataset::add_meta_string) - .def("get_meta_scalars", &PyDataset::get_meta_scalars) - .def("get_meta_strings", &PyDataset::get_meta_strings) - .def("get_name", &PyDataset::get_name) - 
.def("get_metadata_field_names", &PyDataset::get_metadata_field_names) - .def("get_metadata_field_type", &PyDataset::get_metadata_field_type) - .def("get_tensor_type", &PyDataset::get_tensor_type) - .def("get_tensor_names", &PyDataset::get_tensor_names); + .DATASET_METHOD(add_tensor) + .DATASET_METHOD(get_tensor) + .DATASET_METHOD(add_meta_scalar) + .DATASET_METHOD(add_meta_string) + .DATASET_METHOD(get_meta_scalars) + .DATASET_METHOD(get_meta_strings) + .DATASET_METHOD(get_name) + .DATASET_METHOD(get_metadata_field_names) + .DATASET_METHOD(get_metadata_field_type) + .DATASET_METHOD(get_tensor_type) + .DATASET_METHOD(get_tensor_names) + .DATASET_METHOD(get_tensor_dims) + .DATASET_METHOD(to_string) + ; + + // Python ConfigOptions class + #define CONFIGOPTIONS_METHOD(name) CLASS_METHOD(PyConfigOptions, name) + py::class_(m, "PyConfigOptions") + .def_static("create_from_environment", + static_cast( + &PyConfigOptions::create_from_environment)) + .CONFIGOPTIONS_METHOD(get_integer_option) + .CONFIGOPTIONS_METHOD(get_string_option) + .CONFIGOPTIONS_METHOD(is_configured) + .CONFIGOPTIONS_METHOD(override_integer_option) + .CONFIGOPTIONS_METHOD(override_string_option) + ; // Logging functions m.def("cpp_log_data", py::overload_cast(&log_data)) diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/__init__.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/__init__.py index c130d92e..611f2c91 100644 --- a/2023-01/smartsim/smartredis/src/python/module/smartredis/__init__.py +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/__init__.py @@ -26,6 +26,7 @@ __all__ = [ "Client", + "ConfigOptions", "Dataset", "SRObject", "LogContext", @@ -40,13 +41,10 @@ ] from .client import Client +from .configoptions import ConfigOptions from .dataset import Dataset from .dataset_utils import DatasetConverter -from .logger import ( - log_data, log_warning, log_error -) -from .srobject import SRObject from .logcontext import LogContext -from .smartredisPy import ( - LLQuiet, LLInfo, LLDebug, LLDeveloper -) +from .logger import log_data, log_error, log_warning +from .smartredisPy import LLDebug, LLDeveloper, LLInfo, LLQuiet +from .srobject import SRObject diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/client.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/client.py index 6ac1f68f..1b1c830a 100644 --- a/2023-01/smartsim/smartredis/src/python/module/smartredis/client.py +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/client.py @@ -24,24 +24,83 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# pylint: disable=too-many-lines,too-many-public-methods
 import inspect
 import os
 import os.path as osp
-import functools
-
+import typing as t
 import numpy as np
 
 from .dataset import Dataset
-from .srobject import SRObject
+from .configoptions import ConfigOptions
+from .error import RedisConnectionError
 from .smartredisPy import PyClient
-from .util import Dtypes, init_default, exception_handler, typecheck
-
-from .error import *
 from .smartredisPy import RedisReplyError as PybindRedisReplyError
+from .srobject import SRObject
+from .util import Dtypes, exception_handler, init_default, typecheck
+
 
 class Client(SRObject):
-    def __init__(self, address=None, cluster=False, logger_name="default"):
-        """Initialize a RedisAI client
+    def __init__(self, *a: t.Any, **kw: t.Any):
+        """Initialize a SmartRedis client
+
+        At this time, the Client can be initialized with one of two
+        signatures. The first version is preferred, though the second is
+        supported (primarily for use in driver scripts). Note that the
+        order of the first two parameters was swapped in the second
+        signature relative to previous releases of SmartRedis; this was
+        necessary to remove ambiguity.
+
+        Client(config_options: ConfigOptions=None,
+               logger_name: str="Default")
+        Client(cluster: bool, address: Optional[str]=None,
+               logger_name: str="Default")
+
+        For detailed information on the first signature, please refer
+        to the __standard_construction() method below.
+
+        For detailed information on the second signature, please refer
+        to the __address_construction() method below.
+
+        :param a: The positional arguments supplied to this method;
+                  see above for valid options
+        :type a: tuple[any]
+        :param kw: Keyword arguments supplied to this method;
+                   see above for valid options
+        :type kw: dict[string, any]
+        :raises RedisConnectionError: if connection initialization fails
+        """
+        if a:
+            if isinstance(a[0], bool):
+                for arg in kw:
+                    if arg not in ["cluster", "address", "logger_name"]:
+                        raise TypeError(
+                            f"__init__() got an unexpected keyword argument '{arg}'"
+                        )
+                pyclient = self.__address_construction(*a, **kw)
+            elif isinstance(a[0], ConfigOptions) or a[0] is None:
+                pyclient = self.__standard_construction(*a, **kw)
+            else:
+                raise TypeError(f"Invalid type for argument 0: {type(a[0])}")
+        else:
+            # Only kwargs in the call
+            if "address" in kw or "cluster" in kw:
+                pyclient = self.__address_construction(*a, **kw)
+            else:
+                pyclient = self.__standard_construction(*a, **kw)
+        super().__init__(pyclient)
+
+    def __address_construction(
+        self,
+        cluster: bool,
+        address: t.Optional[str] = None,
+        logger_name: str = "Default"
+    ) -> PyClient:
+        """Initialize a SmartRedis client
+
+        This construction method is primarily intended for use by driver
+        scripts. It is preferred to set up configuration via environment
+        variables.
 
         For clusters, the address can be a single tcp/ip address and port
         of a database node. The rest of the cluster will be discovered
@@ -50,9 +109,10 @@ def __init__(self, address=None, cluster=False, logger_name="default"):
         If an address is not set, the client will look for the environment
         variable ``SSDB`` (e.g. SSDB="127.0.0.1:6379;")
 
-        :param address: Address of the database
         :param cluster: True if connecting to a redis cluster, defaults to False
-        :type cluster: bool, optional
+        :type cluster: bool
+        :param address: Address of the database
+        :type address: str, optional
         :param logger_name: Identifier for the current client
         :type logger_name: str
         :raises RedisConnectionError: if connection initialization fails
@@ -62,20 +122,52 @@ def __init__(self, address=None, cluster=False, logger_name="default"):
         if "SSDB" not in os.environ:
             raise RedisConnectionError("Could not connect to database. $SSDB not set")
         try:
-            super().__init__(PyClient(cluster, logger_name))
+            return PyClient(cluster, logger_name)
+        except (PybindRedisReplyError, RuntimeError) as e:
+            raise RedisConnectionError(str(e)) from None
+
+    @staticmethod
+    def __standard_construction(
+        config_options: t.Optional[ConfigOptions] = None,
+        logger_name: str = "Default"
+    ) -> PyClient:
+        """Initialize a SmartRedis client
+
+        The address of the Redis database is expected to be found in the
+        SSDB environment variable (or a suffixed variable if a suffix was
+        used when building the config_options object).
+
+        :param config_options: Source for configuration data
+        :type config_options: ConfigOptions, optional
+        :param logger_name: Identifier for the current client
+        :type logger_name: str
+        :raises RedisConnectionError: if connection initialization fails
+        """
+        try:
+            if config_options:
+                pybind_config_options = config_options.get_data()
+                return PyClient(pybind_config_options, logger_name)
+            return PyClient(logger_name)
         except PybindRedisReplyError as e:
             raise RedisConnectionError(str(e)) from None
         except RuntimeError as e:
             raise RedisConnectionError(str(e)) from None
 
-    @property
-    def _client(self):
-        """Alias _srobject to _client
+    def __str__(self) -> str:
+        """Create a string representation of the client
+
+        :return: A string representation of the client
+        :rtype: str
         """
+        return self._client.to_string()
+
+    @property
+    def _client(self) -> PyClient:
+        """Alias _srobject to _client"""
         return self._srobject
 
     @exception_handler
-    def put_tensor(self, name, data):
+    def put_tensor(self, name: str, data: np.ndarray) -> None:
         """Put a tensor to a Redis database
 
         The final tensor key under which the tensor is stored
@@ -94,7 +186,7 @@ def put_tensor(self, name, data):
         self._client.put_tensor(name, dtype, data)
 
     @exception_handler
-    def get_tensor(self, name):
+    def get_tensor(self, name: str) -> np.ndarray:
         """Get a tensor from the database
 
         The tensor key used to locate the tensor
@@ -112,7 +204,7 @@ def get_tensor(self, name):
         return self._client.get_tensor(name)
 
     @exception_handler
-    def delete_tensor(self, name):
+    def delete_tensor(self, name: str) -> None:
         """Delete a tensor from the database
 
         The tensor key used to locate the tensor to be deleted
@@ -128,7 +220,7 @@ def delete_tensor(self, name):
         self._client.delete_tensor(name)
 
     @exception_handler
-    def copy_tensor(self, src_name, dest_name):
+    def copy_tensor(self, src_name: str, dest_name: str) -> None:
         """Copy a tensor at one name to another name
 
         The source and destination tensor keys used to locate
@@ -147,7 +239,7 @@ def copy_tensor(self, src_name, dest_name):
         self._client.copy_tensor(src_name, dest_name)
 
     @exception_handler
-    def rename_tensor(self, old_name, new_name):
+    def rename_tensor(self, old_name: str, new_name: str) -> None:
         """Rename a tensor in the database
 
         The old and new tensor keys used to find and relocate
@@ -166,13 +258,13 @@
         self._client.rename_tensor(old_name, new_name)
 
     @exception_handler
-    def put_dataset(self, dataset):
+    def put_dataset(self, dataset: Dataset) -> None:
         """Put a Dataset instance into the database
 
         The final dataset key under which the dataset is stored
         is generated from the name that was supplied when the
         dataset was created and may be prefixed. See
-        use_tensor_ensemble_prefix() for more details.
+        use_dataset_ensemble_prefix() for more details.
 
         All associated tensors and metadata within the Dataset
         instance will also be stored.
@@ -187,13 +279,13 @@
         self._client.put_dataset(pybind_dataset)
 
     @exception_handler
-    def get_dataset(self, name):
+    def get_dataset(self, name: str) -> Dataset:
         """Get a dataset from the database
 
         The dataset key used to locate the dataset
         may be formed by applying a prefix to the supplied
         name. See set_data_source()
-        and use_tensor_ensemble_prefix() for more details.
+        and use_dataset_ensemble_prefix() for more details.
 
         :param name: name the dataset is stored under
         :type name: str
@@ -207,13 +299,13 @@
         return python_dataset
 
     @exception_handler
-    def delete_dataset(self, name):
+    def delete_dataset(self, name: str) -> None:
         """Delete a dataset within the database
 
         The dataset key used to locate the dataset to be deleted
         may be formed by applying a prefix to the supplied
         name. See set_data_source()
-        and use_tensor_ensemble_prefix() for more details.
+        and use_dataset_ensemble_prefix() for more details.
 
         :param name: name of the dataset
         :type name: str
@@ -223,13 +315,13 @@
         self._client.delete_dataset(name)
 
     @exception_handler
-    def copy_dataset(self, src_name, dest_name):
+    def copy_dataset(self, src_name: str, dest_name: str) -> None:
         """Copy a dataset from one key to another
 
         The source and destination dataset keys used to
         locate the dataset may be formed by applying
         prefixes to the supplied src_name and dest_name.
         See set_data_source()
-        and use_tensor_ensemble_prefix() for more details.
+        and use_dataset_ensemble_prefix() for more details.
 
         :param src_name: source name for dataset to be copied
         :type src_name: str
@@ -242,13 +334,13 @@
         self._client.copy_dataset(src_name, dest_name)
 
     @exception_handler
-    def rename_dataset(self, old_name, new_name):
+    def rename_dataset(self, old_name: str, new_name: str) -> None:
         """Rename a dataset in the database
 
         The old and new dataset keys used to find and relocate
         the dataset may be formed by applying prefixes to the
         supplied old_name and new_name. See set_data_source()
-        and use_tensor_ensemble_prefix() for more details.
+        and use_dataset_ensemble_prefix() for more details.
 
         :param old_name: original name of the dataset to be renamed
         :type old_name: str
@@ -261,7 +353,9 @@
         self._client.rename_dataset(old_name, new_name)
 
     @exception_handler
-    def set_function(self, name, function, device="CPU"):
+    def set_function(
+        self, name: str, function: t.Callable, device: str = "CPU"
+    ) -> None:
         """Set a callable function into the database
 
         The final script key used to store the function may be formed
@@ -286,13 +380,17 @@
         typecheck(name, "name", str)
         typecheck(device, "device", str)
         if not callable(function):
-            raise TypeError(f"Argument provided for function, {type(function)}, is not callable")
+            raise TypeError(
+                f"Argument provided for function, {type(function)}, is not callable"
+            )
         device = self.__check_device(device)
         fn_src = inspect.getsource(function)
         self._client.set_script(name, device, fn_src)
 
     @exception_handler
-    def set_function_multigpu(self, name, function, first_gpu, num_gpus):
+    def set_function_multigpu(
+        self, name: str, function: t.Callable, first_gpu: int, num_gpus: int
+    ) -> None:
         """Set a callable function into the database for use
         in a multi-GPU system
 
@@ -319,12 +417,14 @@
         typecheck(first_gpu, "first_gpu", int)
         typecheck(num_gpus, "num_gpus", int)
         if not callable(function):
-            raise TypeError(f"Argument provided for function, {type(function)}, is not callable")
+            raise TypeError(
+                f"Argument provided for function, {type(function)}, is not callable"
+            )
         fn_src = inspect.getsource(function)
         self._client.set_script_multigpu(name, fn_src, first_gpu, num_gpus)
 
     @exception_handler
-    def set_script(self, name, script, device="CPU"):
+    def set_script(self, name: str, script: str, device: str = "CPU") -> None:
         """Store a TorchScript at a key in the database
 
         The final script key used to store the script may be formed
@@ -349,7 +449,9 @@
         self._client.set_script(name, device, script)
 
     @exception_handler
-    def set_script_multigpu(self, name, script, first_gpu, num_gpus):
+    def set_script_multigpu(
+        self, name: str, script: str, first_gpu: int, num_gpus: int
+    ) -> None:
         """Store a TorchScript at a key in the database
 
         The final script key used to store the script may be formed
@@ -373,7 +475,7 @@
         self._client.set_script_multigpu(name, script, first_gpu, num_gpus)
 
     @exception_handler
-    def set_script_from_file(self, name, file, device="CPU"):
+    def set_script_from_file(self, name: str, file: str, device: str = "CPU") -> None:
         """Same as Client.set_script, but from file
 
         The final script key used to store the script may be formed
@@ -396,7 +498,9 @@
         self._client.set_script_from_file(name, device, file_path)
 
     @exception_handler
-    def set_script_from_file_multigpu(self, name, file, first_gpu, num_gpus):
+    def set_script_from_file_multigpu(
+        self, name: str, file: str, first_gpu: int, num_gpus: int
+    ) -> None:
         """Same as Client.set_script_multigpu, but from file
 
         The final script key used to store the script may be formed
@@ -421,7 +525,7 @@
         self._client.set_script_from_file_multigpu(name, file_path, first_gpu, num_gpus)
 
     @exception_handler
-    def get_script(self, name):
+    def get_script(self, name: str) -> str:
         """Retrieve a Torchscript stored in the database
 
         The script key used to locate the script
@@ -440,7 +544,13 @@
         return script
 
     @exception_handler
-    def run_script(self, name, fn_name, inputs, outputs):
+    def run_script(
+        self,
+        name: str,
+        fn_name: str,
+        inputs: t.Union[str, t.List[str]],
+        outputs: t.Union[str, t.List[str]]
+    ) -> None:
         """Execute TorchScript stored inside the database
 
         The script key used to locate the script to be run
@@ -455,21 +565,27 @@
         :param fn_name: name of a function within the script to execute
         :type fn_name: str
         :param inputs: database tensor names to use as script inputs
-        :type inputs: list[str]
+        :type inputs: str | list[str]
         :param outputs: database tensor names to receive script outputs
-        :type outputs: list[str]
+        :type outputs: str | list[str]
         :raises RedisReplyError: if script execution fails
         """
         typecheck(name, "name", str)
         typecheck(fn_name, "fn_name", str)
-        typecheck(inputs, "inputs", list)
-        typecheck(outputs, "outputs", list)
         inputs, outputs = self.__check_tensor_args(inputs, outputs)
         self._client.run_script(name, fn_name, inputs, outputs)
 
     @exception_handler
     def run_script_multigpu(
-        self, name, fn_name, inputs, outputs, offset, first_gpu, num_gpus):
+        self,
+        name: str,
+        fn_name: str,
+        inputs: t.Union[str, t.List[str]],
+        outputs: t.Union[str, t.List[str]],
+        offset: int,
+        first_gpu: int,
+        num_gpus: int,
+    ) -> None:
         """Execute TorchScript stored inside the database
 
         The script key used to locate the script to be run
@@ -484,9 +600,9 @@
         :param fn_name: name of a function within the script to execute
         :type fn_name: str
         :param inputs: database tensor names to use as script inputs
-        :type inputs: list[str]
+        :type inputs: str | list[str]
         :param outputs: database tensor names to receive script outputs
-        :type outputs: list[str]
+        :type outputs: str | list[str]
         :param offset: index of the current image, such as a processor
             ID or MPI rank
         :type offset: int
@@ -498,17 +614,16 @@
         typecheck(name, "name", str)
         typecheck(fn_name, "fn_name", str)
-        typecheck(inputs, "inputs", list)
-        typecheck(outputs, "outputs", list)
         typecheck(offset, "offset", int)
         typecheck(first_gpu, "first_gpu", int)
         typecheck(num_gpus, "num_gpus", int)
         inputs, outputs = self.__check_tensor_args(inputs, outputs)
         self._client.run_script_multigpu(
-            name, fn_name, inputs, outputs, offset, first_gpu, num_gpus)
+            name, fn_name, inputs, outputs, offset, first_gpu, num_gpus
+        )
 
     @exception_handler
-    def delete_script(self, name):
+    def delete_script(self, name: str) -> None:
         """Remove a script from the database
 
         The script key used to locate the script to be run
@@ -524,7 +639,7 @@
         self._client.delete_script(name)
 
     @exception_handler
-    def delete_script_multigpu(self, name, first_gpu, num_gpus):
+    def delete_script_multigpu(self, name: str, first_gpu: int, num_gpus: int) -> None:
         """Remove a script from the database
 
         The script key used to locate the script to be run
@@ -546,7 +661,7 @@
         self._client.delete_script_multigpu(name, first_gpu, num_gpus)
 
     @exception_handler
-    def get_model(self, name):
+    def get_model(self, name: str) -> bytes:
         """Get a stored model
 
         The model key used to locate the model
@@ -567,16 +682,17 @@
     @exception_handler
     def set_model(
         self,
-        name,
-        model,
-        backend,
-        device="CPU",
-        batch_size=0,
-        min_batch_size=0,
-        tag="",
-        inputs=None,
-        outputs=None,
-    ):
+        name: str,
+        model: bytes,
+        backend: str,
+        device: str = "CPU",
+        batch_size: int = 0,
+        min_batch_size: int = 0,
+        min_batch_timeout: int = 0,
+        tag: str = "",
+        inputs: t.Optional[t.Union[str, t.List[str]]] = None,
+        outputs: t.Optional[t.Union[str, t.List[str]]] = None,
+    ) -> None:
         """Put a TF, TF-lite, PT, or ONNX model in the database
 
         The final model key used to store the model
@@ -600,12 +716,14 @@
         :type batch_size: int, optional
         :param min_batch_size: minimum batch size for model execution, defaults to 0
         :type min_batch_size: int, optional
+        :param min_batch_timeout: Max time (ms) to wait for min batch size
+        :type min_batch_timeout: int, optional
         :param tag: additional tag for model information, defaults to ""
         :type tag: str, optional
         :param inputs: model inputs (TF only), defaults to None
-        :type inputs: list[str], optional
+        :type inputs: str | list[str] | None
         :param outputs: model outputs (TF only), defaults to None
-        :type outputs: list[str], optional
+        :type outputs: str | list[str] | None
         :raises RedisReplyError: if model fails to set
         """
         typecheck(name, "name", str)
@@ -613,6 +731,7 @@
         typecheck(device, "device", str)
         typecheck(batch_size, "batch_size", int)
         typecheck(min_batch_size, "min_batch_size", int)
+        typecheck(min_batch_timeout, "min_batch_timeout", int)
         typecheck(tag, "tag", str)
         device = self.__check_device(device)
         backend = self.__check_backend(backend)
@@ -624,6 +743,7 @@
             device,
             batch_size,
             min_batch_size,
+            min_batch_timeout,
             tag,
             inputs,
             outputs,
@@ -632,17 +752,18 @@
     @exception_handler
     def set_model_multigpu(
         self,
-        name,
-        model,
-        backend,
-        first_gpu,
-        num_gpus,
-        batch_size=0,
-        min_batch_size=0,
-        tag="",
-        inputs=None,
-        outputs=None
-    ):
+        name: str,
+        model: bytes,
+        backend: str,
+        first_gpu: int,
+        num_gpus: int,
+        batch_size: int = 0,
+        min_batch_size: int = 0,
+        min_batch_timeout: int = 0,
+        tag: str = "",
+        inputs: t.Optional[t.Union[str, t.List[str]]] = None,
+        outputs: t.Optional[t.Union[str, t.List[str]]] = None,
+    ) -> None:
         """Put a TF, TF-lite, PT, or ONNX model in the database
         for use in a multi-GPU system
 
@@ -667,12 +788,14 @@
         :type batch_size: int, optional
         :param min_batch_size: minimum batch size for model execution, defaults to 0
         :type min_batch_size: int, optional
+        :param min_batch_timeout: Max time (ms) to wait for min batch size
+        :type min_batch_timeout: int, optional
         :param tag: additional tag for model information, defaults to ""
         :type tag: str, optional
         :param inputs: model inputs (TF only), defaults to None
-        :type inputs: list[str], optional
+        :type inputs: str | list[str] | None
         :param outputs: model outputs (TF only), defaults to None
-        :type outputs: list[str], optional
+        :type outputs: str | list[str] | None
         :raises RedisReplyError: if model fails to set
         """
         typecheck(name, "name", str)
@@ -681,6 +804,7 @@
         typecheck(num_gpus, "num_gpus", int)
         typecheck(batch_size, "batch_size", int)
         typecheck(min_batch_size, "min_batch_size", int)
+        typecheck(min_batch_timeout, "min_batch_timeout", int)
         typecheck(tag, "tag", str)
         backend = self.__check_backend(backend)
         inputs, outputs = self.__check_tensor_args(inputs, outputs)
@@ -692,24 +816,26 @@
             num_gpus,
             batch_size,
             min_batch_size,
+            min_batch_timeout,
             tag,
             inputs,
-            outputs
+            outputs,
         )
 
     @exception_handler
     def set_model_from_file(
         self,
-        name,
-        model_file,
-        backend,
-        device="CPU",
-        batch_size=0,
-        min_batch_size=0,
-        tag="",
-        inputs=None,
-        outputs=None,
-    ):
+        name: str,
+        model_file: str,
+        backend: str,
+        device: str = "CPU",
+        batch_size: int = 0,
+        min_batch_size: int = 0,
+        min_batch_timeout: int = 0,
+        tag: str = "",
+        inputs: t.Optional[t.Union[str, t.List[str]]] = None,
+        outputs: t.Optional[t.Union[str, t.List[str]]] = None,
+    ) -> None:
         """Put a TF, TF-lite, PT, or ONNX model from file in the database
 
         The final model key used to store the model
@@ -733,12 +859,14 @@
         :type batch_size: int, optional
         :param min_batch_size: minimum batch size for model execution, defaults to 0
         :type min_batch_size: int, optional
+        :param min_batch_timeout: Max time (ms) to wait for min batch size
+        :type min_batch_timeout: int, optional
         :param tag: additional tag for model information, defaults to ""
         :type tag: str, optional
         :param inputs: model inputs (TF only), defaults to None
-        :type inputs: list[str], optional
+        :type inputs: str | list[str] | None
         :param outputs: model outputs (TF only), defaults to None
-        :type outputs: list[str], optional
+        :type outputs: str | list[str] | None
        :raises RedisReplyError: if model fails to set
         """
         typecheck(name, "name", str)
@@ -747,6 +875,7 @@
         typecheck(device, "device", str)
         typecheck(batch_size, "batch_size", int)
         typecheck(min_batch_size, "min_batch_size", int)
+        typecheck(min_batch_timeout, "min_batch_timeout", int)
         typecheck(tag, "tag", str)
         device = self.__check_device(device)
         backend = self.__check_backend(backend)
@@ -759,6 +888,7 @@
             device,
             batch_size,
             min_batch_size,
+            min_batch_timeout,
             tag,
             inputs,
             outputs,
@@ -767,17 +897,18 @@
     @exception_handler
     def set_model_from_file_multigpu(
         self,
-        name,
-        model_file,
-        backend,
-        first_gpu,
-        num_gpus,
-        batch_size=0,
-        min_batch_size=0,
-        tag="",
-        inputs=None,
-        outputs=None,
-    ):
+        name: str,
+        model_file: str,
+        backend: str,
+        first_gpu: int,
+        num_gpus: int,
+        batch_size: int = 0,
+        min_batch_size: int = 0,
+        min_batch_timeout: int = 0,
+        tag: str = "",
+        inputs: t.Optional[t.Union[str, t.List[str]]] = None,
+        outputs: t.Optional[t.Union[str, t.List[str]]] = None,
+    ) -> None:
         """Put a TF, TF-lite, PT, or ONNX model from file in the database
         for use in a multi-GPU system
 
@@ -802,12 +933,14 @@
         :type batch_size: int, optional
         :param min_batch_size: minimum batch size for model execution, defaults to 0
         :type min_batch_size: int, optional
+        :param min_batch_timeout: Max time (ms) to wait for min batch size
+        :type min_batch_timeout: int, optional
         :param tag: additional tag for model information, defaults to ""
         :type tag: str, optional
         :param inputs: model inputs (TF only), defaults to None
-        :type inputs: list[str], optional
+        :type inputs: str | list[str] | None
         :param outputs: model outputs (TF only), defaults to None
-        :type outputs: list[str], optional
+        :type outputs: str | list[str] | None
         :raises RedisReplyError: if model fails to set
         """
         typecheck(name, "name", str)
@@ -817,6 +950,7 @@
         typecheck(num_gpus, "num_gpus", int)
         typecheck(batch_size, "batch_size", int)
         typecheck(min_batch_size, "min_batch_size", int)
+        typecheck(min_batch_timeout, "min_batch_timeout", int)
         typecheck(tag, "tag", str)
         backend = self.__check_backend(backend)
         m_file = self.__check_file(model_file)
@@ -829,13 +963,19 @@
             num_gpus,
             batch_size,
             min_batch_size,
+            min_batch_timeout,
             tag,
             inputs,
             outputs,
         )
 
     @exception_handler
-    def run_model(self, name, inputs=None, outputs=None):
+    def run_model(
+        self,
+        name: str,
+        inputs: t.Optional[t.Union[str, t.List[str]]] = None,
+        outputs: t.Optional[t.Union[str, t.List[str]]] = None,
+    ) -> None:
         """Execute a stored model
 
         The model key used to locate the model to be run
@@ -846,9 +986,9 @@
         :param name: name for stored model
         :type name: str
         :param inputs: names of stored inputs to provide model, defaults to None
-        :type inputs: list[str], optional
+        :type inputs: str | list[str] | None
         :param outputs: names to store outputs under, defaults to None
-        :type outputs: list[str], optional
+        :type outputs: str | list[str] | None
         :raises RedisReplyError: if model execution fails
         """
         typecheck(name, "name", str)
@@ -858,12 +998,13 @@
     @exception_handler
     def run_model_multigpu(
         self,
-        name,
-        offset,
-        first_gpu,
-        num_gpus,
-        inputs=None,
-        outputs=None):
+        name: str,
+        offset: int,
+        first_gpu: int,
+        num_gpus: int,
+        inputs: t.Optional[t.Union[str, t.List[str]]] = None,
+        outputs: t.Optional[t.Union[str, t.List[str]]] = None,
+    ) -> None:
         """Execute a model stored for a multi-GPU system
 
         The model key used to locate the model to be run
@@ -881,9 +1022,9 @@
         :param num_gpus: the number of gpus for which the model was stored
         :type num_gpus: int
         :param inputs: names of stored inputs to provide model, defaults to None
-        :type inputs: list[str], optional
+        :type inputs: str | list[str] | None
         :param outputs: names to store outputs under, defaults to None
-        :type outputs: list[str], optional
+        :type outputs: str | list[str] | None
         :raises RedisReplyError: if model execution fails
         """
         typecheck(name, "name", str)
@@ -891,10 +1032,12 @@
         typecheck(first_gpu, "first_gpu", int)
         typecheck(num_gpus, "num_gpus", int)
         inputs, outputs = self.__check_tensor_args(inputs, outputs)
-        self._client.run_model_multigpu(name, inputs, outputs, offset, first_gpu, num_gpus)
+        self._client.run_model_multigpu(
+            name, inputs, outputs, offset, first_gpu, num_gpus
+        )
 
     @exception_handler
-    def delete_model(self, name):
+    def delete_model(self, name: str) -> None:
         """Remove a model from the database
 
         The model key used to locate the script to be run
@@ -910,7 +1053,7 @@
         self._client.delete_model(name)
 
     @exception_handler
-    def delete_model_multigpu(self, name, first_gpu, num_gpus):
+    def delete_model_multigpu(self, name: str, first_gpu: int, num_gpus: int) -> None:
         """Remove a model from the database that was stored for use with multiple GPUs
 
         The model key used to locate the script to be run
@@ -932,7 +1075,7 @@
         self._client.delete_model_multigpu(name, first_gpu, num_gpus)
 
     @exception_handler
-    def tensor_exists(self, name):
+    def tensor_exists(self, name: str) -> bool:
         """Check if a tensor exists in the database
 
         The tensor key used to check for existence
@@ -950,13 +1093,13 @@
         return self._client.tensor_exists(name)
 
     @exception_handler
-    def dataset_exists(self, name):
+    def dataset_exists(self, name: str) -> bool:
         """Check if a dataset exists in the database
 
         The dataset key used to check for existence
         may be formed by applying a prefix to the supplied
         name. See set_data_source()
-        and use_tensor_ensemble_prefix() for more details.
+        and use_dataset_ensemble_prefix() for more details.
 
         :param name: The dataset name that will be checked in the database
         :type name: str
@@ -968,7 +1111,7 @@
         return self._client.dataset_exists(name)
 
     @exception_handler
-    def model_exists(self, name):
+    def model_exists(self, name: str) -> bool:
         """Check if a model or script exists in the database
 
         The model or script key used to check for existence
@@ -986,7 +1129,7 @@
         return self._client.model_exists(name)
 
     @exception_handler
-    def key_exists(self, key):
+    def key_exists(self, key: str) -> bool:
         """Check if the key exists in the database
 
         :param key: The key that will be checked in the database
@@ -999,7 +1142,7 @@
         return self._client.key_exists(key)
 
     @exception_handler
-    def poll_key(self, key, poll_frequency_ms, num_tries):
+    def poll_key(self, key: str, poll_frequency_ms: int, num_tries: int) -> bool:
         """Check if the key exists in the database
 
         The check is repeated at a specified polling interval and for
@@ -1022,7 +1165,7 @@
         return self._client.poll_key(key, poll_frequency_ms, num_tries)
 
     @exception_handler
-    def poll_tensor(self, name, poll_frequency_ms, num_tries):
+    def poll_tensor(self, name: str, poll_frequency_ms: int, num_tries: int) -> bool:
         """Check if a tensor exists in the database
 
         The check is repeated at a specified polling interval and for
@@ -1049,7 +1192,7 @@
         return self._client.poll_tensor(name, poll_frequency_ms, num_tries)
 
     @exception_handler
-    def poll_dataset(self, name, poll_frequency_ms, num_tries):
+    def poll_dataset(self, name: str, poll_frequency_ms: int, num_tries: int) -> bool:
         """Check if a dataset exists in the database
 
         The check is repeated at a specified polling interval and for
@@ -1057,7 +1200,7 @@
         The dataset key used to check for existence
         may be formed by applying a prefix to the supplied
         name. See set_data_source()
-        and use_tensor_ensemble_prefix() for more details.
+        and use_dataset_ensemble_prefix() for more details.
 
         :param name: The dataset name that will be checked in the database
         :type name: str
@@ -1076,7 +1219,7 @@
         return self._client.poll_dataset(name, poll_frequency_ms, num_tries)
 
     @exception_handler
-    def poll_model(self, name, poll_frequency_ms, num_tries):
+    def poll_model(self, name: str, poll_frequency_ms: int, num_tries: int) -> bool:
         """Check if a model or script exists in the database
 
         The check is repeated at a specified polling interval and for
@@ -1103,7 +1246,7 @@
         return self._client.poll_model(name, poll_frequency_ms, num_tries)
 
     @exception_handler
-    def set_data_source(self, source_id):
+    def set_data_source(self, source_id: str) -> None:
         """Set the data source, a key prefix for future operations
 
         When running multiple applications, such as an ensemble
@@ -1132,7 +1275,7 @@
         return self._client.set_data_source(source_id)
 
     @exception_handler
-    def use_model_ensemble_prefix(self, use_prefix):
+    def use_model_ensemble_prefix(self, use_prefix: bool) -> None:
         """Control whether model and script keys are
         prefixed (e.g. in an ensemble) when forming database keys
 
@@ -1155,7 +1298,7 @@
         return self._client.use_model_ensemble_prefix(use_prefix)
 
     @exception_handler
-    def use_list_ensemble_prefix(self, use_prefix):
+    def use_list_ensemble_prefix(self, use_prefix: bool) -> None:
         """Control whether aggregation lists are prefixed
         when forming database keys
 
@@ -1169,9 +1312,9 @@
         prefixed. By default, the client prefixes aggregation
         list keys with the first prefix specified with the SSKEYIN
         and SSKEYOUT environment variables. Note that
-        use_tensor_ensemble_prefix() controls prefixing
+        use_dataset_ensemble_prefix() controls prefixing
         for the entities in the aggregation list, and
-        use_tensor_ensemble_prefix() should be given the
+        use_dataset_ensemble_prefix() should be given the
         same value that was used during the initial
         setting of the DataSet into the database.
 
@@ -1184,30 +1327,51 @@
         return self._client.use_list_ensemble_prefix(use_prefix)
 
     @exception_handler
-    def use_tensor_ensemble_prefix(self, use_prefix):
-        """Control whether tensor and dataset keys are
-        prefixed (e.g. in an ensemble) when forming database keys
+    def use_tensor_ensemble_prefix(self, use_prefix: bool) -> None:
+        """Control whether tensor keys are prefixed (e.g. in an
+        ensemble) when forming database keys
 
         This function can be used to avoid key collisions in an
         ensemble by prepending the string value from the
         environment variable SSKEYIN
-        to tensor and dataset names.
+        to tensor names.
         Prefixes will only be used if they were previously set through
         environment variables SSKEYIN and SSKEYOUT.
         Keys for entities created before this function is called
         will not be retroactively prefixed.
-        By default, the client prefixes tensor and dataset
-        keys when a prefix is available.
+        By default, the client prefixes tensor keys when a prefix is
+        available.
 
-        :param use_prefix: If set to true, all future operations
-            on tensors and datasets will use a prefix, if
-            available.
+        :param use_prefix: If set to true, all future operations on tensors
+            will use a prefix, if available.
         :type use_prefix: bool
         """
         typecheck(use_prefix, "use_prefix", bool)
         return self._client.use_tensor_ensemble_prefix(use_prefix)
 
     @exception_handler
-    def get_db_node_info(self, addresses):
+    def use_dataset_ensemble_prefix(self, use_prefix: bool) -> None:
+        """Control whether dataset keys are prefixed (e.g. in an ensemble)
+        when forming database keys
+
+        This function can be used to avoid key collisions in an ensemble
+        by prepending the string value from the environment variable SSKEYIN
+        to dataset names.
+        Prefixes will only be used if they were previously set through
+        environment variables SSKEYIN and SSKEYOUT.
+        Keys for entities created before this function is called
+        will not be retroactively prefixed.
+        By default, the client prefixes dataset keys when a prefix is
+        available.
+
+        :param use_prefix: If set to true, all future operations on datasets
+            will use a prefix, if available.
+        :type use_prefix: bool
+        """
+        typecheck(use_prefix, "use_prefix", bool)
+        return self._client.use_dataset_ensemble_prefix(use_prefix)
+
+    @exception_handler
+    def get_db_node_info(self, addresses: t.List[str]) -> t.List[t.Dict]:
         """Returns information about given database nodes
 
         :param addresses: The addresses of the database nodes
@@ -1230,7 +1394,7 @@
         return self._client.get_db_node_info(addresses)
 
     @exception_handler
-    def get_db_cluster_info(self, addresses):
+    def get_db_cluster_info(self, addresses: t.List[str]) -> t.List[t.Dict]:
         """Returns cluster information from a specified db node.
         If the address does not correspond to a cluster node,
         an empty dictionary is returned.
@@ -1256,7 +1420,9 @@
         return self._client.get_db_cluster_info(addresses)
 
     @exception_handler
-    def get_ai_info(self, address, key, reset_stat=False):
+    def get_ai_info(
+        self, address: t.List[str], key: str, reset_stat: bool = False
+    ) -> t.List[t.Dict]:
         """Returns AI.INFO command reply information for the
         script or model key at the provided addresses.
 
@@ -1280,7 +1446,7 @@
         return self._client.get_ai_info(address, key, reset_stat)
 
     @exception_handler
-    def flush_db(self, addresses):
+    def flush_db(self, addresses: t.List[str]) -> None:
         """Removes all keys from a specified db node.
 
         :param addresses: The addresses of the database nodes
@@ -1300,7 +1466,7 @@
         self._client.flush_db(addresses)
 
     @exception_handler
-    def config_get(self, expression, address):
+    def config_get(self, expression: str, address: t.List[str]) -> t.Dict:
         """Read the configuration parameters of a running server.
         If the address does not correspond to a cluster node,
         an empty dictionary is returned.
@@ -1331,7 +1497,7 @@
         return self._client.config_get(expression, address)
 
     @exception_handler
-    def config_set(self, config_param, value, address):
+    def config_set(self, config_param: str, value: str, address: str) -> None:
         """Reconfigure the server. It can change both trivial
         parameters or switch from one to another persistence option.
         All the configuration parameters set using this command are
@@ -1363,7 +1529,7 @@
         self._client.config_set(config_param, value, address)
 
     @exception_handler
-    def save(self, addresses):
+    def save(self, addresses: t.List[str]) -> None:
         """Performs a synchronous save of the database shard
         producing a point in time snapshot of all the data
         inside the Redis instance, in the form of an RBD file.
@@ -1385,7 +1551,26 @@
         self._client.save(addresses)
 
     @exception_handler
-    def append_to_list(self, list_name, dataset):
+    def set_model_chunk_size(self, chunk_size: int) -> None:
+        """Reconfigures the chunking size that Redis uses for model
+        serialization, replication, and the model_get command.
+        This method triggers the AI.CONFIG method in the Redis
+        database to change the model chunking size.
+
+        NOTE: The default size of 511MB should be fine for most
+        applications, so it is expected to be very rare that a
+        client calls this method. It is not necessary to call
+        this method for a model to be chunked.
+        :param chunk_size: The new chunk size in bytes
+        :type chunk_size: int
+        :raises RedisReplyError: if there is an error
+            in command execution.
+        """
+        typecheck(chunk_size, "chunk_size", int)
+        self._client.set_model_chunk_size(chunk_size)
+
+    @exception_handler
+    def append_to_list(self, list_name: str, dataset: Dataset) -> None:
         """Appends a dataset to the aggregation list
 
         When appending a dataset to an aggregation list,
@@ -1412,7 +1597,7 @@
         self._client.append_to_list(list_name, pybind_dataset)
 
     @exception_handler
-    def delete_list(self, list_name):
+    def delete_list(self, list_name: str) -> None:
         """Delete an aggregation list
 
         The key used to locate the aggregation list to be
@@ -1429,7 +1614,7 @@
         self._client.delete_list(list_name)
 
     @exception_handler
-    def copy_list(self, src_name, dest_name):
+    def copy_list(self, src_name: str, dest_name: str) -> None:
         """Copy an aggregation list
 
         The source and destination aggregation list keys used to
@@ -1450,7 +1635,7 @@
         self._client.copy_list(src_name, dest_name)
 
     @exception_handler
-    def rename_list(self, src_name, dest_name):
+    def rename_list(self, src_name: str, dest_name: str) -> None:
         """Rename an aggregation list
 
         The old and new aggregation list key used to find and
@@ -1470,7 +1655,7 @@
         self._client.rename_list(src_name, dest_name)
 
     @exception_handler
-    def get_list_length(self, list_name):
+    def get_list_length(self, list_name: str) -> int:
         """Get the number of entries in the list
 
         :param list_name: The list name
@@ -1484,7 +1669,9 @@
         return self._client.get_list_length(list_name)
 
     @exception_handler
-    def poll_list_length(self, name, list_length, poll_frequency_ms, num_tries):
+    def poll_list_length(
+        self, name: str, list_length: int, poll_frequency_ms: int, num_tries: int
+    ) -> bool:
         """Poll list length until length is equal
         to the provided length.  If maximum number of attempts is exceeded,
         returns False
@@ -1513,10 +1700,13 @@
         typecheck(poll_frequency_ms, "poll_frequency_ms", int)
         typecheck(num_tries, "num_tries", int)
         return self._client.poll_list_length(
-            name, list_length, poll_frequency_ms, num_tries)
+            name, list_length, poll_frequency_ms, num_tries
+        )
 
     @exception_handler
-    def poll_list_length_gte(self, name, list_length, poll_frequency_ms, num_tries):
+    def poll_list_length_gte(
+        self, name: str, list_length: int, poll_frequency_ms: int, num_tries: int
+    ) -> bool:
         """Poll list length until length is greater than or equal
         to the user-provided length.  If maximum number of attempts
         is exceeded, false is returned.
@@ -1545,10 +1735,13 @@
         typecheck(poll_frequency_ms, "poll_frequency_ms", int)
         typecheck(num_tries, "num_tries", int)
         return self._client.poll_list_length_gte(
-            name, list_length, poll_frequency_ms, num_tries)
+            name, list_length, poll_frequency_ms, num_tries
+        )
 
     @exception_handler
-    def poll_list_length_lte(self, name, list_length, poll_frequency_ms, num_tries):
+    def poll_list_length_lte(
+        self, name: str, list_length: int, poll_frequency_ms: int, num_tries: int
+    ) -> bool:
         """Poll list length until length is less than or equal
         to the user-provided length.  If maximum number of attempts
         is exceeded, false is returned.
@@ -1577,10 +1770,11 @@ def poll_list_length_lte(self, name, list_length, poll_frequency_ms, num_tries): typecheck(poll_frequency_ms, "poll_frequency_ms", int) typecheck(num_tries, "num_tries", int) return self._client.poll_list_length_lte( - name, list_length, poll_frequency_ms, num_tries) + name, list_length, poll_frequency_ms, num_tries + ) @exception_handler - def get_datasets_from_list(self, list_name): + def get_datasets_from_list(self, list_name: str) -> t.List[Dataset]: """Get datasets from an aggregation list The aggregation list key used to retrieve datasets @@ -1599,7 +1793,9 @@ def get_datasets_from_list(self, list_name): return self._client.get_datasets_from_list(list_name) @exception_handler - def get_dataset_list_range(self, list_name, start_index, end_index): + def get_dataset_list_range( + self, list_name: str, start_index: int, end_index: int + ) -> t.List[Dataset]: """Get a range of datasets (by index) from an aggregation list The aggregation list key used to retrieve datasets @@ -1632,15 +1828,18 @@ def get_dataset_list_range(self, list_name, start_index, end_index): typecheck(list_name, "list_name", str) typecheck(start_index, "start_index", int) typecheck(end_index, "end_index", int) - return self._client.get_dataset_list_range( - list_name, start_index, end_index) + return self._client.get_dataset_list_range(list_name, start_index, end_index) # ---- helpers -------------------------------------------------------- @staticmethod - def __check_tensor_args(inputs, outputs): + def __check_tensor_args( + inputs: t.Optional[t.Union[t.List[str], str]], + outputs: t.Optional[t.Union[t.List[str], str]], + ) -> t.Tuple[t.List[str], t.List[str]]: inputs = init_default([], inputs, (list, str)) outputs = init_default([], outputs, (list, str)) + assert inputs is not None and outputs is not None if isinstance(inputs, str): inputs = [inputs] if isinstance(outputs, str): @@ -1648,29 +1847,29 @@ def __check_tensor_args(inputs, outputs): return inputs, outputs @staticmethod - def __check_backend(backend): + def __check_backend(backend: str) -> str: backend = backend.upper() if backend in ["TF", "TFLITE", "TORCH", "ONNX"]: return backend - else: - raise TypeError(f"Backend type {backend} unsupported") + + raise TypeError(f"Backend type {backend} unsupported") @staticmethod - def __check_file(file): + def __check_file(file: str) -> str: file_path = osp.abspath(file) if not osp.isfile(file_path): raise FileNotFoundError(file_path) return file_path @staticmethod - def __check_device(device): + def __check_device(device: str) -> str: device = device.upper() if not device.startswith("CPU") and not device.startswith("GPU"): raise TypeError("Device argument must start with either CPU or GPU") return device @staticmethod - def __set_address(address): + def __set_address(address: str) -> None: if "SSDB" in os.environ: del os.environ["SSDB"] os.environ["SSDB"] = address diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/configoptions.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/configoptions.py new file mode 100644 index 00000000..ad60542a --- /dev/null +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/configoptions.py @@ -0,0 +1,222 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. 
Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import typing as t +from functools import wraps +from uuid import uuid4 + +from .error import RedisRuntimeError +from .smartredisPy import PyConfigOptions +from .util import exception_handler, typecheck + + +class _Managed: + """Marker class identifying factory-created objects""" + + +def create_managed_instance(base: t.Type[t.Any]) -> t.Any: + """Instantiate a managed instance of the class, enabling the use of type + checking to detect if an instance is managed""" + def get_dynamic_class_name(bases: t.Tuple[t.Type]) -> str: + """Create a name for the new type by concatenating base names. Appends a + unique suffix to avoid confusion if dynamic type comparisons occur""" + unique_key = str(uuid4()).split("-", 1)[0] + class_name = "".join(base.__name__ for base in bases) + unique_key + return class_name + + # Create a subtype that includes the _Managed marker + bases = (_Managed, base) + class_name = get_dynamic_class_name(bases) + managed_class = type(class_name, bases, {}) + return managed_class() + + +def managed(func: t.Callable) -> t.Callable: + """Decorator to verify that an instance was constructed using a factory method""" + not_managed = ( + "Attempting to call managed method on {} object not " + "created from a factory method" + ) + + @wraps(func) + def _wrapper(*args: t.Any, **kwargs: t.Any) -> t.Any: + instance = args[0] + if not isinstance(instance, _Managed): + msg = not_managed.format(instance.__class__.__name__) + raise RedisRuntimeError(msg) + return func(*args, **kwargs) + + return _wrapper + + +class ConfigOptions: + def __init__(self) -> None: + """Initialize a ConfigOptions base object""" + self._config_opts: t.Any = None + + @staticmethod + def from_pybind(configoptions: PyConfigOptions) -> "ConfigOptions": + """Initialize a ConfigOptions object from a PyConfigOptions object + + :param configoptions: The pybind PyConfigOptions object + to use for construction + :type configoptions: PyConfigOptions + :return: The newly constructed ConfigOptions from the PyConfigOptions + :rtype: ConfigOptions + """ + typecheck(configoptions, "configoptions", PyConfigOptions) + opts: ConfigOptions = create_managed_instance(ConfigOptions) + opts.set_configoptions(configoptions) + return opts + + @exception_handler + @managed + def get_data(self) -> PyConfigOptions: + """Return the PyConfigOptions attribute + + :return: The PyConfigOptions attribute containing + the ConfigOptions information
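# --- Editor's aside (illustrative sketch of the factory/marker pattern
# defined above; behaviour follows the code as written, the variable names
# are invented) ---
opts = ConfigOptions.create_from_environment("")  # factory -> managed subtype
assert isinstance(opts, _Managed)                 # @managed methods are allowed
bare = ConfigOptions()                            # plain constructor, unmanaged
assert not isinstance(bare, _Managed)             # @managed methods would raise
# --- end editor's aside ---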
:rtype: PyConfigOptions + """ + return self._config_opts + + @exception_handler + def set_configoptions(self, configoptions: PyConfigOptions) -> None: + """Set the PyConfigOptions attribute + + :param configoptions: The PyConfigOptions object + :type configoptions: PyConfigOptions + """ + typecheck(configoptions, "configoptions", PyConfigOptions) + self._config_opts = configoptions + + @classmethod + @exception_handler + def create_from_environment(cls, db_suffix: str) -> "ConfigOptions": + """Instantiate ConfigOptions, getting selections from + environment variables. If db_suffix is non-empty, + then "_{db_suffix}" will be appended to the name of + each environment variable that is read + + :param cls: The ConfigOptions class + :type cls: type + :param db_suffix: Suffix to append to environment variables + or an empty string to eschew appending + :type db_suffix: str + :return: An instantiated ConfigOptions object + :rtype: ConfigOptions + """ + typecheck(db_suffix, "db_suffix", str) + configoptions = PyConfigOptions.create_from_environment(db_suffix) + opts: ConfigOptions = create_managed_instance(ConfigOptions) + opts.set_configoptions(configoptions) + return opts + + @exception_handler + @managed + def get_integer_option(self, option_name: str) -> int: + """Retrieve the value of a numeric configuration option + from the selected source + + :param option_name: The name of the configuration option to retrieve + :type option_name: str + :return: The value of the selected option. Returns + default_value if the option was not set in the + selected source + :rtype: int + """ + typecheck(option_name, "option_name", str) + return self._config_opts.get_integer_option(option_name) + + @exception_handler + @managed + def get_string_option(self, option_name: str) -> str: + """Retrieve the value of a string configuration option + from the selected source + + :param option_name: The name of the configuration option to retrieve + :type option_name: str + :return: The value of the selected option. Returns + default_value if the option was not set in the + selected source + :rtype: str + """ + typecheck(option_name, "option_name", str) + return self._config_opts.get_string_option(option_name) + + @exception_handler + @managed + def is_configured(self, option_name: str) -> bool: + """Check whether a configuration option is set in the selected source + + :param option_name: The name of the configuration option to check + :type option_name: str + :return: True IFF the target option is defined in the selected source + or if it has been overridden + :rtype: bool + """ + typecheck(option_name, "option_name", str) + return self._config_opts.is_configured(option_name) + + @exception_handler + @managed + def override_integer_option(self, option_name: str, value: int) -> None: + """Override the value of a numeric configuration option + in the selected source + + Overrides are specific to an instance of the + ConfigOptions class. 
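# --- Editor's aside (a hedged sketch of the suffixed environment lookups
# described above; the exact option names and the SSDB_INPUT resolution are
# assumptions based on the docstring's suffix rule) ---
import os
os.environ["SSDB_INPUT"] = "127.0.0.1:6379"
opts = ConfigOptions.create_from_environment("INPUT")
address = opts.get_string_option("SSDB")  # would resolve SSDB_INPUT per the rule
# --- end editor's aside ---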
An instance that references + the same source will not be affected by an override to + a different ConfigOptions instance + + :param option_name: The name of the configuration option to override + :type option_name: str + :param value: The value to store for the configuration option + :type value: int + """ + typecheck(option_name, "option_name", str) + typecheck(value, "value", int) + self._config_opts.override_integer_option(option_name, value) + + @exception_handler + @managed + def override_string_option(self, option_name: str, value: str) -> None: + """Override the value of a string configuration option + in the selected source + + Overrides are specific to an instance of the + ConfigOptions class. An instance that references + the same source will not be affected by an override to + a different ConfigOptions instance + + :param option_name: The name of the configuration option to override + :type option_name: str + :param value: The value to store for the configuration option + :type value: str + """ + typecheck(option_name, "option_name", str) + typecheck(value, "value", str) + self._config_opts.override_string_option(option_name, value) diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/dataset.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/dataset.py index 1a865b14..0816d11c 100644 --- a/2023-01/smartsim/smartredis/src/python/module/smartredis/dataset.py +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/dataset.py @@ -24,7 +24,7 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from numbers import Number +import typing as t import numpy as np @@ -32,10 +32,9 @@ from .srobject import SRObject from .util import Dtypes, exception_handler, typecheck -from .error import * class Dataset(SRObject): - def __init__(self, name): + def __init__(self, name: str) -> None: """Initialize a Dataset object :param name: name of dataset @@ -44,23 +43,32 @@ def __init__(self, name): super().__init__(PyDataset(name)) typecheck(name, "name", str) - @property - def _data(self): - """Alias _srobject to _data + def __str__(self) -> str: + """Create a string representation of the dataset + + :return: A string representation of the dataset + :rtype: str """ + return self._data.to_string() + + @property + def _data(self) -> PyDataset: + """Alias _srobject to _data""" + assert isinstance(self._srobject, PyDataset) return self._srobject @staticmethod - def from_pybind(dataset): - """Initialize a Dataset object from - a PyDataset object + def from_pybind(dataset: PyDataset) -> "Dataset": + """Initialize a Dataset object from a PyDataset object + + Create a new Dataset object using the data and properties + of a PyDataset object as the initial values. 
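# --- Editor's aside (a minimal in-memory Dataset round trip; the names are
# invented and nothing here touches the database) ---
import numpy as np
ds = Dataset("example")
ds.add_tensor("velocity", np.ones((8, 3)))
print(ds)                       # __str__ above delegates to PyDataset.to_string()
vel = ds.get_tensor("velocity")  # retrieves the array back from the dataset
# --- end editor's aside ---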
:param dataset: The pybind PyDataset object to use for construction :type dataset: PyDataset - :return: The newly constructor Dataset from - the PyDataset - :rtype: Dataset + :return: The newly constructed Dataset object + :rtype: Dataset """ typecheck(dataset, "dataset", PyDataset) new_dataset = Dataset(dataset.get_name()) @@ -68,7 +76,7 @@ def from_pybind(dataset): return new_dataset @exception_handler - def get_data(self): + def get_data(self) -> PyDataset: """Return the PyDataset attribute :return: The PyDataset attribute containing @@ -78,7 +86,7 @@ def get_data(self): return self._data @exception_handler - def set_data(self, dataset): + def set_data(self, dataset: PyDataset) -> None: """Set the PyDataset attribute :param dataset: The PyDataset object @@ -88,13 +96,13 @@ def set_data(self, dataset): self._srobject = dataset @exception_handler - def add_tensor(self, name, data): - """Add a named tensor to this dataset + def add_tensor(self, name: str, data: np.ndarray) -> None: + """Add a named multi-dimensional data array (tensor) to this dataset - :param name: tensor name + :param name: name associated with the tensor data :type name: str :param data: tensor data - :type data: np.array + :type data: np.ndarray """ typecheck(name, "name", str) typecheck(data, "data", np.ndarray) @@ -102,29 +110,37 @@ def add_tensor(self, name, data): self._data.add_tensor(name, data, dtype) @exception_handler - def get_tensor(self, name): + def get_tensor(self, name: str) -> np.ndarray: """Get a tensor from the Dataset :param name: name of the tensor to get :type name: str :return: a numpy array of tensor data - :rtype: np.array + :rtype: np.ndarray """ typecheck(name, "name", str) return self._data.get_tensor(name) @exception_handler - def add_meta_scalar(self, name, data): - """Add metadata scalar field (non-string) with value to the DataSet + def get_name(self) -> str: + """Get the name of a Dataset - If the field does not exist, it will be created. - If the field exists, the value - will be appended to existing field. + :return: the name of the in-memory dataset + :rtype: str + """ + return self._data.get_name() - :param name: The name used to reference the metadata - field + @exception_handler + def add_meta_scalar(self, name: str, data: t.Union[int, float]) -> None: + """Add scalar (non-string) metadata to a field name if it exists; + otherwise, create and add + + If the field name exists, append the scalar metadata; otherwise, + create the field within the DataSet object and add the scalar metadata. + + :param name: The name used to reference the scalar metadata field :type name: str - :param data: a scalar + :param data: scalar metadata input :type data: int | float """ typecheck(name, "name", str) @@ -138,17 +154,15 @@ def add_meta_scalar(self, name, data): self._data.add_meta_scalar(name, data_as_array, dtype) @exception_handler - def add_meta_string(self, name, data): - """Add metadata string field with value to the DataSet + def add_meta_string(self, name: str, data: str) -> None: + """Add string metadata to a field name if it exists; otherwise, create and add - If the field does not exist, it will be created - If the field exists the value will - be appended to existing field. + If the field name exists, append the string metadata; otherwise, + create the field within the DataSet object and add the string metadata. 
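# --- Editor's aside (illustrative append semantics for the metadata fields
# described above; the field names are invented) ---
ds.add_meta_scalar("timestep", 1)
ds.add_meta_scalar("timestep", 2)       # appends: the field now holds [1, 2]
ds.add_meta_string("units", "m/s")
steps = ds.get_meta_scalars("timestep")  # scalar values stored under the field
# --- end editor's aside ---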
- :param name: The name used to reference the metadata - field + :param name: The name used to reference the string metadata field :type name: str - :param data: The string to add to the field + :param data: string metadata input :type data: str """ typecheck(name, "name", str) @@ -156,39 +170,39 @@ def add_meta_string(self, name, data): self._data.add_meta_string(name, data) @exception_handler - def get_meta_scalars(self, name): - """Get the metadata scalar field values from the DataSet + def get_meta_scalars(self, name: str) -> t.Union[t.List[int], t.List[float]]: + """Get the scalar values from the DataSet assigned to a field name - :param name: The name used to reference the metadata - field in the DataSet + :param name: The field name to retrieve from :type name: str + :rtype: list[int] | list[float] """ typecheck(name, "name", str) return self._data.get_meta_scalars(name) @exception_handler - def get_meta_strings(self, name): - """Get the metadata scalar field values from the DataSet + def get_meta_strings(self, name: str) -> t.List[str]: + """Get the string values from the DataSet assigned to a field name - :param name: The name used to reference the metadata - field in the DataSet + :param name: The field name to retrieve from :type name: str + :rtype: list[str] """ typecheck(name, "name", str) return self._data.get_meta_strings(name) @exception_handler - def get_metadata_field_names(self): - """Get the names of all metadata scalars and strings from the DataSet + def get_metadata_field_names(self) -> t.List[str]: + """Get all field names from the DataSet - :return: a list of metadata field names - :rtype: list + :return: a list of all metadata field names + :rtype: list[str] """ return self._data.get_metadata_field_names() @exception_handler - def get_metadata_field_type(self, name): - """Get the names of all metadata scalars and strings from the DataSet + def get_metadata_field_type(self, name: str) -> t.Type: + """Get the type of metadata for a field name (scalar or string) :param name: The name used to reference the metadata field in the DataSet @@ -201,7 +215,7 @@ def get_metadata_field_type(self, name): return Dtypes.from_string(type_str) @exception_handler - def get_tensor_type(self, name): + def get_tensor_type(self, name: str) -> t.Type: """Get the type of a tensor in the DataSet :param name: The name used to reference the tensor in the DataSet @@ -214,10 +228,22 @@ def get_tensor_type(self, name): return Dtypes.from_string(type_str) @exception_handler - def get_tensor_names(self): + def get_tensor_names(self) -> t.List[str]: """Get the names of all tensors in the DataSet :return: a list of tensor names - :rtype: list + :rtype: list[str] """ return self._data.get_tensor_names() + + @exception_handler + def get_tensor_dims(self, name: str) -> t.List[int]: + """Get the dimensions of a tensor in the DataSet + + :param name: name associated with the tensor data + :type name: str + :return: a list of the tensor dimensions + :rtype: list[int] + """ + typecheck(name, "name", str) + return self._data.get_tensor_dims(name) diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/dataset_utils.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/dataset_utils.py index bca0d93e..76bcb875 100644 --- a/2023-01/smartsim/smartredis/src/python/module/smartredis/dataset_utils.py +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/dataset_utils.py @@ -24,35 +24,74 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS 
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -try: - import xarray as xr -except ImportError: - xr = None +import functools +import typing as t +from itertools import permutations +from typing import TYPE_CHECKING from .dataset import Dataset -from .util import Dtypes, exception_handler, typecheck -from itertools import permutations -from .error import * +from .error import RedisRuntimeError +from .util import typecheck + +if TYPE_CHECKING: # pragma: no cover + # Import optional deps for intellisense + import xarray as xr + + # Type hint magic bits + from typing_extensions import ParamSpec + + _PR = ParamSpec("_PR") + _RT = t.TypeVar("_RT") +else: + # Leave optional deps as nullish + xr = None # pylint: disable=invalid-name + +# ----helper decorators ----- + + +def _requires_xarray(fn: "t.Callable[_PR, _RT]") -> "t.Callable[_PR, _RT]": + @functools.wraps(fn) + def _import_xarray(*args: "_PR.args", **kwargs: "_PR.kwargs") -> "_RT": + global xr # pylint: disable=global-statement,invalid-name + try: + import xarray as xr # pylint: disable=import-outside-toplevel + except ImportError as e: + raise RedisRuntimeError( + "Optional package xarray must be installed; " + "Consider running `pip install smartredis[xarray]`" + ) from e + return fn(*args, **kwargs) + + return _import_xarray + # ----helper function ----- -def get_data(dataset, name, type): - return dataset.get_meta_strings(f"_xarray_{name}_{type}_names")[0].split(",") -def typecheck_stringlist(names, strings_name, string_name): + +def get_data(dataset: Dataset, name: str, dtype: str) -> t.List[str]: + return dataset.get_meta_strings(f"_xarray_{name}_{dtype}_names")[0].split(",") + + +def typecheck_stringlist( + names: t.List[str], strings_name: str, string_name: str +) -> None: typecheck(names, strings_name, list) for name in names: typecheck(name, string_name, str) # Check if empty if name == "": - raise RedisRuntimeError + raise RedisRuntimeError("Empty string") class DatasetConverter: @staticmethod def add_metadata_for_xarray( - dataset, data_names, dim_names, coord_names=None, attr_names=None - ): + dataset: Dataset, + data_names: t.Union[t.List[str], str], + dim_names: t.Union[t.List[str], str], + coord_names: t.Optional[t.Union[t.List[str], str]] = None, + attr_names: t.Optional[t.Union[t.List[str], str]] = None, + ) -> None: """Extract metadata from a SmartRedis dataset and add it to dataformat-specific fieldnames @@ -68,13 +107,13 @@ def add_metadata_for_xarray( :type attr_names: list[str], optional """ - if type(data_names) == str: + if isinstance(data_names, str): data_names = [data_names] - if type(dim_names) == str: + if isinstance(dim_names, str): dim_names = [dim_names] - if type(coord_names) == str: + if isinstance(coord_names, str): coord_names = [coord_names] - if type(attr_names) == str: + if isinstance(attr_names, str): attr_names = [attr_names] typecheck(dataset, "dataset", Dataset) @@ -90,7 +129,7 @@ def add_metadata_for_xarray( for name in data_names: dataset.add_meta_string("_xarray_data_names", name) - for (arg, sarg) in zip(args, sargs): + for arg, sarg in zip(args, sargs): if isinstance(arg, list): values = [] for val in arg: @@ -104,7 +143,8 @@ def add_metadata_for_xarray( dataset.add_meta_string(f"_xarray_{name}_{sarg}", "null") @staticmethod - def transform_to_xarray(dataset): + @_requires_xarray + def transform_to_xarray(dataset: Dataset) -> t.Dict: """Transform a SmartRedis Dataset, with the appropriate metadata, to an Xarray DataArray @@ -116,10 +156,6 @@ def 
transform_to_xarray(dataset): fieldnames and appropriately formatted metadata. rtype: dict """ - - if (not xr): - raise RedisRuntimeError("Optional package xarray must be installed") - typecheck(dataset, "dataset", Dataset) coord_dict = {} @@ -159,26 +195,30 @@ def transform_to_xarray(dataset): ret_xarray = {} for variable_name in variable_names: - data_final = dataset.get_tensor(variable_name) - dims_final = [] # Extract dimensions in correct form - for dim_field_name in get_data(dataset, variable_name, "dim"): - dims_final.append(dataset.get_meta_strings(dim_field_name)[0]) - attrs_final = {} + dims_final = [ + dataset.get_meta_strings(dim_field_name)[0] + for dim_field_name + in get_data(dataset, variable_name, "dim") + ] + # Extract attributes in correct form - for attr_field_name in get_data(dataset, variable_name, "attr"): - fieldname = dataset.get_meta_strings(attr_field_name)[0] - attrs_final[attr_field_name] = fieldname + attrs_final = { + attr_field_name: dataset.get_meta_strings(attr_field_name)[0] + for attr_field_name + in get_data(dataset, variable_name, "attr") + } + # Add coordinates to the correct data name - for name in coord_final.keys(): + for name, value in coord_final.items(): if name == variable_name: - coords_final = coord_final.get(name) + coords_final = value # Construct a xr.DataArray using extracted dataset data, # append the dataarray to corresponding variable names ret_xarray[variable_name] = xr.DataArray( name=variable_name, - data=data_final, + data=dataset.get_tensor(variable_name), coords=coords_final, dims=dims_final, attrs=attrs_final, diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/error.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/error.py index 2e8b3228..38abd1f7 100644 --- a/2023-01/smartsim/smartredis/src/python/module/smartredis/error.py +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/error.py @@ -26,16 +26,24 @@ from os import environ -__all__ = ['RedisConnectionError', 'RedisReplyError', 'RedisRuntimeError', - 'RedisBadAllocError', 'RedisDatabaseError', 'RedisInternalError', - 'RedisTimeoutError', 'RedisKeyError'] +__all__ = [ + "RedisConnectionError", + "RedisReplyError", + "RedisRuntimeError", + "RedisBadAllocError", + "RedisDatabaseError", + "RedisInternalError", + "RedisTimeoutError", + "RedisKeyError", +] + class RedisConnectionError(RuntimeError): - def __init__(self, cpp_error=""): + def __init__(self, cpp_error: str = "") -> None: super().__init__(self._set_message(cpp_error)) @staticmethod - def _set_message(cpp_error): + def _set_message(cpp_error: str) -> str: msg = "" if cpp_error: msg = cpp_error + "\n" @@ -45,20 +53,22 @@ def _set_message(cpp_error): class RedisReplyError(RuntimeError): - def __init__(self, cpp_error, method="", key=""): + def __init__(self, cpp_error: str, method: str = "", key: str = "") -> None: super().__init__(self._check_error(cpp_error, method, key)) + # pylint: disable=unused-argument @staticmethod - def _check_error(cpp_error, method="", key=""): + def _check_error(cpp_error: str, method: str = "", key: str = "") -> str: msg = "" if method: msg = f"{method} execution failed\n" msg += cpp_error return msg + class RedisRuntimeError(RedisReplyError): @staticmethod - def _check_error(cpp_error, method="", key=""): + def _check_error(cpp_error: str, method: str = "", key: str = "") -> str: msg = "" if method: msg = f"{method} execution failed\n" @@ -68,17 +78,22 @@ def _check_error(cpp_error, method="", key=""): msg += cpp_error return msg + class 
RedisBadAllocError(RedisReplyError): pass + class RedisDatabaseError(RedisReplyError): pass + class RedisInternalError(RedisReplyError): pass + class RedisTimeoutError(RedisReplyError): pass + class RedisKeyError(RedisReplyError): pass diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/logcontext.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/logcontext.py index 1f52b2d5..645d3eeb 100644 --- a/2023-01/smartsim/smartredis/src/python/module/smartredis/logcontext.py +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/logcontext.py @@ -28,10 +28,9 @@ from .srobject import SRObject from .util import exception_handler, typecheck -from .error import * class LogContext(SRObject): - def __init__(self, context): + def __init__(self, context: str) -> None: """Initialize a LogContext object :param context: logging context @@ -42,13 +41,12 @@ def __init__(self, context): self._name = context @property - def _logcontext(self): - """Alias _srobject to _logcontext - """ + def _logcontext(self) -> PyLogContext: + """Alias _srobject to _logcontext""" return self._srobject @staticmethod - def from_pybind(logcontext): + def from_pybind(logcontext: PyLogContext) -> "LogContext": """Initialize a LogContext object from a PyLogContext object @@ -60,12 +58,14 @@ def from_pybind(logcontext): :rtype: LogContext """ typecheck(logcontext, "logcontext", PyLogContext) - new_logcontext = LogContext(logcontext._name) + new_logcontext = LogContext( + logcontext._name # pylint: disable=protected-access + ) new_logcontext.set_context(logcontext) return new_logcontext @exception_handler - def get_context(self): + def get_context(self) -> PyLogContext: """Return the PyLogContext attribute :return: The PyLogContext attribute containing @@ -75,7 +75,7 @@ def get_context(self): return self._logcontext @exception_handler - def set_context(self, logcontext): + def set_context(self, logcontext: PyLogContext) -> None: """Set the PyLogContext attribute :param logcontext: The PyLogContext object diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/logger.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/logger.py index 814ebe24..7445ca88 100644 --- a/2023-01/smartsim/smartredis/src/python/module/smartredis/logger.py +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/logger.py @@ -24,17 +24,18 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
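# --- Editor's aside (an illustrative logging call for the helpers defined
# below; the import paths and the LLInfo enum member are assumptions based on
# this module's own imports and the commented level table) ---
from smartredis.logger import log_data
from smartredis.smartredisPy import SRLoggingLevel
log_data("my_app", SRLoggingLevel.LLInfo, "simulation step complete")
# --- end editor's aside ---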
-from .smartredisPy import cpp_log_data, cpp_log_warning, cpp_log_error, SRLoggingLevel +from .smartredisPy import SRLoggingLevel, cpp_log_data, cpp_log_error, cpp_log_warning from .util import exception_handler, typecheck # Logging levels -#LLQuiet = 1 # No logging at all -#LLInfo = 2 # Informational logging only -#LLDebug = 3 # Verbose logging for debugging purposes -#LLDeveloper = 4 # Extra verbose logging for internal use +# LLQuiet = 1 # No logging at all +# LLInfo = 2 # Informational logging only +# LLDebug = 3 # Verbose logging for debugging purposes +# LLDeveloper = 4 # Extra verbose logging for internal use + @exception_handler -def log_data(context, level, data): +def log_data(context: str, level: SRLoggingLevel, data: str) -> None: """Log data to the SmartRedis logfile :param context: Logging context (string to prefix the log entry with) @@ -50,8 +51,9 @@ def log_data(context, level, data): typecheck(data, "data", str) cpp_log_data(context, level, data) + @exception_handler -def log_warning(context, level, data): +def log_warning(context: str, level: SRLoggingLevel, data: str) -> None: """Log a warning to the SmartRedis logfile :param context: Logging context (string to prefix the log entry with) @@ -67,8 +69,9 @@ def log_warning(context, level, data): typecheck(data, "data", str) cpp_log_warning(context, level, data) + @exception_handler -def log_error(context, level, data): +def log_error(context: str, level: SRLoggingLevel, data: str) -> None: """Log an error to the SmartRedis logfile :param context: Logging context (string to prefix the log entry with) @@ -83,4 +86,3 @@ def log_error(context, level, data): typecheck(level, "level", SRLoggingLevel) typecheck(data, "data", str) cpp_log_error(context, level, data) - diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/srobject.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/srobject.py index 787e6786..ccb642e5 100644 --- a/2023-01/smartsim/smartredis/src/python/module/smartredis/srobject.py +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/srobject.py @@ -27,10 +27,9 @@ from .smartredisPy import PySRObject, SRLoggingLevel from .util import exception_handler, typecheck -from .error import * class SRObject: - def __init__(self, context): + def __init__(self, context: str) -> None: """Initialize a SRObject object :param context: logging context @@ -43,42 +42,8 @@ def __init__(self, context): else: self._srobject = context - @staticmethod - def from_pybind(srobject): - """Initialize a SRObject object from - a PySRObject object - - :param srobject: The pybind PySRObject object to use for construction - :type srobject: PySRObject - :return: The newly constructed¸ SRObject from the PySRObject - :rtype: SRObject - """ - typecheck(srobject, "srobject", PySRObject) - new_srobject = SRObject(srobject._name) - new_srobject.set_data(srobject) - return new_srobject - - @exception_handler - def get_srobject(self): - """Return the PySRObject attribute - - :return: The PySRObject attribute containing the srobject information - :rtype: PySRObject - """ - return self._srobject - - @exception_handler - def set_srobject(self, srobject): - """Set the PySRObject attribute - - :param srobject: The PySRObject object - :type srobject: PySRObject - """ - typecheck(srobject, "srobject", PySRObject) - self._srobject = srobject - @exception_handler - def log_data(self, level, data): + def log_data(self, level: SRLoggingLevel, data: str) -> None: """Conditionally log data if the logging level is high enough :param 
level: Minimum logging level for data to be logged @@ -92,7 +57,7 @@ def log_data(self, level, data): self._srobject.log_data(level, data) @exception_handler - def log_warning(self, level, data): + def log_warning(self, level: SRLoggingLevel, data: str) -> None: """Conditionally log warning data if the logging level is high enough :param level: Minimum logging level for data to be logged @@ -106,7 +71,7 @@ def log_warning(self, level, data): self._srobject.log_warning(level, data) @exception_handler - def log_error(self, level, data): + def log_error(self, level: SRLoggingLevel, data: str) -> None: """Conditionally log error data if the logging level is high enough :param level: Minimum logging level for data to be logged diff --git a/2023-01/smartsim/smartredis/src/python/module/smartredis/util.py b/2023-01/smartsim/smartredis/src/python/module/smartredis/util.py index 8ee29d04..1989bc0d 100644 --- a/2023-01/smartsim/smartredis/src/python/module/smartredis/util.py +++ b/2023-01/smartsim/smartredis/src/python/module/smartredis/util.py @@ -24,15 +24,26 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from .error import * +import typing as t from functools import wraps + +import numpy as np +from .error import * # pylint: disable=wildcard-import,unused-wildcard-import + from .smartredisPy import RedisReplyError as PybindRedisReplyError from .smartredisPy import c_get_last_error_location -import numpy as np + +if t.TYPE_CHECKING: + # Type hint magic bits + from typing_extensions import ParamSpec + + _PR = ParamSpec("_PR") + _RT = t.TypeVar("_RT") + class Dtypes: @staticmethod - def tensor_from_numpy(array): + def tensor_from_numpy(array: np.ndarray) -> str: mapping = { "float64": "DOUBLE", "float32": "FLOAT", @@ -49,7 +60,7 @@ def tensor_from_numpy(array): raise TypeError(f"Incompatible tensor type provided {dtype}") @staticmethod - def metadata_from_numpy(array): + def metadata_from_numpy(array: np.ndarray) -> str: mapping = { "float64": "DOUBLE", "float32": "FLOAT", @@ -64,25 +75,28 @@ def metadata_from_numpy(array): raise TypeError(f"Incompatible metadata type provided {dtype}") @staticmethod - def from_string(type_name): + def from_string(type_name: str) -> t.Type: mapping = { "DOUBLE": np.double, - "FLOAT": np.float64, - "UINT8": np.uint8, + "FLOAT": np.float64, + "UINT8": np.uint8, "UINT16": np.uint16, "UINT32": np.uint32, "UINT64": np.uint64, - "INT8": np.int8, - "INT16": np.int16, - "INT32": np.int32, - "INT64": np.int64, + "INT8": np.int8, + "INT16": np.int16, + "INT32": np.int32, + "INT64": np.int64, "STRING": str, } if type_name in mapping: return mapping[type_name] raise TypeError(f"Unrecognized type name {type_name}") -def init_default(default, init_value, expected_type=None): + +def init_default( + default: t.Any, init_value: t.Any, expected_type: t.Optional[t.Any] = None +) -> t.Any: """Used for setting a mutable type to a default value. 
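# --- Editor's aside (behaviour of the defaulting helper documented here,
# shown with invented argument values; follows the code as written) ---
inputs = init_default([], None, (list, str))        # -> [] (the fresh default)
inputs = init_default([], "tensor_a", (list, str))  # -> "tensor_a"
# init_default([], 42, (list, str)) would raise TypeError
# --- end editor's aside ---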
PEP standards forbid setting a default value to a mutable type @@ -91,27 +105,42 @@ def init_default(default, init_value, expected_type=None): if init_value is None: return default if expected_type is not None and not isinstance(init_value, expected_type): - raise TypeError(f"Argument was of type {type(init_value)}, not {expected_type}") + msg = f"Argument was of type {type(init_value)}, not {expected_type}" + raise TypeError(msg) return init_value -def exception_handler(func): + +def exception_handler(func: "t.Callable[_PR, _RT]") -> "t.Callable[_PR, _RT]": """Route exceptions raised in processing SmartRedis API calls to our Python wrappers + WARNING: using this decorator with a class' @staticmethod or with an + unbound function that takes a type as its first argument will fail + because that will make the decorator think it's working with a + @classmethod + :param func: the API function to decorate with this wrapper :type func: function :raises RedisReplyError: if the wrapped function raised an exception """ + @wraps(func) - def smartredis_api_wrapper(*args, **kwargs): + def smartredis_api_wrapper(*args: "_PR.args", **kwargs: "_PR.kwargs") -> "_RT": try: return func(*args, **kwargs) # Catch RedisReplyErrors for additional processing (convert from # pyerror to our error module). # TypeErrors and ValueErrors we pass straight through except PybindRedisReplyError as cpp_error: - # query args[0] (i.e. 'self') for the class name - method_name = args[0].__class__.__name__ + "." + func.__name__ + # get the class for the calling context. + # for a @classmethod, this will be args[0], but for + # a "normal" method, args[0] is a self pointer from + # which we can grab the __class__ attribute + src_class = args[0] + if not isinstance(src_class, type): + src_class = args[0].__class__ + # Build the fully specified name of the calling context + method_name = src_class.__name__ + "." + func.__name__ # Get our exception from the global symbol table. 
# The smartredis.error hierarchy exactly # parallels the one built via pybind to enable this @@ -120,11 +149,15 @@ def smartredis_api_wrapper(*args, **kwargs): if error_loc == "unavailable": cpp_error_str = str(cpp_error) else: - cpp_error_str = f"File {error_loc}, in SmartRedis library\n{str(cpp_error)}" + cpp_error_str = ( + f"File {error_loc}, in SmartRedis library\n{str(cpp_error)}" + ) raise globals()[exception_name](cpp_error_str, method_name) from None + return smartredis_api_wrapper -def typecheck(arg, name, _type): + +def typecheck(arg: t.Any, name: str, _type: t.Union[t.Tuple, type]) -> None: """Validate that an argument is of a given type :param arg: the variable to be type tested @@ -136,4 +169,4 @@ def typecheck(arg, name, _type): :raises TypeError exception if arg is not of type _type """ if not isinstance(arg, _type): - raise TypeError(f"Argument {name} is of type {type(arg)}, not {_type}") \ No newline at end of file + raise TypeError(f"Argument {name} is of type {type(arg)}, not {_type}") diff --git a/2023-01/smartsim/smartredis/src/python/src/pyclient.cpp b/2023-01/smartsim/smartredis/src/python/src/pyclient.cpp index 6940583d..7577932c 100644 --- a/2023-01/smartsim/smartredis/src/python/src/pyclient.cpp +++ b/2023-01/smartsim/smartredis/src/python/src/pyclient.cpp @@ -35,12 +35,16 @@ using namespace SmartRedis; namespace py = pybind11; -PyClient::PyClient(bool cluster, const std::string& logger_name) - : PySRObject(logger_name) +// Decorator to standardize exception handling in PyBind Client API methods +template +auto pb_client_api(T&& client_api_func, const char* name) { - _client = NULL; + // we create a closure below + auto decorated = + [name, client_api_func = std::forward(client_api_func)](auto&&... args) + { try { - _client = new Client(cluster, logger_name); + return client_api_func(std::forward(args)...); } catch (Exception& e) { // exception is already prepared for caller @@ -52,310 +56,202 @@ PyClient::PyClient(bool cluster, const std::string& logger_name) } catch (...) 
{ // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "during client construction."); + std::string msg( + "A non-standard exception was encountered while executing "); + msg += name; + throw SRInternalException(msg); } + }; + return decorated; +} + +// Macro to invoke the decorator with a lambda function +#define MAKE_CLIENT_API(stuff)\ + pb_client_api([&] { stuff }, __func__)() + +PyClient::PyClient(const std::string& logger_name) + : PySRObject(logger_name) +{ + MAKE_CLIENT_API({ + _client = new Client(logger_name); + }); +} + +PyClient::PyClient( + PyConfigOptions& config_options, + const std::string& logger_name) + : PySRObject(logger_name) +{ + MAKE_CLIENT_API({ + ConfigOptions* co = config_options.get(); + _client = new Client(co, logger_name); + }); +} + +PyClient::PyClient(bool cluster, const std::string& logger_name) + : PySRObject(logger_name) +{ + MAKE_CLIENT_API({ + _client = new Client(cluster, logger_name); + }); } PyClient::~PyClient() { - if (_client != NULL) { - delete _client; - _client = NULL; - } + MAKE_CLIENT_API({ + if (_client != NULL) { + delete _client; + _client = NULL; + } + }); } -void PyClient::put_tensor(std::string& name, - std::string& type, - py::array data) +void PyClient::put_tensor( + std::string& name, std::string& type, py::array data) { - auto buffer = data.request(); - void* ptr = buffer.ptr; + MAKE_CLIENT_API({ + auto buffer = data.request(); + void* ptr = buffer.ptr; - // get dims - std::vector dims(buffer.ndim); - for (size_t i = 0; i < buffer.shape.size(); i++) { - dims[i] = (size_t)buffer.shape[i]; - } + // get dims + std::vector dims(buffer.ndim); + for (size_t i = 0; i < buffer.shape.size(); i++) { + dims[i] = (size_t)buffer.shape[i]; + } - SRTensorType ttype = TENSOR_TYPE_MAP.at(type); + SRTensorType ttype = TENSOR_TYPE_MAP.at(type); - try { _client->put_tensor(name, ptr, dims, ttype, SRMemLayoutContiguous); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing put_tensor."); - } + }); } py::array PyClient::get_tensor(const std::string& name) { - TensorBase* tensor = NULL; - try { - tensor = _client->_get_tensorbase_obj(name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_tensor."); - } - - // Define py::capsule lambda function for destructor - py::capsule free_when_done((void*)tensor, [](void *tensor) { - delete reinterpret_cast(tensor); - }); - - // detect data type - switch (tensor->type()) { - case SRTensorTypeDouble: { - double* data = reinterpret_cast(tensor->data_view( - SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeFloat: { - float* data = reinterpret_cast(tensor->data_view( - SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeInt64: { - int64_t* data = reinterpret_cast(tensor->data_view( - SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); + return MAKE_CLIENT_API({ + TensorBase* tensor = _client->_get_tensorbase_obj(name); + + // Define py::capsule lambda function for destructor + py::capsule free_when_done((void*)tensor, [](void *tensor) { + delete reinterpret_cast(tensor); + }); + + // detect data type + switch (tensor->type()) { + case SRTensorTypeDouble: { + double* data = reinterpret_cast(tensor->data_view( + SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeFloat: { + float* data = reinterpret_cast(tensor->data_view( + SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeInt64: { + int64_t* data = reinterpret_cast(tensor->data_view( + SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeInt32: { + int32_t* data = reinterpret_cast(tensor->data_view( + SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeInt16: { + int16_t* data = reinterpret_cast(tensor->data_view( + SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeInt8: { + int8_t* data = reinterpret_cast(tensor->data_view( + SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeUint16: { + uint16_t* data = reinterpret_cast(tensor->data_view( + SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeUint8: { + uint8_t* data = reinterpret_cast(tensor->data_view( + SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + default : + throw SRRuntimeException("Could not infer type in "\ + "PyClient::get_tensor()."); } - case SRTensorTypeInt32: { - int32_t* data = reinterpret_cast(tensor->data_view( - SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeInt16: { - int16_t* data = reinterpret_cast(tensor->data_view( - SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeInt8: { - int8_t* data = reinterpret_cast(tensor->data_view( - SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeUint16: { - uint16_t* data = reinterpret_cast(tensor->data_view( - SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeUint8: { - uint8_t* data = reinterpret_cast(tensor->data_view( - SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - default : - throw SRRuntimeException("Could not infer type in "\ - 
"PyClient::get_tensor()."); - } + }); } -void PyClient::delete_tensor(const std::string& name) { - try { +void PyClient::delete_tensor(const std::string& name) +{ + MAKE_CLIENT_API({ _client->delete_tensor(name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing delete_tensor."); - } + }); } void PyClient::copy_tensor(const std::string& src_name, - const std::string& dest_name) { - try { + const std::string& dest_name) +{ + MAKE_CLIENT_API({ _client->copy_tensor(src_name, dest_name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing copy_tensor."); - } + }); } void PyClient::rename_tensor(const std::string& old_name, - const std::string& new_name) { - try { + const std::string& new_name) +{ + MAKE_CLIENT_API({ _client->rename_tensor(old_name, new_name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing rename_tensor."); - } + }); } void PyClient::put_dataset(PyDataset& dataset) { - try { - _client->put_dataset(*dataset.get()); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing put_dataset."); - } + MAKE_CLIENT_API({ + _client->put_dataset(*(dataset.get())); + }); } PyDataset* PyClient::get_dataset(const std::string& name) { - DataSet* data; - try { - data = new DataSet(_client->get_dataset(name)); - } - catch (const std::bad_alloc& e) { - data = NULL; - throw SRBadAllocException("DataSet"); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_dataset."); - } - PyDataset* dataset = new PyDataset(data); - return dataset; + return MAKE_CLIENT_API({ + DataSet* data = new DataSet(_client->get_dataset(name)); + return new PyDataset(data); + }); } -void PyClient::delete_dataset(const std::string& name) { - try { +void PyClient::delete_dataset(const std::string& name) +{ + MAKE_CLIENT_API({ _client->delete_dataset(name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing delete_dataset."); - } + }); } -void PyClient::copy_dataset(const std::string& src_name, - const std::string& dest_name) { - try { +void PyClient::copy_dataset( + const std::string& src_name, const std::string& dest_name) +{ + MAKE_CLIENT_API({ _client->copy_dataset(src_name, dest_name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing copy_dataset."); - } + }); } -void PyClient::rename_dataset(const std::string& old_name, - const std::string& new_name) { - try { +void PyClient::rename_dataset( + const std::string& old_name, const std::string& new_name) +{ + MAKE_CLIENT_API({ _client->rename_dataset(old_name, new_name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing rename_dataset."); - } + }); } void PyClient::set_script_from_file(const std::string& name, const std::string& device, const std::string& script_file) { - try { + MAKE_CLIENT_API({ _client->set_script_from_file(name, device, script_file); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing set_script_from_file."); - } + }); } void PyClient::set_script_from_file_multigpu(const std::string& name, @@ -363,45 +259,19 @@ void PyClient::set_script_from_file_multigpu(const std::string& name, int first_gpu, int num_gpus) { - try { - _client->set_script_from_file_multigpu(name, script_file, first_gpu, num_gpus); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing set_script_from_file_multigpu."); - } + MAKE_CLIENT_API({ + _client->set_script_from_file_multigpu( + name, script_file, first_gpu, num_gpus); + }); } - void PyClient::set_script(const std::string& name, const std::string& device, const std::string_view& script) { - try { + MAKE_CLIENT_API({ _client->set_script(name, device, script); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing set_script."); - } + }); } void PyClient::set_script_multigpu(const std::string& name, @@ -409,44 +279,16 @@ void PyClient::set_script_multigpu(const std::string& name, int first_gpu, int num_gpus) { - try { + MAKE_CLIENT_API({ _client->set_script_multigpu(name, script, first_gpu, num_gpus); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing set_script_multigpu."); - } + }); } std::string_view PyClient::get_script(const std::string& name) { - std::string_view script; - try { - script = _client->get_script(name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_script."); - } - return script; + return MAKE_CLIENT_API({ + return _client->get_script(name); + }); } void PyClient::run_script(const std::string& name, @@ -454,22 +296,9 @@ void PyClient::run_script(const std::string& name, std::vector& inputs, std::vector& outputs) { - try { - _client->run_script(name, function, inputs, outputs); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing run_script."); - } + MAKE_CLIENT_API({ + _client->run_script(name, function, inputs, outputs); + }); } void PyClient::run_script_multigpu(const std::string& name, @@ -480,86 +309,33 @@ void PyClient::run_script_multigpu(const std::string& name, int first_gpu, int num_gpus) { - try { - _client->run_script_multigpu( - name, function, inputs, outputs, offset, first_gpu, num_gpus); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing run_script_multigpu."); - } + MAKE_CLIENT_API({ + _client->run_script_multigpu( + name, function, inputs, outputs, offset, first_gpu, num_gpus); + }); } - void PyClient::delete_script(const std::string& name) { - try { + MAKE_CLIENT_API({ _client->delete_script(name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing delete_script."); - } + }); } void PyClient::delete_script_multigpu( const std::string& name, int first_gpu, int num_gpus) { - try { + MAKE_CLIENT_API({ _client->delete_script_multigpu(name, first_gpu, num_gpus); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing delete_script_multigpu."); - } + }); } py::bytes PyClient::get_model(const std::string& name) { - try { + return MAKE_CLIENT_API({ std::string model(_client->get_model(name)); return py::bytes(model); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_model."); - } + }); } void PyClient::set_model(const std::string& name, @@ -568,28 +344,16 @@ void PyClient::set_model(const std::string& name, const std::string& device, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) { - try { + MAKE_CLIENT_API({ _client->set_model(name, model, backend, device, - batch_size, min_batch_size, tag, - inputs, outputs); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing set_model."); - } + batch_size, min_batch_size, min_batch_timeout, + tag, inputs, outputs); + }); } void PyClient::set_model_multigpu(const std::string& name, @@ -599,28 +363,16 @@ void PyClient::set_model_multigpu(const std::string& name, int num_gpus, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) { - try { + MAKE_CLIENT_API({ _client->set_model_multigpu(name, model, backend, first_gpu, num_gpus, - batch_size, min_batch_size, tag, - inputs, outputs); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
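// ---------------------------------------------------------------------------
// This hunk also threads a new min_batch_timeout argument (placed right
// after min_batch_size) through all set_model* overloads. Presumably it
// maps to RedisAI's MINBATCHTIMEOUT: how long, in milliseconds, the server
// may wait to accumulate min_batch_size requests before running the model
// anyway. A stub with the same parameter order, purely to illustrate call
// sites (StubClient and all values are placeholders):
#include <iostream>
#include <string>
#include <vector>

struct StubClient {
    void set_model(const std::string& name, const std::string& model,
                   const std::string& backend, const std::string& device,
                   int batch_size, int min_batch_size, int min_batch_timeout,
                   const std::string& tag,
                   const std::vector<std::string>& inputs,
                   const std::vector<std::string>& outputs)
    {
        (void)model; (void)backend; (void)device; (void)tag;
        (void)inputs; (void)outputs;
        std::cout << name << ": batch=" << batch_size
                  << " min_batch=" << min_batch_size
                  << " min_batch_timeout_ms=" << min_batch_timeout << "\n";
    }
};

int main() {
    StubClient client;
    client.set_model("mnist_model", "<bytes>", "TORCH", "CPU",
                     16, 4, 100, "", {"input"}, {"output"});
}
// ---------------------------------------------------------------------------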
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing set_model_multigpu."); - } + batch_size, min_batch_size, min_batch_timeout, + tag, inputs, outputs); + }); } void PyClient::set_model_from_file(const std::string& name, @@ -629,28 +381,16 @@ void PyClient::set_model_from_file(const std::string& name, const std::string& device, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) { - try { + MAKE_CLIENT_API({ _client->set_model_from_file(name, model_file, backend, device, - batch_size, min_batch_size, tag, - inputs, outputs); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing set_model_from_file."); - } + batch_size, min_batch_size, min_batch_timeout, + tag, inputs, outputs); + }); } void PyClient::set_model_from_file_multigpu(const std::string& name, @@ -660,50 +400,25 @@ void PyClient::set_model_from_file_multigpu(const std::string& name, int num_gpus, int batch_size, int min_batch_size, + int min_batch_timeout, const std::string& tag, const std::vector& inputs, const std::vector& outputs) { - try { + MAKE_CLIENT_API({ _client->set_model_from_file_multigpu( name, model_file, backend, first_gpu, num_gpus, batch_size, - min_batch_size, tag, inputs, outputs); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing set_model_from_file_multigpu."); - } + min_batch_size, min_batch_timeout, tag, inputs, outputs); + }); } void PyClient::run_model(const std::string& name, std::vector inputs, std::vector outputs) { - try { + MAKE_CLIENT_API({ _client->run_model(name, inputs, outputs); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing run_model."); - } + }); } void PyClient::run_model_multigpu(const std::string& name, @@ -713,242 +428,129 @@ void PyClient::run_model_multigpu(const std::string& name, int first_gpu, int num_gpus) { - try { + MAKE_CLIENT_API({ _client->run_model_multigpu(name, inputs, outputs, offset, first_gpu, num_gpus); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing run_model_multigpu."); - } + }); } void PyClient::delete_model(const std::string& name) { - try { + MAKE_CLIENT_API({ _client->delete_model(name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
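// ---------------------------------------------------------------------------
// run_model_multigpu / run_script_multigpu take (offset, first_gpu,
// num_gpus). The routing is internal to SmartRedis; a plausible reading,
// stated here as an assumption rather than fact, is that offset (e.g. an
// MPI rank) spreads concurrent callers across the per-GPU model copies:
inline int demo_pick_gpu(int offset, int first_gpu, int num_gpus)
{
    return first_gpu + (offset % num_gpus);  // hypothetical routing rule
}
// ---------------------------------------------------------------------------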
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing delete_model."); - } + }); } void PyClient::delete_model_multigpu( const std::string& name, int first_gpu, int num_gpus) { - try { + MAKE_CLIENT_API({ _client->delete_model_multigpu(name, first_gpu, num_gpus); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing delete_model_multigpu."); - } + }); } void PyClient::set_data_source(const std::string& source_id) { - _client->set_data_source(source_id); + MAKE_CLIENT_API({ + _client->set_data_source(source_id); + }); } bool PyClient::key_exists(const std::string& key) { - try { + return MAKE_CLIENT_API({ return _client->key_exists(key); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing key_exists."); - } + }); } bool PyClient::poll_key(const std::string& key, int poll_frequency_ms, int num_tries) { - try { + return MAKE_CLIENT_API({ return _client->poll_key(key, poll_frequency_ms, num_tries); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing poll_key."); - } + }); } bool PyClient::model_exists(const std::string& name) { - try { + return MAKE_CLIENT_API({ return _client->model_exists(name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing model_exists."); - } + }); } -bool PyClient::tensor_exists(const std::string& name) -{ - try { - return _client->tensor_exists(name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing tensor_exists."); - } +bool PyClient::tensor_exists(const std::string& name) +{ + return MAKE_CLIENT_API({ + return _client->tensor_exists(name); + }); } bool PyClient::dataset_exists(const std::string& name) { - return this->_client->dataset_exists(name); + return MAKE_CLIENT_API({ + return this->_client->dataset_exists(name); + }); } bool PyClient::poll_tensor(const std::string& name, int poll_frequency_ms, int num_tries) { - try { + return MAKE_CLIENT_API({ return _client->poll_tensor(name, poll_frequency_ms, num_tries); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
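// ---------------------------------------------------------------------------
// poll_key, poll_tensor, poll_model and poll_dataset all share one
// contract: re-check existence every poll_frequency_ms, at most num_tries
// times, and report whether the item ever appeared. A generic sketch of
// that loop (poll_until is our name, not SmartRedis's):
#include <chrono>
#include <functional>
#include <thread>

bool poll_until(const std::function<bool()>& exists,
                int poll_frequency_ms, int num_tries)
{
    for (int attempt = 0; attempt < num_tries; ++attempt) {
        if (exists())
            return true;
        std::this_thread::sleep_for(
            std::chrono::milliseconds(poll_frequency_ms));
    }
    return false;  // the caller decides whether a miss is an error
}
// ---------------------------------------------------------------------------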
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing poll_tensor."); - } + }); } bool PyClient::poll_dataset(const std::string& name, int poll_frequency_ms, int num_tries) { - try { + return MAKE_CLIENT_API({ return _client->poll_dataset(name, poll_frequency_ms, num_tries); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing poll_dataset."); - } + }); } bool PyClient::poll_model(const std::string& name, int poll_frequency_ms, int num_tries) { - try { + return MAKE_CLIENT_API({ return _client->poll_model(name, poll_frequency_ms, num_tries); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing poll_model."); - } + }); } void PyClient::use_tensor_ensemble_prefix(bool use_prefix) { - _client->use_tensor_ensemble_prefix(use_prefix); + MAKE_CLIENT_API({ + _client->use_tensor_ensemble_prefix(use_prefix); + }); +} + +void PyClient::use_dataset_ensemble_prefix(bool use_prefix) +{ + MAKE_CLIENT_API({ + _client->use_dataset_ensemble_prefix(use_prefix); + }); } void PyClient::use_model_ensemble_prefix(bool use_prefix) { - _client->use_model_ensemble_prefix(use_prefix); + MAKE_CLIENT_API({ + _client->use_model_ensemble_prefix(use_prefix); + }); } void PyClient::use_list_ensemble_prefix(bool use_prefix) { - _client->use_list_ensemble_prefix(use_prefix); + MAKE_CLIENT_API({ + _client->use_list_ensemble_prefix(use_prefix); + }); } std::vector PyClient::get_db_node_info(std::vector addresses) { - try { + return MAKE_CLIENT_API({ std::vector addresses_info; for (size_t i = 0; i < addresses.size(); i++) { parsed_reply_nested_map info_map = _client->get_db_node_info(addresses[i]); @@ -956,25 +558,12 @@ std::vector PyClient::get_db_node_info(std::vector addres addresses_info.push_back(info_dict); } return addresses_info; - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_db_node_info."); - } + }); } std::vector PyClient::get_db_cluster_info(std::vector addresses) { - try { + return MAKE_CLIENT_API({ std::vector addresses_info; for (size_t i = 0; i < addresses.size(); i++) { parsed_reply_map info_map = _client->get_db_cluster_info(addresses[i]); @@ -982,141 +571,61 @@ std::vector PyClient::get_db_cluster_info(std::vector add addresses_info.push_back(info_dict); } return addresses_info; - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
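// ---------------------------------------------------------------------------
// The new use_dataset_ensemble_prefix joins the existing tensor/model/list
// variants. Conceptually, with prefixing enabled, keys are namespaced by
// the ensemble member that produced them so members do not clobber each
// other's data. The prefix source (SmartSim's SSKEYIN/SSKEYOUT variables)
// and the "." separator below are assumptions, not taken from this diff:
#include <string>

std::string demo_prefixed_key(bool use_prefix,
                              const std::string& member,
                              const std::string& key)
{
    return use_prefix ? member + "." + key : key;  // e.g. "ensemble_0.pressure"
}
// ---------------------------------------------------------------------------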
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_db_cluster_info."); - } + }); } // Execute AI.INFO command -std::vector -PyClient::get_ai_info(const std::vector& addresses, - const std::string& key, - const bool reset_stat) - +std::vector PyClient::get_ai_info( + const std::vector& addresses, + const std::string& key, + const bool reset_stat) { - try { + return MAKE_CLIENT_API({ std::vector ai_info; for (size_t i = 0; i < addresses.size(); i++) { - parsed_reply_map result = - _client->get_ai_info(addresses[i], key, reset_stat); - py::dict result_dict = py::cast(result); - ai_info.push_back(result_dict); - + parsed_reply_map result = + _client->get_ai_info(addresses[i], key, reset_stat); + ai_info.push_back(py::cast(result)); } return ai_info; - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "during client get_ai_info() execution."); - } + }); } // Delete all keys of all existing databases void PyClient::flush_db(std::vector addresses) { for (size_t i = 0; i < addresses.size(); i++) { - try { + MAKE_CLIENT_API({ _client->flush_db(addresses[i]); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing flush_db."); - } + }); } } // Read the configuration parameters of a running server py::dict PyClient::config_get(std::string expression, std::string address) { - try { - std::unordered_map result_map = _client->config_get(expression, address); + return MAKE_CLIENT_API({ + auto result_map = _client->config_get(expression, address); return py::cast(result_map); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing config_get."); - } + }); } // Reconfigure the server -void PyClient::config_set(std::string config_param, std::string value, std::string address) +void PyClient::config_set( + std::string config_param, std::string value, std::string address) { - try { + MAKE_CLIENT_API({ _client->config_set(config_param, value, address); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing config_set."); - } + }); } // Save a copy of the database void PyClient::save(std::vector addresses) { for (size_t address_index = 0; address_index < addresses.size(); address_index++) { - try { + MAKE_CLIENT_API({ _client->save(addresses[address_index]); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
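// ---------------------------------------------------------------------------
// config_get/config_set wrap Redis CONFIG GET/SET against a single server
// address; note that config_get hands Python a copy of the parsed reply via
// py::cast. An in-memory stand-in for that round trip (g_config and the
// demo_ names are illustrative only):
#include <string>
#include <unordered_map>

static std::unordered_map<std::string, std::string> g_config = {
    {"maxmemory", "0"}};

std::unordered_map<std::string, std::string>
demo_config_get(const std::string& expression)
{
    (void)expression;  // a real server filters parameters by this glob
    return g_config;   // returned by value, like the binding above
}

void demo_config_set(const std::string& param, const std::string& value)
{
    g_config[param] = value;
}
// ---------------------------------------------------------------------------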
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing save."); - } + }); } } @@ -1124,181 +633,77 @@ void PyClient::save(std::vector addresses) // Appends a dataset to the aggregation list void PyClient::append_to_list(const std::string& list_name, PyDataset& dataset) { - try { + MAKE_CLIENT_API({ _client->append_to_list(list_name, *dataset.get()); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing append_to_list."); - } + }); } // Delete an aggregation list void PyClient::delete_list(const std::string& list_name) { - try { + MAKE_CLIENT_API({ _client->delete_list(list_name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing delete_list."); - } + }); } // Copy an aggregation list void PyClient::copy_list(const std::string& src_name, const std::string& dest_name) { - try { + MAKE_CLIENT_API({ _client->copy_list(src_name, dest_name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing copy_list."); - } + }); } // Rename an aggregation list void PyClient::rename_list(const std::string& src_name, const std::string& dest_name) { - try { + MAKE_CLIENT_API({ _client->rename_list(src_name, dest_name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing rename_list."); - } + }); } // Get the number of entries in the list int PyClient::get_list_length(const std::string& list_name) { - try { + return MAKE_CLIENT_API({ return _client->get_list_length(list_name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_list_length."); - } + }); } // Poll list length until length is equal bool PyClient::poll_list_length(const std::string& name, int list_length, int poll_frequency_ms, int num_tries) { - try { + return MAKE_CLIENT_API({ return _client->poll_list_length( name, list_length, poll_frequency_ms, num_tries); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
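// ---------------------------------------------------------------------------
// The aggregation-list API above (append_to_list, copy_list, rename_list,
// get_list_length, ...) supports a producer/consumer staging pattern:
// writers append finished DataSets to a named list, readers poll its length
// and then fetch. A minimal in-memory mimic of the bookkeeping, tracking
// dataset names only (all names here are illustrative):
#include <map>
#include <string>
#include <utility>
#include <vector>

static std::map<std::string, std::vector<std::string>> g_lists;

void demo_append_to_list(const std::string& list, const std::string& dataset) {
    g_lists[list].push_back(dataset);
}

int demo_get_list_length(const std::string& list) {
    auto it = g_lists.find(list);
    return it == g_lists.end() ? 0 : static_cast<int>(it->second.size());
}

void demo_rename_list(const std::string& src, const std::string& dest) {
    g_lists[dest] = std::move(g_lists[src]);  // i.e. copy_list then delete_list
    g_lists.erase(src);
}
// ---------------------------------------------------------------------------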
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing poll_list_length."); - } + }); } // Poll list length until length is greater than or equal bool PyClient::poll_list_length_gte(const std::string& name, int list_length, int poll_frequency_ms, int num_tries) { - try { + return MAKE_CLIENT_API({ return _client->poll_list_length_gte( name, list_length, poll_frequency_ms, num_tries); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing poll_list_length_gte."); - } + }); } // Poll list length until length is less than or equal bool PyClient::poll_list_length_lte(const std::string& name, int list_length, int poll_frequency_ms, int num_tries) { - try { + return MAKE_CLIENT_API({ return _client->poll_list_length_lte( name, list_length, poll_frequency_ms, num_tries); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing poll_list_length_lte."); - } + }); } // Get datasets from an aggregation list py::list PyClient::get_datasets_from_list(const std::string& list_name) { - try { + return MAKE_CLIENT_API({ std::vector datasets = _client->get_datasets_from_list(list_name); std::vector result; for (auto it = datasets.begin(); it != datasets.end(); it++) { @@ -1307,27 +712,14 @@ py::list PyClient::get_datasets_from_list(const std::string& list_name) } py::list result_list = py::cast(result); return result_list; - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_datasets_from_list."); - } + }); } // Get a range of datasets (by index) from an aggregation list py::list PyClient::get_dataset_list_range( const std::string& list_name, const int start_index, const int end_index) { - try { + return MAKE_CLIENT_API({ std::vector datasets = _client->get_dataset_list_range( list_name, start_index, end_index); std::vector result; @@ -1337,20 +729,23 @@ py::list PyClient::get_dataset_list_range( } py::list result_list = py::cast(result); return result_list; - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
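// ---------------------------------------------------------------------------
// get_dataset_list_range takes (start_index, end_index). We assume Redis
// LRANGE conventions here, i.e. inclusive endpoints with negative indices
// counting from the back; that is a guess from the API's shape, not
// something this diff states:
#include <algorithm>
#include <vector>

template <typename T>
std::vector<T> demo_list_range(const std::vector<T>& v, int start, int end)
{
    const int n = static_cast<int>(v.size());
    if (start < 0) start += n;   // -1 would mean the last element
    if (end < 0) end += n;
    start = std::max(start, 0);
    end = std::min(end, n - 1);
    std::vector<T> out;
    for (int i = start; i <= end; ++i)
        out.push_back(v[i]);
    return out;
}
// ---------------------------------------------------------------------------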
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_dataset_list_range."); - } + }); +} + +// Configure the Redis module chunk size +void PyClient::set_model_chunk_size(int chunk_size) +{ + return MAKE_CLIENT_API({ + return _client->set_model_chunk_size(chunk_size); + }); +} + +// Create a string representation of the Client +std::string PyClient::to_string() +{ + return MAKE_CLIENT_API({ + return _client->to_string(); + }); } // EOF diff --git a/2023-01/smartsim/smartredis/src/python/src/pyconfigoptions.cpp b/2023-01/smartsim/smartredis/src/python/src/pyconfigoptions.cpp new file mode 100644 index 00000000..8f0488df --- /dev/null +++ b/2023-01/smartsim/smartredis/src/python/src/pyconfigoptions.cpp @@ -0,0 +1,181 @@ +/* + * BSD 2-Clause License + * + * Copyright (c) 2021-2023, Hewlett Packard Enterprise + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + +#include "pyconfigoptions.h" +#include "srexception.h" + +using namespace SmartRedis; + +namespace py = pybind11; + +// Decorator to standardize exception handling in PyBind Client API methods +template +auto pb_cfgopt_api(T&& cfgopt_api_func, const char* name) +{ + // we create a closure below + auto decorated = + [name, cfgopt_api_func = std::forward(cfgopt_api_func)](auto&&... args) + { + try { + return cfgopt_api_func(std::forward(args)...); + } + catch (Exception& e) { + // exception is already prepared for caller + throw; + } + catch (std::exception& e) { + // should never happen + throw SRInternalException(e.what()); + } + catch (...) 
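// ---------------------------------------------------------------------------
// set_model_chunk_size, added a bit above, is new in this diff (the mnist
// test later in this patch calls it with 1024 * 1024 before uploading a
// model). Large model blobs are split into fixed-size pieces so that no
// single Redis value grows unboundedly; the storage layout is internal to
// SmartRedis, but the splitting itself is just this (assumes chunk_size > 0):
#include <cstddef>
#include <string>
#include <vector>

std::vector<std::string> demo_chunk(const std::string& blob,
                                    std::size_t chunk_size)
{
    std::vector<std::string> chunks;
    for (std::size_t pos = 0; pos < blob.size(); pos += chunk_size)
        chunks.push_back(blob.substr(pos, chunk_size));  // last may be shorter
    return chunks;
}
// ---------------------------------------------------------------------------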
{ + // should never happen + std::string msg( + "A non-standard exception was encountered while executing "); + msg += name; + throw SRInternalException(msg); + } + }; + return decorated; +} + +// Macro to invoke the decorator with a lambda function +#define MAKE_CFGOPT_API(stuff)\ + pb_cfgopt_api([&] { stuff }, __func__)() + +PyConfigOptions::PyConfigOptions() +{ + MAKE_CFGOPT_API({ + _configoptions = NULL; + }); +} + +PyConfigOptions::PyConfigOptions(ConfigOptions* configoptions) +{ + _configoptions = configoptions; +} + +PyConfigOptions::~PyConfigOptions() +{ + MAKE_CFGOPT_API({ + if (_configoptions != NULL) { + delete _configoptions; + _configoptions = NULL; + } + }); +} + +ConfigOptions* PyConfigOptions::get() { + return _configoptions; +} + +// Instantiate ConfigOptions from environment variables +PyConfigOptions* PyConfigOptions::create_from_environment( + const std::string& db_suffix) +{ + return MAKE_CFGOPT_API({ + auto cfgOpts = ConfigOptions::create_from_environment(db_suffix); + ConfigOptions* pCfgOpts = cfgOpts.release(); + return new PyConfigOptions(pCfgOpts); + }); +} + +// Retrieve the value of a numeric configuration option +// from the selected source +int64_t PyConfigOptions::get_integer_option(const std::string& option_name) +{ + return MAKE_CFGOPT_API({ + if (_configoptions == NULL) { + throw SRRuntimeException( + "Attempted to call get_integer_option "\ + "from a non-factory constructed ConfigOptions"); + } + return _configoptions->get_integer_option(option_name); + }); +} + +// Retrieve the value of a string configuration option +// from the selected source +std::string PyConfigOptions::get_string_option(const std::string& option_name) +{ + return MAKE_CFGOPT_API({ + if (_configoptions == NULL) { + throw SRRuntimeException( + "Attempted to call get_string_option "\ + "from a non-factory constructed ConfigOptions"); + } + return _configoptions->get_string_option(option_name); + }); +} + +// Check whether a configuration option is set in the +// selected source +bool PyConfigOptions::is_configured(const std::string& option_name) +{ + return MAKE_CFGOPT_API({ + if (_configoptions == NULL) { + throw SRRuntimeException( + "Attempted to call is_defined "\ + "from a non-factory constructed ConfigOptions"); + } + return _configoptions->is_configured(option_name); + }); +} + +// Override the value of a numeric configuration option +// in the selected source +void PyConfigOptions::override_integer_option( + const std::string& option_name, int64_t value) +{ + MAKE_CFGOPT_API({ + if (_configoptions == NULL) { + throw SRRuntimeException( + "Attempted to call override_integer_option "\ + "from a non-factory constructed ConfigOptions"); + } + _configoptions->override_integer_option(option_name, value); + }); +} + +// Override the value of a string configuration option +// in the selected source +void PyConfigOptions::override_string_option( + const std::string& option_name, const std::string& value) +{ + MAKE_CFGOPT_API({ + if (_configoptions == NULL) { + throw SRRuntimeException( + "Attempted to call override_string_option "\ + "from a non-factory constructed ConfigOptions"); + } + _configoptions->override_string_option(option_name, value); + }); +} + +// EOF + diff --git a/2023-01/smartsim/smartredis/src/python/src/pydataset.cpp b/2023-01/smartsim/smartredis/src/python/src/pydataset.cpp index ff79e5fe..d3f17e4b 100644 --- a/2023-01/smartsim/smartredis/src/python/src/pydataset.cpp +++ b/2023-01/smartsim/smartredis/src/python/src/pydataset.cpp @@ -35,12 +35,16 @@ using namespace 
SmartRedis; namespace py = pybind11; -PyDataset::PyDataset(const std::string& name) - : PySRObject(name) +// Decorator to standardize exception handling in PyBind DataSet API methods +template +auto pb_dataset_api(T&& dataset_api_func, const char* name) { - _dataset = NULL; + // we create a closure below + auto decorated = + [name, dataset_api_func = std::forward(dataset_api_func)](auto&&... args) + { try { - _dataset = new DataSet(name); + return dataset_api_func(std::forward(args)...); } catch (Exception& e) { // exception is already prepared for caller @@ -52,32 +56,49 @@ PyDataset::PyDataset(const std::string& name) } catch (...) { // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "during dataset construction."); + std::string msg( + "A non-standard exception was encountered while executing "); + msg += name; + throw SRInternalException(msg); } + }; + return decorated; +} + +// Macro to invoke the decorator with a lambda function +#define MAKE_DATASET_API(stuff)\ + pb_dataset_api([&] { stuff }, __func__)() + + +PyDataset::PyDataset(const std::string& name) + : PySRObject(name) +{ + MAKE_DATASET_API({ + _dataset = new DataSet(name); + }); } PyDataset::PyDataset(DataSet* dataset) : PySRObject(dataset->get_context()) { - _dataset = dataset; + MAKE_DATASET_API({ + _dataset = dataset; + }); } PyDataset::~PyDataset() { - if (_dataset != NULL) { - delete _dataset; - _dataset = NULL; - } -} - -DataSet* PyDataset::get() { - return _dataset; + MAKE_DATASET_API({ + if (_dataset != NULL) { + delete _dataset; + _dataset = NULL; + } + }); } void PyDataset::add_tensor(const std::string& name, py::array data, std::string& type) { - try { + MAKE_DATASET_API({ auto buffer = data.request(); void* ptr = buffer.ptr; @@ -89,263 +110,161 @@ void PyDataset::add_tensor(const std::string& name, py::array data, std::string& SRTensorType ttype = TENSOR_TYPE_MAP.at(type); _dataset->add_tensor(name, ptr, dims, ttype, SRMemLayoutContiguous); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing add_tensor."); - } + }); } py::array PyDataset::get_tensor(const std::string& name) { - TensorBase* tensor = NULL; - try { - tensor = _dataset->_get_tensorbase_obj(name); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
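// ---------------------------------------------------------------------------
// The refactored get_tensor below preserves a key zero-copy detail: the
// numpy array is built directly on the TensorBase's buffer, and a
// py::capsule destructor transfers ownership to Python so the C++ object is
// deleted when the array is garbage-collected. The same pattern in
// isolation (make_owned_array is our name, not part of the bindings):
#include <pybind11/numpy.h>
#include <cstddef>
#include <vector>

namespace py11 = pybind11;

py11::array make_owned_array(std::vector<double>* data)
{
    // When the array dies, the capsule's destructor frees the vector.
    py11::capsule free_when_done(data, [](void* p) {
        delete reinterpret_cast<std::vector<double>*>(p);
    });
    std::vector<std::size_t> shape = {data->size()};
    return py11::array(shape, data->data(), free_when_done);
}
// ---------------------------------------------------------------------------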
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_tensor."); - } + return MAKE_DATASET_API({ + TensorBase* tensor = _dataset->_get_tensorbase_obj(name); - // Define py::capsule lambda function for destructor - py::capsule free_when_done((void*)tensor, [](void *tensor) { - delete reinterpret_cast(tensor); - }); + // Define py::capsule lambda function for destructor + py::capsule free_when_done((void*)tensor, [](void *tensor) { + delete reinterpret_cast(tensor); + }); - // detect data type - switch (tensor->type()) { - case SRTensorTypeDouble: { - double* data = reinterpret_cast( - tensor->data_view(SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeFloat: { - float* data = reinterpret_cast( - tensor->data_view(SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeInt64: { - int64_t* data = reinterpret_cast( - tensor->data_view(SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeInt32: { - int32_t* data = reinterpret_cast( - tensor->data_view(SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeInt16: { - int16_t* data = reinterpret_cast( - tensor->data_view(SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeInt8: { - int8_t* data = reinterpret_cast( - tensor->data_view(SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeUint16: { - uint16_t* data = reinterpret_cast( - tensor->data_view(SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); - } - case SRTensorTypeUint8: { - uint8_t* data = reinterpret_cast( - tensor->data_view(SRMemLayoutContiguous)); - return py::array(tensor->dims(), data, free_when_done); + // detect data type + switch (tensor->type()) { + case SRTensorTypeDouble: { + double* data = reinterpret_cast( + tensor->data_view(SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeFloat: { + float* data = reinterpret_cast( + tensor->data_view(SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeInt64: { + int64_t* data = reinterpret_cast( + tensor->data_view(SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeInt32: { + int32_t* data = reinterpret_cast( + tensor->data_view(SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeInt16: { + int16_t* data = reinterpret_cast( + tensor->data_view(SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeInt8: { + int8_t* data = reinterpret_cast( + tensor->data_view(SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeUint16: { + uint16_t* data = reinterpret_cast( + tensor->data_view(SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + case SRTensorTypeUint8: { + uint8_t* data = reinterpret_cast( + tensor->data_view(SRMemLayoutContiguous)); + return py::array(tensor->dims(), data, free_when_done); + } + default : + throw SRRuntimeException("Could not infer type in "\ + "PyDataSet::get_tensor()."); } - default : - throw SRRuntimeException("Could not infer type in "\ - 
"PyDataSet::get_tensor()."); - } + }); } -void PyDataset::add_meta_scalar(const std::string& name, py::array data, std::string& type) +void PyDataset::add_meta_scalar( + const std::string& name, py::array data, std::string& type) { - try { + MAKE_DATASET_API({ auto buffer = data.request(); void* ptr = buffer.ptr; SRMetaDataType ttype = METADATA_TYPE_MAP.at(type); _dataset->add_meta_scalar(name, ptr, ttype); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing add_meta_scalar."); - } + }); } void PyDataset::add_meta_string(const std::string& name, const std::string& data) { - try { + MAKE_DATASET_API({ _dataset->add_meta_string(name, data); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing add_meta_string."); - } + }); } py::array PyDataset::get_meta_scalars(const std::string& name) { - SRMetaDataType type = SRMetadataTypeInvalid; - size_t length = 0; - void *ptr = NULL; - try { + return MAKE_DATASET_API({ + SRMetaDataType type = SRMetadataTypeInvalid; + size_t length = 0; + void *ptr = NULL; + _dataset->get_meta_scalars(name, ptr, length, type); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_meta_scalars."); - } - // detect data type - switch (type) { - case SRMetadataTypeDouble: { - double* data = reinterpret_cast(ptr); - return py::array(length, data, py::none()); - } - case SRMetadataTypeFloat: { - float* data = reinterpret_cast(ptr); - return py::array(length, data, py::none()); - } - case SRMetadataTypeInt32: { - int32_t* data = reinterpret_cast(ptr); - return py::array(length, data, py::none()); - } - case SRMetadataTypeInt64: { - int64_t* data = reinterpret_cast(ptr); - return py::array(length, data, py::none()); - } - case SRMetadataTypeUint32: { - uint32_t* data = reinterpret_cast(ptr); - return py::array(length, data, py::none()); - } - case SRMetadataTypeUint64: { - uint64_t* data = reinterpret_cast(ptr); - return py::array(length, data, py::none()); - } - case SRMetadataTypeString: { - throw SRRuntimeException("MetaData is of type string. 
"\ - "Use get_meta_strings method."); + // detect data type + switch (type) { + case SRMetadataTypeDouble: { + double* data = reinterpret_cast(ptr); + return py::array(length, data, py::none()); + } + case SRMetadataTypeFloat: { + float* data = reinterpret_cast(ptr); + return py::array(length, data, py::none()); + } + case SRMetadataTypeInt32: { + int32_t* data = reinterpret_cast(ptr); + return py::array(length, data, py::none()); + } + case SRMetadataTypeInt64: { + int64_t* data = reinterpret_cast(ptr); + return py::array(length, data, py::none()); + } + case SRMetadataTypeUint32: { + uint32_t* data = reinterpret_cast(ptr); + return py::array(length, data, py::none()); + } + case SRMetadataTypeUint64: { + uint64_t* data = reinterpret_cast(ptr); + return py::array(length, data, py::none()); + } + case SRMetadataTypeString: { + throw SRRuntimeException("MetaData is of type string. "\ + "Use get_meta_strings method."); + } + default : + throw SRRuntimeException("Could not infer type in "\ + "PyDataSet::get_meta_scalars()."); } - default : - throw SRRuntimeException("Could not infer type"); - } + }); } std::string PyDataset::get_name() { - try { + return MAKE_DATASET_API({ return _dataset->get_name(); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_name."); - } + }); } py::list PyDataset::get_meta_strings(const std::string& name) { - try { + return MAKE_DATASET_API({ // We return a copy return py::cast(_dataset->get_meta_strings(name)); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_meta_strings."); - } + }); } // Retrieve the names of all tensors in the DataSet py::list PyDataset::get_tensor_names() { - try { + return MAKE_DATASET_API({ // We return a copy return py::cast(_dataset->get_tensor_names()); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_tensor_names."); - } + }); } // Retrieve the data type of a Tensor in the DataSet std::string PyDataset::get_tensor_type(const std::string& name) { - try { + return MAKE_DATASET_API({ // get the type SRTensorType ttype = _dataset->get_tensor_type(name); switch (ttype) { @@ -369,47 +288,31 @@ std::string PyDataset::get_tensor_type(const std::string& name) throw SRRuntimeException("Unrecognized type in "\ "PyDataSet::get_tensor_type()."); } - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_tensor_type."); - }} + }); +} + +// Retrieve the dimensions of a Tensor in the DataSet +py::list PyDataset::get_tensor_dims(const std::string& name) +{ + return MAKE_DATASET_API({ + // We return a copy + return py::cast(_dataset->get_tensor_dims(name)); + }); +} // Retrieve the names of all metadata fields in the DataSet py::list PyDataset::get_metadata_field_names() { - try { + return MAKE_DATASET_API({ // We return a copy return py::cast(_dataset->get_metadata_field_names()); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_metadata_field_names."); - } + }); } // Retrieve the data type of a metadata field in the DataSet std::string PyDataset::get_metadata_field_type(const std::string& name) { - try { + return MAKE_DATASET_API({ // get the type auto mdtype = _dataset->get_metadata_field_type(name); for (auto it = METADATA_TYPE_MAP.begin(); @@ -419,21 +322,15 @@ std::string PyDataset::get_metadata_field_type(const std::string& name) } throw SRRuntimeException("Unrecognized type in "\ "PyDataSet::get_metadata_field_type()."); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing get_metadata_field_type."); - } + }); } -// EOF +// Create a string representation of the DataSet +std::string PyDataset::to_string() +{ + return MAKE_DATASET_API({ + return _dataset->to_string(); + }); +} +// EOF diff --git a/2023-01/smartsim/smartredis/src/python/src/pylogcontext.cpp b/2023-01/smartsim/smartredis/src/python/src/pylogcontext.cpp index bca06ac6..085e487a 100644 --- a/2023-01/smartsim/smartredis/src/python/src/pylogcontext.cpp +++ b/2023-01/smartsim/smartredis/src/python/src/pylogcontext.cpp @@ -35,12 +35,16 @@ using namespace SmartRedis; namespace py = pybind11; -PyLogContext::PyLogContext(const std::string& context) - : PySRObject(context) +// Decorator to standardize exception handling in PyBind LogContext API methods +template +auto pb_logcontext_api(T&& logcontext_api_func, const char* name) { - _logcontext = NULL; + // we create a closure below + auto decorated = + [name, logcontext_api_func = std::forward(logcontext_api_func)](auto&&... args) + { try { - _logcontext = new LogContext(context); + return logcontext_api_func(std::forward(args)...); } catch (Exception& e) { // exception is already prepared for caller @@ -52,25 +56,43 @@ PyLogContext::PyLogContext(const std::string& context) } catch (...) 
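// ---------------------------------------------------------------------------
// get_metadata_field_type above recovers an enum's string name by walking
// METADATA_TYPE_MAP until the mapped value matches. That reverse lookup is
// linear, which is fine for a map with a handful of entries; the same idea
// generically:
#include <map>
#include <stdexcept>
#include <string>

template <typename K, typename V>
const K& demo_reverse_lookup(const std::map<K, V>& m, const V& value)
{
    for (const auto& kv : m)
        if (kv.second == value)
            return kv.first;
    throw std::out_of_range("demo_reverse_lookup: unmapped value");
}
// ---------------------------------------------------------------------------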
{ // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "during dataset construction."); + std::string msg( + "A non-standard exception was encountered while executing "); + msg += name; + throw SRInternalException(msg); } + }; + return decorated; +} + +// Macro to invoke the decorator with a lambda function +#define MAKE_LOGCONTEXT_API(stuff)\ + pb_logcontext_api([&] { stuff }, __func__)() + + + +PyLogContext::PyLogContext(const std::string& context) + : PySRObject(context) +{ + MAKE_LOGCONTEXT_API({ + _logcontext = new LogContext(context); + }); } PyLogContext::PyLogContext(LogContext* logcontext) : PySRObject(logcontext->get_context()) { - _logcontext = logcontext; + MAKE_LOGCONTEXT_API({ + _logcontext = logcontext; + }); } PyLogContext::~PyLogContext() { - if (_logcontext != NULL) { - delete _logcontext; - _logcontext = NULL; - } -} - -LogContext* PyLogContext::get() { - return _logcontext; + MAKE_LOGCONTEXT_API({ + if (_logcontext != NULL) { + delete _logcontext; + _logcontext = NULL; + } + }); } diff --git a/2023-01/smartsim/smartredis/src/python/src/pysrobject.cpp b/2023-01/smartsim/smartredis/src/python/src/pysrobject.cpp index b330847a..3a575d52 100644 --- a/2023-01/smartsim/smartredis/src/python/src/pysrobject.cpp +++ b/2023-01/smartsim/smartredis/src/python/src/pysrobject.cpp @@ -36,11 +36,17 @@ using namespace SmartRedis; namespace py = pybind11; -PySRObject::PySRObject(const std::string& context) + +// Decorator to standardize exception handling in PyBind SRObject API methods +template +auto pb_srobject_api(T&& srobject_api_func, const char* name) { - _srobject = NULL; + // we create a closure below + auto decorated = + [name, srobject_api_func = std::forward(srobject_api_func)](auto&&... args) + { try { - _srobject = new SRObject(context); + return srobject_api_func(std::forward(args)...); } catch (Exception& e) { // exception is already prepared for caller @@ -52,87 +58,64 @@ PySRObject::PySRObject(const std::string& context) } catch (...) { // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "during dataset construction."); + std::string msg( + "A non-standard exception was encountered while executing "); + msg += name; + throw SRInternalException(msg); } + }; + return decorated; } -PySRObject::PySRObject(SRObject* srobject) +// Macro to invoke the decorator with a lambda function +#define MAKE_SROBJECT_API(stuff)\ + pb_srobject_api([&] { stuff }, __func__)() + + +PySRObject::PySRObject(const std::string& context) { - _srobject = srobject; + MAKE_SROBJECT_API({ + _srobject = new SRObject(context); + }); } -PySRObject::~PySRObject() +PySRObject::PySRObject(SRObject* srobject) { - if (_srobject != NULL) { - delete _srobject; - _srobject = NULL; - } + MAKE_SROBJECT_API({ + _srobject = srobject; + }); } -SRObject* PySRObject::get() { - return _srobject; +PySRObject::~PySRObject() +{ + MAKE_SROBJECT_API({ + if (_srobject != NULL) { + delete _srobject; + _srobject = NULL; + } + }); } void PySRObject::log_data( SRLoggingLevel level, const std::string& data) const { - try { + MAKE_SROBJECT_API({ _srobject->log_data(level, data); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) 
{ - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing log_data."); - } + }); } void PySRObject::log_warning( SRLoggingLevel level, const std::string& data) const { - try { + MAKE_SROBJECT_API({ _srobject->log_warning(level, data); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing log_warning."); - } + }); } void PySRObject::log_error( SRLoggingLevel level, const std::string& data) const { - try { + MAKE_SROBJECT_API({ _srobject->log_error(level, data); - } - catch (Exception& e) { - // exception is already prepared for caller - throw; - } - catch (std::exception& e) { - // should never happen - throw SRInternalException(e.what()); - } - catch (...) { - // should never happen - throw SRInternalException("A non-standard exception was encountered "\ - "while executing log_error."); - } + }); } diff --git a/2023-01/smartsim/smartredis/setup_test_env.sh b/2023-01/smartsim/smartredis/tests/CMakeLists.txt similarity index 55% rename from 2023-01/smartsim/smartredis/setup_test_env.sh rename to 2023-01/smartsim/smartredis/tests/CMakeLists.txt index 7be89b8e..771d3950 100644 --- a/2023-01/smartsim/smartredis/setup_test_env.sh +++ b/2023-01/smartsim/smartredis/tests/CMakeLists.txt @@ -1,5 +1,3 @@ -#!/bin/bash - # BSD 2-Clause License # # Copyright (c) 2021-2023, Hewlett Packard Enterprise @@ -26,33 +24,25 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -# setup the necessary environment variables for testing and builds -# this must be *sourced* in the top level smartsim directory in the -# shell that will be used for building. - -echo "Setting up SmartRedis environment for testing" - -export SMARTREDIS_TEST_CLUSTER=True -echo SMARTREDIS_TEST_CLUSTER set to $SMARTREDIS_TEST_CLUSTER - -export SMARTREDIS_TEST_DEVICE=cpu -echo SMARTREDIS_TEST_DEVICE set to $SMARTREDIS_TEST_DEVICE - -# Redis -if [[ -f ./third-party/redis/src/redis-server ]]; then - export REDIS_INSTALL_PATH="$(pwd)/third-party/redis/src" - echo "Set Redis server install path to $REDIS_INSTALL_PATH" -fi - -# detect RedisAI CPU installation -if [[ -f ./third-party/RedisAI/install-cpu/redisai.so ]]; then - export REDISAI_CPU_INSTALL_PATH="$(pwd)/third-party/RedisAI/install-cpu" - echo "Set RedisAI CPU install path to $REDISAI_CPU_INSTALL_PATH" -fi - -# detect RedisAI GPU installation -if [[ -f ./third-party/RedisAI/install-gpu/redisai.so ]]; then - export REDISAI_GPU_INSTALL_PATH="$(pwd)/third-party/RedisAI/install-gpu" - echo "Set RedisAI GPU install path to $REDISAI_GPU_INSTALL_PATH" -fi +# Project definition for the SmartRedis-Tests project +cmake_minimum_required(VERSION 3.13) +project(SmartSim-Tests) + +# Enable language support for the examples +enable_language(C) +enable_language(CXX) +if (SR_FORTRAN) + enable_language(Fortran) +endif() + +# Bring in subdirectories +add_subdirectory(c) +add_subdirectory(cpp) +if (SR_FORTRAN) + add_subdirectory(fortran) +endif() + +# NOTE: The docker subdirectory is designed to be built within a container, +# not as part of the normal test build. 
We therefore do not include it in a +# test build +#add_subdirectory(docker) diff --git a/2023-01/smartsim/smartredis/tests/c/CMakeLists.txt b/2023-01/smartsim/smartredis/tests/c/CMakeLists.txt index 69a52795..dc5b6de2 100644 --- a/2023-01/smartsim/smartredis/tests/c/CMakeLists.txt +++ b/2023-01/smartsim/smartredis/tests/c/CMakeLists.txt @@ -24,74 +24,83 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -project(CClientTester) - +# Project definition for the SmartRedis-Tests-C project cmake_minimum_required(VERSION 3.13) +project(SmartSim-Tests-C) + +# Enable language support for the tests +enable_language(C) -set(CMAKE_VERBOSE_MAKEFILE ON) -set(CMAKE_BUILD_TYPE DEBUG) +# Configure the build set(CMAKE_CXX_STANDARD 17) SET(CMAKE_C_STANDARD 99) - -find_library(SR_LIB smartredis PATHS ../../install/lib NO_DEFAULT_PATH REQUIRED) - -include_directories(SYSTEM - /usr/local/include - ../../install/include -) - -add_executable(client_test_dataset_aggregation -client_test_dataset_aggregation.c -) - -target_link_libraries(client_test_dataset_aggregation - ${SR_LIB} -) - -add_executable(client_test_dataset_exists - client_test_dataset_exists.c -) - -target_link_libraries(client_test_dataset_exists - ${SR_LIB} -) - -add_executable(client_test_logging - client_test_logging.c -) - -target_link_libraries(client_test_logging - ${SR_LIB} -) - -add_executable(client_test_put_unpack_1D - client_test_put_unpack_1D.c -) - -target_link_libraries(client_test_put_unpack_1D - ${SR_LIB} +set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/../..") +include(smartredis_defs) + +# Assume by default that users should link against the +# install directory in this repository +if(NOT DEFINED SMARTREDIS_INSTALL_PATH) + set(SMARTREDIS_INSTALL_PATH "../../install/") +endif() + +# Locate dependencies +add_library(smartredis-main ${SMARTREDIS_LINK_MODE} IMPORTED) +find_library(SR_LIB ${SMARTREDIS_LIB} + PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH + REQUIRED + ${SMARTREDIS_LINK_MODE} ) - -add_executable(client_test_put_get_1D - client_test_put_get_1D.c +set_target_properties(smartredis-main PROPERTIES + IMPORTED_LOCATION ${SR_LIB} ) -target_link_libraries(client_test_put_get_1D - ${SR_LIB} -) - -add_executable(client_test_put_get_2D - client_test_put_get_2D.c -) - -target_link_libraries(client_test_put_get_2D - ${SR_LIB} +# Select libraries for build +if (STATIC_BUILD) + # Mark that SmartRedis requires the C++ linker + set_target_properties(smartredis-main PROPERTIES + IMPORTED_LOCATION ${SR_LIB} + IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" + ) + + # Static builds have an extra dependency on the Pthreads library + find_package(Threads REQUIRED) + set(SMARTREDIS_LIBRARIES + smartredis-main + Threads::Threads + ) +else() + # Shared builds only need the SmartRedis library + set(SMARTREDIS_LIBRARIES + smartredis-main + ) +endif() + +# Define include directories for header files +include_directories(SYSTEM + /usr/local/include + ${SMARTREDIS_INSTALL_PATH}/include ) -add_executable(client_test_put_get_3D - client_test_put_get_3D.c +# Define all the tests to be built +list(APPEND EXECUTABLES + client_test_dataset_aggregation + client_test_dataset_exists + client_test_logging + client_test_put_unpack_1D + client_test_put_get_1D + client_test_put_get_2D + client_test_put_get_3D ) -target_link_libraries(client_test_put_get_3D - ${SR_LIB} -) +# Build the tests +foreach(EXECUTABLE ${EXECUTABLES}) + add_executable(${EXECUTABLE}_c_test + 
${EXECUTABLE}.c + ) + set_target_properties(${EXECUTABLE}_c_test PROPERTIES + OUTPUT_NAME ${EXECUTABLE} + ) + target_link_libraries(${EXECUTABLE}_c_test + ${SMARTREDIS_LIBRARIES} + ) +endforeach() diff --git a/2023-01/smartsim/smartredis/tests/c/c_client_test_utils.h b/2023-01/smartsim/smartredis/tests/c/c_client_test_utils.h index fa35a394..56a3e49a 100644 --- a/2023-01/smartsim/smartredis/tests/c/c_client_test_utils.h +++ b/2023-01/smartsim/smartredis/tests/c/c_client_test_utils.h @@ -53,15 +53,15 @@ void to_lower(char* s, int maxchars) { bool use_cluster() { - /* This function determines if a cluster - configuration should be used in the test - when creating a Client. + /* This function determines if a cluster configuration should be used in + the test when creating a Client. + (deprecated) */ - char* smartredis_test_cluster = getenv("SMARTREDIS_TEST_CLUSTER"); - to_lower(smartredis_test_cluster, 256); + char* server_type = getenv("SR_DB_TYPE"); + to_lower(server_type, 256); - if(smartredis_test_cluster) { - if(strcmp(smartredis_test_cluster, "true")==0) + if(server_type) { + if(strcmp(server_type, "clustered")==0) return true; } return false; diff --git a/2023-01/smartsim/smartredis/tests/c/client_test_dataset_aggregation.c b/2023-01/smartsim/smartredis/tests/c/client_test_dataset_aggregation.c index a8780661..d9f817ea 100644 --- a/2023-01/smartsim/smartredis/tests/c/client_test_dataset_aggregation.c +++ b/2023-01/smartsim/smartredis/tests/c/client_test_dataset_aggregation.c @@ -135,7 +135,7 @@ int main(int argc, char* argv[]) // Initialize client void *client = NULL; - if (SRNoError != SmartRedisCClient(use_cluster(), logger_name, cid_len, &client) || + if (SRNoError != SimpleCreateClient(logger_name, cid_len, &client) || NULL == client) { printf("Failed to initialize client!\n"); printf("Test passed: NO\n"); diff --git a/2023-01/smartsim/smartredis/tests/c/client_test_dataset_exists.c b/2023-01/smartsim/smartredis/tests/c/client_test_dataset_exists.c index a2174b16..c08b9707 100644 --- a/2023-01/smartsim/smartredis/tests/c/client_test_dataset_exists.c +++ b/2023-01/smartsim/smartredis/tests/c/client_test_dataset_exists.c @@ -43,7 +43,7 @@ int missing_dataset(char *dataset_name, size_t dataset_name_len) void *client = NULL; const char* logger_name = "missing_dataset"; size_t cid_len = strlen(logger_name); - if (SRNoError != SmartRedisCClient(use_cluster(), logger_name, cid_len, &client)) + if (SRNoError != SimpleCreateClient(logger_name, cid_len, &client)) return -1; bool exists = false; @@ -80,7 +80,7 @@ int present_dataset(char *dataset_name, size_t dataset_name_len) size_t cid_len = strlen(logger_name); // Initialize client and dataset - if (SRNoError != SmartRedisCClient(use_cluster(), logger_name, cid_len, &client) || NULL == client) + if (SRNoError != SimpleCreateClient(logger_name, cid_len, &client) || NULL == client) return -1; if (SRNoError != CDataSet(dataset_name, dataset_name_len, &dataset) || NULL == dataset) return -1; diff --git a/2023-01/smartsim/smartredis/tests/c/client_test_logging.c b/2023-01/smartsim/smartredis/tests/c/client_test_logging.c index 7ea097d4..96f391b4 100644 --- a/2023-01/smartsim/smartredis/tests/c/client_test_logging.c +++ b/2023-01/smartsim/smartredis/tests/c/client_test_logging.c @@ -61,9 +61,8 @@ int main(int argc, char* argv[]) size_t ctx_logcontext_len = strlen(ctx_logcontext); // Initialize client, dataset, logcontext - if (SRNoError != SmartRedisCClient( - use_cluster(), ctx_client, ctx_client_len, - &client) || NULL == client) { + if 
(SRNoError != SimpleCreateClient( + ctx_client, ctx_client_len, &client) || NULL == client) { return -1; } if (SRNoError != CDataSet( diff --git a/2023-01/smartsim/smartredis/tests/c/client_test_put_get_1D.c b/2023-01/smartsim/smartredis/tests/c/client_test_put_get_1D.c index 9e246057..0b36a7fb 100644 --- a/2023-01/smartsim/smartredis/tests/c/client_test_put_get_1D.c +++ b/2023-01/smartsim/smartredis/tests/c/client_test_put_get_1D.c @@ -119,14 +119,14 @@ int put_get_1D_tensor_double(size_t* dims, size_t n_dims, void* client = NULL; const char* logger_name = "put_get_1D_tensor_double"; size_t cid_len = strlen(logger_name); - if (SRNoError != SmartRedisCClient(use_cluster(), logger_name, cid_len, &client)) + if (SRNoError != SimpleCreateClient(logger_name, cid_len, &client)) return -1; double* tensor = (double*)malloc(dims[0]*sizeof(double)); double* result = 0; for(size_t i=0; i dims, - std::string keyout="", - std::string keyin="") + std::vector dims, + std::string keyout="", + std::string keyin="") { - SmartRedis::Client client(use_cluster(), "client_test_ensemble::producer"); + SmartRedis::Client client("client_test_ensemble::producer"); client.use_model_ensemble_prefix(true); // Tensors @@ -77,7 +77,7 @@ void produce( // Models std::string model_key = "mnist_model"; - std::string model_file = "./../mnist_data/mnist_cnn.pt"; + std::string model_file = "mnist_data/mnist_cnn.pt"; client.set_model_from_file(model_key, model_file, "TORCH", "CPU"); @@ -86,7 +86,7 @@ void produce( // Scripts std::string script_key = "mnist_script"; - std::string script_file = "./../mnist_data/data_processing_script.txt"; + std::string script_file = "mnist_data/data_processing_script.txt"; client.set_script_from_file(script_key, "CPU", script_file); if(!client.model_exists(script_key)) @@ -128,7 +128,7 @@ void consume(std::vector dims, std::string keyout="", std::string keyin="") { - SmartRedis::Client client(use_cluster(), "client_test_ensemble::consumer"); + SmartRedis::Client client("client_test_ensemble::consumer"); client.use_model_ensemble_prefix(true); // Tensors diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_ensemble_dataset.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_ensemble_dataset.cpp index 77fd1ff8..12ba8f26 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_ensemble_dataset.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_ensemble_dataset.cpp @@ -35,8 +35,9 @@ void rename_dataset(std::string keyout) { std::vector dims({10,10,2}); - DATASET_TEST_UTILS::DatasetTestClient client(use_cluster(), "client_test_ensemble_dataset"); + DATASET_TEST_UTILS::DatasetTestClient client("client_test_ensemble_dataset"); client.use_tensor_ensemble_prefix(true); + client.use_dataset_ensemble_prefix(true); double*** t_send_1 = allocate_3D_array(dims[0], dims[1], dims[2]); @@ -129,16 +130,15 @@ void rename_dataset(std::string keyout) //Check that the metadata values are correct for the metadata DATASET_TEST_UTILS::check_dataset_metadata(retrieved_dataset); - - return; } void add_to_aggregation_list(std::string keyout) { std::vector dims({10,10,2}); - DATASET_TEST_UTILS::DatasetTestClient client(use_cluster(), "client_test_ensemble_dataset"); + DATASET_TEST_UTILS::DatasetTestClient client("client_test_ensemble_dataset"); client.use_tensor_ensemble_prefix(true); + client.use_dataset_ensemble_prefix(true); client.use_list_ensemble_prefix(true); double*** t_send_1 = @@ -235,8 +235,6 @@ void add_to_aggregation_list(std::string keyout) //Check that the metadata values are 
correct for the metadata DATASET_TEST_UTILS::check_dataset_metadata(retrieved_dataset); - - return; } int main(int argc, char* argv[]) { diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_mnist.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_mnist.cpp index a07d4d9d..4ec4ecef 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_mnist.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_mnist.cpp @@ -31,7 +31,7 @@ void load_mnist_image_to_array(float**** img) { - std::string image_file = "../mnist_data/one.raw"; + std::string image_file = "mnist_data/one.raw"; std::ifstream fin(image_file, std::ios::binary); std::ostringstream ostream; ostream << fin.rdbuf(); @@ -52,7 +52,7 @@ void load_mnist_image_to_array(float**** img) void run_mnist(const std::string& model_name, const std::string& script_name) { - SmartRedis::Client client(use_cluster(), "client_test_mnist"); + SmartRedis::Client client("client_test_mnist"); float**** array = allocate_4D_array(1,1,28,28); float** result = allocate_2D_array(1, 10); @@ -78,13 +78,14 @@ void run_mnist(const std::string& model_name, int main(int argc, char* argv[]) { - SmartRedis::Client client(use_cluster(), "client_test_mnist"); + SmartRedis::Client client("client_test_mnist"); std::string model_key = "mnist_model"; - std::string model_file = "./../mnist_data/mnist_cnn.pt"; + std::string model_file = "mnist_data/mnist_cnn.pt"; + client.set_model_chunk_size(1024 * 1024); client.set_model_from_file(model_key, model_file, "TORCH", "CPU"); std::string script_key = "mnist_script"; - std::string script_file = "./../mnist_data/data_processing_script.txt"; + std::string script_file = "mnist_data/data_processing_script.txt"; client.set_script_from_file(script_key, "CPU", script_file); std::string_view model = client.get_model(model_key); diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_mnist_dataset.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_mnist_dataset.cpp index 402adf0d..e96aa453 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_mnist_dataset.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_mnist_dataset.cpp @@ -31,7 +31,7 @@ void load_mnist_image_to_array(float**** img) { - std::string image_file = "../mnist_data/one.raw"; + std::string image_file = "mnist_data/one.raw"; std::ifstream fin(image_file, std::ios::binary); std::ostringstream ostream; ostream << fin.rdbuf(); @@ -52,7 +52,7 @@ void load_mnist_image_to_array(float**** img) void run_mnist(const std::string& model_name, const std::string& script_name) { - SmartRedis::Client client(use_cluster(), "client_test_mnist_dataset"); + SmartRedis::Client client("client_test_mnist_dataset"); float**** array = allocate_4D_array(1,1,28,28); float** result = allocate_2D_array(1, 10); @@ -82,13 +82,13 @@ void run_mnist(const std::string& model_name, int main(int argc, char* argv[]) { - SmartRedis::Client client(use_cluster(), "client_test_mnist_dataset"); + SmartRedis::Client client("client_test_mnist_dataset"); std::string model_key = "mnist_model"; - std::string model_file = "./../mnist_data/mnist_cnn.pt"; + std::string model_file = "mnist_data/mnist_cnn.pt"; client.set_model_from_file(model_key, model_file, "TORCH", "CPU"); std::string script_key = "mnist_script"; - std::string script_file = "./../mnist_data/data_processing_script.txt"; + std::string script_file = "mnist_data/data_processing_script.txt"; client.set_script_from_file(script_key, "CPU", script_file); std::string_view model = client.get_model(model_key); diff 
--git a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_1D.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_1D.cpp index 4b2f98c7..9a9b6d5c 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_1D.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_1D.cpp @@ -38,7 +38,7 @@ void put_get_1D_array( SRTensorType type, std::string key_suffix="") { - SmartRedis::Client client(use_cluster(), "client_test_put_get_1D"); + SmartRedis::Client client("client_test_put_get_1D"); //Allocate and fill arrays T_send* array = (T_send*)malloc(dims[0]*sizeof(T_send)); diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_2D.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_2D.cpp index 54ca6b18..025ffce8 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_2D.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_2D.cpp @@ -39,7 +39,7 @@ void put_get_2D_array( std::string key_suffix="") { - SmartRedis::Client client(use_cluster(), "client_test_put_get_2D"); + SmartRedis::Client client("client_test_put_get_2D"); //Allocate and fill arrays T_send** array = allocate_2D_array(dims[0], dims[1]); diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_3D.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_3D.cpp index 8b5b4c6e..54fda99e 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_3D.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_3D.cpp @@ -38,7 +38,7 @@ void put_get_3D_array( SRTensorType type, std::string key_suffix="") { - SmartRedis::Client client(use_cluster(), "client_test_put_get_3D"); + SmartRedis::Client client("client_test_put_get_3D"); //Allocate and fill arrays T_send*** array = allocate_3D_array(dims[0], dims[1], dims[2]); diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_3D_static_values.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_3D_static_values.cpp index 01d5184e..b8ce2442 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_3D_static_values.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_3D_static_values.cpp @@ -37,7 +37,7 @@ void put_get_3D_array( SRTensorType type, std::string key_suffix="") { - SmartRedis::Client client(use_cluster(), "client_test_put_get_3D_static_values"); + SmartRedis::Client client("client_test_put_get_3D_static_values"); //Allocate and fill arrays T_send*** array = allocate_3D_array(dims[0], dims[1], dims[2]); diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_contiguous_3D.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_contiguous_3D.cpp index 634ce96f..138d1afc 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_contiguous_3D.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_contiguous_3D.cpp @@ -38,7 +38,7 @@ void put_get_3D_array( SRTensorType type, std::string key_suffix="") { - SmartRedis::Client client(use_cluster(), "client_test_put_get_contiguous_3D"); + SmartRedis::Client client("client_test_put_get_contiguous_3D"); //Allocate and fill arrays T_send* array = diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_transpose_3D.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_transpose_3D.cpp index 0f5fff81..d0184a72 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_transpose_3D.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_put_get_transpose_3D.cpp @@ -60,7 +60,7 
@@ void put_get_3D_array( SRMemoryLayout send_direction = SRMemLayoutContiguous, SRMemoryLayout recv_direction = SRMemLayoutContiguous) { - SmartRedis::Client client(use_cluster(), "client_test_put_get_transpose_3D"); + SmartRedis::Client client("client_test_put_get_transpose_3D"); //Allocate and fill arrays T_send* array = (T_send*)malloc(dims[0]*dims[1]*dims[2]*sizeof(T_send)); diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_rename_dataset.cpp b/2023-01/smartsim/smartredis/tests/cpp/client_test_rename_dataset.cpp index 148e583d..5d14719b 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_rename_dataset.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_rename_dataset.cpp @@ -52,7 +52,7 @@ void rename_dataset( fill_array(t_send_3, dims[0], dims[1], dims[2]); //Create Client and DataSet - DATASET_TEST_UTILS::DatasetTestClient client(use_cluster(), "client_test_rename_dataset"); + DATASET_TEST_UTILS::DatasetTestClient client("client_test_rename_dataset"); SmartRedis::DataSet sent_dataset(dataset_name); //Add metadata to the DataSet diff --git a/2023-01/smartsim/smartredis/tests/cpp/client_test_utils.h b/2023-01/smartsim/smartredis/tests/cpp/client_test_utils.h index 1965bbb1..eb4df51e 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/client_test_utils.h +++ b/2023-01/smartsim/smartredis/tests/cpp/client_test_utils.h @@ -30,18 +30,20 @@ #define SMARTREDIS_TEST_UTILS_H #include +#include #include #include "rediscluster.h" #include "srobject.h" +#include "configoptions.h" using namespace SmartRedis; class RedisClusterTestObject : public RedisCluster { public: - RedisClusterTestObject(const SRObject* context) - : RedisCluster(context) {}; + RedisClusterTestObject(ConfigOptions* cfgopts) + : RedisCluster(cfgopts) {}; std::string get_crc16_prefix(uint64_t hash_slot) { return _get_crc16_prefix(hash_slot); @@ -53,31 +55,34 @@ inline void to_lower(char* s) { c-str into the lowercase value. This assumes the c-str is null terminated. */ - if(!s) + if (s == NULL) return; - while((*s)!=0) { - if( *s>='A' && *s<='Z') + while (*s != '\0') { + if (*s >= 'A' && *s <= 'Z') *s = *s - 'A' + 'a'; s++; } - return; } inline bool use_cluster() { /* This function determines if a cluster - configuration should be used in the test - when creating a Client. + configuration is being used */ - char* smartredis_test_cluster = std::getenv("SMARTREDIS_TEST_CLUSTER"); - to_lower(smartredis_test_cluster); - - if(smartredis_test_cluster) { - if(std::strcmp(smartredis_test_cluster, "true")==0) - return true; + char* server_type = std::getenv("SR_DB_TYPE"); + if (server_type == NULL) + return false; + + // lower-case a copy of the environment variable + // not the value in memory + char copy[256]; + for (int i = 0; i < 256; i++) { + copy[i] = std::tolower(server_type[i]); + if (copy[i] == '\0') + break; } - return false; + return std::strcmp(copy, "clustered") == 0; } template @@ -86,9 +91,9 @@ T** allocate_2D_array(int dim_1, int dim_2) /* This function allocates a 2D array and and returns a pointer to that 2D array. */ - T **array = (T **)malloc(dim_1*sizeof(T *)); - for (int i=0; i void free_4D_array(T**** array, int dim_1, int dim_2, int dim_3) { - for(int i=0; i @@ -176,9 +180,10 @@ bool is_equal_1D_array(T* a, U* b, int dim_1) /* This function compares two arrays to make sure their values are identical. 
*/ - for(int i=0; i distribution; - for(int i=0; i @@ -253,7 +260,7 @@ void set_1D_array_integral_values(T* a, int dim_1) T t_min = std::numeric_limits::min(); T t_max = std::numeric_limits::max(); std::uniform_int_distribution distribution(t_min, t_max); - for(int i=0; i datasets; diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client.cpp b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client.cpp index a7db1594..bc551491 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client.cpp @@ -101,7 +101,7 @@ SCENARIO("Testing Dataset Functions on Client Object", "[Client]") log_data(context, LLDebug, "***Beginning Client testing***"); GIVEN("A Client object") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); THEN("get, rename, and copy DataSet called on " "a nonexistent DataSet throws errors") @@ -111,7 +111,7 @@ SCENARIO("Testing Dataset Functions on Client Object", "[Client]") KeyException); CHECK_THROWS_AS( client.rename_dataset("DNE", "rename_DNE"), - KeyException); + KeyException); CHECK_THROWS_AS( client.copy_dataset("src_DNE", "dest_DNE"), KeyException); @@ -200,7 +200,7 @@ SCENARIO("Testing Tensor Functions on Client Object", "[Client]") log_data(context, LLDebug, "***Beginning Client tensor testing***"); GIVEN("A Client object") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); AND_WHEN("Tensors of each type are created and put into the Client") { @@ -483,8 +483,14 @@ SCENARIO("Testing INFO Functions on Client Object", "[Client]") log_data(context, LLDebug, "***Beginning Client INFO testing***"); GIVEN("A Client object") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); + THEN("The client can be serialized") + { + std::string serial = client.to_string(); + CHECK(serial.length() > 0); + std::cout << client; + } WHEN("INFO or CLUSTER INFO is called on database with " "an invalid address") { @@ -503,7 +509,7 @@ SCENARIO("Testing INFO Functions on Client Object", "[Client]") { THEN("No errors with be thrown for both cluster and " - "non-cluster environemnts") + "non-cluster environments") { std::string db_address = parse_SSDB(std::getenv("SSDB")); @@ -535,7 +541,7 @@ SCENARIO("Testing AI.INFO Functions on Client Object", "[Client]") log_data(context, LLDebug, "***Beginning Client AI.INFO testing***"); GIVEN("A Client object") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); WHEN("AI.INFO called on database with an invalid address") { @@ -565,7 +571,7 @@ SCENARIO("Testing AI.INFO Functions on Client Object", "[Client]") { std::string db_address = parse_SSDB(std::getenv("SSDB")); std::string model_key = "ai_info_model"; - std::string model_file = "./../../mnist_data/mnist_cnn.pt"; + std::string model_file = "../mnist_data/mnist_cnn.pt"; std::string backend = "TORCH"; std::string device = "CPU"; parsed_reply_map reply; @@ -588,7 +594,7 @@ SCENARIO("Testing FLUSHDB on empty Client Object", "[Client][FLUSHDB]") GIVEN("An empty non-cluster Client object") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); WHEN("FLUSHDB is called on database with " "an invalid address") @@ -632,7 +638,7 @@ SCENARIO("Testing FLUSHDB on Client Object", "[Client][FLUSHDB]") if (use_cluster()) return; - Client client(use_cluster(), "test_client"); + Client client("test_client"); std::string dataset_name = "test_dataset_name"; DataSet 
dataset(dataset_name); dataset.add_meta_string("meta_string_name", "meta_string_val"); @@ -672,7 +678,7 @@ SCENARIO("Testing CONFIG GET and CONFIG SET on Client Object", "[Client]") GIVEN("A Client object") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); WHEN("CONFIG GET or CONFIG SET are called on databases with " "invalid addresses ") @@ -721,7 +727,7 @@ SCENARIO("Test CONFIG GET on an unsupported command", "[Client]") GIVEN("A client object") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); std::string address = parse_SSDB(std::getenv("SSDB")); WHEN("CONFIG GET is called with an unsupported command") @@ -746,7 +752,7 @@ SCENARIO("Test CONFIG SET on an unsupported command", "[Client]") GIVEN("A client object") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); std::string address = parse_SSDB(std::getenv("SSDB")); WHEN("CONFIG SET is called with an unsupported command") @@ -771,7 +777,7 @@ SCENARIO("Testing SAVE command on Client Object", "[!mayfail][Client][SAVE]") GIVEN("A client object and some data") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); std::string dataset_name = "test_save_dataset"; DataSet dataset(dataset_name); dataset.add_meta_string("meta_string_save_name", "meta_string_val"); @@ -818,8 +824,10 @@ SCENARIO("Test that prefixing covers all hash slots of a cluster", "[Client]") GIVEN("A test RedisCluster test object") { + ConfigOptions* cfgopts = ConfigOptions::create_from_environment("").release(); LogContext context("test_client"); - RedisClusterTestObject redis_cluster(&context); + cfgopts->_set_log_context(&context); + RedisClusterTestObject redis_cluster(cfgopts); WHEN("A prefix is requested for a hash slot between 0 and 16384") { @@ -858,11 +866,11 @@ SCENARIO("Testing Multi-GPU Function error cases", "[Client]") GIVEN("A Client object, a script, and a model") { - Client client(use_cluster(), "test_client"); + Client client("test_client"); std::string model_key = "a_model"; - std::string model_file = "./../../mnist_data/mnist_cnn.pt"; + std::string model_file = "../mnist_data/mnist_cnn.pt"; std::string script_key = "a_script"; - std::string script_file = "./../../mnist_data/data_processing_script.txt"; + std::string script_file = "../mnist_data/data_processing_script.txt"; std::string backend = "TORCH"; WHEN("set_model_multigpu() called with invalid first gpu") diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client_ensemble.cpp b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client_ensemble.cpp index 737cc2c7..2ce69a89 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client_ensemble.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client_ensemble.cpp @@ -57,7 +57,7 @@ void reset_env_vars(const char* old_keyin, const char* old_keyout) // helper function for loading mnist void load_mnist_image_to_array(float**** img) { - std::string image_file = "../../mnist_data/one.raw"; + std::string image_file = "../mnist_data/one.raw"; std::ifstream fin(image_file, std::ios::binary); std::ostringstream ostream; ostream << fin.rdbuf(); @@ -100,11 +100,11 @@ SCENARIO("Testing Client ensemble using a producer/consumer paradigm") std::string tensor_key = "ensemble_test"; // for model std::string model_key = "mnist_model"; - std::string model_file = "./../../mnist_data/mnist_cnn.pt"; + std::string model_file = "./../mnist_data/mnist_cnn.pt"; // for script std::string script_key = "mnist_script"; 
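// ----------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the upstream patch): the
// recurring change across these C++ tests is that SmartRedis::Client no
// longer takes an explicit use_cluster() flag; the C tests likewise swap
// SmartRedisCClient(use_cluster(), ...) for SimpleCreateClient(...).
// Cluster detection moves from SMARTREDIS_TEST_CLUSTER ("true") to the
// SR_DB_TYPE environment variable ("clustered"), per the rewritten
// use_cluster() helper in client_test_utils.h. In isolation (simplified:
// the real helper lower-cases the value first; "client_test_sketch" is a
// hypothetical logger name; assumes <cstdlib>, <cstring>, "client.h" and
// a database reachable at $SSDB):
{
    const char* db_type = std::getenv("SR_DB_TYPE");
    bool clustered = (db_type != nullptr)
                  && (std::strcmp(db_type, "clustered") == 0);
    (void)clustered; // no longer passed to the constructor
    // old API: SmartRedis::Client client(clustered, "client_test_sketch");
    SmartRedis::Client client("client_test_sketch"); // new API
}
// ----------------------------------------------------------------------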
std::string script_file = - "./../../mnist_data/data_processing_script.txt"; + "./../mnist_data/data_processing_script.txt"; // for setup mnist std::string in_key = "mnist_input"; std::string out_key = "mnist_output"; @@ -131,8 +131,9 @@ SCENARIO("Testing Client ensemble using a producer/consumer paradigm") setenv("SSKEYIN", keyin_env_put, (old_keyin != NULL)); setenv("SSKEYOUT", keyout_env_put, (old_keyout != NULL)); - Client producer_client(use_cluster(), "test_client_ensemble::producer"); + Client producer_client("test_client_ensemble::producer"); producer_client.use_model_ensemble_prefix(true); + producer_client.set_model_chunk_size(1024 * 1024); // Tensors float* array = (float*)malloc(dims[0]*sizeof(float)); @@ -184,7 +185,7 @@ SCENARIO("Testing Client ensemble using a producer/consumer paradigm") setenv("SSKEYIN", keyin_env_get, 1); setenv("SSKEYOUT", keyout_env_get, 1); - Client consumer_client(use_cluster(), "test_client_ensemble::consumer"); + Client consumer_client("test_client_ensemble::consumer"); consumer_client.use_model_ensemble_prefix(true); // Tensors diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client_prefixing.cpp b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client_prefixing.cpp new file mode 100644 index 00000000..4b6138be --- /dev/null +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_client_prefixing.cpp @@ -0,0 +1,97 @@ +/* + * BSD 2-Clause License + * + * Copyright (c) 2021-2023, Hewlett Packard Enterprise + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "../../../third-party/catch/single_include/catch2/catch.hpp" +#include "client.h" +#include "dataset.h" +#include "logger.h" +#include "../client_test_utils.h" + +unsigned long get_time_offset(); + +using namespace SmartRedis; + +// helper function for resetting environment +// variables to their original state +void reset_env_vars(const char* old_keyin, const char* old_keyout); + +SCENARIO("Testing Client prefixing") +{ + std::cout << std::to_string(get_time_offset()) << ": Testing Client prefixing" << std::endl; + std::string context("test_client_prefixing"); + log_data(context, LLDebug, "***Beginning Client Prefixing testing***"); + + GIVEN("Variables that will be used by the producer and consumer") + { + const char* old_keyin = std::getenv("SSKEYIN"); + const char* old_keyout = std::getenv("SSKEYOUT"); + char keyin_env_put[] = "prefix_test"; + char keyout_env_put[] = "prefix_test"; + char keyin_env_get[] = "prefix_test"; + char keyout_env_get[] = "prefix_test"; + size_t dim1 = 10; + std::vector dims = {dim1}; + std::string prefix = "prefix_test"; + std::string dataset_tensor_key = "dataset_tensor"; + std::string dataset_key = "test_dataset"; + std::string tensor_key = "test_tensor"; + + SRTensorType g_type; + std::vector g_dims; + void* g_result; + + THEN("Client prefixing can be tested") + { + setenv("SSKEYIN", keyin_env_put, (old_keyin != NULL)); + setenv("SSKEYOUT", keyout_env_put, (old_keyout != NULL)); + Client client(context); + client.use_dataset_ensemble_prefix(true); + client.use_tensor_ensemble_prefix(true); + + float* array = (float*)malloc(dims[0]*sizeof(float)); + for(int i=0; iget_integer_option("test_integer_key") == 42); + CHECK_FALSE(co->is_configured("test_integer_key_that_is_not_really_present")); + CHECK_THROWS_AS( + co->get_integer_option( + "test_integer_key_that_is_not_really_present"), + KeyException); + CHECK(co->_resolve_integer_option( + "test_integer_key_that_is_not_really_present", 11) == 11); + CHECK(co->is_configured("test_integer_key_that_is_not_really_present")); + CHECK(co->get_integer_option( + "test_integer_key_that_is_not_really_present") == 11); + co->override_integer_option( + "test_integer_key_that_is_not_really_present", 42); + CHECK(co->get_integer_option( + "test_integer_key_that_is_not_really_present") == 42); + CHECK(co->_resolve_integer_option( + "test_integer_key_that_is_not_really_present", 11) == 42); + + // string option tests + CHECK(co->get_string_option("test_string_key") == "charizard"); + CHECK_FALSE(co->is_configured("test_string_key_that_is_not_really_present")); + CHECK_THROWS_AS( + co->get_string_option( + "test_string_key_that_is_not_really_present"), + KeyException); + CHECK(co->_resolve_string_option( + "test_string_key_that_is_not_really_present", "pikachu") == "pikachu"); + CHECK(co->is_configured("test_string_key_that_is_not_really_present")); + CHECK(co->get_string_option( + "test_string_key_that_is_not_really_present") == "pikachu"); + co->override_string_option( + "test_string_key_that_is_not_really_present", "meowth"); + CHECK(co->get_string_option( + "test_string_key_that_is_not_really_present") == "meowth"); + CHECK(co->_resolve_string_option( + "test_string_key_that_is_not_really_present", "pikachu") == "meowth"); + } + } + + // Clean up test keys + unsetenv("test_integer_key"); + unsetenv("test_string_key"); + + log_data(context, LLDebug, "***End ConfigOptions testing***"); +} + +SCENARIO("Suffix Testing for ConfigOptions", "[CfgOpts]") +{ + std::cout << std::to_string(get_time_offset()) << ": 
Suffix Testing for ConfigOptions" << std::endl; + std::string context("test_configopts"); + log_data(context, LLDebug, "***Beginning ConfigOptions suffix testing***"); + + GIVEN("A ConfigOptions object") + { + // Make sure keys aren't set before we start + const char* keys[] = { + "integer_key_that_is_not_really_present_suffixtest", + "string_key_that_is_not_really_present_suffixtest", + "integer_key_suffixtest", + "string_key_suffixtest" + }; + INFO("Reserved keys must not be set before running this test."); + for (size_t i = 0; i < sizeof(keys)/sizeof(keys[0]); i++) { + REQUIRE(std::getenv(keys[i]) == NULL); + } + + // Set up keys for testing + setenv("integer_key_suffixtest", "42", true); + setenv("string_key_suffixtest", "charizard", true); + + auto co = ConfigOptions::create_from_environment("suffixtest"); + + THEN("Suffixed options should be configurable") + { + // integer option tests + CHECK(co->get_integer_option("integer_key") == 42); + CHECK_FALSE(co->is_configured("integer_key_that_is_not_really_present")); + CHECK_THROWS_AS( + co->get_integer_option( + "integer_key_that_is_not_really_present"), + KeyException); + CHECK(co->_resolve_integer_option( + "integer_key_that_is_not_really_present", 11) == 11); + CHECK(co->is_configured("integer_key_that_is_not_really_present")); + CHECK(co->get_integer_option( + "integer_key_that_is_not_really_present") == 11); + co->override_integer_option( + "integer_key_that_is_not_really_present", 42); + CHECK(co->get_integer_option( + "integer_key_that_is_not_really_present") == 42); + CHECK(co->_resolve_integer_option( + "integer_key_that_is_not_really_present", 11) == 42); + + // string option tests + CHECK(co->get_string_option("string_key") == "charizard"); + CHECK_FALSE(co->is_configured("string_key_that_is_not_really_present")); + CHECK_THROWS_AS( + co->get_string_option( + "string_key_that_is_not_really_present"), + KeyException); + CHECK(co->_resolve_string_option( + "string_key_that_is_not_really_present", "pikachu") == "pikachu"); + CHECK(co->is_configured("string_key_that_is_not_really_present")); + CHECK(co->get_string_option( + "string_key_that_is_not_really_present") == "pikachu"); + co->override_string_option( + "string_key_that_is_not_really_present", "meowth"); + CHECK(co->get_string_option( + "string_key_that_is_not_really_present") == "meowth"); + CHECK(co->_resolve_string_option( + "string_key_that_is_not_really_present", "pikachu") == "meowth"); + } + } + + // Clean up test keys + unsetenv("integer_key_suffixtest"); + unsetenv("string_key_suffixtest"); + + log_data(context, LLDebug, "***End ConfigOptions suffix testing***"); +} + diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_dataset.cpp b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_dataset.cpp index 55cdfdbb..e0a0b7af 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_dataset.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_dataset.cpp @@ -208,6 +208,13 @@ SCENARIO("Testing DataSet object", "[DataSet]") dataset.get_meta_strings(meta_str_name); CHECK(meta_str_data[0] == meta_str_val); } + + THEN("The dataset can be printed") + { + std::string dump = dataset.to_string(); + CHECK(dump.length() > 0); + std::cout << dataset; + } } } log_data(context, LLDebug, "***End DataSet testing***"); @@ -274,6 +281,11 @@ SCENARIO("Testing DataSet inspection", "[DataSet]") THEN("The tensor's type can be inspected") { + std::vector inspect_dims = dataset.get_tensor_dims(tensor_name); + CHECK(dims.size() == inspect_dims.size()); + for 
(int i = 0; i < dims.size(); i++) { + CHECK(dims[i] == inspect_dims[i]); + } CHECK(SRTensorTypeFloat == dataset.get_tensor_type(tensor_name)); } } diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_logger.cpp b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_logger.cpp index 0826febf..d9b1bdcd 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_logger.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_logger.cpp @@ -43,9 +43,9 @@ SCENARIO("Additional Testing for logging", "[LOG]") std::string context("test_logger"); log_data(context, LLDebug, "***Beginning Logger testing***"); - GIVEN("A Client object") + GIVEN("A Logger object") { - Client client(use_cluster(), "test_logger"); + Client client("test_logger"); THEN("Logging should be able to be done") { diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_redisserver.cpp b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_redisserver.cpp index 79626609..60875b0c 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_redisserver.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_redisserver.cpp @@ -57,7 +57,7 @@ using namespace SmartRedis; class RedisTest : public Redis { public: - RedisTest(SRObject* c) : Redis(c) {} + RedisTest(ConfigOptions* c) : Redis(c) {} int get_connection_timeout() {return _connection_timeout;} int get_connection_interval() {return _connection_interval;} int get_command_timeout() {return _command_timeout;} @@ -73,7 +73,7 @@ class RedisTest : public Redis class RedisClusterTest : public RedisCluster { public: - RedisClusterTest(SRObject* c) : RedisCluster(c) {} + RedisClusterTest(ConfigOptions* c) : RedisCluster(c) {} int get_connection_timeout() {return _connection_timeout;} int get_connection_interval() {return _connection_interval;} int get_command_timeout() {return _command_timeout;} @@ -97,12 +97,14 @@ const char* CMD_INTERVAL_ENV_VAR = "SR_CMD_INTERVAL"; // error to be thrown void invoke_constructor() { + ConfigOptions* cfgopts = ConfigOptions::create_from_environment("").release(); LogContext context("test_redisserver"); + cfgopts->_set_log_context(&context); if (use_cluster()) { - RedisClusterTest cluster_obj(&context); + RedisClusterTest cluster_obj(cfgopts); } else { - RedisTest non_cluster_obj(&context); + RedisTest non_cluster_obj(cfgopts); } } @@ -177,6 +179,8 @@ SCENARIO("Test runtime settings are initialized correctly", "[RedisServer]") std::string context("test_redisserver"); log_data(context, LLDebug, "***Beginning RedisServer testing***"); LogContext lc("test_redisserver"); + ConfigOptions* cfgopts = ConfigOptions::create_from_environment("").release(); + cfgopts->_set_log_context(&lc); char* __conn_timeout; char* __conn_interval; @@ -188,14 +192,14 @@ SCENARIO("Test runtime settings are initialized correctly", "[RedisServer]") { unset_all_env_vars(); if (use_cluster()) { - RedisClusterTest redis_server(&lc); + RedisClusterTest redis_server(cfgopts); THEN("Default member variable values are used") { check_all_defaults(redis_server); } } else { - RedisTest redis_server(&lc); + RedisTest redis_server(cfgopts); THEN("Default member variable values are used") { check_all_defaults(redis_server); @@ -211,14 +215,14 @@ SCENARIO("Test runtime settings are initialized correctly", "[RedisServer]") setenv(CMD_INTERVAL_ENV_VAR, "", true); if (use_cluster()) { - RedisClusterTest redis_server(&lc); + RedisClusterTest redis_server(cfgopts); THEN("Default member variable values are used") { check_all_defaults(redis_server); 
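// ----------------------------------------------------------------------
// Editor's sketch (illustrative, not part of the upstream patch): the
// construction pattern these hunks introduce, shown in isolation. A
// ConfigOptions object built from the environment replaces the bare
// LogContext argument; the tests release() the unique_ptr returned by
// create_from_environment() and pass the raw pointer to the server
// object under test (RedisTest / RedisClusterTest are the subclasses
// defined at the top of this file):
{
    ConfigOptions* sketch_cfgopts =
        ConfigOptions::create_from_environment("").release();
    LogContext sketch_lc("test_redisserver");
    sketch_cfgopts->_set_log_context(&sketch_lc);
    RedisTest sketch_server(sketch_cfgopts); // was: RedisTest(&lc)
}
// ----------------------------------------------------------------------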
} } else { - RedisTest redis_server(&lc); + RedisTest redis_server(cfgopts); THEN("Default member variable values are used") { check_all_defaults(redis_server); @@ -241,7 +245,7 @@ SCENARIO("Test runtime settings are initialized correctly", "[RedisServer]") setenv(CMD_INTERVAL_ENV_VAR, std::to_string(cmd_interval).c_str(), true); if (use_cluster()) { - RedisClusterTest redis_server(&lc); + RedisClusterTest redis_server(cfgopts); THEN("Environment variables are used for member variables") { CHECK(redis_server.get_connection_timeout() == @@ -260,7 +264,7 @@ SCENARIO("Test runtime settings are initialized correctly", "[RedisServer]") } } else { - RedisTest redis_server(&lc); + RedisTest redis_server(cfgopts); THEN("Environment variables are used for member variables") { CHECK(redis_server.get_connection_timeout() == diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_ssdb.cpp b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_ssdb.cpp index 25ad1f2d..3e623870 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_ssdb.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_ssdb.cpp @@ -33,6 +33,7 @@ #include "logger.h" #include "logcontext.h" #include "srobject.h" +#include "configoptions.h" unsigned long get_time_offset(); @@ -42,12 +43,17 @@ using namespace SmartRedis; class TestSSDB : public Redis { public: - TestSSDB(const SRObject* c) : Redis(c) {} + TestSSDB(ConfigOptions* c) : Redis(c) {} SRAddress get_ssdb() { return _get_ssdb(); } + + void clear_cached_SSDB() + { + _cfgopts->_clear_option_from_cache("SSDB"); + } }; // helper function for putting the SSDB environment @@ -64,17 +70,19 @@ SCENARIO("Additional Testing for various SSDBs", "[SSDB]") std::cout << std::to_string(get_time_offset()) << ": Additional Testing for various SSDBs" << std::endl; std::string context("test_ssdb"); log_data(context, LLDebug, "***Beginning SSDB testing***"); + ConfigOptions* cfgopts = ConfigOptions::create_from_environment("").release(); LogContext lc("test_ssdb"); + cfgopts->_set_log_context(&lc); GIVEN("A TestSSDB object") { const char* old_ssdb = std::getenv("SSDB"); INFO("SSDB must be set to a valid host and "\ - "port before running this test."); + "port before running this test."); REQUIRE(old_ssdb != NULL); - TestSSDB test_ssdb(&lc); + TestSSDB test_ssdb(cfgopts); Client* c = NULL; THEN("SSDB environment variable must exist " @@ -82,17 +90,21 @@ SCENARIO("Additional Testing for various SSDBs", "[SSDB]") { // SSDB is nullptr unsetenv("SSDB"); + test_ssdb.clear_cached_SSDB(); CHECK_THROWS_AS(test_ssdb.get_ssdb(), SmartRedis::RuntimeException); // SSDB contains invalid characters setenv_ssdb ("127.0.0.1:*&^9"); + test_ssdb.clear_cached_SSDB(); CHECK_THROWS_AS(test_ssdb.get_ssdb(), SmartRedis::RuntimeException); // Valid SSDB. 
Ensure one of 127 or 128 is chosen setenv_ssdb("127,128"); + test_ssdb.clear_cached_SSDB(); CHECK_THROWS_AS(test_ssdb.get_ssdb(), SmartRedis::RuntimeException); // SSDB points to a unix domain socket and we're using clustered Redis + // FINDME: This test uses a deprecated constructor and will need to be rewritten setenv_ssdb ("unix://127.0.0.1:6349"); CHECK_THROWS_AS(c = new Client(true, "test_ssdb"), SmartRedis::RuntimeException); diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_tensor.cpp b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_tensor.cpp index d6b1d87c..6304d778 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_tensor.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_tensor.cpp @@ -50,7 +50,7 @@ SCENARIO("Testing Tensor", "[Tensor]") size_t tensor_size = dims.at(0) * dims.at(1) * dims.at(2); std::vector tensor(tensor_size, 0); for (size_t i=0; i t(name, data, dims, type, mem_layout); @@ -62,7 +62,7 @@ SCENARIO("Testing Tensor", "[Tensor]") size_t tensor_size_2 = dims_2.at(0) * dims_2.at(1) * dims_2.at(2); std::vector tensor_2(tensor_size_2, 0); for (size_t i=0; i t_2(name_2, data_2, dims_2, type_2, mem_layout_2); diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_tensorbase.cpp b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_tensorbase.cpp index b01b79c8..54eef51d 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_tensorbase.cpp +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_tensorbase.cpp @@ -58,7 +58,7 @@ SCENARIO("Testing TensorBase through TensorPack", "[TensorBase]") size_t tensor_size = dims.at(0) * dims.at(1) * dims.at(2); std::vector tensor(tensor_size, 0); for (size_t i=0; i tensor(tensor_size, 0); for (size_t i=0; i tensor(tensor_size, 0); for (size_t i=0; i dims = {}; std::vector tensor(5, 0); for (size_t i=0; i<5; i++) - tensor[i] = 2.0*rand()/RAND_MAX - 1.0; + tensor[i] = 2.0*rand()/(double)RAND_MAX - 1.0; void* data = tensor.data(); THEN("A runtime error is thrown") @@ -223,7 +223,7 @@ SCENARIO("Testing TensorBase through TensorPack", "[TensorBase]") std::vector dims = {1, 0, 3}; std::vector tensor(5, 0); for (size_t i=0; i<5; i++) - tensor[i] = 2.0*rand()/RAND_MAX - 1.0; + tensor[i] = 2.0*rand()/(double)RAND_MAX - 1.0; void* data = tensor.data(); THEN("A runtime error is thrown") diff --git a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_unit_cpp_client.py b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_unit_cpp_client.py index 7e2ac718..c4b6ddd0 100644 --- a/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_unit_cpp_client.py +++ b/2023-01/smartsim/smartredis/tests/cpp/unit-tests/test_unit_cpp_client.py @@ -26,8 +26,8 @@ import pytest from os import path as osp +from os import getcwd from glob import glob -from shutil import which from subprocess import Popen, PIPE, TimeoutExpired import time @@ -39,22 +39,21 @@ def get_test_names(): """Obtain test names by globbing for client_test Add tests manually if necessary """ - glob_path = osp.join(TEST_PATH, "build/cpp_unit_tests") - test_names = glob(glob_path) + test_names = [osp.join(TEST_PATH, "cpp_unit_tests")] test_names = [(pytest.param(test, id=osp.basename(test))) for test in test_names] - print(test_names) - #test_names = [("build/test", "unit_tests")] return test_names @pytest.mark.parametrize("test", get_test_names()) -def test_unit_cpp_client(test, use_cluster): - cmd = [] - cmd.append(test) - print(f"Running test: {osp.basename(test)}") - print(f"Test command {' '.join(cmd)}") - 
print(f"Using cluster: {use_cluster}") +def test_unit_cpp_client(test, build, link): + # Build the path to the test executable from the source file name + # . keep only the last three parts of the path: (language, unit-tests, basename) + test = "/".join(test.split("/")[-3:]) + # . prepend the path to the built test executable + test = f"{getcwd()}/build/{build}/tests/{link}/{test}" + cmd = [test] + print(f"\nRunning test: {osp.basename(test)}") execute_cmd(cmd) time.sleep(1) @@ -62,19 +61,19 @@ def execute_cmd(cmd_list): """Execute a command """ # spawning the subprocess and connecting to its output - run_path = osp.join(TEST_PATH, "build/") proc = Popen( - cmd_list, stderr=PIPE, stdout=PIPE, stdin=PIPE, cwd=run_path) + cmd_list, stderr=PIPE, stdout=PIPE, stdin=PIPE, cwd=TEST_PATH) try: out, err = proc.communicate(timeout=timeout_limit) + print("OUTPUT:", out.decode("utf-8") if out else "None") + print("ERROR:", err.decode("utf-8") if err else "None") + if (proc.returncode != 0): + print("Return code:", proc.returncode) assert(proc.returncode == 0) - if out: - print("OUTPUT:", out.decode("utf-8")) - if err: - print("ERROR:", err.decode("utf-8")) except UnicodeDecodeError: output, errs = proc.communicate() - print("ERROR:", errs.decode("utf-8")) + print("OUTPUT:", out) + print("ERROR:", errs) assert(False) except TimeoutExpired: proc.kill() @@ -82,10 +81,15 @@ def execute_cmd(cmd_list): print(f"TIMEOUT: test timed out after test timeout limit of {timeout_limit} seconds") print("OUTPUT:", output.decode("utf-8")) print("ERROR:", errs.decode("utf-8")) + print("OUTPUT:", out) + print("ERROR:", errs) + assert(False) + except AssertionError: assert(False) - except Exception: + except Exception as e: + print(e) proc.kill() output, errs = proc.communicate() - print("OUTPUT:", output.decode("utf-8")) - print("ERROR:", errs.decode("utf-8")) + print("OUTPUT:", out) + print("ERROR:", errs) assert(False) diff --git a/2023-01/smartsim/smartredis/tests/python/test_address.py b/2023-01/smartsim/smartredis/tests/docker/CMakeLists.txt similarity index 78% rename from 2023-01/smartsim/smartredis/tests/python/test_address.py rename to 2023-01/smartsim/smartredis/tests/docker/CMakeLists.txt index 3bd3f8f8..81e978e8 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_address.py +++ b/2023-01/smartsim/smartredis/tests/docker/CMakeLists.txt @@ -24,18 +24,15 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import os +cmake_minimum_required(VERSION 3.13) +project(SmartSim-Tests-Docker) -from smartredis import Client +enable_language(C) +enable_language(CXX) +enable_language(Fortran) +set(ALLOW_DUPLICATE_CUSTOM_TARGETS true) -def test_address(use_cluster, context): - # get env var to set through client init - ssdb = os.environ["SSDB"] - del os.environ["SSDB"] - - # client init should fail if SSDB not set - c = Client(address=ssdb, cluster=use_cluster, logger_name=context) - - # check if SSDB was set anyway - assert os.environ["SSDB"] == ssdb +add_subdirectory(c) +add_subdirectory(cpp) +add_subdirectory(fortran) diff --git a/2023-01/smartsim/smartredis/tests/docker/c/CMakeLists.txt b/2023-01/smartsim/smartredis/tests/docker/c/CMakeLists.txt index 3b09350f..3ccb77ed 100644 --- a/2023-01/smartsim/smartredis/tests/docker/c/CMakeLists.txt +++ b/2023-01/smartsim/smartredis/tests/docker/c/CMakeLists.txt @@ -26,9 +26,9 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# -project(DockerTester) - cmake_minimum_required(VERSION 3.13) +project(DockerTesterC) +enable_language(C) set(CMAKE_VERBOSE_MAKEFILE ON) set(CMAKE_BUILD_TYPE DEBUG) @@ -41,8 +41,10 @@ include_directories(SYSTEM ) # Build executables - -add_executable(docker_test +add_executable(docker_test_c test_docker.c ) -target_link_libraries(docker_test ${SR_LIB} pthread) +set_target_properties(docker_test_c PROPERTIES + OUTPUT_NAME docker_test +) +target_link_libraries(docker_test_c ${SR_LIB} pthread) diff --git a/2023-01/smartsim/smartredis/tests/docker/c/test_docker.c b/2023-01/smartsim/smartredis/tests/docker/c/test_docker.c index 8f24cef9..56f3581e 100644 --- a/2023-01/smartsim/smartredis/tests/docker/c/test_docker.c +++ b/2023-01/smartsim/smartredis/tests/docker/c/test_docker.c @@ -40,7 +40,7 @@ int main(int argc, char* argv[]) { size_t cid_len = strlen(logger_name); SRError return_code = SRNoError; - return_code = SmartRedisCClient(false, logger_name, cid_len, &client); + return_code = SimpleCreateClient(logger_name, cid_len, &client); if (return_code != SRNoError) { return -1; diff --git a/2023-01/smartsim/smartredis/tests/docker/cpp/CMakeLists.txt b/2023-01/smartsim/smartredis/tests/docker/cpp/CMakeLists.txt index 33414044..6de60d51 100644 --- a/2023-01/smartsim/smartredis/tests/docker/cpp/CMakeLists.txt +++ b/2023-01/smartsim/smartredis/tests/docker/cpp/CMakeLists.txt @@ -26,9 +26,9 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # -project(DockerTester) - cmake_minimum_required(VERSION 3.13) +project(DockerTesterCpp) +enable_language(CXX) set(CMAKE_VERBOSE_MAKEFILE ON) set(CMAKE_BUILD_TYPE DEBUG) @@ -42,7 +42,10 @@ include_directories(SYSTEM # Build executables -add_executable(docker_test +add_executable(docker_test_cpp docker_test.cpp ) -target_link_libraries(docker_test ${SR_LIB} pthread) +set_target_properties(docker_test_cpp PROPERTIES + OUTPUT_NAME docker_test +) +target_link_libraries(docker_test_cpp ${SR_LIB} pthread) diff --git a/2023-01/smartsim/smartredis/tests/docker/cpp/docker_test.cpp b/2023-01/smartsim/smartredis/tests/docker/cpp/docker_test.cpp index 0df4786a..215c3670 100644 --- a/2023-01/smartsim/smartredis/tests/docker/cpp/docker_test.cpp +++ b/2023-01/smartsim/smartredis/tests/docker/cpp/docker_test.cpp @@ -30,7 +30,7 @@ int main(int argc, char* argv[]) { - SmartRedis::Client client(false, __FILE__); + SmartRedis::Client client(__FILE__); std::vector data = {1.0, 2.0, 3.0}; std::vector dims = {3}; diff --git a/2023-01/smartsim/smartredis/tests/docker/fortran/CMakeLists.txt b/2023-01/smartsim/smartredis/tests/docker/fortran/CMakeLists.txt index 0c983d9f..cf989bbd 100644 --- a/2023-01/smartsim/smartredis/tests/docker/fortran/CMakeLists.txt +++ b/2023-01/smartsim/smartredis/tests/docker/fortran/CMakeLists.txt @@ -26,32 +26,33 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# -project(DockerTester) - cmake_minimum_required(VERSION 3.13) - +project(DockerTesterFortran) enable_language(Fortran) -set(CMAKE_VERBOSE_MAKEFILE ON) -set(CMAKE_BUILD_TYPE Debug) +# Configure the build set(CMAKE_CXX_STANDARD 17) -set(CMAKE_C_STANDARD 99) +SET(CMAKE_C_STANDARD 99) +set(CMAKE_BUILD_TYPE Debug) -set(ftn_client_src - /usr/local/src/SmartRedis/src/fortran/fortran_c_interop.F90 - /usr/local/src/SmartRedis/src/fortran/dataset.F90 - /usr/local/src/SmartRedis/src/fortran/client.F90 +# Locate dependencies +find_library(SR_LIB smartredis REQUIRED) +find_library(SR_FTN_LIB smartredis-fortran REQUIRED) +set(SMARTREDIS_LIBRARIES + ${SR_LIB} + ${SR_FTN_LIB} ) -find_library(SR_LIB smartredis) - +# Define include directories for header files include_directories(SYSTEM /usr/local/include/smartredis ) -add_executable(docker_test +# Build the test +add_executable(docker_test_fortran test_docker.F90 - ${ftn_client_src} ) - -target_link_libraries(docker_test ${SR_LIB} pthread) +set_target_properties(docker_test_fortran PROPERTIES + OUTPUT_NAME docker_test +) +target_link_libraries(docker_test_fortran ${SMARTREDIS_LIBRARIES} pthread) diff --git a/2023-01/smartsim/smartredis/tests/docker/fortran/test_docker.F90 b/2023-01/smartsim/smartredis/tests/docker/fortran/test_docker.F90 index 0bc705fc..d889eebe 100644 --- a/2023-01/smartsim/smartredis/tests/docker/fortran/test_docker.F90 +++ b/2023-01/smartsim/smartredis/tests/docker/fortran/test_docker.F90 @@ -38,7 +38,7 @@ program main real(kind=8), dimension(dim1) :: tensor real(kind=8), dimension(dim1) :: returned - result = client%initialize(.FALSE., "test_docker.F90") + result = client%initialize("test_docker.F90") if (result .ne. SRNoError) stop call random_number(tensor) diff --git a/2023-01/smartsim/smartredis/tests/docker/python/test_docker.py b/2023-01/smartsim/smartredis/tests/docker/python/test_docker.py index 313a5aec..c66d4188 100644 --- a/2023-01/smartsim/smartredis/tests/docker/python/test_docker.py +++ b/2023-01/smartsim/smartredis/tests/docker/python/test_docker.py @@ -29,7 +29,7 @@ from smartredis import Client import numpy as np -client = Client(None, False) +client = Client(None) tensor = np.random.randint(-10, 10, size=(2,4)) diff --git a/2023-01/smartsim/smartredis/tests/fortran/CMakeLists.txt b/2023-01/smartsim/smartredis/tests/fortran/CMakeLists.txt index 66fce479..1f560350 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/CMakeLists.txt +++ b/2023-01/smartsim/smartredis/tests/fortran/CMakeLists.txt @@ -24,38 +24,91 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
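# Editor's note (not part of the upstream patch): the hunk below rewrites the
# Fortran test build in the same style as the C test CMakeLists above — a
# renamed project with explicit language support, IMPORTED targets for the
# installed smartredis and smartredis-fortran libraries, a STATIC_BUILD branch
# that adds Threads and compiler-specific C++ linker handling, a static
# test-utils library to enable parallel builds, and a single foreach() loop
# that builds one renamed executable per test source.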
-project(FortranClientTester) - +# Project definition for the SmartRedis-Tests-Fortran project cmake_minimum_required(VERSION 3.13) +project(SmartRedis-Tests-Fortran) +# Enable language support for the tests enable_language(Fortran) -set(CMAKE_VERBOSE_MAKEFILE ON) -set(CMAKE_BUILD_TYPE DEBUG) +# Configure the build set(CMAKE_CXX_STANDARD 17) -set(CMAKE_C_STANDARD 99) +SET(CMAKE_C_STANDARD 99) +set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/../..") +include(smartredis_defs) -# Assume by default that users should link against the install directory in this repository +# Assume by default that users should link against the +# install directory in this repository if(NOT DEFINED SMARTREDIS_INSTALL_PATH) set(SMARTREDIS_INSTALL_PATH "../../install/") endif() -# Specify all pre-processor and library dependencies -find_library(SMARTREDIS_LIBRARY smartredis PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH REQUIRED) -find_library(SMARTREDIS_FORTRAN_LIBRARY smartredis-fortran PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH REQUIRED) -find_library(HIREDIS hiredis PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH REQUIRED) -find_library(REDISPP redis++ PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH REQUIRED) -set(SMARTREDIS_LIBRARIES - ${SMARTREDIS_LIBRARY} - ${SMARTREDIS_FORTRAN_LIBRARY} - ${HIREDIS} - ${REDISPP} +# Locate dependencies +# . Main SmartRedis Library (C/C++ based) +add_library(smartredis-main ${SMARTREDIS_LINK_MODE} IMPORTED) +find_library(SR_LIB ${SMARTREDIS_LIB} + PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH + REQUIRED + ${SMARTREDIS_LINK_MODE} +) +set_target_properties(smartredis-main PROPERTIES + IMPORTED_LOCATION ${SR_LIB} +) +# . SmartRedis Fortran Library (Fortran based) +add_library(smartredis-fortran ${SMARTREDIS_LINK_MODE} IMPORTED) +find_library(SR_FTN_LIB ${SMARTREDIS_FORTRAN_LIB} + PATHS ${SMARTREDIS_INSTALL_PATH}/lib NO_DEFAULT_PATH + REQUIRED + ${SMARTREDIS_LINK_MODE} ) +set_target_properties(smartredis-fortran PROPERTIES + IMPORTED_LOCATION ${SR_FTN_LIB} +) + +# Select libraries for build +if (STATIC_BUILD) + # The CMake "preferred" approach only seems to work with the GNU + # compiler. 
We will streamline this in the future + if(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU") + # Mark that SmartRedis requires the C++ linker + set_target_properties(smartredis-main PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES "CXX" + ) + set_target_properties(smartredis-fortran PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES "FORTRAN" + ) + else() # Tested with PGI, Intel + # For other compilers, don't set languages so that CMake will use the Fortran linker (default) + + # Add the stdc++ linker flag + set(CMAKE_EXE_LINKER_FLAGS "-lstdc++ ${CMAKE_EXE_LINKER_FLAGS}") + endif() + + # Static builds have an extra dependency on the Pthreads library + # The order of libraries here is crucial to get dependencies covered + find_package(Threads REQUIRED) + set(SMARTREDIS_LIBRARIES + smartredis-fortran + smartredis-main + Threads::Threads + ) +else() + # Shared builds only need the SmartRedis libraries + set(SMARTREDIS_LIBRARIES + smartredis-fortran + smartredis-main + ) +endif() + +# Define include directories for header files include_directories(SYSTEM /usr/local/include ${SMARTREDIS_INSTALL_PATH}/include ) +# Stuff the test_utils into a library to enable parallel builds +add_library(test-utils STATIC test_utils.F90) + # Define all the tests to be built list(APPEND EXECUTABLES client_test_dataset_aggregation @@ -65,21 +118,25 @@ list(APPEND EXECUTABLES client_test_misc_tensor client_test_mnist_multigpu client_test_mnist + client_test_prefixing client_test_put_get_1D client_test_put_get_2D client_test_put_get_3D client_test_put_get_unpack_dataset client_test_logging client_test_errors + client_test_configoptions ) +# Build the tests foreach(EXECUTABLE ${EXECUTABLES}) - - add_executable(${EXECUTABLE} + add_executable(${EXECUTABLE}_fortran_test ${EXECUTABLE}.F90 - test_utils.F90 ) - target_link_libraries(${EXECUTABLE} - ${SMARTREDIS_LIBRARIES} + set_target_properties(${EXECUTABLE}_fortran_test PROPERTIES + OUTPUT_NAME ${EXECUTABLE} + ) + target_link_libraries(${EXECUTABLE}_fortran_test + ${SMARTREDIS_LIBRARIES} test-utils ) endforeach() diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_configoptions.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_configoptions.F90 new file mode 100644 index 00000000..9111b81c --- /dev/null +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_configoptions.F90 @@ -0,0 +1,198 @@ +! BSD 2-Clause License +! +! Copyright (c) 2021-2023, Hewlett Packard Enterprise +! All rights reserved. +! +! Redistribution and use in source and binary forms, with or without +! modification, are permitted provided that the following conditions are met: +! +! 1. Redistributions of source code must retain the above copyright notice, this +! list of conditions and the following disclaimer. +! +! 2. Redistributions in binary form must reproduce the above copyright notice, +! this list of conditions and the following disclaimer in the documentation +! and/or other materials provided with the distribution. +! +! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +! DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +! FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +! 
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +program main + use smartredis_configoptions, only : configoptions_type + use test_utils, only : setenv, unsetenv + use iso_fortran_env, only : STDERR => error_unit + use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int + use iso_c_binding, only : c_int8_t, c_int16_t, c_int32_t, c_int64_t, c_float, c_double, c_size_t + + implicit none + +#include "enum_fortran.inc" + + type(configoptions_type) :: co + integer :: result + integer(kind=8) :: ivalue, iresult ! int + logical :: bvalue, bresult ! bool + character(kind=c_char, len=:), allocatable :: svalue, sresult ! string + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! Establish test keys + ! non-suffixed testing keys + call setenv("test_integer_key", "42") + call setenv("test_string_key", "charizard") + ! suffixed testing keys + call setenv("integer_key_suffixtest", "42") + call setenv("string_key_suffixtest", "charizard") + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + !! non-suffixed option testing + result = co%create_configoptions_from_environment(""); + if (result .ne. SRNoError) error stop + + ! integer option tests + write(*,*) "ConfigOption testing: integer option tests" + result = co%get_integer_option("test_integer_key", iresult) + if (result .ne. SRNoError) error stop + if (iresult .ne. 42) error stop + + result = co%is_configured( & + "test_integer_key_that_is_not_really_present", bresult) + if (result .ne. SRNoError) error stop + if (bresult .eqv. .true.) error stop + + result = co%get_integer_option( & + "test_integer_key_that_is_not_really_present", iresult) + if (result .eq. SRNoError) error stop + + ivalue = 42 + result = co%override_integer_option( & + "test_integer_key_that_is_not_really_present", ivalue) + if (result .ne. SRNoError) error stop + + result = co%get_integer_option( & + "test_integer_key_that_is_not_really_present", iresult) + if (result .ne. SRNoError) error stop + if (iresult .ne. 42) error stop + + result = co%is_configured( & + "test_integer_key_that_is_not_really_present", bresult) + if (result .ne. SRNoError) error stop + if (bresult .neqv. .true.) error stop + + + ! string option tests + write(*,*) "ConfigOption testing: string option tests" + result = co%get_string_option("test_string_key", sresult) + if (result .ne. SRNoError) error stop + if (sresult .ne. "charizard") error stop + + result = co%is_configured( & + "test_string_key_that_is_not_really_present", bresult) + if (result .ne. SRNoError) error stop + if (bresult .eqv. .true.) error stop + + result = co%get_string_option( & + "test_string_key_that_is_not_really_present", sresult) + if (result .eq. SRNoError) error stop + + svalue = "meowth" + result = co%override_string_option( & + "test_string_key_that_is_not_really_present", svalue) + if (result .ne. SRNoError) error stop + + result = co%get_string_option( & + "test_string_key_that_is_not_really_present", sresult) + if (result .ne. SRNoError) error stop + if (sresult .ne. "meowth") error stop + + result = co%is_configured( & + "test_string_key_that_is_not_really_present", bresult) + if (result .ne. SRNoError) error stop + if (bresult .neqv. .true.) error stop + + + !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + ! suffixtest testing + result = co%create_configoptions_from_environment("suffixtest"); + if (result .ne. 
SRNoError) error stop
+
+  ! integer option tests
+  write(*,*) "ConfigOption testing: suffixed integer option tests"
+
+  result = co%get_integer_option("integer_key", iresult)
+  if (result .ne. SRNoError) error stop
+  if (iresult .ne. 42) error stop
+
+  result = co%is_configured( &
+    "integer_key_that_is_not_really_present", bresult)
+  if (result .ne. SRNoError) error stop
+  if (bresult .eqv. .true.) error stop
+
+  result = co%get_integer_option( &
+    "integer_key_that_is_not_really_present", iresult)
+  if (result .eq. SRNoError) error stop
+
+  ivalue = 42
+  result = co%override_integer_option( &
+    "integer_key_that_is_not_really_present", ivalue)
+  if (result .ne. SRNoError) error stop
+
+  result = co%get_integer_option( &
+    "integer_key_that_is_not_really_present", iresult)
+  if (result .ne. SRNoError) error stop
+  if (iresult .ne. 42) error stop
+
+  result = co%is_configured( &
+    "integer_key_that_is_not_really_present", bresult)
+  if (result .ne. SRNoError) error stop
+  if (bresult .neqv. .true.) error stop
+
+
+  ! string option tests
+  write(*,*) "ConfigOption testing: suffixed string option tests"
+
+  result = co%get_string_option("string_key", sresult)
+  if (result .ne. SRNoError) error stop
+  if (sresult .ne. "charizard") error stop
+
+  result = co%is_configured("string_key_that_is_not_really_present", bresult)
+  if (result .ne. SRNoError) error stop
+  if (bresult .eqv. .true.) error stop
+
+  result = co%get_string_option( &
+    "string_key_that_is_not_really_present", sresult)
+  if (result .eq. SRNoError) error stop
+
+  svalue = "meowth"
+  result = co%override_string_option( &
+    "string_key_that_is_not_really_present", svalue)
+  if (result .ne. SRNoError) error stop
+
+  result = co%get_string_option( &
+    "string_key_that_is_not_really_present", sresult)
+  if (result .ne. SRNoError) error stop
+  if (sresult .ne. "meowth") error stop
+
+  result = co%is_configured("string_key_that_is_not_really_present", bresult)
+  if (result .ne. SRNoError) error stop
+  if (bresult .neqv. .true.) error stop
+
+
+  !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+  !! Clean up test keys
+  ! non-suffixed testing keys
+  call unsetenv("test_integer_key")
+  call unsetenv("test_string_key")
+  ! suffixed testing keys
+  call unsetenv("integer_key_suffixtest")
+  call unsetenv("string_key_suffixtest")
+
+  !
Done + write(*,*) "ConfigOption testing: passed" +end program main diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_dataset.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_dataset.F90 index 02ebfcc7..b4e82e5e 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_dataset.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_dataset.F90 @@ -27,6 +27,8 @@ program main use iso_c_binding + use, intrinsic :: iso_fortran_env, only: stderr => error_unit + use, intrinsic :: iso_fortran_env, only: stdout => output_unit use smartredis_client, only : client_type use smartredis_dataset, only : dataset_type use test_utils, only : irand, use_cluster @@ -67,6 +69,9 @@ program main integer(kind=c_int64_t), dimension(dim1) :: meta_int64_vec integer(kind=c_int64_t), dimension(:), pointer :: meta_int64_recv + integer, dimension(10) :: dims + integer :: ndims + integer :: i, j, k type(dataset_type) :: dataset type(client_type) :: client @@ -75,7 +80,8 @@ program main integer :: result integer :: ttype integer :: mdtype - logical(kind=c_bool) :: exists + logical :: exists + character(kind=c_char, len=:), allocatable :: dumpstr call random_number(true_array_real_32) call random_number(true_array_real_64) @@ -104,6 +110,13 @@ program main result = dataset%get_tensor_type("true_array_real_32", ttype) if (result .ne. SRNoError) error stop if (ttype .ne. tensor_flt) error stop + ndims = size(dims) + result = dataset%get_tensor_dims("true_array_real_32", dims, ndims) + if (result .ne. SRNoError) error stop + if (3 .ne. ndims) error stop 'Wrong number of dimensions for true_array_real_32' + if (dim1 .ne. dims(1)) error stop 'Wrong size dim 1 for true_array_real_32' + if (dim2 .ne. dims(2)) error stop 'Wrong size dim 2 for true_array_real_32' + if (dim3 .ne. dims(3)) error stop 'Wrong size dim 3 for true_array_real_32' result = dataset%unpack_dataset_tensor("true_array_real_32", recv_array_real_32, shape(recv_array_real_32)) if (result .ne. SRNoError) error stop if (.not. all(true_array_real_32 == recv_array_real_32)) error stop 'true_array_real_32: FAILED' @@ -113,6 +126,13 @@ program main result = dataset%get_tensor_type("true_array_real_64", ttype) if (result .ne. SRNoError) error stop if (ttype .ne. tensor_dbl) error stop + ndims = size(dims) + result = dataset%get_tensor_dims("true_array_real_64", dims, ndims) + if (result .ne. SRNoError) error stop + if (3 .ne. ndims) error stop 'Wrong number of dimensions for true_array_real_64' + if (dim1 .ne. dims(1)) error stop 'Wrong size dim 1 for true_array_real_64' + if (dim2 .ne. dims(2)) error stop 'Wrong size dim 2 for true_array_real_64' + if (dim3 .ne. dims(3)) error stop 'Wrong size dim 3 for true_array_real_64' result = dataset%unpack_dataset_tensor("true_array_real_64", recv_array_real_64, shape(recv_array_real_64)) if (result .ne. SRNoError) error stop if (.not. all(true_array_real_64 == recv_array_real_64)) error stop 'true_array_real_64: FAILED' @@ -195,23 +215,29 @@ program main if (result .ne. SRNoError) error stop if (.not. all(meta_int64_recv == meta_int64_vec)) error stop 'meta_int64: FAILED' + ! Test dataset serialization + dumpstr = dataset%to_string() + if (dumpstr(1:7) .ne. "DataSet") error stop + call dataset%print_dataset() + call dataset%print_dataset(stdout) + ! test dataset_existence - result = client%initialize(use_cluster(), "client_test_dataset") + result = client%initialize("client_test_dataset") if (result .ne. 
SRNoError) error stop result = client%dataset_exists("nonexistent", exists) if (result .ne. SRNoError) error stop - if (exists) error stop 'non-existant dataset: FAILED' + if (exists) error stop 'non-existent dataset: FAILED' result = client%poll_dataset("nonexistent", 50, 5, exists) if (result .ne. SRNoError) error stop - if (exists) error stop 'non-existant dataset: FAILED' + if (exists) error stop 'non-existent dataset: FAILED' result = client%put_dataset(dataset) if (result .ne. SRNoError) error stop result = client%dataset_exists("test_dataset", exists) if (result .ne. SRNoError) error stop - if (.not. exists) error stop 'existant dataset: FAILED' + if (.not. exists) error stop 'existent dataset: FAILED' result = client%poll_dataset("test_dataset", 50, 5, exists) if (result .ne. SRNoError) error stop - if (.not. exists) error stop 'existant dataset: FAILED' + if (.not. exists) error stop 'existent dataset: FAILED' write(*,*) "Fortran Dataset: passed" diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_dataset_aggregation.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_dataset_aggregation.F90 index bc7b2468..67420b2e 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_dataset_aggregation.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_dataset_aggregation.F90 @@ -51,8 +51,7 @@ program main character(len=12) :: dataset_name integer :: result - result = client%initialize(use_cluster(), & - "client_test_dataset_aggregation") + result = client%initialize("client_test_dataset_aggregation") if (result .ne. SRNoError) error stop call random_number(true_vectors) diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_ensemble.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_ensemble.F90 index 6cb520b0..d0522fad 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_ensemble.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_ensemble.F90 @@ -43,13 +43,13 @@ program main real, dimension(10) :: tensor type(client_type) :: client integer :: result -logical(kind=c_bool) :: exists +logical :: exists ensemble_keyout = "producer_0" call setenv("SSKEYIN", "producer_0,producer_1") call setenv("SSKEYOUT", ensemble_keyout) -result = client%initialize(use_cluster(), "client_test_ensemble") +result = client%initialize("client_test_ensemble") if (result .ne. SRNoError) error stop result = client%use_model_ensemble_prefix(.true.) if (result .ne. SRNoError) error stop @@ -74,7 +74,7 @@ program main endif script_key = "ensemble_script" -script_file = "../../cpp/mnist_data/data_processing_script.txt" +script_file = "../cpp/mnist_data/data_processing_script.txt" result = client%set_script_from_file(script_key, "CPU", script_file) if (result .ne. SRNoError) error stop result = client%model_exists(script_key, exists) @@ -85,7 +85,7 @@ program main endif model_key = "ensemble_model" -model_file = "../../cpp/mnist_data/mnist_cnn.pt" +model_file = "../cpp/mnist_data/mnist_cnn.pt" result = client%set_model_from_file(model_key, model_file, "TORCH", "CPU") if (result .ne. SRNoError) error stop 'set_model_from_file failed' result = client%model_exists(model_key, exists) @@ -103,7 +103,7 @@ program main call setenv("SSKEYIN", "producer_1,producer_0") call setenv("SSKEYOUT", ensemble_keyout) -result = client%initialize(use_cluster(), "client_test_ensemble") +result = client%initialize("client_test_ensemble") if (result .ne. SRNoError) error stop result = client%use_model_ensemble_prefix(.true.) if (result .ne. 
SRNoError) error stop diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_errors.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_errors.F90 index a515b1ff..9dbefe02 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_errors.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_errors.F90 @@ -39,11 +39,11 @@ program main type(client_type) :: client integer :: result - result = client%initialize(use_cluster()) - if (result .ne. SRNoError) error stop + result = client%initialize() + if (result .ne. SRNoError) error stop "Initialization failed" result = client%rename_tensor("vanilla", "chocolate") - if (result .eq. SRNoError) error stop + if (result .eq. SRNoError) error stop "rename didn't fail" write(*,*) "Printing last error retrieved as string" write(*,*) get_last_error() diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_initialized.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_initialized.F90 index dcbc6d22..2a30027a 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_initialized.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_initialized.F90 @@ -28,6 +28,7 @@ program main use smartredis_client, only : client_type use test_utils, only : use_cluster use iso_fortran_env, only : STDERR => error_unit + use iso_fortran_env, only : STDOUT => output_unit use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int use iso_c_binding, only : c_int8_t, c_int16_t, c_int32_t, c_int64_t, c_float, c_double, c_size_t @@ -37,14 +38,22 @@ program main type(client_type) :: client integer :: result + character(kind=c_char, len=:), allocatable :: client_str if (client%isinitialized()) error stop 'client not initialized' - result = client%initialize(use_cluster(), "client_test_initialized") + result = client%initialize("client_test_initialized") if (result .ne. SRNoError) error stop if (.not. client%isinitialized()) error stop 'client is initialized' + client_str = client%to_string() + if (client_str(1:6) .ne. "Client") error stop + write(*,*) client_str + + call client%print_client() + call client%print_client(STDOUT) + write(*,*) "client initialized: passed" end program main diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_logging.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_logging.F90 index 2f6023e0..c1499382 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_logging.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_logging.F90 @@ -46,7 +46,7 @@ program main result = logcontext%initialize("client_test_logging (logcontext)") if (result .ne. SRNoError) error stop - result = client%initialize(use_cluster(), "client_test_logging (client)") + result = client%initialize("client_test_logging (client)") if (result .ne. SRNoError) error stop result = dataset%initialize("client_test_logging (dataset)") if (result .ne. SRNoError) error stop diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_misc_tensor.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_misc_tensor.F90 index bb0f44f6..2c20b74f 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_misc_tensor.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_misc_tensor.F90 @@ -44,9 +44,9 @@ program main integer :: err_code integer :: result - logical(kind=c_bool) :: exists + logical :: exists - result = client%initialize(use_cluster(), "client_test_misc_tensor") + result = client%initialize("client_test_misc_tensor") if (result .ne. 
SRNoError) error stop print *, "Putting tensor" diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_mnist.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_mnist.F90 index d3aae861..9cf43a1e 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_mnist.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_mnist.F90 @@ -35,16 +35,16 @@ program mnist_test #include "enum_fortran.inc" character(len=*), parameter :: model_key = "mnist_model" - character(len=*), parameter :: model_file = "../../cpp/mnist_data/mnist_cnn.pt" + character(len=*), parameter :: model_file = "../cpp/mnist_data/mnist_cnn.pt" character(len=*), parameter :: script_key = "mnist_script" - character(len=*), parameter :: script_file = "../../cpp/mnist_data/data_processing_script.txt" + character(len=*), parameter :: script_file = "../cpp/mnist_data/data_processing_script.txt" type(client_type) :: client integer :: err_code character(len=2) :: key_suffix integer :: result - result = client%initialize(use_cluster(), "client_test_mnist") + result = client%initialize("client_test_mnist") if (result .ne. SRNoError) error stop result = client%set_model_from_file(model_key, model_file, "TORCH", "CPU") diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_mnist_multigpu.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_mnist_multigpu.F90 index de83efdc..e59fdae2 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_mnist_multigpu.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_mnist_multigpu.F90 @@ -35,9 +35,9 @@ program mnist_test #include "enum_fortran.inc" character(len=*), parameter :: model_key = "mnist_model" - character(len=*), parameter :: model_file = "../../cpp/mnist_data/mnist_cnn.pt" + character(len=*), parameter :: model_file = "../cpp/mnist_data/mnist_cnn.pt" character(len=*), parameter :: script_key = "mnist_script" - character(len=*), parameter :: script_file = "../../cpp/mnist_data/data_processing_script.txt" + character(len=*), parameter :: script_file = "../cpp/mnist_data/data_processing_script.txt" integer, parameter :: first_gpu = 0 integer, parameter :: num_gpus = 1 integer, parameter :: offset = 0 @@ -47,8 +47,7 @@ program mnist_test character(len=2) :: key_suffix integer :: sr_return_code - sr_return_code = client%initialize(use_cluster(), & - "client_test_mnist_multigpu") + sr_return_code = client%initialize("client_test_mnist_multigpu") if (sr_return_code .ne. SRNoError) error stop sr_return_code = client%set_model_from_file_multigpu(model_key, model_file, "TORCH", first_gpu, num_gpus) diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_prefixing.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_prefixing.F90 new file mode 100644 index 00000000..e322f330 --- /dev/null +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_prefixing.F90 @@ -0,0 +1,101 @@ +! BSD 2-Clause License +! +! Copyright (c) 2021-2023, Hewlett Packard Enterprise +! All rights reserved. +! +! Redistribution and use in source and binary forms, with or without +! modification, are permitted provided that the following conditions are met: +! +! 1. Redistributions of source code must retain the above copyright notice, this +! list of conditions and the following disclaimer. +! +! 2. Redistributions in binary form must reproduce the above copyright notice, +! this list of conditions and the following disclaimer in the documentation +! and/or other materials provided with the distribution. +! +! 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+! DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+! FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+program main
+
+use smartredis_client, only : client_type
+use smartredis_dataset, only : dataset_type
+use test_utils, only : setenv, use_cluster
+use iso_fortran_env, only : STDERR => error_unit
+use iso_c_binding, only : c_ptr, c_bool, c_null_ptr, c_char, c_int
+use iso_c_binding, only : c_int8_t, c_int16_t, c_int32_t, c_int64_t, c_float, c_double, c_size_t
+
+implicit none
+
+#include "enum_fortran.inc"
+
+character(len=255) :: prefix
+character(len=255) :: tensor_key, dataset_key, dataset_tensor_key
+
+real, dimension(10) :: tensor
+type(client_type) :: client
+type(dataset_type) :: dataset
+integer :: result
+logical :: exists
+
+prefix = "prefix_test"
+call setenv("SSKEYIN", prefix)
+call setenv("SSKEYOUT", prefix)
+
+result = client%initialize("client_test_prefixing")
+if (result .ne. SRNoError) error stop
+result = client%use_tensor_ensemble_prefix(.true.)
+if (result .ne. SRNoError) error stop
+result = client%use_dataset_ensemble_prefix(.true.)
+if (result .ne. SRNoError) error stop
+
+! Put tensor and dataset into the database. Then check to make
+! sure that keys were prefixed correctly
+
+tensor_key = "test_tensor"
+result = client%put_tensor(tensor_key, tensor, shape(tensor))
+if (result .ne. SRNoError) error stop
+result = client%tensor_exists(tensor_key, exists)
+if (result .ne. SRNoError) error stop
+if (.not. exists) then
+  write(STDERR,*) 'Tensor does not exist: ', tensor_key
+  error stop
+endif
+result = client%key_exists(trim(prefix)//"."//trim(tensor_key), exists)
+if (result .ne. SRNoError) error stop
+if (.not. exists) then
+  write(STDERR,*) 'Key does not exist: ', trim(prefix)//"."//tensor_key
+  error stop
+endif
+
+dataset_key = "test_dataset"
+result = dataset%initialize(dataset_key)
+if (result .ne. SRNoError) error stop
+dataset_tensor_key = "dataset_tensor"
+result = dataset%add_tensor(dataset_tensor_key, tensor, shape(tensor))
+if (result .ne. SRNoError) error stop
+result = client%put_dataset(dataset)
+if (result .ne. SRNoError) error stop
+result = client%dataset_exists(dataset_key, exists)
+if (result .ne. SRNoError) error stop
+if (.not. exists) then
+  write(STDERR,*) 'Dataset does not exist: ', dataset_key
+  error stop
+endif
+result = client%key_exists(trim(prefix)//".{"//trim(dataset_key)//"}.meta", exists)
+if (result .ne. SRNoError) error stop
+if (.not.
exists) then + write(STDERR,*) 'Key does not exist: ', trim(prefix)//".{"//trim(dataset_key)//"}.meta" + error stop +endif + + +end program main diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_1D.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_1D.F90 index 97a2a493..9a064c38 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_1D.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_1D.F90 @@ -73,7 +73,7 @@ program main recv_array_integer_64(i) = irand() enddo - result = client%initialize(use_cluster(), "client_test_put_get_1D") + result = client%initialize("client_test_put_get_1D") if (result .ne. SRNoError) error stop result = client%put_tensor("true_array_real_32", true_array_real_32, shape(true_array_real_32)) diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_2D.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_2D.F90 index a7485ef9..a18b6b20 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_2D.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_2D.F90 @@ -74,7 +74,7 @@ program main recv_array_integer_64(i,j) = irand() enddo; enddo - result = client%initialize(use_cluster(), "client_test_put_get_2D") + result = client%initialize("client_test_put_get_2D") if (result .ne. SRNoError) error stop result = client%put_tensor("true_array_real_32", true_array_real_32, shape(true_array_real_32)) diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_3D.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_3D.F90 index 87510546..5a0c935f 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_3D.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_3D.F90 @@ -76,7 +76,7 @@ program main recv_array_integer_64(i,j,k) = irand() enddo; enddo; enddo - result = client%initialize(use_cluster(), "client_test_put_get_3D") + result = client%initialize("client_test_put_get_3D") if (result .ne. SRNoError) error stop result = client%put_tensor("true_array_real_32", true_array_real_32, shape(true_array_real_32)) diff --git a/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_unpack_dataset.F90 b/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_unpack_dataset.F90 index 6f1c70ec..5dc21e97 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_unpack_dataset.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/client_test_put_get_unpack_dataset.F90 @@ -61,8 +61,7 @@ program main integer :: err_code - result = client%initialize(use_cluster(), & - "client_test_put_get_unpack_dataset") + result = client%initialize("client_test_put_get_unpack_dataset") if (result .ne. 
SRNoError) error stop call random_number(true_array_real_32) diff --git a/2023-01/smartsim/smartredis/tests/fortran/test_fortran_client.py b/2023-01/smartsim/smartredis/tests/fortran/test_fortran_client.py index 0a50342b..cbe9dada 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/test_fortran_client.py +++ b/2023-01/smartsim/smartredis/tests/fortran/test_fortran_client.py @@ -26,13 +26,12 @@ import pytest from os import path as osp -import os +from os import getcwd, environ from glob import glob -from shutil import which from subprocess import Popen, PIPE, TimeoutExpired import time -test_gpu = os.environ.get("SMARTREDIS_TEST_DEVICE","cpu").lower() == "gpu" +test_gpu = environ.get("SMARTREDIS_TEST_DEVICE","cpu").lower() == "gpu" RANKS = 1 TEST_PATH = osp.dirname(osp.abspath(__file__)) @@ -41,23 +40,30 @@ def get_test_names(): """Obtain test names by globbing for client_test Add tests manually if necessary """ - glob_path = osp.join(TEST_PATH, "build/client_test*") + glob_path = osp.join(TEST_PATH, "client_test*") test_names = glob(glob_path) - test_names = [(pytest.param(test, id=osp.basename(test))) - for test in test_names if not "gpu" in test] + test_names = list(filter(lambda test: test.find('gpu') == -1, test_names)) + test_names = [(pytest.param(test, + id=osp.basename(test))) for test in test_names] return test_names @pytest.mark.parametrize("test", get_test_names()) -def test_fortran_client(test): +def test_fortran_client(test, build, link): """This function actually runs the tests using the parameterization function provided in Pytest :param test: a path to a test to run :type test: str """ - cmd = [] - cmd.append(test) + # Build the path to the test executable from the source file name + # . keep only the last three parts of the path: (language, basename) + test = "/".join(test.split("/")[-2:]) + # . drop the file extension + test = ".".join(test.split(".")[:-1]) + # . 
prepend the path to the built test executable + test = f"{getcwd()}/build/{build}/tests/{link}/{test}" + cmd = [test] print(f"Running test: {osp.basename(test)}") print(f"Test command {' '.join(cmd)}") execute_cmd(cmd) @@ -67,9 +73,8 @@ def execute_cmd(cmd_list): """Execute a command """ # spawning the subprocess and connecting to its output - run_path = osp.join(TEST_PATH, "build/") proc = Popen( - cmd_list, stderr=PIPE, stdout=PIPE, stdin=PIPE, cwd=run_path) + cmd_list, stderr=PIPE, stdout=PIPE, stdin=PIPE, cwd=TEST_PATH) try: out, err = proc.communicate(timeout=120) if out: @@ -104,7 +109,6 @@ def test_client_multigpu_mnist(): Test setting and running a machine learning model via the Fortran client on an orchestrator with multiple GPUs """ - - tester_path = osp.join(TEST_PATH, "build/client_test_mnist_multigpu") + tester_path = osp.join(TEST_PATH, "client_test_mnist_multigpu.F90") test_fortran_client(tester_path) - + diff --git a/2023-01/smartsim/smartredis/tests/fortran/test_utils.F90 b/2023-01/smartsim/smartredis/tests/fortran/test_utils.F90 index 57be3f8c..d0ff69ab 100644 --- a/2023-01/smartsim/smartredis/tests/fortran/test_utils.F90 +++ b/2023-01/smartsim/smartredis/tests/fortran/test_utils.F90 @@ -31,13 +31,19 @@ module test_utils implicit none; private interface - integer(kind=c_int) function setenv_c( env_var, env_val, replace ) bind(c,name='setenv') + integer(kind=c_int) function setenv_c(env_var, env_val, replace) bind(c, name='setenv') import c_int, c_char character(kind=c_char), intent(in) :: env_var(*) !< Name of the variable to set character(kind=c_char), intent(in) :: env_val(*) !< Value to set the variable to integer(kind=c_int), intent(in) :: replace !< If 1, overwrite the value, end function setenv_c + end interface + interface + integer(kind=c_int) function unsetenv_c( env_var ) bind(c, name='unsetenv') + import c_int, c_char + character(kind=c_char), intent(in) :: env_var(*) !< Name of the variable to clear + end function unsetenv_c end interface @@ -45,6 +51,7 @@ end function setenv_c public :: use_cluster public :: c_str public :: setenv + public :: unsetenv contains @@ -58,27 +65,25 @@ integer function irand() end function irand logical function use_cluster() + character(len=16) :: server_type - character(len=16) :: smartredis_test_cluster - - call get_environment_variable('SMARTREDIS_TEST_CLUSTER', smartredis_test_cluster) - smartredis_test_cluster = to_lower(smartredis_test_cluster) + call get_environment_variable('SR_DB_TYPE', server_type) + server_type = to_lower(server_type) use_cluster = .false. - if (len_trim(smartredis_test_cluster)>0) then - select case (smartredis_test_cluster) - case ('true') + if (len_trim(server_type)>0) then + select case (server_type) + case ('clustered') use_cluster = .true. - case ('false') + case ('standalone') use_cluster = .false. case default use_cluster = .false. end select endif - end function use_cluster !> Returns a lower case version of the string. Only supports a-z - function to_lower( str ) result(lower_str) + function to_lower(str) result(lower_str) character(len=*), intent(in ) :: str !< String character(len = len(str)) :: lower_str @@ -92,20 +97,18 @@ function to_lower( str ) result(lower_str) i_low = index(caps,str(i:i)) if (i_low > 0) lower_str(i:i) = lows(i_low:i_low) enddo - end function to_lower !> Convert a Fortran string to a C-string (i.e. 
append a null character) - function c_str( f_str ) + function c_str(f_str) character(len=*) :: f_str !< The original Fortran-style string character(kind=c_char,len=len_trim(f_str)+1) :: c_str !< The resultant C-style string c_str = trim(f_str)//C_NULL_CHAR - end function c_str - !> Set an environment value to a given value - subroutine setenv( env_var, env_val, replace ) + !> Set an environment variable to a given value + subroutine setenv(env_var, env_val, replace) character(len=*) :: env_var !< Environment variable to set character(len=*) :: env_val !< The value to set the variable logical, optional :: replace !< If true (default) overwrite the current value @@ -127,7 +130,21 @@ subroutine setenv( env_var, env_val, replace ) write(STDERR,*) "Error setting", c_env_var, c_env_val error stop endif - end subroutine setenv + !> Clear an environment variable + subroutine unsetenv(env_var) + character(len=*) :: env_var !< Environment variable to clear + + integer(kind=c_int) :: err_code + character(kind=c_char, len=len_trim(env_var)+1) :: c_env_var + c_env_var = c_str(env_var) + + err_code = unsetenv_c(c_env_var) + if (err_code /= 0) then + write(STDERR,*) "Error clearing", c_env_var + error stop + endif + end subroutine unsetenv + end module test_utils diff --git a/2023-01/smartsim/smartredis/tests/python/test_client.py b/2023-01/smartsim/smartredis/tests/python/test_client.py new file mode 100644 index 00000000..5ccf7f53 --- /dev/null +++ b/2023-01/smartsim/smartredis/tests/python/test_client.py @@ -0,0 +1,110 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
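Note on the tests that follow: the new test_client.py exercises the two Client construction paths that this PR switches the test suite over to, an "address" construction (legacy positional cluster flag, optionally an explicit database address) and a "standard" construction driven by a ConfigOptions object or the environment. A minimal sketch of the two call styles, assuming a database reachable at the placeholder address shown (names follow the smartredis Python API used in this diff):

    from smartredis import Client, ConfigOptions

    # "address" construction: explicit cluster flag plus a database address
    client_a = Client(False, address="127.0.0.1:6379")

    # "standard" construction: settings resolved from the environment (SSDB etc.)
    options = ConfigOptions.create_from_environment("")
    client_b = Client(options, logger_name="example")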
+
+import os
+import pytest
+
+from smartredis import Client, ConfigOptions
+
+def test_serialization(context):
+    c = Client(None, logger_name=context)
+    assert str(c) != repr(c)
+
+
+def test_address(context):
+    # get env var to set through client init
+    ssdb = os.environ["SSDB"]
+    del os.environ["SSDB"]
+
+    # client init should not fail even though SSDB is unset,
+    # since an explicit address is passed in
+    _ = Client(False, address=ssdb, logger_name=context)
+
+    # check that the address was propagated back into SSDB
+    assert os.environ["SSDB"] == ssdb
+
+# Globals for Client constructor testing
+ac_original = Client._Client__address_construction
+sc_original = Client._Client__standard_construction
+cluster_mode = os.environ["SR_DB_TYPE"] == "Clustered"
+target_address = os.environ["SSDB"]
+co_envt = ConfigOptions.create_from_environment("")
+
+@pytest.mark.parametrize(
+    "args, kwargs, expected_constructor", [
+    # address constructions
+    [(False,), {}, "address"],
+    [(False,), {"address": target_address}, "address"],
+    [(False,), {"address": target_address, "logger_name": "log_name"}, "address"],
+    [(False,), {"logger_name": "log_name"}, "address"],
+    [(False, target_address), {}, "address"],
+    [(False, target_address), {"logger_name": "log_name"}, "address"],
+    [(False, target_address, "log_name"), {}, "address"],
+    [(), {"cluster": cluster_mode}, "address"],
+    [(), {"cluster": cluster_mode, "address": target_address}, "address"],
+    [(), {"cluster": cluster_mode, "address": target_address, "logger_name": "log_name"}, "address"],
+    [(), {"cluster": cluster_mode, "logger_name": "log_name"}, "address"],
+    # standard constructions
+    [(None,), {}, "standard"],
+    [(None,), {"logger_name": "log_name"}, "standard"],
+    [(None, "log_name"), {}, "standard"],
+    [(co_envt,), {}, "standard"],
+    [(co_envt,), {"logger_name": "log_name"}, "standard"],
+    [(co_envt, "log_name"), {}, "standard"],
+    [(), {}, "standard"],
+    [(), {"config_options": None}, "standard"],
+    [(), {"config_options": None, "logger_name": "log_name"}, "standard"],
+    [(), {"config_options": co_envt}, "standard"],
+    [(), {"config_options": co_envt, "logger_name": "log_name"}, "standard"],
+    [(), {"logger_name": "log_name"}, "standard"],
+])
+def test_client_constructor(args, kwargs, expected_constructor, monkeypatch):
+    ac_got_called = False
+    sc_got_called = False
+
+    def mock_address_constructor(self, *a, **kw):
+        nonlocal ac_got_called
+        ac_got_called = True
+        return ac_original(self, *a, **kw)
+
+    @staticmethod
+    def mock_standard_constructor(*a, **kw):
+        nonlocal sc_got_called
+        sc_got_called = True
+        return sc_original(*a, **kw)
+
+    monkeypatch.setattr(
+        Client, "_Client__address_construction", mock_address_constructor)
+    monkeypatch.setattr(
+        Client, "_Client__standard_construction", mock_standard_constructor)
+
+    Client(*args, **kwargs)
+
+    if expected_constructor == "address":
+        assert ac_got_called
+        assert not sc_got_called
+    if expected_constructor == "standard":
+        assert not ac_got_called
+        assert sc_got_called
diff --git a/2023-01/smartsim/smartredis/tests/python/test_configoptions.py b/2023-01/smartsim/smartredis/tests/python/test_configoptions.py
new file mode 100644
index 00000000..0990a0b6
--- /dev/null
+++ b/2023-01/smartsim/smartredis/tests/python/test_configoptions.py
@@ -0,0 +1,96 @@
+# BSD 2-Clause License
+#
+# Copyright (c) 2021-2023, Hewlett Packard Enterprise
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1.
Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import os + +import pytest +from smartredis import * +from smartredis.error import * + +##### +# Test attempts to use API functions from non-factory object + +def test_non_factory_configobject(): + co = ConfigOptions() + with pytest.raises(RedisRuntimeError): + _ = co.get_integer_option("key") + with pytest.raises(RedisRuntimeError): + _ = co.get_string_option("key") + with pytest.raises(RedisRuntimeError): + _ = co.is_configured("key") + with pytest.raises(RedisRuntimeError): + _ = co.override_integer_option("key", 42) + with pytest.raises(RedisRuntimeError): + _ = co.override_string_option("key", "value") + +def test_options(monkeypatch): + monkeypatch.setenv("test_integer_key", "42") + monkeypatch.setenv("test_string_key", "charizard") + co = ConfigOptions.create_from_environment("") + + # integer option tests + assert co.get_integer_option("test_integer_key") == 42 + assert not co.is_configured("test_integer_key_that_is_not_really_present") + with pytest.raises(RedisKeyError): + _ = co.get_integer_option("test_integer_key_that_is_not_really_present") + co.override_integer_option("test_integer_key_that_is_not_really_present", 42) + assert co.is_configured("test_integer_key_that_is_not_really_present") + assert co.get_integer_option( + "test_integer_key_that_is_not_really_present") == 42 + + # string option tests + assert co.get_string_option("test_string_key") == "charizard" + assert not co.is_configured("test_string_key_that_is_not_really_present") + with pytest.raises(RedisKeyError): + _ = co.get_string_option("test_string_key_that_is_not_really_present") + co.override_string_option("test_string_key_that_is_not_really_present", "meowth") + assert co.is_configured("test_string_key_that_is_not_really_present") + assert co.get_string_option( + "test_string_key_that_is_not_really_present") == "meowth" + +def test_options_with_suffix(monkeypatch): + monkeypatch.setenv("integer_key_suffixtest", "42") + monkeypatch.setenv("string_key_suffixtest", "charizard") + co = ConfigOptions.create_from_environment("suffixtest") + + # integer option tests + assert co.get_integer_option("integer_key") == 42 + assert not co.is_configured("integer_key_that_is_not_really_present") + with pytest.raises(RedisKeyError): + _ = co.get_integer_option("integer_key_that_is_not_really_present") + co.override_integer_option("integer_key_that_is_not_really_present", 42) + assert 
co.get_integer_option("integer_key_that_is_not_really_present") == 42
+    assert co.is_configured("integer_key_that_is_not_really_present")
+
+    # string option tests
+    assert co.get_string_option("string_key") == "charizard"
+    assert not co.is_configured("string_key_that_is_not_really_present")
+    with pytest.raises(RedisKeyError):
+        _ = co.get_string_option("string_key_that_is_not_really_present")
+    co.override_string_option("string_key_that_is_not_really_present", "meowth")
+    assert co.is_configured("string_key_that_is_not_really_present")
+    assert co.get_string_option(
+        "string_key_that_is_not_really_present") == "meowth"
diff --git a/2023-01/smartsim/smartredis/tests/python/test_dataset_aggregation.py b/2023-01/smartsim/smartredis/tests/python/test_dataset_aggregation.py
index 1c826dad..ba806e50 100644
--- a/2023-01/smartsim/smartredis/tests/python/test_dataset_aggregation.py
+++ b/2023-01/smartsim/smartredis/tests/python/test_dataset_aggregation.py
@@ -24,17 +24,15 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-import os
 import numpy as np
-import pytest
 
 from smartredis import Client, Dataset
 from smartredis.error import *
 from smartredis import *
 
-def test_aggregation(use_cluster, context):
+def test_aggregation(context):
     num_datasets = 4
-    client = Client(None, use_cluster, logger_name=context)
+    client = Client(None, logger_name=context)
     log_data(context, LLDebug, "Initialization complete")
 
     # Build datasets
@@ -119,6 +117,14 @@
             f"The list length of {list_length} does not match expected "
             f"value of {actual_length}.")
     log_data(context, LLDebug, "List length check")
+
+    # Check the return of a range of datasets from the aggregated list
+    num_datasets = client.get_dataset_list_range(list_name, 0, 1)
+    if (len(num_datasets) != 2):
+        raise RuntimeError(
+            f"The length is {len(num_datasets)}, which does not "
+            f"match expected value of 2.")
+    log_data(context, LLDebug, "Retrieve datasets from list checked")
 
     # Retrieve datasets via the aggregation list
     datasets = client.get_datasets_from_list(list_name)
@@ -128,7 +134,31 @@
             f"does not match expected value of {list_length}.")
     for ds in datasets:
         check_dataset(ds)
-    log_data(context, LLDebug, "DataSet retrieval")
+    log_data(context, LLDebug, "DataSet list retrieval")
+
+    # Rename a list of datasets
+    client.rename_list(list_name, "new_list_name")
+    renamed_list_datasets = client.get_datasets_from_list("new_list_name")
+    if len(renamed_list_datasets) != list_length:
+        raise RuntimeError(
+            f"The number of datasets received {len(renamed_list_datasets)} "
+            f"does not match expected value of {list_length}.")
+    for ds in renamed_list_datasets:
+        check_dataset(ds)
+    log_data(context, LLDebug, "DataSet list rename complete")
+
+    # Copy a list of datasets
+    client.copy_list("new_list_name", "copied_list_name")
+    copied_list_datasets = client.get_datasets_from_list("copied_list_name")
+    if len(copied_list_datasets) != list_length:
+        raise RuntimeError(
+            f"The number of datasets received {len(copied_list_datasets)} "
+            f"does not match expected value of {list_length}.")
+    for ds in copied_list_datasets:
+        check_dataset(ds)
+    log_data(context, LLDebug, "DataSet list copied")
+
+
 # ------------ helper functions ---------------------------------
diff --git a/2023-01/smartsim/smartredis/tests/python/test_dataset_conversion.py
b/2023-01/smartsim/smartredis/tests/python/test_dataset_conversion.py index 086fa365..ce66216f 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_dataset_conversion.py +++ b/2023-01/smartsim/smartredis/tests/python/test_dataset_conversion.py @@ -28,11 +28,11 @@ import xarray as xr except ImportError: xr = None -_xarray_not_found = 'optional package xarray not available in environment' +_xarray_not_found = "optional package xarray not available in environment" import numpy as np import pytest -from smartredis import Dataset, Client +from smartredis import Dataset from smartredis.dataset_utils import DatasetConverter from smartredis.error import * @@ -54,11 +54,11 @@ if xr: # Create 1D reference Xarray for testing ds_1d = xr.DataArray( - name = "1ddata", - data = data1d, - dims = "x", - coords = (longitude_coord_1d,), - attrs = data_attributes_1d, + name="1ddata", + data=data1d, + dims="x", + coords=(longitude_coord_1d,), + attrs=data_attributes_1d, ) # ----------Create reference 2D data for 2D tests --------- @@ -83,11 +83,11 @@ if xr: # Create 2D reference Xarray for testing ds_2d = xr.DataArray( - name = "2ddata", - data = data2d, - dims = ["x", "y"], - coords = (longitude_coord_2d, latitude_coord_2d), - attrs = data_attributes_2d, + name="2ddata", + data=data2d, + dims=["x", "y"], + coords=(longitude_coord_2d, latitude_coord_2d), + attrs=data_attributes_2d, ) # ----helper methods ------- @@ -171,11 +171,23 @@ def assert_equality_2d(dataset): # Compare tensor data and metadata for coordinate data extracted from dataset after # add_metadata_for_xarray call to generated 2D coordinate data assert (dataset.get_tensor("x") == longitude_2d).all() - assert dataset.get_meta_strings("x_coord_units")[0] == longitude_coord_2d[2]["x_coord_units"] - assert dataset.get_meta_strings("x_coord_longname")[0] == longitude_coord_2d[2]["x_coord_longname"] + assert ( + dataset.get_meta_strings("x_coord_units")[0] + == longitude_coord_2d[2]["x_coord_units"] + ) + assert ( + dataset.get_meta_strings("x_coord_longname")[0] + == longitude_coord_2d[2]["x_coord_longname"] + ) assert (dataset.get_tensor("y") == latitude_2d).all() - assert dataset.get_meta_strings("y_coord_units")[0] == latitude_coord_2d[2]["y_coord_units"] - assert dataset.get_meta_strings("y_coord_longname")[0] == latitude_coord_2d[2]["y_coord_longname"] + assert ( + dataset.get_meta_strings("y_coord_units")[0] + == latitude_coord_2d[2]["y_coord_units"] + ) + assert ( + dataset.get_meta_strings("y_coord_longname")[0] + == latitude_coord_2d[2]["y_coord_longname"] + ) # -------- start of tests -------------- @@ -191,17 +203,17 @@ def test_add_metadata_for_xarray_1d(): # Call method add_metadata_for_xarray on 1D dataset DatasetConverter.add_metadata_for_xarray( ds1, - data_names = ["1ddata"], - dim_names = ["dim_data_x"], - coord_names = ["x"], - attr_names = ["units", "longname", "convention"], + data_names=["1ddata"], + dim_names=["dim_data_x"], + coord_names=["x"], + attr_names=["units", "longname", "convention"], ) # Call method add_metadata_for_xarray for longitude coordinate DatasetConverter.add_metadata_for_xarray( ds1, - data_names = ["x"], - dim_names = ["dim_data_x"], - attr_names = ["x_coord_units", "x_coord_longname"], + data_names=["x"], + dim_names=["dim_data_x"], + attr_names=["x_coord_units", "x_coord_longname"], ) assert_equality_1d(ds1) @@ -214,17 +226,17 @@ def test_string_single_variable_param_names_add_metadata_for_xarray_1d(): # Call method add_metadata_for_xarray on 1D dataset DatasetConverter.add_metadata_for_xarray( 
ds1, - data_names = "1ddata", - dim_names= "dim_data_x", - coord_names = "x", - attr_names = ["units", "longname", "convention"], + data_names="1ddata", + dim_names="dim_data_x", + coord_names="x", + attr_names=["units", "longname", "convention"], ) # Call method add_metadata_for_xarray for longitude coordinate DatasetConverter.add_metadata_for_xarray( ds1, - data_names = "x", - dim_names = "dim_data_x", - attr_names = ["x_coord_units", "x_coord_longname"], + data_names="x", + dim_names="dim_data_x", + attr_names=["x_coord_units", "x_coord_longname"], ) assert_equality_1d(ds1) @@ -244,34 +256,34 @@ def test_bad_type_add_metadata_for_xarray_1d(): with pytest.raises(TypeError): DatasetConverter.add_metadata_for_xarray( ds1, - data_names = 1, - dim_names = dimname, - coord_names = coordname, - attr_names = attrname, + data_names=1, + dim_names=dimname, + coord_names=coordname, + attr_names=attrname, ) with pytest.raises(TypeError): DatasetConverter.add_metadata_for_xarray( ds1, - data_names = dataname, - dim_names = 2, - coord_names = coordname, - attr_names = attrname, + data_names=dataname, + dim_names=2, + coord_names=coordname, + attr_names=attrname, ) with pytest.raises(TypeError): DatasetConverter.add_metadata_for_xarray( ds1, - data_names = dataname, - dim_names = dimname, - coord_names = 3, - attr_names = attrname, + data_names=dataname, + dim_names=dimname, + coord_names=3, + attr_names=attrname, ) with pytest.raises(TypeError): DatasetConverter.add_metadata_for_xarray( ds1, - data_names = dataname, - dim_names = dimname, - coord_names = coordname, - attr_names = [4, 5, 6], + data_names=dataname, + dim_names=dimname, + coord_names=coordname, + attr_names=[4, 5, 6], ) @@ -290,24 +302,24 @@ def test_add_metadata_for_xarray_2d(): # Call add_metadata_for_xarray for 2D data DatasetConverter.add_metadata_for_xarray( ds2, - data_names = ["2ddata"], - dim_names = ["dim_data_x", "dim_data_y"], - coord_names = ["x", "y"], - attr_names = ["units", "longname", "convention"], + data_names=["2ddata"], + dim_names=["dim_data_x", "dim_data_y"], + coord_names=["x", "y"], + attr_names=["units", "longname", "convention"], ) # Call add_metadata_for_xarray for longitude coordinate DatasetConverter.add_metadata_for_xarray( ds2, - data_names = ["x"], - dim_names = ["dim_data_x"], - attr_names = ["x_coord_units", "x_coord_longname"], + data_names=["x"], + dim_names=["dim_data_x"], + attr_names=["x_coord_units", "x_coord_longname"], ) # Call add_metadata_for_xarray for latitude coordinate DatasetConverter.add_metadata_for_xarray( ds2, - data_names = ["y"], - dim_names = ["dim_data_y"], - attr_names = ["y_coord_units", "y_coord_longname"], + data_names=["y"], + dim_names=["dim_data_y"], + attr_names=["y_coord_units", "y_coord_longname"], ) assert_equality_2d(ds2) @@ -328,41 +340,41 @@ def test_bad_type_add_metadata_for_xarray_2d(): with pytest.raises(TypeError): DatasetConverter.add_metadata_for_xarray( ds2, - data_names = 1, - dim_names = dimname, - coord_names = coordname, - attr_names = attrname, + data_names=1, + dim_names=dimname, + coord_names=coordname, + attr_names=attrname, ) with pytest.raises(TypeError): DatasetConverter.add_metadata_for_xarray( ds2, - data_names = dataname, - dim_names = [1, 2], - coord_names = coordname, - attr_names = attrname, + data_names=dataname, + dim_names=[1, 2], + coord_names=coordname, + attr_names=attrname, ) with pytest.raises(TypeError): DatasetConverter.add_metadata_for_xarray( ds2, - data_names = dataname, - dim_names = dimname, - coord_names = [3, 4], - attr_names 
= attrname, + data_names=dataname, + dim_names=dimname, + coord_names=[3, 4], + attr_names=attrname, ) with pytest.raises(TypeError): DatasetConverter.add_metadata_for_xarray( ds2, - data_names = dataname, - dim_names = dimname, - coord_names = coordname, - attr_names = [5, 6, 7], + data_names=dataname, + dim_names=dimname, + coord_names=coordname, + attr_names=[5, 6, 7], ) # ------- beginning of 1d transform_to_xarray tests------- -@pytest.mark.skipif(not xr, reason = _xarray_not_found) +@pytest.mark.skipif(not xr, reason=_xarray_not_found) def test_transform_to_xarray_1d(): """Test transform_to_xarray method with correct 1d tensor data and assert equality @@ -374,17 +386,17 @@ def test_transform_to_xarray_1d(): # good data and prerequisite for transform_to_xarray DatasetConverter.add_metadata_for_xarray( ds1, - data_names = ["1ddata"], - dim_names = ["dim_data_x"], - coord_names = ["x"], - attr_names = ["units", "longname", "convention"], + data_names=["1ddata"], + dim_names=["dim_data_x"], + coord_names=["x"], + attr_names=["units", "longname", "convention"], ) # Call method add_metadata_for_xarray for longitude coordinate DatasetConverter.add_metadata_for_xarray( ds1, - data_names = ["x"], - dim_names = ["dim_data_x"], - attr_names = ["x_coord_units", "x_coord_longname"], + data_names=["x"], + dim_names=["dim_data_x"], + attr_names=["x_coord_units", "x_coord_longname"], ) # Compare generated Xarray from 1D data to initial Xarray d1_xarray_ret = DatasetConverter.transform_to_xarray(ds1) @@ -392,7 +404,7 @@ def test_transform_to_xarray_1d(): assert ds_1d.identical(d1_transformed) -@pytest.mark.skipif(not xr, reason = _xarray_not_found) +@pytest.mark.skipif(not xr, reason=_xarray_not_found) def test_bad_data_transform_to_xarray_1d(): """Test transform_to_xarray method with incorrect 1d data""" @@ -411,10 +423,10 @@ def test_bad_data_transform_to_xarray_1d(): # Test incorrect data in data_names parameter DatasetConverter.add_metadata_for_xarray( ds1, - data_names = ["baddata"], - dim_names = dimname, - coord_names = coordname, - attr_names = attrname, + data_names=["baddata"], + dim_names=dimname, + coord_names=coordname, + attr_names=attrname, ) # Call method add_metadata_for_xarray for x coordinate DatasetConverter.add_metadata_for_xarray( @@ -426,10 +438,10 @@ def test_bad_data_transform_to_xarray_1d(): # Test incorrect data in dim_names parameter DatasetConverter.add_metadata_for_xarray( ds1, - data_names = dataname, - dim_names = ["baddata1", "baddata2"], - coord_names = coordname, - attr_names = attrname, + data_names=dataname, + dim_names=["baddata1", "baddata2"], + coord_names=coordname, + attr_names=attrname, ) # Call method add_metadata_for_xarray for x coordinate DatasetConverter.add_metadata_for_xarray( @@ -441,10 +453,10 @@ def test_bad_data_transform_to_xarray_1d(): # Test incorrect data in coord_names parameter DatasetConverter.add_metadata_for_xarray( ds1, - data_names = dataname, - dim_names = dimname, - coord_names = ["baddata"], - attr_names = attrname, + data_names=dataname, + dim_names=dimname, + coord_names=["baddata"], + attr_names=attrname, ) # Call method add_metadata_for_xarray for x coordinate DatasetConverter.add_metadata_for_xarray( @@ -456,10 +468,10 @@ def test_bad_data_transform_to_xarray_1d(): # Test incorrect data in attr_names parameter DatasetConverter.add_metadata_for_xarray( ds1, - data_names = dataname, - dim_names = dimname, - coord_names = coordname, - attr_names = ["baddata1", "baddata2", "baddata3"], + data_names=dataname, + dim_names=dimname, + 
coord_names=coordname, + attr_names=["baddata1", "baddata2", "baddata3"], ) # Call method add_metadata_for_xarray for x coordinate DatasetConverter.add_metadata_for_xarray( @@ -472,7 +484,7 @@ def test_bad_data_transform_to_xarray_1d(): # ------- beginning of 2d transform_to_xarray tests------- -@pytest.mark.skipif(not xr, reason = _xarray_not_found) +@pytest.mark.skipif(not xr, reason=_xarray_not_found) def test_transform_to_xarray_2d(): """Test transform_to_xarray method with correct 2d tensor data and assert equality @@ -486,24 +498,24 @@ def test_transform_to_xarray_2d(): # Prerequisite for transform to xarray DatasetConverter.add_metadata_for_xarray( ds2, - data_names = ["2ddata"], - dim_names = ["dim_data_x", "dim_data_y"], - coord_names = ["x", "y"], - attr_names = ["units", "longname", "convention"], + data_names=["2ddata"], + dim_names=["dim_data_x", "dim_data_y"], + coord_names=["x", "y"], + attr_names=["units", "longname", "convention"], ) # Call add_metadata_for_xarray for longitude coordinate DatasetConverter.add_metadata_for_xarray( ds2, - data_names = ["x"], - dim_names = ["dim_data_x"], - attr_names = ["x_coord_units", "x_coord_longname"], + data_names=["x"], + dim_names=["dim_data_x"], + attr_names=["x_coord_units", "x_coord_longname"], ) # Call add_metadata_for_xarray for latitude coordinate DatasetConverter.add_metadata_for_xarray( ds2, - data_names = ["y"], - dim_names = ["dim_data_y"], - attr_names = ["y_coord_units", "y_coord_longname"], + data_names=["y"], + dim_names=["dim_data_y"], + attr_names=["y_coord_units", "y_coord_longname"], ) # Compare generated Xarray to initial Xarray d2_xarray_ret = DatasetConverter.transform_to_xarray(ds2) @@ -511,7 +523,7 @@ def test_transform_to_xarray_2d(): assert ds_2d.identical(d2_transformed) -@pytest.mark.skipif(not xr, reason = _xarray_not_found) +@pytest.mark.skipif(not xr, reason=_xarray_not_found) def test_bad_data_transform_to_xarray_2d(): """Test transform_to_xarray method with incorrect 2d data""" @@ -535,10 +547,10 @@ def test_bad_data_transform_to_xarray_2d(): # Test incorrect data in data_names parameter DatasetConverter.add_metadata_for_xarray( ds2, - data_names = ["baddata"], - dim_names = dimname, - coord_names = coordname, - attr_names = attrname, + data_names=["baddata"], + dim_names=dimname, + coord_names=coordname, + attr_names=attrname, ) DatasetConverter.add_metadata_for_xarray( ds2, data_names=c1_dataname, dim_names=c1_dimname, attr_names=c1_attrname @@ -552,10 +564,10 @@ def test_bad_data_transform_to_xarray_2d(): # Test incorrect data in dim_names parameter DatasetConverter.add_metadata_for_xarray( ds2, - data_names = dataname, - dim_names = ["baddata1", "baddata2"], - coord_names = coordname, - attr_names = attrname, + data_names=dataname, + dim_names=["baddata1", "baddata2"], + coord_names=coordname, + attr_names=attrname, ) DatasetConverter.add_metadata_for_xarray( ds2, data_names=c1_dataname, dim_names=c1_dimname, attr_names=c1_attrname @@ -569,10 +581,10 @@ def test_bad_data_transform_to_xarray_2d(): # Test incorrect data in coord_names parameter DatasetConverter.add_metadata_for_xarray( ds2, - data_names = dataname, - dim_names = dimname, - coord_names = ["baddata1", "baddata2"], - attr_names = attrname, + data_names=dataname, + dim_names=dimname, + coord_names=["baddata1", "baddata2"], + attr_names=attrname, ) DatasetConverter.add_metadata_for_xarray( ds2, data_names=c1_dataname, dim_names=c1_dimname, attr_names=c1_attrname @@ -586,10 +598,10 @@ def test_bad_data_transform_to_xarray_2d(): # Test 
incorrect data in attr_names parameter DatasetConverter.add_metadata_for_xarray( ds2, - data_names = dataname, - dim_names = dimname, - coord_names = coordname, - attr_names = ["baddata1", "baddata2", "baddata3"], + data_names=dataname, + dim_names=dimname, + coord_names=coordname, + attr_names=["baddata1", "baddata2", "baddata3"], ) DatasetConverter.add_metadata_for_xarray( ds2, data_names=c1_dataname, dim_names=c1_dimname, attr_names=c1_attrname @@ -599,3 +611,25 @@ def test_bad_data_transform_to_xarray_2d(): ) with pytest.raises(RedisKeyError): d2_xarray_ret = DatasetConverter.transform_to_xarray(ds2) + + +def test_raise_exception_if_xarray_not_found(monkeypatch): + """Test that a redis runtime error raised if xarray not found + and provide instruction on how to fix it + """ + import sys + import smartredis.dataset_utils as _dsu + + monkeypatch.setattr(sys, "path", []) + # `raising=False` in case xarray isn't actually installed + monkeypatch.delitem(sys.modules, "xarray", raising=False) + monkeypatch.setattr(_dsu, "xr", None) + + @_dsu._requires_xarray + def _say_hello(): + print("Hello World!") + + with pytest.raises(RedisRuntimeError) as e: + _say_hello() + + assert "smartredis[xarray]" in str(e) diff --git a/2023-01/smartsim/smartredis/tests/python/test_dataset_methods.py b/2023-01/smartsim/smartredis/tests/python/test_dataset_methods.py index 6b850643..debd5f6f 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_dataset_methods.py +++ b/2023-01/smartsim/smartredis/tests/python/test_dataset_methods.py @@ -27,6 +27,19 @@ import numpy as np from smartredis import Dataset +def test_serialize_dataset(): + """Test serializing a dataset + """ + dataset = Dataset("test-dataset") + data = np.uint8([2,4,8]) + dataset.add_tensor("u8_tensor", data) + data = np.double([2.0,4.1,8.3, 5.6]) + dataset.add_tensor("double_tensor", data) + dataset.add_meta_scalar("float2_scalar", float(3.1415926535)) + dataset.add_meta_scalar("float_scalar", np.double(3.1415926535)) + dataset.add_meta_string("metastring", "metavalue") + assert str(dataset) != repr(dataset) + def test_add_get_tensor(mock_data): """Test adding and retrieving 1D tensors to @@ -82,15 +95,20 @@ def test_add_get_strings(mock_data): data = mock_data.create_metadata_strings(10) add_get_strings(dataset, data) + def test_dataset_inspection(context): d = Dataset(context) - data = np.uint8([2, 4, 6, 8]) + data = np.uint8([[2, 4, 6, 8], [1, 3, 5, 7]]) d.add_tensor("u8_tensor", data) data = np.int16([1, 1, 2, 3, 5, 8]) d.add_tensor("i16_tensor", data) d.add_meta_string("metastring", "metavalue") d.add_meta_scalar("u32_scalar", np.uint32(42)) d.add_meta_scalar("double_scalar", np.double(3.1415926535)) + dims = d.get_tensor_dims("u8_tensor") + assert len(dims) == 2 + assert dims[0] == 2 + assert dims[1] == 4 tensornames = d.get_tensor_names() assert 2 == len(tensornames) diff --git a/2023-01/smartsim/smartredis/tests/python/test_dataset_ops.py b/2023-01/smartsim/smartredis/tests/python/test_dataset_ops.py index a516465b..2f52609c 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_dataset_ops.py +++ b/2023-01/smartsim/smartredis/tests/python/test_dataset_ops.py @@ -31,12 +31,12 @@ from smartredis.error import * -def test_copy_dataset(use_cluster, context): +def test_copy_dataset(context): # test copying dataset from one key to another dataset = create_dataset("test_dataset_copy") - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) client.put_dataset(dataset) 
client.copy_dataset("test_dataset_copy", "test_dataset_copied") @@ -67,12 +67,12 @@ def test_copy_dataset(use_cluster, context): ) -def test_rename_dataset(use_cluster, context): +def test_rename_dataset(context): # test renaming a dataset in the database dataset = create_dataset("dataset_rename") - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) client.put_dataset(dataset) client.rename_dataset("dataset_rename", "dataset_renamed") @@ -105,12 +105,12 @@ def test_rename_dataset(use_cluster, context): ) -def test_delete_dataset(use_cluster, context): +def test_delete_dataset(context): # test renaming a dataset in the database dataset = create_dataset("dataset_delete") - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) client.put_dataset(dataset) client.delete_dataset( @@ -123,25 +123,33 @@ def test_delete_dataset(use_cluster, context): # ----------- Error handling ------------------------------------ -def test_rename_nonexisting_dataset(use_cluster, context): +def test_rename_nonexisting_dataset(context): - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) with pytest.raises(RedisReplyError): client.rename_dataset("not-a-tensor", "still-not-a-tensor") -def test_copy_nonexistant_dataset(use_cluster, context): +def test_copy_nonexistant_dataset(context): - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) with pytest.raises(RedisReplyError): client.copy_dataset("not-a-tensor", "still-not-a-tensor") -def test_copy_not_dataset(use_cluster, context): +def test_dataset_get_name(): + """Test getting a dataset name + """ + dataset = Dataset("test-dataset") + name = dataset.get_name() + assert name == "test-dataset" + + +def test_copy_not_dataset(context): def test_func(param): print(param) - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) client.set_function("test_func_dataset", test_func) with pytest.raises(RedisReplyError): client.copy_dataset("test_func_dataset", "test_fork_dataset") diff --git a/2023-01/smartsim/smartredis/tests/python/test_errors.py b/2023-01/smartsim/smartredis/tests/python/test_errors.py index 779da0dd..67f9289a 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_errors.py +++ b/2023-01/smartsim/smartredis/tests/python/test_errors.py @@ -28,94 +28,137 @@ import numpy as np import pytest +from os import environ from smartredis import * from smartredis.error import * +from smartredis.util import Dtypes -def test_SSDB_not_set(use_cluster, context): +test_gpu = environ.get("SMARTREDIS_TEST_DEVICE","cpu").lower() == "gpu" + +@pytest.fixture +def cfg_opts() -> ConfigOptions: + opts = ConfigOptions.create_from_environment("") + return opts + + +def test_SSDB_not_set(context): ssdb = os.environ["SSDB"] del os.environ["SSDB"] with pytest.raises(RedisConnectionError): - c = Client(None, use_cluster, logger_name=context) + _ = Client(None, logger_name=context) os.environ["SSDB"] = ssdb -def test_bad_SSDB(use_cluster, context): +def test_bad_SSDB(context): ssdb = os.environ["SSDB"] del os.environ["SSDB"] os.environ["SSDB"] = "not-an-address:6379;" with pytest.raises(RedisConnectionError): - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) os.environ["SSDB"] = ssdb -def test_bad_get_tensor(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def 
test_bad_get_tensor(context): + c = Client(None, logger_name=context) with pytest.raises(RedisReplyError): c.get_tensor("not-a-key") -def test_bad_get_dataset(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_get_dataset(context): + c = Client(None, logger_name=context) with pytest.raises(RedisKeyError): c.get_dataset("not-a-key") -def test_bad_script_file(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_script_file(context): + c = Client(None, logger_name=context) with pytest.raises(FileNotFoundError): c.set_script_from_file("key", "not-a-file") -def test_get_non_existant_script(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_get_non_existant_script(context): + c = Client(None, logger_name=context) with pytest.raises(RedisReplyError): script = c.get_script("not-a-script") -def test_bad_function_execution(use_cluster, context): +def test_bad_function_execution(context): """Error raised inside function""" - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) c.set_function("bad-function", bad_function) data = np.array([1, 2, 3, 4]) c.put_tensor("bad-func-tensor", data) with pytest.raises(RedisReplyError): c.run_script("bad-function", "bad_function", ["bad-func-tensor"], ["output"]) + with pytest.raises(RedisReplyError): + c.run_script("bad-function", "bad_function", "bad-func-tensor", "output") -def test_missing_script_function(use_cluster, context): +def test_missing_script_function(context): """User requests to run a function not in the script""" - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) c.set_function("bad-function", bad_function) with pytest.raises(RedisReplyError): - c.run_script( - "bad-function", "not-a-function-in-script", ["bad-func-tensor"], ["output"] - ) + c.run_script("bad-function", "not-a-function-in-script", ["bad-func-tensor"], ["output"]) + with pytest.raises(RedisReplyError): + c.run_script("bad-function", "not-a-function-in-script", "bad-func-tensor", "output") + +@pytest.mark.skipif( + not test_gpu, + reason="SMARTREDIS_TEST_DEVICE does not specify 'gpu'" +) +def test_bad_function_execution_multigpu(context): + """Error raised inside function""" + + c = Client(None, logger_name=context) + c.set_function_multigpu("bad-function", bad_function, 0, 1) + data = np.array([1, 2, 3, 4]) + c.put_tensor("bad-func-tensor", data) + with pytest.raises(RedisReplyError): + c.run_script_multigpu("bad-function", "bad_function", ["bad-func-tensor"], ["output"], 0, 0, 2) + with pytest.raises(RedisReplyError): + c.run_script_multigpu("bad-function", "bad_function", "bad-func-tensor", "output", 0, 0, 2) + + +@pytest.mark.skipif( + not test_gpu, + reason="SMARTREDIS_TEST_DEVICE does not specify 'gpu'" +) +def test_missing_script_function_multigpu(context): + """User requests to run a function not in the script""" + + c = Client(None, logger_name=context) + c.set_function_multigpu("bad-function", bad_function, 0, 1) + with pytest.raises(RedisReplyError): + c.run_script_multigpu("bad-function", "not-a-function-in-script", ["bad-func-tensor"], ["output"], 0, 0, 2) + with pytest.raises(RedisReplyError): + c.run_script_multigpu("bad-function", "not-a-function-in-script", "bad-func-tensor", "output", 0, 0, 2) -def test_wrong_model_name(mock_data, mock_model, use_cluster, context): +def test_wrong_model_name(mock_data, mock_model, context): """User requests to run a 
model that is not there""" data = mock_data.create_data(1) model = mock_model.create_torch_cnn() - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) c.set_model("simple_cnn", model, "TORCH", "CPU") c.put_tensor("input", data[0]) with pytest.raises(RedisReplyError): c.run_model("wrong_cnn", ["input"], ["output"]) -def test_wrong_model_name_from_file(mock_data, mock_model, use_cluster, context): +def test_wrong_model_name_from_file(mock_data, mock_model, context): """User requests to run a model that is not there that was loaded from file.""" try: data = mock_data.create_data(1) mock_model.create_torch_cnn(filepath="./torch_cnn.pt") - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) c.set_model_from_file("simple_cnn_from_file", "./torch_cnn.pt", "TORCH", "CPU") c.put_tensor("input", data[0]) with pytest.raises(RedisReplyError): @@ -124,16 +167,16 @@ def test_wrong_model_name_from_file(mock_data, mock_model, use_cluster, context) os.remove("torch_cnn.pt") -def test_bad_device(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_device(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.set_script("key", "some_script", device="not-a-gpu") ##### # Test type errors from bad parameter types to Client API calls -def test_bad_type_put_tensor(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_put_tensor(context): + c = Client(None, logger_name=context) array = np.array([1, 2, 3, 4]) with pytest.raises(TypeError): c.put_tensor(42, array) @@ -141,79 +184,79 @@ def test_bad_type_put_tensor(use_cluster, context): c.put_tensor("key", [1, 2, 3, 4]) -def test_unsupported_type_put_tensor(use_cluster, context): +def test_unsupported_type_put_tensor(context): """test an unsupported numpy type""" - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) data = np.array([1, 2, 3, 4]).astype(np.uint64) with pytest.raises(TypeError): c.put_tensor("key", data) -def test_bad_type_get_tensor(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_get_tensor(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.get_tensor(42) -def test_bad_type_delete_tensor(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_delete_tensor(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.delete_tensor(42) -def test_bad_type_copy_tensor(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_copy_tensor(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.copy_tensor(42, "newname") with pytest.raises(TypeError): c.copy_tensor("oldname", 42) -def test_bad_type_rename_tensor(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_rename_tensor(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.rename_tensor(42, "newname") with pytest.raises(TypeError): c.rename_tensor("oldname", 42) -def test_bad_type_put_dataset(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_put_dataset(context): + c = Client(None, logger_name=context) array = np.array([1, 2, 3, 4]) with pytest.raises(TypeError): c.put_dataset(array) -def test_bad_type_get_dataset(use_cluster, context): - c = Client(None, 
use_cluster, logger_name=context) +def test_bad_type_get_dataset(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.get_dataset(42) -def test_bad_type_delete_dataset(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_delete_dataset(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.delete_dataset(42) -def test_bad_type_copy_dataset(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_copy_dataset(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.copy_dataset(42, "dest") with pytest.raises(TypeError): c.copy_dataset("src", 42) -def test_bad_type_rename_dataset(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_rename_dataset(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.rename_dataset(42, "oldkey") with pytest.raises(TypeError): c.rename_dataset("newkey", 42) -def test_bad_type_set_function(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_set_function(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.set_function(42, bad_function) with pytest.raises(TypeError): @@ -221,8 +264,8 @@ def test_bad_type_set_function(use_cluster, context): with pytest.raises(TypeError): c.set_function("key", bad_function, 42) -def test_bad_type_set_function_multigpu(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_set_function_multigpu(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.set_function_multigpu(42, bad_function, 0, 1) with pytest.raises(TypeError): @@ -236,8 +279,8 @@ def test_bad_type_set_function_multigpu(use_cluster, context): with pytest.raises(ValueError): c.set_function_multigpu("key", bad_function, 0, 0) # invalid num GPUs -def test_bad_type_set_script(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_set_script(context): + c = Client(None, logger_name=context) key = "key_for_script" script = "bad script but correct parameter type" device = "CPU" @@ -248,8 +291,8 @@ def test_bad_type_set_script(use_cluster, context): with pytest.raises(TypeError): c.set_script(key, script, 42) -def test_bad_type_set_script_multigpu(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_set_script_multigpu(context): + c = Client(None, logger_name=context) key = "key_for_script" script = "bad script but correct parameter type" first_gpu = 0 @@ -267,8 +310,8 @@ def test_bad_type_set_script_multigpu(use_cluster, context): with pytest.raises(ValueError): c.set_script_multigpu(key, script, first_gpu, 0) -def test_bad_type_set_script_from_file(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_set_script_from_file(context): + c = Client(None, logger_name=context) key = "key_for_script" scriptfile = "bad filename but correct parameter type" device = "CPU" @@ -279,8 +322,8 @@ def test_bad_type_set_script_from_file(use_cluster, context): with pytest.raises(TypeError): c.set_script_from_file(key, scriptfile, 42) -def test_bad_type_set_script_from_file_multigpu(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_set_script_from_file_multigpu(context): + c = Client(None, logger_name=context) key = "key_for_script" scriptfile = "bad filename 
but correct parameter type" first_gpu = 0 @@ -294,14 +337,30 @@ def test_bad_type_set_script_from_file_multigpu(use_cluster, context): with pytest.raises(TypeError): c.set_script_from_file_multigpu(key, scriptfile, first_gpu, "not an integer") -def test_bad_type_get_script(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_get_script(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.get_script(42) -def test_bad_type_run_script(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_run_script_str(context): + c = Client(None, logger_name=context) + key = "my_script" + fn_name = "phred" + inputs = "a string" + outputs = "another string" + with pytest.raises(TypeError): + c.run_script(42, fn_name, inputs, outputs) + with pytest.raises(TypeError): + c.run_script(key, 42, inputs, outputs) + with pytest.raises(TypeError): + c.run_script(key, fn_name, 42, outputs) + with pytest.raises(TypeError): + c.run_script(key, fn_name, inputs, 42) + + +def test_bad_type_run_script_list(context): + c = Client(None, logger_name=context) key = "my_script" fn_name = "phred" inputs = ["list", "of", "strings"] @@ -316,8 +375,37 @@ def test_bad_type_run_script(use_cluster, context): c.run_script(key, fn_name, inputs, 42) -def test_bad_type_run_script_multigpu(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_run_script_multigpu_str(context): + c = Client(None, logger_name=context) + key = "my_script" + fn_name = "phred" + inputs = "a string" + outputs = "another string" + offset = 0 + first_gpu = 0 + num_gpus = 1 + with pytest.raises(TypeError): + c.run_script_multigpu(42, fn_name, inputs, outputs, offset, first_gpu, num_gpus) + with pytest.raises(TypeError): + c.run_script_multigpu(key, 42, inputs, outputs, offset, first_gpu, num_gpus) + with pytest.raises(TypeError): + c.run_script_multigpu(key, fn_name, 42, outputs, offset, first_gpu, num_gpus) + with pytest.raises(TypeError): + c.run_script_multigpu(key, fn_name, inputs, 42, offset, first_gpu, num_gpus) + with pytest.raises(TypeError): + c.run_script_multigpu(key, fn_name, inputs, outputs, "not an integer", first_gpu, num_gpus) + with pytest.raises(TypeError): + c.run_script_multigpu(key, fn_name, inputs, outputs, offset, "not an integer", num_gpus) + with pytest.raises(TypeError): + c.run_script_multigpu(key, fn_name, inputs, outputs, offset, first_gpu, "not an integer") + with pytest.raises(ValueError): + c.run_script_multigpu(key, fn_name, inputs, outputs, offset, -1, num_gpus) + with pytest.raises(ValueError): + c.run_script_multigpu(key, fn_name, inputs, outputs, offset, first_gpu, 0) + + +def test_bad_type_run_script_multigpu_list(context): + c = Client(None, logger_name=context) key = "my_script" fn_name = "phred" inputs = ["list", "of", "strings"] @@ -345,15 +433,15 @@ def test_bad_type_run_script_multigpu(use_cluster, context): c.run_script_multigpu(key, fn_name, inputs, outputs, offset, first_gpu, 0) -def test_bad_type_get_model(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_get_model(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.get_model(42) -def test_bad_type_set_model(mock_model, use_cluster, context): +def test_bad_type_set_model(mock_model, context): model = mock_model.create_torch_cnn() - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) with 
pytest.raises(TypeError): c.set_model(42, model, "TORCH", "CPU") with pytest.raises(TypeError): @@ -371,10 +459,10 @@ def test_bad_type_set_model(mock_model, use_cluster, context): with pytest.raises(TypeError): c.set_model("simple_cnn", model, "TORCH", "CPU", tag=42) -def test_bad_type_set_model_multigpu(mock_model, use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_set_model_multigpu(mock_model, context): + c = Client(None, logger_name=context) model = mock_model.create_torch_cnn() - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.set_model_multigpu(42, model, "TORCH", 0, 1) with pytest.raises(TypeError): @@ -397,9 +485,9 @@ def test_bad_type_set_model_multigpu(mock_model, use_cluster, context): c.set_model_multigpu("simple_cnn", model, "TORCH", 0, 1, tag=42) -def test_bad_type_set_model_from_file(use_cluster, context): +def test_bad_type_set_model_from_file(context): modelfile = "bad filename but right parameter type" - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.set_model_from_file(42, modelfile, "TORCH", "CPU") with pytest.raises(TypeError): @@ -419,9 +507,9 @@ def test_bad_type_set_model_from_file(use_cluster, context): with pytest.raises(TypeError): c.set_model_from_file("simple_cnn", modelfile, "TORCH", "CPU", tag=42) -def test_bad_type_set_model_from_file_multigpu(use_cluster, context): +def test_bad_type_set_model_from_file_multigpu(context): modelfile = "bad filename but right parameter type" - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.set_model_from_file_multigpu(42, modelfile, "TORCH", 0, 1) with pytest.raises(TypeError): @@ -438,17 +526,19 @@ def test_bad_type_set_model_from_file_multigpu(use_cluster, context): c.set_model_from_file_multigpu("simple_cnn", modelfile, "TORCH", 0, 1, batch_size="not_an_integer") with pytest.raises(TypeError): c.set_model_from_file_multigpu("simple_cnn", modelfile, "TORCH", 0, 1, min_batch_size="not_an_integer") + with pytest.raises(TypeError): + c.set_model_from_file_multigpu("simple_cnn", modelfile, "TORCH", 0, 1, min_batch_timeout="not_an_integer") with pytest.raises(TypeError): c.set_model_from_file_multigpu("simple_cnn", modelfile, "TORCH", 0, 1, tag=42) -def test_bad_type_run_model(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_run_model(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.run_model(42) -def test_bad_type_run_model_multigpu(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_run_model_multigpu(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.run_model_multigpu(42, 0, 0, 1) with pytest.raises(TypeError): @@ -462,8 +552,8 @@ def test_bad_type_run_model_multigpu(use_cluster, context): with pytest.raises(ValueError): c.run_model_multigpu("simple_cnn", 0, 0, 0) -def test_bad_type_delete_model_multigpu(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_delete_model_multigpu(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.delete_model_multigpu(42, 0, 1) with pytest.raises(TypeError): @@ -475,8 +565,8 @@ def test_bad_type_delete_model_multigpu(use_cluster, context): with pytest.raises(ValueError): 
c.delete_model_multigpu("simple_cnn", 0, 0) -def test_bad_type_delete_script_multigpu(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_delete_script_multigpu(context): + c = Client(None, logger_name=context) script_name = "my_script" with pytest.raises(TypeError): c.delete_script_multigpu(42, 0, 1) @@ -489,32 +579,32 @@ def test_bad_type_delete_script_multigpu(use_cluster, context): with pytest.raises(ValueError): c.delete_script_multigpu(script_name, 0, 0) -def test_bad_type_tensor_exists(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_tensor_exists(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.tensor_exists(42) -def test_bad_type_dataset_exists(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_dataset_exists(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.dataset_exists(42) -def test_bad_type_model_exists(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_model_exists(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.model_exists(42) -def test_bad_type_key_exists(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_key_exists(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.key_exists(42) -def test_bad_type_poll_key(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_poll_key(context): + c = Client(None, logger_name=context) name = "some_key" freq = 42 num_tries = 42 @@ -527,8 +617,8 @@ def test_bad_type_poll_key(use_cluster, context): c.poll_key(name, freq, bogus) -def test_bad_type_poll_tensor(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_poll_tensor(context): + c = Client(None, logger_name=context) name = "some_key" freq = 42 num_tries = 42 @@ -541,8 +631,8 @@ def test_bad_type_poll_tensor(use_cluster, context): c.poll_tensor(name, freq, bogus) -def test_bad_type_poll_dataset(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_poll_dataset(context): + c = Client(None, logger_name=context) name = "some_key" freq = 42 num_tries = 42 @@ -555,8 +645,8 @@ def test_bad_type_poll_dataset(use_cluster, context): c.poll_dataset(name, freq, bogus) -def test_bad_type_poll_model(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_poll_model(context): + c = Client(None, logger_name=context) name = "some_key" freq = 42 num_tries = 42 @@ -569,44 +659,50 @@ def test_bad_type_poll_model(use_cluster, context): c.poll_model(name, freq, bogus) -def test_bad_type_set_data_source(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_set_data_source(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.set_data_source(42) -def test_bad_type_use_model_ensemble_prefix(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_use_model_ensemble_prefix(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.use_model_ensemble_prefix("not a boolean") -def test_bad_type_use_list_ensemble_prefix(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_use_list_ensemble_prefix(context): + c = Client(None, 
logger_name=context) with pytest.raises(TypeError): c.use_list_ensemble_prefix("not a boolean") -def test_bad_type_use_tensor_ensemble_prefix(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_use_tensor_ensemble_prefix(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.use_tensor_ensemble_prefix("not a boolean") -def test_bad_type_get_db_node_info(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_use_dataset_ensemble_prefix(context): + c = Client(None, logger_name=context) + with pytest.raises(TypeError): + c.use_dataset_ensemble_prefix("not a boolean") + + +def test_bad_type_get_db_node_info(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.get_db_node_info("not a list") -def test_bad_type_get_db_cluster_info(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_get_db_cluster_info(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.get_db_cluster_info("not a list") -def test_bad_type_get_ai_info(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_get_ai_info(context): + c = Client(None, logger_name=context) address = ["list", "of", "str"] key = "ai.info.key" with pytest.raises(TypeError): @@ -617,22 +713,22 @@ def test_bad_type_get_ai_info(use_cluster, context): c.get_ai_info(address, key, "not a boolean") -def test_bad_type_flush_db(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_flush_db(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.flush_db("not a list") -def test_bad_type_config_get(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_config_get(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.config_get("timeout", 42) with pytest.raises(TypeError): c.config_get(42, "address") -def test_bad_type_config_set(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_config_set(context): + c = Client(None, logger_name=context) param = "timeout" value = "never" address = "127.0.0.1:6379" @@ -644,42 +740,42 @@ def test_bad_type_config_set(use_cluster, context): c.config_set(param, value, 42) -def test_bad_type_save(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_save(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.save("not a list") -def test_bad_type_append_to_list(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_append_to_list(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.append_to_list(42, 42) -def test_bad_type_delete_list(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_delete_list(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.delete_list(42) -def test_bad_type_copy_list(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_copy_list(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.copy_list(42, "dest") with pytest.raises(TypeError): c.copy_list("src", 42) -def test_bad_type_rename_list(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_rename_list(context): + c = 
Client(None, logger_name=context) with pytest.raises(TypeError): c.rename_list(42, "dest") with pytest.raises(TypeError): c.rename_list("src", 42) -def test_bad_type_get_list_length(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_get_list_length(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.get_list_length(42) -def test_bad_type_poll_list_length(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_poll_list_length(context): + c = Client(None, logger_name=context) name = "mylist" len = 42 pollfreq = 42 @@ -693,8 +789,8 @@ def test_bad_type_poll_list_length(use_cluster, context): with pytest.raises(TypeError): c.poll_list_length(name, len, pollfreq, "not an integer") -def test_bad_type_poll_list_length_gte(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_poll_list_length_gte(context): + c = Client(None, logger_name=context) name = "mylist" len = 42 pollfreq = 42 @@ -708,8 +804,8 @@ def test_bad_type_poll_list_length_gte(use_cluster, context): with pytest.raises(TypeError): c.poll_list_length_gte(name, len, pollfreq, "not an integer") -def test_bad_type_poll_list_length_lte(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_poll_list_length_lte(context): + c = Client(None, logger_name=context) name = "mylist" len = 42 pollfreq = 42 @@ -723,13 +819,13 @@ def test_bad_type_poll_list_length_lte(use_cluster, context): with pytest.raises(TypeError): c.poll_list_length_lte(name, len, pollfreq, "not an integer") -def test_bad_type_get_datasets_from_list(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_get_datasets_from_list(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.get_datasets_from_list(42) -def test_bad_type_get_dataset_list_range(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_get_dataset_list_range(context): + c = Client(None, logger_name=context) listname = "my_list" start_index = 0 end_index = 42 @@ -740,11 +836,19 @@ def test_bad_type_get_dataset_list_range(use_cluster, context): with pytest.raises(TypeError): c.get_dataset_list_range(listname, start_index, "not an integer") +def test_bad_type_set_model_chunk_size(context): + c = Client(None, logger_name=context) + with pytest.raises(TypeError): + c.set_model_chunk_size("not an integer") + +##### +# Test type errors from bad parameter types to logging calls + @pytest.mark.parametrize("log_fn", [ (log_data,), (log_warning,), (log_error,) ]) -def test_bad_type_log_function(use_cluster, context, log_fn): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_log_function(context, log_fn): + c = Client(None, logger_name=context) with pytest.raises(TypeError): log_fn(42, LLInfo, "Data to be logged") with pytest.raises(TypeError): @@ -752,8 +856,8 @@ def test_bad_type_log_function(use_cluster, context, log_fn): with pytest.raises(TypeError): log_fn("test_bad_type_log_function", LLInfo, 42) -def test_bad_type_client_log(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_bad_type_client_log(context): + c = Client(None, logger_name=context) with pytest.raises(TypeError): c.log_data("Not a logging level", "Data to be logged") with pytest.raises(TypeError): @@ -896,6 +1000,19 @@ def test_get_metadata_field_type_wrong_type(): with pytest.raises(TypeError): 
d.get_metadata_field_type(42) +def test_from_string_wrong_type(): + """A call to Dataset.get_metadata_field_type is made with the wrong type + """ + with pytest.raises(TypeError): + Dtypes.from_string("Incorrect input") + +def test_metadata_from_numpy_wrong_type(): + """A call to Dataset.add_meta_scalar is made with the wrong type + """ + array = np.array(["Incorrect Input"]) + with pytest.raises(TypeError): + Dtypes.metadata_from_numpy(array) + def test_get_tensor_names_wrong_type(): """A call to Dataset.get_tensor_names is made with the wrong type """ @@ -903,6 +1020,56 @@ def test_get_tensor_names_wrong_type(): with pytest.raises(TypeError): d.get_tensor_names(42) +##### +# Test type errors from bad parameter types to ConfigOptions API calls + +def test_create_from_environment_wrong_type(): + """Ensure create_from_environment doesn't accept an invalid db_prefix param""" + with pytest.raises(TypeError): + _ = ConfigOptions.create_from_environment(42) + +def test_get_integer_option_wrong_type(cfg_opts: ConfigOptions): + """Ensure get_integer_option raises an exception on an invalid key type""" + + with pytest.raises(TypeError): + _ = cfg_opts.get_integer_option(42) + +def test_get_string_option_wrong_type(cfg_opts: ConfigOptions): + """Ensure get_string_option raises an exception on an invalid key type""" + with pytest.raises(TypeError): + _ = cfg_opts.get_string_option(42) + +def test_is_configured_wrong_type(cfg_opts: ConfigOptions): + """Ensure is_configured raises an exception on an invalid key type""" + with pytest.raises(TypeError): + _ = cfg_opts.is_configured(42) + +def test_override_integer_option_wrong_type(cfg_opts: ConfigOptions): + """Ensure override_integer_option raises an exception on an invalid key type + and when an invalid value for the target storage type is encountered""" + key = 42 + value = 42 + with pytest.raises(TypeError): + _ = cfg_opts.override_integer_option(key, value) + + key = "key" + value = "stringval" + with pytest.raises(TypeError): + _ = cfg_opts.override_integer_option(key, value) + +def test_override_string_option_wrong_type(cfg_opts: ConfigOptions): + """Ensure override_string_option raises an exception on an invalid key type + and when an invalid value for the target storage type is encountered""" + + key = 42 + value = "stringval" + with pytest.raises(TypeError): + _ = cfg_opts.override_string_option(key, value) + + key = "stringkey" + value = 42 + with pytest.raises(TypeError): + _ = cfg_opts.override_string_option(key, value) #### # Utility functions diff --git a/2023-01/smartsim/smartredis/tests/python/test_logging.py b/2023-01/smartsim/smartredis/tests/python/test_logging.py index 100016cc..c9245d9d 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_logging.py +++ b/2023-01/smartsim/smartredis/tests/python/test_logging.py @@ -31,7 +31,7 @@ @pytest.mark.parametrize("log_level", [ LLQuiet, LLInfo, LLDebug, LLDeveloper ]) -def test_logging_string(use_cluster, context, log_level): +def test_logging_string(context, log_level): log_data(context, log_level, f"This is data logged from a string ({log_level.name})") log_warning(context, log_level, f"This is a warning logged from a string ({log_level.name})") log_error(context, log_level, f"This is an error logged from a string ({log_level.name})") @@ -39,8 +39,8 @@ def test_logging_string(use_cluster, context, log_level): @pytest.mark.parametrize("log_level", [ LLQuiet, LLInfo, LLDebug, LLDeveloper ]) -def test_logging_client(use_cluster, context, log_level): - c = Client(None, use_cluster, 
logger_name=context) +def test_logging_client(context, log_level): + c = Client(None, logger_name=context) c.log_data(log_level, f"This is data logged from a client ({log_level.name})") c.log_warning(log_level, f"This is a warning logged from a client ({log_level.name})") c.log_error(log_level, f"This is an error logged from a client ({log_level.name})") diff --git a/2023-01/smartsim/smartredis/tests/python/test_model_methods_torch.py b/2023-01/smartsim/smartredis/tests/python/test_model_methods_torch.py index 9220b675..e1a9bec9 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_model_methods_torch.py +++ b/2023-01/smartsim/smartredis/tests/python/test_model_methods_torch.py @@ -27,21 +27,26 @@ import os import torch +import pytest +from os import environ from smartredis import Client +from smartredis.error import * +test_gpu = environ.get("SMARTREDIS_TEST_DEVICE","cpu").lower() == "gpu" -def test_set_model(mock_model, use_cluster, context): +def test_set_model(mock_model, context): model = mock_model.create_torch_cnn() - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) c.set_model("simple_cnn", model, "TORCH", "CPU") returned_model = c.get_model("simple_cnn") assert model == returned_model -def test_set_model_from_file(mock_model, use_cluster, context): +def test_set_model_from_file(mock_model, context): try: mock_model.create_torch_cnn(filepath="./torch_cnn.pt") - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) + c.set_model_chunk_size(1024 * 1024) c.set_model_from_file("file_cnn", "./torch_cnn.pt", "TORCH", "CPU") assert c.model_exists("file_cnn") returned_model = c.get_model("file_cnn") @@ -54,10 +59,10 @@ def test_set_model_from_file(mock_model, use_cluster, context): os.remove("torch_cnn.pt") -def test_torch_inference(mock_model, use_cluster, context): +def test_torch_inference(mock_model, context): # get model and set into database model = mock_model.create_torch_cnn() - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) c.set_model("torch_cnn", model, "TORCH") # setup input tensor @@ -68,3 +73,124 @@ def test_torch_inference(mock_model, use_cluster, context): c.run_model("torch_cnn", inputs=["torch_cnn_input"], outputs=["torch_cnn_output"]) out_data = c.get_tensor("torch_cnn_output") assert out_data.shape == (1, 1, 1, 1) + +def test_batch_exceptions(mock_model, context): + # get model and set into database + mock_model.create_torch_cnn(filepath="./torch_cnn.pt") + model = mock_model.create_torch_cnn() + c = Client(None, logger_name=context) + batch_size = 1 + min_batch_size = 1 + min_batch_timeout = 1 + with pytest.raises(RedisRuntimeError): + c.set_model_from_file( + "file_cnn", "./torch_cnn.pt", "TORCH", "CPU", + batch_size=0, min_batch_size=0, min_batch_timeout=min_batch_timeout + ) + with pytest.raises(RedisRuntimeError): + c.set_model_from_file( + "file_cnn", "./torch_cnn.pt", "TORCH", "CPU", + batch_size=0, min_batch_size=min_batch_size, min_batch_timeout=0 + ) + with pytest.raises(RedisRuntimeError): + c.set_model_from_file( + "file_cnn", "./torch_cnn.pt", "TORCH", "CPU", + batch_size=batch_size, min_batch_size=0, min_batch_timeout=min_batch_timeout + ) + with pytest.raises(RedisRuntimeError): + c.set_model_from_file_multigpu( + "file_cnn", "./torch_cnn.pt", "TORCH", 1, 1, + batch_size=0, min_batch_size=0, min_batch_timeout=min_batch_timeout + ) + with pytest.raises(RedisRuntimeError): + c.set_model_from_file_multigpu( + "file_cnn", 
"./torch_cnn.pt", "TORCH", 1, 1, + batch_size=0, min_batch_size=min_batch_size, min_batch_timeout=0 + ) + with pytest.raises(RedisRuntimeError): + c.set_model_from_file_multigpu( + "file_cnn", "./torch_cnn.pt", "TORCH", 1, 1, + batch_size=batch_size, min_batch_size=0, min_batch_timeout=min_batch_timeout + ) + with pytest.raises(RedisRuntimeError): + c.set_model( + "file_cnn", model, "TORCH", "CPU", + batch_size=0, min_batch_size=0, min_batch_timeout=min_batch_timeout + ) + with pytest.raises(RedisRuntimeError): + c.set_model( + "file_cnn", model, "TORCH", "CPU", + batch_size=0, min_batch_size=min_batch_size, min_batch_timeout=0 + ) + with pytest.raises(RedisRuntimeError): + c.set_model( + "file_cnn", model, "TORCH", "CPU", + batch_size=batch_size, min_batch_size=0, min_batch_timeout=min_batch_timeout + ) + with pytest.raises(RedisRuntimeError): + c.set_model_multigpu( + "file_cnn", model, "TORCH", 1, 1, + batch_size=0, min_batch_size=0, min_batch_timeout=min_batch_timeout + ) + with pytest.raises(RedisRuntimeError): + c.set_model_multigpu( + "file_cnn", model, "TORCH", 1, 1, + batch_size=0, min_batch_size=min_batch_size, min_batch_timeout=0 + ) + with pytest.raises(RedisRuntimeError): + c.set_model_multigpu( + "file_cnn", model, "TORCH", 1, 1, + batch_size=batch_size, min_batch_size=0, min_batch_timeout=min_batch_timeout + ) + +def test_batch_warning_set_model_from_file(mock_model, context, capfd): + # get model and set into database + mock_model.create_torch_cnn(filepath="./torch_cnn.pt") + c = Client(None, logger_name=context) + c.set_model_from_file( + "file_cnn", "./torch_cnn.pt", "TORCH", "CPU", + batch_size=1, min_batch_size=1, min_batch_timeout=0 + ) + captured = capfd.readouterr() + assert "WARNING" in captured.err + +@pytest.mark.skipif( + not test_gpu, + reason="SMARTREDIS_TEST_DEVICE does not specify 'gpu'" +) +def test_batch_warning_set_model_from_file_multigpu(mock_model, context, capfd): + # get model and set into database + mock_model.create_torch_cnn(filepath="./torch_cnn.pt") + c = Client(None, logger_name=context) + c.set_model_from_file_multigpu( + "file_cnn", "./torch_cnn.pt", "TORCH", 1, 1, + batch_size=1, min_batch_size=1, min_batch_timeout=0 + ) + captured = capfd.readouterr() + assert "WARNING" in captured.err + +def test_batch_warning_set_model(mock_model, context, capfd): + # get model and set into database + model = mock_model.create_torch_cnn() + c = Client(None, logger_name=context) + c.set_model( + "file_cnn", model, "TORCH", "CPU", + batch_size=1, min_batch_size=1, min_batch_timeout=0 + ) + captured = capfd.readouterr() + assert "WARNING" in captured.err + +@pytest.mark.skipif( + not test_gpu, + reason="SMARTREDIS_TEST_DEVICE does not specify 'gpu'" +) +def test_batch_warning_set_model_multigpu(mock_model, context, capfd): + # get model and set into database + model = mock_model.create_torch_cnn() + c = Client(None, logger_name=context) + c.set_model_multigpu( + "file_cnn", model, "TORCH", 1, 1, + batch_size=1, min_batch_size=1, min_batch_timeout=0 + ) + captured = capfd.readouterr() + assert "WARNING" in captured.err diff --git a/2023-01/smartsim/smartredis/tests/python/test_nonkeyed_cmd.py b/2023-01/smartsim/smartredis/tests/python/test_nonkeyed_cmd.py index 79a280df..9a0fb93b 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_nonkeyed_cmd.py +++ b/2023-01/smartsim/smartredis/tests/python/test_nonkeyed_cmd.py @@ -29,45 +29,30 @@ import numpy as np import pytest from smartredis import Client +from smartredis import ConfigOptions from smartredis.error 
import * -def test_dbnode_info_command(use_cluster, context): - # get env var to set through client init +def test_dbnode_info_command(context): ssdb = os.environ["SSDB"] - db_info_addr = [ssdb] - del os.environ["SSDB"] - - client = Client(address=ssdb, cluster=use_cluster, logger_name=context) - - info = client.get_db_node_info(db_info_addr) - + addresses = ssdb.split(',') + client = Client(None, logger_name=context) + info = client.get_db_node_info(addresses) assert len(info) > 0 - -def test_dbcluster_info_command(mock_model, use_cluster, context): - # get env var to set through client init +def test_dbcluster_info_command(mock_model, context): ssdb = os.environ["SSDB"] - address = [ssdb] - del os.environ["SSDB"] - - client = Client(address=ssdb, cluster=use_cluster, logger_name=context) + addresses = ssdb.split(',') + co = ConfigOptions().create_from_environment("") + client = Client(co, logger_name=context) - if use_cluster: - info = client.get_db_cluster_info(address) + if os.environ["SR_DB_TYPE"] == "Clustered": + info = client.get_db_cluster_info(addresses) assert len(info) > 0 else: # cannot call get_db_cluster_info in non-cluster environment with pytest.raises(RedisReplyError): - client.get_db_cluster_info(address) - - # get env var to set through client init - ssdb = os.environ["SSDB"] - address = [ssdb] - del os.environ["SSDB"] - - # Init client - client = Client(address=ssdb, cluster=use_cluster, logger_name=context) + client.get_db_cluster_info(addresses) # Get a mock model model = mock_model.create_torch_cnn() @@ -76,7 +61,7 @@ def test_dbcluster_info_command(mock_model, use_cluster, context): client.set_model("ai_info_cnn", model, "TORCH", "CPU") # Check with valid address and model key - ai_info = client.get_ai_info(address, "ai_info_cnn") + ai_info = client.get_ai_info(addresses, "ai_info_cnn") assert len(ai_info) != 0 # Check that invalid address throws error @@ -85,83 +70,63 @@ def test_dbcluster_info_command(mock_model, use_cluster, context): # Check that invalid model name throws error with pytest.raises(RedisRuntimeError): - client.get_ai_info(address, "bad_key") + client.get_ai_info(addresses, "bad_key") -def test_flushdb_command(use_cluster, context): +def test_flushdb_command(context): # from within the testing framework, there is no way # of knowing each db node that is being used, so skip # if on cluster - if use_cluster: - return - # get env var to set through client init ssdb = os.environ["SSDB"] - address = [ssdb] - del os.environ["SSDB"] + addresses = ssdb.split(',') + if os.environ["SR_DB_TYPE"] == "Clustered": + return - client = Client(address=ssdb, cluster=use_cluster, logger_name=context) + client = Client(None, logger_name=context) # add key to client via put_tensor tensor = np.array([1, 2]) client.put_tensor("test_copy", tensor) assert client.tensor_exists("test_copy") - client.flush_db(address) + client.flush_db(addresses) assert not client.tensor_exists("test_copy") -def test_config_set_get_command(use_cluster, context): +def test_config_set_get_command(context): # get env var to set through client init ssdb = os.environ["SSDB"] - - del os.environ["SSDB"] - - client = Client(address=ssdb, cluster=use_cluster, logger_name=context) + client = Client(None, logger_name=context) value = "6000" client.config_set("lua-time-limit", value, ssdb) - get_reply = client.config_get("lua-time-limit", ssdb) assert len(get_reply) > 0 assert get_reply["lua-time-limit"] == value -def test_config_set_command_DNE(use_cluster, context): - # get env var to set through 
client init +def test_config_set_command_DNE(context): ssdb = os.environ["SSDB"] - - del os.environ["SSDB"] - - client = Client(address=ssdb, cluster=use_cluster, logger_name=context) + client = Client(None, logger_name=context) # The CONFIG parameter "config_param_DNE" is unsupported with pytest.raises(RedisReplyError): client.config_set("config_param_DNE", "10", ssdb) -def test_config_get_command_DNE(use_cluster, context): - # get env var to set through client init +def test_config_get_command_DNE(context): ssdb = os.environ["SSDB"] - - del os.environ["SSDB"] - - client = Client(address=ssdb, cluster=use_cluster, logger_name=context) + client = Client(None, logger_name=context) # CONFIG GET returns an empty dictionary if the config_param is unsupported get_reply = client.config_get("config_param_DNE", ssdb) assert get_reply == dict() -def test_save_command(use_cluster, mock_data, context): - # get env var to set through client init +def test_save_command(context): ssdb = os.environ["SSDB"] - if use_cluster: - addresses = ssdb.split(",") - else: - addresses = [ssdb] - del os.environ["SSDB"] + client = Client(None, logger_name=context) - # client init should fail if SSDB not set - client = Client(address=ssdb, cluster=use_cluster, logger_name=context) + addresses = ssdb.split(",") # for each address, check that the timestamp of the last SAVE increases after calling Client::save for address in addresses: diff --git a/2023-01/smartsim/smartredis/tests/python/test_prefixing.py b/2023-01/smartsim/smartredis/tests/python/test_prefixing.py new file mode 100644 index 00000000..02347063 --- /dev/null +++ b/2023-01/smartsim/smartredis/tests/python/test_prefixing.py @@ -0,0 +1,115 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
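A note on the new test_prefixing.py that follows: as the tests exercise it, SSKEYOUT supplies the prefix a client attaches to every key it writes, SSKEYIN lists the prefixes it is allowed to read, and set_data_source selects which SSKEYIN entry read calls resolve against. A minimal sketch of the key shapes the assertions below rely on (the helper functions are illustrative, not SmartRedis internals):

    def tensor_key(prefix, name):
        # tensors are stored under "<prefix>.<name>"
        return f"{prefix}.{name}"

    def dataset_meta_key(prefix, name):
        # dataset metadata is stored under "<prefix>.{<name>}.meta"
        return f"{prefix}.{{{name}}}.meta"

    assert tensor_key("prefix_test", "test_tensor") == "prefix_test.test_tensor"
    assert dataset_meta_key("prefix_test", "test_dataset") == "prefix_test.{test_dataset}.meta"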
+ +import numpy as np +import os + +from smartredis import Client, Dataset + +def test_prefixing(context, monkeypatch): + # configure prefix variables + monkeypatch.setenv("SSKEYOUT", "prefix_test") + monkeypatch.setenv("SSKEYIN", "prefix_test,prefix_ignore") + + # Set up client + c = Client(logger_name=context) + c.use_dataset_ensemble_prefix(True) + c.use_tensor_ensemble_prefix(True) + c.set_data_source("prefix_test") + + # Create Dataset + d = Dataset("test_dataset") + data = np.uint16([1, 2, 3, 4]) + d.add_tensor("dataset_tensor", data) + c.put_dataset(d) + c.put_tensor("test_tensor", data) + + # Validate keys to see whether prefixing was applied properly + assert c.dataset_exists("test_dataset") + assert c.key_exists("prefix_test.{test_dataset}.meta") + assert not c.key_exists("test_dataset") + assert c.tensor_exists("test_tensor") + assert c.key_exists("prefix_test.test_tensor") + assert not c.key_exists("test_tensor") + +def test_model_prefixing(mock_model, context, monkeypatch): + # configure prefix variables + monkeypatch.setenv("SSKEYOUT", "prefix_test") + monkeypatch.setenv("SSKEYIN", "prefix_test,prefix_ignore") + + # Set up client + c = Client(logger_name=context) + c.use_model_ensemble_prefix(True) + c.set_data_source("prefix_test") + + # Create model + model = mock_model.create_torch_cnn() + c.set_model("simple_cnn", model, "TORCH", "CPU") + + # Validate keys to see whether prefixing was applied properly + assert c.model_exists("simple_cnn") + assert not c.key_exists("simple_cnn") + + +def test_list_prefixing(context, monkeypatch): + # configure prefix variables + monkeypatch.setenv("SSKEYOUT", "prefix_test") + monkeypatch.setenv("SSKEYIN", "prefix_test,prefix_ignore") + + # Set up client + c = Client(logger_name=context) + c.use_list_ensemble_prefix(True) + c.set_data_source("prefix_test") + + # Build datasets + num_datasets = 4 + original_datasets = [create_dataset(f"dataset_{i}") for i in range(num_datasets)] + + # Make sure the list is cleared + list_name = "dataset_test_list" + c.delete_list(list_name) + + # Put datasets into the list + for i in range(num_datasets): + c.put_dataset(original_datasets[i]) + c.append_to_list(list_name, original_datasets[i]) + + # Validate keys to see whether prefixing was applied properly + assert c.key_exists("prefix_test.dataset_test_list") + assert not c.key_exists("dataset_test_list") + +# ------------ helper functions --------------------------------- + +def create_dataset(name): + array = np.array([1, 2, 3, 4]) + string = "test_meta_strings" + scalar = 7 + + dataset = Dataset(name) + dataset.add_tensor("test_array", array) + dataset.add_meta_string("test_string", string) + dataset.add_meta_scalar("test_scalar", scalar) + return dataset \ No newline at end of file diff --git a/2023-01/smartsim/smartredis/tests/python/test_put_get_dataset.py b/2023-01/smartsim/smartredis/tests/python/test_put_get_dataset.py index ecba9ec2..007c7330 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_put_get_dataset.py +++ b/2023-01/smartsim/smartredis/tests/python/test_put_get_dataset.py @@ -30,8 +30,8 @@ from smartredis import Client, Dataset -def test_put_get_dataset(mock_data, use_cluster, context): - """test sending and recieving a dataset with 2D tensors +def test_put_get_dataset(mock_data, context): + """test sending and receiving a dataset with 2D tensors of every datatype """ @@ -43,7 +43,7 @@ def test_put_get_dataset(mock_data, use_cluster, context): key = f"tensor_{str(index)}" dataset.add_tensor(key, tensor) - client = Client(None, 
use_cluster, logger_name=context) + client = Client(None, logger_name=context) assert not client.dataset_exists( "nonexistent-dataset" @@ -64,7 +64,7 @@ def test_put_get_dataset(mock_data, use_cluster, context): ) -def test_augment_dataset(mock_data, use_cluster, context): +def test_augment_dataset(mock_data, context): """Test sending, receiving, altering, and sending a Dataset. """ @@ -75,7 +75,7 @@ def test_augment_dataset(mock_data, use_cluster, context): dataset_name = "augment-dataset" # Initialize a client - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) # Create a dataset to put into the database dataset = Dataset(dataset_name) diff --git a/2023-01/smartsim/smartredis/tests/python/test_put_get_tensor.py b/2023-01/smartsim/smartredis/tests/python/test_put_get_tensor.py index f067e17f..ea40628d 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_put_get_tensor.py +++ b/2023-01/smartsim/smartredis/tests/python/test_put_get_tensor.py @@ -31,28 +31,28 @@ # ----- Tests ----------------------------------------------------------- -def test_1D_put_get(mock_data, use_cluster, context): +def test_1D_put_get(mock_data, context): """Test put/get_tensor for 1D numpy arrays""" - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) data = mock_data.create_data(10) send_get_arrays(client, data) -def test_2D_put_get(mock_data, use_cluster, context): +def test_2D_put_get(mock_data, context): """Test put/get_tensor for 2D numpy arrays""" - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) data = mock_data.create_data((10, 10)) send_get_arrays(client, data) -def test_3D_put_get(mock_data, use_cluster, context): +def test_3D_put_get(mock_data, context): """Test put/get_tensor for 3D numpy arrays""" - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) data = mock_data.create_data((10, 10, 10)) send_get_arrays(client, data) diff --git a/2023-01/smartsim/smartredis/tests/python/test_script_methods.py b/2023-01/smartsim/smartredis/tests/python/test_script_methods.py index 4e929566..83c0dff7 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_script_methods.py +++ b/2023-01/smartsim/smartredis/tests/python/test_script_methods.py @@ -24,9 +24,10 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
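In the test_script_methods.py hunk that follows, test_run_script is split into test_run_script_str and test_run_script_list: run_script and run_script_multigpu now accept the inputs/outputs arguments either as a single key string or as a list of key strings (the matching TypeError and ValueError checks appear in test_errors.py above). A self-contained sketch of the string-or-list normalization such an overload implies (an illustration under that assumption, not the SmartRedis source):

    from typing import List, Union

    def normalize_keys(keys: Union[str, List[str]]) -> List[str]:
        # A bare string is shorthand for a one-element list; anything
        # else must already be a list of strings.
        if isinstance(keys, str):
            return [keys]
        if isinstance(keys, list) and all(isinstance(k, str) for k in keys):
            return list(keys)
        raise TypeError("inputs/outputs must be a string or a list of strings")

    assert normalize_keys("script-test-data") == ["script-test-data"]
    assert normalize_keys(["in-1", "in-2"]) == ["in-1", "in-2"]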
+import pytest import inspect import os.path as osp - +from os import environ import numpy as np import torch from smartredis import Client @@ -34,25 +35,27 @@ file_path = osp.dirname(osp.abspath(__file__)) -def test_set_get_function(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +test_gpu = environ.get("SMARTREDIS_TEST_DEVICE","cpu").lower() == "gpu" + +def test_set_get_function(context): + c = Client(None, logger_name=context) c.set_function("test-set-function", one_to_one) script = c.get_script("test-set-function") sent_script = inspect.getsource(one_to_one) assert script == sent_script -def test_set_get_script(use_cluster, context): - c = Client(None, use_cluster, logger_name=context) +def test_set_get_script(context): + c = Client(None, logger_name=context) sent_script = read_script_from_file() c.set_script("test-set-script", sent_script) script = c.get_script("test-set-script") assert sent_script == script -def test_set_script_from_file(use_cluster, context): +def test_set_script_from_file(context): sent_script = read_script_from_file() - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) c.set_script_from_file( "test-script-file", osp.join(file_path, "./data_processing_script.txt") ) @@ -63,22 +66,22 @@ assert not c.model_exists("test-script-file") -def test_run_script(use_cluster, context): +def test_run_script_str(context): data = np.array([[1, 2, 3, 4, 5]]) - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) c.put_tensor("script-test-data", data) c.set_function("one-to-one", one_to_one) - c.run_script("one-to-one", "one_to_one", ["script-test-data"], ["script-test-out"]) + c.run_script("one-to-one", "one_to_one", "script-test-data", "script-test-out") out = c.get_tensor("script-test-out") assert out == 5 -def test_run_script_multi(use_cluster, context): +def test_run_script_list(context): data = np.array([[1, 2, 3, 4]]) data_2 = np.array([[5, 6, 7, 8]]) - c = Client(None, use_cluster, logger_name=context) + c = Client(None, logger_name=context) c.put_tensor("srpt-multi-out-data-1", data) c.put_tensor("srpt-multi-out-data-2", data_2) c.set_function("two-to-one", two_to_one) @@ -94,6 +97,47 @@ out, expected, "Returned array from script not equal to expected result" ) +@pytest.mark.skipif( + not test_gpu, + reason="SMARTREDIS_TEST_DEVICE does not specify 'gpu'" +) +def test_run_script_multigpu_str(context): + data = np.array([[1, 2, 3, 4, 5]]) + + c = Client(None, logger_name=context) + c.put_tensor("script-test-data", data) + c.set_function_multigpu("one-to-one", one_to_one, 0, 2) + c.run_script_multigpu("one-to-one", "one_to_one", "script-test-data", "script-test-out", 0, 0, 2) + out = c.get_tensor("script-test-out") + assert out == 5 + +@pytest.mark.skipif( + not test_gpu, + reason="SMARTREDIS_TEST_DEVICE does not specify 'gpu'" +) +def test_run_script_multigpu_list(context): + data = np.array([[1, 2, 3, 4]]) + data_2 = np.array([[5, 6, 7, 8]]) + + c = Client(None, logger_name=context) + c.put_tensor("srpt-multi-out-data-1", data) + c.put_tensor("srpt-multi-out-data-2", data_2) + c.set_function_multigpu("two-to-one", two_to_one, 0, 2) + c.run_script_multigpu( + "two-to-one", + "two_to_one", + ["srpt-multi-out-data-1", "srpt-multi-out-data-2"], + ["srpt-multi-out-output"], + 0, + 0, + 2 + ) + out = 
c.get_tensor("srpt-multi-out-output") + expected = np.array([4, 8]) + np.testing.assert_array_equal( + out, expected, "Returned array from script not equal to expected result" + ) + def one_to_one(data): """Sample torchscript script that returns the diff --git a/2023-01/smartsim/smartredis/tests/python/test_tensor_ops.py b/2023-01/smartsim/smartredis/tests/python/test_tensor_ops.py index f25dde36..89ddd3f6 100644 --- a/2023-01/smartsim/smartredis/tests/python/test_tensor_ops.py +++ b/2023-01/smartsim/smartredis/tests/python/test_tensor_ops.py @@ -31,25 +31,26 @@ from smartredis.error import RedisReplyError -def test_copy_tensor(use_cluster, context): +def test_copy_tensor(context): # test copying tensor - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) tensor = np.array([1, 2]) client.put_tensor("test_copy", tensor) client.copy_tensor("test_copy", "test_copied") - + bool_poll_key = client.poll_key(get_prefix() + "test_copy", 100, 100) + assert bool_poll_key assert client.key_exists(get_prefix() + "test_copy") assert client.key_exists(get_prefix() + "test_copied") returned = client.get_tensor("test_copied") assert np.array_equal(tensor, returned) -def test_rename_tensor(use_cluster, context): +def test_rename_tensor(context): # test renaming tensor - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) tensor = np.array([1, 2]) client.put_tensor("test_rename", tensor) @@ -61,10 +62,10 @@ def test_rename_tensor(use_cluster, context): assert np.array_equal(tensor, returned) -def test_delete_tensor(use_cluster, context): +def test_delete_tensor(context): # test renaming tensor - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) tensor = np.array([1, 2]) client.put_tensor("test_delete", tensor) @@ -76,25 +77,25 @@ def test_delete_tensor(use_cluster, context): # --------------- Error handling ---------------------- -def test_rename_nonexisting_key(use_cluster, context): +def test_rename_nonexisting_key(context): - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) with pytest.raises(RedisReplyError): client.rename_tensor("not-a-tensor", "still-not-a-tensor") -def test_copy_nonexistant_key(use_cluster, context): +def test_copy_nonexistant_key(context): - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) with pytest.raises(RedisReplyError): client.copy_tensor("not-a-tensor", "still-not-a-tensor") -def test_copy_not_tensor(use_cluster, context): +def test_copy_not_tensor(context): def test_func(param): print(param) - client = Client(None, use_cluster, logger_name=context) + client = Client(None, logger_name=context) client.set_function("test_func", test_func) with pytest.raises(RedisReplyError): client.copy_tensor("test_func", "test_fork") diff --git a/2023-01/smartsim/smartredis/utils/create_cluster/local_cluster.py b/2023-01/smartsim/smartredis/utils/create_cluster/local_cluster.py deleted file mode 100644 index b4c0af04..00000000 --- a/2023-01/smartsim/smartredis/utils/create_cluster/local_cluster.py +++ /dev/null @@ -1,110 +0,0 @@ -# BSD 2-Clause License -# -# Copyright (c) 2021-2023, Hewlett Packard Enterprise -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1.
Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -from subprocess import Popen, TimeoutExpired, PIPE, SubprocessError, run -from time import sleep - -def stop_cluster(n_nodes, port): - """Stop a redis cluster and clear the files - associated with it - """ - import os - redis = os.getenv('REDIS_INSTALL_PATH') + '/redis-cli' - - - pids = [] - for i in range(n_nodes): - cmd = redis + ' -p ' + str(port+i) + ' shutdown' - pid = Popen([cmd], shell=True) - pids.append(pid) - - sleep(1) - fname = str(port+i) + ".log" - if os.path.exists(fname): - os.remove(fname) - - fname = str(port+i) + ".conf" - if os.path.exists(fname): - os.remove(fname) - - fname = 'dump.rdb' - if os.path.exists(fname): - os.remove(fname) - - return pids - -def create_cluster(n_nodes, port): - """Creates a cluster starting with port at - 127.0.0.1""" - - import os - - # Start servers - host = '127.0.0.1' - redis = os.getenv('REDIS_INSTALL_PATH') + '/redis-server' - test_device = os.environ.get("SMARTREDIS_TEST_DEVICE","cpu").lower() - redisai = os.getenv(f'REDISAI_{test_device.upper()}_INSTALL_PATH') + '/redisai.so ' - pids = [] - - for i in range(n_nodes): - l_port = port + i - cmd = redis + ' --port ' + str(l_port) + " --cluster-enabled yes --cluster-config-file " + str(l_port) + ".conf --loadmodule " + \ - redisai + " --protected-mode no --loglevel notice " - log_file = "--logfile " + str(l_port) + ".log" - cmd += log_file + ' ' - print(cmd) - pid = Popen(cmd, shell=True) - pids.append(pid) - sleep(2) - # Create cluster - redis_cli = os.getenv('REDIS_INSTALL_PATH') + '/redis-cli' - cluster_str=' ' - for i in range(n_nodes): - cluster_str += '127.0.0.1:' + str(port+i) + ' ' - cmd = " ".join((redis_cli, "--cluster create", cluster_str, "--cluster-replicas 0")) - print(cmd) - proc = run([cmd], input="yes", encoding="utf-8", shell=True) - if proc.returncode != 0: - raise SubprocessError("Cluster could not be created!") - else: - print("Cluster has been setup!") - - return pids - -if __name__ == "__main__": - import argparse - import time - - parser = argparse.ArgumentParser() - parser.add_argument('--port', type=int, default=6379) - parser.add_argument('--nodes', type=int, default=3) - parser.add_argument('--stop', action='store_true') - args = parser.parse_args() - - if(args.stop): - stop_cluster(args.nodes, args.port) - else: - create_cluster(args.nodes, args.port) diff --git a/2023-01/smartsim/smartredis/utils/create_cluster/smartredisdb.conf 
b/2023-01/smartsim/smartredis/utils/create_cluster/smartredisdb.conf deleted file mode 100644 index 727824db..00000000 --- a/2023-01/smartsim/smartredis/utils/create_cluster/smartredisdb.conf +++ /dev/null @@ -1,1563 +0,0 @@ -# Redis configuration file example. -# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./keydb-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################## MODULES ##################################### - -# Load modules at startup. If the server is not able to load modules -# it will abort. It is possible to use multiple loadmodule directives. -# -# loadmodule /path/to/my_module.so -# loadmodule /path/to/other_module.so - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 ::1 -# -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only into -# the IPv4 loopback interface address (this means Redis will be able to -# accept connections only from clients running into the same computer it -# is running). -# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -#bind 127.0.0.1 - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. 
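The networking directives this removed file documents (bind, protected-mode, and friends) can be read back from a live server with CONFIG GET. A hedged sketch using redis-py, purely for illustration; the client library, host, and port are assumptions and not part of this repository:

    import redis

    r = redis.Redis(host="127.0.0.1", port=6379)   # assumed local test server
    r.ping()                                       # raises if unreachable
    print(r.config_get("protected-mode"))          # 'no' with the file above
    print(r.config_get("bind"))                    # empty: listen everywhere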
-# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. -protected-mode no - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 4096 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/redis.sock -# unixsocketperm 700 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1. -tcp-keepalive 300 - -################################# GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize no - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# If a pid file is specified, Redis writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". -# -# Creating a pid file is best effort: if Redis is not able to create it -# nothing bad happens, the server will start and run normally. -pidfile /var/run/redis_6379.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel verbose - -# Specify the log file name. 
Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile "" - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -# By default Redis shows an ASCII art logo only when started to log to the -# standard output and if the standard output is a TTY. Basically this means -# that normally a logo is displayed only in interactive sessions. -# -# However it is possible to force the pre-4.0 behavior and always show a -# ASCII art logo in startup logs by setting the following option to yes. -always-show-logo yes - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -#save 900 1 -#save 300 10 -#save 60 10000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. 
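The RDB save points described above can also be set at runtime, which is often easier than editing the file while experimenting. A minimal sketch under the same redis-py/localhost assumptions as before:

    import redis

    r = redis.Redis(host="127.0.0.1", port=6379)   # assumed local test server
    # Equivalent to the commented-out "save 900 1" and "save 300 10" lines:
    r.config_set("save", "900 1 300 10")
    print(r.config_get("save"))
    r.bgsave()   # fork a background RDB snapshot immediately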
-# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Replica replication. Use replicaof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# +------------------+ +---------------+ -# | Master | ---> | Replica | -# | (receive writes) | | (exact copy) | -# +------------------+ +---------------+ -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of replicas. -# 2) Redis replicas are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition replicas automatically try to reconnect to masters -# and resynchronize with them. -# -# replicaof - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the replica to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the replica request. -# -# masterauth -# -# However this is not enough if you are using Redis ACLs (for Redis version -# 6 or greater), and the default user is not capable of running the PSYNC -# command and/or other commands needed for replication. In this case it's -# better to configure a special user to use with replication, and specify the -# masteruser configuration as such: -# -# masteruser -# -# When masteruser is specified, the replica will authenticate against its -# master using the new AUTH form: AUTH . - -# When a replica loses its connection with the master, or when the replication -# is still in progress, the replica can act in two different ways: -# -# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if replica-serve-stale-data is set to 'no' the replica will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, -# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, -# COMMAND, POST, HOST: and LATENCY. -# -replica-serve-stale-data yes - -# You can configure a replica instance to accept writes or not. Writing against -# a replica instance may be useful to store some ephemeral data (because data -# written on a replica will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default replicas are read-only. -# -# Note: read only replicas are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only replica exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. 
To a limited extent you can improve -# security of read only replicas using 'rename-command' to shadow all the -# administrative / dangerous commands. -replica-read-only yes - -# Replication SYNC strategy: disk or socket. -# -# ------------------------------------------------------- -# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY -# ------------------------------------------------------- -# -# New replicas and reconnecting replicas that are not able to continue the replication -# process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the replicas. -# The transmission can happen in two different ways: -# -# 1) Disk-backed: The Redis master creates a new process that writes the RDB -# file on disk. Later the file is transferred by the parent -# process to the replicas incrementally. -# 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to replica sockets, without touching the disk at all. -# -# With disk-backed replication, while the RDB file is generated, more replicas -# can be queued and served with the RDB file as soon as the current child producing -# the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new replicas arriving will be queued and a new transfer -# will start when the current one terminates. -# -# When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple replicas -# will arrive and the transfer can be parallelized. -# -# With slow disks and fast (large bandwidth) networks, diskless replication -# works better. -repl-diskless-sync no - -# When diskless replication is enabled, it is possible to configure the delay -# the server waits in order to spawn the child that transfers the RDB via socket -# to the replicas. -# -# This is important since once the transfer starts, it is not possible to serve -# new replicas arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more replicas arrive. -# -# The delay is specified in seconds, and by default is 5 seconds. To disable -# it entirely just set it to 0 seconds and the transfer will start ASAP. -repl-diskless-sync-delay 5 - -# Replicas send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_replica_period option. The default value is 10 -# seconds. -# -# repl-ping-replica-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of replica. -# 2) Master timeout from the point of view of replicas (data, pings). -# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-replica-period otherwise a timeout will be detected -# every time there is low traffic between the master and the replica. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the replica socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to replicas. But this can add a delay for -# the data to appear on the replica side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the replica side will -# be reduced but more bandwidth will be used for replication. 
-# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and replicas are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# replica data when replicas are disconnected for some time, so that when a replica -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the replica missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the replica can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a replica connected. -# -# repl-backlog-size 1mb - -# After a master has no longer connected replicas for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last replica disconnected, for -# the backlog buffer to be freed. -# -# Note that replicas never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially -# resynchronize" with the replicas: hence they should always accumulate backlog. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The replica priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a replica to promote into a -# master if the master is no longer working correctly. -# -# A replica with a low priority number is considered better for promotion, so -# for instance if there are three replicas with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the replica as not able to perform the -# role of master, so a replica with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -replica-priority 100 - -# It is possible for a master to stop accepting writes if there are less than -# N replicas connected, having a lag less or equal than M seconds. -# -# The N replicas need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the replica, that is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough replicas -# are available, to the specified number of seconds. -# -# For example to require at least 3 replicas with a lag <= 10 seconds use: -# -# min-replicas-to-write 3 -# min-replicas-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-replicas-to-write is set to 0 (feature disabled) and -# min-replicas-max-lag is set to 10. - -# A Redis master is able to list the address and port of the attached -# replicas in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover replica instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a master. 
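To watch the min-replicas write guard described above in action, the directives can be applied to a live master and the replication state inspected with INFO. A sketch, again assuming a local redis-py connection for illustration:

    import redis

    r = redis.Redis(host="127.0.0.1", port=6379)   # assumed master node
    # Refuse writes unless 3 replicas acknowledged within the last 10 s:
    r.config_set("min-replicas-to-write", 3)
    r.config_set("min-replicas-max-lag", 10)

    info = r.info("replication")
    print(info["role"], info.get("connected_slaves"))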
-# -# The listed IP and address normally reported by a replica is obtained -# in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the replica to connect with the master. -# -# Port: The port is communicated by the replica during the replication -# handshake, and is normally the port that the replica is using to -# listen for connections. -# -# However when port forwarding or Network Address Translation (NAT) is -# used, the replica may be actually reachable via different IP and port -# pairs. The following two options can be used by a replica in order to -# report to its master a specific set of IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# replica-announce-ip 5.5.5.5 -# replica-announce-port 1234 - -################################## SECURITY ################################### - -# Warning: since Redis is pretty fast an outside user can try up to -# 1 million passwords per second against a modern box. This means that you -# should use very strong passwords, otherwise they will be very easy to break. -# Note that because the password is really a shared secret between the client -# and the server, and should not be memorized by any human, the password -# can be easily a long string from /dev/urandom or whatever, so by using a -# long and unguessable password no brute force attack will be possible. - -# Redis ACL users are defined in the following format: -# -# user ... acl rules ... -# -# For example: -# -# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 -# -# The special username "default" is used for new connections. If this user -# has the "nopass" rule, then new connections will be immediately authenticated -# as the "default" user without the need of any password provided via the -# AUTH command. Otherwise if the "default" user is not flagged with "nopass" -# the connections will start in not authenticated state, and will require -# AUTH (or the HELLO command AUTH option) in order to be authenticated and -# start to work. -# -# The ACL rules that describe what an user can do are the following: -# -# on Enable the user: it is possible to authenticate as this user. -# off Disable the user: it's no longer possible to authenticate -# with this user, however the already authenticated connections -# will still work. -# + Allow the execution of that command -# - Disallow the execution of that command -# +@ Allow the execution of all the commands in such category -# with valid categories are like @admin, @set, @sortedset, ... -# and so forth, see the full list in the server.c file where -# the Redis command table is described and defined. -# The special category @all means all the commands, but currently -# present in the server, and that will be loaded in the future -# via modules. -# +|subcommand Allow a specific subcommand of an otherwise -# disabled command. Note that this form is not -# allowed as negative like -DEBUG|SEGFAULT, but -# only additive starting with "+". -# allcommands Alias for +@all. Note that it implies the ability to execute -# all the future commands loaded via the modules system. -# nocommands Alias for -@all. -# ~ Add a pattern of keys that can be mentioned as part of -# commands. For instance ~* allows all the keys. The pattern -# is a glob-style pattern like the one of KEYS. -# It is possible to specify multiple patterns. 
-# allkeys Alias for ~* -# resetkeys Flush the list of allowed keys patterns. -# > Add this passowrd to the list of valid password for the user. -# For example >mypass will add "mypass" to the list. -# This directive clears the "nopass" flag (see later). -# < Remove this password from the list of valid passwords. -# nopass All the set passwords of the user are removed, and the user -# is flagged as requiring no password: it means that every -# password will work against this user. If this directive is -# used for the default user, every new connection will be -# immediately authenticated with the default user without -# any explicit AUTH command required. Note that the "resetpass" -# directive will clear this condition. -# resetpass Flush the list of allowed passwords. Moreover removes the -# "nopass" status. After "resetpass" the user has no associated -# passwords and there is no way to authenticate without adding -# some password (or setting it as "nopass" later). -# reset Performs the following actions: resetpass, resetkeys, off, -# -@all. The user returns to the same state it has immediately -# after its creation. -# -# ACL rules can be specified in any order: for instance you can start with -# passwords, then flags, or key patterns. However note that the additive -# and subtractive rules will CHANGE MEANING depending on the ordering. -# For instance see the following example: -# -# user alice on +@all -DEBUG ~* >somepassword -# -# This will allow "alice" to use all the commands with the exception of the -# DEBUG command, since +@all added all the commands to the set of the commands -# alice can use, and later DEBUG was removed. However if we invert the order -# of two ACL rules the result will be different: -# -# user alice on -DEBUG +@all ~* >somepassword -# -# Now DEBUG was removed when alice had yet no commands in the set of allowed -# commands, later all the commands are added, so the user will be able to -# execute everything. -# -# Basically ACL rules are processed left-to-right. -# -# For more information about ACL configuration please refer to -# the Redis web site at https://redis.io/topics/acl - -# Using an external ACL file -# -# Instead of configuring users here in this file, it is possible to use -# a stand-alone file just listing users. The two methods cannot be mixed: -# if you configure users here and at the same time you activate the exteranl -# ACL file, the server will refuse to start. -# -# The format of the external ACL user file is exactly the same as the -# format that is used inside redis.conf to describe users. -# -# aclfile /etc/redis/users.acl - -# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatiblity -# layer on top of the new ACL system. The option effect will be just setting -# the password for the default user. Clients will still authenticate using -# AUTH as usually, or more explicitly with AUTH default -# if they follow the new protocol: both will work. -# -# requirepass foobared - -# Command renaming (DEPRECATED). -# -# ------------------------------------------------------------------------ -# WARNING: avoid using this option if possible. Instead use ACLs to remove -# commands from the default user, and put them only in some admin user you -# create for administrative purposes. -# ------------------------------------------------------------------------ -# -# It is possible to change the name of dangerous commands in a shared -# environment. 
For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to replicas may cause problems. - -################################### CLIENTS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -############################## MEMORY MANAGEMENT ################################ - -# Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU or LFU cache, or to -# set a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have replicas attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the replicas are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of replicas is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have replicas attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for replica -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> Evict using approximated LRU among the keys with an expire set. -# allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. -# allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key among the ones with an expire set. -# allkeys-random -> Remove a random key, any key. -# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) -# noeviction -> Don't evict anything, just return an error on write operations. -# -# LRU means Least Recently Used -# LFU means Least Frequently Used -# -# Both LRU, LFU and volatile-ttl are implemented using approximated -# randomized algorithms. -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are no suitable keys for eviction. 
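A compact way to try the eviction behaviour discussed above is to cap memory and pick a policy at runtime; a hedged redis-py sketch, with the limit chosen purely for illustration:

    import redis

    r = redis.Redis(host="127.0.0.1", port=6379)     # assumed cache instance
    r.config_set("maxmemory", "100mb")               # hard memory cap
    r.config_set("maxmemory-policy", "allkeys-lru")  # evict approx-LRU keys
    print(r.config_get("maxmemory-policy"))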
-# -# At the date of writing these commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -# maxmemory-policy noeviction - -# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune it for speed or -# accuracy. For default Redis will check five keys and pick the one that was -# used less recently, you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 Approximates very closely -# true LRU but costs more CPU. 3 is faster but not very accurate. -# -# maxmemory-samples 5 - -# Starting from Redis 5, by default a replica will ignore its maxmemory setting -# (unless it is promoted to master after a failover or manually). It means -# that the eviction of keys will be just handled by the master, sending the -# DEL commands to the replica as keys evict in the master side. -# -# This behavior ensures that masters and replicas stay consistent, and is usually -# what you want, however if your replica is writable, or you want the replica to have -# a different memory setting, and you are sure all the writes performed to the -# replica are idempotent, then you may change this default (but be sure to understand -# what you are doing). -# -# Note that since the replica by default does not evict, it may end using more -# memory than the one set via maxmemory (there are certain buffers that may -# be larger on the replica, or data structures may sometimes take more memory and so -# forth). So make sure you monitor your replicas and make sure they have enough -# memory to never hit a real out-of-memory condition before the master hits -# the configured maxmemory setting. -# -# replica-ignore-maxmemory yes - -############################# LAZY FREEING #################################### - -# Redis has two primitives to delete keys. One is called DEL and is a blocking -# deletion of the object. It means that the server stops processing new commands -# in order to reclaim all the memory associated with an object in a synchronous -# way. If the key deleted is associated with a small object, the time needed -# in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in Redis. However if the key is associated with an -# aggregated value containing millions of elements, the server can block for -# a long time (even seconds) in order to complete the operation. -# -# For the above reasons Redis also offers non blocking deletion primitives -# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and -# FLUSHDB commands, in order to reclaim memory in background. Those commands -# are executed in constant time. Another thread will incrementally free the -# object in the background as fast as possible. -# -# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. -# It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the Redis server sometimes has to -# delete keys or flush the whole database as a side effect of other operations. 
-# Specifically Redis deletes objects independently of a user call in the -# following scenarios: -# -# 1) On eviction, because of the maxmemory and maxmemory policy configurations, -# in order to make room for new data, without going over the specified -# memory limit. -# 2) Because of expire: when a key with an associated time to live (see the -# EXPIRE command) must be deleted from memory. -# 3) Because of a side effect of a command that stores data on a key that may -# already exist. For example the RENAME command may delete the old key -# content when it is replaced with another one. Similarly SUNIONSTORE -# or SORT with STORE option may delete existing keys. The SET command -# itself removes any old content of the specified key in order to replace -# it with the specified string. -# 4) During replication, when a replica performs a full resynchronization with -# its master, the content of the whole database is removed in order to -# load the RDB file just transferred. -# -# In all the above cases the default is to delete objects in a blocking way, -# like if DEL was called. However you can configure each case specifically -# in order to instead release memory in a non-blocking way like if UNLINK -# was called, using the following configuration directives: - -lazyfree-lazy-eviction no -lazyfree-lazy-expire no -lazyfree-lazy-server-del no -replica-lazy-flush no - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") - -appendfilename "appendonly.aof" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. 
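In practice the AOF trade-offs above reduce to two directives; a small sketch, under the same redis-py/localhost assumptions, that switches a test instance to append-only persistence with the default fsync compromise:

    import redis

    r = redis.Redis(host="127.0.0.1", port=6379)   # assumed test instance
    r.config_set("appendonly", "yes")              # enable the AOF
    r.config_set("appendfsync", "everysec")        # fsync once per second
    r.bgrewriteaof()                               # compact it in the background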
-# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the Redis -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where Redis is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself -# crashes or aborts but the operating system still works correctly). -# -# Redis can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "keydb-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes -# will be found. 
-aof-load-truncated yes - -# When rewriting the AOF file, Redis is able to use an RDB preamble in the -# AOF file for faster rewrites and recoveries. When this option is turned -# on the rewritten AOF file is composed of two different stanzas: -# -# [RDB file][AOF tail] -# -# When loading Redis recognizes that the AOF file starts with the "REDIS" -# string and loads the prefixed RDB file, and continues loading the AOF -# tail. -aof-use-rdb-preamble yes - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceeds the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write command was -# already issued by the script but the user doesn't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################ REDIS CLUSTER ############################### - -# Normal Redis instances can't be part of a Redis Cluster; only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. -# -# cluster-node-timeout 15000 - -# A replica of a failing master will avoid to start a failover if its data -# looks too old. -# -# There is no simple way for a replica to actually have an exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple replicas able to failover, they exchange messages -# in order to try to give an advantage to the replica with the best -# replication offset (more data from the master processed). -# Replicas will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single replica computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the replica will not try to failover -# at all. -# -# The point "2" can be tuned by user. 
Specifically a replica will not perform -# the failover if, since the last interaction with the master, the time -# elapsed is greater than: -# -# (node-timeout * replica-validity-factor) + repl-ping-replica-period -# -# So for example if node-timeout is 30 seconds, and the replica-validity-factor -# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the -# replica will not try to failover if it was not able to talk with the master -# for longer than 310 seconds. -# -# A large replica-validity-factor may allow replicas with too old data to failover -# a master, while a too small value may prevent the cluster from being able to -# elect a replica at all. -# -# For maximum availability, it is possible to set the replica-validity-factor -# to a value of 0, which means, that replicas will always try to failover the -# master regardless of the last time they interacted with the master. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-replica-validity-factor 10 - -# Cluster replicas are able to migrate to orphaned masters, that are masters -# that are left without working replicas. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working replicas. -# -# Replicas migrate to orphaned masters only if there are still at least a -# given number of other working replicas for their old master. This number -# is the "migration barrier". A migration barrier of 1 means that a replica -# will migrate only if there is at least 1 other working replica for its master -# and so forth. It usually reflects the number of replicas you want for every -# master in your cluster. -# -# Default is 1 (replicas migrate only if their masters remain with at least -# one replica). To disable migration just set it to a very large value. -# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# By default Redis Cluster nodes stop accepting queries if they detect there -# is at least an hash slot uncovered (no available node is serving it). -# This way if the cluster is partially down (for example a range of hash slots -# are no longer covered) all the cluster becomes, eventually, unavailable. -# It automatically returns available as soon as all the slots are covered again. -# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -cluster-require-full-coverage no - -# This option, when set to yes, prevents replicas from trying to failover its -# master during master failures. However the master can still perform a -# manual failover, if forced to do so. -# -# This is useful in different scenarios, especially in the case of multiple -# data center operations, where we want one side to never be promoted if not -# in the case of a total DC failure. -# -# cluster-replica-no-failover no - -# In order to setup your cluster make sure to read the documentation -# available at http://redis.io web site. 
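On the client side, talking to a cluster configured as above means slot-aware routing rather than a plain connection. A sketch with redis-py's cluster client; the host and port assume a locally started test cluster such as the one the deleted local_cluster.py script used to build:

    from redis.cluster import RedisCluster

    # Any reachable node is enough to bootstrap the full hash-slot map:
    rc = RedisCluster(host="127.0.0.1", port=6379)
    rc.set("hello", "world")   # routed to the node owning the key's slot
    print(rc.get("hello"))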
- -########################## CLUSTER DOCKER/NAT support ######################## - -# In certain deployments, Redis Cluster nodes address discovery fails, because -# addresses are NAT-ted or because ports are forwarded (the typical case is -# Docker and other containers). -# -# In order to make Redis Cluster working in such environments, a static -# configuration where each node knows its public address is needed. The -# following two options are used for this scope, and are: -# -# * cluster-announce-ip -# * cluster-announce-port -# * cluster-announce-bus-port -# -# Each instruct the node about its address, client port, and cluster message -# bus port. The information is then published in the header of the bus packets -# so that other nodes will be able to correctly map the address of the node -# publishing the information. -# -# If the above options are not used, the normal Redis Cluster auto-detection -# will be used instead. -# -# Note that when remapped, the bus port may not be at the fixed offset of -# clients port + 10000, so you can specify any port and bus-port depending -# on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usually. -# -# Example: -# -# cluster-announce-ip 10.1.1.5 -# cluster-announce-port 6379 -# cluster-announce-bus-port 6380 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The Redis latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a Redis instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. -# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enabled at runtime using the command -# "CONFIG SET latency-monitor-threshold " if needed. 
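Both observability knobs from the two sections above are easy to poke from a client; the thresholds here mirror the example values in the comments, not recommendations:

    import redis

    r = redis.Redis(host="127.0.0.1", port=6379)      # assumed test instance
    r.config_set("slowlog-log-slower-than", 10000)    # microseconds
    r.config_set("latency-monitor-threshold", 100)    # milliseconds

    for entry in r.slowlog_get(10):                   # ten most recent entries
        print(entry["id"], entry["duration"], entry["command"])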
-latency-monitor-threshold 0 - -############################# EVENT NOTIFICATION ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@__ prefix. -# E Keyevent events, published with __keyevent@__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### GOPHER SERVER ################################# - -# Redis contains an implementation of the Gopher protocol, as specified in -# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt). -# -# The Gopher protocol was very popular in the late '90s. It is an alternative -# to the web, and the implementation both server and client side is so simple -# that the Redis server has just 100 lines of code in order to implement this -# support. -# -# What do you do with Gopher nowadays? Well Gopher never *really* died, and -# lately there is a movement in order for the Gopher more hierarchical content -# composed of just plain text documents to be resurrected. Some want a simpler -# internet, others believe that the mainstream internet became too much -# controlled, and it's cool to create an alternative space for people that -# want a bit of fresh air. -# -# Anyway for the 10nth birthday of the Redis, we gave it the Gopher protocol -# as a gift. -# -# --- HOW IT WORKS? --- -# -# The Redis Gopher support uses the inline protocol of Redis, and specifically -# two kind of inline requests that were anyway illegal: an empty request -# or any request that starts with "/" (there are no Redis commands starting -# with such a slash). Normal RESP2/RESP3 requests are completely out of the -# path of the Gopher protocol implementation and are served as usually as well. -# -# If you open a connection to Redis when Gopher is enabled and send it -# a string like "/foo", if there is a key named "/foo" it is served via the -# Gopher protocol. 
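Looping back to the event-notification examples above (the Gopher discussion continues below): a redis-py sketch (redis-py assumed) that subscribes to the expired-key channel exactly as in Example 2:

import redis

r = redis.Redis(decode_responses=True)
r.config_set("notify-keyspace-events", "Ex")  # Keyevent class, expired events

p = r.pubsub()
p.psubscribe("__keyevent@0__:expired")        # channel name from Example 2

r.set("session:42", "payload", ex=1)          # key that expires after 1 s
for msg in p.listen():                        # first messages are subscribe acks
    if msg["type"] == "pmessage":
        print("expired:", msg["data"])        # prints: expired: session:42
        break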
-# -# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher -# talking), you likely need a script like the following: -# -# https://github.com/antirez/gopher2redis -# -# --- SECURITY WARNING --- -# -# If you plan to put Redis on the internet in a publicly accessible address -# to server Gopher pages MAKE SURE TO SET A PASSWORD to the instance. -# Once a password is set: -# -# 1. The Gopher server (when enabled, not by default) will kill serve -# content via Gopher. -# 2. However other commands cannot be called before the client will -# authenticate. -# -# So use the 'requirepass' option to protect your instance. -# -# To enable Gopher support uncomment the following line and set -# the option from no (the default) to yes. -# -# gopher-enabled no - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 - -# Lists are also encoded in a special way to save a lot of space. -# The number of entries allowed per internal list node can be specified -# as a fixed maximum size or a maximum number of elements. -# For a fixed maximum size, use -5 through -1, meaning: -# -5: max size: 64 Kb <-- not recommended for normal workloads -# -4: max size: 32 Kb <-- not recommended -# -3: max size: 16 Kb <-- probably not recommended -# -2: max size: 8 Kb <-- good -# -1: max size: 4 Kb <-- good -# Positive numbers mean store up to _exactly_ that number of elements -# per list node. -# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), -# but if your use case is unique, adjust the settings as necessary. -list-max-ziplist-size -2 - -# Lists may also be compressed. -# Compress depth is the number of quicklist ziplist nodes from *each* side of -# the list to *exclude* from compression. The head and tail of the list -# are always uncompressed for fast push/pop operations. Settings are: -# 0: disable all list compression -# 1: depth 1 means "don't start compressing until after 1 node into the list, -# going from either the head or tail" -# So: [head]->node->node->...->node->[tail] -# [head], [tail] will always be uncompressed; inner nodes will compress. -# 2: [head]->[next]->node->node->...->node->[prev]->[tail] -# 2 here means: don't compress head or head->next or tail->prev or tail, -# but compress all nodes between them. -# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] -# etc. -list-compress-depth 0 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. 
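The small-collection encodings configured in this section can be observed directly with OBJECT ENCODING. A redis-py sketch (assumed client; the encoding names match the Redis-5-era server this file configures):

import redis

r = redis.Redis(decode_responses=True)
r.delete("h")
r.hset("h", mapping={f"f{i}": i for i in range(10)})
print(r.object("ENCODING", "h"))  # "ziplist": within both hash thresholds

r.hset("h", "big", "x" * 100)     # one value above hash-max-ziplist-value 64
print(r.object("ENCODING", "h"))  # "hashtable": compact encoding abandoned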
-# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Streams macro node max size / items. The stream data structure is a radix -# tree of big nodes that encode multiple items inside. Using this configuration -# it is possible to configure how big a single node can be in bytes, and the -# maximum number of items it may contain before switching to a new node when -# appending new stream entries. If any of the following settings are set to -# zero, the limit is ignored, so for instance it is possible to set just a -# max entires limit by setting max-bytes to 0 and max-entries to the desired -# value. -stream-node-max-bytes 4096 -stream-node-max-entries 100 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing no - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# replica -> replica clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. 
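To make the hard/soft semantics of the output-buffer limits concrete: with the 32 MB / 16 MB / 10 s example from the comment, a client is dropped instantly at 32 MB of pending output, or after staying above 16 MB for 10 consecutive seconds. The limits are runtime-tunable; a redis-py sketch (assumed client):

import redis

r = redis.Redis(decode_responses=True)
# class, hard limit, soft limit, soft seconds
r.config_set("client-output-buffer-limit", "pubsub 32mb 16mb 10")
print(r.config_get("client-output-buffer-limit"))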
-# -# Instead there is a default limit for pubsub and replica clients, since -# subscribers and replicas receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit replica 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Client query buffers accumulate new commands. They are limited to a fixed -# amount by default in order to avoid that a protocol desynchronization (for -# instance due to a bug in the client) will lead to unbound memory usage in -# the query buffer. However you can configure it here if you have very special -# needs, such us huge multi/exec requests or alike. -# -# client-query-buffer-limit 1gb - -# In the Redis protocol, bulk requests, that are, elements representing single -# strings, are normally limited ot 512 mb. However you can change this limit -# here. -# -proto-max-bulk-len 1gb - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# Normally it is useful to have an HZ value which is proportional to the -# number of clients connected. This is useful in order, for instance, to -# avoid too many clients are processed for each background task invocation -# in order to avoid latency spikes. -# -# Since the default HZ value by default is conservatively set to 10, Redis -# offers, and enables by default, the ability to use an adaptive HZ value -# which will temporary raise when there are many connected clients. -# -# When dynamic HZ is enabled, the actual configured HZ will be used as -# as a baseline, but multiples of the configured HZ value will be actually -# used as needed once more clients are connected. In this way an idle -# instance will use very little CPU time while a busy instance will be -# more responsive. -dynamic-hz yes - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes - -# When redis saves RDB file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -rdb-save-incremental-fsync yes - -# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good -# idea to start with the default settings and only change them after investigating -# how to improve the performances and how the keys LFU change over time, which -# is possible to inspect via the OBJECT FREQ command. 
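Before touching the LFU knobs discussed next, a key's access counter can be inspected as the comment suggests. A redis-py sketch (assumed client); note that OBJECT FREQ is only valid while an LFU maxmemory policy is active:

import redis

r = redis.Redis(decode_responses=True)
r.config_set("maxmemory-policy", "allkeys-lfu")    # OBJECT FREQ errors otherwise
r.set("hot", 1)
for _ in range(1000):
    r.get("hot")                                   # accumulate accesses
print(r.execute_command("OBJECT", "FREQ", "hot"))  # 8-bit logarithmic counter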
-# -# There are two tunable parameters in the Redis LFU implementation: the -# counter logarithm factor and the counter decay time. It is important to -# understand what the two parameters mean before changing them. -# -# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis -# uses a probabilistic increment with logarithmic behavior. Given the value -# of the old counter, when a key is accessed, the counter is incremented in -# this way: -# -# 1. A random number R between 0 and 1 is extracted. -# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). -# 3. The counter is incremented only if R < P. -# -# The default lfu-log-factor is 10. This is a table of how the frequency -# counter changes with a different number of accesses with different -# logarithmic factors: -# -# +--------+------------+------------+------------+------------+------------+ -# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | -# +--------+------------+------------+------------+------------+------------+ -# | 0 | 104 | 255 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 1 | 18 | 49 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 10 | 10 | 18 | 142 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 100 | 8 | 11 | 49 | 143 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# -# NOTE: The above table was obtained by running the following commands: -# -# keydb-benchmark -n 1000000 incr foo -# keydb-cli object freq foo -# -# NOTE 2: The counter initial value is 5 in order to give new objects a chance -# to accumulate hits. -# -# The counter decay time is the time, in minutes, that must elapse in order -# for the key counter to be divided by two (or decremented if it has a value -# less <= 10). -# -# The default value for the lfu-decay-time is 1. A Special value of 0 means to -# decay the counter every time it happens to be scanned. -# -# lfu-log-factor 10 -# lfu-decay-time 1 - -########################### ACTIVE DEFRAGMENTATION ####################### -# -# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested -# even in production and manually tested by multiple engineers for some -# time. -# -# What is active defragmentation? -# ------------------------------- -# -# Active (online) defragmentation allows a Redis server to compact the -# spaces left between small allocations and deallocations of data in memory, -# thus allowing to reclaim back memory. -# -# Fragmentation is a natural process that happens with every allocator (but -# less so with Jemalloc, fortunately) and certain workloads. Normally a server -# restart is needed in order to lower the fragmentation, or at least to flush -# away all the data and create it again. However thanks to this feature -# implemented by Oran Agra for Redis 4.0 this process can happen at runtime -# in an "hot" way, while the server is running. -# -# Basically when the fragmentation is over a certain level (see the -# configuration options below) Redis will start to create new copies of the -# values in contiguous memory regions by exploiting certain specific Jemalloc -# features (in order to understand if an allocation is causing fragmentation -# and to allocate it in a better place), and at the same time, will release the -# old copies of the data. 
This process, repeated incrementally for all the keys -# will cause the fragmentation to drop back to normal values. -# -# Important things to understand: -# -# 1. This feature is disabled by default, and only works if you compiled Redis -# to use the copy of Jemalloc we ship with the source code of Redis. -# This is the default with Linux builds. -# -# 2. You never need to enable this feature if you don't have fragmentation -# issues. -# -# 3. Once you experience fragmentation, you can enable this feature when -# needed with the command "CONFIG SET activedefrag yes". -# -# The configuration parameters are able to fine tune the behavior of the -# defragmentation process. If you are not sure about what they mean it is -# a good idea to leave the defaults untouched. - -# Enabled active defragmentation -# activedefrag yes - -# Minimum amount of fragmentation waste to start active defrag -# active-defrag-ignore-bytes 100mb - -# Minimum percentage of fragmentation to start active defrag -# active-defrag-threshold-lower 10 - -# Maximum percentage of fragmentation at which we use maximum effort -# active-defrag-threshold-upper 100 - -# Minimal effort for defrag in CPU percentage -# active-defrag-cycle-min 5 - -# Maximal effort for defrag in CPU percentage -# active-defrag-cycle-max 75 - -# Maximum number of set/hash/zset/list fields that will be processed from -# the main dictionary scan -# active-defrag-max-scan-fields 1000 - -# Path to directory for file backed scratchpad. The file backed scratchpad -# reduces memory requirements by storing rarely accessed data on disk -# instead of RAM. A temporary file will be created in this directory. -# scratch-file-path /tmp/ - -# Number of worker threads serving requests. This number should be related to the performance -# of your network hardware, not the number of cores on your machine. We don't recommend going -# above 4 at this time. By default this is set 1. -#server-threads 4 - -# Should KeyDB pin threads to CPUs? By default this is disabled, and KeyDB will not bind threads. -# When enabled threads are bount to cores sequentially starting at core 0. -# server-thread-affinity true - -# Uncomment the option below to enable Active Active support. Note that -# replicas will still sync in the normal way and incorrect ordering when -# bringing up replicas can result in data loss (the first master will win). -# active-replica yes diff --git a/2023-01/smartsim/smartredis/utils/launch_redis.py b/2023-01/smartsim/smartredis/utils/launch_redis.py new file mode 100644 index 00000000..b8dc2c0b --- /dev/null +++ b/2023-01/smartsim/smartredis/utils/launch_redis.py @@ -0,0 +1,256 @@ +# BSD 2-Clause License +# +# Copyright (c) 2021-2023, Hewlett Packard Enterprise +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from subprocess import Popen, SubprocessError, run, DEVNULL
+from time import sleep
+import argparse
+import os
+import pathlib
+
+def check_availability(n_nodes, port, udsport):
+    """Repeat a command until it is successful
+    """
+    num_tries = 5
+    is_uds = udsport is not None
+    if is_uds:
+        n_nodes = 1
+    cicd = os.getenv('SR_CICD_EXECUTION')
+    is_cicd = False if cicd is None else cicd.lower() == "true"
+    if is_cicd:
+        rediscli = 'redis-cli'
+    else:
+        rediscli = (
+            pathlib.Path(__file__).parent.parent
+            / "third-party/redis/src/redis-cli"
+        ).resolve()
+    for i in range(n_nodes):
+        connection = f"-s {udsport}" if is_uds else f"-p {port + i}"
+        set_cmd = f"{rediscli} {connection} set __test__ __test__"
+        del_cmd = f"{rediscli} {connection} del __test__"
+        command_succeeded = False
+        for _ in range(num_tries):
+            try:
+                # check=True makes a non-zero exit raise, so a refused connection actually retries
+                run(set_cmd.split(), shell=False, check=True, stdout=DEVNULL, stderr=DEVNULL)
+                run(del_cmd.split(), shell=False, check=True, stdout=DEVNULL, stderr=DEVNULL)
+                command_succeeded = True
+                break
+            except Exception:
+                # That try failed, so just retry
+                sleep(5)
+        if not command_succeeded:
+            raise RuntimeError(f"Failed to validate availability for connection {connection}")
+
+def stop_db(n_nodes, port, udsport):
+    """Stop a redis cluster and clear the files
+    associated with it
+    """
+    is_uds = udsport is not None
+    if is_uds:
+        n_nodes = 1
+    cicd = os.getenv('SR_CICD_EXECUTION')
+    is_cicd = False if cicd is None else cicd.lower() == "true"
+
+    # It's clobberin' time!
+    if is_cicd:
+        rediscli = 'redis-cli'
+    else:
+        rediscli = (
+            pathlib.Path(__file__).parent.parent
+            / "third-party/redis/src/redis-cli"
+        ).resolve()
+
+    # Clobber the server(s)
+    procs = []
+    for i in range(n_nodes):
+        connection = f"-s {udsport}" if is_uds else f"-p {port + i}"
+        cmd = f"{rediscli} {connection} shutdown"
+        print(cmd)
+        proc = Popen(cmd.split(), shell=False)
+        procs.append(proc)
+
+    # Make sure that all servers are down
+    # Let exceptions propagate to the caller
+    for proc in procs:
+        _ = proc.communicate(timeout=15)
+        if proc.returncode != 0:
+            raise RuntimeError("Failed to kill Redis server!")
+
+    # Clean up after ourselves
+    for i in range(n_nodes):
+        fname = f"{port+i}.log"
+        if os.path.exists(fname):
+            os.remove(fname)
+
+        fname = f"{port+i}.conf"
+        if os.path.exists(fname):
+            os.remove(fname)
+
+    other_files = [
+        'dump.rdb',
+        'single.log',
+        'UDS.log',
+    ]
+    for fname in other_files:
+        if os.path.exists(fname):
+            os.remove(fname)
+
+    # Pause to give Redis time to die
+    sleep(2)
+
+def prepare_uds_socket(udsport):
+    """Sets up the UDS socket"""
+    if udsport is None:
+        return  # Silently bail
+    uds_abs = pathlib.Path(udsport).resolve()
+    basedir = uds_abs.parent
+    basedir.mkdir(parents=True, exist_ok=True)  # create missing parent dirs too
+    uds_abs.touch()
+    uds_abs.chmod(0o777)
+
+def create_db(n_nodes, port, device, rai_ver, udsport):
+    """Creates a redis database starting with port at 127.0.0.1
+
+    For a standalone server, the command issued should be equivalent to:
+        redis-server --port $PORT --daemonize yes \
+            --logfile "single.log" \
+            --loadmodule $REDISAI_MODULES
+
+    For a clustered server, the command issued should be equivalent to:
+        redis-server --port $port --cluster-enabled yes --daemonize yes \
+            --cluster-config-file "$port.conf" --protected-mode no --save "" \
+            --logfile "$port.log" \
+            --loadmodule $REDISAI_MODULES
+
+    For a UDS server, the command issued should be equivalent to:
+        redis-server --unixsocket $SOCKET --unixsocketperm 777 --port 0 --bind 127.0.0.1 \
+            --daemonize yes --protected-mode no --logfile "uds.log" \
+            --loadmodule $REDISAI_MODULES
+
+    where:
+        PORT ranges from port to port + n_nodes - 1
+        REDISAI_MODULES is read from the environment or calculated relative to this file
+    """
+
+    # Set up configuration
+    is_uds = udsport is not None
+    if is_uds:
+        n_nodes = 1
+    is_cluster = n_nodes > 1
+    cicd = os.getenv('SR_CICD_EXECUTION')
+    is_cicd = False if cicd is None else cicd.lower() == "true"
+
+    if is_cicd:
+        redisserver = "redis-server"
+    else:
+        redisserver = (
+            pathlib.Path(__file__).parent.parent
+            / "third-party/redis/src/redis-server"
+        ).resolve()
+    rediscli = "redis-cli" if is_cicd else os.path.dirname(redisserver) + "/redis-cli"
+    test_device = device if device is not None else os.environ.get(
+        "SMARTREDIS_TEST_DEVICE","cpu").lower()
+    if is_cicd:
+        redisai_modules = os.getenv("REDISAI_MODULES")
+        if redisai_modules is None:
+            raise RuntimeError("REDISAI_MODULES environment variable is not set!")
+        rai_clause = f"--loadmodule {redisai_modules}"
+    else:
+        if not rai_ver:
+            raise RuntimeError("RedisAI version not specified")
+        redisai_dir = (
+            pathlib.Path(__file__).parent.parent
+            / f"third-party/RedisAI/{rai_ver}/install-{test_device}"
+        ).resolve()
+        redisai = redisai_dir / "redisai.so"
+        tf_loc = redisai_dir / "backends/redisai_tensorflow/redisai_tensorflow.so"
+        torch_loc = redisai_dir / "backends/redisai_torch/redisai_torch.so"
+        rai_clause = f"--loadmodule {redisai} TF {tf_loc} TORCH {torch_loc}"
+    uds_clause = ""
+    if is_uds:
+        prepare_uds_socket(udsport)
+        uds_clause = f"--bind 127.0.0.1 --unixsocket {udsport} --unixsocketperm 777"
+    daemonize_clause = "--daemonize yes"
+    cluster_clause = "--cluster-enabled yes" if is_cluster else ""
+    prot_clause = "--protected-mode no" if is_cluster or is_uds else ""
+    save_clause = '--save ""' if is_cluster else ""
+
+    # Start servers
+    procs = []
+    for i in range(n_nodes):
+        l_port = port + i
+        port_clause = f"--port {l_port}" if not is_uds else "--port 0"
+        if is_cluster:
+            log_clause = f"--logfile {l_port}.log"
+            cluster_cfg_clause = f"--cluster-config-file {l_port}.conf"
+        else:
+            log_clause = "--logfile " + ("UDS.log" if is_uds else "single.log")
+            cluster_cfg_clause = ""
+        log_clause += " --loglevel notice"
+        cmd = f"{redisserver} {port_clause} {daemonize_clause} {cluster_clause} " + \
+              f"{cluster_cfg_clause} {log_clause} {uds_clause} {rai_clause} " + \
+              f"{prot_clause} {save_clause}"
+
+        print(cmd)
+        proc = Popen(cmd.split(), shell=False)
+        procs.append(proc)
+
+    # Make sure that all servers are up
+    # Let exceptions propagate to the caller
+    check_availability(n_nodes, port, udsport)
+    for proc in procs:
+        _ = proc.communicate(timeout=15)
+        if proc.returncode != 0:
+            raise RuntimeError("Failed to launch Redis server!")
+
+    # Create cluster for clustered Redis request
+    if n_nodes > 1:
+        cluster_str = " ".join(f"127.0.0.1:{port + i}" for i in range(n_nodes))
+        cmd = f"{rediscli} --cluster create {cluster_str} --cluster-replicas 0"
+        print(cmd)
+        proc = run(cmd.split(), input="yes", encoding="utf-8", shell=False)
+        if proc.returncode != 0:
+            raise SubprocessError("Cluster could not be created!")
+        sleep(2)
+        print("Cluster has been set up!")
+    else:
+        print("Server has been set up!")
+    check_availability(n_nodes, port, udsport)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--port', type=int, default=6379)
+    parser.add_argument('--nodes', type=int, default=3)
+    parser.add_argument('--rai', type=str, default=None)
+    parser.add_argument('--device', type=str, default="cpu")
+    parser.add_argument('--udsport', type=str, default=None)
+    parser.add_argument('--stop', action='store_true')
+    args = parser.parse_args()
+
+    if args.stop:
+        stop_db(args.nodes, args.port, args.udsport)
+    else:
+        create_db(args.nodes, args.port, args.device, args.rai, args.udsport)
diff --git a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/Make/files b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/Make/files
index 316ea6da..d7075d2f 100644
--- a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/Make/files
+++ b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/Make/files
@@ -1,3 +1,4 @@
+smartRedisAdapter.C
 smartSimFunctionObject.C
 
 LIB = $(FOAM_USER_LIBBIN)/libsmartSimFunctionObject
diff --git a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/Make/options b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/Make/options
index 5ae61837..72712db5 100644
--- a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/Make/options
+++ b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/Make/options
@@ -3,12 +3,10 @@ EXE_INC = \
     -Wno-old-style-cast \
     -I$(LIB_SRC)/finiteVolume/lnInclude \
     -I$(LIB_SRC)/meshTools/lnInclude \
-    -I$(FOAM_SMARTREDIS_INCLUDE) \
-    -I$(FOAM_SMARTREDIS_DEP_INCLUDE)
+    -I$(SMARTREDIS_INCLUDE)
 
 LIB_LIBS = \
-    -L$(FOAM_SMARTREDIS_LIB) \
     -lfiniteVolume \
     -lmeshTools \
-    -L$(FOAM_SMARTREDIS_LIB) -lhiredis -lredis++ \
+    -L$(SMARTREDIS_LIB) -lhiredis -lredis++ \
     -lsmartredis
diff --git a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartRedisAdapter.C b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartRedisAdapter.C
new file mode 100644
index 00000000..7854bfab
--- /dev/null
+++ b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartRedisAdapter.C
@@ -0,0 +1,68 @@
+/*---------------------------------------------------------------------------*\
+  =========                 |
+  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
+   \\    /   O peration     |
+    \\  /    A nd           | www.openfoam.com
+     \\/     M anipulation  |
+-------------------------------------------------------------------------------
+    Copyright (C) 2023 AUTHOR,AFFILIATION
+-------------------------------------------------------------------------------
+License
+    This file is part of OpenFOAM.
+
+    OpenFOAM is free software: you can redistribute it and/or modify it
+    under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
+    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+    for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.
+
+\*---------------------------------------------------------------------------*/
+
+#include "smartRedisAdapter.H"
+#include "Time.H"
+
+
+// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
+
+namespace Foam
+{
+    defineTypeNameAndDebug(smartRedisAdapter, 0);
+}
+
+
+// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //
+
+Foam::smartRedisAdapter::smartRedisAdapter
+(
+    const IOobject& io,
+    const dictionary& dict
+)
+:
+    regIOobject(io),
+    refCount(),
+    clusterMode_(dict.getOrDefault("clusterMode", true)),
+    client_(clusterMode_, io.name())  // deprecated SmartRedis constructor, kept for simplicity
+{
+}
+
+Foam::smartRedisAdapter::smartRedisAdapter
+(
+    smartRedisAdapter* ptr
+)
+:
+    regIOobject(*ptr),
+    refCount(),
+    clusterMode_(ptr->clusterMode_),
+    client_(std::move(ptr->client_))  // move, no copy of the client
+{
+}
+
+
+// ************************************************************************* //
diff --git a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartRedisAdapter.H b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartRedisAdapter.H
new file mode 100644
index 00000000..857ca8a8
--- /dev/null
+++ b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartRedisAdapter.H
@@ -0,0 +1,113 @@
+/*---------------------------------------------------------------------------*\
+  =========                 |
+  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
+   \\    /   O peration     |
+    \\  /    A nd           | www.openfoam.com
+     \\/     M anipulation  |
+-------------------------------------------------------------------------------
+    Copyright (C) 2023 AUTHOR, AFFILIATION
+-------------------------------------------------------------------------------
+License
+    This file is part of OpenFOAM.
+
+    OpenFOAM is free software: you can redistribute it and/or modify it
+    under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
+    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+    for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.
+
+Class
+    Foam::smartRedisAdapter
+
+Description
+    Registry-stored, reference-counted wrapper around a SmartRedis::Client
+\*---------------------------------------------------------------------------*/
+
+#ifndef smartRedisAdapter_H
+#define smartRedisAdapter_H
+
+#include "regIOobject.H"
+#include "client.h"
+
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
+
+namespace Foam
+{
+
+/*---------------------------------------------------------------------------*\
+                     Class smartRedisAdapter Declaration
+\*---------------------------------------------------------------------------*/
+
+class smartRedisAdapter
+:
+    public regIOobject,
+    public refCount
+{
+protected:
+
+    // Protected Data
+
+        //- Cluster mode of the connected database
+        bool clusterMode_;
+
+        //- SmartRedis Database Client
+        SmartRedis::Client client_;
+
+public:
+
+    //- Runtime type information
+    TypeName("smartRedisAdapter");
+
+
+    // Constructors
+
+        //- Construct from Time and dictionary
+        smartRedisAdapter
+        (
+            const IOobject& io,
+            const dictionary& dict
+        );
+
+        //- Construct from a pointer by moving the client
+        explicit smartRedisAdapter
+        (
+            smartRedisAdapter* ptr
+        );
+
+        //- No copy construct
+        smartRedisAdapter(const smartRedisAdapter&) = delete;
+
+        //- No copy assignment
+        void operator=(const smartRedisAdapter&) = delete;
+
+
+    //- Destructor
+    virtual ~smartRedisAdapter() = default;
+
+
+    // Member Functions
+
+        //- Return the client instance
+        SmartRedis::Client& client() { return client_; }
+
+        //- Implement writing to ostream from regIOobject
+        virtual bool writeData(Ostream&) const { return true; }
+};
+
+
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
+
+} // End namespace Foam
+
+// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
+
+#endif
+
+// ************************************************************************* //
diff --git a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartSimFunctionObject.C b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartSimFunctionObject.C
index 11928706..eb6c28d5 100644
--- a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartSimFunctionObject.C
+++ b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartSimFunctionObject.C
@@ -27,6 +27,7 @@ License
 
 #include "IOdictionary.H"
 #include "objectRegistry.H"
+#include "smartRedisAdapter.H"
 #include "smartSimFunctionObject.H"
 #include "Time.H"
 #include "fvMesh.H"
@@ -59,10 +60,44 @@ Foam::functionObjects::smartSimFunctionObject::smartSimFunctionObject
 )
 :
     fvMeshFunctionObject(name, runTime, dict),
-    clusterMode_(dict.getOrDefault("clusterMode", true)),
+    clientName_(dict.getOrDefault<word>("clientName", "default")),
     fieldNames_(dict.get<wordList>("fieldNames")),
     fieldDimensions_(dict.get<labelList>("fieldDimensions")),
-    client_(clusterMode_)
+    redisDB_(
+        runTime.foundObject<smartRedisAdapter>(clientName_)
+        ? &runTime.lookupObjectRef<smartRedisAdapter>(clientName_)
+        : new smartRedisAdapter
+        (
+            IOobject
+            (
+                clientName_,
+                runTime.timeName(),
+                runTime,
+                IOobject::NO_READ,
+                IOobject::AUTO_WRITE
+            ),
+            dict
+        )
+    )
+    //redisDB_(
+    //    runTime.foundObject<smartRedisAdapter>(clientName_)
+    //    ? std::make_shared<smartRedisAdapter>(&runTime.lookupObjectRef<smartRedisAdapter>(clientName_))
+    //    : std::make_shared<smartRedisAdapter>
+    //    (
+    //        new smartRedisAdapter
+    //        (
+    //            IOobject
+    //            (
+    //                clientName_,
+    //                runTime.timeName(),
+    //                runTime,
+    //                IOobject::NO_READ,
+    //                IOobject::AUTO_WRITE
+    //            ),
+    //            dict
+    //        )
+    //    )
+    //)
 {
     read(dict);
 }
@@ -117,7 +152,7 @@ bool Foam::functionObjects::smartSimFunctionObject::end()
             const volScalarField& sField =
                 mesh_.lookupObject<volScalarField>(fieldNames_[fieldI]);
 
             // Send the cell-centered scalar field to SmartRedis
-            client_.put_tensor(sField.name(), (void*)sField.internalField().cdata(), dims,
+            client().put_tensor(sField.name(), (void*)sField.internalField().cdata(), dims,
                 SRTensorTypeDouble, SRMemLayoutContiguous);
         }
@@ -127,7 +162,7 @@ bool Foam::functionObjects::smartSimFunctionObject::end()
             const volVectorField& vField =
                 mesh_.lookupObject<volVectorField>(fieldNames_[fieldI]);
 
             // Send the cell-centered scalar field to SmartRedis
-            client_.put_tensor(vField.name(), (void*)vField.internalField().cdata(), dims,
+            client().put_tensor(vField.name(), (void*)vField.internalField().cdata(), dims,
                 SRTensorTypeDouble, SRMemLayoutContiguous);
         }
         else if (fieldDimensions_[fieldI] == 6) // TODO(TM): symmTensor field
diff --git a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartSimFunctionObject.H b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartSimFunctionObject.H
index 83ecbd8c..8676125f 100644
--- a/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartSimFunctionObject.H
+++ b/2023-01/smartsim/smartsim_function_object/smartSimFunctionObject/smartSimFunctionObject.H
@@ -148,7 +148,7 @@ SourceFiles
 #define smartSimFunctionObject_H
 
 #include "fvMeshFunctionObject.H"
-#include "client.h"
+#include "smartRedisAdapter.H"
 
 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
@@ -167,9 +167,9 @@ class smartSimFunctionObject
 {
     // Private Data
 
-        //- Set to false if not using a clustered database
-        bool clusterMode_;
-
+        //- Name under which the shared client adapter is registered
+        word clientName_;
+
         //- List of field names to send-receive from SmartRedis
         wordList fieldNames_;
 
@@ -178,8 +178,9 @@ class smartSimFunctionObject
         // dimension and 6 is a symmetric tensor field dimension.
         labelList fieldDimensions_;
 
-        //- SmartRedis Database Client
-        SmartRedis::Client client_;
+        //- The RedisAI database client adapter, shared via the registry
+        tmp<smartRedisAdapter> redisDB_;
+        //std::shared_ptr<smartRedisAdapter> redisDB_;
 
 public:
@@ -217,6 +218,8 @@ public:
         virtual bool end();
 
         virtual bool write();
+
+        SmartRedis::Client& client() { return redisDB_->client(); }
 };
diff --git a/2023-01/smartsim/smartsim_function_object/tests/Make/files b/2023-01/smartsim/smartsim_function_object/tests/Make/files
new file mode 100644
index 00000000..1aa5be69
--- /dev/null
+++ b/2023-01/smartsim/smartsim_function_object/tests/Make/files
@@ -0,0 +1,5 @@
+$(FOAM_FOAMUT)/tests/testDriver.C
+
+smartSimFOTest.C
+
+EXE = ./testDriver
diff --git a/2023-01/smartsim/smartsim_function_object/tests/Make/options b/2023-01/smartsim/smartsim_function_object/tests/Make/options
new file mode 100644
index 00000000..9804b166
--- /dev/null
+++ b/2023-01/smartsim/smartsim_function_object/tests/Make/options
@@ -0,0 +1,17 @@
+EXE_INC = -std=c++2a \
+    -Wno-old-style-cast \
+    -I$(LIB_SRC)/finiteVolume/lnInclude \
+    -I$(LIB_SRC)/meshTools/lnInclude \
+    -I$(SMARTREDIS_INCLUDE) \
+    -I$(FOAM_FOAMUT)/smartSimFunctionObject/lnInclude \
+    -I$(FOAM_USER_LIBBIN)/catch2/include \
+    -I$(LIB_SRC)/Pstream/lnInclude
+
+EXE_LIBS = \
+    -lfiniteVolume \
+    -lmeshTools \
+    -L$(FOAM_USER_LIBBIN) -L$(FOAM_LIBBIN)/$(FOAM_MPI) \
+    -L$(FOAM_USER_LIBBIN)/catch2/lib -l:libCatch2.a \
+    -lPstream \
+    -L$(SMARTREDIS_LIB) -lhiredis -lredis++ \
+    -lsmartredis -lsmartSimFunctionObject
diff --git a/2023-01/smartsim/smartsim_function_object/tests/smartSimFOTest.C b/2023-01/smartsim/smartsim_function_object/tests/smartSimFOTest.C
new file mode 100644
index 00000000..18a9d9c3
--- /dev/null
+++ b/2023-01/smartsim/smartsim_function_object/tests/smartSimFOTest.C
@@ -0,0 +1,47 @@
+#include "IOobject.H"
+#include "PstreamReduceOps.H"
+#include "catch2/catch_all.hpp"
+#include "catch2/catch_test_macros.hpp"
+#include "fvCFD.H"
+#include "fvMesh.H"
+
+#include "smartSimFunctionObject.H"
+#include "functionObjectList.H"
+
+using namespace Foam;
+extern Time* timePtr;
+extern argList* argsPtr;
+
+TEST_CASE("Shared SmartRedis client", "[cavity][serial][parallel]")
+{
+    Time& runTime = *timePtr;
+    FatalError.dontThrowExceptions();
+    fvMesh mesh
+    (
+        IOobject
+        (
+            polyMesh::defaultRegion,
+            runTime.constant(),
+            runTime,
+            IOobject::MUST_READ,
+            IOobject::NO_WRITE
+        )
+    );
+    dictionary dict0;
+    dict0.set("region", polyMesh::defaultRegion);
+    dict0.set("type", "smartSimFunctionObject");
+    dict0.set("fieldNames", wordList());
+    dict0.set("fieldDimensions", labelList());
+    dict0.set("clusterMode", false);
+    dict0.set("clientName", "default");
+    dictionary dict1;
+    dict1.set("region", polyMesh::defaultRegion);
+    dict1.set("type", "smartSimFunctionObject");
+    dict1.set("fieldNames", wordList());
+    dict1.set("fieldDimensions", labelList());
+    dict1.set("clusterMode", false);
+    dict1.set("clientName", "default");
+    functionObjects::smartSimFunctionObject o0("smartSim0", runTime, dict0);
+    functionObjects::smartSimFunctionObject o1("smartSim1", runTime, dict1);
+    REQUIRE(&o0.client() == &o1.client());
+}
diff --git a/2023-01/smartsim/smartsim_mesh_motion/configure-smartredis.sh b/2023-01/smartsim/smartsim_mesh_motion/configure-smartredis.sh
deleted file mode 100755
index 8f9bfaa5..00000000
--- a/2023-01/smartsim/smartsim_mesh_motion/configure-smartredis.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-
-# Define environmental variables for including and linking SmartRedis in
-# OpenFOAM applications and libraries.
- -echo INFO: make sure you just sourced configure-smartredis.sh in its folder. - -export FOAM_SMARTREDIS=$HOME/smartredis -export FOAM_SMARTREDIS_INCLUDE=$FOAM_SMARTREDIS/include -export FOAM_SMARTREDIS_DEP_INCLUDE=$FOAM_SMARTREDIS/install/include -export FOAM_SMARTREDIS_LIB=$FOAM_SMARTREDIS/install/lib -export FOAM_SMARTREDIS_BUILD_LIB=$FOAM_SMARTREDIS/build -export LD_LIBRARY_PATH=$FOAM_SMARTREDIS_BUILD_LIB:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=$FOAM_SMARTREDIS_LIB:$LD_LIBRARY_PATH -export SSDB="127.0.0.1:8000" # for multinode setup let smartsim do this diff --git a/2023-01/smartsim/smartsim_mesh_motion/displacementSmartSimMotionSolver/Make/options b/2023-01/smartsim/smartsim_mesh_motion/displacementSmartSimMotionSolver/Make/options index 3b7e72a8..5fa89b20 100644 --- a/2023-01/smartsim/smartsim_mesh_motion/displacementSmartSimMotionSolver/Make/options +++ b/2023-01/smartsim/smartsim_mesh_motion/displacementSmartSimMotionSolver/Make/options @@ -8,8 +8,7 @@ EXE_INC = \ -I$(LIB_SRC)/dynamicMesh/lnInclude \ -I$(LIB_SRC)/functionObjects/forces/lnInclude \ -I$(LIB_SRC)/fvMotionSolver/lnInclude \ - -I$(FOAM_SMARTREDIS_INCLUDE) \ - -I$(FOAM_SMARTREDIS_DEP_INCLUDE) + -I$(SMARTREDIS_INCLUDE) LIB_LIBS = \ -lfiniteVolume \ @@ -19,7 +18,7 @@ LIB_LIBS = \ -lmeshTools \ -ldynamicMesh \ -lfvMotionSolvers \ - -L$(FOAM_SMARTREDIS_LIB) -lhiredis -lredis++ \ + -L$(SMARTREDIS_LIB) -lhiredis -lredis++ \ -lsmartredis /* -lforces include in controlDict if needed */
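End to end, the pieces in this patch are meant to be driven from SmartSim. A hedged Python driver sketch follows; the case and field names ("cavity", "pisoFoam", "p") are placeholders rather than names from this repository, and it assumes SmartSim's local launcher plus the smartredis Python package, with the case's controlDict loading libsmartSimFunctionObject and listing "p" in fieldNames:

from smartsim import Experiment
from smartredis import Client

exp = Experiment("openfoam-smartsim", launcher="local")
db = exp.create_database(port=6379, interface="lo")  # exports SSDB for child jobs
exp.start(db)

# Placeholder OpenFOAM run; its controlDict enables the smartSimFunctionObject
rs = exp.create_run_settings(exe="pisoFoam", exe_args=["-case", "cavity"])
model = exp.create_model("cavity", rs)
exp.start(model, block=True)

# The function object's end() hook put_tensor()ed each registered field
client = Client(address=db.get_address()[0], cluster=False)
p = client.get_tensor("p")  # cell-centred values, float64
print(p.shape)

exp.stop(db)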