From a6677caa2a5d180aef9f922cbd2a8ea3dd7a730b Mon Sep 17 00:00:00 2001 From: AJ Schmidt Date: Tue, 14 Jul 2020 17:05:13 -0400 Subject: [PATCH 01/75] update master references --- .github/PULL_REQUEST_TEMPLATE.md | 4 ++-- CONTRIBUTING.md | 4 +--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 9c42cda720..d889b2d593 100755 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -34,9 +34,9 @@ Here are some guidelines to help the review process go smoothly. features or make changes out of the scope of those requested by the reviewer (doing this just add delays as already reviewed code ends up having to be re-reviewed/it is hard to tell what is new etc!). Further, please do not - rebase your branch on master/force push/rewrite history, doing any of these + rebase your branch on main/force push/rewrite history, doing any of these causes the context of any comments made by reviewers to be lost. If - conflicts occur against master they should be resolved by merging master + conflicts occur against main they should be resolved by merging main into the branch used for making the pull request. Many thanks in advance for your cooperation! diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 41624274fb..1a4d74d625 100755 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,7 +23,7 @@ into three categories: ### Your first issue -1. Read the project's [README.md](https://github.com/rapidsai/RAFT/blob/master/README.md) +1. Read the project's [README.md](https://github.com/rapidsai/RAFT/blob/main/README.md) to learn how to setup the development environment 2. Find an issue to work on. The best way is to look for the [good first issue](https://github.com/rapidsai/RAFT/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) or [help wanted](https://github.com/rapidsai/RAFT/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) labels @@ -52,5 +52,3 @@ implementation of the issue, ask them in the issue instead of the PR. ## Attribution Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md - - From 309ea1a8559489ca0746e6cf0143098bac13cc94 Mon Sep 17 00:00:00 2001 From: gpuCI <38199262+GPUtester@users.noreply.github.com> Date: Wed, 6 Apr 2022 15:07:22 +0000 Subject: [PATCH 02/75] REL v21.08.00 release From e987ec83b2601c0598036c552d11194f3925ae53 Mon Sep 17 00:00:00 2001 From: gpuCI <38199262+GPUtester@users.noreply.github.com> Date: Wed, 6 Apr 2022 15:21:59 +0000 Subject: [PATCH 03/75] REL v22.04.00 release From 0b55c32c27959cb7789030ae80a257ed9121ab70 Mon Sep 17 00:00:00 2001 From: AJ Schmidt Date: Tue, 7 Jun 2022 10:41:30 -0400 Subject: [PATCH 04/75] Add `conda` compilers (#702) Our `devel` Docker containers need to be switched to using `conda` compilers to resolve a linking error. `raft` is in those containers, but hasn't yet been built with `conda` compilers. This PR addresses that. These changes won't cleanly merge into `branch-22.08` unfortunately due to the changes in #641, but we can address that another time. Authors: - AJ Schmidt (https://github.com/ajschmidt8) - Corey J. Nolet (https://github.com/cjnolet) - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Corey J. 
Nolet (https://github.com/cjnolet) --- build.sh | 26 +++++++++---------- .../libraft_distance/conda_build_config.yaml | 11 ++++++++ conda/recipes/libraft_distance/meta.yaml | 9 ++++--- .../libraft_headers/conda_build_config.yaml | 11 ++++++++ conda/recipes/libraft_headers/meta.yaml | 9 ++++--- .../libraft_nn/conda_build_config.yaml | 11 ++++++++ conda/recipes/libraft_nn/meta.yaml | 9 ++++--- .../recipes/pylibraft/conda_build_config.yaml | 14 ++++++++++ conda/recipes/pylibraft/meta.yaml | 14 ++++++---- conda/recipes/pyraft/conda_build_config.yaml | 15 +++++++++++ conda/recipes/pyraft/meta.yaml | 14 ++++++---- 11 files changed, 111 insertions(+), 32 deletions(-) create mode 100644 conda/recipes/libraft_distance/conda_build_config.yaml create mode 100644 conda/recipes/libraft_headers/conda_build_config.yaml create mode 100644 conda/recipes/libraft_nn/conda_build_config.yaml create mode 100644 conda/recipes/pylibraft/conda_build_config.yaml diff --git a/build.sh b/build.sh index 6e79bec897..bdaa607691 100755 --- a/build.sh +++ b/build.sh @@ -101,12 +101,12 @@ function cmakeArgs { # There are possible weird edge cases that may cause this regex filter to output nothing and fail silently # the true pipe will catch any weird edge cases that may happen and will cause the program to fall back # on the invalid option error - CMAKE_ARGS=$(echo $ARGS | { grep -Eo "\-\-cmake\-args=\".+\"" || true; }) - if [[ -n ${CMAKE_ARGS} ]]; then - # Remove the full CMAKE_ARGS argument from list of args so that it passes validArgs function - ARGS=${ARGS//$CMAKE_ARGS/} + EXTRA_CMAKE_ARGS=$(echo $ARGS | { grep -Eo "\-\-cmake\-args=\".+\"" || true; }) + if [[ -n ${EXTRA_CMAKE_ARGS} ]]; then + # Remove the full EXTRA_CMAKE_ARGS argument from list of args so that it passes validArgs function + ARGS=${ARGS//$EXTRA_CMAKE_ARGS/} # Filter the full argument down to just the extra string that will be added to cmake call - CMAKE_ARGS=$(echo $CMAKE_ARGS | grep -Eo "\".+\"" | sed -e 's/^"//' -e 's/"$//') + EXTRA_CMAKE_ARGS=$(echo $EXTRA_CMAKE_ARGS | grep -Eo "\".+\"" | sed -e 's/^"//' -e 's/"$//') fi fi } @@ -196,9 +196,9 @@ if [[ ${CMAKE_TARGET} == "" ]]; then CMAKE_TARGET="all" fi -# Append `-DFIND_RAFT_CPP=ON` to CMAKE_ARGS unless a user specified the option. -if [[ "${CMAKE_ARGS}" != *"DFIND_RAFT_CPP"* ]]; then - CMAKE_ARGS="${CMAKE_ARGS} -DFIND_RAFT_CPP=ON" +# Append `-DFIND_RAFT_CPP=ON` to EXTRA_CMAKE_ARGS unless a user specified the option. 
+if [[ "${EXTRA_CMAKE_ARGS}" != *"DFIND_RAFT_CPP"* ]]; then + EXTRA_CMAKE_ARGS="${EXTRA_CMAKE_ARGS} -DFIND_RAFT_CPP=ON" fi # If clean given, run it prior to any other steps @@ -250,7 +250,7 @@ if (( ${NUMARGS} == 0 )) || hasArg libraft || hasArg docs || hasArg tests || has -DRAFT_COMPILE_DIST_LIBRARY=${COMPILE_DIST_LIBRARY} \ -DRAFT_USE_FAISS_STATIC=${BUILD_STATIC_FAISS} \ -DRAFT_ENABLE_thrust_DEPENDENCY=${ENABLE_thrust_DEPENDENCY} \ - ${CMAKE_ARGS} + ${EXTRA_CMAKE_ARGS} if [[ ${CMAKE_TARGET} != "" ]]; then echo "-- Compiling targets: ${CMAKE_TARGET}, verbose=${VERBOSE_FLAG}" @@ -266,9 +266,9 @@ fi if (( ${NUMARGS} == 0 )) || hasArg pyraft || hasArg docs; then cd ${REPODIR}/python/raft - python setup.py build_ext -j${PARALLEL_LEVEL:-1} --inplace -- -DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} -DCMAKE_LIBRARY_PATH=${LIBRAFT_BUILD_DIR} ${CMAKE_ARGS} + python setup.py build_ext -j${PARALLEL_LEVEL:-1} --inplace -- -DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} -DCMAKE_LIBRARY_PATH=${LIBRAFT_BUILD_DIR} ${EXTRA_CMAKE_ARGS} if [[ ${INSTALL_TARGET} != "" ]]; then - python setup.py install --single-version-externally-managed --record=record.txt -- -DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} ${CMAKE_ARGS} + python setup.py install --single-version-externally-managed --record=record.txt -- -DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} ${EXTRA_CMAKE_ARGS} fi fi @@ -276,9 +276,9 @@ fi if (( ${NUMARGS} == 0 )) || hasArg pylibraft; then cd ${REPODIR}/python/pylibraft - python setup.py build_ext -j${PARALLEL_LEVEL:-1} --inplace -- -DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} -DCMAKE_LIBRARY_PATH=${LIBRAFT_BUILD_DIR} ${CMAKE_ARGS} + python setup.py build_ext -j${PARALLEL_LEVEL:-1} --inplace -- -DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} -DCMAKE_LIBRARY_PATH=${LIBRAFT_BUILD_DIR} ${EXTRA_CMAKE_ARGS} if [[ ${INSTALL_TARGET} != "" ]]; then - python setup.py install --single-version-externally-managed --record=record.txt -- -DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} ${CMAKE_ARGS} + python setup.py install --single-version-externally-managed --record=record.txt -- -DCMAKE_PREFIX_PATH=${INSTALL_PREFIX} ${EXTRA_CMAKE_ARGS} fi fi diff --git a/conda/recipes/libraft_distance/conda_build_config.yaml b/conda/recipes/libraft_distance/conda_build_config.yaml new file mode 100644 index 0000000000..322fe6faac --- /dev/null +++ b/conda/recipes/libraft_distance/conda_build_config.yaml @@ -0,0 +1,11 @@ +c_compiler_version: + - 9 + +cxx_compiler_version: + - 9 + +cuda_compiler: + - nvcc + +sysroot_version: + - "2.17" diff --git a/conda/recipes/libraft_distance/meta.yaml b/conda/recipes/libraft_distance/meta.yaml index 9b78bd15f3..31ac73a562 100644 --- a/conda/recipes/libraft_distance/meta.yaml +++ b/conda/recipes/libraft_distance/meta.yaml @@ -18,9 +18,6 @@ build: number: {{ GIT_DESCRIBE_NUMBER }} string: cuda{{ cuda_major }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }} script_env: - - CC - - CXX - - CUDAHOSTCXX - PARALLEL_LEVEL - VERSION_SUFFIX - PROJECT_FLASH @@ -33,9 +30,15 @@ build: - SCCACHE_BUCKET=rapids-sccache - SCCACHE_REGION=us-west-2 - SCCACHE_IDLE_TIMEOUT=32768 + ignore_run_exports_from: + - {{ compiler('cuda') }} requirements: build: + - {{ compiler('c') }} + - {{ compiler('cxx') }} + - {{ compiler('cuda') }} {{ cuda_version }} + - sysroot_{{ target_platform }} {{ sysroot_version }} - cmake>=3.20.1,!=3.23.0 host: - libraft-headers {{ version }} diff --git a/conda/recipes/libraft_headers/conda_build_config.yaml b/conda/recipes/libraft_headers/conda_build_config.yaml new file mode 100644 index 0000000000..322fe6faac --- /dev/null +++ 
b/conda/recipes/libraft_headers/conda_build_config.yaml @@ -0,0 +1,11 @@ +c_compiler_version: + - 9 + +cxx_compiler_version: + - 9 + +cuda_compiler: + - nvcc + +sysroot_version: + - "2.17" diff --git a/conda/recipes/libraft_headers/meta.yaml b/conda/recipes/libraft_headers/meta.yaml index fd95da66ee..8ff538f2a6 100644 --- a/conda/recipes/libraft_headers/meta.yaml +++ b/conda/recipes/libraft_headers/meta.yaml @@ -18,9 +18,6 @@ build: number: {{ GIT_DESCRIBE_NUMBER }} string: cuda{{ cuda_major }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }} script_env: - - CC - - CXX - - CUDAHOSTCXX - PARALLEL_LEVEL - VERSION_SUFFIX - PROJECT_FLASH @@ -33,9 +30,15 @@ build: - SCCACHE_BUCKET=rapids-sccache - SCCACHE_REGION=us-west-2 - SCCACHE_IDLE_TIMEOUT=32768 + ignore_run_exports_from: + - {{ compiler('cuda') }} requirements: build: + - {{ compiler('c') }} + - {{ compiler('cxx') }} + - {{ compiler('cuda') }} {{ cuda_version }} + - sysroot_{{ target_platform }} {{ sysroot_version }} - cmake>=3.20.1,!=3.23.0 host: - nccl>=2.9.9 diff --git a/conda/recipes/libraft_nn/conda_build_config.yaml b/conda/recipes/libraft_nn/conda_build_config.yaml new file mode 100644 index 0000000000..322fe6faac --- /dev/null +++ b/conda/recipes/libraft_nn/conda_build_config.yaml @@ -0,0 +1,11 @@ +c_compiler_version: + - 9 + +cxx_compiler_version: + - 9 + +cuda_compiler: + - nvcc + +sysroot_version: + - "2.17" diff --git a/conda/recipes/libraft_nn/meta.yaml b/conda/recipes/libraft_nn/meta.yaml index fa3392ddc8..1604c15acc 100644 --- a/conda/recipes/libraft_nn/meta.yaml +++ b/conda/recipes/libraft_nn/meta.yaml @@ -18,9 +18,6 @@ build: number: {{ GIT_DESCRIBE_NUMBER }} string: cuda{{ cuda_major }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }} script_env: - - CC - - CXX - - CUDAHOSTCXX - PARALLEL_LEVEL - VERSION_SUFFIX - PROJECT_FLASH @@ -33,9 +30,15 @@ build: - SCCACHE_BUCKET=rapids-sccache - SCCACHE_REGION=us-west-2 - SCCACHE_IDLE_TIMEOUT=32768 + ignore_run_exports_from: + - {{ compiler('cuda') }} requirements: build: + - {{ compiler('c') }} + - {{ compiler('cxx') }} + - {{ compiler('cuda') }} {{ cuda_version }} + - sysroot_{{ target_platform }} {{ sysroot_version }} - cmake>=3.20.1,!=3.23.0 host: - libraft-headers {{ version }} diff --git a/conda/recipes/pylibraft/conda_build_config.yaml b/conda/recipes/pylibraft/conda_build_config.yaml new file mode 100644 index 0000000000..5c2fa69f8e --- /dev/null +++ b/conda/recipes/pylibraft/conda_build_config.yaml @@ -0,0 +1,14 @@ +c_compiler_version: + - 9 + +cxx_compiler_version: + - 9 + +cuda_compiler: + - nvcc + +sysroot_version: + - "2.17" + +cmake_version: + - ">=3.20.1,!=3.23.0" diff --git a/conda/recipes/pylibraft/meta.yaml b/conda/recipes/pylibraft/meta.yaml index 4576e5146f..d19961d39d 100644 --- a/conda/recipes/pylibraft/meta.yaml +++ b/conda/recipes/pylibraft/meta.yaml @@ -18,17 +18,21 @@ source: build: number: {{ GIT_DESCRIBE_NUMBER }} string: cuda{{ cuda_major }}_py{{ py_version }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }} - script_env: - - CC - - CXX - - VERSION_SUFFIX + ignore_run_exports_from: + - {{ compiler('cuda') }} requirements: build: + - cmake {{ cmake_version }} + - {{ compiler('c') }} + - {{ compiler('cxx') }} + - {{ compiler('cuda') }} {{ cuda_version }} + - sysroot_{{ target_platform }} {{ sysroot_version }} + host: - python x.x - setuptools - cython>=0.29,<0.30 - - cmake>=3.20.1,!=3.23.0 + - cmake {{ cmake_version }} - scikit-build>=0.13.1 - rmm {{ minor_version }} - libraft-headers {{ version }} diff --git 
a/conda/recipes/pyraft/conda_build_config.yaml b/conda/recipes/pyraft/conda_build_config.yaml index c542be2d20..fdc9a58423 100644 --- a/conda/recipes/pyraft/conda_build_config.yaml +++ b/conda/recipes/pyraft/conda_build_config.yaml @@ -1,2 +1,17 @@ +c_compiler_version: + - 9 + +cxx_compiler_version: + - 9 + +cuda_compiler: + - nvcc + +sysroot_version: + - "2.17" + ucx_version: - "1.12.1" + +cmake_version: + - ">=3.20.1,!=3.23.0" diff --git a/conda/recipes/pyraft/meta.yaml b/conda/recipes/pyraft/meta.yaml index 86159622d3..f0c89f411b 100644 --- a/conda/recipes/pyraft/meta.yaml +++ b/conda/recipes/pyraft/meta.yaml @@ -19,17 +19,21 @@ source: build: number: {{ GIT_DESCRIBE_NUMBER }} string: cuda{{ cuda_major }}_py{{ py_version }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }} - script_env: - - CC - - CXX - - VERSION_SUFFIX + ignore_run_exports_from: + - {{ compiler('cuda') }} requirements: build: + - cmake {{ cmake_version }} + - {{ compiler('c') }} + - {{ compiler('cxx') }} + - {{ compiler('cuda') }} {{ cuda_version }} + - sysroot_{{ target_platform }} {{ sysroot_version }} + host: - python x.x - setuptools - cython>=0.29,<0.30 - - cmake>=3.20.1,!=3.23.0 + - cmake {{ cmake_version }} - scikit-build>=0.13.1 - rmm {{ minor_version }} - libraft-headers {{ version }} From 229b9f81b580386a0d2b81ca74e7f5f6be6594f5 Mon Sep 17 00:00:00 2001 From: Raymond Douglass Date: Tue, 7 Jun 2022 11:27:39 -0400 Subject: [PATCH 05/75] update changelog --- CHANGELOG.md | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cf5f673997..b67ff97ded 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,66 @@ -# 22.06.00 (Date TBD) +# raft 22.06.00 (7 Jun 2022) -Please see https://github.com/rapidsai//releases/tag/v22.06.00a for the latest changes to this development branch. +## 🚨 Breaking Changes + +- Rng: removed cyclic dependency creating hard-to-debug compiler errors ([#639](https://github.com/rapidsai/raft/pull/639)) [@MatthiasKohl](https://github.com/MatthiasKohl) +- Allow enabling NVTX markers by downstream projects after install ([#610](https://github.com/rapidsai/raft/pull/610)) [@achirkin](https://github.com/achirkin) +- Rng: expose host-rng-state in host-only API ([#609](https://github.com/rapidsai/raft/pull/609)) [@MatthiasKohl](https://github.com/MatthiasKohl) + +## 🐛 Bug Fixes + +- For fixing the cuGraph test failures with PCG ([#690](https://github.com/rapidsai/raft/pull/690)) [@vinaydes](https://github.com/vinaydes) +- Fix excessive memory used in selection test ([#689](https://github.com/rapidsai/raft/pull/689)) [@achirkin](https://github.com/achirkin) +- Revert print vector changes because of std::vector<bool> ([#681](https://github.com/rapidsai/raft/pull/681)) [@lowener](https://github.com/lowener) +- fix race in fusedL2knn smem read/write by adding a syncwarp ([#679](https://github.com/rapidsai/raft/pull/679)) [@mdoijade](https://github.com/mdoijade) +- gemm: fix parameter C mistakenly set as const ([#664](https://github.com/rapidsai/raft/pull/664)) [@achirkin](https://github.com/achirkin) +- Fix SelectionTest: allow different indices when keys are equal. 
([#659](https://github.com/rapidsai/raft/pull/659)) [@achirkin](https://github.com/achirkin) +- Revert recent cmake updates ([#657](https://github.com/rapidsai/raft/pull/657)) [@cjnolet](https://github.com/cjnolet) +- Don't install component dependency files in raft-header only mode ([#655](https://github.com/rapidsai/raft/pull/655)) [@robertmaynard](https://github.com/robertmaynard) +- Rng: removed cyclic dependency creating hard-to-debug compiler errors ([#639](https://github.com/rapidsai/raft/pull/639)) [@MatthiasKohl](https://github.com/MatthiasKohl) +- Fixing raft compile bug w/ RNG changes ([#634](https://github.com/rapidsai/raft/pull/634)) [@cjnolet](https://github.com/cjnolet) +- Get `libcudacxx` from `cuco` ([#632](https://github.com/rapidsai/raft/pull/632)) [@trxcllnt](https://github.com/trxcllnt) +- RNG API fixes ([#630](https://github.com/rapidsai/raft/pull/630)) [@MatthiasKohl](https://github.com/MatthiasKohl) +- Fix mdspan accessor mixin offset policy. ([#628](https://github.com/rapidsai/raft/pull/628)) [@trivialfis](https://github.com/trivialfis) +- Branch 22.06 merge 22.04 ([#625](https://github.com/rapidsai/raft/pull/625)) [@cjnolet](https://github.com/cjnolet) +- fix issue in fusedL2knn which happens when rows are multiple of 256 ([#604](https://github.com/rapidsai/raft/pull/604)) [@mdoijade](https://github.com/mdoijade) + +## 🚀 New Features + +- Restore changes from #653 and #655 and correct cmake component dependencies ([#686](https://github.com/rapidsai/raft/pull/686)) [@robertmaynard](https://github.com/robertmaynard) +- Adding handle and stream to pylibraft ([#683](https://github.com/rapidsai/raft/pull/683)) [@cjnolet](https://github.com/cjnolet) +- Map CMake install components to conda library packages ([#653](https://github.com/rapidsai/raft/pull/653)) [@robertmaynard](https://github.com/robertmaynard) +- Rng: expose host-rng-state in host-only API ([#609](https://github.com/rapidsai/raft/pull/609)) [@MatthiasKohl](https://github.com/MatthiasKohl) +- mdspan/mdarray template functions and utilities ([#601](https://github.com/rapidsai/raft/pull/601)) [@divyegala](https://github.com/divyegala) + +## 🛠️ Improvements + +- Change build.sh to find C++ library by default ([#697](https://github.com/rapidsai/raft/pull/697)) [@vyasr](https://github.com/vyasr) +- Pin `dask` and `distributed` for release ([#693](https://github.com/rapidsai/raft/pull/693)) [@galipremsagar](https://github.com/galipremsagar) +- Pin `dask` & `distributed` for release ([#680](https://github.com/rapidsai/raft/pull/680)) [@galipremsagar](https://github.com/galipremsagar) +- Improve logging ([#673](https://github.com/rapidsai/raft/pull/673)) [@achirkin](https://github.com/achirkin) +- Fix minor errors in CMake configuration ([#662](https://github.com/rapidsai/raft/pull/662)) [@vyasr](https://github.com/vyasr) +- Pulling mdspan fork (from official rapids repo) into raft to remove dependency ([#649](https://github.com/rapidsai/raft/pull/649)) [@cjnolet](https://github.com/cjnolet) +- Fixing the unit test issue(s) in RAFT ([#646](https://github.com/rapidsai/raft/pull/646)) [@vinaydes](https://github.com/vinaydes) +- Build pyraft with scikit-build ([#644](https://github.com/rapidsai/raft/pull/644)) [@vyasr](https://github.com/vyasr) +- Some fixes to pairwise distances for cupy integration ([#643](https://github.com/rapidsai/raft/pull/643)) [@cjnolet](https://github.com/cjnolet) +- Require UCX 1.12.1+ ([#638](https://github.com/rapidsai/raft/pull/638)) [@jakirkham](https://github.com/jakirkham) +- Updating 
raft rng host public API and adding docs ([#636](https://github.com/rapidsai/raft/pull/636)) [@cjnolet](https://github.com/cjnolet) +- Build pylibraft with scikit-build ([#633](https://github.com/rapidsai/raft/pull/633)) [@vyasr](https://github.com/vyasr) +- Add `cuda_lib_dir` to `library_dirs`, allow changing `UCX`/`RMM`/`Thrust`/`spdlog` locations via envvars in `setup.py` ([#624](https://github.com/rapidsai/raft/pull/624)) [@trxcllnt](https://github.com/trxcllnt) +- Remove perf prints from MST ([#623](https://github.com/rapidsai/raft/pull/623)) [@divyegala](https://github.com/divyegala) +- Enable components installation using CMake ([#621](https://github.com/rapidsai/raft/pull/621)) [@Ethyling](https://github.com/Ethyling) +- Allow nullptr as input-indices argument of select_k ([#618](https://github.com/rapidsai/raft/pull/618)) [@achirkin](https://github.com/achirkin) +- Update CMake pinning to allow newer CMake versions ([#617](https://github.com/rapidsai/raft/pull/617)) [@vyasr](https://github.com/vyasr) +- Unpin `dask` & `distributed` for development ([#616](https://github.com/rapidsai/raft/pull/616)) [@galipremsagar](https://github.com/galipremsagar) +- Improve performance of select-top-k RADIX implementation ([#615](https://github.com/rapidsai/raft/pull/615)) [@achirkin](https://github.com/achirkin) +- Moving more prims benchmarks to RAFT ([#613](https://github.com/rapidsai/raft/pull/613)) [@cjnolet](https://github.com/cjnolet) +- Allow enabling NVTX markers by downstream projects after install ([#610](https://github.com/rapidsai/raft/pull/610)) [@achirkin](https://github.com/achirkin) +- Improve performance of select-top-k WARP_SORT implementation ([#606](https://github.com/rapidsai/raft/pull/606)) [@achirkin](https://github.com/achirkin) +- Enable building static libs ([#602](https://github.com/rapidsai/raft/pull/602)) [@trxcllnt](https://github.com/trxcllnt) +- Update `ucx-py` version ([#596](https://github.com/rapidsai/raft/pull/596)) [@ajschmidt8](https://github.com/ajschmidt8) +- Fix merge conflicts ([#587](https://github.com/rapidsai/raft/pull/587)) [@ajschmidt8](https://github.com/ajschmidt8) +- Making cuco, thrust, and mdspan optional dependencies. 
([#585](https://github.com/rapidsai/raft/pull/585)) [@cjnolet](https://github.com/cjnolet) +- Some RBC3D fixes ([#530](https://github.com/rapidsai/raft/pull/530)) [@cjnolet](https://github.com/cjnolet) # raft 22.04.00 (6 Apr 2022) From 3e5a625cf905174e95d241c613fe8c5a290b45a3 Mon Sep 17 00:00:00 2001 From: Raymond Douglass Date: Tue, 7 Jun 2022 14:23:22 -0400 Subject: [PATCH 06/75] FIX update-version.sh --- ci/release/update-version.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh index 0ee92a6e22..8b7bc4fba2 100755 --- a/ci/release/update-version.sh +++ b/ci/release/update-version.sh @@ -36,7 +36,6 @@ sed_runner 's/'"pylibraft_version .*)"'/'"pylibraft_version ${NEXT_FULL_TAG})"'/ sed_runner 's/'"pyraft_version .*)"'/'"pyraft_version ${NEXT_FULL_TAG})"'/g' python/raft/CMakeLists.txt sed_runner 's/'"branch-.*\/RAPIDS.cmake"'/'"branch-${NEXT_SHORT_TAG}\/RAPIDS.cmake"'/g' cpp/CMakeLists.txt sed_runner 's/'"branch-.*\/RAPIDS.cmake"'/'"branch-${NEXT_SHORT_TAG}\/RAPIDS.cmake"'/g' python/pylibraft/CMakeLists.txt -sed_runner 's/'"branch-.*\/RAPIDS.cmake"'/'"branch-${NEXT_SHORT_TAG}\/RAPIDS.cmake"'/g' python/pyraft/CMakeLists.txt for FILE in conda/environments/*.yml; do sed_runner "s/ucx-py=.*/ucx-py=${NEXT_UCX_PY_VERSION}/g" ${FILE}; From ed2c5295874994051f33ded1d7a3fcef4a02d480 Mon Sep 17 00:00:00 2001 From: gpuCI <38199262+GPUtester@users.noreply.github.com> Date: Tue, 7 Jun 2022 18:25:13 +0000 Subject: [PATCH 07/75] REL v22.06.00 release --- cpp/CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 9902bdded7..b0af6c1f9c 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -28,7 +28,7 @@ include(rapids-find) rapids_cuda_init_architectures(RAFT) -project(RAFT VERSION ${RAFT_VERSION} LANGUAGES CXX CUDA) +project(RAFT VERSION 22.06.00 LANGUAGES CXX CUDA) # Needed because GoogleBenchmark changes the state of FindThreads.cmake, causing subsequent runs to # have different values for the `Threads::Threads` target. Setting this flag ensures From 87a7d16c71c8f38331eda3f0a95d9863dce685b4 Mon Sep 17 00:00:00 2001 From: gpuCI <38199262+GPUtester@users.noreply.github.com> Date: Wed, 17 Aug 2022 20:56:14 +0000 Subject: [PATCH 08/75] REL v22.08.00 release --- docs/source/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 630e1ff71e..21c7838c42 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -84,9 +84,9 @@ # built documents. # # The short X.Y version. -version = "22.08" +version = '22.08' # The full version, including alpha/beta/rc tags. -release = "22.08.00" +release = '22.08.00' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. From 31ae59710e072d082477b63061f5445ba7ae35d6 Mon Sep 17 00:00:00 2001 From: gpuCI <38199262+GPUtester@users.noreply.github.com> Date: Wed, 12 Oct 2022 15:31:53 +0000 Subject: [PATCH 09/75] REL v22.10.00 release --- docs/source/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 9aa3a19310..1e7d5a912a 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -84,9 +84,9 @@ # built documents. # # The short X.Y version. -version = "22.10" +version = '22.10' # The full version, including alpha/beta/rc tags. -release = "22.10.00" +release = '22.10.00' # The language for content autogenerated by Sphinx. 
Refer to documentation # for a list of supported languages. From 08abc7286388fca71cd35aee3e4c470bdd809d6e Mon Sep 17 00:00:00 2001 From: "Corey J. Nolet" Date: Fri, 4 Nov 2022 10:08:23 -0400 Subject: [PATCH 10/75] [HOTFIX] Update cuda-python dependency to 11.7.1 (#963) @shwina I'm going to apologize ahead of time for this, but i was trying to forward merge your branch 22.10 locally to create a new PR from it and I accidentally pushed to your remote branch. I cherry-picked the commits over to a new branch for the hotfix. Authors: - Bradley Dice (https://github.com/bdice) - Ashwin Srinath (https://github.com/shwina) Approvers: - Ray Douglass (https://github.com/raydouglass) --- conda/environments/raft_dev_cuda11.0.yml | 2 +- conda/environments/raft_dev_cuda11.2.yml | 2 +- conda/environments/raft_dev_cuda11.4.yml | 2 +- conda/environments/raft_dev_cuda11.5.yml | 4 ++-- conda/recipes/pylibraft/meta.yaml | 4 ++-- conda/recipes/raft-dask/meta.yaml | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/conda/environments/raft_dev_cuda11.0.yml b/conda/environments/raft_dev_cuda11.0.yml index 3c2b30d263..053adf8a7b 100644 --- a/conda/environments/raft_dev_cuda11.0.yml +++ b/conda/environments/raft_dev_cuda11.0.yml @@ -1,9 +1,9 @@ name: raft_dev channels: - rapidsai -- nvidia - rapidsai-nightly - conda-forge +- nvidia dependencies: - c-compiler - cxx-compiler diff --git a/conda/environments/raft_dev_cuda11.2.yml b/conda/environments/raft_dev_cuda11.2.yml index d8cb5759c1..caca5c591e 100644 --- a/conda/environments/raft_dev_cuda11.2.yml +++ b/conda/environments/raft_dev_cuda11.2.yml @@ -1,9 +1,9 @@ name: raft_dev channels: - rapidsai -- nvidia - rapidsai-nightly - conda-forge +- nvidia dependencies: - c-compiler - cxx-compiler diff --git a/conda/environments/raft_dev_cuda11.4.yml b/conda/environments/raft_dev_cuda11.4.yml index 74ee0366ca..07f8918689 100644 --- a/conda/environments/raft_dev_cuda11.4.yml +++ b/conda/environments/raft_dev_cuda11.4.yml @@ -1,9 +1,9 @@ name: raft_dev channels: - rapidsai -- nvidia - rapidsai-nightly - conda-forge +- nvidia dependencies: - c-compiler - cxx-compiler diff --git a/conda/environments/raft_dev_cuda11.5.yml b/conda/environments/raft_dev_cuda11.5.yml index fca6684bc8..0a29a94298 100644 --- a/conda/environments/raft_dev_cuda11.5.yml +++ b/conda/environments/raft_dev_cuda11.5.yml @@ -1,14 +1,14 @@ name: raft_dev channels: - rapidsai -- nvidia - rapidsai-nightly - conda-forge +- nvidia dependencies: - c-compiler - cxx-compiler - cudatoolkit=11.5 -- cuda-python >=11.5,<11.7.1 +- cuda-python >=11.7.1,<12.0 - ninja - clang=11.1.0 - clang-tools=11.1.0 diff --git a/conda/recipes/pylibraft/meta.yaml b/conda/recipes/pylibraft/meta.yaml index 3d19892657..68e2d5952d 100644 --- a/conda/recipes/pylibraft/meta.yaml +++ b/conda/recipes/pylibraft/meta.yaml @@ -37,12 +37,12 @@ requirements: - libraft-headers {{ version }} - libraft-distance {{ version }} - cudatoolkit {{ cuda_version }}.* - - cuda-python >=11.5,<11.7.1 + - cuda-python >=11.7.1,<12.0 run: - python x.x - libraft-headers {{ version }} - libraft-distance {{ version }} - - cuda-python >=11.5,<11.7.1 + - cuda-python >=11.7.1,<12.0 - {{ pin_compatible('cudatoolkit', max_pin='x', min_pin='x') }} tests: # [linux64] diff --git a/conda/recipes/raft-dask/meta.yaml b/conda/recipes/raft-dask/meta.yaml index 4e10294db7..c0198dd974 100644 --- a/conda/recipes/raft-dask/meta.yaml +++ b/conda/recipes/raft-dask/meta.yaml @@ -37,7 +37,7 @@ requirements: - rmm {{ minor_version }} - pylibraft {{ version }} - cudatoolkit 
{{ cuda_version }}.* - - cuda-python >=11.5,<11.7.1 + - cuda-python >=11.7.1,<12.0 - nccl>=2.9.9 - ucx {{ ucx_version }} - ucx-py {{ ucx_py_version }} @@ -53,7 +53,7 @@ requirements: - ucx-proc=*=gpu - dask==2022.9.2 - distributed==2022.9.2 - - cuda-python >=11.5,<11.7.1 + - cuda-python >=11.7.1,<12.0 - joblib >=0.11 - {{ pin_compatible('cudatoolkit', max_pin='x', min_pin='x') }} From f7d2335d0c59fac283aa234d022f9c742ac2978e Mon Sep 17 00:00:00 2001 From: gpuCI <38199262+GPUtester@users.noreply.github.com> Date: Fri, 4 Nov 2022 14:09:55 +0000 Subject: [PATCH 11/75] REL v22.10.01 release --- cpp/CMakeLists.txt | 2 +- docs/source/conf.py | 2 +- python/pylibraft/CMakeLists.txt | 2 +- python/raft-dask/CMakeLists.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index ce6eb00bc1..36f88170e0 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -14,7 +14,7 @@ # limitations under the License. #============================================================================= set(RAPIDS_VERSION "22.10") -set(RAFT_VERSION "22.10.00") +set(RAFT_VERSION "22.10.01") cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) include(../fetch_rapids.cmake) diff --git a/docs/source/conf.py b/docs/source/conf.py index 1e7d5a912a..f065da7417 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -86,7 +86,7 @@ # The short X.Y version. version = '22.10' # The full version, including alpha/beta/rc tags. -release = '22.10.00' +release = '22.10.01' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/python/pylibraft/CMakeLists.txt b/python/pylibraft/CMakeLists.txt index 499ee7de99..b09d32ac1f 100644 --- a/python/pylibraft/CMakeLists.txt +++ b/python/pylibraft/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(pylibraft_version 22.10.00) +set(pylibraft_version 22.10.01) include(../../fetch_rapids.cmake) diff --git a/python/raft-dask/CMakeLists.txt b/python/raft-dask/CMakeLists.txt index a2e495605d..d17e7d10ed 100644 --- a/python/raft-dask/CMakeLists.txt +++ b/python/raft-dask/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(raft_dask_version 22.10.00) +set(raft_dask_version 22.10.01) include(../../fetch_rapids.cmake) From 9a716b7d7672a69c77277c391e3ee76172ebd3d8 Mon Sep 17 00:00:00 2001 From: gpuCI <38199262+GPUtester@users.noreply.github.com> Date: Thu, 8 Dec 2022 15:27:24 +0000 Subject: [PATCH 12/75] REL v22.12.00 release From a655c9a7651ac7fa7246a686cd5c17fc2b8efa76 Mon Sep 17 00:00:00 2001 From: gpuCI <38199262+GPUtester@users.noreply.github.com> Date: Wed, 14 Dec 2022 15:24:34 +0000 Subject: [PATCH 13/75] REL v22.12.01 release --- cpp/CMakeLists.txt | 2 +- docs/source/conf.py | 2 +- python/pylibraft/CMakeLists.txt | 2 +- python/raft-dask/CMakeLists.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 9c4e946884..2fd10fe067 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -12,7 +12,7 @@ # the License. 
 # =============================================================================
 set(RAPIDS_VERSION "22.12")
-set(RAFT_VERSION "22.12.00")
+set(RAFT_VERSION "22.12.01")
 cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR)
 include(../fetch_rapids.cmake)
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 00cfdd7c09..e0c1f4543a 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -87,7 +87,7 @@
 # The short X.Y version.
 version = '22.12'
 # The full version, including alpha/beta/rc tags.
-release = '22.12.00'
+release = '22.12.01'
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/python/pylibraft/CMakeLists.txt b/python/pylibraft/CMakeLists.txt
index 19246ef0f8..ba3c9a453e 100644
--- a/python/pylibraft/CMakeLists.txt
+++ b/python/pylibraft/CMakeLists.txt
@@ -14,7 +14,7 @@
 cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR)
-set(pylibraft_version 22.12.00)
+set(pylibraft_version 22.12.01)
 include(../../fetch_rapids.cmake)
diff --git a/python/raft-dask/CMakeLists.txt b/python/raft-dask/CMakeLists.txt
index 2fbb616d96..2a3b8390e3 100644
--- a/python/raft-dask/CMakeLists.txt
+++ b/python/raft-dask/CMakeLists.txt
@@ -14,7 +14,7 @@
 cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR)
-set(raft_dask_version 22.12.00)
+set(raft_dask_version 22.12.01)
 include(../../fetch_rapids.cmake)
From dc800d6fc61e3eb236992ce6aa02933b42a589fe Mon Sep 17
00:00:00 2001 From: Raymond Douglass Date: Fri, 21 Apr 2023 09:15:08 -0400 Subject: [PATCH 16/75] REL v23.04.01 release --- cpp/CMakeLists.txt | 2 +- docs/source/conf.py | 2 +- python/pylibraft/CMakeLists.txt | 2 +- python/pylibraft/pylibraft/__init__.py | 2 +- python/pylibraft/pyproject.toml | 2 +- python/raft-dask/CMakeLists.txt | 2 +- python/raft-dask/pyproject.toml | 2 +- python/raft-dask/raft_dask/__init__.py | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 4753b534e4..8d8099cc39 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -11,7 +11,7 @@ # or implied. See the License for the specific language governing permissions and limitations under # the License. set(RAPIDS_VERSION "23.04") -set(RAFT_VERSION "23.04.00") +set(RAFT_VERSION "23.04.01") cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) include(../fetch_rapids.cmake) diff --git a/docs/source/conf.py b/docs/source/conf.py index 33a8a9217a..09cb36fb0d 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -69,7 +69,7 @@ # The short X.Y version. version = '23.04' # The full version, including alpha/beta/rc tags. -release = '23.04.00' +release = '23.04.01' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/python/pylibraft/CMakeLists.txt b/python/pylibraft/CMakeLists.txt index 3c2b093362..713eb9c29d 100644 --- a/python/pylibraft/CMakeLists.txt +++ b/python/pylibraft/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(pylibraft_version 23.04.00) +set(pylibraft_version 23.04.01) include(../../fetch_rapids.cmake) diff --git a/python/pylibraft/pylibraft/__init__.py b/python/pylibraft/pylibraft/__init__.py index 39145085f0..8587d1e244 100644 --- a/python/pylibraft/pylibraft/__init__.py +++ b/python/pylibraft/pylibraft/__init__.py @@ -13,4 +13,4 @@ # limitations under the License. # -__version__ = "23.04.00" +__version__ = "23.04.01" diff --git a/python/pylibraft/pyproject.toml b/python/pylibraft/pyproject.toml index fed15bbab0..359e93faea 100644 --- a/python/pylibraft/pyproject.toml +++ b/python/pylibraft/pyproject.toml @@ -28,7 +28,7 @@ build-backend = "setuptools.build_meta" [project] name = "pylibraft" -version = "23.04.00" +version = "23.04.01" description = "RAFT: Reusable Algorithms Functions and other Tools" readme = { file = "README.md", content-type = "text/markdown" } authors = [ diff --git a/python/raft-dask/CMakeLists.txt b/python/raft-dask/CMakeLists.txt index 49e7f50c27..fa15c4cc9d 100644 --- a/python/raft-dask/CMakeLists.txt +++ b/python/raft-dask/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(raft_dask_version 23.04.00) +set(raft_dask_version 23.04.01) include(../../fetch_rapids.cmake) diff --git a/python/raft-dask/pyproject.toml b/python/raft-dask/pyproject.toml index ba6cd7ccae..467fe6ab76 100644 --- a/python/raft-dask/pyproject.toml +++ b/python/raft-dask/pyproject.toml @@ -25,7 +25,7 @@ requires = [ [project] name = "raft-dask" -version = "23.04.00" +version = "23.04.01" description = "Reusable Accelerated Functions & Tools Dask Infrastructure" readme = { file = "README.md", content-type = "text/markdown" } authors = [ diff --git a/python/raft-dask/raft_dask/__init__.py b/python/raft-dask/raft_dask/__init__.py index 4f4700df48..7d26626f6b 100644 --- a/python/raft-dask/raft_dask/__init__.py +++ b/python/raft-dask/raft_dask/__init__.py @@ -13,4 +13,4 @@ # limitations under the License. 
# -__version__ = "23.04.00" +__version__ = "23.04.01" From c931b613c51e5fce03db651cce8be295b4ac1e9a Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Wed, 7 Jun 2023 11:34:48 -0400 Subject: [PATCH 17/75] REL v23.06.00 release --- conda/environments/all_cuda-118_arch-x86_64.yaml | 2 +- dependencies.yaml | 2 +- docs/source/conf.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index c32596f5b6..9cb299889d 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -54,6 +54,6 @@ dependencies: - sphinx-markdown-tables - sysroot_linux-64==2.17 - ucx-proc=*=gpu -- ucx-py==0.32.* +- ucx-py=0.32.* - ucx>=1.13.0 name: all_cuda-118_arch-x86_64 diff --git a/dependencies.yaml b/dependencies.yaml index f58d931e42..97d5731881 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -276,7 +276,7 @@ dependencies: - joblib>=0.11 - numba>=0.57 - *numpy - - ucx-py==0.32.* + - ucx-py=0.32.* - output_types: conda packages: - dask-core==2023.3.2 diff --git a/docs/source/conf.py b/docs/source/conf.py index afc1fb9b8b..acf6bae362 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -70,9 +70,9 @@ version = '23.06' # The full version, including alpha/beta/rc tags. <<<<<<< HEAD -release = '23.04.01' +release = '23.06.00' ||||||| 994e6c8b -release = '23.04.00' +release = '23.06.00' ======= release = '23.06.00' >>>>>>> upstream/branch-23.06 From 9147c9078c6c3be70e7cf4f1202018ba8c1d1b2e Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Mon, 12 Jun 2023 11:49:07 -0400 Subject: [PATCH 18/75] REL v23.06.01 release --- cpp/CMakeLists.txt | 2 +- docs/source/conf.py | 6 +++--- python/pylibraft/CMakeLists.txt | 2 +- python/pylibraft/pylibraft/__init__.py | 2 +- python/pylibraft/pyproject.toml | 2 +- python/raft-dask/CMakeLists.txt | 2 +- python/raft-dask/pyproject.toml | 2 +- python/raft-dask/raft_dask/__init__.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index eb35554768..317a21dce3 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -11,7 +11,7 @@ # or implied. See the License for the specific language governing permissions and limitations under # the License. set(RAPIDS_VERSION "23.06") -set(RAFT_VERSION "23.06.00") +set(RAFT_VERSION "23.06.01") cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) include(../fetch_rapids.cmake) diff --git a/docs/source/conf.py b/docs/source/conf.py index acf6bae362..d0397cff8c 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -70,11 +70,11 @@ version = '23.06' # The full version, including alpha/beta/rc tags. <<<<<<< HEAD -release = '23.06.00' +release = '23.06.01' ||||||| 994e6c8b -release = '23.06.00' +release = '23.06.01' ======= -release = '23.06.00' +release = '23.06.01' >>>>>>> upstream/branch-23.06 # The language for content autogenerated by Sphinx. 
Refer to documentation diff --git a/python/pylibraft/CMakeLists.txt b/python/pylibraft/CMakeLists.txt index 069bd98222..21b3979190 100644 --- a/python/pylibraft/CMakeLists.txt +++ b/python/pylibraft/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(pylibraft_version 23.06.00) +set(pylibraft_version 23.06.01) include(../../fetch_rapids.cmake) diff --git a/python/pylibraft/pylibraft/__init__.py b/python/pylibraft/pylibraft/__init__.py index aebaa4e272..6c3da9d43b 100644 --- a/python/pylibraft/pylibraft/__init__.py +++ b/python/pylibraft/pylibraft/__init__.py @@ -13,4 +13,4 @@ # limitations under the License. # -__version__ = "23.06.00" +__version__ = "23.06.01" diff --git a/python/pylibraft/pyproject.toml b/python/pylibraft/pyproject.toml index 4aa11b41ea..1520a2785c 100644 --- a/python/pylibraft/pyproject.toml +++ b/python/pylibraft/pyproject.toml @@ -28,7 +28,7 @@ build-backend = "setuptools.build_meta" [project] name = "pylibraft" -version = "23.06.00" +version = "23.06.01" description = "RAFT: Reusable Algorithms Functions and other Tools" readme = { file = "README.md", content-type = "text/markdown" } authors = [ diff --git a/python/raft-dask/CMakeLists.txt b/python/raft-dask/CMakeLists.txt index b157abf309..79a9dce3b0 100644 --- a/python/raft-dask/CMakeLists.txt +++ b/python/raft-dask/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(raft_dask_version 23.06.00) +set(raft_dask_version 23.06.01) include(../../fetch_rapids.cmake) diff --git a/python/raft-dask/pyproject.toml b/python/raft-dask/pyproject.toml index 45563e4281..1fbadd517f 100644 --- a/python/raft-dask/pyproject.toml +++ b/python/raft-dask/pyproject.toml @@ -25,7 +25,7 @@ requires = [ [project] name = "raft-dask" -version = "23.06.00" +version = "23.06.01" description = "Reusable Accelerated Functions & Tools Dask Infrastructure" readme = { file = "README.md", content-type = "text/markdown" } authors = [ diff --git a/python/raft-dask/raft_dask/__init__.py b/python/raft-dask/raft_dask/__init__.py index 9582da4851..0c85866a77 100644 --- a/python/raft-dask/raft_dask/__init__.py +++ b/python/raft-dask/raft_dask/__init__.py @@ -13,4 +13,4 @@ # limitations under the License. # -__version__ = "23.06.00" +__version__ = "23.06.01" From 7dd2f6dcc1425f33601afb6fa5652b1f3346d885 Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Wed, 5 Jul 2023 13:06:24 -0400 Subject: [PATCH 19/75] REL v23.06.02 release --- cpp/CMakeLists.txt | 2 +- docs/source/conf.py | 6 +++--- python/pylibraft/CMakeLists.txt | 2 +- python/pylibraft/pylibraft/__init__.py | 2 +- python/pylibraft/pyproject.toml | 2 +- python/raft-dask/CMakeLists.txt | 2 +- python/raft-dask/pyproject.toml | 2 +- python/raft-dask/raft_dask/__init__.py | 2 +- 8 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt index 317a21dce3..9f3031c6d2 100644 --- a/cpp/CMakeLists.txt +++ b/cpp/CMakeLists.txt @@ -11,7 +11,7 @@ # or implied. See the License for the specific language governing permissions and limitations under # the License. set(RAPIDS_VERSION "23.06") -set(RAFT_VERSION "23.06.01") +set(RAFT_VERSION "23.06.02") cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) include(../fetch_rapids.cmake) diff --git a/docs/source/conf.py b/docs/source/conf.py index d0397cff8c..62fb2b2148 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -70,11 +70,11 @@ version = '23.06' # The full version, including alpha/beta/rc tags. 
<<<<<<< HEAD -release = '23.06.01' +release = '23.06.02' ||||||| 994e6c8b -release = '23.06.01' +release = '23.06.02' ======= -release = '23.06.01' +release = '23.06.02' >>>>>>> upstream/branch-23.06 # The language for content autogenerated by Sphinx. Refer to documentation diff --git a/python/pylibraft/CMakeLists.txt b/python/pylibraft/CMakeLists.txt index 21b3979190..c498ede26e 100644 --- a/python/pylibraft/CMakeLists.txt +++ b/python/pylibraft/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(pylibraft_version 23.06.01) +set(pylibraft_version 23.06.02) include(../../fetch_rapids.cmake) diff --git a/python/pylibraft/pylibraft/__init__.py b/python/pylibraft/pylibraft/__init__.py index 6c3da9d43b..ed57e3d7fa 100644 --- a/python/pylibraft/pylibraft/__init__.py +++ b/python/pylibraft/pylibraft/__init__.py @@ -13,4 +13,4 @@ # limitations under the License. # -__version__ = "23.06.01" +__version__ = "23.06.02" diff --git a/python/pylibraft/pyproject.toml b/python/pylibraft/pyproject.toml index 1520a2785c..ac60af89d1 100644 --- a/python/pylibraft/pyproject.toml +++ b/python/pylibraft/pyproject.toml @@ -28,7 +28,7 @@ build-backend = "setuptools.build_meta" [project] name = "pylibraft" -version = "23.06.01" +version = "23.06.02" description = "RAFT: Reusable Algorithms Functions and other Tools" readme = { file = "README.md", content-type = "text/markdown" } authors = [ diff --git a/python/raft-dask/CMakeLists.txt b/python/raft-dask/CMakeLists.txt index 79a9dce3b0..c2623383ae 100644 --- a/python/raft-dask/CMakeLists.txt +++ b/python/raft-dask/CMakeLists.txt @@ -14,7 +14,7 @@ cmake_minimum_required(VERSION 3.23.1 FATAL_ERROR) -set(raft_dask_version 23.06.01) +set(raft_dask_version 23.06.02) include(../../fetch_rapids.cmake) diff --git a/python/raft-dask/pyproject.toml b/python/raft-dask/pyproject.toml index 1fbadd517f..0d0919f421 100644 --- a/python/raft-dask/pyproject.toml +++ b/python/raft-dask/pyproject.toml @@ -25,7 +25,7 @@ requires = [ [project] name = "raft-dask" -version = "23.06.01" +version = "23.06.02" description = "Reusable Accelerated Functions & Tools Dask Infrastructure" readme = { file = "README.md", content-type = "text/markdown" } authors = [ diff --git a/python/raft-dask/raft_dask/__init__.py b/python/raft-dask/raft_dask/__init__.py index 0c85866a77..fd509419a4 100644 --- a/python/raft-dask/raft_dask/__init__.py +++ b/python/raft-dask/raft_dask/__init__.py @@ -13,4 +13,4 @@ # limitations under the License. 
 #
-__version__ = "23.06.01"
+__version__ = "23.06.02"
From e588d7b52d209f0df460e8d08dba62768c69417e Mon Sep 17 00:00:00 2001
From: Ray Douglass
Date: Wed, 9 Aug 2023 13:02:45 -0400
Subject: [PATCH 20/75] REL v23.08.00 release
From afdddfb3d0c1612b42e85fc2da9c25f5a8f3f72f Mon Sep 17 00:00:00 2001
From: Ray Douglass
Date: Wed, 11 Oct 2023 11:00:06 -0400
Subject: [PATCH 21/75] REL v23.10.00 release
From 599651efb899cfd5995bea65c33a2bbd00dff5c2 Mon Sep 17 00:00:00 2001
From: Ray Douglass
Date: Wed, 6 Dec 2023 10:47:17 -0500
Subject: [PATCH 22/75] REL v23.12.00 release
---
 python/pylibraft/pyproject.toml | 2 +-
 python/raft-dask/pyproject.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/python/pylibraft/pyproject.toml b/python/pylibraft/pyproject.toml
index b946a3b942..f50d435309 100644
--- a/python/pylibraft/pyproject.toml
+++ b/python/pylibraft/pyproject.toml
@@ -65,7 +65,7 @@ Documentation = "https://docs.rapids.ai/api/raft/stable/"
 license-files = ["LICENSE"]
 [tool.setuptools.dynamic]
-version = {file = "pylibraft/VERSION"}
+version = "23.12.00"
 [tool.isort]
 line_length = 79
diff --git a/python/raft-dask/pyproject.toml b/python/raft-dask/pyproject.toml
index be030f839d..871e3c3aa5 100644
--- a/python/raft-dask/pyproject.toml
+++ b/python/raft-dask/pyproject.toml
@@ -63,7 +63,7 @@ Documentation = "https://docs.rapids.ai/api/raft/stable/"
 license-files = ["LICENSE"]
 [tool.setuptools.dynamic]
-version = {file = "raft_dask/VERSION"}
+version = "23.12.00"
 [tool.isort]
 line_length = 79
From 9e2d627736ec60a92f0e44391f5bba27d572ff2d Mon Sep 17 00:00:00 2001
From: Ray Douglass
Date: Wed, 6 Dec 2023 11:27:22 -0500
Subject: [PATCH 23/75] REL Revert update-version.sh changes for release
This reverts commit 599651efb899cfd5995bea65c33a2bbd00dff5c2.
--- python/pylibraft/pyproject.toml | 2 +- python/raft-dask/pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/pylibraft/pyproject.toml b/python/pylibraft/pyproject.toml index f50d435309..b946a3b942 100644 --- a/python/pylibraft/pyproject.toml +++ b/python/pylibraft/pyproject.toml @@ -65,7 +65,7 @@ Documentation = "https://docs.rapids.ai/api/raft/stable/" license-files = ["LICENSE"] [tool.setuptools.dynamic] -version = "23.12.00" +version = {file = "pylibraft/VERSION"} [tool.isort] line_length = 79 diff --git a/python/raft-dask/pyproject.toml b/python/raft-dask/pyproject.toml index 871e3c3aa5..be030f839d 100644 --- a/python/raft-dask/pyproject.toml +++ b/python/raft-dask/pyproject.toml @@ -63,7 +63,7 @@ Documentation = "https://docs.rapids.ai/api/raft/stable/" license-files = ["LICENSE"] [tool.setuptools.dynamic] -version = "23.12.00" +version = {file = "raft_dask/VERSION"} [tool.isort] line_length = 79 From 698d6c7b2203ed423d283bc268796eb828b2098d Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Mon, 12 Feb 2024 16:23:40 -0500 Subject: [PATCH 24/75] REL v24.02.00 release From fa44bcc8149ec1b760c3cd8948a460cba0bc4f40 Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Wed, 10 Apr 2024 11:06:34 -0400 Subject: [PATCH 25/75] REL v24.04.00 release --- .github/workflows/pr.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index ada46141a7..048faa3a99 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -104,7 +104,7 @@ jobs: script: ci/test_wheel_raft_dask.sh devcontainer: secrets: inherit - uses: rapidsai/shared-workflows/.github/workflows/build-in-devcontainer.yaml@fix/devcontainer-json-location + uses: rapidsai/shared-workflows/.github/workflows/build-in-devcontainer.yaml@branch-24.04 with: arch: '["amd64"]' cuda: '["12.2"]' From 63a506d361a4d37ba063f1ed4e72526efbc0699c Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Wed, 5 Jun 2024 11:05:38 -0400 Subject: [PATCH 26/75] REL v24.06.00 release From 427ea26d68bc8a0629c22dad6db1b4ee4623212d Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 5 Jun 2024 12:14:21 -0400 Subject: [PATCH 27/75] add in support for preprocessing with bm25 and tfidf --- .../sparse/matrix/detail/preprocessing.cuh | 215 ++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 cpp/include/raft/sparse/matrix/detail/preprocessing.cuh diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh new file mode 100644 index 0000000000..d3098d855d --- /dev/null +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +struct bm25 { + bm25(int num_docs, float avg_doc_len, float k_param, float b_param) + { + total_docs = num_docs; + avg_doc_length = avg_doc_len; + k = k_param; + b = b_param; + } + + template + float __device__ operator()(const T1& values, const T1& doc_length, const T1& num_docs_term_occ) + { + return raft::log(total_docs / (1 + num_docs_term_occ)) * + ((values * (k + 1)) / (values + k * (1 - b + b * (doc_length / avg_doc_length)))); + } + float avg_doc_length; + int total_docs; + float k; + float b; +}; + +struct tfidf { + tfidf(int total_docs_param) { total_docs = total_docs_param; } + + template + float __device__ operator()(const T1& values, const T2& num_docs_term_occ) + { + return raft::log(1 + values) * raft::log(total_docs / (1 + num_docs_term_occ)); + } + int total_docs; +}; + +template +struct mapper { + mapper(raft::device_vector_view map) : map(map) {} + + __host__ __device__ void operator()(T& value) const + { + const T& new_value = map[value]; + if (new_value) { + value = new_value; + } else { + value = 0; + } + } + + raft::device_vector_view map; +}; + +template +void get_uniques_counts(raft::resources& handle, + raft::device_vector_view sort_vector, + raft::device_vector_view secondary_vector, + raft::device_vector_view data, + raft::device_vector_view itr_vals, + raft::device_vector_view keys_out, + raft::device_vector_view counts_out) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + raft::sparse::op::coo_sort(sort_vector.size(), + secondary_vector.size(), + data.size(), + sort_vector.data_handle(), + secondary_vector.data_handle(), + data.data_handle(), + stream); + + thrust::reduce_by_key(raft::resource::get_thrust_policy(handle), + sort_vector.data_handle(), + sort_vector.data_handle() + sort_vector.size(), + itr_vals.data_handle(), + keys_out.data_handle(), + counts_out.data_handle()); +} + +template +void create_mapped_vector(raft::resources& handle, + raft::device_vector_view origin, + raft::device_vector_view keys, + raft::device_vector_view counts, + raft::device_vector_view result) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + auto host_keys = raft::make_host_vector(handle, keys.size()); + + raft::copy(host_keys.data_handle(), keys.data_handle(), keys.size(), stream); + raft::linalg::map(handle, result, raft::cast_op{}, raft::make_const_mdspan(origin)); + auto origin_map = raft::make_device_vector(handle, host_keys(host_keys.size()) + 1); + + thrust::scatter(raft::resource::get_thrust_policy(handle), + counts.data_handle(), + counts.data_handle() + counts.size(), + keys.data_handle(), + origin_map.data_handle()); + + thrust::for_each(raft::resource::get_thrust_policy(handle), + result.data_handle(), + result.data_handle() + result.size(), + mapper(raft::make_const_mdspan(origin_map.view()))); +} + +template +std::tuple sparse_search_preprocess(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view doc_lengths, + raft::device_vector_view term_counts) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + + auto num_rows = + raft::sparse::neighbors::get_n_components(rows.data_handle(), rows.size(), stream); + + auto row_keys = raft::make_device_vector(handle, num_rows); + auto row_counts = raft::make_device_vector(handle, num_rows); + auto row_fill = 
raft::make_device_vector(handle, rows.size()); + + // the amount of columns(documents) that each row(term) is found in + thrust::fill(raft::resource::get_thrust_policy(handle), + row_fill.data_handle(), + row_fill.data_handle() + row_fill.size(), + 1.0f); + get_uniques_counts( + handle, rows, columns, values, row_fill.view(), row_keys.view(), row_counts.view()); + + create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), term_counts); + auto num_cols = + raft::sparse::neighbors::get_n_components(columns.data_handle(), columns.size(), stream); + auto col_keys = raft::make_device_vector(handle, num_cols); + auto col_counts = raft::make_device_vector(handle, num_cols); + + get_uniques_counts(handle, columns, rows, values, values, col_keys.view(), col_counts.view()); + + int total_document_lengths = thrust::reduce(raft::resource::get_thrust_policy(handle), + col_counts.data_handle(), + col_counts.data_handle() + col_counts.size()); + float avg_doc_length = float(total_document_lengths) / col_keys.size(); + + create_mapped_vector( + handle, columns, col_keys.view(), col_counts.view(), doc_lengths); + return {col_keys.size(), avg_doc_length}; +} + +template +void encode_tfidf(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view values_out) +{ + auto doc_lengths = raft::make_device_vector(handle, columns.size()); + auto term_counts = raft::make_device_vector(handle, rows.size()); + auto [doc_count, avg_doc_length] = sparse_search_preprocess( + handle, rows, columns, values, doc_lengths.view(), term_counts.view()); + + raft::linalg::map(handle, + values_out, + tfidf(doc_count), + raft::make_const_mdspan(values), + raft::make_const_mdspan(term_counts.view())); +} + +template +void encode_bm25(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view values_out, + float k_param = 1.6f, + float b_param = 0.75) +{ + auto doc_lengths = raft::make_device_vector(handle, columns.size()); + auto term_counts = raft::make_device_vector(handle, rows.size()); + auto [doc_count, avg_doc_length] = sparse_search_preprocess( + handle, rows, columns, values, doc_lengths.view(), term_counts.view()); + + raft::linalg::map(handle, + values_out, + bm25(doc_count, avg_doc_length, k_param, b_param), + raft::make_const_mdspan(values), + raft::make_const_mdspan(doc_lengths.view()), + raft::make_const_mdspan(term_counts.view())); +} \ No newline at end of file From ffbfbc7f18621fe21f0995dc42cdcec0620246ba Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Thu, 6 Jun 2024 17:32:29 -0400 Subject: [PATCH 28/75] add in test cases and header file --- .../sparse/matrix/detail/preprocessing.cuh | 20 ++-- .../raft/sparse/matrix/preprocessing.cuh | 41 ++++++++ cpp/test/CMakeLists.txt | 36 +++---- cpp/test/sparse/preprocess.cu | 97 +++++++++++++++++++ 4 files changed, 160 insertions(+), 34 deletions(-) create mode 100644 cpp/include/raft/sparse/matrix/preprocessing.cuh create mode 100644 cpp/test/sparse/preprocess.cu diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index d3098d855d..7489658066 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -23,12 +23,13 @@ #include #include #include -#include #include #include #include +namespace raft::sparse::matrix::detail { + 
struct bm25 { bm25(int num_docs, float avg_doc_len, float k_param, float b_param) { @@ -192,17 +193,17 @@ void encode_tfidf(raft::resources& handle, raft::make_const_mdspan(term_counts.view())); } -template +template void encode_bm25(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view values_out, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view values_out, float k_param = 1.6f, float b_param = 0.75) { - auto doc_lengths = raft::make_device_vector(handle, columns.size()); - auto term_counts = raft::make_device_vector(handle, rows.size()); + auto doc_lengths = raft::make_device_vector(handle, columns.size()); + auto term_counts = raft::make_device_vector(handle, rows.size()); auto [doc_count, avg_doc_length] = sparse_search_preprocess( handle, rows, columns, values, doc_lengths.view(), term_counts.view()); @@ -212,4 +213,5 @@ void encode_bm25(raft::resources& handle, raft::make_const_mdspan(values), raft::make_const_mdspan(doc_lengths.view()), raft::make_const_mdspan(term_counts.view())); -} \ No newline at end of file +} +} // namespace raft::sparse::matrix::detail \ No newline at end of file diff --git a/cpp/include/raft/sparse/matrix/preprocessing.cuh b/cpp/include/raft/sparse/matrix/preprocessing.cuh new file mode 100644 index 0000000000..8386dc4bc8 --- /dev/null +++ b/cpp/include/raft/sparse/matrix/preprocessing.cuh @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace raft::sparse::matrix { + +template +void encode_bm25(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view values_out, + float k_param = 1.6f, + float b_param = 0.75) +{ + return matrix::detail::encode_bm25( + handle, rows, columns, values, values_out, k_param, b_param); +} +} // namespace raft::sparse::matrix diff --git a/cpp/test/CMakeLists.txt b/cpp/test/CMakeLists.txt index ff0518a4d0..2cd5a29ea0 100644 --- a/cpp/test/CMakeLists.txt +++ b/cpp/test/CMakeLists.txt @@ -96,17 +96,8 @@ endfunction() if(BUILD_TESTS) ConfigureTest( - NAME - CLUSTER_TEST - PATH - cluster/kmeans.cu - cluster/kmeans_balanced.cu - cluster/kmeans_find_k.cu - cluster/cluster_solvers.cu - cluster/linkage.cu - cluster/spectral.cu - LIB - EXPLICIT_INSTANTIATE_ONLY + NAME CLUSTER_TEST PATH cluster/kmeans.cu cluster/kmeans_balanced.cu cluster/kmeans_find_k.cu + cluster/cluster_solvers.cu cluster/linkage.cu cluster/spectral.cu LIB EXPLICIT_INSTANTIATE_ONLY ) ConfigureTest( @@ -143,8 +134,8 @@ if(BUILD_TESTS) ) ConfigureTest( - NAME CORE_TEST PATH core/stream_view.cpp core/mdspan_copy.cpp LIB - EXPLICIT_INSTANTIATE_ONLY NOCUDA + NAME CORE_TEST PATH core/stream_view.cpp core/mdspan_copy.cpp LIB EXPLICIT_INSTANTIATE_ONLY + NOCUDA ) ConfigureTest( @@ -299,8 +290,8 @@ if(BUILD_TESTS) ) ConfigureTest( - NAME SOLVERS_TEST PATH cluster/cluster_solvers_deprecated.cu linalg/eigen_solvers.cu - lap/lap.cu sparse/mst.cu LIB EXPLICIT_INSTANTIATE_ONLY + NAME SOLVERS_TEST PATH cluster/cluster_solvers_deprecated.cu linalg/eigen_solvers.cu lap/lap.cu + sparse/mst.cu LIB EXPLICIT_INSTANTIATE_ONLY ) ConfigureTest( @@ -325,22 +316,17 @@ if(BUILD_TESTS) sparse/spgemmi.cu sparse/spmm.cu sparse/symmetrize.cu + sparse/preprocess.cu ) ConfigureTest( - NAME SPARSE_DIST_TEST PATH sparse/dist_coo_spmv.cu sparse/distance.cu - sparse/gram.cu LIB EXPLICIT_INSTANTIATE_ONLY + NAME SPARSE_DIST_TEST PATH sparse/dist_coo_spmv.cu sparse/distance.cu sparse/gram.cu LIB + EXPLICIT_INSTANTIATE_ONLY ) ConfigureTest( - NAME - SPARSE_NEIGHBORS_TEST - PATH - sparse/neighbors/cross_component_nn.cu - sparse/neighbors/brute_force.cu - sparse/neighbors/knn_graph.cu - LIB - EXPLICIT_INSTANTIATE_ONLY + NAME SPARSE_NEIGHBORS_TEST PATH sparse/neighbors/cross_component_nn.cu + sparse/neighbors/brute_force.cu sparse/neighbors/knn_graph.cu LIB EXPLICIT_INSTANTIATE_ONLY ) ConfigureTest( diff --git a/cpp/test/sparse/preprocess.cu b/cpp/test/sparse/preprocess.cu new file mode 100644 index 0000000000..7dbe7c5e0a --- /dev/null +++ b/cpp/test/sparse/preprocess.cu @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "../test_utils.cuh" + +#include +#include +#include +#include +#include + +#include + +#include +#include +// #include +// #include +// #include + +namespace raft { +namespace sparse { + +template +struct SparsePreprocessInputs { + std::vector rows_h; + std::vector columns_h; + std::vector values_h; +}; + +template +class SparsePreprocessTest + : public ::testing::TestWithParam> { + public: + SparsePreprocessTest() + : params(::testing::TestWithParam>::GetParam()), + stream(resource::get_cuda_stream(handle)), + rows(params.rows_h.size(), stream), + columns(params.columns_h.size(), stream), + values(params.values_h.size(), stream), + result(params.values_h.size(), stream) + { + } + + protected: + void SetUp() override {} + + void Run() + { + auto rows = raft::make_device_vector(handle, params.rows_h.size()); + auto columns = raft::make_device_vector(handle, params.columns_h.size()); + auto values = raft::make_device_vector(handle, params.values_h.size()); + auto result = raft::make_device_vector(handle, params.values_h.size()); + + sparse::matrix::encode_bm25( + handle, rows.view(), columns.view(), values.view(), result.view()); + + RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); + + ASSERT_TRUE(values.size() == values.size()); + // raft::devArrMatch(, nnz, raft::Compare())); + } + + protected: + raft::resources handle; + cudaStream_t stream; + + SparsePreprocessInputs params; + rmm::device_uvector rows, columns; + rmm::device_uvector values, result; +}; +using SparsePreprocessTestF = SparsePreprocessTest; +TEST_P(SparsePreprocessTestF, Result) { Run(); } + +const std::vector> sparse_preprocess_inputs = { + {{0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, + {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, + {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, +}; + +INSTANTIATE_TEST_CASE_P(SparsePreprocessTest, + SparsePreprocessTestF, + ::testing::ValuesIn(sparse_preprocess_inputs)); +} // namespace sparse +} // namespace raft \ No newline at end of file From 2d82acabfd945a529df85bece20b1e6cc4df034f Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Tue, 25 Jun 2024 12:52:02 -0400 Subject: [PATCH 29/75] add tfidf coo support --- .../sparse/matrix/detail/preprocessing.cuh | 48 ++++++++---- .../raft/sparse/matrix/preprocessing.cuh | 19 +++++ cpp/test/sparse/preprocess.cu | 74 +++++++++++++++++-- 3 files changed, 121 insertions(+), 20 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 7489658066..755fa7122f 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include #include #include #include @@ -21,6 +22,7 @@ #include #include #include +#include #include #include @@ -107,9 +109,9 @@ void get_uniques_counts(raft::resources& handle, template void create_mapped_vector(raft::resources& handle, - raft::device_vector_view origin, - raft::device_vector_view keys, - raft::device_vector_view counts, + const raft::device_vector_view origin, + const raft::device_vector_view keys, + const raft::device_vector_view counts, raft::device_vector_view result) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); @@ -117,14 +119,14 @@ void create_mapped_vector(raft::resources& handle, raft::copy(host_keys.data_handle(), keys.data_handle(), keys.size(), stream); raft::linalg::map(handle, result, raft::cast_op{}, raft::make_const_mdspan(origin)); - auto origin_map = raft::make_device_vector(handle, host_keys(host_keys.size()) + 1); + int new_key_size = host_keys(host_keys.size() - 1) + 1; + auto origin_map = raft::make_device_vector(handle, new_key_size); thrust::scatter(raft::resource::get_thrust_policy(handle), counts.data_handle(), counts.data_handle() + counts.size(), keys.data_handle(), origin_map.data_handle()); - thrust::for_each(raft::resource::get_thrust_policy(handle), result.data_handle(), result.data_handle() + result.size(), @@ -174,15 +176,35 @@ std::tuple sparse_search_preprocess(raft::resources& handle, return {col_keys.size(), avg_doc_length}; } -template +template +void encode_tfidf(raft::resources& handle, + raft::device_coordinate_structure_view coo_in, + raft::device_vector_view values_out) +{ + auto rows = coo_in.get_rows(); + auto columns = coo_in.get_columns(); + auto values = coo_in.get_elements(); + auto doc_lengths = raft::make_device_vector(handle, columns.size()); + auto term_counts = raft::make_device_vector(handle, rows.size()); + auto [doc_count, avg_doc_length] = sparse_search_preprocess( + handle, rows, columns, values, doc_lengths.view(), term_counts.view()); + + raft::linalg::map(handle, + values_out, + tfidf(doc_count), + raft::make_const_mdspan(values), + raft::make_const_mdspan(term_counts.view())); +} + +template void encode_tfidf(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view values_out) + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view values_out) { - auto doc_lengths = raft::make_device_vector(handle, columns.size()); - auto term_counts = raft::make_device_vector(handle, rows.size()); + auto doc_lengths = raft::make_device_vector(handle, columns.size()); + auto term_counts = raft::make_device_vector(handle, rows.size()); auto [doc_count, avg_doc_length] = sparse_search_preprocess( handle, rows, columns, values, doc_lengths.view(), term_counts.view()); @@ -200,7 +222,7 @@ void encode_bm25(raft::resources& handle, raft::device_vector_view values, raft::device_vector_view values_out, float k_param = 1.6f, - float b_param = 0.75) + float b_param = 0.75f) { auto doc_lengths = raft::make_device_vector(handle, columns.size()); auto term_counts = raft::make_device_vector(handle, rows.size()); diff --git a/cpp/include/raft/sparse/matrix/preprocessing.cuh b/cpp/include/raft/sparse/matrix/preprocessing.cuh index 8386dc4bc8..510819c1b5 100644 --- a/cpp/include/raft/sparse/matrix/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/preprocessing.cuh @@ -38,4 +38,23 @@ void encode_bm25(raft::resources& handle, return 
matrix::detail::encode_bm25( handle, rows, columns, values, values_out, k_param, b_param); } + +template +void encode_tfidf(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view values_out) +{ + return matrix::detail::encode_tfidf(handle, rows, columns, values, values_out); +} + +template +void encode_tfidf(raft::resources& handle, + raft::device_coordinate_structure_view coo_in, + raft::device_vector_view values_out) +{ + return matrix::detail::encode_tfidf(handle, coo_in, values_out); +} + } // namespace raft::sparse::matrix diff --git a/cpp/test/sparse/preprocess.cu b/cpp/test/sparse/preprocess.cu index 7dbe7c5e0a..e5975289d6 100644 --- a/cpp/test/sparse/preprocess.cu +++ b/cpp/test/sparse/preprocess.cu @@ -16,6 +16,7 @@ #include "../test_utils.cuh" +#include #include #include #include @@ -41,10 +42,9 @@ struct SparsePreprocessInputs { }; template -class SparsePreprocessTest - : public ::testing::TestWithParam> { +class SparseBM25Test : public ::testing::TestWithParam> { public: - SparsePreprocessTest() + SparseBM25Test() : params(::testing::TestWithParam>::GetParam()), stream(resource::get_cuda_stream(handle)), rows(params.rows_h.size(), stream), @@ -81,8 +81,63 @@ class SparsePreprocessTest rmm::device_uvector rows, columns; rmm::device_uvector values, result; }; -using SparsePreprocessTestF = SparsePreprocessTest; -TEST_P(SparsePreprocessTestF, Result) { Run(); } + +template +class SparseTFIDFTest : public ::testing::TestWithParam> { + public: + SparseTFIDFTest() + : params(::testing::TestWithParam>::GetParam()), + stream(resource::get_cuda_stream(handle)), + rows(params.rows_h.size(), stream), + columns(params.columns_h.size(), stream), + values(params.values_h.size(), stream), + result(params.values_h.size(), stream) + { + } + + protected: + void SetUp() override {} + + void Run() + { + auto rows = raft::make_device_vector(handle, params.rows_h.size()); + auto columns = raft::make_device_vector(handle, params.columns_h.size()); + auto values = raft::make_device_vector(handle, params.values_h.size()); + auto result = raft::make_device_vector(handle, params.values_h.size()); + + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + + auto coo_view = raft::make_device_coordinate_structure_view(handle, + params.rows_h.data(), + params.columns_h.data(), + params.rows_h.size(), + params.columns_h.size(), + params.values_h.size()); + raft::update_device( + coo_view.get_elements().data(), params.values_h.data(), params.values_h.size(), stream); + + sparse::matrix::encode_tfidf(handle, coo_view, result.view()); + + RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); + + ASSERT_TRUE(values.size() == result.size()); + // raft::devArrMatch(, nnz, raft::Compare())); + } + + protected: + raft::resources handle; + cudaStream_t stream; + + SparsePreprocessInputs params; + rmm::device_uvector rows, columns; + rmm::device_uvector values, result; +}; + +using SparseBM25TestF = SparseBM25Test; +TEST_P(SparseBM25TestF, Result) { Run(); } + +using SparseTFIDFTestF = SparseBM25Test; +TEST_P(SparseTFIDFTestF, Result) { Run(); } const std::vector> sparse_preprocess_inputs = { {{0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, @@ -90,8 +145,13 @@ const std::vector> sparse_preprocess_inputs = {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, }; -INSTANTIATE_TEST_CASE_P(SparsePreprocessTest, - SparsePreprocessTestF, +INSTANTIATE_TEST_CASE_P(SparseBM25Test, + SparseBM25TestF, 
::testing::ValuesIn(sparse_preprocess_inputs)); + +INSTANTIATE_TEST_CASE_P(SparseTFIDFTest, + SparseTFIDFTestF, + ::testing::ValuesIn(sparse_preprocess_inputs)); + } // namespace sparse } // namespace raft \ No newline at end of file From dc01bc13bb260c1d574213097a46ebd704c13ccf Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Tue, 25 Jun 2024 15:18:23 -0400 Subject: [PATCH 30/75] add in header for coo tfidf --- .../sparse/matrix/detail/preprocessing.cuh | 30 +++++++++++-------- .../raft/sparse/matrix/preprocessing.cuh | 8 +++++ 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 755fa7122f..59c9f06bd1 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -181,19 +181,10 @@ void encode_tfidf(raft::resources& handle, raft::device_coordinate_structure_view coo_in, raft::device_vector_view values_out) { - auto rows = coo_in.get_rows(); - auto columns = coo_in.get_columns(); - auto values = coo_in.get_elements(); - auto doc_lengths = raft::make_device_vector(handle, columns.size()); - auto term_counts = raft::make_device_vector(handle, rows.size()); - auto [doc_count, avg_doc_length] = sparse_search_preprocess( - handle, rows, columns, values, doc_lengths.view(), term_counts.view()); - - raft::linalg::map(handle, - values_out, - tfidf(doc_count), - raft::make_const_mdspan(values), - raft::make_const_mdspan(term_counts.view())); + auto rows = coo_in.get_rows(); + auto columns = coo_in.get_columns(); + auto values = coo_in.get_elements(); + encode_tfidf(handle, rows, columns, values, values_out); } template @@ -215,6 +206,19 @@ void encode_tfidf(raft::resources& handle, raft::make_const_mdspan(term_counts.view())); } +template +void encode_bm25(raft::resources& handle, + raft::device_coordinate_structure_view coo_in, + raft::device_vector_view values_out, + float k_param = 1.6f, + float b_param = 0.75f) +{ + auto rows = coo_in.get_rows(); + auto columns = coo_in.get_columns(); + auto values = coo_in.get_elements(); + encode_bm25(handle, rows, columns, values, values_out, k_param, b_param); +} + template void encode_bm25(raft::resources& handle, raft::device_vector_view rows, diff --git a/cpp/include/raft/sparse/matrix/preprocessing.cuh b/cpp/include/raft/sparse/matrix/preprocessing.cuh index 510819c1b5..95898d0403 100644 --- a/cpp/include/raft/sparse/matrix/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/preprocessing.cuh @@ -39,6 +39,14 @@ void encode_bm25(raft::resources& handle, handle, rows, columns, values, values_out, k_param, b_param); } +template +void encode_bm25(raft::resources& handle, + raft::device_coordinate_structure_view coo_in, + raft::device_vector_view values_out) +{ + return matrix::detail::encode_bm25(handle, coo_in, values_out); +} + template void encode_tfidf(raft::resources& handle, raft::device_vector_view rows, From 6f4745d3ba5e36ba9ba21a93219aea373c21ce35 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 26 Jun 2024 00:32:18 -0400 Subject: [PATCH 31/75] add bm25 test support coo in and refactor tfidf support --- .../sparse/matrix/detail/preprocessing.cuh | 48 +++++++--- .../raft/sparse/matrix/preprocessing.cuh | 4 +- cpp/test/sparse/preprocess.cu | 89 ++++++------------- 3 files changed, 68 insertions(+), 73 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh 
index 59c9f06bd1..808d18c40b 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -178,13 +178,27 @@ std::tuple sparse_search_preprocess(raft::resources& handle, template void encode_tfidf(raft::resources& handle, - raft::device_coordinate_structure_view coo_in, + raft::device_coo_matrix_view coo_in, raft::device_vector_view values_out) { - auto rows = coo_in.get_rows(); - auto columns = coo_in.get_columns(); - auto values = coo_in.get_elements(); - encode_tfidf(handle, rows, columns, values, values_out); + auto rows = raft::make_device_vector_view(coo_in.structure_view().get_rows().data(), + coo_in.structure_view().get_rows().size()); + auto columns = raft::make_device_vector_view(coo_in.structure_view().get_cols().data(), + coo_in.structure_view().get_cols().size()); + auto values = + raft::make_device_vector_view(coo_in.get_elements().data(), coo_in.get_elements().size()); + ; + + auto doc_lengths = raft::make_device_vector(handle, columns.size()); + auto term_counts = raft::make_device_vector(handle, rows.size()); + auto [doc_count, avg_doc_length] = sparse_search_preprocess( + handle, rows, columns, values, doc_lengths.view(), term_counts.view()); + + raft::linalg::map(handle, + values_out, + tfidf(doc_count), + raft::make_const_mdspan(values), + raft::make_const_mdspan(term_counts.view())); } template @@ -208,15 +222,29 @@ void encode_tfidf(raft::resources& handle, template void encode_bm25(raft::resources& handle, - raft::device_coordinate_structure_view coo_in, + raft::device_coo_matrix_view coo_in, raft::device_vector_view values_out, float k_param = 1.6f, float b_param = 0.75f) { - auto rows = coo_in.get_rows(); - auto columns = coo_in.get_columns(); - auto values = coo_in.get_elements(); - encode_bm25(handle, rows, columns, values, values_out, k_param, b_param); + auto rows = raft::make_device_vector_view(coo_in.structure_view().get_rows().data(), + coo_in.structure_view().get_rows().size()); + auto columns = raft::make_device_vector_view(coo_in.structure_view().get_cols().data(), + coo_in.structure_view().get_cols().size()); + auto values = + raft::make_device_vector_view(coo_in.get_elements().data(), coo_in.get_elements().size()); + ; + auto doc_lengths = raft::make_device_vector(handle, columns.size()); + auto term_counts = raft::make_device_vector(handle, rows.size()); + auto [doc_count, avg_doc_length] = sparse_search_preprocess( + handle, rows, columns, values, doc_lengths.view(), term_counts.view()); + + raft::linalg::map(handle, + values_out, + bm25(doc_count, avg_doc_length, k_param, b_param), + raft::make_const_mdspan(values), + raft::make_const_mdspan(doc_lengths.view()), + raft::make_const_mdspan(term_counts.view())); } template diff --git a/cpp/include/raft/sparse/matrix/preprocessing.cuh b/cpp/include/raft/sparse/matrix/preprocessing.cuh index 95898d0403..263e00bac8 100644 --- a/cpp/include/raft/sparse/matrix/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/preprocessing.cuh @@ -41,7 +41,7 @@ void encode_bm25(raft::resources& handle, template void encode_bm25(raft::resources& handle, - raft::device_coordinate_structure_view coo_in, + raft::device_coo_matrix_view coo_in, raft::device_vector_view values_out) { return matrix::detail::encode_bm25(handle, coo_in, values_out); @@ -59,7 +59,7 @@ void encode_tfidf(raft::resources& handle, template void encode_tfidf(raft::resources& handle, - raft::device_coordinate_structure_view coo_in, + raft::device_coo_matrix_view coo_in, 
raft::device_vector_view values_out) { return matrix::detail::encode_tfidf(handle, coo_in, values_out); diff --git a/cpp/test/sparse/preprocess.cu b/cpp/test/sparse/preprocess.cu index e5975289d6..e9a16419f2 100644 --- a/cpp/test/sparse/preprocess.cu +++ b/cpp/test/sparse/preprocess.cu @@ -42,81 +42,52 @@ struct SparsePreprocessInputs { }; template -class SparseBM25Test : public ::testing::TestWithParam> { +class SparseTest : public ::testing::TestWithParam> { public: - SparseBM25Test() + SparseTest() : params(::testing::TestWithParam>::GetParam()), stream(resource::get_cuda_stream(handle)), rows(params.rows_h.size(), stream), columns(params.columns_h.size(), stream), values(params.values_h.size(), stream), result(params.values_h.size(), stream) + { } protected: void SetUp() override {} - void Run() + void Run(bool bm25_on) { + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + auto rows = raft::make_device_vector(handle, params.rows_h.size()); auto columns = raft::make_device_vector(handle, params.columns_h.size()); auto values = raft::make_device_vector(handle, params.values_h.size()); auto result = raft::make_device_vector(handle, params.values_h.size()); - sparse::matrix::encode_bm25( - handle, rows.view(), columns.view(), values.view(), result.view()); + raft::copy(rows.data_handle(), params.rows_h.data(), params.rows_h.size(), stream); + raft::copy(columns.data_handle(), params.columns_h.data(), params.columns_h.size(), stream); - RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); + raft::copy(values.data_handle(), params.values_h.data(), params.values_h.size(), stream); - ASSERT_TRUE(values.size() == values.size()); - // raft::devArrMatch(, nnz, raft::Compare())); - } + auto coo_structure = raft::make_device_coordinate_structure_view(rows.data_handle(), + columns.data_handle(), + int(rows.size()), + int(columns.size()), + int(values.size())); - protected: - raft::resources handle; - cudaStream_t stream; + auto coo_view = raft::make_device_coo_matrix_view(values.data_handle(), coo_structure); - SparsePreprocessInputs params; - rmm::device_uvector rows, columns; - rmm::device_uvector values, result; -}; - -template -class SparseTFIDFTest : public ::testing::TestWithParam> { - public: - SparseTFIDFTest() - : params(::testing::TestWithParam>::GetParam()), - stream(resource::get_cuda_stream(handle)), - rows(params.rows_h.size(), stream), - columns(params.columns_h.size(), stream), - values(params.values_h.size(), stream), - result(params.values_h.size(), stream) - { - } - - protected: - void SetUp() override {} - - void Run() - { - auto rows = raft::make_device_vector(handle, params.rows_h.size()); - auto columns = raft::make_device_vector(handle, params.columns_h.size()); - auto values = raft::make_device_vector(handle, params.values_h.size()); - auto result = raft::make_device_vector(handle, params.values_h.size()); - - cudaStream_t stream = raft::resource::get_cuda_stream(handle); - - auto coo_view = raft::make_device_coordinate_structure_view(handle, - params.rows_h.data(), - params.columns_h.data(), - params.rows_h.size(), - params.columns_h.size(), - params.values_h.size()); raft::update_device( - coo_view.get_elements().data(), params.values_h.data(), params.values_h.size(), stream); + coo_view.get_elements().data(), values.data_handle(), values.size(), stream); - sparse::matrix::encode_tfidf(handle, coo_view, result.view()); + if (bm25_on) { + sparse::matrix::encode_bm25(handle, coo_view, result.view()); + } else { + sparse::matrix::encode_tfidf(handle, coo_view, 
result.view()); + } RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); @@ -131,13 +102,14 @@ class SparseTFIDFTest : public ::testing::TestWithParam params; rmm::device_uvector rows, columns; rmm::device_uvector values, result; + bool bm25; }; -using SparseBM25TestF = SparseBM25Test; -TEST_P(SparseBM25TestF, Result) { Run(); } +using SparseTestFF = SparseTest; +TEST_P(SparseTestFF, Result) { Run(false); } -using SparseTFIDFTestF = SparseBM25Test; -TEST_P(SparseTFIDFTestF, Result) { Run(); } +using SparseTestFT = SparseTest; +TEST_P(SparseTestFT, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { {{0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, @@ -145,13 +117,8 @@ const std::vector> sparse_preprocess_inputs = {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, }; -INSTANTIATE_TEST_CASE_P(SparseBM25Test, - SparseBM25TestF, - ::testing::ValuesIn(sparse_preprocess_inputs)); - -INSTANTIATE_TEST_CASE_P(SparseTFIDFTest, - SparseTFIDFTestF, - ::testing::ValuesIn(sparse_preprocess_inputs)); +INSTANTIATE_TEST_CASE_P(SparseTest, SparseTestFF, ::testing::ValuesIn(sparse_preprocess_inputs)); +INSTANTIATE_TEST_CASE_P(SparseTest, SparseTestFT, ::testing::ValuesIn(sparse_preprocess_inputs)); } // namespace sparse } // namespace raft \ No newline at end of file From 987ff5e2ce4995dbeb3eb814457004bc92425901 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Fri, 28 Jun 2024 14:28:02 -0400 Subject: [PATCH 32/75] add in long test for coo to csr convert test --- cpp/test/sparse/convert_csr.cu | 39 ++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/cpp/test/sparse/convert_csr.cu b/cpp/test/sparse/convert_csr.cu index 1cd49b0bbd..eee9e1d6ec 100644 --- a/cpp/test/sparse/convert_csr.cu +++ b/cpp/test/sparse/convert_csr.cu @@ -89,6 +89,45 @@ TEST_P(SortedCOOToCSR, Result) INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, SortedCOOToCSR, ::testing::ValuesIn(inputsf)); +typedef SparseConvertCSRTest SortedLongCOOToCSR; +TEST_P(SortedLongCOOToCSR, Result) +{ + cudaStream_t stream; + cudaStreamCreate(&stream); + + int nnz = 90; + int rows_size = 14; + + int* in_h = new int[nnz]{0, 0, 0, 0, 2, 2, 2, 2, 2, 8, 16, 16, 16, 16, 18, 18, 18, 18, + 18, 18, 18, 18, 18, 18, 18, 18, 24, 64, 64, 64, 64, 64, 64, 64, 64, 64, + 64, 64, 64, 64, 64, 66, 66, 66, 66, 66, 66, 66, 66, 72, 74, 80, 80, 80, + 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 82, 82, 82, 82, + 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 88, 90, 90}; + int* exp_h = new int[rows_size]{0, 4, 9, 10, 14, 26, 27, 41, 49, 50, 51, 68, 87, 88}; + + rmm::device_uvector in(nnz, stream); + rmm::device_uvector exp(rows_size, stream); + rmm::device_uvector out(rows_size, stream); + RAFT_CUDA_TRY(cudaMemsetAsync(in.data(), 0, in.size() * sizeof(int), stream)); + RAFT_CUDA_TRY(cudaMemsetAsync(exp.data(), 0, exp.size() * sizeof(int), stream)); + RAFT_CUDA_TRY(cudaMemsetAsync(out.data(), 0, out.size() * sizeof(int), stream)); + + raft::update_device(in.data(), in_h, nnz, stream); + raft::update_device(exp.data(), exp_h, rows_size, stream); + + convert::sorted_coo_to_csr(in.data(), nnz, out.data(), rows_size, stream); + std::cout << "assert is next " << std::endl; + ASSERT_TRUE( + raft::devArrMatch(out.data(), exp.data(), rows_size, raft::Compare(), stream)); + + cudaStreamDestroy(stream); + + delete[] in_h; + delete[] exp_h; +} + +INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, SortedLongCOOToCSR, ::testing::ValuesIn(inputsf)); + /******************************** adj graph ********************************/ template From 
c46008c55e815e75aab49c5f3ea1fcd261624c93 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Fri, 28 Jun 2024 14:31:51 -0400 Subject: [PATCH 33/75] remove unneeded print statement --- cpp/test/sparse/convert_csr.cu | 1 - 1 file changed, 1 deletion(-) diff --git a/cpp/test/sparse/convert_csr.cu b/cpp/test/sparse/convert_csr.cu index eee9e1d6ec..41132a9c48 100644 --- a/cpp/test/sparse/convert_csr.cu +++ b/cpp/test/sparse/convert_csr.cu @@ -116,7 +116,6 @@ TEST_P(SortedLongCOOToCSR, Result) raft::update_device(exp.data(), exp_h, rows_size, stream); convert::sorted_coo_to_csr(in.data(), nnz, out.data(), rows_size, stream); - std::cout << "assert is next " << std::endl; ASSERT_TRUE( raft::devArrMatch(out.data(), exp.data(), rows_size, raft::Compare(), stream)); From 81bb89d3f593426b8c805b417d5a1bbdd58465b1 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Fri, 28 Jun 2024 17:27:11 -0400 Subject: [PATCH 34/75] remove unneeded test --- cpp/test/sparse/convert_csr.cu | 38 ---------------------------------- 1 file changed, 38 deletions(-) diff --git a/cpp/test/sparse/convert_csr.cu b/cpp/test/sparse/convert_csr.cu index 41132a9c48..1cd49b0bbd 100644 --- a/cpp/test/sparse/convert_csr.cu +++ b/cpp/test/sparse/convert_csr.cu @@ -89,44 +89,6 @@ TEST_P(SortedCOOToCSR, Result) INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, SortedCOOToCSR, ::testing::ValuesIn(inputsf)); -typedef SparseConvertCSRTest SortedLongCOOToCSR; -TEST_P(SortedLongCOOToCSR, Result) -{ - cudaStream_t stream; - cudaStreamCreate(&stream); - - int nnz = 90; - int rows_size = 14; - - int* in_h = new int[nnz]{0, 0, 0, 0, 2, 2, 2, 2, 2, 8, 16, 16, 16, 16, 18, 18, 18, 18, - 18, 18, 18, 18, 18, 18, 18, 18, 24, 64, 64, 64, 64, 64, 64, 64, 64, 64, - 64, 64, 64, 64, 64, 66, 66, 66, 66, 66, 66, 66, 66, 72, 74, 80, 80, 80, - 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 82, 82, 82, 82, - 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 88, 90, 90}; - int* exp_h = new int[rows_size]{0, 4, 9, 10, 14, 26, 27, 41, 49, 50, 51, 68, 87, 88}; - - rmm::device_uvector in(nnz, stream); - rmm::device_uvector exp(rows_size, stream); - rmm::device_uvector out(rows_size, stream); - RAFT_CUDA_TRY(cudaMemsetAsync(in.data(), 0, in.size() * sizeof(int), stream)); - RAFT_CUDA_TRY(cudaMemsetAsync(exp.data(), 0, exp.size() * sizeof(int), stream)); - RAFT_CUDA_TRY(cudaMemsetAsync(out.data(), 0, out.size() * sizeof(int), stream)); - - raft::update_device(in.data(), in_h, nnz, stream); - raft::update_device(exp.data(), exp_h, rows_size, stream); - - convert::sorted_coo_to_csr(in.data(), nnz, out.data(), rows_size, stream); - ASSERT_TRUE( - raft::devArrMatch(out.data(), exp.data(), rows_size, raft::Compare(), stream)); - - cudaStreamDestroy(stream); - - delete[] in_h; - delete[] exp_h; -} - -INSTANTIATE_TEST_CASE_P(SparseConvertCSRTest, SortedLongCOOToCSR, ::testing::ValuesIn(inputsf)); - /******************************** adj graph ********************************/ template From ff1991f5e95a1e472e5b730426f79438d31ecbd0 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Tue, 2 Jul 2024 21:51:08 -0400 Subject: [PATCH 35/75] add csr and coo matrix bfknn apis --- cpp/include/raft/sparse/neighbors/knn.cuh | 169 ++++++++++++++++++++++ 1 file changed, 169 insertions(+) diff --git a/cpp/include/raft/sparse/neighbors/knn.cuh b/cpp/include/raft/sparse/neighbors/knn.cuh index 2cf68818aa..73520eb572 100644 --- a/cpp/include/raft/sparse/neighbors/knn.cuh +++ b/cpp/include/raft/sparse/neighbors/knn.cuh @@ -30,8 +30,11 @@ " Please use the sparse/spatial version instead.") 
#endif +#include #include +#include #include +#include namespace raft::sparse::neighbors { @@ -103,4 +106,170 @@ void brute_force_knn(const value_idx* idxIndptr, metricArg); } +/** + * Search the sparse kNN for the k-nearest neighbors of a set of sparse query vectors + * using some distance implementation + * @param[in] csr_idx index csr matrix + * @param[in] csr_query query csr matrix + * @param[out] output_indices dense matrix for output indices (size n_query_rows * k) + * @param[out] output_dists dense matrix for output distances (size n_query_rows * k) + * @param[in] k the number of neighbors to query + * @param[in] handle CUDA resource::get_cuda_stream(handle) to order operations with respect to + * @param[in] batch_size_index maximum number of rows to use from index matrix per batch + * @param[in] batch_size_query maximum number of rows to use from query matrix per batch + * @param[in] metric distance metric/measure to use + * @param[in] metricArg potential argument for metric (currently unused) + */ +template +void brute_force_knn(raft::device_csr_matrix csr_idx, + raft::device_csr_matrix csr_query, + value_idx* output_indices, + value_t* output_dists, + int k, + raft::resources const& handle, + size_t batch_size_index = 2 << 14, // approx 1M + size_t batch_size_query = 2 << 14, + raft::distance::DistanceType metric = raft::distance::DistanceType::L2Expanded, + float metricArg = 0) +{ + auto idxIndptr = csr_idx.structure_view().get_indptr(); + auto idxIndices = csr_idx.structure_view().get_indices(); + auto idxData = csr_idx.view().get_elements(); + + auto queryIndptr = csr_query.structure_view().get_indptr(); + auto queryIndices = csr_query.structure_view().get_indices(); + auto queryData = csr_query.view().get_elements(); + brute_force::knn(idxIndptr.data(), + idxIndices.data(), + idxData.data(), + idxData.size(), + csr_idx.structure_view().get_n_rows() - 1, + csr_idx.structure_view().get_n_cols(), + queryIndptr.data(), + queryIndices.data(), + queryData.data(), + queryData.size(), + csr_query.structure_view().get_n_rows() - 1, + csr_query.structure_view().get_n_cols(), + output_indices, + output_dists, + k, + handle, + batch_size_index, + batch_size_query, + metric, + metricArg); +} + +/** + * Search the sparse kNN for the k-nearest neighbors of a set of sparse query vectors + * using some distance implementation + * @param[in] coo_idx index coo matrix + * @param[in] coo_query query coo matrix + * @param[out] output_indices dense matrix for output indices (size n_query_rows * k) + * @param[out] output_dists dense matrix for output distances (size n_query_rows * k) + * @param[in] k the number of neighbors to query + * @param[in] handle CUDA resource::get_cuda_stream(handle) to order operations with respect to + * @param[in] batch_size_index maximum number of rows to use from index matrix per batch + * @param[in] batch_size_query maximum number of rows to use from query matrix per batch + * @param[in] metric distance metric/measure to use + * @param[in] metricArg potential argument for metric (currently unused) + */ +template +void brute_force_knn(raft::device_coo_matrix coo_idx, + raft::device_coo_matrix coo_query, + value_idx* output_indices, + value_t* output_dists, + int k, + raft::resources const& handle, + size_t batch_size_index = 2 << 14, // approx 1M + size_t batch_size_query = 2 << 14, + raft::distance::DistanceType metric = raft::distance::DistanceType::L2Expanded, + float metricArg = 0) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + + auto idxRows 
= coo_idx.structure_view().get_rows(); + auto idxCols = coo_idx.structure_view().get_cols(); + auto idxData = coo_idx.view().get_elements(); + + auto queryRows = coo_query.structure_view().get_rows(); + auto queryCols = coo_query.structure_view().get_cols(); + auto queryData = coo_query.view().get_elements(); + + raft::sparse::op::coo_sort(idxRows.size(), + idxCols.size(), + idxData.size(), + idxRows.data(), + idxCols.data(), + idxRows.data(), + stream); + + raft::sparse::op::coo_sort(queryRows.size(), + queryCols.size(), + queryData.size(), + queryRows.data(), + queryCols.data(), + queryData.data(), + stream); + + auto idxRowsCsr = + raft::make_device_vector(handle, coo_query.structure_view().get_n_rows()); + auto queryRowsCsr = + raft::make_device_vector(handle, coo_query.structure_view().get_n_rows()); + + raft::sparse::convert::sorted_coo_to_csr(idxRows.data(), + int(idxRows.size()), + idxRowsCsr.data_handle(), + coo_idx.structure_view().get_n_rows(), + stream); + + raft::sparse::convert::sorted_coo_to_csr(queryRows.data(), + int(queryRows.size()), + queryRowsCsr.data_handle(), + coo_query.structure_view().get_n_rows(), + stream); + + brute_force::knn(idxRowsCsr.data_handle(), + idxCols.data(), + idxData.data(), + idxData.size(), + coo_idx.structure_view().get_n_rows() - 1, + coo_idx.structure_view().get_n_cols(), + queryRowsCsr.data_handle(), + queryCols.data(), + queryData.data(), + queryData.size(), + coo_query.structure_view().get_n_rows() - 1, + coo_query.structure_view().get_n_cols(), + output_indices, + output_dists, + k, + handle, + coo_idx.structure_view().get_n_rows(), + coo_query.structure_view().get_n_rows(), + metric, + metricArg); +} + }; // namespace raft::sparse::neighbors From c593f4e8b485974951e02c5934954dc5c08dddd2 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Tue, 2 Jul 2024 23:40:18 -0400 Subject: [PATCH 36/75] add knn to preprocess tests --- cpp/test/sparse/preprocess.cu | 57 +++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/cpp/test/sparse/preprocess.cu b/cpp/test/sparse/preprocess.cu index e9a16419f2..51aa87ec78 100644 --- a/cpp/test/sparse/preprocess.cu +++ b/cpp/test/sparse/preprocess.cu @@ -36,6 +36,8 @@ namespace sparse { template struct SparsePreprocessInputs { + int n_rows; + int n_cols; std::vector rows_h; std::vector columns_h; std::vector values_h; @@ -47,6 +49,8 @@ class SparseTest : public ::testing::TestWithParam>::GetParam()), stream(resource::get_cuda_stream(handle)), + n_rows(params.n_rows), + n_cols(params.n_cols), rows(params.rows_h.size(), stream), columns(params.columns_h.size(), stream), values(params.values_h.size(), stream), @@ -60,35 +64,45 @@ class SparseTest : public ::testing::TestWithParam(handle, params.rows_h.size()); - auto columns = raft::make_device_vector(handle, params.columns_h.size()); - auto values = raft::make_device_vector(handle, params.values_h.size()); - auto result = raft::make_device_vector(handle, params.values_h.size()); + auto rows = raft::make_device_vector(handle, params.rows_h.size()); + auto columns = raft::make_device_vector(handle, params.columns_h.size()); + auto values = raft::make_device_vector(handle, params.values_h.size()); + auto result = raft::make_device_vector(handle, params.values_h.size()); raft::copy(rows.data_handle(), params.rows_h.data(), params.rows_h.size(), stream); raft::copy(columns.data_handle(), params.columns_h.data(), params.columns_h.size(), stream); raft::copy(values.data_handle(), params.values_h.data(), params.values_h.size(), 
stream); - auto coo_structure = raft::make_device_coordinate_structure_view(rows.data_handle(), - columns.data_handle(), - int(rows.size()), - int(columns.size()), - int(values.size())); - - auto coo_view = raft::make_device_coo_matrix_view(values.data_handle(), coo_structure); - - raft::update_device( - coo_view.get_elements().data(), values.data_handle(), values.size(), stream); + auto coo_struct_view = raft::make_device_coordinate_structure_view( + rows.data_handle(), columns.data_handle(), params.n_rows, params.n_cols, int(values.size())); + auto coo_matrix = raft::make_device_coo_matrix(handle, coo_struct_view); + raft::update_device( + coo_matrix.view().get_elements().data(), values.data_handle(), values.size(), stream); if (bm25_on) { - sparse::matrix::encode_bm25(handle, coo_view, result.view()); + sparse::matrix::encode_bm25(handle, coo_matrix.view(), result.view()); } else { - sparse::matrix::encode_tfidf(handle, coo_view, result.view()); + sparse::matrix::encode_tfidf(handle, coo_matrix.view(), result.view()); } + auto out_rows_coo = + raft::make_device_vector(handle, coo_matrix.structure_view().get_n_rows() * k); + auto out_dists_coo = raft::make_device_vector( + handle, coo_matrix.structure_view().get_n_rows() * k); + + raft::sparse::neighbors::brute_force_knn(coo_matrix, + coo_matrix, + out_rows_coo.data_handle(), + out_dists_coo.data_handle(), + k, + handle, + coo_matrix.structure_view().get_n_rows(), + coo_matrix.structure_view().get_n_rows(), + raft::distance::DistanceType::L1); + RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); ASSERT_TRUE(values.size() == result.size()); @@ -100,6 +114,7 @@ class SparseTest : public ::testing::TestWithParam params; + int n_rows, n_cols; rmm::device_uvector rows, columns; rmm::device_uvector values, result; bool bm25; @@ -112,9 +127,11 @@ using SparseTestFT = SparseTest; TEST_P(SparseTestFT, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { - {{0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, - {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, - {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, + {9, // n_rows + 4, // n_cols + {0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, // rows + {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, // cols + {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, // vals }; INSTANTIATE_TEST_CASE_P(SparseTest, SparseTestFF, ::testing::ValuesIn(sparse_preprocess_inputs)); From 0febb55f1fcbdb2516142d92b6e76d26476a04ec Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 3 Jul 2024 21:32:08 -0400 Subject: [PATCH 37/75] all tests in place and refactor code --- .../sparse/matrix/detail/preprocessing.cuh | 110 ++++---- .../raft/sparse/matrix/preprocessing.cuh | 16 ++ cpp/test/sparse/preprocess.cu | 141 ----------- cpp/test/sparse/preprocess_coo.cu | 235 ++++++++++++++++++ cpp/test/sparse/preprocess_csr.cu | 154 ++++++++++++ 5 files changed, 473 insertions(+), 183 deletions(-) delete mode 100644 cpp/test/sparse/preprocess.cu create mode 100644 cpp/test/sparse/preprocess_coo.cu create mode 100644 cpp/test/sparse/preprocess_csr.cu diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 808d18c40b..af41251a76 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -178,17 +179,11 @@ std::tuple sparse_search_preprocess(raft::resources& handle, template void encode_tfidf(raft::resources& handle, - raft::device_coo_matrix_view coo_in, + 
raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, raft::device_vector_view values_out) { - auto rows = raft::make_device_vector_view(coo_in.structure_view().get_rows().data(), - coo_in.structure_view().get_rows().size()); - auto columns = raft::make_device_vector_view(coo_in.structure_view().get_cols().data(), - coo_in.structure_view().get_cols().size()); - auto values = - raft::make_device_vector_view(coo_in.get_elements().data(), coo_in.get_elements().size()); - ; - auto doc_lengths = raft::make_device_vector(handle, columns.size()); auto term_counts = raft::make_device_vector(handle, rows.size()); auto [doc_count, avg_doc_length] = sparse_search_preprocess( @@ -203,37 +198,49 @@ void encode_tfidf(raft::resources& handle, template void encode_tfidf(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, + raft::device_coo_matrix_view coo_in, raft::device_vector_view values_out) { - auto doc_lengths = raft::make_device_vector(handle, columns.size()); - auto term_counts = raft::make_device_vector(handle, rows.size()); - auto [doc_count, avg_doc_length] = sparse_search_preprocess( - handle, rows, columns, values, doc_lengths.view(), term_counts.view()); + auto rows = raft::make_device_vector_view(coo_in.structure_view().get_rows().data(), + coo_in.structure_view().get_rows().size()); + auto columns = raft::make_device_vector_view(coo_in.structure_view().get_cols().data(), + coo_in.structure_view().get_cols().size()); + auto values = raft::make_device_vector_view(coo_in.get_elements().data(), + coo_in.get_elements().size()); + + encode_tfidf(handle, rows, columns, values, values_out); +} - raft::linalg::map(handle, - values_out, - tfidf(doc_count), - raft::make_const_mdspan(values), - raft::make_const_mdspan(term_counts.view())); +template +void encode_tfidf(raft::resources& handle, + raft::device_csr_matrix_view csr_in, + raft::device_vector_view values_out) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + + auto indptr = raft::make_device_vector_view( + csr_in.structure_view().get_indptr().data(), csr_in.structure_view().get_indptr().size()); + auto indices = raft::make_device_vector_view( + csr_in.structure_view().get_indices().data(), csr_in.structure_view().get_indices().size()); + auto values = raft::make_device_vector_view(csr_in.get_elements().data(), + csr_in.get_elements().size()); + + auto rows = raft::make_device_vector(handle, values.size()); + raft::sparse::convert::csr_to_coo( + indptr.data_handle(), indptr.size(), rows.data_handle(), rows.size(), stream); + + encode_tfidf(handle, rows.view(), indices, values, values_out); } template void encode_bm25(raft::resources& handle, - raft::device_coo_matrix_view coo_in, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, raft::device_vector_view values_out, float k_param = 1.6f, float b_param = 0.75f) { - auto rows = raft::make_device_vector_view(coo_in.structure_view().get_rows().data(), - coo_in.structure_view().get_rows().size()); - auto columns = raft::make_device_vector_view(coo_in.structure_view().get_cols().data(), - coo_in.structure_view().get_cols().size()); - auto values = - raft::make_device_vector_view(coo_in.get_elements().data(), coo_in.get_elements().size()); - ; auto doc_lengths = raft::make_device_vector(handle, columns.size()); auto term_counts = raft::make_device_vector(handle, rows.size()); auto [doc_count, 
avg_doc_length] = sparse_search_preprocess( @@ -249,23 +256,42 @@ void encode_bm25(raft::resources& handle, template void encode_bm25(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, + raft::device_coo_matrix_view coo_in, raft::device_vector_view values_out, float k_param = 1.6f, float b_param = 0.75f) { - auto doc_lengths = raft::make_device_vector(handle, columns.size()); - auto term_counts = raft::make_device_vector(handle, rows.size()); - auto [doc_count, avg_doc_length] = sparse_search_preprocess( - handle, rows, columns, values, doc_lengths.view(), term_counts.view()); + auto rows = raft::make_device_vector_view(coo_in.structure_view().get_rows().data(), + coo_in.structure_view().get_rows().size()); + auto columns = raft::make_device_vector_view(coo_in.structure_view().get_cols().data(), + coo_in.structure_view().get_cols().size()); + auto values = raft::make_device_vector_view(coo_in.get_elements().data(), + coo_in.get_elements().size()); + + encode_bm25(handle, rows, columns, values, values_out); +} - raft::linalg::map(handle, - values_out, - bm25(doc_count, avg_doc_length, k_param, b_param), - raft::make_const_mdspan(values), - raft::make_const_mdspan(doc_lengths.view()), - raft::make_const_mdspan(term_counts.view())); +template +void encode_bm25(raft::resources& handle, + raft::device_csr_matrix_view csr_in, + raft::device_vector_view values_out, + float k_param = 1.6f, + float b_param = 0.75f) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + + auto indptr = raft::make_device_vector_view( + csr_in.structure_view().get_indptr().data(), csr_in.structure_view().get_indptr().size()); + auto indices = raft::make_device_vector_view( + csr_in.structure_view().get_indices().data(), csr_in.structure_view().get_indices().size()); + auto values = raft::make_device_vector_view(csr_in.get_elements().data(), + csr_in.get_elements().size()); + + auto rows = raft::make_device_vector(handle, values.size()); + raft::sparse::convert::csr_to_coo( + indptr.data_handle(), indptr.size(), rows.data_handle(), rows.size(), stream); + + encode_bm25(handle, rows.view(), indices, values, values_out); } + } // namespace raft::sparse::matrix::detail \ No newline at end of file diff --git a/cpp/include/raft/sparse/matrix/preprocessing.cuh b/cpp/include/raft/sparse/matrix/preprocessing.cuh index 263e00bac8..e72b74697b 100644 --- a/cpp/include/raft/sparse/matrix/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/preprocessing.cuh @@ -47,6 +47,14 @@ void encode_bm25(raft::resources& handle, return matrix::detail::encode_bm25(handle, coo_in, values_out); } +template +void encode_bm25(raft::resources& handle, + raft::device_csr_matrix_view csr_in, + raft::device_vector_view values_out) +{ + return matrix::detail::encode_bm25(handle, csr_in, values_out); +} + template void encode_tfidf(raft::resources& handle, raft::device_vector_view rows, @@ -65,4 +73,12 @@ void encode_tfidf(raft::resources& handle, return matrix::detail::encode_tfidf(handle, coo_in, values_out); } +template +void encode_tfidf(raft::resources& handle, + raft::device_csr_matrix_view csr_in, + raft::device_vector_view values_out) +{ + return matrix::detail::encode_tfidf(handle, csr_in, values_out); +} + } // namespace raft::sparse::matrix diff --git a/cpp/test/sparse/preprocess.cu b/cpp/test/sparse/preprocess.cu deleted file mode 100644 index 51aa87ec78..0000000000 --- a/cpp/test/sparse/preprocess.cu +++ /dev/null @@ -1,141 +0,0 @@ -/* - * 
Copyright (c) 2024, NVIDIA CORPORATION. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "../test_utils.cuh" - -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -// #include -// #include -// #include - -namespace raft { -namespace sparse { - -template -struct SparsePreprocessInputs { - int n_rows; - int n_cols; - std::vector rows_h; - std::vector columns_h; - std::vector values_h; -}; - -template -class SparseTest : public ::testing::TestWithParam> { - public: - SparseTest() - : params(::testing::TestWithParam>::GetParam()), - stream(resource::get_cuda_stream(handle)), - n_rows(params.n_rows), - n_cols(params.n_cols), - rows(params.rows_h.size(), stream), - columns(params.columns_h.size(), stream), - values(params.values_h.size(), stream), - result(params.values_h.size(), stream) - - { - } - - protected: - void SetUp() override {} - - void Run(bool bm25_on) - { - int k = 3; - cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto rows = raft::make_device_vector(handle, params.rows_h.size()); - auto columns = raft::make_device_vector(handle, params.columns_h.size()); - auto values = raft::make_device_vector(handle, params.values_h.size()); - auto result = raft::make_device_vector(handle, params.values_h.size()); - - raft::copy(rows.data_handle(), params.rows_h.data(), params.rows_h.size(), stream); - raft::copy(columns.data_handle(), params.columns_h.data(), params.columns_h.size(), stream); - - raft::copy(values.data_handle(), params.values_h.data(), params.values_h.size(), stream); - - auto coo_struct_view = raft::make_device_coordinate_structure_view( - rows.data_handle(), columns.data_handle(), params.n_rows, params.n_cols, int(values.size())); - auto coo_matrix = raft::make_device_coo_matrix(handle, coo_struct_view); - raft::update_device( - coo_matrix.view().get_elements().data(), values.data_handle(), values.size(), stream); - - if (bm25_on) { - sparse::matrix::encode_bm25(handle, coo_matrix.view(), result.view()); - } else { - sparse::matrix::encode_tfidf(handle, coo_matrix.view(), result.view()); - } - - auto out_rows_coo = - raft::make_device_vector(handle, coo_matrix.structure_view().get_n_rows() * k); - auto out_dists_coo = raft::make_device_vector( - handle, coo_matrix.structure_view().get_n_rows() * k); - - raft::sparse::neighbors::brute_force_knn(coo_matrix, - coo_matrix, - out_rows_coo.data_handle(), - out_dists_coo.data_handle(), - k, - handle, - coo_matrix.structure_view().get_n_rows(), - coo_matrix.structure_view().get_n_rows(), - raft::distance::DistanceType::L1); - - RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); - - ASSERT_TRUE(values.size() == result.size()); - // raft::devArrMatch(, nnz, raft::Compare())); - } - - protected: - raft::resources handle; - cudaStream_t stream; - - SparsePreprocessInputs params; - int n_rows, n_cols; - rmm::device_uvector rows, columns; - rmm::device_uvector values, result; - bool bm25; -}; - -using SparseTestFF = SparseTest; -TEST_P(SparseTestFF, Result) { 
Run(false); } - -using SparseTestFT = SparseTest; -TEST_P(SparseTestFT, Result) { Run(true); } - -const std::vector> sparse_preprocess_inputs = { - {9, // n_rows - 4, // n_cols - {0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, // rows - {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, // cols - {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, // vals -}; - -INSTANTIATE_TEST_CASE_P(SparseTest, SparseTestFF, ::testing::ValuesIn(sparse_preprocess_inputs)); -INSTANTIATE_TEST_CASE_P(SparseTest, SparseTestFT, ::testing::ValuesIn(sparse_preprocess_inputs)); - -} // namespace sparse -} // namespace raft \ No newline at end of file diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu new file mode 100644 index 0000000000..4633b5d32c --- /dev/null +++ b/cpp/test/sparse/preprocess_coo.cu @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "../test_utils.cuh" + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace raft { +namespace sparse { + +template +struct SparsePreprocessInputs { + int n_rows; + int n_cols; + std::vector rows_h; + std::vector columns_h; + // std::vector tfidf_vals_h; + // std::vector out_idxs_bm25_h; + // std::vector out_dists_bm25_h; + // std::vector out_idxs_tfidf_h; + // std::vector out_dists_tfidf_h; + std::vector values_h; +}; + +template +class SparsePreprocessCoo + : public ::testing::TestWithParam> { + public: + SparsePreprocessCoo() + : params(::testing::TestWithParam>::GetParam()), + stream(resource::get_cuda_stream(handle)), + n_rows(params.n_rows), + n_cols(params.n_cols), + rows(params.rows_h.size(), stream), + columns(params.columns_h.size(), stream), + values(params.values_h.size(), stream), + result(params.values_h.size(), stream) + // bm25_vals(params.bm25_vals_h.size(), stream) + // tfidf_vals(params.tfidf_vals_h.size(), stream), + // out_idxs_bm25(params.out_idxs_bm25_h.size(), stream), + // out_idxs_tfidf(params.out_idxs_tfidf_h.size(), stream), + // out_dists_bm25(params.out_dists_bm25_h.size(), stream), + // out_dists_tfidf(params.out_dists_tfidf_h.size(), stream) + { + } + + protected: + void SetUp() override {} + + void Run(bool bm25_on) + { + int k = 2; + std::vector bm25_vals_h = {0.850086, + 1.15525, + 0.682645, + 0.860915, + 0.99021, + 0.860915, + 0.850086, + 0.850086, + 0.850086, + 1.25152}; // bm25 + std::vector tfidf_vals_h = {0.480453, + 0.7615, + 0.7615, + 0.960906, + 1.11558, + 0.960906, + 0.480453, + 0.480453, + 0.480453, + 0.7615}; // tfidf + std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, + 6, 5, 7, 4, 8, 4, 9, 1, 10, 1, 0, 0}; + std::vector out_dists_bm25_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, + 0, 0.199406, 0, 0.154671, 0, 0.154671, 0, 0.199406, + 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}; + + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + auto rows = raft::make_device_vector(handle, params.rows_h.size()); + auto columns = raft::make_device_vector(handle, 
params.columns_h.size()); + auto values = raft::make_device_vector(handle, params.values_h.size()); + auto result = raft::make_device_vector(handle, params.values_h.size()); + auto bm25_vals = raft::make_device_vector(handle, bm25_vals_h.size()); + auto tfidf_vals = raft::make_device_vector(handle, tfidf_vals_h.size()); + auto out_idxs_bm25 = raft::make_device_vector(handle, out_idxs_bm25_h.size()); + // auto out_idxs_tfidf = raft::make_device_vector(handle, + // params.out_idxs_tfidf_h.size()); + auto out_dists_bm25 = raft::make_device_vector(handle, out_dists_bm25_h.size()); + // auto out_dists_tfidf = raft::make_device_vector(handle, + // params.out_dists_tfidf_h.size()); + + raft::copy(rows.data_handle(), params.rows_h.data(), params.rows_h.size(), stream); + raft::copy(columns.data_handle(), params.columns_h.data(), params.columns_h.size(), stream); + raft::copy(values.data_handle(), params.values_h.data(), params.values_h.size(), stream); + raft::copy(bm25_vals.data_handle(), bm25_vals_h.data(), bm25_vals_h.size(), stream); + raft::copy(tfidf_vals.data_handle(), tfidf_vals_h.data(), tfidf_vals_h.size(), stream); + raft::copy(out_idxs_bm25.data_handle(), out_idxs_bm25_h.data(), out_idxs_bm25_h.size(), stream); + // raft::copy(out_idxs_tfidf.data_handle(), params.out_idxs_tfidf_h.data(), + // params.out_idxs_tfidf_h.size(), stream); + raft::copy( + out_dists_bm25.data_handle(), out_dists_bm25_h.data(), out_dists_bm25_h.size(), stream); + // raft::copy(out_dists_tfidf.data_handle(), params.out_dists_tfidf_h.data(), + // params.out_dists_tfidf_h.size(), stream); + + auto coo_struct_view = raft::make_device_coordinate_structure_view( + rows.data_handle(), columns.data_handle(), params.n_rows, params.n_cols, int(values.size())); + auto c_matrix = raft::make_device_coo_matrix(handle, coo_struct_view); + raft::update_device( + c_matrix.view().get_elements().data(), values.data_handle(), values.size(), stream); + + if (bm25_on) { + sparse::matrix::encode_bm25(handle, c_matrix.view(), result.view()); + ASSERT_TRUE(raft::devArrMatch(bm25_vals.data_handle(), + result.data_handle(), + result.size(), + raft::CompareApprox(1e-6), + stream)); + } else { + sparse::matrix::encode_tfidf(handle, c_matrix.view(), result.view()); + ASSERT_TRUE(raft::devArrMatch(tfidf_vals.data_handle(), + result.data_handle(), + result.size(), + raft::CompareApprox(1e-6), + stream)); + } + + raft::update_device( + c_matrix.view().get_elements().data(), result.data_handle(), result.size(), stream); + + auto out_indices = + raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); + auto out_dists = + raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); + + raft::sparse::neighbors::brute_force_knn(c_matrix, + c_matrix, + out_indices.data_handle(), + out_dists.data_handle(), + k, + handle, + c_matrix.structure_view().get_n_rows(), + c_matrix.structure_view().get_n_rows(), + raft::distance::DistanceType::L1); + + if (bm25_on) { + ASSERT_TRUE(raft::devArrMatch(out_idxs_bm25.data_handle(), + out_indices.data_handle(), + out_indices.size(), + raft::CompareApprox(1e-6), + stream)); + ASSERT_TRUE(raft::devArrMatch(out_dists_bm25.data_handle(), + out_dists.data_handle(), + out_dists.size(), + raft::CompareApprox(1e-6), + stream)); + } + // else { + // ASSERT_TRUE(raft::devArrMatch( + // out_idxs_tfidf.data_handle(), out_indices.data_handle(), out_indices.size(), + // raft::Compare(), stream)); + // ASSERT_TRUE(raft::devArrMatch( + // out_dists_tfidf.data_handle(), 
out_dists.data_handle(), out_dists.size(), + // raft::Compare(), stream)); + // } + RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); + } + + protected: + raft::resources handle; + cudaStream_t stream; + + SparsePreprocessInputs params; + rmm::device_uvector rows, columns; + rmm::device_uvector values, result; + int n_rows, n_cols; + // rmm::device_uvector rows, columns, out_idxs_bm25, out_idxs_tfidf; + // rmm::device_uvector values, result, bm25_vals, tfidf_vals, out_dists_bm25, + // out_dists_tfidf; + bool bm25; +}; + +using SparsePreprocessTfidfCoo = SparsePreprocessCoo; +TEST_P(SparsePreprocessTfidfCoo, Result) { Run(false); } + +using SparsePreprocessBm25Coo = SparsePreprocessCoo; +TEST_P(SparsePreprocessBm25Coo, Result) { Run(true); } + +const std::vector> sparse_preprocess_inputs = { + {12, // n_rows + 5, // n_cols + {0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, // rows + {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, + // {0.480453, 0.7615, 0.7615, 0.960906, 1.11558, 0.960906, 0.480453, 0.480453, 0.480453, + // 0.7615}, // tfidf {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, 6, 5, 7, 4, 8, 4, 9, 1, 10, 1, 0, + // 0}, // out_idx_bm25 {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, 0, 0.199406, 0, 0.154671, 0, + // 0.154671, 0, 0.199406, 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}, //out_dists_bm25 {0, 3, + // 1, 2, 1, 2, 3, 0, 4, 8, 5, 10, 6, 5, 7, 8, 8, 7, 9, 1, 10, 5, 0, 0}, // out_idx_tfidf {0, + // 0.305159, 0, 0, 0, 0, 0, 0.305159, 0, 0.167441, 0, 0.0108292, 0, 0.129295, 0, 0.0108292, 0, + // 0.0108292, 0, 0.850086, 0, 0.0108292, 0, 0}}, // out_dists_tfidf + {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, // vals +}; + +INSTANTIATE_TEST_CASE_P(SparsePreprocessCoo, + SparsePreprocessTfidfCoo, + ::testing::ValuesIn(sparse_preprocess_inputs)); +INSTANTIATE_TEST_CASE_P(SparsePreprocessCoo, + SparsePreprocessBm25Coo, + ::testing::ValuesIn(sparse_preprocess_inputs)); + +} // namespace sparse +} // namespace raft \ No newline at end of file diff --git a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu new file mode 100644 index 0000000000..2e5c0bf6d8 --- /dev/null +++ b/cpp/test/sparse/preprocess_csr.cu @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "../test_utils.cuh" + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +namespace raft { +namespace sparse { + +template +struct SparsePreprocessInputs { + int n_rows; + int n_cols; + std::vector rows_h; + std::vector columns_h; + std::vector values_h; +}; + +template +class SparsePreprocessCSR + : public ::testing::TestWithParam> { + public: + SparsePreprocessCSR() + : params(::testing::TestWithParam>::GetParam()), + stream(resource::get_cuda_stream(handle)), + n_rows(params.n_rows), + n_cols(params.n_cols), + rows(params.rows_h.size(), stream), + columns(params.columns_h.size(), stream), + values(params.values_h.size(), stream), + result(params.values_h.size(), stream) + + { + } + + protected: + void SetUp() override {} + + void Run(bool bm25_on) + { + int k = 2; + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + auto indptr = raft::make_device_vector(handle, params.rows_h.size()); + auto indices = raft::make_device_vector(handle, params.columns_h.size()); + auto values = raft::make_device_vector(handle, params.values_h.size()); + auto result = raft::make_device_vector(handle, params.values_h.size()); + + raft::copy(indptr.data_handle(), params.rows_h.data(), params.rows_h.size(), stream); + raft::copy(indices.data_handle(), params.columns_h.data(), params.columns_h.size(), stream); + raft::copy(values.data_handle(), params.values_h.data(), params.values_h.size(), stream); + + auto csr_struct_view = raft::make_device_compressed_structure_view(indptr.data_handle(), + indices.data_handle(), + params.n_rows, + params.n_cols, + int(values.size())); + auto c_matrix = raft::make_device_csr_matrix(handle, csr_struct_view); + + raft::update_device( + c_matrix.view().get_elements().data(), values.data_handle(), values.size(), stream); + + if (bm25_on) { + sparse::matrix::encode_bm25(handle, c_matrix.view(), result.view()); + } else { + sparse::matrix::encode_tfidf(handle, c_matrix.view(), result.view()); + } + + raft::update_device( + c_matrix.view().get_elements().data(), result.data_handle(), result.size(), stream); + + auto out_indices = + raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); + auto out_dists = + raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); + + raft::sparse::neighbors::brute_force_knn(c_matrix, + c_matrix, + out_indices.data_handle(), + out_dists.data_handle(), + k, + handle, + c_matrix.structure_view().get_n_rows(), + c_matrix.structure_view().get_n_rows(), + raft::distance::DistanceType::L1); + + RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); + + ASSERT_TRUE(values.size() == result.size()); + // raft::devArrMatch(, nnz, raft::Compare())); + } + + protected: + raft::resources handle; + cudaStream_t stream; + + SparsePreprocessInputs params; + int n_rows, n_cols; + rmm::device_uvector rows, columns; + rmm::device_uvector values, result; + bool bm25; +}; + +using SparsePreprocessTfidfCsr = SparsePreprocessCSR; +TEST_P(SparsePreprocessTfidfCsr, Result) { Run(false); } + +using SparsePreprocessBm25Csr = SparsePreprocessCSR; +TEST_P(SparsePreprocessBm25Csr, Result) { Run(true); } + +const std::vector> sparse_preprocess_inputs = { + {12, // n_rows + 5, // n_cols + {0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9}, // rows + {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, // cols + {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, // vals + // {0.850086, 1.15525, 0.682645, 0.860915, 0.99021, 0.860915, 0.850086, 0.850086, + // 0.850086, 1.25152}, // bm25 {0.480453, 0.7615, 
0.7615, 0.960906, 1.11558, 0.960906, 0.480453, + // 0.480453, 0.480453, 0.7615}, // tfidf {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, 6, 5, 7, 4, 8, 4, + // 9, 1, 10, 1, 0, 0}, //out_idx {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, 0, 0.199406, 0, 0.154671, + // 0, 0.154671, 0, 0.199406, 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}}, //out_dists +}; + +INSTANTIATE_TEST_CASE_P(SparsePreprocessCSR, + SparsePreprocessTfidfCsr, + ::testing::ValuesIn(sparse_preprocess_inputs)); +INSTANTIATE_TEST_CASE_P(SparsePreprocessCSR, + SparsePreprocessBm25Csr, + ::testing::ValuesIn(sparse_preprocess_inputs)); + +} // namespace sparse +} // namespace raft \ No newline at end of file From 6477cd49a02c44b2296182e66255f906a55a45df Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 3 Jul 2024 21:33:25 -0400 Subject: [PATCH 38/75] add in cmake for test files --- cpp/test/CMakeLists.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cpp/test/CMakeLists.txt b/cpp/test/CMakeLists.txt index 2cd5a29ea0..8fffb34a54 100644 --- a/cpp/test/CMakeLists.txt +++ b/cpp/test/CMakeLists.txt @@ -316,7 +316,8 @@ if(BUILD_TESTS) sparse/spgemmi.cu sparse/spmm.cu sparse/symmetrize.cu - sparse/preprocess.cu + sparse/preprocess_csr.cu + sparse/preprocess_coo.cu ) ConfigureTest( From c836ba8e691800f03031dc23be56fed39da66e66 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Thu, 4 Jul 2024 00:27:56 -0400 Subject: [PATCH 39/75] adjust tests, coo now passes all checks --- cpp/test/sparse/preprocess_coo.cu | 166 +++++++++++++++--------------- cpp/test/sparse/preprocess_csr.cu | 100 ++++++++++++++++-- 2 files changed, 172 insertions(+), 94 deletions(-) diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu index 4633b5d32c..1fc0f27b81 100644 --- a/cpp/test/sparse/preprocess_coo.cu +++ b/cpp/test/sparse/preprocess_coo.cu @@ -58,12 +58,6 @@ class SparsePreprocessCoo columns(params.columns_h.size(), stream), values(params.values_h.size(), stream), result(params.values_h.size(), stream) - // bm25_vals(params.bm25_vals_h.size(), stream) - // tfidf_vals(params.tfidf_vals_h.size(), stream), - // out_idxs_bm25(params.out_idxs_bm25_h.size(), stream), - // out_idxs_tfidf(params.out_idxs_tfidf_h.size(), stream), - // out_dists_bm25(params.out_dists_bm25_h.size(), stream), - // out_dists_tfidf(params.out_dists_tfidf_h.size(), stream) { } @@ -72,46 +66,52 @@ class SparsePreprocessCoo void Run(bool bm25_on) { - int k = 2; - std::vector bm25_vals_h = {0.850086, - 1.15525, - 0.682645, - 0.860915, - 0.99021, - 0.860915, - 0.850086, - 0.850086, - 0.850086, - 1.25152}; // bm25 - std::vector tfidf_vals_h = {0.480453, - 0.7615, - 0.7615, - 0.960906, - 1.11558, - 0.960906, - 0.480453, - 0.480453, - 0.480453, - 0.7615}; // tfidf - std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, - 6, 5, 7, 4, 8, 4, 9, 1, 10, 1, 0, 0}; - std::vector out_dists_bm25_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, - 0, 0.199406, 0, 0.154671, 0, 0.154671, 0, 0.199406, - 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}; + int k = 2; + std::vector bm25_vals_h = {0.850086, + 1.15525, + 0.682645, + 0.860915, + 0.99021, + 0.860915, + 0.850086, + 0.850086, + 0.850086, + 1.25152}; // bm25 + std::vector tfidf_vals_h = {0.480453, + 0.7615, + 0.7615, + 0.960906, + 1.11558, + 0.960906, + 0.480453, + 0.480453, + 0.480453, + 0.7615}; // tfidf + std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 8, 5, 10, + 6, 5, 7, 8, 8, 7, 9, 1, 10, 5, 0, 0}; + std::vector out_dists_bm25_h = {0, 0.305159, 0, 0, 0, 0, 0, 0.305159, + 0, 0.167441, 0, 0.0108292, 
0, 0.129295, 0, 0.0108292, + 0, 0.0108292, 0, 0.850086, 0, 0.0108292, 0, 0}; + std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, + 6, 5, 7, 4, 8, 4, 9, 1, 10, 1, 0, 0}; + std::vector out_dists_tfidf_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, + 0, 0.199406, 0, 0.154671, 0, 0.154671, 0, 0.199406, + 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}; cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto rows = raft::make_device_vector(handle, params.rows_h.size()); - auto columns = raft::make_device_vector(handle, params.columns_h.size()); - auto values = raft::make_device_vector(handle, params.values_h.size()); - auto result = raft::make_device_vector(handle, params.values_h.size()); - auto bm25_vals = raft::make_device_vector(handle, bm25_vals_h.size()); - auto tfidf_vals = raft::make_device_vector(handle, tfidf_vals_h.size()); - auto out_idxs_bm25 = raft::make_device_vector(handle, out_idxs_bm25_h.size()); - // auto out_idxs_tfidf = raft::make_device_vector(handle, - // params.out_idxs_tfidf_h.size()); - auto out_dists_bm25 = raft::make_device_vector(handle, out_dists_bm25_h.size()); - // auto out_dists_tfidf = raft::make_device_vector(handle, - // params.out_dists_tfidf_h.size()); + auto rows = raft::make_device_vector(handle, params.rows_h.size()); + auto columns = raft::make_device_vector(handle, params.columns_h.size()); + auto values = raft::make_device_vector(handle, params.values_h.size()); + auto result = raft::make_device_vector(handle, params.values_h.size()); + auto bm25_vals = raft::make_device_vector(handle, bm25_vals_h.size()); + auto tfidf_vals = raft::make_device_vector(handle, tfidf_vals_h.size()); + auto out_idxs_bm25 = raft::make_device_vector(handle, out_idxs_bm25_h.size()); + auto out_idxs_tfidf = + raft::make_device_vector(handle, out_idxs_tfidf_h.size()); + auto out_dists_bm25 = + raft::make_device_vector(handle, out_dists_bm25_h.size()); + auto out_dists_tfidf = + raft::make_device_vector(handle, out_dists_tfidf_h.size()); raft::copy(rows.data_handle(), params.rows_h.data(), params.rows_h.size(), stream); raft::copy(columns.data_handle(), params.columns_h.data(), params.columns_h.size(), stream); @@ -119,73 +119,77 @@ class SparsePreprocessCoo raft::copy(bm25_vals.data_handle(), bm25_vals_h.data(), bm25_vals_h.size(), stream); raft::copy(tfidf_vals.data_handle(), tfidf_vals_h.data(), tfidf_vals_h.size(), stream); raft::copy(out_idxs_bm25.data_handle(), out_idxs_bm25_h.data(), out_idxs_bm25_h.size(), stream); - // raft::copy(out_idxs_tfidf.data_handle(), params.out_idxs_tfidf_h.data(), - // params.out_idxs_tfidf_h.size(), stream); + raft::copy( + out_idxs_tfidf.data_handle(), out_idxs_tfidf_h.data(), out_idxs_tfidf_h.size(), stream); raft::copy( out_dists_bm25.data_handle(), out_dists_bm25_h.data(), out_dists_bm25_h.size(), stream); - // raft::copy(out_dists_tfidf.data_handle(), params.out_dists_tfidf_h.data(), - // params.out_dists_tfidf_h.size(), stream); + raft::copy( + out_dists_tfidf.data_handle(), out_dists_tfidf_h.data(), out_dists_tfidf_h.size(), stream); auto coo_struct_view = raft::make_device_coordinate_structure_view( rows.data_handle(), columns.data_handle(), params.n_rows, params.n_cols, int(values.size())); - auto c_matrix = raft::make_device_coo_matrix(handle, coo_struct_view); - raft::update_device( + auto c_matrix = + raft::make_device_coo_matrix(handle, coo_struct_view); + raft::update_device( c_matrix.view().get_elements().data(), values.data_handle(), values.size(), stream); if (bm25_on) { - 
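// An illustrative sketch of the weights checked below, based on the bm25/tfidf functors in
// cpp/include/raft/sparse/matrix/detail/preprocessing.cuh (the numbers here are examples only,
// not this fixture's expected outputs, and raft::log is assumed to be the natural log): for a
// nonzero value v of a row (term) that occurs in df of the matrix's n_feats columns (features),
// with feature length fl and average feature length avg_fl,
//   tfidf(v) = log(1 + v) * log(n_feats / (1 + df))
//   bm25(v)  = log(n_feats / (1 + df)) * (v * (k + 1)) / (v + k * (1 - b + b * fl / avg_fl))
// using the defaults k = 1.6 and b = 0.75; e.g. v = 2, df = 2, n_feats = 5 gives
// tfidf = log(3) * log(5 / 3) ≈ 1.0986 * 0.5108 ≈ 0.561.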
sparse::matrix::encode_bm25(handle, c_matrix.view(), result.view()); + sparse::matrix::encode_bm25(handle, c_matrix.view(), result.view()); ASSERT_TRUE(raft::devArrMatch(bm25_vals.data_handle(), result.data_handle(), result.size(), - raft::CompareApprox(1e-6), + raft::CompareApprox(2e-5), stream)); } else { - sparse::matrix::encode_tfidf(handle, c_matrix.view(), result.view()); + sparse::matrix::encode_tfidf(handle, c_matrix.view(), result.view()); ASSERT_TRUE(raft::devArrMatch(tfidf_vals.data_handle(), result.data_handle(), result.size(), - raft::CompareApprox(1e-6), + raft::CompareApprox(2e-5), stream)); } - raft::update_device( + raft::update_device( c_matrix.view().get_elements().data(), result.data_handle(), result.size(), stream); auto out_indices = - raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); + raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); auto out_dists = - raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); - - raft::sparse::neighbors::brute_force_knn(c_matrix, - c_matrix, - out_indices.data_handle(), - out_dists.data_handle(), - k, - handle, - c_matrix.structure_view().get_n_rows(), - c_matrix.structure_view().get_n_rows(), - raft::distance::DistanceType::L1); + raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); + + raft::sparse::neighbors::brute_force_knn(c_matrix, + c_matrix, + out_indices.data_handle(), + out_dists.data_handle(), + k, + handle, + c_matrix.structure_view().get_n_rows(), + c_matrix.structure_view().get_n_rows(), + raft::distance::DistanceType::L1); if (bm25_on) { ASSERT_TRUE(raft::devArrMatch(out_idxs_bm25.data_handle(), out_indices.data_handle(), out_indices.size(), - raft::CompareApprox(1e-6), + raft::Compare(), stream)); ASSERT_TRUE(raft::devArrMatch(out_dists_bm25.data_handle(), out_dists.data_handle(), out_dists.size(), - raft::CompareApprox(1e-6), + raft::CompareApprox(2e-5), + stream)); + } else { + ASSERT_TRUE(raft::devArrMatch(out_idxs_tfidf.data_handle(), + out_indices.data_handle(), + out_indices.size(), + raft::Compare(), + stream)); + ASSERT_TRUE(raft::devArrMatch(out_dists_tfidf.data_handle(), + out_dists.data_handle(), + out_dists.size(), + raft::CompareApprox(2e-5), stream)); } - // else { - // ASSERT_TRUE(raft::devArrMatch( - // out_idxs_tfidf.data_handle(), out_indices.data_handle(), out_indices.size(), - // raft::Compare(), stream)); - // ASSERT_TRUE(raft::devArrMatch( - // out_dists_tfidf.data_handle(), out_dists.data_handle(), out_dists.size(), - // raft::Compare(), stream)); - // } RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); } @@ -214,13 +218,7 @@ const std::vector> sparse_preprocess_inputs = 5, // n_cols {0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, // rows {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, - // {0.480453, 0.7615, 0.7615, 0.960906, 1.11558, 0.960906, 0.480453, 0.480453, 0.480453, - // 0.7615}, // tfidf {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, 6, 5, 7, 4, 8, 4, 9, 1, 10, 1, 0, - // 0}, // out_idx_bm25 {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, 0, 0.199406, 0, 0.154671, 0, - // 0.154671, 0, 0.199406, 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}, //out_dists_bm25 {0, 3, - // 1, 2, 1, 2, 3, 0, 4, 8, 5, 10, 6, 5, 7, 8, 8, 7, 9, 1, 10, 5, 0, 0}, // out_idx_tfidf {0, - // 0.305159, 0, 0, 0, 0, 0, 0.305159, 0, 0.167441, 0, 0.0108292, 0, 0.129295, 0, 0.0108292, 0, - // 0.0108292, 0, 0.850086, 0, 0.0108292, 0, 0}}, // out_dists_tfidf + // out_dists_tfidf {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, // vals }; diff --git 
a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu index 2e5c0bf6d8..b02cc410c8 100644 --- a/cpp/test/sparse/preprocess_csr.cu +++ b/cpp/test/sparse/preprocess_csr.cu @@ -62,16 +62,65 @@ class SparsePreprocessCSR void Run(bool bm25_on) { - int k = 2; + int k = 2; + std::vector bm25_vals_h = {0.850086, + 1.15525, + 0.682645, + 0.860915, + 0.99021, + 0.860915, + 0.850086, + 0.850086, + 0.850086, + 1.25152}; // bm25 + std::vector tfidf_vals_h = {0.480453, + 0.7615, + 0.7615, + 0.960906, + 1.11558, + 0.960906, + 0.480453, + 0.480453, + 0.480453, + 0.7615}; // tfidf + std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 8, 5, 10, + 6, 5, 7, 8, 8, 7, 9, 1, 10, 5, 0, 0}; + std::vector out_dists_bm25_h = {0, 0.305159, 0, 0, 0, 0, 0, 0.305159, + 0, 0.167441, 0, 0.0108292, 0, 0.129295, 0, 0.0108292, + 0, 0.0108292, 0, 0.850086, 0, 0.0108292, 0, 0}; + std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, + 6, 5, 7, 4, 8, 4, 9, 1, 10, 1, 0, 0}; + std::vector out_dists_tfidf_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, + 0, 0.199406, 0, 0.154671, 0, 0.154671, 0, 0.199406, + 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}; + cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto indptr = raft::make_device_vector(handle, params.rows_h.size()); - auto indices = raft::make_device_vector(handle, params.columns_h.size()); - auto values = raft::make_device_vector(handle, params.values_h.size()); - auto result = raft::make_device_vector(handle, params.values_h.size()); + auto indptr = raft::make_device_vector(handle, params.rows_h.size()); + auto indices = raft::make_device_vector(handle, params.columns_h.size()); + auto values = raft::make_device_vector(handle, params.values_h.size()); + auto result = raft::make_device_vector(handle, params.values_h.size()); + auto bm25_vals = raft::make_device_vector(handle, bm25_vals_h.size()); + auto tfidf_vals = raft::make_device_vector(handle, tfidf_vals_h.size()); + auto out_idxs_bm25 = raft::make_device_vector(handle, out_idxs_bm25_h.size()); + auto out_idxs_tfidf = + raft::make_device_vector(handle, out_idxs_tfidf_h.size()); + auto out_dists_bm25 = + raft::make_device_vector(handle, out_dists_bm25_h.size()); + auto out_dists_tfidf = + raft::make_device_vector(handle, out_dists_tfidf_h.size()); raft::copy(indptr.data_handle(), params.rows_h.data(), params.rows_h.size(), stream); raft::copy(indices.data_handle(), params.columns_h.data(), params.columns_h.size(), stream); raft::copy(values.data_handle(), params.values_h.data(), params.values_h.size(), stream); + raft::copy(bm25_vals.data_handle(), bm25_vals_h.data(), bm25_vals_h.size(), stream); + raft::copy(tfidf_vals.data_handle(), tfidf_vals_h.data(), tfidf_vals_h.size(), stream); + raft::copy(out_idxs_bm25.data_handle(), out_idxs_bm25_h.data(), out_idxs_bm25_h.size(), stream); + raft::copy( + out_idxs_tfidf.data_handle(), out_idxs_tfidf_h.data(), out_idxs_tfidf_h.size(), stream); + raft::copy( + out_dists_bm25.data_handle(), out_dists_bm25_h.data(), out_dists_bm25_h.size(), stream); + raft::copy( + out_dists_tfidf.data_handle(), out_dists_tfidf_h.data(), out_dists_tfidf_h.size(), stream); auto csr_struct_view = raft::make_device_compressed_structure_view(indptr.data_handle(), indices.data_handle(), @@ -84,9 +133,19 @@ class SparsePreprocessCSR c_matrix.view().get_elements().data(), values.data_handle(), values.size(), stream); if (bm25_on) { - sparse::matrix::encode_bm25(handle, c_matrix.view(), result.view()); + sparse::matrix::encode_bm25(handle, c_matrix.view(), 
result.view()); + ASSERT_TRUE(raft::devArrMatch(bm25_vals.data_handle(), + result.data_handle(), + result.size(), + raft::CompareApprox(2e-5), + stream)); } else { - sparse::matrix::encode_tfidf(handle, c_matrix.view(), result.view()); + sparse::matrix::encode_tfidf(handle, c_matrix.view(), result.view()); + ASSERT_TRUE(raft::devArrMatch(tfidf_vals.data_handle(), + result.data_handle(), + result.size(), + raft::CompareApprox(2e-5), + stream)); } raft::update_device( @@ -107,10 +166,31 @@ class SparsePreprocessCSR c_matrix.structure_view().get_n_rows(), raft::distance::DistanceType::L1); - RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); + if (bm25_on) { + ASSERT_TRUE(raft::devArrMatch(out_idxs_bm25.data_handle(), + out_indices.data_handle(), + out_indices.size(), + raft::Compare(), + stream)); + ASSERT_TRUE(raft::devArrMatch(out_dists_bm25.data_handle(), + out_dists.data_handle(), + out_dists.size(), + raft::CompareApprox(2e-5), + stream)); + } else { + ASSERT_TRUE(raft::devArrMatch(out_idxs_tfidf.data_handle(), + out_indices.data_handle(), + out_indices.size(), + raft::Compare(), + stream)); + ASSERT_TRUE(raft::devArrMatch(out_dists_tfidf.data_handle(), + out_dists.data_handle(), + out_dists.size(), + raft::CompareApprox(2e-5), + stream)); + } - ASSERT_TRUE(values.size() == result.size()); - // raft::devArrMatch(, nnz, raft::Compare())); + RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); } protected: From ce8253e0ed81e3b539d0476dc8214869239e038d Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Sat, 6 Jul 2024 14:24:21 -0400 Subject: [PATCH 40/75] csr and coo tests passing, refactor feature preprocessing --- .../sparse/matrix/detail/preprocessing.cuh | 166 +++++++++++++----- .../raft/sparse/matrix/preprocessing.cuh | 13 +- cpp/test/sparse/preprocess_coo.cu | 44 +++-- cpp/test/sparse/preprocess_csr.cu | 40 ++--- 4 files changed, 169 insertions(+), 94 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index af41251a76..f18a6c1a12 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -89,9 +89,12 @@ void get_uniques_counts(raft::resources& handle, raft::device_vector_view data, raft::device_vector_view itr_vals, raft::device_vector_view keys_out, - raft::device_vector_view counts_out) + raft::device_vector_view counts_out, + int n_rows, + int n_cols) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); + raft::sparse::op::coo_sort(sort_vector.size(), secondary_vector.size(), data.size(), @@ -135,59 +138,98 @@ void create_mapped_vector(raft::resources& handle, } template -std::tuple sparse_search_preprocess(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view doc_lengths, - raft::device_vector_view term_counts) +void get_term_counts(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view term_counts, + T1 n_rows, + T1 n_cols) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto num_rows = raft::sparse::neighbors::get_n_components(rows.data_handle(), rows.size(), stream); - auto row_keys = raft::make_device_vector(handle, num_rows); - auto row_counts = raft::make_device_vector(handle, num_rows); - auto row_fill = raft::make_device_vector(handle, rows.size()); + auto row_keys = 
raft::make_device_vector(handle, num_rows); + auto row_counts = raft::make_device_vector(handle, num_rows); + auto row_fill = raft::make_device_vector(handle, rows.size()); // the amount of columns(documents) that each row(term) is found in thrust::fill(raft::resource::get_thrust_policy(handle), row_fill.data_handle(), row_fill.data_handle() + row_fill.size(), 1.0f); - get_uniques_counts( - handle, rows, columns, values, row_fill.view(), row_keys.view(), row_counts.view()); + get_uniques_counts(handle, + rows, + columns, + values, + row_fill.view(), + row_keys.view(), + row_counts.view(), + n_rows, + n_cols); + + create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), term_counts); +} + +template +std::tuple get_feature_data(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view doc_lengths, + T1 n_rows, + T1 n_cols) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); - create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), term_counts); auto num_cols = raft::sparse::neighbors::get_n_components(columns.data_handle(), columns.size(), stream); - auto col_keys = raft::make_device_vector(handle, num_cols); - auto col_counts = raft::make_device_vector(handle, num_cols); - - get_uniques_counts(handle, columns, rows, values, values, col_keys.view(), col_counts.view()); + auto col_keys = raft::make_device_vector(handle, num_cols); + auto col_counts = raft::make_device_vector(handle, num_cols); + get_uniques_counts( + handle, columns, rows, values, values, col_keys.view(), col_counts.view(), n_rows, n_cols); int total_document_lengths = thrust::reduce(raft::resource::get_thrust_policy(handle), col_counts.data_handle(), col_counts.data_handle() + col_counts.size()); float avg_doc_length = float(total_document_lengths) / col_keys.size(); - create_mapped_vector( - handle, columns, col_keys.view(), col_counts.view(), doc_lengths); + create_mapped_vector(handle, columns, col_keys.view(), col_counts.view(), doc_lengths); return {col_keys.size(), avg_doc_length}; } +template +std::tuple sparse_search_preprocess(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view doc_lengths, + raft::device_vector_view term_counts, + T1 n_rows, + T1 n_cols) +{ + auto [n_feature_keys, avg_feature_len] = + get_feature_data(handle, rows, columns, values, doc_lengths, n_rows, n_cols); + + get_term_counts(handle, rows, columns, values, term_counts, n_rows, n_cols); + + return {n_feature_keys, avg_feature_len}; +} + template -void encode_tfidf(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view values_out) +void base_encode_tfidf(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view values_out, + int n_rows, + int n_cols) { auto doc_lengths = raft::make_device_vector(handle, columns.size()); auto term_counts = raft::make_device_vector(handle, rows.size()); auto [doc_count, avg_doc_length] = sparse_search_preprocess( - handle, rows, columns, values, doc_lengths.view(), term_counts.view()); + handle, rows, columns, values, doc_lengths.view(), term_counts.view(), n_rows, n_cols); raft::linalg::map(handle, values_out, @@ -208,7 +250,13 @@ void encode_tfidf(raft::resources& handle, 
auto values = raft::make_device_vector_view(coo_in.get_elements().data(), coo_in.get_elements().size()); - encode_tfidf(handle, rows, columns, values, values_out); + base_encode_tfidf(handle, + rows, + columns, + values, + values_out, + coo_in.structure_view().get_n_rows(), + coo_in.structure_view().get_n_cols()); } template @@ -226,25 +274,37 @@ void encode_tfidf(raft::resources& handle, csr_in.get_elements().size()); auto rows = raft::make_device_vector(handle, values.size()); - raft::sparse::convert::csr_to_coo( - indptr.data_handle(), indptr.size(), rows.data_handle(), rows.size(), stream); - - encode_tfidf(handle, rows.view(), indices, values, values_out); + raft::sparse::convert::csr_to_coo(indptr.data_handle(), + csr_in.structure_view().get_n_rows(), + rows.data_handle(), + rows.size(), + stream); + + base_encode_tfidf(handle, + rows.view(), + indices, + values, + values_out, + csr_in.structure_view().get_n_rows(), + csr_in.structure_view().get_n_cols()); } template -void encode_bm25(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view values_out, - float k_param = 1.6f, - float b_param = 0.75f) +void base_encode_bm25(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view values_out, + int n_rows, + int n_cols, + float k_param = 1.6f, + float b_param = 0.75f) { - auto doc_lengths = raft::make_device_vector(handle, columns.size()); - auto term_counts = raft::make_device_vector(handle, rows.size()); + auto doc_lengths = raft::make_device_vector(handle, columns.size()); + auto term_counts = raft::make_device_vector(handle, rows.size()); + auto [doc_count, avg_doc_length] = sparse_search_preprocess( - handle, rows, columns, values, doc_lengths.view(), term_counts.view()); + handle, rows, columns, values, doc_lengths.view(), term_counts.view(), n_rows, n_cols); raft::linalg::map(handle, values_out, @@ -268,7 +328,13 @@ void encode_bm25(raft::resources& handle, auto values = raft::make_device_vector_view(coo_in.get_elements().data(), coo_in.get_elements().size()); - encode_bm25(handle, rows, columns, values, values_out); + base_encode_bm25(handle, + rows, + columns, + values, + values_out, + coo_in.structure_view().get_n_rows(), + coo_in.structure_view().get_n_cols()); } template @@ -288,10 +354,20 @@ void encode_bm25(raft::resources& handle, csr_in.get_elements().size()); auto rows = raft::make_device_vector(handle, values.size()); - raft::sparse::convert::csr_to_coo( - indptr.data_handle(), indptr.size(), rows.data_handle(), rows.size(), stream); - encode_bm25(handle, rows.view(), indices, values, values_out); + raft::sparse::convert::csr_to_coo(indptr.data_handle(), + csr_in.structure_view().get_n_rows(), + rows.data_handle(), + rows.size(), + stream); + + base_encode_bm25(handle, + rows.view(), + indices, + values, + values_out, + csr_in.structure_view().get_n_rows(), + csr_in.structure_view().get_n_cols()); } } // namespace raft::sparse::matrix::detail \ No newline at end of file diff --git a/cpp/include/raft/sparse/matrix/preprocessing.cuh b/cpp/include/raft/sparse/matrix/preprocessing.cuh index e72b74697b..10bf3794c9 100644 --- a/cpp/include/raft/sparse/matrix/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/preprocessing.cuh @@ -32,11 +32,13 @@ void encode_bm25(raft::resources& handle, raft::device_vector_view columns, raft::device_vector_view values, 
raft::device_vector_view values_out, + int n_rows, + int n_cols, float k_param = 1.6f, float b_param = 0.75) { - return matrix::detail::encode_bm25( - handle, rows, columns, values, values_out, k_param, b_param); + return matrix::detail::base_encode_bm25( + handle, rows, columns, values, values_out, n_rows, n_cols, k_param, b_param); } template @@ -60,9 +62,12 @@ void encode_tfidf(raft::resources& handle, raft::device_vector_view rows, raft::device_vector_view columns, raft::device_vector_view values, - raft::device_vector_view values_out) + raft::device_vector_view values_out, + int n_rows, + int n_cols) { - return matrix::detail::encode_tfidf(handle, rows, columns, values, values_out); + return matrix::detail::base_encode_tfidf( + handle, rows, columns, values, values_out, n_rows, n_cols); } template diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu index 1fc0f27b81..48f7802994 100644 --- a/cpp/test/sparse/preprocess_coo.cu +++ b/cpp/test/sparse/preprocess_coo.cu @@ -37,11 +37,7 @@ struct SparsePreprocessInputs { int n_cols; std::vector rows_h; std::vector columns_h; - // std::vector tfidf_vals_h; - // std::vector out_idxs_bm25_h; - // std::vector out_dists_bm25_h; - // std::vector out_idxs_tfidf_h; - // std::vector out_dists_tfidf_h; + std::vector values_h; }; @@ -70,33 +66,33 @@ class SparsePreprocessCoo std::vector bm25_vals_h = {0.850086, 1.15525, 0.682645, + 0.421071, + 0.421071, 0.860915, - 0.99021, - 0.860915, - 0.850086, - 0.850086, + 1.40796, + 1.15525, 0.850086, - 1.25152}; // bm25 + 1.39344}; // bm25 std::vector tfidf_vals_h = {0.480453, 0.7615, 0.7615, - 0.960906, - 1.11558, - 0.960906, 0.480453, 0.480453, + 0.960906, + 1.11558, + 0.7615, 0.480453, - 0.7615}; // tfidf - std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 8, 5, 10, - 6, 5, 7, 8, 8, 7, 9, 1, 10, 5, 0, 0}; - std::vector out_dists_bm25_h = {0, 0.305159, 0, 0, 0, 0, 0, 0.305159, - 0, 0.167441, 0, 0.0108292, 0, 0.129295, 0, 0.0108292, - 0, 0.0108292, 0, 0.850086, 0, 0.0108292, 0, 0}; - std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, - 6, 5, 7, 4, 8, 4, 9, 1, 10, 1, 0, 0}; - std::vector out_dists_tfidf_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, - 0, 0.199406, 0, 0.154671, 0, 0.154671, 0, 0.199406, - 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}; + 0.960906}; // tfidf + std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, + 5, 6, 7, 4, 8, 7, 9, 1, 10, 5, 0, 0}; + std::vector out_dists_bm25_h = {0, 0.305159, 0, 0, 0, 0, 0, 0.305159, + 0, 0.17827, 0, 0, 0, 0, 0, 0.17827, + 0, 0.54704, 0, 1.15525, 0, 0.429015, 0, 0}; + std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 10, + 5, 10, 7, 8, 8, 7, 9, 1, 5, 10, 0, 0}; + std::vector out_dists_tfidf_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, + 0, 0.199406, 0, 0, 0, 0, 0, 0.154671, + 0, 0.154671, 0, 0.7615, 0, 0, 0, 0}; cudaStream_t stream = raft::resource::get_cuda_stream(handle); auto rows = raft::make_device_vector(handle, params.rows_h.size()); diff --git a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu index b02cc410c8..e6c5c616e7 100644 --- a/cpp/test/sparse/preprocess_csr.cu +++ b/cpp/test/sparse/preprocess_csr.cu @@ -66,33 +66,33 @@ class SparsePreprocessCSR std::vector bm25_vals_h = {0.850086, 1.15525, 0.682645, + 0.421071, + 0.421071, 0.860915, - 0.99021, - 0.860915, - 0.850086, - 0.850086, + 1.40796, + 1.15525, 0.850086, - 1.25152}; // bm25 + 1.39344}; // bm25 std::vector tfidf_vals_h = {0.480453, 0.7615, 0.7615, - 0.960906, - 1.11558, - 0.960906, 0.480453, 
0.480453, + 0.960906, + 1.11558, + 0.7615, 0.480453, - 0.7615}; // tfidf - std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 8, 5, 10, - 6, 5, 7, 8, 8, 7, 9, 1, 10, 5, 0, 0}; - std::vector out_dists_bm25_h = {0, 0.305159, 0, 0, 0, 0, 0, 0.305159, - 0, 0.167441, 0, 0.0108292, 0, 0.129295, 0, 0.0108292, - 0, 0.0108292, 0, 0.850086, 0, 0.0108292, 0, 0}; - std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, - 6, 5, 7, 4, 8, 4, 9, 1, 10, 1, 0, 0}; - std::vector out_dists_tfidf_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, - 0, 0.199406, 0, 0.154671, 0, 0.154671, 0, 0.199406, - 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}; + 0.960906}; // tfidf + std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, + 5, 6, 7, 4, 8, 7, 9, 1, 10, 5, 0, 0}; + std::vector out_dists_bm25_h = {0, 0.305159, 0, 0, 0, 0, 0, 0.305159, + 0, 0.17827, 0, 0, 0, 0, 0, 0.17827, + 0, 0.54704, 0, 1.15525, 0, 0.429015, 0, 0}; + std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 10, + 5, 10, 7, 8, 8, 7, 9, 1, 5, 10, 0, 0}; + std::vector out_dists_tfidf_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, + 0, 0.199406, 0, 0, 0, 0, 0, 0.154671, + 0, 0.154671, 0, 0.7615, 0, 0, 0, 0}; cudaStream_t stream = raft::resource::get_cuda_stream(handle); auto indptr = raft::make_device_vector(handle, params.rows_h.size()); @@ -147,10 +147,8 @@ class SparsePreprocessCSR raft::CompareApprox(2e-5), stream)); } - raft::update_device( c_matrix.view().get_elements().data(), result.data_handle(), result.size(), stream); - auto out_indices = raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); auto out_dists = From 442cd7a1d79e0a6c1475aedbd090e047933867ee Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Sun, 7 Jul 2024 11:34:14 -0400 Subject: [PATCH 41/75] refactor names to make more generic --- .../sparse/matrix/detail/preprocessing.cuh | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index f18a6c1a12..4c8cda6238 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -138,13 +138,13 @@ void create_mapped_vector(raft::resources& handle, } template -void get_term_counts(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view term_counts, - T1 n_rows, - T1 n_cols) +void get_id_counts(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view id_counts, + T1 n_rows, + T1 n_cols) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); auto num_rows = @@ -169,7 +169,7 @@ void get_term_counts(raft::resources& handle, n_rows, n_cols); - create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), term_counts); + create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), id_counts); } template @@ -177,7 +177,7 @@ std::tuple get_feature_data(raft::resources& handle, raft::device_vector_view rows, raft::device_vector_view columns, raft::device_vector_view values, - raft::device_vector_view doc_lengths, + raft::device_vector_view feat_lengths, T1 n_rows, T1 n_cols) { @@ -190,13 +190,13 @@ std::tuple get_feature_data(raft::resources& handle, get_uniques_counts( handle, columns, rows, values, values, col_keys.view(), col_counts.view(), n_rows, 
n_cols); - int total_document_lengths = thrust::reduce(raft::resource::get_thrust_policy(handle), - col_counts.data_handle(), - col_counts.data_handle() + col_counts.size()); - float avg_doc_length = float(total_document_lengths) / col_keys.size(); + int total_feature_lengths = thrust::reduce(raft::resource::get_thrust_policy(handle), + col_counts.data_handle(), + col_counts.data_handle() + col_counts.size()); + float avg_feat_length = float(total_feature_lengths) / col_keys.size(); - create_mapped_vector(handle, columns, col_keys.view(), col_counts.view(), doc_lengths); - return {col_keys.size(), avg_doc_length}; + create_mapped_vector(handle, columns, col_keys.view(), col_counts.view(), feat_lengths); + return {col_keys.size(), avg_feat_length}; } template @@ -204,15 +204,15 @@ std::tuple sparse_search_preprocess(raft::resources& handle, raft::device_vector_view rows, raft::device_vector_view columns, raft::device_vector_view values, - raft::device_vector_view doc_lengths, - raft::device_vector_view term_counts, + raft::device_vector_view feat_lengths, + raft::device_vector_view id_counts, T1 n_rows, T1 n_cols) { auto [n_feature_keys, avg_feature_len] = - get_feature_data(handle, rows, columns, values, doc_lengths, n_rows, n_cols); + get_feature_data(handle, rows, columns, values, feat_lengths, n_rows, n_cols); - get_term_counts(handle, rows, columns, values, term_counts, n_rows, n_cols); + get_id_counts(handle, rows, columns, values, id_counts, n_rows, n_cols); return {n_feature_keys, avg_feature_len}; } @@ -226,16 +226,16 @@ void base_encode_tfidf(raft::resources& handle, int n_rows, int n_cols) { - auto doc_lengths = raft::make_device_vector(handle, columns.size()); - auto term_counts = raft::make_device_vector(handle, rows.size()); - auto [doc_count, avg_doc_length] = sparse_search_preprocess( - handle, rows, columns, values, doc_lengths.view(), term_counts.view(), n_rows, n_cols); + auto feat_lengths = raft::make_device_vector(handle, columns.size()); + auto id_counts = raft::make_device_vector(handle, rows.size()); + auto [feat_count, avg_feat_length] = sparse_search_preprocess( + handle, rows, columns, values, feat_lengths.view(), id_counts.view(), n_rows, n_cols); raft::linalg::map(handle, values_out, - tfidf(doc_count), + tfidf(feat_count), raft::make_const_mdspan(values), - raft::make_const_mdspan(term_counts.view())); + raft::make_const_mdspan(id_counts.view())); } template From b1720c74703292ab6e030d8ad90e04e9e9996322 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Sun, 7 Jul 2024 13:50:08 -0400 Subject: [PATCH 42/75] further refactor to feature and id variable names --- .../sparse/matrix/detail/preprocessing.cuh | 144 +++++++++--------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 4c8cda6238..0dba56fdde 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -34,35 +34,35 @@ namespace raft::sparse::matrix::detail { struct bm25 { - bm25(int num_docs, float avg_doc_len, float k_param, float b_param) + bm25(int num_feats, float avg_feat_len, float k_param, float b_param) { - total_docs = num_docs; - avg_doc_length = avg_doc_len; - k = k_param; - b = b_param; + total_feats = num_feats; + avg_feat_length = avg_feat_len; + k = k_param; + b = b_param; } template - float __device__ operator()(const T1& values, const T1& doc_length, const T1& num_docs_term_occ) + float 
__device__ operator()(const T1& values, const T1& feat_lengths, const T1& num_feats_id_occ) { - return raft::log(total_docs / (1 + num_docs_term_occ)) * - ((values * (k + 1)) / (values + k * (1 - b + b * (doc_length / avg_doc_length)))); + return raft::log(total_feats / (1 + num_feats_id_occ)) * + ((values * (k + 1)) / (values + k * (1 - b + b * (feat_lengths / avg_feat_length)))); } - float avg_doc_length; - int total_docs; + float avg_feat_length; + int total_feats; float k; float b; }; struct tfidf { - tfidf(int total_docs_param) { total_docs = total_docs_param; } + tfidf(int total_feats_param) { total_feats = total_feats_param; } template - float __device__ operator()(const T1& values, const T2& num_docs_term_occ) + float __device__ operator()(const T1& values, const T2& num_feats_id_occ) { - return raft::log(1 + values) * raft::log(total_docs / (1 + num_docs_term_occ)); + return raft::log(1 + values) * raft::log(total_feats / (1 + num_feats_id_occ)); } - int total_docs; + int total_feats; }; template @@ -82,14 +82,14 @@ struct mapper { raft::device_vector_view map; }; -template +template void get_uniques_counts(raft::resources& handle, - raft::device_vector_view sort_vector, - raft::device_vector_view secondary_vector, - raft::device_vector_view data, - raft::device_vector_view itr_vals, - raft::device_vector_view keys_out, - raft::device_vector_view counts_out, + raft::device_vector_view sort_vector, + raft::device_vector_view secondary_vector, + raft::device_vector_view data, + raft::device_vector_view itr_vals, + raft::device_vector_view keys_out, + raft::device_vector_view counts_out, int n_rows, int n_cols) { @@ -111,20 +111,20 @@ void get_uniques_counts(raft::resources& handle, counts_out.data_handle()); } -template +template void create_mapped_vector(raft::resources& handle, - const raft::device_vector_view origin, - const raft::device_vector_view keys, - const raft::device_vector_view counts, - raft::device_vector_view result) + const raft::device_vector_view origin, + const raft::device_vector_view keys, + const raft::device_vector_view counts, + raft::device_vector_view result) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto host_keys = raft::make_host_vector(handle, keys.size()); + auto host_keys = raft::make_host_vector(handle, keys.size()); raft::copy(host_keys.data_handle(), keys.data_handle(), keys.size(), stream); raft::linalg::map(handle, result, raft::cast_op{}, raft::make_const_mdspan(origin)); int new_key_size = host_keys(host_keys.size() - 1) + 1; - auto origin_map = raft::make_device_vector(handle, new_key_size); + auto origin_map = raft::make_device_vector(handle, new_key_size); thrust::scatter(raft::resource::get_thrust_policy(handle), counts.data_handle(), @@ -137,12 +137,12 @@ void create_mapped_vector(raft::resources& handle, mapper(raft::make_const_mdspan(origin_map.view()))); } -template +template void get_id_counts(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view id_counts, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view id_counts, T1 n_rows, T1 n_cols) { @@ -150,11 +150,11 @@ void get_id_counts(raft::resources& handle, auto num_rows = raft::sparse::neighbors::get_n_components(rows.data_handle(), rows.size(), stream); - auto row_keys = raft::make_device_vector(handle, num_rows); - auto row_counts = raft::make_device_vector(handle, num_rows); - 
auto row_fill = raft::make_device_vector(handle, rows.size()); + auto row_keys = raft::make_device_vector(handle, num_rows); + auto row_counts = raft::make_device_vector(handle, num_rows); + auto row_fill = raft::make_device_vector(handle, rows.size()); - // the amount of columns(documents) that each row(term) is found in + // the amount of columns(features) that each row(id) is found in thrust::fill(raft::resource::get_thrust_policy(handle), row_fill.data_handle(), row_fill.data_handle() + row_fill.size(), @@ -172,12 +172,12 @@ void get_id_counts(raft::resources& handle, create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), id_counts); } -template +template std::tuple get_feature_data(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view feat_lengths, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view feat_lengths, T1 n_rows, T1 n_cols) { @@ -185,8 +185,8 @@ std::tuple get_feature_data(raft::resources& handle, auto num_cols = raft::sparse::neighbors::get_n_components(columns.data_handle(), columns.size(), stream); - auto col_keys = raft::make_device_vector(handle, num_cols); - auto col_counts = raft::make_device_vector(handle, num_cols); + auto col_keys = raft::make_device_vector(handle, num_cols); + auto col_counts = raft::make_device_vector(handle, num_cols); get_uniques_counts( handle, columns, rows, values, values, col_keys.view(), col_counts.view(), n_rows, n_cols); @@ -199,13 +199,13 @@ std::tuple get_feature_data(raft::resources& handle, return {col_keys.size(), avg_feat_length}; } -template +template std::tuple sparse_search_preprocess(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view feat_lengths, - raft::device_vector_view id_counts, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view feat_lengths, + raft::device_vector_view id_counts, T1 n_rows, T1 n_cols) { @@ -226,9 +226,9 @@ void base_encode_tfidf(raft::resources& handle, int n_rows, int n_cols) { - auto feat_lengths = raft::make_device_vector(handle, columns.size()); - auto id_counts = raft::make_device_vector(handle, rows.size()); - auto [feat_count, avg_feat_length] = sparse_search_preprocess( + auto feat_lengths = raft::make_device_vector(handle, columns.size()); + auto id_counts = raft::make_device_vector(handle, rows.size()); + auto [feat_count, avg_feat_length] = sparse_search_preprocess( handle, rows, columns, values, feat_lengths.view(), id_counts.view(), n_rows, n_cols); raft::linalg::map(handle, @@ -273,12 +273,12 @@ void encode_tfidf(raft::resources& handle, auto values = raft::make_device_vector_view(csr_in.get_elements().data(), csr_in.get_elements().size()); - auto rows = raft::make_device_vector(handle, values.size()); - raft::sparse::convert::csr_to_coo(indptr.data_handle(), - csr_in.structure_view().get_n_rows(), - rows.data_handle(), - rows.size(), - stream); + auto rows = raft::make_device_vector(handle, values.size()); + raft::sparse::convert::csr_to_coo(indptr.data_handle(), + csr_in.structure_view().get_n_rows(), + rows.data_handle(), + rows.size(), + stream); base_encode_tfidf(handle, rows.view(), @@ -300,18 +300,18 @@ void base_encode_bm25(raft::resources& handle, float k_param = 1.6f, float b_param = 0.75f) { - auto 
doc_lengths = raft::make_device_vector(handle, columns.size()); - auto term_counts = raft::make_device_vector(handle, rows.size()); + auto feat_lengths = raft::make_device_vector(handle, columns.size()); + auto id_counts = raft::make_device_vector(handle, rows.size()); - auto [doc_count, avg_doc_length] = sparse_search_preprocess( - handle, rows, columns, values, doc_lengths.view(), term_counts.view(), n_rows, n_cols); + auto [feat_count, avg_feat_length] = sparse_search_preprocess( + handle, rows, columns, values, feat_lengths.view(), id_counts.view(), n_rows, n_cols); raft::linalg::map(handle, values_out, - bm25(doc_count, avg_doc_length, k_param, b_param), + bm25(feat_count, avg_feat_length, k_param, b_param), raft::make_const_mdspan(values), - raft::make_const_mdspan(doc_lengths.view()), - raft::make_const_mdspan(term_counts.view())); + raft::make_const_mdspan(feat_lengths.view()), + raft::make_const_mdspan(id_counts.view())); } template @@ -353,13 +353,13 @@ void encode_bm25(raft::resources& handle, auto values = raft::make_device_vector_view(csr_in.get_elements().data(), csr_in.get_elements().size()); - auto rows = raft::make_device_vector(handle, values.size()); + auto rows = raft::make_device_vector(handle, values.size()); - raft::sparse::convert::csr_to_coo(indptr.data_handle(), - csr_in.structure_view().get_n_rows(), - rows.data_handle(), - rows.size(), - stream); + raft::sparse::convert::csr_to_coo(indptr.data_handle(), + csr_in.structure_view().get_n_rows(), + rows.data_handle(), + rows.size(), + stream); base_encode_bm25(handle, rows.view(), From 3365ec3725f106f7f6b24f6a5a1dcd0553b3a591 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Mon, 8 Jul 2024 16:29:30 -0400 Subject: [PATCH 43/75] add documentation and refactor to use num rows and num cols from matrix --- .../sparse/matrix/detail/preprocessing.cuh | 196 +++++++++++++----- 1 file changed, 145 insertions(+), 51 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 0dba56fdde..fb00e1ff14 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -33,6 +33,14 @@ namespace raft::sparse::matrix::detail { +/** + * @brief Calculates the BM25 values for a target matrix. Term frequency is calculated using + * logarithmically scaled frequency. + * @param num_feats: The total number of features in the matrix + * @param avg_feat_len: The avg length of all features combined. + * @param k_param: K value required by BM25 algorithm. + * @param b_param: B value required by BM25 algorithm. + */ struct bm25 { bm25(int num_feats, float avg_feat_len, float k_param, float b_param) { @@ -54,6 +62,11 @@ struct bm25 { float b; }; +/** + * @brief Calculates the tfidf values for a target matrix. Term frequency is calculated using + * logarithmically scaled frequency. + * @param total_feats_param: The total number of features in the matrix + */ struct tfidf { tfidf(int total_feats_param) { total_feats = total_feats_param; } @@ -82,6 +95,16 @@ struct mapper { raft::device_vector_view map; }; +/** + * @brief Get unique counts + * @param handle: raft resource handle + * @param sort_vector: Input COO array that contains the keys. + * @param secondary_vector: Input with secondary keys of COO, (columns or rows). + * @param data: Input COO values array. + * @param itr_vals: Input array used to calculate counts. + * @param keys_out: Output array with one entry for each key.
(same size as counts_out) + * @param counts_out: Output array with cumulative sum for each key. (same size as keys_out) + */ template void get_uniques_counts(raft::resources& handle, raft::device_vector_view sort_vector, @@ -89,9 +112,7 @@ void get_uniques_counts(raft::resources& handle, raft::device_vector_view data, raft::device_vector_view itr_vals, raft::device_vector_view keys_out, - raft::device_vector_view counts_out, - int n_rows, - int n_cols) + raft::device_vector_view counts_out) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); @@ -111,6 +132,15 @@ void get_uniques_counts(raft::resources& handle, counts_out.data_handle()); } +/** + * @brief Compute cumulative sum for each unique value in the origin array + * @param handle: raft resource handle + * @param origin: Input array that has values to use for computation + * @param keys: Output array that has keys, should be the size of unique + * @param counts: Output array that contains the computed counts + * @param results: Output array that broadcasts the counts origin value positions. Same size as + * origin array. + */ template void create_mapped_vector(raft::resources& handle, const raft::device_vector_view origin, @@ -137,21 +167,27 @@ void create_mapped_vector(raft::resources& handle, mapper(raft::make_const_mdspan(origin_map.view()))); } +/** + * @brief Compute row(id) counts + * @param handle: raft resource handle + * @param rows: Input COO rows array + * @param columns: Input COO columns array + * @param values: Input COO values array + * @param id_counts: Output array that stores counts per row, broadcasted to same shape as rows. + * @param n_rows: Number of rows in matrix + */ template void get_id_counts(raft::resources& handle, raft::device_vector_view rows, raft::device_vector_view columns, raft::device_vector_view values, raft::device_vector_view id_counts, - T1 n_rows, - T1 n_cols) + T1 n_rows) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto num_rows = - raft::sparse::neighbors::get_n_components(rows.data_handle(), rows.size(), stream); - auto row_keys = raft::make_device_vector(handle, num_rows); - auto row_counts = raft::make_device_vector(handle, num_rows); + auto row_keys = raft::make_device_vector(handle, n_rows); + auto row_counts = raft::make_device_vector(handle, n_rows); auto row_fill = raft::make_device_vector(handle, rows.size()); // the amount of columns(features) that each row(id) is found in @@ -159,64 +195,82 @@ void get_id_counts(raft::resources& handle, row_fill.data_handle(), row_fill.data_handle() + row_fill.size(), 1.0f); - get_uniques_counts(handle, - rows, - columns, - values, - row_fill.view(), - row_keys.view(), - row_counts.view(), - n_rows, - n_cols); + get_uniques_counts( + handle, rows, columns, values, row_fill.view(), row_keys.view(), row_counts.view()); create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), id_counts); } +/** + * @brief Gather per feature mean values, returns the cumulative avg feature length. 
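/*
 * A small worked example of the counting helpers above (hypothetical data, not the test
 * fixture): for rows = {0, 0, 1, 3} and a fill value of 1.0 per nonzero, get_uniques_counts
 * reduces to keys = {0, 1, 3} and counts = {2, 1, 1}; create_mapped_vector then scatters those
 * counts back to the originating positions, so get_id_counts produces id_counts = {2, 2, 1, 1},
 * i.e. each nonzero carries the nonzero count of its row.
 */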
+ * @param handle: raft resource handle + * @param rows: Input COO rows array + * @param columns: Input COO columns array + * @param values: Input COO values array + * @param feat_lengths: Output array that stores mean per feature value + * @param n_cols: Number of columns in matrix + */ template -std::tuple get_feature_data(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view feat_lengths, - T1 n_rows, - T1 n_cols) +int get_feature_data(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view feat_lengths, + T1 n_cols) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto num_cols = - raft::sparse::neighbors::get_n_components(columns.data_handle(), columns.size(), stream); - auto col_keys = raft::make_device_vector(handle, num_cols); - auto col_counts = raft::make_device_vector(handle, num_cols); + auto col_keys = raft::make_device_vector(handle, n_cols); + auto col_counts = raft::make_device_vector(handle, n_cols); - get_uniques_counts( - handle, columns, rows, values, values, col_keys.view(), col_counts.view(), n_rows, n_cols); + get_uniques_counts(handle, columns, rows, values, values, col_keys.view(), col_counts.view()); int total_feature_lengths = thrust::reduce(raft::resource::get_thrust_policy(handle), col_counts.data_handle(), col_counts.data_handle() + col_counts.size()); - float avg_feat_length = float(total_feature_lengths) / col_keys.size(); + float avg_feat_length = float(total_feature_lengths) / n_cols; create_mapped_vector(handle, columns, col_keys.view(), col_counts.view(), feat_lengths); - return {col_keys.size(), avg_feat_length}; + return avg_feat_length; } +/** + * @brief Gather per feature mean values and id counts, returns the cumulative avg feature length. 
+ * @param handle: raft resource handle + * @param rows: Input COO rows array + * @param columns: Input COO columns array + * @param values: Input COO values array + * @param feat_lengths: Output array that stores mean per feature value + * @param id_counts: Output array that stores id(row) counts for nz values + * @param n_rows: Number of rows in matrix + * @param n_cols: Number of columns in matrix + */ template -std::tuple sparse_search_preprocess(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view feat_lengths, - raft::device_vector_view id_counts, - T1 n_rows, - T1 n_cols) +int sparse_search_preprocess(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view feat_lengths, + raft::device_vector_view id_counts, + T1 n_rows, + T1 n_cols) { - auto [n_feature_keys, avg_feature_len] = - get_feature_data(handle, rows, columns, values, feat_lengths, n_rows, n_cols); + auto avg_feature_len = get_feature_data(handle, rows, columns, values, feat_lengths, n_cols); - get_id_counts(handle, rows, columns, values, id_counts, n_rows, n_cols); + get_id_counts(handle, rows, columns, values, id_counts, n_rows); - return {n_feature_keys, avg_feature_len}; + return avg_feature_len; } +/** + * @brief Use TFIDF algorithm to encode features in COO sparse matrix + * @param handle: raft resource handle + * @param rows: Input COO rows array + * @param columns: Input COO columns array + * @param values: Input COO values array + * @param values_out: Output COO values array + * @param n_rows: Number of rows in matrix + * @param n_cols: Number of columns in matrix + */ template void base_encode_tfidf(raft::resources& handle, raft::device_vector_view rows, @@ -226,18 +280,24 @@ void base_encode_tfidf(raft::resources& handle, int n_rows, int n_cols) { - auto feat_lengths = raft::make_device_vector(handle, columns.size()); - auto id_counts = raft::make_device_vector(handle, rows.size()); - auto [feat_count, avg_feat_length] = sparse_search_preprocess( + auto feat_lengths = raft::make_device_vector(handle, columns.size()); + auto id_counts = raft::make_device_vector(handle, rows.size()); + auto avg_feat_length = sparse_search_preprocess( handle, rows, columns, values, feat_lengths.view(), id_counts.view(), n_rows, n_cols); raft::linalg::map(handle, values_out, - tfidf(feat_count), + tfidf(n_cols), raft::make_const_mdspan(values), raft::make_const_mdspan(id_counts.view())); } +/** + * @brief Use TFIDF algorithm to encode features in COO sparse matrix + * @param handle: raft resource handle + * @param coo_in: Input COO matrix + * @param values_out: Output COO values array + */ template void encode_tfidf(raft::resources& handle, raft::device_coo_matrix_view coo_in, @@ -259,6 +319,12 @@ void encode_tfidf(raft::resources& handle, coo_in.structure_view().get_n_cols()); } +/** + * @brief Use TFIDF algorithm to encode features in CSR sparse matrix + * @param handle: raft resource handle + * @param csr_in: Input CSR matrix + * @param values_out: Output values array + */ template void encode_tfidf(raft::resources& handle, raft::device_csr_matrix_view csr_in, @@ -289,6 +355,18 @@ void encode_tfidf(raft::resources& handle, csr_in.structure_view().get_n_cols()); } +/** + * @brief Use BM25 algorithm to encode features in COO sparse matrix + * @param handle: raft resource handle + * @param rows: Input COO rows array + * @param columns: 
Input COO columns array + * @param values: Input COO values array + * @param values_out: Output COO values array + * @param n_rows: Number of rows in matrix + * @param n_cols: Number of columns in matrix + * @param k_param: K value to use for BM25 algorithm + * @param b_param: B value to use for BM25 algorithm + */ template void base_encode_bm25(raft::resources& handle, raft::device_vector_view rows, @@ -303,17 +381,25 @@ void base_encode_bm25(raft::resources& handle, auto feat_lengths = raft::make_device_vector(handle, columns.size()); auto id_counts = raft::make_device_vector(handle, rows.size()); - auto [feat_count, avg_feat_length] = sparse_search_preprocess( + auto avg_feat_length = sparse_search_preprocess( handle, rows, columns, values, feat_lengths.view(), id_counts.view(), n_rows, n_cols); raft::linalg::map(handle, values_out, - bm25(feat_count, avg_feat_length, k_param, b_param), + bm25(n_cols, avg_feat_length, k_param, b_param), raft::make_const_mdspan(values), raft::make_const_mdspan(feat_lengths.view()), raft::make_const_mdspan(id_counts.view())); } +/** + * @brief Use BM25 algorithm to encode features in COO sparse matrix + * @param handle: raft resource handle + * @param coo_in: Input COO matrix + * @param values_out: Output values array + * @param k_param: K value to use for BM25 algorithm + * @param b_param: B value to use for BM25 algorithm + */ template void encode_bm25(raft::resources& handle, raft::device_coo_matrix_view coo_in, @@ -337,6 +423,14 @@ void encode_bm25(raft::resources& handle, coo_in.structure_view().get_n_cols()); } +/** + * @brief Use BM25 algorithm to encode features in CSR sparse matrix + * @param handle: raft resource handle + * @param csr_in: Input CSR matrix + * @param values_out: Output values array + * @param k_param: K value to use for BM25 algorithm + * @param b_param: B value to use for BM25 algorithm + */ template void encode_bm25(raft::resources& handle, raft::device_csr_matrix_view csr_in, From 06b6df204b4225565915cacf2b8361a515e659b6 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Mon, 8 Jul 2024 16:30:14 -0400 Subject: [PATCH 44/75] update tests to reflect values given refactor --- cpp/test/sparse/preprocess_coo.cu | 59 ++++++++++---------- cpp/test/sparse/preprocess_csr.cu | 89 +++++++++++++++---------------- 2 files changed, 69 insertions(+), 79 deletions(-) diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu index 48f7802994..9917f5bdb7 100644 --- a/cpp/test/sparse/preprocess_coo.cu +++ b/cpp/test/sparse/preprocess_coo.cu @@ -37,7 +37,6 @@ struct SparsePreprocessInputs { int n_cols; std::vector rows_h; std::vector columns_h; - std::vector values_h; }; @@ -63,36 +62,36 @@ class SparsePreprocessCoo void Run(bool bm25_on) { int k = 2; - std::vector bm25_vals_h = {0.850086, - 1.15525, - 0.682645, - 0.421071, - 0.421071, - 0.860915, - 1.40796, - 1.15525, - 0.850086, - 1.39344}; // bm25 - std::vector tfidf_vals_h = {0.480453, - 0.7615, - 0.7615, - 0.480453, - 0.480453, - 0.960906, - 1.11558, - 0.7615, - 0.480453, - 0.960906}; // tfidf + std::vector bm25_vals_h = {1.03581, + 1.44385, + 0.794119, + 0.476471, + 0.476471, + 1.02101, + 1.798, + 1.44385, + 1.03581, + 1.78677}; // bm25 + std::vector tfidf_vals_h = {0.635124, + 1.00665, + 1.00665, + 0.635124, + 0.635124, + 1.27025, + 1.47471, + 1.00665, + 0.635124, + 1.27025}; // tfidf std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, 5, 6, 7, 4, 8, 7, 9, 1, 10, 5, 0, 0}; - std::vector out_dists_bm25_h = {0, 0.305159, 0, 0, 0, 0, 0, 0.305159, - 0, 
0.17827, 0, 0, 0, 0, 0, 0.17827, - 0, 0.54704, 0, 1.15525, 0, 0.429015, 0, 0}; + std::vector out_dists_bm25_h = {0, 0.408045, 0, 0, 0, 0, 0, 0.408045, + 0, 0.226891, 0, 0, 0, 0, 0, 0.226891, + 0, 0.776995, 0, 1.44385, 0, 0.559336, 0, 0}; std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 10, 5, 10, 7, 8, 8, 7, 9, 1, 5, 10, 0, 0}; - std::vector out_dists_tfidf_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, - 0, 0.199406, 0, 0, 0, 0, 0, 0.154671, - 0, 0.154671, 0, 0.7615, 0, 0, 0, 0}; + std::vector out_dists_tfidf_h = {0, 0.371524, 0, 0, 0, 0, 0, 0.371524, + 0, 0.2636, 0, 0, 0, 0, 0, 0.204464, + 0, 0.204464, 0, 1.00665, 0, 0, 0, 0}; cudaStream_t stream = raft::resource::get_cuda_stream(handle); auto rows = raft::make_device_vector(handle, params.rows_h.size()); @@ -194,13 +193,9 @@ class SparsePreprocessCoo cudaStream_t stream; SparsePreprocessInputs params; + int n_rows, n_cols; rmm::device_uvector rows, columns; rmm::device_uvector values, result; - int n_rows, n_cols; - // rmm::device_uvector rows, columns, out_idxs_bm25, out_idxs_tfidf; - // rmm::device_uvector values, result, bm25_vals, tfidf_vals, out_dists_bm25, - // out_dists_tfidf; - bool bm25; }; using SparsePreprocessTfidfCoo = SparsePreprocessCoo; diff --git a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu index e6c5c616e7..cba2e394e4 100644 --- a/cpp/test/sparse/preprocess_csr.cu +++ b/cpp/test/sparse/preprocess_csr.cu @@ -63,36 +63,36 @@ class SparsePreprocessCSR void Run(bool bm25_on) { int k = 2; - std::vector bm25_vals_h = {0.850086, - 1.15525, - 0.682645, - 0.421071, - 0.421071, - 0.860915, - 1.40796, - 1.15525, - 0.850086, - 1.39344}; // bm25 - std::vector tfidf_vals_h = {0.480453, - 0.7615, - 0.7615, - 0.480453, - 0.480453, - 0.960906, - 1.11558, - 0.7615, - 0.480453, - 0.960906}; // tfidf + std::vector bm25_vals_h = {1.03581, + 1.44385, + 0.794119, + 0.476471, + 0.476471, + 1.02101, + 1.798, + 1.44385, + 1.03581, + 1.78677}; // bm25 + std::vector tfidf_vals_h = {0.635124, + 1.00665, + 1.00665, + 0.635124, + 0.635124, + 1.27025, + 1.47471, + 1.00665, + 0.635124, + 1.27025}; // tfidf std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, 5, 6, 7, 4, 8, 7, 9, 1, 10, 5, 0, 0}; - std::vector out_dists_bm25_h = {0, 0.305159, 0, 0, 0, 0, 0, 0.305159, - 0, 0.17827, 0, 0, 0, 0, 0, 0.17827, - 0, 0.54704, 0, 1.15525, 0, 0.429015, 0, 0}; + std::vector out_dists_bm25_h = {0, 0.408045, 0, 0, 0, 0, 0, 0.408045, + 0, 0.226891, 0, 0, 0, 0, 0, 0.226891, + 0, 0.776995, 0, 1.44385, 0, 0.559336, 0, 0}; std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 10, 5, 10, 7, 8, 8, 7, 9, 1, 5, 10, 0, 0}; - std::vector out_dists_tfidf_h = {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, - 0, 0.199406, 0, 0, 0, 0, 0, 0.154671, - 0, 0.154671, 0, 0.7615, 0, 0, 0, 0}; + std::vector out_dists_tfidf_h = {0, 0.371524, 0, 0, 0, 0, 0, 0.371524, + 0, 0.2636, 0, 0, 0, 0, 0, 0.204464, + 0, 0.204464, 0, 1.00665, 0, 0, 0, 0}; cudaStream_t stream = raft::resource::get_cuda_stream(handle); auto indptr = raft::make_device_vector(handle, params.rows_h.size()); @@ -127,9 +127,10 @@ class SparsePreprocessCSR params.n_rows, params.n_cols, int(values.size())); - auto c_matrix = raft::make_device_csr_matrix(handle, csr_struct_view); + auto c_matrix = + raft::make_device_csr_matrix(handle, csr_struct_view); - raft::update_device( + raft::update_device( c_matrix.view().get_elements().data(), values.data_handle(), values.size(), stream); if (bm25_on) { @@ -147,22 +148,22 @@ class SparsePreprocessCSR raft::CompareApprox(2e-5), 
stream)); } - raft::update_device( + raft::update_device( c_matrix.view().get_elements().data(), result.data_handle(), result.size(), stream); auto out_indices = - raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); + raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); auto out_dists = - raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); - - raft::sparse::neighbors::brute_force_knn(c_matrix, - c_matrix, - out_indices.data_handle(), - out_dists.data_handle(), - k, - handle, - c_matrix.structure_view().get_n_rows(), - c_matrix.structure_view().get_n_rows(), - raft::distance::DistanceType::L1); + raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); + + raft::sparse::neighbors::brute_force_knn(c_matrix, + c_matrix, + out_indices.data_handle(), + out_dists.data_handle(), + k, + handle, + c_matrix.structure_view().get_n_rows(), + c_matrix.structure_view().get_n_rows(), + raft::distance::DistanceType::L1); if (bm25_on) { ASSERT_TRUE(raft::devArrMatch(out_idxs_bm25.data_handle(), @@ -199,7 +200,6 @@ class SparsePreprocessCSR int n_rows, n_cols; rmm::device_uvector rows, columns; rmm::device_uvector values, result; - bool bm25; }; using SparsePreprocessTfidfCsr = SparsePreprocessCSR; @@ -214,11 +214,6 @@ const std::vector> sparse_preprocess_inputs = {0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9}, // rows {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, // cols {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, // vals - // {0.850086, 1.15525, 0.682645, 0.860915, 0.99021, 0.860915, 0.850086, 0.850086, - // 0.850086, 1.25152}, // bm25 {0.480453, 0.7615, 0.7615, 0.960906, 1.11558, 0.960906, 0.480453, - // 0.480453, 0.480453, 0.7615}, // tfidf {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, 6, 5, 7, 4, 8, 4, - // 9, 1, 10, 1, 0, 0}, //out_idx {0, 0.281047, 0, 0, 0, 0, 0, 0.281047, 0, 0.199406, 0, 0.154671, - // 0, 0.154671, 0, 0.199406, 0, 0.281047, 0, 0.480453, 0, 0.480453, 0, 0}}, //out_dists }; INSTANTIATE_TEST_CASE_P(SparsePreprocessCSR, From 034d2c51decf19494272cd5b710cca92e062c19b Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Mon, 8 Jul 2024 16:30:55 -0400 Subject: [PATCH 45/75] add documentation --- .../raft/sparse/matrix/preprocessing.cuh | 62 +++++++++++++++++-- 1 file changed, 58 insertions(+), 4 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/preprocessing.cuh b/cpp/include/raft/sparse/matrix/preprocessing.cuh index 10bf3794c9..e6c737efae 100644 --- a/cpp/include/raft/sparse/matrix/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/preprocessing.cuh @@ -26,6 +26,18 @@ namespace raft::sparse::matrix { +/** + * @brief Use BM25 algorithm to encode features in COO sparse matrix + * @param handle: raft resource handle + * @param rows: Input COO rows array + * @param columns: Input COO columns array + * @param values: Input COO values array + * @param values_out: Output COO values array + * @param n_rows: Number of rows in matrix + * @param n_cols: Number of columns in matrix + * @param k_param: K value to use for BM25 algorithm + * @param b_param: B value to use for BM25 algorithm + */ template void encode_bm25(raft::resources& handle, raft::device_vector_view rows, @@ -41,22 +53,52 @@ void encode_bm25(raft::resources& handle, handle, rows, columns, values, values_out, n_rows, n_cols, k_param, b_param); } +/** + * @brief Use BM25 algorithm to encode features in COO sparse matrix + * @param handle: raft resource handle + * @param coo_in: Input COO matrix + * @param values_out: Output values array + * @param 
k_param: K value to use for BM25 algorithm + * @param b_param: B value to use for BM25 algorithm + */ template void encode_bm25(raft::resources& handle, raft::device_coo_matrix_view coo_in, - raft::device_vector_view values_out) + raft::device_vector_view values_out, + float k_param = 1.6f, + float b_param = 0.75) { - return matrix::detail::encode_bm25(handle, coo_in, values_out); + return matrix::detail::encode_bm25(handle, coo_in, values_out, k_param, b_param); } +/** + * @brief Use BM25 algorithm to encode features in CSR sparse matrix + * @param handle: raft resource handle + * @param csr_in: Input CSR matrix + * @param values_out: Output values array + * @param k_param: K value to use for BM25 algorithm + * @param b_param: B value to use for BM25 algorithm + */ template void encode_bm25(raft::resources& handle, raft::device_csr_matrix_view csr_in, - raft::device_vector_view values_out) + raft::device_vector_view values_out, + float k_param = 1.6f, + float b_param = 0.75) { - return matrix::detail::encode_bm25(handle, csr_in, values_out); + return matrix::detail::encode_bm25(handle, csr_in, values_out, k_param, b_param); } +/** + * @brief Use TFIDF algorithm to encode features in COO sparse matrix + * @param handle: raft resource handle + * @param rows: Input COO rows array + * @param columns: Input COO columns array + * @param values: Input COO values array + * @param values_out: Output COO values array + * @param n_rows: Number of rows in matrix + * @param n_cols: Number of columns in matrix + */ template void encode_tfidf(raft::resources& handle, raft::device_vector_view rows, @@ -70,6 +112,12 @@ void encode_tfidf(raft::resources& handle, handle, rows, columns, values, values_out, n_rows, n_cols); } +/** + * @brief Use TFIDF algorithm to encode features in COO sparse matrix + * @param handle: raft resource handle + * @param coo_in: Input COO matrix + * @param values_out: Output COO values array + */ template void encode_tfidf(raft::resources& handle, raft::device_coo_matrix_view coo_in, @@ -78,6 +126,12 @@ void encode_tfidf(raft::resources& handle, return matrix::detail::encode_tfidf(handle, coo_in, values_out); } +/** + * @brief Use TFIDF algorithm to encode features in CSR sparse matrix + * @param handle: raft resource handle + * @param csr_in: Input CSR matrix + * @param values_out: Output values array + */ template void encode_tfidf(raft::resources& handle, raft::device_csr_matrix_view csr_in, From 04bb0070aa0bd4ef3d94411876cb86b8e3212d25 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Mon, 8 Jul 2024 17:31:17 -0400 Subject: [PATCH 46/75] removed unnecessary imports and variables --- .../sparse/matrix/detail/preprocessing.cuh | 4 ---- cpp/test/sparse/preprocess_coo.cu | 20 +++++-------------- cpp/test/sparse/preprocess_csr.cu | 12 +---------- 3 files changed, 6 insertions(+), 30 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index fb00e1ff14..74842dd2bd 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -18,17 +18,13 @@ #include #include #include -#include #include #include #include -#include #include -#include #include #include -#include #include namespace raft::sparse::matrix::detail { diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu index 9917f5bdb7..1e915b9cde 100644 --- a/cpp/test/sparse/preprocess_coo.cu +++ b/cpp/test/sparse/preprocess_coo.cu @@ -46,13 +46,7 @@ 
class SparsePreprocessCoo public: SparsePreprocessCoo() : params(::testing::TestWithParam>::GetParam()), - stream(resource::get_cuda_stream(handle)), - n_rows(params.n_rows), - n_cols(params.n_cols), - rows(params.rows_h.size(), stream), - columns(params.columns_h.size(), stream), - values(params.values_h.size(), stream), - result(params.values_h.size(), stream) + stream(resource::get_cuda_stream(handle)) { } @@ -193,9 +187,6 @@ class SparsePreprocessCoo cudaStream_t stream; SparsePreprocessInputs params; - int n_rows, n_cols; - rmm::device_uvector rows, columns; - rmm::device_uvector values, result; }; using SparsePreprocessTfidfCoo = SparsePreprocessCoo; @@ -205,11 +196,10 @@ using SparsePreprocessBm25Coo = SparsePreprocessCoo; TEST_P(SparsePreprocessBm25Coo, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { - {12, // n_rows - 5, // n_cols - {0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, // rows - {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, - // out_dists_tfidf + {12, // n_rows + 5, // n_cols + {0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, // rows + {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, // cols {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, // vals }; diff --git a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu index cba2e394e4..1c816892ed 100644 --- a/cpp/test/sparse/preprocess_csr.cu +++ b/cpp/test/sparse/preprocess_csr.cu @@ -46,14 +46,7 @@ class SparsePreprocessCSR public: SparsePreprocessCSR() : params(::testing::TestWithParam>::GetParam()), - stream(resource::get_cuda_stream(handle)), - n_rows(params.n_rows), - n_cols(params.n_cols), - rows(params.rows_h.size(), stream), - columns(params.columns_h.size(), stream), - values(params.values_h.size(), stream), - result(params.values_h.size(), stream) - + stream(resource::get_cuda_stream(handle)) { } @@ -197,9 +190,6 @@ class SparsePreprocessCSR cudaStream_t stream; SparsePreprocessInputs params; - int n_rows, n_cols; - rmm::device_uvector rows, columns; - rmm::device_uvector values, result; }; using SparsePreprocessTfidfCsr = SparsePreprocessCSR; From 3747291f50236feeaa4f7a2a28b839d86a2bf2e4 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Mon, 8 Jul 2024 23:03:20 -0400 Subject: [PATCH 47/75] fix function docs to reflect behavior more correctly --- cpp/include/raft/sparse/matrix/detail/preprocessing.cuh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 74842dd2bd..67c1c563ab 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -134,7 +134,7 @@ void get_uniques_counts(raft::resources& handle, * @param origin: Input array that has values to use for computation * @param keys: Output array that has keys, should be the size of unique * @param counts: Output array that contains the computed counts - * @param results: Output array that broadcasts the counts origin value positions. Same size as + * @param results: Output array that scatters the counts to origin value positions. Same size as * origin array. */ template @@ -169,7 +169,7 @@ void create_mapped_vector(raft::resources& handle, * @param rows: Input COO rows array * @param columns: Input COO columns array * @param values: Input COO values array - * @param id_counts: Output array that stores counts per row, broadcasted to same shape as rows. + * @param id_counts: Output array that stores counts per row, scattered to same shape as rows. 
* @param n_rows: Number of rows in matrix */ template From 3d66d4b4d0430970fb94fae09e462e9b97d791c1 Mon Sep 17 00:00:00 2001 From: Julio Perez <37191411+jperez999@users.noreply.github.com> Date: Wed, 10 Jul 2024 14:35:52 -0400 Subject: [PATCH 48/75] Update docs/source/contributing.md Co-authored-by: Bradley Dice --- docs/source/contributing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/contributing.md b/docs/source/contributing.md index 446e7b2a7b..47eb88c429 100755 --- a/docs/source/contributing.md +++ b/docs/source/contributing.md @@ -88,4 +88,4 @@ others know you are working on it. If you have any questions related to the implementation of the issue, ask them in the issue instead of the PR. ## Attribution -Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md +Portions adopted from https://github.com/pytorch/pytorch/blob/main/CONTRIBUTING.md From 2b70436623a7c5ed54d3510646a3ed6d01210d39 Mon Sep 17 00:00:00 2001 From: Julio Perez <37191411+jperez999@users.noreply.github.com> Date: Wed, 10 Jul 2024 14:36:00 -0400 Subject: [PATCH 49/75] Update .github/PULL_REQUEST_TEMPLATE.md Co-authored-by: Bradley Dice --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d889b2d593..f54ba61c7e 100755 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -36,7 +36,7 @@ Here are some guidelines to help the review process go smoothly. re-reviewed/it is hard to tell what is new etc!). Further, please do not rebase your branch on main/force push/rewrite history, doing any of these causes the context of any comments made by reviewers to be lost. If - conflicts occur against main they should be resolved by merging main + conflicts occur they should be resolved by merging the target branch into the branch used for making the pull request. Many thanks in advance for your cooperation! From 84ffc8b1684fa74978691741dd1376330e229270 Mon Sep 17 00:00:00 2001 From: Julio Perez <37191411+jperez999@users.noreply.github.com> Date: Wed, 10 Jul 2024 14:36:08 -0400 Subject: [PATCH 50/75] Update .github/PULL_REQUEST_TEMPLATE.md Co-authored-by: Bradley Dice --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f54ba61c7e..caf46f5d6a 100755 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -34,7 +34,7 @@ Here are some guidelines to help the review process go smoothly. features or make changes out of the scope of those requested by the reviewer (doing this just add delays as already reviewed code ends up having to be re-reviewed/it is hard to tell what is new etc!). Further, please do not - rebase your branch on main/force push/rewrite history, doing any of these + rebase your branch/force push/rewrite history, doing any of these causes the context of any comments made by reviewers to be lost. If conflicts occur they should be resolved by merging the target branch into the branch used for making the pull request. 
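For reference, the per-element weights documented in PATCH 43 through PATCH 47 can be restated as
plain host code. The sketch below is an illustration only: it mirrors the operator() bodies of the
bm25 and tfidf functors as they stand after PATCH 43 (here "value" stands in for the functor's
values argument, "df" for num_feats_id_occ, "feat_len" for feat_lengths, and "n_feats" for
total_feats), the k/b defaults of 1.6 and 0.75 are the ones added to the public API in PATCH 45,
the numbers in main() are made up, and PATCH 53 later revises the term-frequency term to
log(1 + value / feat_length), so nothing below is part of the patch series itself.

// Standalone host-side restatement of the bm25/tfidf device functors (illustration only).
#include <cmath>
#include <cstdio>

// TF-IDF weight for one non-zero entry, mirroring struct tfidf::operator().
// n_feats: total number of features in the matrix (total_feats in the functor)
// df:      the count the functor receives as num_feats_id_occ
float tfidf_weight(float value, int n_feats, float df)
{
  return std::log(1.0f + value) * std::log(n_feats / (1.0f + df));
}

// BM25 weight for one non-zero entry, mirroring struct bm25::operator().
// feat_len and avg_feat_len are the per-feature length and average feature length that
// sparse_search_preprocess scatters across the non-zeros; k and b default to the values
// exposed by the public encode_bm25 overloads in PATCH 45.
float bm25_weight(float value, int n_feats, float df,
                  float feat_len, float avg_feat_len,
                  float k = 1.6f, float b = 0.75f)
{
  float idf = std::log(n_feats / (1.0f + df));
  return idf * (value * (k + 1.0f)) /
         (value + k * (1.0f - b + b * (feat_len / avg_feat_len)));
}

int main()
{
  // Made-up entry: raw count 3.0 in a feature of length 4.0, average feature length 2.9,
  // with df = 2 out of n_feats = 5; these numbers are not taken from the unit tests.
  std::printf("tfidf: %f\n", tfidf_weight(3.0f, 5, 2.0f));
  std::printf("bm25 : %f\n", bm25_weight(3.0f, 5, 2.0f, 4.0f, 2.9f));
  return 0;
}

PATCH 44 then updates the hard-coded expected values in preprocess_coo.cu and preprocess_csr.cu so
the tests match the refactored computation.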
From dd404d7b8811472c3dbca92d16ff31c666a30a0e Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Wed, 7 Aug 2024 13:01:22 -0400 Subject: [PATCH 51/75] REL v24.08.00 release --- .../all_cuda-118_arch-aarch64.yaml | 14 ++++----- .../all_cuda-118_arch-x86_64.yaml | 14 ++++----- .../all_cuda-125_arch-aarch64.yaml | 14 ++++----- .../all_cuda-125_arch-x86_64.yaml | 14 ++++----- .../bench_ann_cuda-118_arch-aarch64.yaml | 4 +-- .../bench_ann_cuda-118_arch-x86_64.yaml | 4 +-- .../bench_ann_cuda-120_arch-aarch64.yaml | 4 +-- .../bench_ann_cuda-120_arch-x86_64.yaml | 4 +-- dependencies.yaml | 30 +++++++++---------- python/pylibraft/pyproject.toml | 4 +-- python/raft-dask/pyproject.toml | 8 ++--- 11 files changed, 57 insertions(+), 57 deletions(-) diff --git a/conda/environments/all_cuda-118_arch-aarch64.yaml b/conda/environments/all_cuda-118_arch-aarch64.yaml index 0728f1257d..f2ae41822c 100644 --- a/conda/environments/all_cuda-118_arch-aarch64.yaml +++ b/conda/environments/all_cuda-118_arch-aarch64.yaml @@ -20,8 +20,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.8.* -- distributed-ucxx==0.39.* +- dask-cuda==24.8.*,>=0.0.0a0 +- distributed-ucxx==0.39.*,>=0.0.0a0 - doxygen>=1.8.20 - gcc_linux-aarch64=11.* - graphviz @@ -35,7 +35,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.39.* +- libucxx==0.39.*,>=0.0.0a0 - nccl>=2.9.9 - ninja - numba>=0.57 @@ -44,18 +44,18 @@ dependencies: - nvcc_linux-aarch64=11.8 - pre-commit - pydata-sphinx-theme -- pylibraft==24.8.* +- pylibraft==24.8.*,>=0.0.0a0 - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.8.* +- rapids-dask-dependency==24.8.*,>=0.0.0a0 - recommonmark -- rmm==24.8.* +- rmm==24.8.*,>=0.0.0a0 - scikit-build-core>=0.7.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-aarch64==2.17 -- ucx-py==0.39.* +- ucx-py==0.39.*,>=0.0.0a0 name: all_cuda-118_arch-aarch64 diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index a46ace8a29..8f5f4d3088 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -20,8 +20,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.8.* -- distributed-ucxx==0.39.* +- dask-cuda==24.8.*,>=0.0.0a0 +- distributed-ucxx==0.39.*,>=0.0.0a0 - doxygen>=1.8.20 - gcc_linux-64=11.* - graphviz @@ -35,7 +35,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.39.* +- libucxx==0.39.*,>=0.0.0a0 - nccl>=2.9.9 - ninja - numba>=0.57 @@ -44,18 +44,18 @@ dependencies: - nvcc_linux-64=11.8 - pre-commit - pydata-sphinx-theme -- pylibraft==24.8.* +- pylibraft==24.8.*,>=0.0.0a0 - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.8.* +- rapids-dask-dependency==24.8.*,>=0.0.0a0 - recommonmark -- rmm==24.8.* +- rmm==24.8.*,>=0.0.0a0 - scikit-build-core>=0.7.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-64==2.17 -- ucx-py==0.39.* +- ucx-py==0.39.*,>=0.0.0a0 name: all_cuda-118_arch-x86_64 diff --git a/conda/environments/all_cuda-125_arch-aarch64.yaml b/conda/environments/all_cuda-125_arch-aarch64.yaml index 1f7604a5b5..2042156224 100644 --- a/conda/environments/all_cuda-125_arch-aarch64.yaml +++ b/conda/environments/all_cuda-125_arch-aarch64.yaml @@ -21,8 +21,8 @@ dependencies: - cupy>=12.0.0 - 
cxx-compiler - cython>=3.0.0 -- dask-cuda==24.8.* -- distributed-ucxx==0.39.* +- dask-cuda==24.8.*,>=0.0.0a0 +- distributed-ucxx==0.39.*,>=0.0.0a0 - doxygen>=1.8.20 - gcc_linux-aarch64=11.* - graphviz @@ -32,7 +32,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.39.* +- libucxx==0.39.*,>=0.0.0a0 - nccl>=2.9.9 - ninja - numba>=0.57 @@ -40,18 +40,18 @@ dependencies: - numpydoc - pre-commit - pydata-sphinx-theme -- pylibraft==24.8.* +- pylibraft==24.8.*,>=0.0.0a0 - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.8.* +- rapids-dask-dependency==24.8.*,>=0.0.0a0 - recommonmark -- rmm==24.8.* +- rmm==24.8.*,>=0.0.0a0 - scikit-build-core>=0.7.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-aarch64==2.17 -- ucx-py==0.39.* +- ucx-py==0.39.*,>=0.0.0a0 name: all_cuda-125_arch-aarch64 diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml index 576666e2f4..a2586cc211 100644 --- a/conda/environments/all_cuda-125_arch-x86_64.yaml +++ b/conda/environments/all_cuda-125_arch-x86_64.yaml @@ -21,8 +21,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.8.* -- distributed-ucxx==0.39.* +- dask-cuda==24.8.*,>=0.0.0a0 +- distributed-ucxx==0.39.*,>=0.0.0a0 - doxygen>=1.8.20 - gcc_linux-64=11.* - graphviz @@ -32,7 +32,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.39.* +- libucxx==0.39.*,>=0.0.0a0 - nccl>=2.9.9 - ninja - numba>=0.57 @@ -40,18 +40,18 @@ dependencies: - numpydoc - pre-commit - pydata-sphinx-theme -- pylibraft==24.8.* +- pylibraft==24.8.*,>=0.0.0a0 - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.8.* +- rapids-dask-dependency==24.8.*,>=0.0.0a0 - recommonmark -- rmm==24.8.* +- rmm==24.8.*,>=0.0.0a0 - scikit-build-core>=0.7.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-64==2.17 -- ucx-py==0.39.* +- ucx-py==0.39.*,>=0.0.0a0 name: all_cuda-125_arch-x86_64 diff --git a/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml b/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml index 1116bbb971..000a8f4a1c 100644 --- a/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml +++ b/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml @@ -30,7 +30,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.39.* +- libucxx==0.39.*,>=0.0.0a0 - matplotlib - nccl>=2.9.9 - ninja @@ -40,7 +40,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.8.* +- rmm==24.8.*,>=0.0.0a0 - scikit-build-core>=0.7.0 - sysroot_linux-aarch64==2.17 name: bench_ann_cuda-118_arch-aarch64 diff --git a/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml b/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml index 85121af42f..52b3a8dc69 100644 --- a/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml +++ b/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml @@ -30,7 +30,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.39.* +- libucxx==0.39.*,>=0.0.0a0 - matplotlib - nccl>=2.9.9 - ninja @@ -40,7 +40,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.8.* +- rmm==24.8.*,>=0.0.0a0 - scikit-build-core>=0.7.0 - sysroot_linux-64==2.17 name: bench_ann_cuda-118_arch-x86_64 diff --git 
a/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml b/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml index 7abf661289..27baeda4b8 100644 --- a/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml +++ b/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml @@ -27,7 +27,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.39.* +- libucxx==0.39.*,>=0.0.0a0 - matplotlib - nccl>=2.9.9 - ninja @@ -36,7 +36,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.8.* +- rmm==24.8.*,>=0.0.0a0 - scikit-build-core>=0.7.0 - sysroot_linux-aarch64==2.17 name: bench_ann_cuda-120_arch-aarch64 diff --git a/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml b/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml index e9f299cf67..5274d56bf6 100644 --- a/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml +++ b/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml @@ -27,7 +27,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.39.* +- libucxx==0.39.*,>=0.0.0a0 - matplotlib - nccl>=2.9.9 - ninja @@ -36,7 +36,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.8.* +- rmm==24.8.*,>=0.0.0a0 - scikit-build-core>=0.7.0 - sysroot_linux-64==2.17 name: bench_ann_cuda-120_arch-x86_64 diff --git a/dependencies.yaml b/dependencies.yaml index 34e7998ddf..e1cc919d83 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -170,7 +170,7 @@ dependencies: - c-compiler - cxx-compiler - nccl>=2.9.9 - - libucxx==0.39.* + - libucxx==0.39.*,>=0.0.0a0 specific: - output_types: conda matrices: @@ -209,7 +209,7 @@ dependencies: common: - output_types: [conda] packages: - - &rmm_unsuffixed rmm==24.8.* + - &rmm_unsuffixed rmm==24.8.*,>=0.0.0a0 - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -236,12 +236,12 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - &rmm_cu12 rmm-cu12==24.8.* + - &rmm_cu12 rmm-cu12==24.8.*,>=0.0.0a0 - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - &rmm_cu11 rmm-cu11==24.8.* + - &rmm_cu11 rmm-cu11==24.8.*,>=0.0.0a0 - {matrix: null, packages: [*rmm_unsuffixed] } checks: common: @@ -479,15 +479,15 @@ dependencies: common: - output_types: [conda, pyproject] packages: - - dask-cuda==24.8.* + - dask-cuda==24.8.*,>=0.0.0a0 - joblib>=0.11 - numba>=0.57 - *numpy - - rapids-dask-dependency==24.8.* + - rapids-dask-dependency==24.8.*,>=0.0.0a0 - output_types: conda packages: - - &pylibraft_unsuffixed pylibraft==24.8.* - - &ucx_py_unsuffixed ucx-py==0.39.* + - &pylibraft_unsuffixed pylibraft==24.8.*,>=0.0.0a0 + - &ucx_py_unsuffixed ucx-py==0.39.*,>=0.0.0a0 - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -501,14 +501,14 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - &pylibraft_cu12 pylibraft-cu12==24.8.* - - &ucx_py_cu12 ucx-py-cu12==0.39.* + - &pylibraft_cu12 pylibraft-cu12==24.8.*,>=0.0.0a0 + - &ucx_py_cu12 ucx-py-cu12==0.39.*,>=0.0.0a0 - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - &pylibraft_cu11 pylibraft-cu11==24.8.* - - &ucx_py_cu11 ucx-py-cu11==0.39.* + - &pylibraft_cu11 pylibraft-cu11==24.8.*,>=0.0.0a0 + - &ucx_py_cu11 ucx-py-cu11==0.39.*,>=0.0.0a0 - {matrix: null, packages: [*pylibraft_unsuffixed, *ucx_py_unsuffixed]} test_python_common: common: @@ -528,7 +528,7 @@ dependencies: packages: # UCXX is not currently a hard-dependency thus only installed during 
tests, # this will change in the future. - - &distributed_ucxx_unsuffixed distributed-ucxx==0.39.* + - &distributed_ucxx_unsuffixed distributed-ucxx==0.39.*,>=0.0.0a0 - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -541,12 +541,12 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - distributed-ucxx-cu12==0.39.* + - distributed-ucxx-cu12==0.39.*,>=0.0.0a0 - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - distributed-ucxx-cu11==0.39.* + - distributed-ucxx-cu11==0.39.*,>=0.0.0a0 - {matrix: null, packages: [*distributed_ucxx_unsuffixed]} depends_on_ucx_build: common: diff --git a/python/pylibraft/pyproject.toml b/python/pylibraft/pyproject.toml index 8b61df5d1b..e32cf5f902 100644 --- a/python/pylibraft/pyproject.toml +++ b/python/pylibraft/pyproject.toml @@ -33,7 +33,7 @@ requires-python = ">=3.9" dependencies = [ "cuda-python", "numpy>=1.23,<2.0a0", - "rmm==24.8.*", + "rmm==24.8.*,>=0.0.0a0", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", @@ -120,7 +120,7 @@ requires = [ "cuda-python", "cython>=3.0.0", "ninja", - "rmm==24.8.*", + "rmm==24.8.*,>=0.0.0a0", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. dependencies-file = "../../dependencies.yaml" matrix-entry = "cuda_suffixed=true" diff --git a/python/raft-dask/pyproject.toml b/python/raft-dask/pyproject.toml index 29c0db0048..99afd1379a 100644 --- a/python/raft-dask/pyproject.toml +++ b/python/raft-dask/pyproject.toml @@ -31,14 +31,14 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.9" dependencies = [ - "dask-cuda==24.8.*", + "dask-cuda==24.8.*,>=0.0.0a0", "distributed-ucxx==0.39.*", "joblib>=0.11", "numba>=0.57", "numpy>=1.23,<2.0a0", - "pylibraft==24.8.*", - "rapids-dask-dependency==24.8.*", - "ucx-py==0.39.*", + "pylibraft==24.8.*,>=0.0.0a0", + "rapids-dask-dependency==24.8.*,>=0.0.0a0", + "ucx-py==0.39.*,>=0.0.0a0", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. 
classifiers = [ "Intended Audience :: Developers", From 185da16b2208f527c9cbb76796c311d3aab35814 Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Wed, 7 Aug 2024 14:37:41 -0400 Subject: [PATCH 52/75] REL v24.08.01 release --- VERSION | 2 +- .../all_cuda-118_arch-aarch64.yaml | 14 ++++----- .../all_cuda-118_arch-x86_64.yaml | 14 ++++----- .../all_cuda-125_arch-aarch64.yaml | 14 ++++----- .../all_cuda-125_arch-x86_64.yaml | 14 ++++----- .../bench_ann_cuda-118_arch-aarch64.yaml | 4 +-- .../bench_ann_cuda-118_arch-x86_64.yaml | 4 +-- .../bench_ann_cuda-120_arch-aarch64.yaml | 4 +-- .../bench_ann_cuda-120_arch-x86_64.yaml | 4 +-- dependencies.yaml | 30 +++++++++---------- python/pylibraft/pyproject.toml | 4 +-- python/raft-dask/pyproject.toml | 8 ++--- 12 files changed, 58 insertions(+), 58 deletions(-) diff --git a/VERSION b/VERSION index ec8489fda9..4b234d35fa 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -24.08.00 +24.08.01 diff --git a/conda/environments/all_cuda-118_arch-aarch64.yaml b/conda/environments/all_cuda-118_arch-aarch64.yaml index f2ae41822c..0728f1257d 100644 --- a/conda/environments/all_cuda-118_arch-aarch64.yaml +++ b/conda/environments/all_cuda-118_arch-aarch64.yaml @@ -20,8 +20,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.8.*,>=0.0.0a0 -- distributed-ucxx==0.39.*,>=0.0.0a0 +- dask-cuda==24.8.* +- distributed-ucxx==0.39.* - doxygen>=1.8.20 - gcc_linux-aarch64=11.* - graphviz @@ -35,7 +35,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.39.*,>=0.0.0a0 +- libucxx==0.39.* - nccl>=2.9.9 - ninja - numba>=0.57 @@ -44,18 +44,18 @@ dependencies: - nvcc_linux-aarch64=11.8 - pre-commit - pydata-sphinx-theme -- pylibraft==24.8.*,>=0.0.0a0 +- pylibraft==24.8.* - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.8.*,>=0.0.0a0 +- rapids-dask-dependency==24.8.* - recommonmark -- rmm==24.8.*,>=0.0.0a0 +- rmm==24.8.* - scikit-build-core>=0.7.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-aarch64==2.17 -- ucx-py==0.39.*,>=0.0.0a0 +- ucx-py==0.39.* name: all_cuda-118_arch-aarch64 diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index 8f5f4d3088..a46ace8a29 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -20,8 +20,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.8.*,>=0.0.0a0 -- distributed-ucxx==0.39.*,>=0.0.0a0 +- dask-cuda==24.8.* +- distributed-ucxx==0.39.* - doxygen>=1.8.20 - gcc_linux-64=11.* - graphviz @@ -35,7 +35,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.39.*,>=0.0.0a0 +- libucxx==0.39.* - nccl>=2.9.9 - ninja - numba>=0.57 @@ -44,18 +44,18 @@ dependencies: - nvcc_linux-64=11.8 - pre-commit - pydata-sphinx-theme -- pylibraft==24.8.*,>=0.0.0a0 +- pylibraft==24.8.* - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.8.*,>=0.0.0a0 +- rapids-dask-dependency==24.8.* - recommonmark -- rmm==24.8.*,>=0.0.0a0 +- rmm==24.8.* - scikit-build-core>=0.7.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-64==2.17 -- ucx-py==0.39.*,>=0.0.0a0 +- ucx-py==0.39.* name: all_cuda-118_arch-x86_64 diff --git a/conda/environments/all_cuda-125_arch-aarch64.yaml b/conda/environments/all_cuda-125_arch-aarch64.yaml index 
2042156224..1f7604a5b5 100644 --- a/conda/environments/all_cuda-125_arch-aarch64.yaml +++ b/conda/environments/all_cuda-125_arch-aarch64.yaml @@ -21,8 +21,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.8.*,>=0.0.0a0 -- distributed-ucxx==0.39.*,>=0.0.0a0 +- dask-cuda==24.8.* +- distributed-ucxx==0.39.* - doxygen>=1.8.20 - gcc_linux-aarch64=11.* - graphviz @@ -32,7 +32,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.39.*,>=0.0.0a0 +- libucxx==0.39.* - nccl>=2.9.9 - ninja - numba>=0.57 @@ -40,18 +40,18 @@ dependencies: - numpydoc - pre-commit - pydata-sphinx-theme -- pylibraft==24.8.*,>=0.0.0a0 +- pylibraft==24.8.* - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.8.*,>=0.0.0a0 +- rapids-dask-dependency==24.8.* - recommonmark -- rmm==24.8.*,>=0.0.0a0 +- rmm==24.8.* - scikit-build-core>=0.7.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-aarch64==2.17 -- ucx-py==0.39.*,>=0.0.0a0 +- ucx-py==0.39.* name: all_cuda-125_arch-aarch64 diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml index a2586cc211..576666e2f4 100644 --- a/conda/environments/all_cuda-125_arch-x86_64.yaml +++ b/conda/environments/all_cuda-125_arch-x86_64.yaml @@ -21,8 +21,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.8.*,>=0.0.0a0 -- distributed-ucxx==0.39.*,>=0.0.0a0 +- dask-cuda==24.8.* +- distributed-ucxx==0.39.* - doxygen>=1.8.20 - gcc_linux-64=11.* - graphviz @@ -32,7 +32,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.39.*,>=0.0.0a0 +- libucxx==0.39.* - nccl>=2.9.9 - ninja - numba>=0.57 @@ -40,18 +40,18 @@ dependencies: - numpydoc - pre-commit - pydata-sphinx-theme -- pylibraft==24.8.*,>=0.0.0a0 +- pylibraft==24.8.* - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.8.*,>=0.0.0a0 +- rapids-dask-dependency==24.8.* - recommonmark -- rmm==24.8.*,>=0.0.0a0 +- rmm==24.8.* - scikit-build-core>=0.7.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-64==2.17 -- ucx-py==0.39.*,>=0.0.0a0 +- ucx-py==0.39.* name: all_cuda-125_arch-x86_64 diff --git a/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml b/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml index 000a8f4a1c..1116bbb971 100644 --- a/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml +++ b/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml @@ -30,7 +30,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.39.*,>=0.0.0a0 +- libucxx==0.39.* - matplotlib - nccl>=2.9.9 - ninja @@ -40,7 +40,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.8.*,>=0.0.0a0 +- rmm==24.8.* - scikit-build-core>=0.7.0 - sysroot_linux-aarch64==2.17 name: bench_ann_cuda-118_arch-aarch64 diff --git a/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml b/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml index 52b3a8dc69..85121af42f 100644 --- a/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml +++ b/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml @@ -30,7 +30,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.39.*,>=0.0.0a0 +- libucxx==0.39.* - matplotlib - nccl>=2.9.9 - ninja @@ -40,7 +40,7 @@ dependencies: - pandas - pyyaml - 
rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.8.*,>=0.0.0a0 +- rmm==24.8.* - scikit-build-core>=0.7.0 - sysroot_linux-64==2.17 name: bench_ann_cuda-118_arch-x86_64 diff --git a/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml b/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml index 27baeda4b8..7abf661289 100644 --- a/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml +++ b/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml @@ -27,7 +27,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.39.*,>=0.0.0a0 +- libucxx==0.39.* - matplotlib - nccl>=2.9.9 - ninja @@ -36,7 +36,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.8.*,>=0.0.0a0 +- rmm==24.8.* - scikit-build-core>=0.7.0 - sysroot_linux-aarch64==2.17 name: bench_ann_cuda-120_arch-aarch64 diff --git a/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml b/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml index 5274d56bf6..e9f299cf67 100644 --- a/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml +++ b/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml @@ -27,7 +27,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.39.*,>=0.0.0a0 +- libucxx==0.39.* - matplotlib - nccl>=2.9.9 - ninja @@ -36,7 +36,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.8.*,>=0.0.0a0 +- rmm==24.8.* - scikit-build-core>=0.7.0 - sysroot_linux-64==2.17 name: bench_ann_cuda-120_arch-x86_64 diff --git a/dependencies.yaml b/dependencies.yaml index e1cc919d83..34e7998ddf 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -170,7 +170,7 @@ dependencies: - c-compiler - cxx-compiler - nccl>=2.9.9 - - libucxx==0.39.*,>=0.0.0a0 + - libucxx==0.39.* specific: - output_types: conda matrices: @@ -209,7 +209,7 @@ dependencies: common: - output_types: [conda] packages: - - &rmm_unsuffixed rmm==24.8.*,>=0.0.0a0 + - &rmm_unsuffixed rmm==24.8.* - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -236,12 +236,12 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - &rmm_cu12 rmm-cu12==24.8.*,>=0.0.0a0 + - &rmm_cu12 rmm-cu12==24.8.* - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - &rmm_cu11 rmm-cu11==24.8.*,>=0.0.0a0 + - &rmm_cu11 rmm-cu11==24.8.* - {matrix: null, packages: [*rmm_unsuffixed] } checks: common: @@ -479,15 +479,15 @@ dependencies: common: - output_types: [conda, pyproject] packages: - - dask-cuda==24.8.*,>=0.0.0a0 + - dask-cuda==24.8.* - joblib>=0.11 - numba>=0.57 - *numpy - - rapids-dask-dependency==24.8.*,>=0.0.0a0 + - rapids-dask-dependency==24.8.* - output_types: conda packages: - - &pylibraft_unsuffixed pylibraft==24.8.*,>=0.0.0a0 - - &ucx_py_unsuffixed ucx-py==0.39.*,>=0.0.0a0 + - &pylibraft_unsuffixed pylibraft==24.8.* + - &ucx_py_unsuffixed ucx-py==0.39.* - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -501,14 +501,14 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - &pylibraft_cu12 pylibraft-cu12==24.8.*,>=0.0.0a0 - - &ucx_py_cu12 ucx-py-cu12==0.39.*,>=0.0.0a0 + - &pylibraft_cu12 pylibraft-cu12==24.8.* + - &ucx_py_cu12 ucx-py-cu12==0.39.* - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - &pylibraft_cu11 pylibraft-cu11==24.8.*,>=0.0.0a0 - - &ucx_py_cu11 ucx-py-cu11==0.39.*,>=0.0.0a0 + - &pylibraft_cu11 pylibraft-cu11==24.8.* + - &ucx_py_cu11 ucx-py-cu11==0.39.* - {matrix: null, packages: 
[*pylibraft_unsuffixed, *ucx_py_unsuffixed]} test_python_common: common: @@ -528,7 +528,7 @@ dependencies: packages: # UCXX is not currently a hard-dependency thus only installed during tests, # this will change in the future. - - &distributed_ucxx_unsuffixed distributed-ucxx==0.39.*,>=0.0.0a0 + - &distributed_ucxx_unsuffixed distributed-ucxx==0.39.* - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -541,12 +541,12 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - distributed-ucxx-cu12==0.39.*,>=0.0.0a0 + - distributed-ucxx-cu12==0.39.* - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - distributed-ucxx-cu11==0.39.*,>=0.0.0a0 + - distributed-ucxx-cu11==0.39.* - {matrix: null, packages: [*distributed_ucxx_unsuffixed]} depends_on_ucx_build: common: diff --git a/python/pylibraft/pyproject.toml b/python/pylibraft/pyproject.toml index e32cf5f902..8b61df5d1b 100644 --- a/python/pylibraft/pyproject.toml +++ b/python/pylibraft/pyproject.toml @@ -33,7 +33,7 @@ requires-python = ">=3.9" dependencies = [ "cuda-python", "numpy>=1.23,<2.0a0", - "rmm==24.8.*,>=0.0.0a0", + "rmm==24.8.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", @@ -120,7 +120,7 @@ requires = [ "cuda-python", "cython>=3.0.0", "ninja", - "rmm==24.8.*,>=0.0.0a0", + "rmm==24.8.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. dependencies-file = "../../dependencies.yaml" matrix-entry = "cuda_suffixed=true" diff --git a/python/raft-dask/pyproject.toml b/python/raft-dask/pyproject.toml index 99afd1379a..29c0db0048 100644 --- a/python/raft-dask/pyproject.toml +++ b/python/raft-dask/pyproject.toml @@ -31,14 +31,14 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.9" dependencies = [ - "dask-cuda==24.8.*,>=0.0.0a0", + "dask-cuda==24.8.*", "distributed-ucxx==0.39.*", "joblib>=0.11", "numba>=0.57", "numpy>=1.23,<2.0a0", - "pylibraft==24.8.*,>=0.0.0a0", - "rapids-dask-dependency==24.8.*,>=0.0.0a0", - "ucx-py==0.39.*,>=0.0.0a0", + "pylibraft==24.8.*", + "rapids-dask-dependency==24.8.*", + "ucx-py==0.39.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", From 05f4af244b398d1fef5540509928f00a7f6845e4 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 11 Sep 2024 05:03:04 -0400 Subject: [PATCH 53/75] fix preprocessing and make tests run on r random at generation --- .../sparse/matrix/detail/preprocessing.cuh | 154 ++++++---- cpp/test/preprocess_utils.cu | 290 ++++++++++++++++++ cpp/test/sparse/preprocess_coo.cu | 189 +++++------- cpp/test/sparse/preprocess_csr.cu | 197 +++++------- 4 files changed, 545 insertions(+), 285 deletions(-) create mode 100644 cpp/test/preprocess_utils.cu diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 67c1c563ab..010336a0be 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - +#include #include #include #include @@ -21,12 +21,19 @@ #include #include #include +#include +#include #include +#include #include +#include -#include +#include #include +#include +#include + namespace raft::sparse::matrix::detail { /** @@ -37,8 +44,9 @@ namespace raft::sparse::matrix::detail { * @param k_param: K value required by BM25 algorithm. * @param b_param: B value required by BM25 algorithm. */ +template struct bm25 { - bm25(int num_feats, float avg_feat_len, float k_param, float b_param) + bm25(T1 num_feats, T2 avg_feat_len, T2 k_param, T2 b_param) { total_feats = num_feats; avg_feat_length = avg_feat_len; @@ -46,11 +54,13 @@ struct bm25 { b = b_param; } - template - float __device__ operator()(const T1& values, const T1& feat_lengths, const T1& num_feats_id_occ) + float __device__ operator()(const T2& value, const T2& num_feats_id_occ, const T2& feat_length) { - return raft::log(total_feats / (1 + num_feats_id_occ)) * - ((values * (k + 1)) / (values + k * (1 - b + b * (feat_lengths / avg_feat_length)))); + float tf = raft::log(1 + (value / feat_length)); + float idf = raft::log(total_feats / num_feats_id_occ); + float bm = ((k + 1) * tf) / (k * ((1.0f - b) + b * (feat_length / avg_feat_length)) + tf); + + return idf * bm; } float avg_feat_length; int total_feats; @@ -63,28 +73,30 @@ struct bm25 { * logrithmically scaled frequency. * @param total_feats_param: The total number of features in the matrix */ +template struct tfidf { - tfidf(int total_feats_param) { total_feats = total_feats_param; } + tfidf(T1 total_feats_param) { total_feats = total_feats_param; } - template - float __device__ operator()(const T1& values, const T2& num_feats_id_occ) + float __device__ operator()(const T1& value, const T2& num_feats_id_occ, const T2& feat_length) { - return raft::log(1 + values) * raft::log(total_feats / (1 + num_feats_id_occ)); + float tf = raft::log(1 + (value / feat_length)); + float idf = raft::log(total_feats / num_feats_id_occ); + return tf * idf; } int total_feats; }; template struct mapper { - mapper(raft::device_vector_view map) : map(map) {} + mapper(raft::device_vector_view map) : map(map) {} - __host__ __device__ void operator()(T& value) const + float __device__ operator()(const T& value) { - const T& new_value = map[value]; + T new_value = map[value]; if (new_value) { - value = new_value; + return new_value; } else { - value = 0; + return 0.0; } } @@ -111,7 +123,6 @@ void get_uniques_counts(raft::resources& handle, raft::device_vector_view counts_out) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - raft::sparse::op::coo_sort(sort_vector.size(), secondary_vector.size(), data.size(), @@ -145,22 +156,25 @@ void create_mapped_vector(raft::resources& handle, raft::device_vector_view result) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto host_keys = raft::make_host_vector(handle, keys.size()); + thrust::host_vector host_keys(keys.size()); + raft::copy(host_keys.data(), keys.data_handle(), keys.size(), stream); + int key_size = *thrust::max_element(host_keys.begin(), host_keys.end()); - raft::copy(host_keys.data_handle(), keys.data_handle(), keys.size(), stream); raft::linalg::map(handle, result, raft::cast_op{}, raft::make_const_mdspan(origin)); - int new_key_size = host_keys(host_keys.size() - 1) + 1; - auto origin_map = raft::make_device_vector(handle, new_key_size); - + // index into the last element and then add 1 to it. 
+ auto origin_map = raft::make_device_vector(handle, key_size + 1); thrust::scatter(raft::resource::get_thrust_policy(handle), counts.data_handle(), counts.data_handle() + counts.size(), keys.data_handle(), origin_map.data_handle()); - thrust::for_each(raft::resource::get_thrust_policy(handle), - result.data_handle(), - result.data_handle() + result.size(), - mapper(raft::make_const_mdspan(origin_map.view()))); + + // const T2* const_counts = counts.data_handle(); + // const T1* const_keys = keys.data_handle(); + + // raft::scatter(origin_map.data_handle(), const_counts, const_keys, counts.size(), + // stream, op=raft::key_op()); + raft::linalg::map(handle, result, mapper(origin_map.view()), raft::make_const_mdspan(result)); } /** @@ -181,19 +195,19 @@ void get_id_counts(raft::resources& handle, T1 n_rows) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); + // auto preserved_rows = raft::make_device_vector(handle, rows.size()); + // raft::copy(preserved_rows.data_handle(), rows.data_handle(), rows.size(), stream); + int uniq_rows = + raft::sparse::neighbors::get_n_components(rows.data_handle(), rows.size(), stream); - auto row_keys = raft::make_device_vector(handle, n_rows); - auto row_counts = raft::make_device_vector(handle, n_rows); - auto row_fill = raft::make_device_vector(handle, rows.size()); + auto row_keys = raft::make_device_vector(handle, uniq_rows); + auto row_counts = raft::make_device_vector(handle, uniq_rows); + auto row_fill = raft::make_device_vector(handle, n_rows); // the amount of columns(features) that each row(id) is found in - thrust::fill(raft::resource::get_thrust_policy(handle), - row_fill.data_handle(), - row_fill.data_handle() + row_fill.size(), - 1.0f); + raft::matrix::fill(handle, row_fill.view(), 1.0f); get_uniques_counts( handle, rows, columns, values, row_fill.view(), row_keys.view(), row_counts.view()); - create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), id_counts); } @@ -207,25 +221,40 @@ void get_id_counts(raft::resources& handle, * @param n_cols: Number of columns in matrix */ template -int get_feature_data(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view feat_lengths, - T1 n_cols) +float get_feature_data(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view feat_lengths, + T1 n_cols) { - cudaStream_t stream = raft::resource::get_cuda_stream(handle); - - auto col_keys = raft::make_device_vector(handle, n_cols); + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + auto preserved_columns = raft::make_device_vector(handle, columns.size()); + raft::copy(preserved_columns.data_handle(), columns.data_handle(), columns.size(), stream); + int uniq_cols = + raft::sparse::neighbors::get_n_components(columns.data_handle(), columns.size(), stream); + auto col_keys = raft::make_device_vector(handle, uniq_cols); auto col_counts = raft::make_device_vector(handle, n_cols); get_uniques_counts(handle, columns, rows, values, values, col_keys.view(), col_counts.view()); - int total_feature_lengths = thrust::reduce(raft::resource::get_thrust_policy(handle), - col_counts.data_handle(), - col_counts.data_handle() + col_counts.size()); - float avg_feat_length = float(total_feature_lengths) / n_cols; - create_mapped_vector(handle, columns, col_keys.view(), col_counts.view(), feat_lengths); + auto total_feature_lengths = 
raft::make_device_scalar(handle, 0); + + raft::linalg::mapReduce(total_feature_lengths.data_handle(), + col_counts.size(), + 0, + raft::identity_op(), + raft::add_op(), + stream, + col_counts.data_handle()); + auto total_feature_lengths_host = raft::make_host_scalar(handle, 0); + raft::copy(total_feature_lengths_host.data_handle(), + total_feature_lengths.data_handle(), + total_feature_lengths.size(), + stream); + float avg_feat_length = float(total_feature_lengths_host(0)) / n_cols; + create_mapped_vector( + handle, preserved_columns.view(), col_keys.view(), col_counts.view(), feat_lengths); return avg_feat_length; } @@ -241,14 +270,14 @@ int get_feature_data(raft::resources& handle, * @param n_cols: Number of columns in matrix */ template -int sparse_search_preprocess(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view feat_lengths, - raft::device_vector_view id_counts, - T1 n_rows, - T1 n_cols) +float sparse_search_preprocess(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view feat_lengths, + raft::device_vector_view id_counts, + T1 n_rows, + T1 n_cols) { auto avg_feature_len = get_feature_data(handle, rows, columns, values, feat_lengths, n_cols); @@ -278,14 +307,16 @@ void base_encode_tfidf(raft::resources& handle, { auto feat_lengths = raft::make_device_vector(handle, columns.size()); auto id_counts = raft::make_device_vector(handle, rows.size()); + auto col_counts = raft::make_device_vector(handle, n_cols); auto avg_feat_length = sparse_search_preprocess( handle, rows, columns, values, feat_lengths.view(), id_counts.view(), n_rows, n_cols); raft::linalg::map(handle, values_out, - tfidf(n_cols), + tfidf(n_cols), raft::make_const_mdspan(values), - raft::make_const_mdspan(id_counts.view())); + raft::make_const_mdspan(id_counts.view()), + raft::make_const_mdspan(feat_lengths.view())); } /** @@ -376,16 +407,17 @@ void base_encode_bm25(raft::resources& handle, { auto feat_lengths = raft::make_device_vector(handle, columns.size()); auto id_counts = raft::make_device_vector(handle, rows.size()); + auto col_counts = raft::make_device_vector(handle, n_cols); auto avg_feat_length = sparse_search_preprocess( handle, rows, columns, values, feat_lengths.view(), id_counts.view(), n_rows, n_cols); raft::linalg::map(handle, values_out, - bm25(n_cols, avg_feat_length, k_param, b_param), + bm25(n_cols, avg_feat_length, k_param, b_param), raft::make_const_mdspan(values), - raft::make_const_mdspan(feat_lengths.view()), - raft::make_const_mdspan(id_counts.view())); + raft::make_const_mdspan(id_counts.view()), + raft::make_const_mdspan(feat_lengths.view())); } /** diff --git a/cpp/test/preprocess_utils.cu b/cpp/test/preprocess_utils.cu new file mode 100644 index 0000000000..cd0d8f0a1d --- /dev/null +++ b/cpp/test/preprocess_utils.cu @@ -0,0 +1,290 @@ +/* + * Copyright (c) 2024, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace raft::util { + +template +struct check_zeroes { + float __device__ operator()(const T1& value, const T2& idx) + { + if (value == 0) { + return 0.f; + } else { + return 1.f; + } + } +}; + +template +void preproc_kernel(raft::resources& handle, + raft::host_vector_view h_rows, + raft::host_vector_view h_cols, + raft::host_vector_view h_elems, + raft::device_vector_view results, + int num_rows, + int num_cols, + bool tf_idf) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + int rows_size = h_rows.size(); + int cols_size = h_cols.size(); + int elements_size = h_elems.size(); + auto device_matrix = raft::make_device_matrix(handle, num_rows, num_cols); + raft::matrix::fill(handle, device_matrix.view(), 0.0f); + auto host_matrix = raft::make_host_matrix(handle, num_rows, num_cols); + raft::copy(host_matrix.data_handle(), device_matrix.data_handle(), device_matrix.size(), stream); + + for (int i = 0; i < elements_size; i++) { + int row = h_rows(i); + int col = h_cols(i); + float element = h_elems(i); + host_matrix(row, col) = element; + } + + raft::copy(device_matrix.data_handle(), host_matrix.data_handle(), host_matrix.size(), stream); + auto output_cols_lengths = raft::make_device_matrix(handle, 1, num_cols); + raft::linalg::reduce(output_cols_lengths.data_handle(), + device_matrix.data_handle(), + num_rows, + num_cols, + 0.0f, + false, + true, + stream); + auto h_output_cols_lengths = raft::make_host_matrix(handle, 1, num_cols); + raft::copy(h_output_cols_lengths.data_handle(), + output_cols_lengths.data_handle(), + output_cols_lengths.size(), + stream); + + auto output_cols_length_sum = raft::make_device_scalar(handle, 0); + raft::linalg::mapReduce(output_cols_length_sum.data_handle(), + num_cols, + 0, + raft::identity_op(), + raft::add_op(), + stream, + output_cols_lengths.data_handle()); + auto h_output_cols_length_sum = raft::make_host_scalar(handle, 0); + raft::copy(h_output_cols_length_sum.data_handle(), + output_cols_length_sum.data_handle(), + output_cols_length_sum.size(), + stream); + float avg_col_length = float(h_output_cols_length_sum(0)) / num_cols; + + auto output_rows_freq = raft::make_device_matrix(handle, 1, num_rows); + raft::linalg::reduce(output_rows_freq.data_handle(), + device_matrix.data_handle(), + num_rows, + num_cols, + 0.0f, + false, + false, + stream); + + auto output_rows_cnt = raft::make_device_matrix(handle, 1, num_rows); + raft::linalg::reduce(output_rows_cnt.data_handle(), + device_matrix.data_handle(), + num_rows, + num_cols, + 0.0f, + false, + false, + stream, + false, + check_zeroes()); + auto h_output_rows_cnt = raft::make_host_matrix(handle, 1, num_rows); + raft::copy( + h_output_rows_cnt.data_handle(), output_rows_cnt.data_handle(), output_rows_cnt.size(), stream); + + auto out_device_matrix = raft::make_device_matrix(handle, num_rows, num_cols); + raft::matrix::fill(handle, out_device_matrix.view(), 0.0f); + auto out_host_matrix = raft::make_host_matrix(handle, num_rows, num_cols); + auto out_host_vector = raft::make_host_vector(handle, results.size()); + + float k1 = 1.6f; + float b = 0.75f; + int count = 0; + float result; + for (int row = 0; row < num_rows; row++) { + for (int col = 0; col < num_cols; col++) { + float val = 
host_matrix(row, col); + if (val == 0) { + out_host_matrix(row, col) = 0.0f; + } else { + float tf = raft::log(1 + (val / h_output_cols_lengths(0, col))); + float idf = raft::log(num_cols / h_output_rows_cnt(0, row)); + if (tf_idf) { + result = tf * idf; + } else { + float bm25 = ((k1 + 1) * tf) / + (k1 * ((1 - b) + b * (h_output_cols_lengths(0, col) / avg_col_length)) + tf); + result = idf * bm25; + } + out_host_matrix(row, col) = result; + out_host_vector(count) = result; + count++; + } + } + } + raft::copy(results.data_handle(), out_host_vector.data_handle(), out_host_vector.size(), stream); +} + +template +int get_dupe_mask_count(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + const raft::device_vector_view& mask) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + + raft::sparse::op::coo_sort(rows.size(), + columns.size(), + values.size(), + rows.data_handle(), + columns.data_handle(), + values.data_handle(), + stream); + + raft::sparse::op::compute_duplicates_mask( + mask.data_handle(), rows.data_handle(), columns.data_handle(), rows.size(), stream); + + int col_nnz_count = thrust::reduce(raft::resource::get_thrust_policy(handle), + mask.data_handle(), + mask.data_handle() + mask.size()); + return col_nnz_count; +} + +template +void remove_dupes(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + raft::device_vector_view mask, + const raft::device_vector_view& out_rows, + const raft::device_vector_view& out_cols, + const raft::device_vector_view& out_vals, + int num_rows = 128) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + + auto col_counts = raft::make_device_vector(handle, columns.size()); + + thrust::fill(raft::resource::get_thrust_policy(handle), + col_counts.data_handle(), + col_counts.data_handle() + col_counts.size(), + 1.0f); + + auto keys_out = raft::make_device_vector(handle, num_rows); + auto counts_out = raft::make_device_vector(handle, num_rows); + + thrust::reduce_by_key(raft::resource::get_thrust_policy(handle), + rows.data_handle(), + rows.data_handle() + rows.size(), + col_counts.data_handle(), + keys_out.data_handle(), + counts_out.data_handle()); + + auto mask_out = raft::make_device_vector(handle, rows.size()); + + raft::linalg::map(handle, mask_out.view(), raft::cast_op{}, raft::make_const_mdspan(mask)); + + auto values_c = raft::make_device_vector(handle, values.size()); + raft::linalg::map(handle, + values_c.view(), + raft::mul_op{}, + raft::make_const_mdspan(values), + raft::make_const_mdspan(mask_out.view())); + + auto keys_nnz_out = raft::make_device_vector(handle, num_rows); + auto counts_nnz_out = raft::make_device_vector(handle, num_rows); + + thrust::reduce_by_key(raft::resource::get_thrust_policy(handle), + rows.data_handle(), + rows.data_handle() + rows.size(), + mask.data_handle(), + keys_nnz_out.data_handle(), + counts_nnz_out.data_handle()); + + raft::sparse::op::coo_remove_scalar(rows.data_handle(), + columns.data_handle(), + values_c.data_handle(), + values_c.size(), + out_rows.data_handle(), + out_cols.data_handle(), + out_vals.data_handle(), + counts_nnz_out.data_handle(), + counts_out.data_handle(), + 0, + num_rows, + stream); +} + +template +void create_dataset(raft::resources& handle, + raft::device_vector_view rows, + raft::device_vector_view columns, + raft::device_vector_view values, + int max_term_occurence_doc = 5, + int num_rows_unique 
= 7, + int num_cols_unique = 7) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + int seed = 12345; + raft::random::RngState rng(seed); + + auto d_out = raft::make_device_vector(handle, rows.size() * 2); + + int theta_guide = max(num_rows_unique, num_cols_unique); + auto theta = raft::make_device_vector(handle, theta_guide * 4); + + raft::random::uniform(handle, rng, theta.view(), 0.0f, 1.0f); + + raft::random::rmat_rectangular_gen(d_out.data_handle(), + rows.data_handle(), + columns.data_handle(), + theta.data_handle(), + num_rows_unique, + num_cols_unique, + int(values.size()), + stream, + rng); + + auto vals = raft::make_device_vector(handle, rows.size()); + raft::random::uniformInt(handle, rng, vals.view(), 1, max_term_occurence_doc); + raft::linalg::map(handle, values, raft::cast_op{}, raft::make_const_mdspan(vals.view())); +} + +}; // namespace raft::util \ No newline at end of file diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu index 1e915b9cde..ea060d5d0c 100644 --- a/cpp/test/sparse/preprocess_coo.cu +++ b/cpp/test/sparse/preprocess_coo.cu @@ -14,6 +14,7 @@ * limitations under the License. */ +#include "../preprocess_utils.cu" #include "../test_utils.cuh" #include @@ -25,19 +26,49 @@ #include +#include #include #include namespace raft { namespace sparse { +template +void calc_tfidf_bm25(raft::resources& handle, + raft::device_coo_matrix_view coo_in, + raft::device_vector_view results, + bool tf_idf = false) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + int num_rows = coo_in.structure_view().get_n_rows(); + int num_cols = coo_in.structure_view().get_n_cols(); + int rows_size = coo_in.structure_view().get_cols().size(); + int cols_size = coo_in.structure_view().get_rows().size(); + int elements_size = coo_in.get_elements().size(); + + auto h_rows = raft::make_host_vector(handle, rows_size); + auto h_cols = raft::make_host_vector(handle, cols_size); + auto h_elems = raft::make_host_vector(handle, elements_size); + + raft::copy(h_rows.data_handle(), + coo_in.structure_view().get_rows().data(), + coo_in.structure_view().get_rows().size(), + stream); + raft::copy(h_cols.data_handle(), + coo_in.structure_view().get_cols().data(), + coo_in.structure_view().get_cols().size(), + stream); + raft::copy( + h_elems.data_handle(), coo_in.get_elements().data(), coo_in.get_elements().size(), stream); + raft::util::preproc_kernel( + handle, h_rows.view(), h_cols.view(), h_elems.view(), results, num_rows, num_cols, tf_idf); +} + template struct SparsePreprocessInputs { int n_rows; int n_cols; - std::vector rows_h; - std::vector columns_h; - std::vector values_h; + int nnz_edges; }; template @@ -55,75 +86,51 @@ class SparsePreprocessCoo void Run(bool bm25_on) { - int k = 2; - std::vector bm25_vals_h = {1.03581, - 1.44385, - 0.794119, - 0.476471, - 0.476471, - 1.02101, - 1.798, - 1.44385, - 1.03581, - 1.78677}; // bm25 - std::vector tfidf_vals_h = {0.635124, - 1.00665, - 1.00665, - 0.635124, - 0.635124, - 1.27025, - 1.47471, - 1.00665, - 0.635124, - 1.27025}; // tfidf - std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, - 5, 6, 7, 4, 8, 7, 9, 1, 10, 5, 0, 0}; - std::vector out_dists_bm25_h = {0, 0.408045, 0, 0, 0, 0, 0, 0.408045, - 0, 0.226891, 0, 0, 0, 0, 0, 0.226891, - 0, 0.776995, 0, 1.44385, 0, 0.559336, 0, 0}; - std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 10, - 5, 10, 7, 8, 8, 7, 9, 1, 5, 10, 0, 0}; - std::vector out_dists_tfidf_h = {0, 0.371524, 0, 0, 0, 0, 0, 0.371524, - 0, 
0.2636, 0, 0, 0, 0, 0, 0.204464, - 0, 0.204464, 0, 1.00665, 0, 0, 0, 0}; - cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto rows = raft::make_device_vector(handle, params.rows_h.size()); - auto columns = raft::make_device_vector(handle, params.columns_h.size()); - auto values = raft::make_device_vector(handle, params.values_h.size()); - auto result = raft::make_device_vector(handle, params.values_h.size()); - auto bm25_vals = raft::make_device_vector(handle, bm25_vals_h.size()); - auto tfidf_vals = raft::make_device_vector(handle, tfidf_vals_h.size()); - auto out_idxs_bm25 = raft::make_device_vector(handle, out_idxs_bm25_h.size()); - auto out_idxs_tfidf = - raft::make_device_vector(handle, out_idxs_tfidf_h.size()); - auto out_dists_bm25 = - raft::make_device_vector(handle, out_dists_bm25_h.size()); - auto out_dists_tfidf = - raft::make_device_vector(handle, out_dists_tfidf_h.size()); - - raft::copy(rows.data_handle(), params.rows_h.data(), params.rows_h.size(), stream); - raft::copy(columns.data_handle(), params.columns_h.data(), params.columns_h.size(), stream); - raft::copy(values.data_handle(), params.values_h.data(), params.values_h.size(), stream); - raft::copy(bm25_vals.data_handle(), bm25_vals_h.data(), bm25_vals_h.size(), stream); - raft::copy(tfidf_vals.data_handle(), tfidf_vals_h.data(), tfidf_vals_h.size(), stream); - raft::copy(out_idxs_bm25.data_handle(), out_idxs_bm25_h.data(), out_idxs_bm25_h.size(), stream); - raft::copy( - out_idxs_tfidf.data_handle(), out_idxs_tfidf_h.data(), out_idxs_tfidf_h.size(), stream); - raft::copy( - out_dists_bm25.data_handle(), out_dists_bm25_h.data(), out_dists_bm25_h.size(), stream); - raft::copy( - out_dists_tfidf.data_handle(), out_dists_tfidf_h.data(), out_dists_tfidf_h.size(), stream); - - auto coo_struct_view = raft::make_device_coordinate_structure_view( - rows.data_handle(), columns.data_handle(), params.n_rows, params.n_cols, int(values.size())); + + int num_rows = pow(2, params.n_rows); + int num_cols = pow(2, params.n_cols); + + auto rows = raft::make_device_vector(handle, params.nnz_edges); + auto columns = raft::make_device_vector(handle, params.nnz_edges); + auto values = raft::make_device_vector(handle, params.nnz_edges); + auto mask = raft::make_device_vector(handle, params.nnz_edges); + + raft::util::create_dataset( + handle, rows.view(), columns.view(), values.view(), 5, params.n_rows, params.n_cols); + int non_dupe_nnz_count = raft::util::get_dupe_mask_count( + handle, rows.view(), columns.view(), values.view(), mask.view()); + + auto rows_nnz = raft::make_device_vector(handle, non_dupe_nnz_count); + auto columns_nnz = raft::make_device_vector(handle, non_dupe_nnz_count); + auto values_nnz = raft::make_device_vector(handle, non_dupe_nnz_count); + raft::util::remove_dupes(handle, + rows.view(), + columns.view(), + values.view(), + mask.view(), + rows_nnz.view(), + columns_nnz.view(), + values_nnz.view(), + num_rows); + + auto coo_struct_view = raft::make_device_coordinate_structure_view(rows_nnz.data_handle(), + columns_nnz.data_handle(), + num_rows, + num_cols, + int(values_nnz.size())); auto c_matrix = raft::make_device_coo_matrix(handle, coo_struct_view); raft::update_device( - c_matrix.view().get_elements().data(), values.data_handle(), values.size(), stream); + c_matrix.view().get_elements().data(), values_nnz.data_handle(), values_nnz.size(), stream); + + auto result = raft::make_device_vector(handle, values_nnz.size()); + auto bm25_vals = raft::make_device_vector(handle, values_nnz.size()); + auto 
tfidf_vals = raft::make_device_vector(handle, values_nnz.size()); if (bm25_on) { sparse::matrix::encode_bm25(handle, c_matrix.view(), result.view()); + calc_tfidf_bm25(handle, c_matrix.view(), bm25_vals.view()); ASSERT_TRUE(raft::devArrMatch(bm25_vals.data_handle(), result.data_handle(), result.size(), @@ -131,6 +138,7 @@ class SparsePreprocessCoo stream)); } else { sparse::matrix::encode_tfidf(handle, c_matrix.view(), result.view()); + calc_tfidf_bm25(handle, c_matrix.view(), tfidf_vals.view(), true); ASSERT_TRUE(raft::devArrMatch(tfidf_vals.data_handle(), result.data_handle(), result.size(), @@ -138,47 +146,6 @@ class SparsePreprocessCoo stream)); } - raft::update_device( - c_matrix.view().get_elements().data(), result.data_handle(), result.size(), stream); - - auto out_indices = - raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); - auto out_dists = - raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); - - raft::sparse::neighbors::brute_force_knn(c_matrix, - c_matrix, - out_indices.data_handle(), - out_dists.data_handle(), - k, - handle, - c_matrix.structure_view().get_n_rows(), - c_matrix.structure_view().get_n_rows(), - raft::distance::DistanceType::L1); - - if (bm25_on) { - ASSERT_TRUE(raft::devArrMatch(out_idxs_bm25.data_handle(), - out_indices.data_handle(), - out_indices.size(), - raft::Compare(), - stream)); - ASSERT_TRUE(raft::devArrMatch(out_dists_bm25.data_handle(), - out_dists.data_handle(), - out_dists.size(), - raft::CompareApprox(2e-5), - stream)); - } else { - ASSERT_TRUE(raft::devArrMatch(out_idxs_tfidf.data_handle(), - out_indices.data_handle(), - out_indices.size(), - raft::Compare(), - stream)); - ASSERT_TRUE(raft::devArrMatch(out_dists_tfidf.data_handle(), - out_dists.data_handle(), - out_dists.size(), - raft::CompareApprox(2e-5), - stream)); - } RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); } @@ -196,11 +163,11 @@ using SparsePreprocessBm25Coo = SparsePreprocessCoo; TEST_P(SparsePreprocessBm25Coo, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { - {12, // n_rows - 5, // n_cols - {0, 3, 4, 5, 6, 7, 8, 9, 10, 11}, // rows - {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, // cols - {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, // vals + { + 4, // n_rows + 2, // n_cols + 10 // nnz_edges + }, }; INSTANTIATE_TEST_CASE_P(SparsePreprocessCoo, diff --git a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu index 1c816892ed..cf5e142593 100644 --- a/cpp/test/sparse/preprocess_csr.cu +++ b/cpp/test/sparse/preprocess_csr.cu @@ -14,6 +14,7 @@ * limitations under the License. 
*/ +#include "../preprocess_utils.cu" #include "../test_utils.cuh" #include @@ -31,13 +32,46 @@ namespace raft { namespace sparse { +template +void calc_tfidf_bm25(raft::resources& handle, + raft::device_csr_matrix_view csr_in, + raft::device_vector_view results, + bool tf_idf = false) +{ + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + int num_rows = csr_in.structure_view().get_n_rows(); + int num_cols = csr_in.structure_view().get_n_cols(); + int rows_size = csr_in.structure_view().get_indptr().size(); + int cols_size = csr_in.structure_view().get_indices().size(); + int elements_size = csr_in.get_elements().size(); + + auto h_rows = raft::make_host_vector(handle, rows_size); + auto h_cols = raft::make_host_vector(handle, cols_size); + auto h_elems = raft::make_host_vector(handle, elements_size); + + auto indptr = raft::make_device_vector_view( + csr_in.structure_view().get_indptr().data(), csr_in.structure_view().get_indptr().size()); + auto indices = raft::make_device_vector_view( + csr_in.structure_view().get_indices().data(), csr_in.structure_view().get_indices().size()); + auto values = raft::make_device_vector_view(csr_in.get_elements().data(), + csr_in.get_elements().size()); + auto rows = raft::make_device_vector(handle, values.size()); + + raft::sparse::convert::csr_to_coo( + indptr.data_handle(), num_rows, rows.data_handle(), rows.size(), stream); + + raft::copy(h_rows.data_handle(), rows.data_handle(), rows.size(), stream); + raft::copy(h_cols.data_handle(), indices.data_handle(), cols_size, stream); + raft::copy(h_elems.data_handle(), values.data_handle(), values.size(), stream); + raft::util::preproc_kernel( + handle, h_rows.view(), h_cols.view(), h_elems.view(), results, num_rows, num_cols, tf_idf); +} + template struct SparsePreprocessInputs { int n_rows; int n_cols; - std::vector rows_h; - std::vector columns_h; - std::vector values_h; + int nnz_edges; }; template @@ -55,79 +89,55 @@ class SparsePreprocessCSR void Run(bool bm25_on) { - int k = 2; - std::vector bm25_vals_h = {1.03581, - 1.44385, - 0.794119, - 0.476471, - 0.476471, - 1.02101, - 1.798, - 1.44385, - 1.03581, - 1.78677}; // bm25 - std::vector tfidf_vals_h = {0.635124, - 1.00665, - 1.00665, - 0.635124, - 0.635124, - 1.27025, - 1.47471, - 1.00665, - 0.635124, - 1.27025}; // tfidf - std::vector out_idxs_bm25_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 6, - 5, 6, 7, 4, 8, 7, 9, 1, 10, 5, 0, 0}; - std::vector out_dists_bm25_h = {0, 0.408045, 0, 0, 0, 0, 0, 0.408045, - 0, 0.226891, 0, 0, 0, 0, 0, 0.226891, - 0, 0.776995, 0, 1.44385, 0, 0.559336, 0, 0}; - std::vector out_idxs_tfidf_h = {0, 3, 1, 2, 1, 2, 3, 0, 4, 7, 5, 10, - 5, 10, 7, 8, 8, 7, 9, 1, 5, 10, 0, 0}; - std::vector out_dists_tfidf_h = {0, 0.371524, 0, 0, 0, 0, 0, 0.371524, - 0, 0.2636, 0, 0, 0, 0, 0, 0.204464, - 0, 0.204464, 0, 1.00665, 0, 0, 0, 0}; - cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto indptr = raft::make_device_vector(handle, params.rows_h.size()); - auto indices = raft::make_device_vector(handle, params.columns_h.size()); - auto values = raft::make_device_vector(handle, params.values_h.size()); - auto result = raft::make_device_vector(handle, params.values_h.size()); - auto bm25_vals = raft::make_device_vector(handle, bm25_vals_h.size()); - auto tfidf_vals = raft::make_device_vector(handle, tfidf_vals_h.size()); - auto out_idxs_bm25 = raft::make_device_vector(handle, out_idxs_bm25_h.size()); - auto out_idxs_tfidf = - raft::make_device_vector(handle, out_idxs_tfidf_h.size()); - auto out_dists_bm25 = - 
raft::make_device_vector(handle, out_dists_bm25_h.size()); - auto out_dists_tfidf = - raft::make_device_vector(handle, out_dists_tfidf_h.size()); - - raft::copy(indptr.data_handle(), params.rows_h.data(), params.rows_h.size(), stream); - raft::copy(indices.data_handle(), params.columns_h.data(), params.columns_h.size(), stream); - raft::copy(values.data_handle(), params.values_h.data(), params.values_h.size(), stream); - raft::copy(bm25_vals.data_handle(), bm25_vals_h.data(), bm25_vals_h.size(), stream); - raft::copy(tfidf_vals.data_handle(), tfidf_vals_h.data(), tfidf_vals_h.size(), stream); - raft::copy(out_idxs_bm25.data_handle(), out_idxs_bm25_h.data(), out_idxs_bm25_h.size(), stream); - raft::copy( - out_idxs_tfidf.data_handle(), out_idxs_tfidf_h.data(), out_idxs_tfidf_h.size(), stream); - raft::copy( - out_dists_bm25.data_handle(), out_dists_bm25_h.data(), out_dists_bm25_h.size(), stream); - raft::copy( - out_dists_tfidf.data_handle(), out_dists_tfidf_h.data(), out_dists_tfidf_h.size(), stream); - - auto csr_struct_view = raft::make_device_compressed_structure_view(indptr.data_handle(), - indices.data_handle(), - params.n_rows, - params.n_cols, - int(values.size())); + + int num_rows = pow(2, params.n_rows); + int num_cols = pow(2, params.n_cols); + + auto rows = raft::make_device_vector(handle, params.nnz_edges); + auto columns = raft::make_device_vector(handle, params.nnz_edges); + auto values = raft::make_device_vector(handle, params.nnz_edges); + auto mask = raft::make_device_vector(handle, params.nnz_edges); + + raft::util::create_dataset( + handle, rows.view(), columns.view(), values.view(), 5, params.n_rows, params.n_cols); + int non_dupe_nnz_count = raft::util::get_dupe_mask_count( + handle, rows.view(), columns.view(), values.view(), mask.view()); + + auto rows_nnz = raft::make_device_vector(handle, non_dupe_nnz_count); + auto columns_nnz = raft::make_device_vector(handle, non_dupe_nnz_count); + auto values_nnz = raft::make_device_vector(handle, non_dupe_nnz_count); + raft::util::remove_dupes(handle, + rows.view(), + columns.view(), + values.view(), + mask.view(), + rows_nnz.view(), + columns_nnz.view(), + values_nnz.view(), + num_rows); + auto rows_csr = raft::make_device_vector(handle, non_dupe_nnz_count); + raft::sparse::convert::sorted_coo_to_csr( + rows_nnz.data_handle(), int(rows_nnz.size()), rows_csr.data_handle(), num_rows, stream); + + auto csr_struct_view = raft::make_device_compressed_structure_view(rows_csr.data_handle(), + columns_nnz.data_handle(), + num_rows, + num_cols, + int(values_nnz.size())); auto c_matrix = raft::make_device_csr_matrix(handle, csr_struct_view); raft::update_device( - c_matrix.view().get_elements().data(), values.data_handle(), values.size(), stream); + c_matrix.view().get_elements().data(), values_nnz.data_handle(), values_nnz.size(), stream); + + auto result = raft::make_device_vector(handle, values_nnz.size()); + auto bm25_vals = raft::make_device_vector(handle, values_nnz.size()); + auto tfidf_vals = raft::make_device_vector(handle, values_nnz.size()); if (bm25_on) { sparse::matrix::encode_bm25(handle, c_matrix.view(), result.view()); + calc_tfidf_bm25(handle, c_matrix.view(), bm25_vals.view()); ASSERT_TRUE(raft::devArrMatch(bm25_vals.data_handle(), result.data_handle(), result.size(), @@ -135,52 +145,13 @@ class SparsePreprocessCSR stream)); } else { sparse::matrix::encode_tfidf(handle, c_matrix.view(), result.view()); + calc_tfidf_bm25(handle, c_matrix.view(), tfidf_vals.view(), true); 
ASSERT_TRUE(raft::devArrMatch(tfidf_vals.data_handle(), result.data_handle(), result.size(), raft::CompareApprox(2e-5), stream)); } - raft::update_device( - c_matrix.view().get_elements().data(), result.data_handle(), result.size(), stream); - auto out_indices = - raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); - auto out_dists = - raft::make_device_vector(handle, c_matrix.structure_view().get_n_rows() * k); - - raft::sparse::neighbors::brute_force_knn(c_matrix, - c_matrix, - out_indices.data_handle(), - out_dists.data_handle(), - k, - handle, - c_matrix.structure_view().get_n_rows(), - c_matrix.structure_view().get_n_rows(), - raft::distance::DistanceType::L1); - - if (bm25_on) { - ASSERT_TRUE(raft::devArrMatch(out_idxs_bm25.data_handle(), - out_indices.data_handle(), - out_indices.size(), - raft::Compare(), - stream)); - ASSERT_TRUE(raft::devArrMatch(out_dists_bm25.data_handle(), - out_dists.data_handle(), - out_dists.size(), - raft::CompareApprox(2e-5), - stream)); - } else { - ASSERT_TRUE(raft::devArrMatch(out_idxs_tfidf.data_handle(), - out_indices.data_handle(), - out_indices.size(), - raft::Compare(), - stream)); - ASSERT_TRUE(raft::devArrMatch(out_dists_tfidf.data_handle(), - out_dists.data_handle(), - out_dists.size(), - raft::CompareApprox(2e-5), - stream)); - } RAFT_CUDA_TRY(cudaStreamSynchronize(stream)); } @@ -199,11 +170,11 @@ using SparsePreprocessBm25Csr = SparsePreprocessCSR; TEST_P(SparsePreprocessBm25Csr, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { - {12, // n_rows - 5, // n_cols - {0, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8, 9}, // rows - {0, 0, 1, 2, 2, 1, 1, 3, 2, 1}, // cols - {1.0, 2.0, 2.0, 1.0, 1.0, 3.0, 4.0, 2.0, 1.0, 3.0}}, // vals + { + 4, // n_rows + 2, // n_cols + 10 // num nnz values + }, }; INSTANTIATE_TEST_CASE_P(SparsePreprocessCSR, From a1e3a48c673534086a05c77e56d77c9b9211b792 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 11 Sep 2024 05:51:40 -0400 Subject: [PATCH 54/75] remove unnecessary imports --- .../raft/sparse/matrix/detail/preprocessing.cuh | 11 ----------- cpp/test/preprocess_utils.cu | 7 ------- 2 files changed, 18 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 010336a0be..505f6ef9e2 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -13,20 +13,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include #include -#include -#include -#include #include #include -#include #include #include #include #include #include -#include #include #include @@ -169,11 +163,6 @@ void create_mapped_vector(raft::resources& handle, keys.data_handle(), origin_map.data_handle()); - // const T2* const_counts = counts.data_handle(); - // const T1* const_keys = keys.data_handle(); - - // raft::scatter(origin_map.data_handle(), const_counts, const_keys, counts.size(), - // stream, op=raft::key_op()); raft::linalg::map(handle, result, mapper(origin_map.view()), raft::make_const_mdspan(result)); } diff --git a/cpp/test/preprocess_utils.cu b/cpp/test/preprocess_utils.cu index cd0d8f0a1d..f722c81597 100644 --- a/cpp/test/preprocess_utils.cu +++ b/cpp/test/preprocess_utils.cu @@ -13,16 +13,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -#include #include -#include -#include -#include #include #include -#include #include -#include #include #include #include @@ -30,7 +24,6 @@ #include #include #include -#include namespace raft::util { From 44f3e1c5e9803004ad44283a757e8a001ed856ff Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 11 Sep 2024 12:04:34 -0400 Subject: [PATCH 55/75] remove log for tf --- cpp/include/raft/sparse/matrix/detail/preprocessing.cuh | 4 ++-- cpp/test/preprocess_utils.cu | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 505f6ef9e2..929e0c0241 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -50,7 +50,7 @@ struct bm25 { float __device__ operator()(const T2& value, const T2& num_feats_id_occ, const T2& feat_length) { - float tf = raft::log(1 + (value / feat_length)); + float tf = float(value / feat_length); float idf = raft::log(total_feats / num_feats_id_occ); float bm = ((k + 1) * tf) / (k * ((1.0f - b) + b * (feat_length / avg_feat_length)) + tf); @@ -73,7 +73,7 @@ struct tfidf { float __device__ operator()(const T1& value, const T2& num_feats_id_occ, const T2& feat_length) { - float tf = raft::log(1 + (value / feat_length)); + float tf = float(value / feat_length); float idf = raft::log(total_feats / num_feats_id_occ); return tf * idf; } diff --git a/cpp/test/preprocess_utils.cu b/cpp/test/preprocess_utils.cu index f722c81597..67e4348259 100644 --- a/cpp/test/preprocess_utils.cu +++ b/cpp/test/preprocess_utils.cu @@ -136,7 +136,7 @@ void preproc_kernel(raft::resources& handle, if (val == 0) { out_host_matrix(row, col) = 0.0f; } else { - float tf = raft::log(1 + (val / h_output_cols_lengths(0, col))); + float tf = float(val / h_output_cols_lengths(0, col)); float idf = raft::log(num_cols / h_output_rows_cnt(0, row)); if (tf_idf) { result = tf * idf; From e25e2deb7dd89e00be16c11a83f3b9a59eee6f97 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 11 Sep 2024 12:52:40 -0400 Subject: [PATCH 56/75] added more template changes --- .../sparse/matrix/detail/preprocessing.cuh | 51 +++++++++---------- cpp/test/preprocess_utils.cu | 4 +- cpp/test/sparse/preprocess_coo.cu | 6 +-- cpp/test/sparse/preprocess_csr.cu | 6 +-- 4 files changed, 33 insertions(+), 34 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 929e0c0241..bbcc69e34f 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -31,8 +31,7 @@ namespace raft::sparse::matrix::detail { /** - * @brief Calculates the BM25 values for a target matrix. Term frequency is calculate using - * logrithmically scaled frequency. + * @brief Calculates the BM25 values for a target matrix. * @param num_feats: The total number of features in the matrix * @param avg_feat_len: The avg length of all features combined. * @param k_param: K value required by BM25 algorithm. 
@@ -50,16 +49,16 @@ struct bm25 { float __device__ operator()(const T2& value, const T2& num_feats_id_occ, const T2& feat_length) { - float tf = float(value / feat_length); - float idf = raft::log(total_feats / num_feats_id_occ); - float bm = ((k + 1) * tf) / (k * ((1.0f - b) + b * (feat_length / avg_feat_length)) + tf); + T2 tf = T2(value / feat_length); + T2 idf = raft::log(total_feats / num_feats_id_occ); + T2 bm = ((k + 1) * tf) / (k * ((1.0f - b) + b * (feat_length / avg_feat_length)) + tf); return idf * bm; } - float avg_feat_length; - int total_feats; - float k; - float b; + T2 avg_feat_length; + T1 total_feats; + T2 k; + T2 b; }; /** @@ -71,13 +70,13 @@ template struct tfidf { tfidf(T1 total_feats_param) { total_feats = total_feats_param; } - float __device__ operator()(const T1& value, const T2& num_feats_id_occ, const T2& feat_length) + float __device__ operator()(const T2& value, const T2& num_feats_id_occ, const T2& feat_length) { - float tf = float(value / feat_length); - float idf = raft::log(total_feats / num_feats_id_occ); + T2 tf = T2(value / feat_length); + T2 idf = raft::log(total_feats / num_feats_id_occ); return tf * idf; } - int total_feats; + T1 total_feats; }; template @@ -227,7 +226,7 @@ float get_feature_data(raft::resources& handle, get_uniques_counts(handle, columns, rows, values, values, col_keys.view(), col_counts.view()); - auto total_feature_lengths = raft::make_device_scalar(handle, 0); + auto total_feature_lengths = raft::make_device_scalar(handle, 0); raft::linalg::mapReduce(total_feature_lengths.data_handle(), col_counts.size(), @@ -236,12 +235,12 @@ float get_feature_data(raft::resources& handle, raft::add_op(), stream, col_counts.data_handle()); - auto total_feature_lengths_host = raft::make_host_scalar(handle, 0); + auto total_feature_lengths_host = raft::make_host_scalar(handle, 0); raft::copy(total_feature_lengths_host.data_handle(), total_feature_lengths.data_handle(), total_feature_lengths.size(), stream); - float avg_feat_length = float(total_feature_lengths_host(0)) / n_cols; + T2 avg_feat_length = T2(total_feature_lengths_host(0)) / n_cols; create_mapped_vector( handle, preserved_columns.view(), col_keys.view(), col_counts.view(), feat_lengths); return avg_feat_length; @@ -291,8 +290,8 @@ void base_encode_tfidf(raft::resources& handle, raft::device_vector_view columns, raft::device_vector_view values, raft::device_vector_view values_out, - int n_rows, - int n_cols) + T1 n_rows, + T1 n_cols) { auto feat_lengths = raft::make_device_vector(handle, columns.size()); auto id_counts = raft::make_device_vector(handle, rows.size()); @@ -389,10 +388,10 @@ void base_encode_bm25(raft::resources& handle, raft::device_vector_view columns, raft::device_vector_view values, raft::device_vector_view values_out, - int n_rows, - int n_cols, - float k_param = 1.6f, - float b_param = 0.75f) + T1 n_rows, + T1 n_cols, + T2 k_param = 1.6f, + T2 b_param = 0.75f) { auto feat_lengths = raft::make_device_vector(handle, columns.size()); auto id_counts = raft::make_device_vector(handle, rows.size()); @@ -421,8 +420,8 @@ template void encode_bm25(raft::resources& handle, raft::device_coo_matrix_view coo_in, raft::device_vector_view values_out, - float k_param = 1.6f, - float b_param = 0.75f) + T2 k_param = 1.6f, + T2 b_param = 0.75f) { auto rows = raft::make_device_vector_view(coo_in.structure_view().get_rows().data(), coo_in.structure_view().get_rows().size()); @@ -452,8 +451,8 @@ template void encode_bm25(raft::resources& handle, raft::device_csr_matrix_view csr_in, 
raft::device_vector_view values_out, - float k_param = 1.6f, - float b_param = 0.75f) + T2 k_param = 1.6f, + T2 b_param = 0.75f) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); diff --git a/cpp/test/preprocess_utils.cu b/cpp/test/preprocess_utils.cu index 67e4348259..19a3cbf2ee 100644 --- a/cpp/test/preprocess_utils.cu +++ b/cpp/test/preprocess_utils.cu @@ -252,10 +252,10 @@ void create_dataset(raft::resources& handle, raft::device_vector_view values, int max_term_occurence_doc = 5, int num_rows_unique = 7, - int num_cols_unique = 7) + int num_cols_unique = 7, + int seed = 12345) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - int seed = 12345; raft::random::RngState rng(seed); auto d_out = raft::make_device_vector(handle, rows.size() * 2); diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu index ea060d5d0c..0c9c7e3699 100644 --- a/cpp/test/sparse/preprocess_coo.cu +++ b/cpp/test/sparse/preprocess_coo.cu @@ -164,9 +164,9 @@ TEST_P(SparsePreprocessBm25Coo, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { { - 4, // n_rows - 2, // n_cols - 10 // nnz_edges + 7, // n_rows + 5, // n_cols + 100 // nnz_edges }, }; diff --git a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu index cf5e142593..0d8d82159f 100644 --- a/cpp/test/sparse/preprocess_csr.cu +++ b/cpp/test/sparse/preprocess_csr.cu @@ -171,9 +171,9 @@ TEST_P(SparsePreprocessBm25Csr, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { { - 4, // n_rows - 2, // n_cols - 10 // num nnz values + 7, // n_rows + 5, // n_cols + 100 // num nnz values }, }; From e6d2c1cb143a8eddce50e3f163158018c42980a1 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Mon, 23 Sep 2024 20:36:07 -0400 Subject: [PATCH 57/75] remove excess thrust calls --- .../sparse/matrix/detail/preprocessing.cuh | 50 +++++++++++-------- cpp/test/sparse/preprocess_coo.cu | 6 +-- cpp/test/sparse/preprocess_csr.cu | 6 +-- 3 files changed, 35 insertions(+), 27 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index bbcc69e34f..e057a4e204 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -22,12 +22,8 @@ #include #include -#include #include -#include -#include - namespace raft::sparse::matrix::detail { /** @@ -96,6 +92,19 @@ struct mapper { raft::device_vector_view map; }; +template +struct map_to { + map_to(raft::device_vector_view map) : map(map) {} + + float __device__ operator()(const T1& key, const T2& count) + { + map[key] = count; + return 0.0f; + } + + raft::device_vector_view map; +}; + /** * @brief Get unique counts * @param handle: raft resource handle @@ -146,22 +155,21 @@ void create_mapped_vector(raft::resources& handle, const raft::device_vector_view origin, const raft::device_vector_view keys, const raft::device_vector_view counts, - raft::device_vector_view result) + raft::device_vector_view result, + T1 key_size) { - cudaStream_t stream = raft::resource::get_cuda_stream(handle); - thrust::host_vector host_keys(keys.size()); - raft::copy(host_keys.data(), keys.data_handle(), keys.size(), stream); - int key_size = *thrust::max_element(host_keys.begin(), host_keys.end()); - - raft::linalg::map(handle, result, raft::cast_op{}, raft::make_const_mdspan(origin)); // index into the last element and then add 1 to it. 
auto origin_map = raft::make_device_vector(handle, key_size + 1); - thrust::scatter(raft::resource::get_thrust_policy(handle), - counts.data_handle(), - counts.data_handle() + counts.size(), - keys.data_handle(), - origin_map.data_handle()); + raft::matrix::fill(handle, origin_map.view(), 0.0f); + + auto dummy_vec = raft::make_device_vector(handle, keys.size()); + raft::linalg::map(handle, + dummy_vec.view(), + map_to(origin_map.view()), + raft::make_const_mdspan(keys), + raft::make_const_mdspan(counts)); + raft::linalg::map(handle, result, raft::cast_op{}, raft::make_const_mdspan(origin)); raft::linalg::map(handle, result, mapper(origin_map.view()), raft::make_const_mdspan(result)); } @@ -183,8 +191,6 @@ void get_id_counts(raft::resources& handle, T1 n_rows) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - // auto preserved_rows = raft::make_device_vector(handle, rows.size()); - // raft::copy(preserved_rows.data_handle(), rows.data_handle(), rows.size(), stream); int uniq_rows = raft::sparse::neighbors::get_n_components(rows.data_handle(), rows.size(), stream); @@ -194,9 +200,11 @@ void get_id_counts(raft::resources& handle, // the amount of columns(features) that each row(id) is found in raft::matrix::fill(handle, row_fill.view(), 1.0f); + get_uniques_counts( handle, rows, columns, values, row_fill.view(), row_keys.view(), row_counts.view()); - create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), id_counts); + + create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), id_counts, n_rows); } /** @@ -222,7 +230,7 @@ float get_feature_data(raft::resources& handle, int uniq_cols = raft::sparse::neighbors::get_n_components(columns.data_handle(), columns.size(), stream); auto col_keys = raft::make_device_vector(handle, uniq_cols); - auto col_counts = raft::make_device_vector(handle, n_cols); + auto col_counts = raft::make_device_vector(handle, uniq_cols); get_uniques_counts(handle, columns, rows, values, values, col_keys.view(), col_counts.view()); @@ -242,7 +250,7 @@ float get_feature_data(raft::resources& handle, stream); T2 avg_feat_length = T2(total_feature_lengths_host(0)) / n_cols; create_mapped_vector( - handle, preserved_columns.view(), col_keys.view(), col_counts.view(), feat_lengths); + handle, preserved_columns.view(), col_keys.view(), col_counts.view(), feat_lengths, n_cols); return avg_feat_length; } diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu index 0c9c7e3699..bde8bab165 100644 --- a/cpp/test/sparse/preprocess_coo.cu +++ b/cpp/test/sparse/preprocess_coo.cu @@ -164,9 +164,9 @@ TEST_P(SparsePreprocessBm25Coo, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { { - 7, // n_rows - 5, // n_cols - 100 // nnz_edges + 10, // n_rows + 10, // n_cols + 1000 // nnz_edges }, }; diff --git a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu index 0d8d82159f..ec1136acd0 100644 --- a/cpp/test/sparse/preprocess_csr.cu +++ b/cpp/test/sparse/preprocess_csr.cu @@ -171,9 +171,9 @@ TEST_P(SparsePreprocessBm25Csr, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { { - 7, // n_rows - 5, // n_cols - 100 // num nnz values + 7, // n_rows + 5, // n_cols + 10 // num nnz values }, }; From 5120c972de63ccdeaf82d4a9a4699ccf8c193504 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Mon, 23 Sep 2024 20:37:06 -0400 Subject: [PATCH 58/75] add better comment on inputs for tests --- cpp/test/sparse/preprocess_coo.cu | 4 ++-- cpp/test/sparse/preprocess_csr.cu | 4 ++-- 2 files 
changed, 4 insertions(+), 4 deletions(-) diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu index bde8bab165..b26e5122d7 100644 --- a/cpp/test/sparse/preprocess_coo.cu +++ b/cpp/test/sparse/preprocess_coo.cu @@ -164,8 +164,8 @@ TEST_P(SparsePreprocessBm25Coo, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { { - 10, // n_rows - 10, // n_cols + 10, // n_rows_factor + 10, // n_cols_factor 1000 // nnz_edges }, }; diff --git a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu index ec1136acd0..eab270ce79 100644 --- a/cpp/test/sparse/preprocess_csr.cu +++ b/cpp/test/sparse/preprocess_csr.cu @@ -171,8 +171,8 @@ TEST_P(SparsePreprocessBm25Csr, Result) { Run(true); } const std::vector> sparse_preprocess_inputs = { { - 7, // n_rows - 5, // n_cols + 7, // n_rows_factor + 5, // n_cols_factor 10 // num nnz values }, }; From 87a729c1343fc0e6cb7ecb4e605faf4f3f62b758 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Thu, 26 Sep 2024 11:13:16 -0400 Subject: [PATCH 59/75] fixed scale errors --- .../sparse/matrix/detail/preprocessing.cuh | 71 ++++++++++++++----- cpp/test/preprocess_utils.cu | 6 +- 2 files changed, 55 insertions(+), 22 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index e057a4e204..26703d692c 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -105,6 +105,33 @@ struct map_to { raft::device_vector_view map; }; +template +struct map_add { + map_add(raft::device_vector_view map) : map(map) {} + + float __device__ operator()(const T1& key, const T2& count) + { + map[key] = map[key] + count; + return 0.0f; + } + + raft::device_vector_view map; +}; + +template +struct map_inc { + map_inc(raft::device_vector_view map) : map(map) {} + + float __device__ operator()(const T1& key) + { + T1 value = map[key]; + map[key] = value + 1; + return 0.0f; + } + + raft::device_vector_view map; +}; + /** * @brief Get unique counts * @param handle: raft resource handle @@ -125,9 +152,9 @@ void get_uniques_counts(raft::resources& handle, raft::device_vector_view counts_out) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - raft::sparse::op::coo_sort(sort_vector.size(), - secondary_vector.size(), - data.size(), + raft::sparse::op::coo_sort(int(sort_vector.size()), + int(secondary_vector.size()), + int(data.size()), sort_vector.data_handle(), secondary_vector.data_handle(), data.data_handle(), @@ -187,24 +214,30 @@ void get_id_counts(raft::resources& handle, raft::device_vector_view rows, raft::device_vector_view columns, raft::device_vector_view values, - raft::device_vector_view id_counts, + raft::device_vector_view id_counts, T1 n_rows) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - int uniq_rows = - raft::sparse::neighbors::get_n_components(rows.data_handle(), rows.size(), stream); - auto row_keys = raft::make_device_vector(handle, uniq_rows); - auto row_counts = raft::make_device_vector(handle, uniq_rows); - auto row_fill = raft::make_device_vector(handle, n_rows); + raft::sparse::op::coo_sort(int(rows.size()), + int(columns.size()), + int(values.size()), + rows.data_handle(), + columns.data_handle(), + values.data_handle(), + stream); - // the amount of columns(features) that each row(id) is found in - raft::matrix::fill(handle, row_fill.view(), 1.0f); + // auto row_keys = raft::make_device_vector(handle, uniq_rows); + auto 
rows_counts = raft::make_device_vector(handle, n_rows); + raft::matrix::fill(handle, rows_counts.view(), 0); - get_uniques_counts( - handle, rows, columns, values, row_fill.view(), row_keys.view(), row_counts.view()); + raft::sparse::linalg::coo_degree(raft::make_const_mdspan(rows).data_handle(), + int(rows.size()), + rows_counts.data_handle(), + stream); - create_mapped_vector(handle, rows, row_keys.view(), row_counts.view(), id_counts, n_rows); + raft::linalg::map( + handle, id_counts, mapper(rows_counts.view()), raft::make_const_mdspan(rows)); } /** @@ -271,7 +304,7 @@ float sparse_search_preprocess(raft::resources& handle, raft::device_vector_view columns, raft::device_vector_view values, raft::device_vector_view feat_lengths, - raft::device_vector_view id_counts, + raft::device_vector_view id_counts, T1 n_rows, T1 n_cols) { @@ -301,8 +334,8 @@ void base_encode_tfidf(raft::resources& handle, T1 n_rows, T1 n_cols) { - auto feat_lengths = raft::make_device_vector(handle, columns.size()); - auto id_counts = raft::make_device_vector(handle, rows.size()); + auto feat_lengths = raft::make_device_vector(handle, values.size()); + auto id_counts = raft::make_device_vector(handle, values.size()); auto col_counts = raft::make_device_vector(handle, n_cols); auto avg_feat_length = sparse_search_preprocess( handle, rows, columns, values, feat_lengths.view(), id_counts.view(), n_rows, n_cols); @@ -401,8 +434,8 @@ void base_encode_bm25(raft::resources& handle, T2 k_param = 1.6f, T2 b_param = 0.75f) { - auto feat_lengths = raft::make_device_vector(handle, columns.size()); - auto id_counts = raft::make_device_vector(handle, rows.size()); + auto feat_lengths = raft::make_device_vector(handle, values.size()); + auto id_counts = raft::make_device_vector(handle, values.size()); auto col_counts = raft::make_device_vector(handle, n_cols); auto avg_feat_length = sparse_search_preprocess( diff --git a/cpp/test/preprocess_utils.cu b/cpp/test/preprocess_utils.cu index 19a3cbf2ee..5734128373 100644 --- a/cpp/test/preprocess_utils.cu +++ b/cpp/test/preprocess_utils.cu @@ -163,9 +163,9 @@ int get_dupe_mask_count(raft::resources& handle, { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - raft::sparse::op::coo_sort(rows.size(), - columns.size(), - values.size(), + raft::sparse::op::coo_sort(int(rows.size()), + int(columns.size()), + int(values.size()), rows.data_handle(), columns.data_handle(), values.data_handle(), From 63576b023774a63ebeb56c8be6bbab43e5f12e33 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Thu, 26 Sep 2024 11:15:08 -0400 Subject: [PATCH 60/75] remove vector based public apis --- .../raft/sparse/matrix/preprocessing.cuh | 50 ------------------- 1 file changed, 50 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/preprocessing.cuh b/cpp/include/raft/sparse/matrix/preprocessing.cuh index e6c737efae..e4b3edd64b 100644 --- a/cpp/include/raft/sparse/matrix/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/preprocessing.cuh @@ -26,33 +26,6 @@ namespace raft::sparse::matrix { -/** - * @brief Use BM25 algorithm to encode features in COO sparse matrix - * @param handle: raft resource handle - * @param rows: Input COO rows array - * @param columns: Input COO columns array - * @param values: Input COO values array - * @param values_out: Output COO values array - * @param n_rows: Number of rows in matrix - * @param n_cols: Number of columns in matrix - * @param k_param: K value to use for BM25 algorithm - * @param b_param: B value to use for BM25 algorithm - */ -template -void 
encode_bm25(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view values_out, - int n_rows, - int n_cols, - float k_param = 1.6f, - float b_param = 0.75) -{ - return matrix::detail::base_encode_bm25( - handle, rows, columns, values, values_out, n_rows, n_cols, k_param, b_param); -} - /** * @brief Use BM25 algorithm to encode features in COO sparse matrix * @param handle: raft resource handle @@ -89,29 +62,6 @@ void encode_bm25(raft::resources& handle, return matrix::detail::encode_bm25(handle, csr_in, values_out, k_param, b_param); } -/** - * @brief Use TFIDF algorithm to encode features in COO sparse matrix - * @param handle: raft resource handle - * @param rows: Input COO rows array - * @param columns: Input COO columns array - * @param values: Input COO values array - * @param values_out: Output COO values array - * @param n_rows: Number of rows in matrix - * @param n_cols: Number of columns in matrix - */ -template -void encode_tfidf(raft::resources& handle, - raft::device_vector_view rows, - raft::device_vector_view columns, - raft::device_vector_view values, - raft::device_vector_view values_out, - int n_rows, - int n_cols) -{ - return matrix::detail::base_encode_tfidf( - handle, rows, columns, values, values_out, n_rows, n_cols); -} - /** * @brief Use TFIDF algorithm to encode features in COO sparse matrix * @param handle: raft resource handle From c123acb3c354052b56cb92576de1b0ddf9ac012d Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Fri, 27 Sep 2024 16:12:43 -0400 Subject: [PATCH 61/75] add in bfknn tests for csr and coo sparse matrices --- cpp/include/raft/sparse/neighbors/knn.cuh | 63 +++--- cpp/test/sparse/neighbors/brute_force_coo.cu | 190 +++++++++++++++++++ cpp/test/sparse/neighbors/brute_force_csr.cu | 183 ++++++++++++++++++ 3 files changed, 405 insertions(+), 31 deletions(-) create mode 100644 cpp/test/sparse/neighbors/brute_force_coo.cu create mode 100644 cpp/test/sparse/neighbors/brute_force_csr.cu diff --git a/cpp/include/raft/sparse/neighbors/knn.cuh b/cpp/include/raft/sparse/neighbors/knn.cuh index 73520eb572..bffbf6c943 100644 --- a/cpp/include/raft/sparse/neighbors/knn.cuh +++ b/cpp/include/raft/sparse/neighbors/knn.cuh @@ -133,8 +133,8 @@ void brute_force_knn(raft::device_csr_matrix csr_query, - value_idx* output_indices, - value_t* output_dists, + device_vector_view output_indices, + device_vector_view output_dists, int k, raft::resources const& handle, size_t batch_size_index = 2 << 14, // approx 1M @@ -149,20 +149,21 @@ void brute_force_knn(raft::device_csr_matrix(idxIndptr.data(), idxIndices.data(), idxData.data(), - idxData.size(), - csr_idx.structure_view().get_n_rows() - 1, + idxIndices.size(), + idxIndptr.size() - 1, csr_idx.structure_view().get_n_cols(), queryIndptr.data(), queryIndices.data(), queryData.data(), - queryData.size(), - csr_query.structure_view().get_n_rows() - 1, + queryIndices.size(), + queryIndptr.size() - 1, csr_query.structure_view().get_n_cols(), - output_indices, - output_dists, + output_indices.data_handle(), + output_dists.data_handle(), k, handle, batch_size_index, @@ -198,8 +199,8 @@ void brute_force_knn(raft::device_coo_matrix coo_query, - value_idx* output_indices, - value_t* output_dists, + device_vector_view output_indices, + device_vector_view output_dists, int k, raft::resources const& handle, size_t batch_size_index = 2 << 14, // approx 1M @@ -217,57 +218,57 @@ void brute_force_knn(raft::device_coo_matrix(handle, 
coo_query.structure_view().get_n_rows()); - auto queryRowsCsr = - raft::make_device_vector(handle, coo_query.structure_view().get_n_rows()); + // + 1 is to account for the 0 at the beginning of the csr representation + auto idxRowsCsr = raft::make_device_vector( + handle, coo_query.structure_view().get_n_rows() + 1); + auto queryRowsCsr = raft::make_device_vector( + handle, coo_query.structure_view().get_n_rows() + 1); raft::sparse::convert::sorted_coo_to_csr(idxRows.data(), int(idxRows.size()), idxRowsCsr.data_handle(), - coo_idx.structure_view().get_n_rows(), + coo_idx.structure_view().get_n_rows() + 1, stream); raft::sparse::convert::sorted_coo_to_csr(queryRows.data(), int(queryRows.size()), queryRowsCsr.data_handle(), - coo_query.structure_view().get_n_rows(), + coo_query.structure_view().get_n_rows() + 1, stream); brute_force::knn(idxRowsCsr.data_handle(), idxCols.data(), idxData.data(), - idxData.size(), - coo_idx.structure_view().get_n_rows() - 1, + idxCols.size(), + idxRowsCsr.size() - 1, coo_idx.structure_view().get_n_cols(), queryRowsCsr.data_handle(), queryCols.data(), queryData.data(), - queryData.size(), - coo_query.structure_view().get_n_rows() - 1, + queryCols.size(), + queryRowsCsr.size() - 1, coo_query.structure_view().get_n_cols(), - output_indices, - output_dists, + output_indices.data_handle(), + output_dists.data_handle(), k, handle, - coo_idx.structure_view().get_n_rows(), - coo_query.structure_view().get_n_rows(), + batch_size_index, + batch_size_query, metric, metricArg); } diff --git a/cpp/test/sparse/neighbors/brute_force_coo.cu b/cpp/test/sparse/neighbors/brute_force_coo.cu new file mode 100644 index 0000000000..f1ebd6b578 --- /dev/null +++ b/cpp/test/sparse/neighbors/brute_force_coo.cu @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "../../test_utils.cuh" + +#include +#include +#include +#include +#include + +#include +#include + +namespace raft { +namespace sparse { +namespace selection { + +using namespace raft; +using namespace raft::sparse; + +template +struct SparseKNNInputs { + value_idx n_cols; + + std::vector indptr_h; + std::vector indices_h; + std::vector data_h; + + std::vector out_dists_ref_h; + std::vector out_indices_ref_h; + + int k; + + int batch_size_index = 2; + int batch_size_query = 2; + + raft::distance::DistanceType metric = raft::distance::DistanceType::L2SqrtExpanded; +}; + +template +::std::ostream& operator<<(::std::ostream& os, const SparseKNNInputs& dims) +{ + return os; +} + +template +class SparseKNNCOOTest : public ::testing::TestWithParam> { + public: + SparseKNNCOOTest() + : params(::testing::TestWithParam>::GetParam()), + indptr(0, resource::get_cuda_stream(handle)), + indices(0, resource::get_cuda_stream(handle)), + data(0, resource::get_cuda_stream(handle)), + out_indices(0, resource::get_cuda_stream(handle)), + out_dists(0, resource::get_cuda_stream(handle)), + out_indices_ref(0, resource::get_cuda_stream(handle)), + out_dists_ref(0, resource::get_cuda_stream(handle)) + { + } + + protected: + void SetUp() override + { + n_rows = params.indptr_h.size() - 1; + nnz = params.indices_h.size(); + k = params.k; + + auto out_indices_dev = raft::make_device_vector(handle, n_rows * k); + auto out_dists_dev = raft::make_device_vector(handle, n_rows * k); + + auto rows = raft::make_device_vector(handle, nnz); + + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + + make_data(); + + raft::sparse::convert::csr_to_coo( + indptr.data(), int(indptr.size()), rows.data_handle(), nnz, stream); + + auto coo_struct_view = raft::make_device_coordinate_structure_view( + rows.data_handle(), indices.data(), n_rows, params.n_cols, int(data.size())); + auto c_matrix = raft::make_device_coo_matrix( + handle, coo_struct_view); + raft::update_device( + c_matrix.view().get_elements().data(), data.data(), data.size(), stream); + + raft::sparse::neighbors::brute_force_knn(c_matrix, + c_matrix, + out_indices_dev.view(), + out_dists_dev.view(), + k, + handle, + params.batch_size_index, + params.batch_size_query, + params.metric); + + raft::copy(out_indices.data(), out_indices_dev.data_handle(), out_indices_dev.size(), stream); + raft::copy(out_dists.data(), out_dists_dev.data_handle(), out_dists_dev.size(), stream); + + RAFT_CUDA_TRY(cudaStreamSynchronize(resource::get_cuda_stream(handle))); + } + + void compare() + { + ASSERT_TRUE(devArrMatch( + out_dists_ref.data(), out_dists.data(), n_rows * k, CompareApprox(1e-4))); + ASSERT_TRUE( + devArrMatch(out_indices_ref.data(), out_indices.data(), n_rows * k, Compare())); + } + + protected: + void make_data() + { + std::vector indptr_h = params.indptr_h; + std::vector indices_h = params.indices_h; + std::vector data_h = params.data_h; + + auto stream = resource::get_cuda_stream(handle); + indptr.resize(indptr_h.size(), stream); + indices.resize(indices_h.size(), stream); + data.resize(data_h.size(), stream); + + update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream); + update_device(indices.data(), indices_h.data(), indices_h.size(), stream); + update_device(data.data(), data_h.data(), data_h.size(), stream); + + std::vector out_dists_ref_h = params.out_dists_ref_h; + std::vector out_indices_ref_h = params.out_indices_ref_h; + + out_indices_ref.resize(out_indices_ref_h.size(), stream); + 
out_dists_ref.resize(out_dists_ref_h.size(), stream); + + update_device( + out_indices_ref.data(), out_indices_ref_h.data(), out_indices_ref_h.size(), stream); + update_device(out_dists_ref.data(), out_dists_ref_h.data(), out_dists_ref_h.size(), stream); + + out_dists.resize(n_rows * k, stream); + out_indices.resize(n_rows * k, stream); + } + + raft::resources handle; + + int n_rows, nnz, k; + + // input data + rmm::device_uvector indptr, indices; + rmm::device_uvector data; + + // output data + rmm::device_uvector out_indices; + rmm::device_uvector out_dists; + + rmm::device_uvector out_indices_ref; + rmm::device_uvector out_dists_ref; + + SparseKNNInputs params; +}; + +const std::vector> inputs_i32_f = { + {9, // ncols + {0, 2, 4, 6, 8}, // indptr + {0, 4, 0, 3, 0, 2, 0, 8}, // indices + {0.0f, 1.0f, 5.0f, 6.0f, 5.0f, 6.0f, 0.0f, 1.0f}, // data + {0, 1.41421, 0, 7.87401, 0, 7.87401, 0, 1.41421}, // dists + {0, 3, 1, 0, 2, 0, 3, 0}, // inds + 2, + 2, + 2, + raft::distance::DistanceType::L2SqrtExpanded}}; +typedef SparseKNNCOOTest SparseKNNCOOTestF; +TEST_P(SparseKNNCOOTestF, Result) { compare(); } +INSTANTIATE_TEST_CASE_P(SparseKNNCOOTest, SparseKNNCOOTestF, ::testing::ValuesIn(inputs_i32_f)); + +}; // end namespace selection +}; // end namespace sparse +}; // end namespace raft diff --git a/cpp/test/sparse/neighbors/brute_force_csr.cu b/cpp/test/sparse/neighbors/brute_force_csr.cu new file mode 100644 index 0000000000..dec1914e09 --- /dev/null +++ b/cpp/test/sparse/neighbors/brute_force_csr.cu @@ -0,0 +1,183 @@ +/* + * Copyright (c) 2018-2024, NVIDIA CORPORATION. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "../../test_utils.cuh" + +#include +#include +#include +#include + +#include +#include + +namespace raft { +namespace sparse { +namespace selection { + +using namespace raft; +using namespace raft::sparse; + +template +struct SparseKNNInputs { + value_idx n_cols; + + std::vector indptr_h; + std::vector indices_h; + std::vector data_h; + + std::vector out_dists_ref_h; + std::vector out_indices_ref_h; + + int k; + + int batch_size_index = 2; + int batch_size_query = 2; + + raft::distance::DistanceType metric = raft::distance::DistanceType::L2SqrtExpanded; +}; + +template +::std::ostream& operator<<(::std::ostream& os, const SparseKNNInputs& dims) +{ + return os; +} + +template +class SparseKNNCSRTest : public ::testing::TestWithParam> { + public: + SparseKNNCSRTest() + : params(::testing::TestWithParam>::GetParam()), + indptr(0, resource::get_cuda_stream(handle)), + indices(0, resource::get_cuda_stream(handle)), + data(0, resource::get_cuda_stream(handle)), + out_indices(0, resource::get_cuda_stream(handle)), + out_dists(0, resource::get_cuda_stream(handle)), + out_indices_ref(0, resource::get_cuda_stream(handle)), + out_dists_ref(0, resource::get_cuda_stream(handle)) + { + } + + protected: + void SetUp() override + { + n_rows = params.indptr_h.size() - 1; + nnz = params.indices_h.size(); + k = params.k; + auto out_indices_dev = raft::make_device_vector(handle, n_rows * k); + auto out_dists_dev = raft::make_device_vector(handle, n_rows * k); + + cudaStream_t stream = raft::resource::get_cuda_stream(handle); + + make_data(); + auto csr_struct_view = raft::make_device_compressed_structure_view( + indptr.data(), indices.data(), n_rows, params.n_cols, int(data.size())); + auto c_matrix = raft::make_device_csr_matrix(handle, csr_struct_view); + + raft::update_device( + c_matrix.view().get_elements().data(), data.data(), data.size(), stream); + + raft::sparse::neighbors::brute_force_knn(c_matrix, + c_matrix, + out_indices_dev.view(), + out_dists_dev.view(), + k, + handle, + params.batch_size_index, + params.batch_size_query, + params.metric); + + raft::copy(out_indices.data(), out_indices_dev.data_handle(), out_indices_dev.size(), stream); + raft::copy(out_dists.data(), out_dists_dev.data_handle(), out_dists_dev.size(), stream); + std::cout << "finished copy" << std::endl; + + RAFT_CUDA_TRY(cudaStreamSynchronize(resource::get_cuda_stream(handle))); + } + + void compare() + { + ASSERT_TRUE(devArrMatch( + out_dists_ref.data(), out_dists.data(), n_rows * k, CompareApprox(1e-4))); + ASSERT_TRUE( + devArrMatch(out_indices_ref.data(), out_indices.data(), n_rows * k, Compare())); + } + + protected: + void make_data() + { + std::vector indptr_h = params.indptr_h; + std::vector indices_h = params.indices_h; + std::vector data_h = params.data_h; + + auto stream = resource::get_cuda_stream(handle); + indptr.resize(indptr_h.size(), stream); + indices.resize(indices_h.size(), stream); + data.resize(data_h.size(), stream); + + update_device(indptr.data(), indptr_h.data(), indptr_h.size(), stream); + update_device(indices.data(), indices_h.data(), indices_h.size(), stream); + update_device(data.data(), data_h.data(), data_h.size(), stream); + + std::vector out_dists_ref_h = params.out_dists_ref_h; + std::vector out_indices_ref_h = params.out_indices_ref_h; + + out_indices_ref.resize(out_indices_ref_h.size(), stream); + out_dists_ref.resize(out_dists_ref_h.size(), stream); + + update_device( + out_indices_ref.data(), out_indices_ref_h.data(), out_indices_ref_h.size(), stream); + 
update_device(out_dists_ref.data(), out_dists_ref_h.data(), out_dists_ref_h.size(), stream); + + out_dists.resize(n_rows * k, stream); + out_indices.resize(n_rows * k, stream); + } + + raft::resources handle; + + int n_rows, nnz, k; + + // input data + rmm::device_uvector indptr, indices; + rmm::device_uvector data; + + // output data + rmm::device_uvector out_indices; + rmm::device_uvector out_dists; + + rmm::device_uvector out_indices_ref; + rmm::device_uvector out_dists_ref; + + SparseKNNInputs params; +}; + +const std::vector> inputs_i32_f = { + {9, // ncols + {0, 2, 4, 6, 8}, // indptr + {0, 4, 0, 3, 0, 2, 0, 8}, // indices + {0.0f, 1.0f, 5.0f, 6.0f, 5.0f, 6.0f, 0.0f, 1.0f}, // data + {0, 1.41421, 0, 7.87401, 0, 7.87401, 0, 1.41421}, // dists + {0, 3, 1, 0, 2, 0, 3, 0}, // inds + 2, + 2, + 2, + raft::distance::DistanceType::L2SqrtExpanded}}; +typedef SparseKNNCSRTest SparseKNNCSRTestF; +TEST_P(SparseKNNCSRTestF, Result) { compare(); } +INSTANTIATE_TEST_CASE_P(SparseKNNCSRTest, SparseKNNCSRTestF, ::testing::ValuesIn(inputs_i32_f)); + +}; // end namespace selection +}; // end namespace sparse +}; // end namespace raft From 397042a0af9cb819a098ce60259a55e0a4509956 Mon Sep 17 00:00:00 2001 From: Ray Douglass Date: Wed, 9 Oct 2024 11:29:22 -0400 Subject: [PATCH 62/75] REL v24.10.00 release --- .../all_cuda-118_arch-aarch64.yaml | 14 ++++----- .../all_cuda-118_arch-x86_64.yaml | 14 ++++----- .../all_cuda-125_arch-aarch64.yaml | 14 ++++----- .../all_cuda-125_arch-x86_64.yaml | 14 ++++----- .../bench_ann_cuda-118_arch-aarch64.yaml | 4 +-- .../bench_ann_cuda-118_arch-x86_64.yaml | 4 +-- .../bench_ann_cuda-120_arch-aarch64.yaml | 4 +-- .../bench_ann_cuda-120_arch-x86_64.yaml | 4 +-- dependencies.yaml | 30 +++++++++---------- python/pylibraft/pyproject.toml | 4 +-- python/raft-dask/pyproject.toml | 10 +++---- 11 files changed, 58 insertions(+), 58 deletions(-) diff --git a/conda/environments/all_cuda-118_arch-aarch64.yaml b/conda/environments/all_cuda-118_arch-aarch64.yaml index 0b84772fad..452420e143 100644 --- a/conda/environments/all_cuda-118_arch-aarch64.yaml +++ b/conda/environments/all_cuda-118_arch-aarch64.yaml @@ -20,8 +20,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.10.*,>=0.0.0a0 -- distributed-ucxx==0.40.*,>=0.0.0a0 +- dask-cuda==24.10.* +- distributed-ucxx==0.40.* - doxygen>=1.8.20 - gcc_linux-aarch64=11.* - graphviz @@ -35,7 +35,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.40.*,>=0.0.0a0 +- libucxx==0.40.* - nccl>=2.19 - ninja - numba>=0.57 @@ -44,18 +44,18 @@ dependencies: - nvcc_linux-aarch64=11.8 - pre-commit - pydata-sphinx-theme -- pylibraft==24.10.*,>=0.0.0a0 +- pylibraft==24.10.* - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.10.*,>=0.0.0a0 +- rapids-dask-dependency==24.10.* - recommonmark -- rmm==24.10.*,>=0.0.0a0 +- rmm==24.10.* - scikit-build-core>=0.10.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-aarch64==2.17 -- ucx-py==0.40.*,>=0.0.0a0 +- ucx-py==0.40.* name: all_cuda-118_arch-aarch64 diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml index d1c01f1b16..abdd8ec717 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -20,8 +20,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.10.*,>=0.0.0a0 -- 
distributed-ucxx==0.40.*,>=0.0.0a0 +- dask-cuda==24.10.* +- distributed-ucxx==0.40.* - doxygen>=1.8.20 - gcc_linux-64=11.* - graphviz @@ -35,7 +35,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.40.*,>=0.0.0a0 +- libucxx==0.40.* - nccl>=2.19 - ninja - numba>=0.57 @@ -44,18 +44,18 @@ dependencies: - nvcc_linux-64=11.8 - pre-commit - pydata-sphinx-theme -- pylibraft==24.10.*,>=0.0.0a0 +- pylibraft==24.10.* - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.10.*,>=0.0.0a0 +- rapids-dask-dependency==24.10.* - recommonmark -- rmm==24.10.*,>=0.0.0a0 +- rmm==24.10.* - scikit-build-core>=0.10.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-64==2.17 -- ucx-py==0.40.*,>=0.0.0a0 +- ucx-py==0.40.* name: all_cuda-118_arch-x86_64 diff --git a/conda/environments/all_cuda-125_arch-aarch64.yaml b/conda/environments/all_cuda-125_arch-aarch64.yaml index 4c506f5297..3647762e0f 100644 --- a/conda/environments/all_cuda-125_arch-aarch64.yaml +++ b/conda/environments/all_cuda-125_arch-aarch64.yaml @@ -21,8 +21,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.10.*,>=0.0.0a0 -- distributed-ucxx==0.40.*,>=0.0.0a0 +- dask-cuda==24.10.* +- distributed-ucxx==0.40.* - doxygen>=1.8.20 - gcc_linux-aarch64=11.* - graphviz @@ -32,7 +32,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.40.*,>=0.0.0a0 +- libucxx==0.40.* - nccl>=2.19 - ninja - numba>=0.57 @@ -40,18 +40,18 @@ dependencies: - numpydoc - pre-commit - pydata-sphinx-theme -- pylibraft==24.10.*,>=0.0.0a0 +- pylibraft==24.10.* - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.10.*,>=0.0.0a0 +- rapids-dask-dependency==24.10.* - recommonmark -- rmm==24.10.*,>=0.0.0a0 +- rmm==24.10.* - scikit-build-core>=0.10.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-aarch64==2.17 -- ucx-py==0.40.*,>=0.0.0a0 +- ucx-py==0.40.* name: all_cuda-125_arch-aarch64 diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml index a123950e3a..53209763c1 100644 --- a/conda/environments/all_cuda-125_arch-x86_64.yaml +++ b/conda/environments/all_cuda-125_arch-x86_64.yaml @@ -21,8 +21,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.10.*,>=0.0.0a0 -- distributed-ucxx==0.40.*,>=0.0.0a0 +- dask-cuda==24.10.* +- distributed-ucxx==0.40.* - doxygen>=1.8.20 - gcc_linux-64=11.* - graphviz @@ -32,7 +32,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.40.*,>=0.0.0a0 +- libucxx==0.40.* - nccl>=2.19 - ninja - numba>=0.57 @@ -40,18 +40,18 @@ dependencies: - numpydoc - pre-commit - pydata-sphinx-theme -- pylibraft==24.10.*,>=0.0.0a0 +- pylibraft==24.10.* - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.10.*,>=0.0.0a0 +- rapids-dask-dependency==24.10.* - recommonmark -- rmm==24.10.*,>=0.0.0a0 +- rmm==24.10.* - scikit-build-core>=0.10.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-64==2.17 -- ucx-py==0.40.*,>=0.0.0a0 +- ucx-py==0.40.* name: all_cuda-125_arch-x86_64 diff --git a/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml b/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml index 864eb2130b..bb5e6c6103 100644 --- a/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml +++ 
b/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml @@ -30,7 +30,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.40.*,>=0.0.0a0 +- libucxx==0.40.* - matplotlib - nccl>=2.19 - ninja @@ -40,7 +40,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.10.*,>=0.0.0a0 +- rmm==24.10.* - scikit-build-core>=0.10.0 - sysroot_linux-aarch64==2.17 name: bench_ann_cuda-118_arch-aarch64 diff --git a/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml b/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml index 5da6eaf17e..4a686a44ab 100644 --- a/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml +++ b/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml @@ -30,7 +30,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.40.*,>=0.0.0a0 +- libucxx==0.40.* - matplotlib - nccl>=2.19 - ninja @@ -40,7 +40,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.10.*,>=0.0.0a0 +- rmm==24.10.* - scikit-build-core>=0.10.0 - sysroot_linux-64==2.17 name: bench_ann_cuda-118_arch-x86_64 diff --git a/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml b/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml index 65de97c170..2c772b29d9 100644 --- a/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml +++ b/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml @@ -27,7 +27,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.40.*,>=0.0.0a0 +- libucxx==0.40.* - matplotlib - nccl>=2.19 - ninja @@ -36,7 +36,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.10.*,>=0.0.0a0 +- rmm==24.10.* - scikit-build-core>=0.10.0 - sysroot_linux-aarch64==2.17 name: bench_ann_cuda-120_arch-aarch64 diff --git a/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml b/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml index 7e1adbc483..fac8421691 100644 --- a/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml +++ b/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml @@ -27,7 +27,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.40.*,>=0.0.0a0 +- libucxx==0.40.* - matplotlib - nccl>=2.19 - ninja @@ -36,7 +36,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.10.*,>=0.0.0a0 +- rmm==24.10.* - scikit-build-core>=0.10.0 - sysroot_linux-64==2.17 name: bench_ann_cuda-120_arch-x86_64 diff --git a/dependencies.yaml b/dependencies.yaml index d0991f4d04..b3a4803d52 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -171,7 +171,7 @@ dependencies: - c-compiler - cxx-compiler - nccl>=2.19 - - libucxx==0.40.*,>=0.0.0a0 + - libucxx==0.40.* specific: - output_types: conda matrices: @@ -210,7 +210,7 @@ dependencies: common: - output_types: [conda] packages: - - &rmm_unsuffixed rmm==24.10.*,>=0.0.0a0 + - &rmm_unsuffixed rmm==24.10.* - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -237,12 +237,12 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - &rmm_cu12 rmm-cu12==24.10.*,>=0.0.0a0 + - &rmm_cu12 rmm-cu12==24.10.* - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - &rmm_cu11 rmm-cu11==24.10.*,>=0.0.0a0 + - &rmm_cu11 rmm-cu11==24.10.* - {matrix: null, packages: [*rmm_unsuffixed] } checks: common: @@ -514,14 +514,14 @@ dependencies: common: - output_types: [conda, pyproject] packages: - - 
dask-cuda==24.10.*,>=0.0.0a0 + - dask-cuda==24.10.* - joblib>=0.11 - numba>=0.57 - - rapids-dask-dependency==24.10.*,>=0.0.0a0 + - rapids-dask-dependency==24.10.* - output_types: conda packages: - - &pylibraft_unsuffixed pylibraft==24.10.*,>=0.0.0a0 - - &ucx_py_unsuffixed ucx-py==0.40.*,>=0.0.0a0 + - &pylibraft_unsuffixed pylibraft==24.10.* + - &ucx_py_unsuffixed ucx-py==0.40.* - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -535,14 +535,14 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - &pylibraft_cu12 pylibraft-cu12==24.10.*,>=0.0.0a0 - - &ucx_py_cu12 ucx-py-cu12==0.40.*,>=0.0.0a0 + - &pylibraft_cu12 pylibraft-cu12==24.10.* + - &ucx_py_cu12 ucx-py-cu12==0.40.* - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - &pylibraft_cu11 pylibraft-cu11==24.10.*,>=0.0.0a0 - - &ucx_py_cu11 ucx-py-cu11==0.40.*,>=0.0.0a0 + - &pylibraft_cu11 pylibraft-cu11==24.10.* + - &ucx_py_cu11 ucx-py-cu11==0.40.* - {matrix: null, packages: [*pylibraft_unsuffixed, *ucx_py_unsuffixed]} test_python_common: common: @@ -562,7 +562,7 @@ dependencies: packages: # UCXX is not currently a hard-dependency thus only installed during tests, # this will change in the future. - - &distributed_ucxx_unsuffixed distributed-ucxx==0.40.*,>=0.0.0a0 + - &distributed_ucxx_unsuffixed distributed-ucxx==0.40.* - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -575,12 +575,12 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - distributed-ucxx-cu12==0.40.*,>=0.0.0a0 + - distributed-ucxx-cu12==0.40.* - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - distributed-ucxx-cu11==0.40.*,>=0.0.0a0 + - distributed-ucxx-cu11==0.40.* - {matrix: null, packages: [*distributed_ucxx_unsuffixed]} depends_on_ucx_build: common: diff --git a/python/pylibraft/pyproject.toml b/python/pylibraft/pyproject.toml index a540915585..854970277e 100644 --- a/python/pylibraft/pyproject.toml +++ b/python/pylibraft/pyproject.toml @@ -37,7 +37,7 @@ dependencies = [ "nvidia-curand", "nvidia-cusolver", "nvidia-cusparse", - "rmm==24.10.*,>=0.0.0a0", + "rmm==24.10.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", @@ -125,7 +125,7 @@ requires = [ "cuda-python", "cython>=3.0.0", "ninja", - "rmm==24.10.*,>=0.0.0a0", + "rmm==24.10.*", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. dependencies-file = "../../dependencies.yaml" matrix-entry = "cuda_suffixed=true;use_cuda_wheels=true" diff --git a/python/raft-dask/pyproject.toml b/python/raft-dask/pyproject.toml index d1f577120f..aa75185fbc 100644 --- a/python/raft-dask/pyproject.toml +++ b/python/raft-dask/pyproject.toml @@ -31,13 +31,13 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.10" dependencies = [ - "dask-cuda==24.10.*,>=0.0.0a0", - "distributed-ucxx==0.40.*,>=0.0.0a0", + "dask-cuda==24.10.*", + "distributed-ucxx==0.40.*", "joblib>=0.11", "numba>=0.57", - "pylibraft==24.10.*,>=0.0.0a0", - "rapids-dask-dependency==24.10.*,>=0.0.0a0", - "ucx-py==0.40.*,>=0.0.0a0", + "pylibraft==24.10.*", + "rapids-dask-dependency==24.10.*", + "ucx-py==0.40.*", ] # This list was generated by `rapids-dependency-file-generator`. 
To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", From b0000657859424e9cca53c951996b17cbb36d589 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Thu, 17 Oct 2024 16:12:02 -0400 Subject: [PATCH 63/75] remove unused functions --- .../sparse/matrix/detail/preprocessing.cuh | 30 +------------------ 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh index 26703d692c..4865fab8a3 100644 --- a/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh +++ b/cpp/include/raft/sparse/matrix/detail/preprocessing.cuh @@ -105,33 +105,6 @@ struct map_to { raft::device_vector_view map; }; -template -struct map_add { - map_add(raft::device_vector_view map) : map(map) {} - - float __device__ operator()(const T1& key, const T2& count) - { - map[key] = map[key] + count; - return 0.0f; - } - - raft::device_vector_view map; -}; - -template -struct map_inc { - map_inc(raft::device_vector_view map) : map(map) {} - - float __device__ operator()(const T1& key) - { - T1 value = map[key]; - map[key] = value + 1; - return 0.0f; - } - - raft::device_vector_view map; -}; - /** * @brief Get unique counts * @param handle: raft resource handle @@ -169,7 +142,7 @@ void get_uniques_counts(raft::resources& handle, } /** - * @brief Compute cumulative sum for each unique value in the origin array + * @brief Broadcasts values to target indices of vector based on key/value look up * @param handle: raft resource handle * @param origin: Input array that has values to use for computation * @param keys: Output array that has keys, should be the size of unique @@ -227,7 +200,6 @@ void get_id_counts(raft::resources& handle, values.data_handle(), stream); - // auto row_keys = raft::make_device_vector(handle, uniq_rows); auto rows_counts = raft::make_device_vector(handle, n_rows); raft::matrix::fill(handle, rows_counts.view(), 0); From 85d97d0fe56db2341b0fdcd20eb3ec06b92cce72 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 12:28:44 -0400 Subject: [PATCH 64/75] getting rid of changes to pull request template, not part of this PR --- .github/PULL_REQUEST_TEMPLATE.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index caf46f5d6a..4cafc39a12 100755 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -34,11 +34,11 @@ Here are some guidelines to help the review process go smoothly. features or make changes out of the scope of those requested by the reviewer (doing this just add delays as already reviewed code ends up having to be re-reviewed/it is hard to tell what is new etc!). Further, please do not - rebase your branch/force push/rewrite history, doing any of these + rebase your branch on master/force push/rewrite history, doing any of these causes the context of any comments made by reviewers to be lost. If - conflicts occur they should be resolved by merging the target branch + conflicts occur against master they should be resolved by merging master into the branch used for making the pull request. Many thanks in advance for your cooperation! 
---> +--> \ No newline at end of file From c59bdf9da19f20c917d0ab70440d817c422e44c8 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 12:30:50 -0400 Subject: [PATCH 65/75] revert contributing md changes --- docs/source/contributing.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/source/contributing.md b/docs/source/contributing.md index 47eb88c429..89ff8043f1 100755 --- a/docs/source/contributing.md +++ b/docs/source/contributing.md @@ -88,4 +88,5 @@ others know you are working on it. If you have any questions related to the implementation of the issue, ask them in the issue instead of the PR. ## Attribution -Portions adopted from https://github.com/pytorch/pytorch/blob/main/CONTRIBUTING.md +Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md + From c8710234f95796d0b4e98a34ded8c2cf0cbb363d Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 14:29:05 -0400 Subject: [PATCH 66/75] remove change to pre-commit-config.yaml --- .pre-commit-config.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 045006e52b..458d8b1b51 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -116,7 +116,6 @@ repos: docs/source/sphinxext/github_link[.]py| cpp/cmake/modules/FindAVX[.]cmake - id: verify-alpha-spec - args: ["--fix", "--mode=release"] - repo: https://github.com/rapidsai/dependency-file-generator rev: v1.13.11 hooks: From 04041c2fe3931cef96751770e2f3df6d08b531e3 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 14:32:15 -0400 Subject: [PATCH 67/75] remove all changes to conda env files --- conda/environments/all_cuda-118_arch-aarch64.yaml | 14 +++++++------- conda/environments/all_cuda-118_arch-x86_64.yaml | 14 +++++++------- conda/environments/all_cuda-125_arch-aarch64.yaml | 14 +++++++------- conda/environments/all_cuda-125_arch-x86_64.yaml | 14 +++++++------- .../bench_ann_cuda-118_arch-aarch64.yaml | 4 ++-- .../bench_ann_cuda-118_arch-x86_64.yaml | 4 ++-- .../bench_ann_cuda-120_arch-aarch64.yaml | 4 ++-- .../bench_ann_cuda-120_arch-x86_64.yaml | 4 ++-- 8 files changed, 36 insertions(+), 36 deletions(-) diff --git a/conda/environments/all_cuda-118_arch-aarch64.yaml b/conda/environments/all_cuda-118_arch-aarch64.yaml index 452420e143..c5ff93ebb9 100644 --- a/conda/environments/all_cuda-118_arch-aarch64.yaml +++ b/conda/environments/all_cuda-118_arch-aarch64.yaml @@ -20,8 +20,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.10.* -- distributed-ucxx==0.40.* +- dask-cuda==24.12.*,>=0.0.0a0 +- distributed-ucxx==0.41.*,>=0.0.0a0 - doxygen>=1.8.20 - gcc_linux-aarch64=11.* - graphviz @@ -35,7 +35,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.40.* +- libucxx==0.41.*,>=0.0.0a0 - nccl>=2.19 - ninja - numba>=0.57 @@ -44,18 +44,18 @@ dependencies: - nvcc_linux-aarch64=11.8 - pre-commit - pydata-sphinx-theme -- pylibraft==24.10.* +- pylibraft==24.12.*,>=0.0.0a0 - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.10.* +- rapids-dask-dependency==24.12.*,>=0.0.0a0 - recommonmark -- rmm==24.10.* +- rmm==24.12.*,>=0.0.0a0 - scikit-build-core>=0.10.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-aarch64==2.17 -- ucx-py==0.40.* +- ucx-py==0.41.*,>=0.0.0a0 name: all_cuda-118_arch-aarch64 diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml 
b/conda/environments/all_cuda-118_arch-x86_64.yaml index abdd8ec717..069896c137 100644 --- a/conda/environments/all_cuda-118_arch-x86_64.yaml +++ b/conda/environments/all_cuda-118_arch-x86_64.yaml @@ -20,8 +20,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.10.* -- distributed-ucxx==0.40.* +- dask-cuda==24.12.*,>=0.0.0a0 +- distributed-ucxx==0.41.*,>=0.0.0a0 - doxygen>=1.8.20 - gcc_linux-64=11.* - graphviz @@ -35,7 +35,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.40.* +- libucxx==0.41.*,>=0.0.0a0 - nccl>=2.19 - ninja - numba>=0.57 @@ -44,18 +44,18 @@ dependencies: - nvcc_linux-64=11.8 - pre-commit - pydata-sphinx-theme -- pylibraft==24.10.* +- pylibraft==24.12.*,>=0.0.0a0 - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.10.* +- rapids-dask-dependency==24.12.*,>=0.0.0a0 - recommonmark -- rmm==24.10.* +- rmm==24.12.*,>=0.0.0a0 - scikit-build-core>=0.10.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-64==2.17 -- ucx-py==0.40.* +- ucx-py==0.41.*,>=0.0.0a0 name: all_cuda-118_arch-x86_64 diff --git a/conda/environments/all_cuda-125_arch-aarch64.yaml b/conda/environments/all_cuda-125_arch-aarch64.yaml index 3647762e0f..932934fb18 100644 --- a/conda/environments/all_cuda-125_arch-aarch64.yaml +++ b/conda/environments/all_cuda-125_arch-aarch64.yaml @@ -21,8 +21,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.10.* -- distributed-ucxx==0.40.* +- dask-cuda==24.12.*,>=0.0.0a0 +- distributed-ucxx==0.41.*,>=0.0.0a0 - doxygen>=1.8.20 - gcc_linux-aarch64=11.* - graphviz @@ -32,7 +32,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.40.* +- libucxx==0.41.*,>=0.0.0a0 - nccl>=2.19 - ninja - numba>=0.57 @@ -40,18 +40,18 @@ dependencies: - numpydoc - pre-commit - pydata-sphinx-theme -- pylibraft==24.10.* +- pylibraft==24.12.*,>=0.0.0a0 - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.10.* +- rapids-dask-dependency==24.12.*,>=0.0.0a0 - recommonmark -- rmm==24.10.* +- rmm==24.12.*,>=0.0.0a0 - scikit-build-core>=0.10.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-aarch64==2.17 -- ucx-py==0.40.* +- ucx-py==0.41.*,>=0.0.0a0 name: all_cuda-125_arch-aarch64 diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml index 53209763c1..5f0cfdec68 100644 --- a/conda/environments/all_cuda-125_arch-x86_64.yaml +++ b/conda/environments/all_cuda-125_arch-x86_64.yaml @@ -21,8 +21,8 @@ dependencies: - cupy>=12.0.0 - cxx-compiler - cython>=3.0.0 -- dask-cuda==24.10.* -- distributed-ucxx==0.40.* +- dask-cuda==24.12.*,>=0.0.0a0 +- distributed-ucxx==0.41.*,>=0.0.0a0 - doxygen>=1.8.20 - gcc_linux-64=11.* - graphviz @@ -32,7 +32,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.40.* +- libucxx==0.41.*,>=0.0.0a0 - nccl>=2.19 - ninja - numba>=0.57 @@ -40,18 +40,18 @@ dependencies: - numpydoc - pre-commit - pydata-sphinx-theme -- pylibraft==24.10.* +- pylibraft==24.12.*,>=0.0.0a0 - pytest-cov - pytest==7.* - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rapids-dask-dependency==24.10.* +- rapids-dask-dependency==24.12.*,>=0.0.0a0 - recommonmark -- rmm==24.10.* +- rmm==24.12.*,>=0.0.0a0 - scikit-build-core>=0.10.0 - scikit-learn - scipy - sphinx-copybutton - sphinx-markdown-tables - sysroot_linux-64==2.17 -- 
ucx-py==0.40.* +- ucx-py==0.41.*,>=0.0.0a0 name: all_cuda-125_arch-x86_64 diff --git a/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml b/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml index bb5e6c6103..4c9d308ecd 100644 --- a/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml +++ b/conda/environments/bench_ann_cuda-118_arch-aarch64.yaml @@ -30,7 +30,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.40.* +- libucxx==0.41.*,>=0.0.0a0 - matplotlib - nccl>=2.19 - ninja @@ -40,7 +40,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.10.* +- rmm==24.12.*,>=0.0.0a0 - scikit-build-core>=0.10.0 - sysroot_linux-aarch64==2.17 name: bench_ann_cuda-118_arch-aarch64 diff --git a/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml b/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml index 4a686a44ab..1b62c492cf 100644 --- a/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml +++ b/conda/environments/bench_ann_cuda-118_arch-x86_64.yaml @@ -30,7 +30,7 @@ dependencies: - libcusolver=11.4.1.48 - libcusparse-dev=11.7.5.86 - libcusparse=11.7.5.86 -- libucxx==0.40.* +- libucxx==0.41.*,>=0.0.0a0 - matplotlib - nccl>=2.19 - ninja @@ -40,7 +40,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.10.* +- rmm==24.12.*,>=0.0.0a0 - scikit-build-core>=0.10.0 - sysroot_linux-64==2.17 name: bench_ann_cuda-118_arch-x86_64 diff --git a/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml b/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml index 2c772b29d9..54d67f462a 100644 --- a/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml +++ b/conda/environments/bench_ann_cuda-120_arch-aarch64.yaml @@ -27,7 +27,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.40.* +- libucxx==0.41.*,>=0.0.0a0 - matplotlib - nccl>=2.19 - ninja @@ -36,7 +36,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.10.* +- rmm==24.12.*,>=0.0.0a0 - scikit-build-core>=0.10.0 - sysroot_linux-aarch64==2.17 name: bench_ann_cuda-120_arch-aarch64 diff --git a/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml b/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml index fac8421691..4f39378047 100644 --- a/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml +++ b/conda/environments/bench_ann_cuda-120_arch-x86_64.yaml @@ -27,7 +27,7 @@ dependencies: - libcurand-dev - libcusolver-dev - libcusparse-dev -- libucxx==0.40.* +- libucxx==0.41.*,>=0.0.0a0 - matplotlib - nccl>=2.19 - ninja @@ -36,7 +36,7 @@ dependencies: - pandas - pyyaml - rapids-build-backend>=0.3.0,<0.4.0.dev0 -- rmm==24.10.* +- rmm==24.12.*,>=0.0.0a0 - scikit-build-core>=0.10.0 - sysroot_linux-64==2.17 name: bench_ann_cuda-120_arch-x86_64 From b022e6ec6b5cc09d4c8d7618e5a69c6b830f8c3b Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 14:34:07 -0400 Subject: [PATCH 68/75] revert changes to python pyproject files --- python/pylibraft/pyproject.toml | 4 ++-- python/raft-dask/pyproject.toml | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/python/pylibraft/pyproject.toml b/python/pylibraft/pyproject.toml index 854970277e..f0f3849c6d 100644 --- a/python/pylibraft/pyproject.toml +++ b/python/pylibraft/pyproject.toml @@ -37,7 +37,7 @@ dependencies = [ "nvidia-curand", "nvidia-cusolver", "nvidia-cusparse", - "rmm==24.10.*", + "rmm==24.12.*,>=0.0.0a0", ] # This list was generated by 
`rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", @@ -125,7 +125,7 @@ requires = [ "cuda-python", "cython>=3.0.0", "ninja", - "rmm==24.10.*", + "rmm==24.12.*,>=0.0.0a0", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. dependencies-file = "../../dependencies.yaml" matrix-entry = "cuda_suffixed=true;use_cuda_wheels=true" diff --git a/python/raft-dask/pyproject.toml b/python/raft-dask/pyproject.toml index aa75185fbc..d71f89085b 100644 --- a/python/raft-dask/pyproject.toml +++ b/python/raft-dask/pyproject.toml @@ -31,13 +31,13 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.10" dependencies = [ - "dask-cuda==24.10.*", - "distributed-ucxx==0.40.*", + "dask-cuda==24.12.*,>=0.0.0a0", + "distributed-ucxx==0.41.*,>=0.0.0a0", "joblib>=0.11", "numba>=0.57", - "pylibraft==24.10.*", - "rapids-dask-dependency==24.10.*", - "ucx-py==0.40.*", + "pylibraft==24.12.*,>=0.0.0a0", + "rapids-dask-dependency==24.12.*,>=0.0.0a0", + "ucx-py==0.41.*,>=0.0.0a0", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../dependencies.yaml and run `rapids-dependency-file-generator`. classifiers = [ "Intended Audience :: Developers", From 52dd0d95fb251a38574982fbab3c1a545ad44789 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 14:38:32 -0400 Subject: [PATCH 69/75] remove extra comment symbol in file --- .github/PULL_REQUEST_TEMPLATE.md | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 4cafc39a12..d46e9e4f6e 100755 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -41,4 +41,3 @@ Here are some guidelines to help the review process go smoothly. Many thanks in advance for your cooperation! ---> \ No newline at end of file From 8b53c8df259085ed2d9df3063cf92c115e40f4c2 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 14:41:00 -0400 Subject: [PATCH 70/75] complete reversion of file to main --- .github/PULL_REQUEST_TEMPLATE.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d46e9e4f6e..d889b2d593 100755 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -34,10 +34,11 @@ Here are some guidelines to help the review process go smoothly. features or make changes out of the scope of those requested by the reviewer (doing this just add delays as already reviewed code ends up having to be re-reviewed/it is hard to tell what is new etc!). Further, please do not - rebase your branch on master/force push/rewrite history, doing any of these + rebase your branch on main/force push/rewrite history, doing any of these causes the context of any comments made by reviewers to be lost. If - conflicts occur against master they should be resolved by merging master + conflicts occur against main they should be resolved by merging main into the branch used for making the pull request. Many thanks in advance for your cooperation! 
+--> From d0e875064d5ac22a1fcf165b475fa8b338eee288 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 14:43:18 -0400 Subject: [PATCH 71/75] revert dependencies file from merge --- dependencies.yaml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/dependencies.yaml b/dependencies.yaml index b3a4803d52..6c33ba92b5 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -171,7 +171,7 @@ dependencies: - c-compiler - cxx-compiler - nccl>=2.19 - - libucxx==0.40.* + - libucxx==0.41.*,>=0.0.0a0 specific: - output_types: conda matrices: @@ -210,7 +210,7 @@ dependencies: common: - output_types: [conda] packages: - - &rmm_unsuffixed rmm==24.10.* + - &rmm_unsuffixed rmm==24.12.*,>=0.0.0a0 - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -237,12 +237,12 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - &rmm_cu12 rmm-cu12==24.10.* + - &rmm_cu12 rmm-cu12==24.12.*,>=0.0.0a0 - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - &rmm_cu11 rmm-cu11==24.10.* + - &rmm_cu11 rmm-cu11==24.12.*,>=0.0.0a0 - {matrix: null, packages: [*rmm_unsuffixed] } checks: common: @@ -514,14 +514,14 @@ dependencies: common: - output_types: [conda, pyproject] packages: - - dask-cuda==24.10.* + - dask-cuda==24.12.*,>=0.0.0a0 - joblib>=0.11 - numba>=0.57 - - rapids-dask-dependency==24.10.* + - rapids-dask-dependency==24.12.*,>=0.0.0a0 - output_types: conda packages: - - &pylibraft_unsuffixed pylibraft==24.10.* - - &ucx_py_unsuffixed ucx-py==0.40.* + - &pylibraft_unsuffixed pylibraft==24.12.*,>=0.0.0a0 + - &ucx_py_unsuffixed ucx-py==0.41.*,>=0.0.0a0 - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -535,14 +535,14 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - &pylibraft_cu12 pylibraft-cu12==24.10.* - - &ucx_py_cu12 ucx-py-cu12==0.40.* + - &pylibraft_cu12 pylibraft-cu12==24.12.*,>=0.0.0a0 + - &ucx_py_cu12 ucx-py-cu12==0.41.*,>=0.0.0a0 - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - &pylibraft_cu11 pylibraft-cu11==24.10.* - - &ucx_py_cu11 ucx-py-cu11==0.40.* + - &pylibraft_cu11 pylibraft-cu11==24.12.*,>=0.0.0a0 + - &ucx_py_cu11 ucx-py-cu11==0.41.*,>=0.0.0a0 - {matrix: null, packages: [*pylibraft_unsuffixed, *ucx_py_unsuffixed]} test_python_common: common: @@ -562,7 +562,7 @@ dependencies: packages: # UCXX is not currently a hard-dependency thus only installed during tests, # this will change in the future. 
- - &distributed_ucxx_unsuffixed distributed-ucxx==0.40.* + - &distributed_ucxx_unsuffixed distributed-ucxx==0.41.*,>=0.0.0a0 - output_types: requirements packages: # pip recognizes the index as a global option for the requirements.txt file @@ -575,12 +575,12 @@ dependencies: cuda: "12.*" cuda_suffixed: "true" packages: - - distributed-ucxx-cu12==0.40.* + - distributed-ucxx-cu12==0.41.*,>=0.0.0a0 - matrix: cuda: "11.*" cuda_suffixed: "true" packages: - - distributed-ucxx-cu11==0.40.* + - distributed-ucxx-cu11==0.41.*,>=0.0.0a0 - {matrix: null, packages: [*distributed_ucxx_unsuffixed]} depends_on_ucx_build: common: From f9c9a0b9786bdc1c8cb1a58438e01f13392818bc Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 14:44:14 -0400 Subject: [PATCH 72/75] file revert --- .github/PULL_REQUEST_TEMPLATE.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d889b2d593..9c42cda720 100755 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -34,9 +34,9 @@ Here are some guidelines to help the review process go smoothly. features or make changes out of the scope of those requested by the reviewer (doing this just add delays as already reviewed code ends up having to be re-reviewed/it is hard to tell what is new etc!). Further, please do not - rebase your branch on main/force push/rewrite history, doing any of these + rebase your branch on master/force push/rewrite history, doing any of these causes the context of any comments made by reviewers to be lost. If - conflicts occur against main they should be resolved by merging main + conflicts occur against master they should be resolved by merging master into the branch used for making the pull request. Many thanks in advance for your cooperation! From 9db7cd9786647275622657812cc9a7c4b2e97c59 Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 14:46:17 -0400 Subject: [PATCH 73/75] revert contributing md --- docs/source/contributing.md | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/source/contributing.md b/docs/source/contributing.md index 89ff8043f1..446e7b2a7b 100755 --- a/docs/source/contributing.md +++ b/docs/source/contributing.md @@ -89,4 +89,3 @@ implementation of the issue, ask them in the issue instead of the PR. ## Attribution Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md - From a70619e09eb74e41528b7f8569833913e7b9c16f Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Wed, 30 Oct 2024 14:47:22 -0400 Subject: [PATCH 74/75] revert contributing md --- docs/source/contributing.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/source/contributing.md b/docs/source/contributing.md index 446e7b2a7b..1b4071d0a5 100755 --- a/docs/source/contributing.md +++ b/docs/source/contributing.md @@ -89,3 +89,5 @@ implementation of the issue, ask them in the issue instead of the PR. 
## Attribution Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md + + From 2b0202ae2cd75a68471d00787009723683799c5b Mon Sep 17 00:00:00 2001 From: Julio Perez Date: Thu, 31 Oct 2024 15:19:32 -0400 Subject: [PATCH 75/75] add in review comments --- .../raft/sparse/neighbors/brute_force.cuh | 4 +- cpp/include/raft/sparse/neighbors/knn.cuh | 6 +- cpp/test/CMakeLists.txt | 12 ++- cpp/test/preprocess_utils.cu | 80 ++++++++++--------- cpp/test/sparse/preprocess_coo.cu | 2 +- cpp/test/sparse/preprocess_csr.cu | 2 +- 6 files changed, 59 insertions(+), 47 deletions(-) diff --git a/cpp/include/raft/sparse/neighbors/brute_force.cuh b/cpp/include/raft/sparse/neighbors/brute_force.cuh index 47e00a012f..8e8f36c2c3 100644 --- a/cpp/include/raft/sparse/neighbors/brute_force.cuh +++ b/cpp/include/raft/sparse/neighbors/brute_force.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2023, NVIDIA CORPORATION. + * Copyright (c) 2020-2024, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,6 +25,8 @@ namespace raft::sparse::neighbors::brute_force { /** * Search the sparse kNN for the k-nearest neighbors of a set of sparse query vectors * using some distance implementation + * template parameter value_idx is the type of the Indptr and Indices arrays. + * template parameter value_t is the type of the Data array. * @param[in] idxIndptr csr indptr of the index matrix (size n_idx_rows + 1) * @param[in] idxIndices csr column indices array of the index matrix (size n_idx_nnz) * @param[in] idxData csr data array of the index matrix (size idxNNZ) diff --git a/cpp/include/raft/sparse/neighbors/knn.cuh b/cpp/include/raft/sparse/neighbors/knn.cuh index bffbf6c943..7b93ea4d0d 100644 --- a/cpp/include/raft/sparse/neighbors/knn.cuh +++ b/cpp/include/raft/sparse/neighbors/knn.cuh @@ -62,7 +62,7 @@ namespace raft::sparse::neighbors { * @param[in] metric distance metric/measure to use * @param[in] metricArg potential argument for metric (currently unused) */ -template +template void brute_force_knn(const value_idx* idxIndptr, const value_idx* idxIndices, const value_t* idxData, @@ -120,7 +120,7 @@ void brute_force_knn(const value_idx* idxIndptr, * @param[in] metric distance metric/measure to use * @param[in] metricArg potential argument for metric (currently unused) */ -template +template void brute_force_knn(raft::device_csr_matrix +template void brute_force_knn(raft::device_coo_matrix -void preproc_kernel(raft::resources& handle, - raft::host_vector_view h_rows, - raft::host_vector_view h_cols, - raft::host_vector_view h_elems, - raft::device_vector_view results, - int num_rows, - int num_cols, - bool tf_idf) +void preproc_coo(raft::resources& handle, + raft::host_vector_view h_rows, + raft::host_vector_view h_cols, + raft::host_vector_view h_elems, + raft::device_vector_view results, + int num_rows, + int num_cols, + bool tf_idf) { cudaStream_t stream = raft::resource::get_cuda_stream(handle); int rows_size = h_rows.size(); int cols_size = h_cols.size(); int elements_size = h_elems.size(); auto device_matrix = raft::make_device_matrix(handle, num_rows, num_cols); - raft::matrix::fill(handle, device_matrix.view(), 0.0f); + raft::matrix::fill(handle, device_matrix.view(), 0.0f); auto host_matrix = raft::make_host_matrix(handle, num_rows, num_cols); raft::copy(host_matrix.data_handle(), device_matrix.data_handle(), device_matrix.size(), stream); + raft::resource::sync_stream(handle, stream); + 
for (int i = 0; i < elements_size; i++) { int row = h_rows(i); int col = h_cols(i); @@ -81,7 +83,7 @@ void preproc_kernel(raft::resources& handle, output_cols_lengths.size(), stream); - auto output_cols_length_sum = raft::make_device_scalar(handle, 0); + auto output_cols_length_sum = raft::make_device_scalar(handle, 0); raft::linalg::mapReduce(output_cols_length_sum.data_handle(), num_cols, 0, @@ -89,12 +91,12 @@ void preproc_kernel(raft::resources& handle, raft::add_op(), stream, output_cols_lengths.data_handle()); - auto h_output_cols_length_sum = raft::make_host_scalar(handle, 0); + auto h_output_cols_length_sum = raft::make_host_scalar(handle, 0); raft::copy(h_output_cols_length_sum.data_handle(), output_cols_length_sum.data_handle(), output_cols_length_sum.size(), stream); - float avg_col_length = float(h_output_cols_length_sum(0)) / num_cols; + T2 avg_col_length = T2(h_output_cols_length_sum(0)) / num_cols; auto output_rows_freq = raft::make_device_matrix(handle, 1, num_rows); raft::linalg::reduce(output_rows_freq.data_handle(), @@ -116,13 +118,13 @@ void preproc_kernel(raft::resources& handle, false, stream, false, - check_zeroes()); + check_zeroes()); auto h_output_rows_cnt = raft::make_host_matrix(handle, 1, num_rows); raft::copy( h_output_rows_cnt.data_handle(), output_rows_cnt.data_handle(), output_rows_cnt.size(), stream); auto out_device_matrix = raft::make_device_matrix(handle, num_rows, num_cols); - raft::matrix::fill(handle, out_device_matrix.view(), 0.0f); + raft::matrix::fill(handle, out_device_matrix.view(), 0.0f); auto out_host_matrix = raft::make_host_matrix(handle, num_rows, num_cols); auto out_host_vector = raft::make_host_vector(handle, results.size()); @@ -137,7 +139,7 @@ void preproc_kernel(raft::resources& handle, out_host_matrix(row, col) = 0.0f; } else { float tf = float(val / h_output_cols_lengths(0, col)); - float idf = raft::log(num_cols / h_output_rows_cnt(0, row)); + float idf = raft::log(num_cols / h_output_rows_cnt(0, row)); if (tf_idf) { result = tf * idf; } else { @@ -171,7 +173,7 @@ int get_dupe_mask_count(raft::resources& handle, values.data_handle(), stream); - raft::sparse::op::compute_duplicates_mask( + raft::sparse::op::compute_duplicates_mask( mask.data_handle(), rows.data_handle(), columns.data_handle(), rows.size(), stream); int col_nnz_count = thrust::reduce(raft::resource::get_thrust_policy(handle), @@ -193,15 +195,15 @@ void remove_dupes(raft::resources& handle, { cudaStream_t stream = raft::resource::get_cuda_stream(handle); - auto col_counts = raft::make_device_vector(handle, columns.size()); + auto col_counts = raft::make_device_vector(handle, columns.size()); thrust::fill(raft::resource::get_thrust_policy(handle), col_counts.data_handle(), col_counts.data_handle() + col_counts.size(), 1.0f); - auto keys_out = raft::make_device_vector(handle, num_rows); - auto counts_out = raft::make_device_vector(handle, num_rows); + auto keys_out = raft::make_device_vector(handle, num_rows); + auto counts_out = raft::make_device_vector(handle, num_rows); thrust::reduce_by_key(raft::resource::get_thrust_policy(handle), rows.data_handle(), @@ -210,19 +212,19 @@ void remove_dupes(raft::resources& handle, keys_out.data_handle(), counts_out.data_handle()); - auto mask_out = raft::make_device_vector(handle, rows.size()); + auto mask_out = raft::make_device_vector(handle, rows.size()); - raft::linalg::map(handle, mask_out.view(), raft::cast_op{}, raft::make_const_mdspan(mask)); + raft::linalg::map(handle, mask_out.view(), raft::cast_op{}, 
raft::make_const_mdspan(mask)); - auto values_c = raft::make_device_vector(handle, values.size()); + auto values_c = raft::make_device_vector(handle, values.size()); raft::linalg::map(handle, values_c.view(), raft::mul_op{}, raft::make_const_mdspan(values), raft::make_const_mdspan(mask_out.view())); - auto keys_nnz_out = raft::make_device_vector(handle, num_rows); - auto counts_nnz_out = raft::make_device_vector(handle, num_rows); + auto keys_nnz_out = raft::make_device_vector(handle, num_rows); + auto counts_nnz_out = raft::make_device_vector(handle, num_rows); thrust::reduce_by_key(raft::resource::get_thrust_policy(handle), rows.data_handle(), @@ -231,18 +233,18 @@ void remove_dupes(raft::resources& handle, keys_nnz_out.data_handle(), counts_nnz_out.data_handle()); - raft::sparse::op::coo_remove_scalar(rows.data_handle(), - columns.data_handle(), - values_c.data_handle(), - values_c.size(), - out_rows.data_handle(), - out_cols.data_handle(), - out_vals.data_handle(), - counts_nnz_out.data_handle(), - counts_out.data_handle(), - 0, - num_rows, - stream); + raft::sparse::op::coo_remove_scalar(rows.data_handle(), + columns.data_handle(), + values_c.data_handle(), + values_c.size(), + out_rows.data_handle(), + out_cols.data_handle(), + out_vals.data_handle(), + counts_nnz_out.data_handle(), + counts_out.data_handle(), + 0, + num_rows, + stream); } template @@ -261,7 +263,7 @@ void create_dataset(raft::resources& handle, auto d_out = raft::make_device_vector(handle, rows.size() * 2); int theta_guide = max(num_rows_unique, num_cols_unique); - auto theta = raft::make_device_vector(handle, theta_guide * 4); + auto theta = raft::make_device_vector(handle, theta_guide * 4); raft::random::uniform(handle, rng, theta.view(), 0.0f, 1.0f); @@ -275,9 +277,9 @@ void create_dataset(raft::resources& handle, stream, rng); - auto vals = raft::make_device_vector(handle, rows.size()); + auto vals = raft::make_device_vector(handle, rows.size()); raft::random::uniformInt(handle, rng, vals.view(), 1, max_term_occurence_doc); - raft::linalg::map(handle, values, raft::cast_op{}, raft::make_const_mdspan(vals.view())); + raft::linalg::map(handle, values, raft::cast_op{}, raft::make_const_mdspan(vals.view())); } }; // namespace raft::util \ No newline at end of file diff --git a/cpp/test/sparse/preprocess_coo.cu b/cpp/test/sparse/preprocess_coo.cu index b26e5122d7..44dac88cdb 100644 --- a/cpp/test/sparse/preprocess_coo.cu +++ b/cpp/test/sparse/preprocess_coo.cu @@ -60,7 +60,7 @@ void calc_tfidf_bm25(raft::resources& handle, stream); raft::copy( h_elems.data_handle(), coo_in.get_elements().data(), coo_in.get_elements().size(), stream); - raft::util::preproc_kernel( + raft::util::preproc_coo( handle, h_rows.view(), h_cols.view(), h_elems.view(), results, num_rows, num_cols, tf_idf); } diff --git a/cpp/test/sparse/preprocess_csr.cu b/cpp/test/sparse/preprocess_csr.cu index eab270ce79..e48aabcaa4 100644 --- a/cpp/test/sparse/preprocess_csr.cu +++ b/cpp/test/sparse/preprocess_csr.cu @@ -63,7 +63,7 @@ void calc_tfidf_bm25(raft::resources& handle, raft::copy(h_rows.data_handle(), rows.data_handle(), rows.size(), stream); raft::copy(h_cols.data_handle(), indices.data_handle(), cols_size, stream); raft::copy(h_elems.data_handle(), values.data_handle(), values.size(), stream); - raft::util::preproc_kernel( + raft::util::preproc_coo( handle, h_rows.view(), h_cols.view(), h_elems.view(), results, num_rows, num_cols, tf_idf); }
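
For reference, a minimal calling sketch of the view-based sparse brute-force kNN overload exercised by the new CSR test in PATCH 61. It condenses the test's setup into a free function; the include paths, the int/float element types, the int64_t output extents, and the batch-size values are illustrative assumptions, not details fixed by the patches themselves.

  #include <raft/core/device_csr_matrix.hpp>
  #include <raft/core/device_mdarray.hpp>
  #include <raft/core/resource/cuda_stream.hpp>
  #include <raft/core/resources.hpp>
  #include <raft/distance/distance_types.hpp>
  #include <raft/sparse/neighbors/knn.cuh>
  #include <raft/util/cudart_utils.hpp>

  // Sketch: query a CSR matrix against itself for the k nearest neighbors of every row.
  void sparse_knn_sketch(raft::resources const& handle,
                         int* indptr,   // device pointer, size n_rows + 1
                         int* indices,  // device pointer, size nnz
                         float* data,   // device pointer, size nnz
                         int n_rows, int n_cols, int nnz, int k)
  {
    cudaStream_t stream = raft::resource::get_cuda_stream(handle);

    // Wrap the existing device buffers in a CSR structure and an element-owning matrix,
    // mirroring the setup in cpp/test/sparse/neighbors/brute_force_csr.cu above.
    auto csr_view = raft::make_device_compressed_structure_view(indptr, indices, n_rows, n_cols, nnz);
    auto csr      = raft::make_device_csr_matrix<float>(handle, csr_view);
    raft::copy(csr.view().get_elements().data(), data, nnz, stream);

    // Outputs are now mdspan-backed vectors of n_rows * k entries instead of raw pointers.
    // The int/int64_t choices here are assumptions for illustration.
    auto out_indices = raft::make_device_vector<int, int64_t>(handle, int64_t(n_rows) * k);
    auto out_dists   = raft::make_device_vector<float, int64_t>(handle, int64_t(n_rows) * k);

    raft::sparse::neighbors::brute_force_knn(csr,  // index matrix
                                             csr,  // query matrix
                                             out_indices.view(),
                                             out_dists.view(),
                                             k,
                                             handle,
                                             1 << 14,  // batch_size_index (assumed)
                                             1 << 14,  // batch_size_query (assumed)
                                             raft::distance::DistanceType::L2SqrtExpanded);
  }

Carrying the extents on the output views lets the implementation check result sizes against n_rows * k rather than trusting bare pointers, which is presumably the motivation for the signature change.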