From b4f592e3fa15a714e6b7bde68c59c8a1b8e4011d Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 7 Jan 2025 14:27:21 -0800 Subject: [PATCH 1/7] Support raft's logger targets (#4848) https://github.com/rapidsai/raft/pull/2530 added new targets that we need to make global in cugraph's CMake as well. Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Chuck Hastings (https://github.com/ChuckHastings) URL: https://github.com/rapidsai/cugraph/pull/4848 --- cpp/cmake/thirdparty/get_raft.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cpp/cmake/thirdparty/get_raft.cmake b/cpp/cmake/thirdparty/get_raft.cmake index 8f56372c81a..62633d95c64 100644 --- a/cpp/cmake/thirdparty/get_raft.cmake +++ b/cpp/cmake/thirdparty/get_raft.cmake @@ -1,5 +1,5 @@ #============================================================================= -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ function(find_and_configure_raft) endif() rapids_cpm_find(raft ${PKG_VERSION} - GLOBAL_TARGETS raft::raft + GLOBAL_TARGETS raft::raft raft::raft_logger raft::raft_logger_impl BUILD_EXPORT_SET cugraph-exports INSTALL_EXPORT_SET cugraph-exports COMPONENTS ${RAFT_COMPONENTS} From cddd69ea3f62cabdb3aa2b7b6676e0b74ab4eefc Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Tue, 7 Jan 2025 17:18:57 -0600 Subject: [PATCH 2/7] Use cuda-python bindings for getting device properties. (#4830) This PR uses `cuda-python` for getting device properties. These APIs are more stable than getting this information via `numba.cuda`. Companion to #4829 (this is not dependent on that PR, though). Authors: - Bradley Dice (https://github.com/bdice) - Ralph Liu (https://github.com/nv-rliu) - Kyle Edwards (https://github.com/KyleFromNVIDIA) Approvers: - Kyle Edwards (https://github.com/KyleFromNVIDIA) - Rick Ratzel (https://github.com/rlratzel) URL: https://github.com/rapidsai/cugraph/pull/4830 --- ci/notebook_list.py | 46 ++++++++-------- .../cugraph/cugraph/dask/common/mg_utils.py | 13 +++-- .../cugraph/tests/docs/test_doctests.py | 14 +++-- .../utilities/path_retrieval_wrapper.pyx | 3 +- python/cugraph/cugraph/utilities/utils.py | 54 +++++++++---------- 5 files changed, 64 insertions(+), 66 deletions(-) diff --git a/ci/notebook_list.py b/ci/notebook_list.py index f7a284beeeb..659ac4de755 100644 --- a/ci/notebook_list.py +++ b/ci/notebook_list.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2023, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -17,7 +17,7 @@ import glob from pathlib import Path -from numba import cuda +from cuda.bindings import runtime # for adding another run type and skip file name add to this dictionary runtype_dict = { @@ -30,20 +30,27 @@ def skip_book_dir(runtype): # Add all run types here, currently only CI supported + return runtype in runtype_dict and Path(runtype_dict.get(runtype)).is_file() - if runtype in runtype_dict.keys(): - if Path(runtype_dict.get(runtype)).is_file(): - return True - return False +def _get_cuda_version_string(): + status, version = runtime.getLocalRuntimeVersion() + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA runtime version.") + major, minor = divmod(version, 1000) + minor //= 10 + return f"{major}.{minor}" + + +def _is_ampere_or_newer(): + status, device_id = runtime.cudaGetDevice() + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA device.") + status, device_prop = runtime.cudaGetDeviceProperties(device_id) + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA device properties.") + return (device_prop.major, device_prop.minor) >= (8, 0) -cuda_version_string = ".".join([str(n) for n in cuda.runtime.get_version()]) -# -# Not strictly true... however what we mean is -# Pascal or earlier -# -ampere = False -device = cuda.get_current_device() parser = argparse.ArgumentParser(description="Condition for running the notebook tests") parser.add_argument("runtype", type=str) @@ -52,19 +59,10 @@ def skip_book_dir(runtype): runtype = args.runtype -if runtype not in runtype_dict.keys(): +if runtype not in runtype_dict: print(f"Unknown Run Type = {runtype}", file=sys.stderr) exit() - -# check for the attribute using both pre and post numba 0.53 names -cc = getattr(device, "COMPUTE_CAPABILITY", None) or getattr( - device, "compute_capability" -) -if cc[0] >= 8: - ampere = True - -skip = False for filename in glob.iglob("**/*.ipynb", recursive=True): skip = False if skip_book_dir(runtype): @@ -88,7 +86,7 @@ def skip_book_dir(runtype): ) skip = True break - elif ampere and re.search("# Does not run on Ampere", line): + elif _is_ampere_or_newer() and re.search("# Does not run on Ampere", line): print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr) skip = True break diff --git a/python/cugraph/cugraph/dask/common/mg_utils.py b/python/cugraph/cugraph/dask/common/mg_utils.py index b04f293dc0e..e4e3ac9a44e 100644 --- a/python/cugraph/cugraph/dask/common/mg_utils.py +++ b/python/cugraph/cugraph/dask/common/mg_utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2023, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -13,7 +13,7 @@ import os import gc -import numba.cuda +from cuda.bindings import runtime # FIXME: this raft import breaks the library if ucx-py is @@ -53,11 +53,10 @@ def prepare_worker_to_parts(data, client=None): def is_single_gpu(): - ngpus = len(numba.cuda.gpus) - if ngpus > 1: - return False - else: - return True + status, count = runtime.cudaGetDeviceCount() + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA device count.") + return count > 1 def get_visible_devices(): diff --git a/python/cugraph/cugraph/tests/docs/test_doctests.py b/python/cugraph/cugraph/tests/docs/test_doctests.py index 2095fd41fe9..9d9f8436b99 100644 --- a/python/cugraph/cugraph/tests/docs/test_doctests.py +++ b/python/cugraph/cugraph/tests/docs/test_doctests.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -25,14 +25,21 @@ import cugraph import pylibcugraph import cudf -from numba import cuda +from cuda.bindings import runtime from cugraph.testing import utils modules_to_skip = ["dask", "proto", "raft"] datasets = utils.RAPIDS_DATASET_ROOT_DIR_PATH -cuda_version_string = ".".join([str(n) for n in cuda.runtime.get_version()]) + +def _get_cuda_version_string(): + status, version = runtime.getLocalRuntimeVersion() + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA runtime version.") + major = version // 1000 + minor = (version % 1000) // 10 + return f"{major}.{minor}" def _is_public_name(name): @@ -131,6 +138,7 @@ def skip_docstring(docstring_obj): NOTE: this function is currently not available on CUDA 11.4 systems. """ docstring = docstring_obj.docstring + cuda_version_string = _get_cuda_version_string() for line in docstring.splitlines(): if f"currently not available on CUDA {cuda_version_string} systems" in line: return f"docstring example not supported on CUDA {cuda_version_string}" diff --git a/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx b/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx index 98d11ad07df..8e71c7aae4e 100644 --- a/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx +++ b/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -19,7 +19,6 @@ from cugraph.utilities.path_retrieval cimport get_traversed_cost as c_get_traversed_cost from cugraph.structure.graph_primtypes cimport * from libc.stdint cimport uintptr_t -from numba import cuda import cudf import numpy as np diff --git a/python/cugraph/cugraph/utilities/utils.py b/python/cugraph/cugraph/utilities/utils.py index 0257da4ffc0..074503e2f60 100644 --- a/python/cugraph/cugraph/utilities/utils.py +++ b/python/cugraph/cugraph/utilities/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -15,13 +15,10 @@ import os import shutil -from numba import cuda - import cudf from cudf.core.column import as_column -from cuda.cudart import cudaDeviceAttr -from rmm._cuda.gpu import getDeviceAttribute +from cuda.bindings import runtime from warnings import warn @@ -210,45 +207,42 @@ def get_traversed_path_list(df, id): return answer -def is_cuda_version_less_than(min_version=(10, 2)): +def is_cuda_version_less_than(min_version): """ Returns True if the version of CUDA being used is less than min_version """ - this_cuda_ver = cuda.runtime.get_version() # returns (, ) - if this_cuda_ver[0] > min_version[0]: - return False - if this_cuda_ver[0] < min_version[0]: - return True - if this_cuda_ver[1] < min_version[1]: - return True - return False + status, version = runtime.getLocalRuntimeVersion() + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA runtime version.") + major = version // 1000 + minor = (version % 1000) // 10 + return (major, minor) < min_version -def is_device_version_less_than(min_version=(7, 0)): +def is_device_version_less_than(min_version): """ Returns True if the version of CUDA being used is less than min_version """ - major_version = getDeviceAttribute( - cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, 0 - ) - minor_version = getDeviceAttribute( - cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor, 0 - ) - if major_version > min_version[0]: - return False - if major_version < min_version[0]: - return True - if minor_version < min_version[1]: - return True - return False + status, device_id = runtime.cudaGetDevice() + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA device.") + status, device_prop = runtime.cudaGetDeviceProperties(device_id) + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA device properties.") + return (device_prop.major, device_prop.minor) < min_version def get_device_memory_info(): """ Returns the total amount of global memory on the device in bytes """ - meminfo = cuda.current_context().get_memory_info() - return meminfo[1] + status, device_id = runtime.cudaGetDevice() + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA device.") + status, device_prop = runtime.cudaGetDeviceProperties(device_id) + if status != runtime.cudaError_t.cudaSuccess: + raise RuntimeError("Could not get CUDA device properties.") + return device_prop.totalGlobalMem # FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if From e46ff65f58a9448ec62ce09591ee7b0707e2e82c Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Thu, 9 Jan 2025 12:13:42 -0600 Subject: [PATCH 3/7] Remove unnecessary CUDA utilities (#4855) This PR removes some utilities that were updated in https://github.com/rapidsai/cugraph/pull/4830 but are no longer needed. 
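For callers that still need this information, here is a minimal sketch of querying it directly through `cuda.bindings.runtime`, using the same calls as the helpers removed below. Illustrative only; the `_check` helper is hypothetical and not part of this patch.

```python
from cuda.bindings import runtime


def _check(status):
    # cuda-python runtime calls return a (status, value) tuple; fail loudly on error.
    if status != runtime.cudaError_t.cudaSuccess:
        raise RuntimeError(f"CUDA runtime call failed: {status}")


# Query the current device and its properties directly, mirroring what
# get_device_memory_info() and is_device_version_less_than() provided.
status, device_id = runtime.cudaGetDevice()
_check(status)
status, props = runtime.cudaGetDeviceProperties(device_id)
_check(status)

total_memory_bytes = props.totalGlobalMem        # formerly get_device_memory_info()
compute_capability = (props.major, props.minor)  # formerly compared by is_device_version_less_than()
```
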
xref: https://github.com/rapidsai/build-planning/issues/117 Authors: - Bradley Dice (https://github.com/bdice) Approvers: - James Lamb (https://github.com/jameslamb) - Rick Ratzel (https://github.com/rlratzel) URL: https://github.com/rapidsai/cugraph/pull/4855 --- .../cugraph/pytest-based/bench_algos.py | 6 +-- ci/notebook_list.py | 15 +------ python/cugraph/cugraph/traversal/ms_bfs.py | 3 +- python/cugraph/cugraph/utilities/utils.py | 40 ------------------- 4 files changed, 3 insertions(+), 61 deletions(-) diff --git a/benchmarks/cugraph/pytest-based/bench_algos.py b/benchmarks/cugraph/pytest-based/bench_algos.py index 04407d656d7..1c988ea636a 100644 --- a/benchmarks/cugraph/pytest-based/bench_algos.py +++ b/benchmarks/cugraph/pytest-based/bench_algos.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -42,7 +42,6 @@ def setFixtureParamNames(*args, **kwargs): from cugraph.structure.number_map import NumberMap from cugraph.generators import rmat from cugraph.testing import utils, mg_utils -from cugraph.utilities.utils import is_device_version_less_than from cugraph_benchmarking.params import ( directed_datasets, @@ -362,9 +361,6 @@ def bench_sorensen(gpubenchmark, unweighted_graph): gpubenchmark(sorensen, G, vert_pairs) -@pytest.mark.skipif( - is_device_version_less_than((7, 0)), reason="Not supported on Pascal" -) def bench_louvain(gpubenchmark, graph): louvain = dask_cugraph.louvain if is_graph_distributed(graph) else cugraph.louvain gpubenchmark(louvain, graph) diff --git a/ci/notebook_list.py b/ci/notebook_list.py index 659ac4de755..db26f2efa2c 100644 --- a/ci/notebook_list.py +++ b/ci/notebook_list.py @@ -41,16 +41,7 @@ def _get_cuda_version_string(): minor //= 10 return f"{major}.{minor}" - -def _is_ampere_or_newer(): - status, device_id = runtime.cudaGetDevice() - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device.") - status, device_prop = runtime.cudaGetDeviceProperties(device_id) - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device properties.") - return (device_prop.major, device_prop.minor) >= (8, 0) - +cuda_version_string = _get_cuda_version_string() parser = argparse.ArgumentParser(description="Condition for running the notebook tests") parser.add_argument("runtype", type=str) @@ -86,10 +77,6 @@ def _is_ampere_or_newer(): ) skip = True break - elif _is_ampere_or_newer() and re.search("# Does not run on Ampere", line): - print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr) - skip = True - break elif re.search("# Does not run on CUDA ", line) and ( cuda_version_string in line ): diff --git a/python/cugraph/cugraph/traversal/ms_bfs.py b/python/cugraph/cugraph/traversal/ms_bfs.py index df624e453ee..b80331d475a 100644 --- a/python/cugraph/cugraph/traversal/ms_bfs.py +++ b/python/cugraph/cugraph/traversal/ms_bfs.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2023, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -56,7 +56,6 @@ def _get_feasibility(G, sources, components=None, depth_limit=None): # Fixme not implemented in RMM yet # using 96GB upper bound for now - # mem = get_device_memory_info() mem = 9.6e10 n_sources = sources.size V = G.number_of_vertices() diff --git a/python/cugraph/cugraph/utilities/utils.py b/python/cugraph/cugraph/utilities/utils.py index 074503e2f60..493a9850a0f 100644 --- a/python/cugraph/cugraph/utilities/utils.py +++ b/python/cugraph/cugraph/utilities/utils.py @@ -18,8 +18,6 @@ import cudf from cudf.core.column import as_column -from cuda.bindings import runtime - from warnings import warn # optional dependencies @@ -207,44 +205,6 @@ def get_traversed_path_list(df, id): return answer -def is_cuda_version_less_than(min_version): - """ - Returns True if the version of CUDA being used is less than min_version - """ - status, version = runtime.getLocalRuntimeVersion() - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA runtime version.") - major = version // 1000 - minor = (version % 1000) // 10 - return (major, minor) < min_version - - -def is_device_version_less_than(min_version): - """ - Returns True if the version of CUDA being used is less than min_version - """ - status, device_id = runtime.cudaGetDevice() - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device.") - status, device_prop = runtime.cudaGetDeviceProperties(device_id) - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device properties.") - return (device_prop.major, device_prop.minor) < min_version - - -def get_device_memory_info(): - """ - Returns the total amount of global memory on the device in bytes - """ - status, device_id = runtime.cudaGetDevice() - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device.") - status, device_prop = runtime.cudaGetDeviceProperties(device_id) - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device properties.") - return device_prop.totalGlobalMem - - # FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if # set. An additional optional parameter for the weight attr name when accepting # Nx graphs may be needed. From the Nx docs: From a6eea2f7cc40165e584e459351d1a92406de7415 Mon Sep 17 00:00:00 2001 From: James Lamb Date: Thu, 9 Jan 2025 13:16:43 -0600 Subject: [PATCH 4/7] move wheel installs into per-project test scripts, other packaging changes (#4847) Proposes some changes to small things I noticed while working on #4804. * CMake option cleanup: - adds `BUILD_PRIMS_BENCH OFF`, removes `BUILD_BENCH OFF` in `get_raft.cmake` (matching changes to RAFT from 23.04: https://github.com/rapidsai/raft/pull/1304) - adds `BUILD_BENCHMARKS OFF` in `get_cudf.cmake` ([this is the default](https://github.com/rapidsai/cudf/blob/b81d9e17fbffbb912e0128148f556bf7af41b6ab/cpp/CMakeLists.txt#L51), but better to be explicit) * consolidates some `.gitignore` rules, adds wheels and conda packages there * moves responsibility for installing CI artifacts into `ci/test_wheel_{package}.sh` and out of `ci/test_wheel.sh` * splits up Cython and `scikit-build-core` in `dependencies.yaml` - *every Python package here using Cython also uses `scikit-build-core`, but the reverse won't be true as of #4804 ... 
making that change here is harmless and reduces the size of the diff in #4804 a bit* Authors: - James Lamb (https://github.com/jameslamb) Approvers: - Kyle Edwards (https://github.com/KyleFromNVIDIA) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cugraph/pull/4847 --- .gitignore | 10 +++------- ci/test_wheel.sh | 11 +---------- ci/test_wheel_cugraph.sh | 14 ++++++++++---- ci/test_wheel_pylibcugraph.sh | 11 ++++++++++- cpp/cmake/thirdparty/get_raft.cmake | 2 +- .../cmake/thirdparty/get_cudf.cmake | 10 ++++++---- .../cmake/thirdparty/get_cugraph.cmake | 6 +++--- dependencies.yaml | 17 +++++++++++------ 8 files changed, 45 insertions(+), 36 deletions(-) diff --git a/.gitignore b/.gitignore index 9480c2618bf..7853526b22b 100644 --- a/.gitignore +++ b/.gitignore @@ -32,6 +32,7 @@ test-results ## Python build directories & artifacts dask-worker-space/ htmlcov +*.conda dist/ *.egg-info/ python/build @@ -40,9 +41,7 @@ wheels/ wheelhouse/ _skbuild/ cufile.log - -## pylibcugraph build directories & artifacts -python/pylibcugraph/pylibcugraph.egg-info +*.whl ## Patching *.diff @@ -89,10 +88,7 @@ docs/cugraph/lib* docs/cugraph/api/* # created by Dask tests -python/dask-worker-space -python/cugraph/dask-worker-space -python/cugraph/cugraph/dask-worker-space -python/cugraph/cugraph/tests/dask-worker-space +dask-worker-space/ # Sphinx docs & build artifacts docs/cugraph/source/api_docs/api/* diff --git a/ci/test_wheel.sh b/ci/test_wheel.sh index b5cd90996c7..c96e91b037c 100755 --- a/ci/test_wheel.sh +++ b/ci/test_wheel.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# Copyright (c) 2023-2025, NVIDIA CORPORATION. set -eoxu pipefail @@ -7,15 +7,6 @@ package_name=$1 python_package_name=$(echo ${package_name}|sed 's/-/_/g') -mkdir -p ./dist -RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" - -RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./dist - -# use 'ls' to expand wildcard before adding `[extra]` requires for pip -# pip creates wheels using python package names -python -m pip install $(ls ./dist/${python_package_name}*.whl)[test] - # Run smoke tests for aarch64 pull requests arch=$(uname -m) if [[ "${arch}" == "aarch64" && ${RAPIDS_BUILD_TYPE} == "pull-request" ]]; then diff --git a/ci/test_wheel_cugraph.sh b/ci/test_wheel_cugraph.sh index 295cec7cb10..4703ed61985 100755 --- a/ci/test_wheel_cugraph.sh +++ b/ci/test_wheel_cugraph.sh @@ -1,11 +1,17 @@ #!/bin/bash -# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# Copyright (c) 2023-2025, NVIDIA CORPORATION. 
set -eoxu pipefail -# Download the pylibcugraph built in the previous step +# Download the packages built in the previous step +mkdir -p ./dist RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" -RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-pylibcugraph-dep -python -m pip install --no-deps ./local-pylibcugraph-dep/pylibcugraph*.whl +RAPIDS_PY_WHEEL_NAME="cugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./dist +RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./local-pylibcugraph-dep + +# echo to expand wildcard before adding `[extra]` requires for pip +python -m pip install \ + "$(echo ./dist/cugraph*.whl)[test]" \ + ./local-pylibcugraph-dep/pylibcugraph*.whl ./ci/test_wheel.sh cugraph diff --git a/ci/test_wheel_pylibcugraph.sh b/ci/test_wheel_pylibcugraph.sh index ddc9976308b..d0c97834a20 100755 --- a/ci/test_wheel_pylibcugraph.sh +++ b/ci/test_wheel_pylibcugraph.sh @@ -1,6 +1,15 @@ #!/bin/bash -# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# Copyright (c) 2023-2025, NVIDIA CORPORATION. set -eoxu pipefail +# Download the packages built in the previous step +mkdir -p ./dist +RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" +RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./dist + +# echo to expand wildcard before adding `[extra]` requires for pip +python -m pip install \ + "$(echo ./dist/pylibcugraph*.whl)[test]" + ./ci/test_wheel.sh pylibcugraph diff --git a/cpp/cmake/thirdparty/get_raft.cmake b/cpp/cmake/thirdparty/get_raft.cmake index 62633d95c64..28e9ec0cda7 100644 --- a/cpp/cmake/thirdparty/get_raft.cmake +++ b/cpp/cmake/thirdparty/get_raft.cmake @@ -51,7 +51,7 @@ function(find_and_configure_raft) OPTIONS "RAFT_COMPILE_LIBRARY ${PKG_COMPILE_RAFT_LIB}" "BUILD_TESTS OFF" - "BUILD_BENCH OFF" + "BUILD_PRIMS_BENCH OFF" "BUILD_CAGRA_HNSWLIB OFF" ) diff --git a/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake b/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake index 8d57bf570bb..aab159d4242 100644 --- a/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake +++ b/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake @@ -1,5 +1,5 @@ #============================================================================= -# Copyright (c) 2021, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -27,7 +27,9 @@ function(find_and_configure_cudf) GIT_REPOSITORY https://github.com/${PKG_FORK}/cudf.git GIT_TAG ${PKG_PINNED_TAG} SOURCE_SUBDIR cpp - OPTIONS "BUILD_TESTS OFF" + OPTIONS + "BUILD_BENCHMARKS OFF" + "BUILD_TESTS OFF" ) message(VERBOSE "CUGRAPH_ETL: Using CUDF located in ${cudf_SOURCE_DIR}") @@ -39,8 +41,8 @@ set(CUGRAPH_ETL_BRANCH_VERSION_cudf "${CUGRAPH_ETL_VERSION_MAJOR}.${CUGRAPH_ETL_ # Change pinned tag and fork here to test a commit in CI -# To use a different RAFT locally, set the CMake variable -# RPM_cudf_SOURCE=/path/to/local/cudf +# To use a different cuDF locally, set the CMake variable +# CPM_cudf_SOURCE=/path/to/local/cudf find_and_configure_cudf(VERSION ${CUGRAPH_ETL_MIN_VERSION_cudf} FORK rapidsai PINNED_TAG branch-${CUGRAPH_ETL_BRANCH_VERSION_cudf} diff --git a/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake b/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake index c551646d919..c42bd8d2ae9 100644 --- a/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake +++ b/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake @@ -1,5 +1,5 @@ #============================================================================= -# Copyright (c) 2021, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,8 +39,8 @@ set(CUGRAPH_ETL_BRANCH_VERSION_cugraph "${CUGRAPH_ETL_VERSION_MAJOR}.${CUGRAPH_E # Change pinned tag and fork here to test a commit in CI -# To use a different RAFT locally, set the CMake variable -# RPM_cugraph_SOURCE=/path/to/local/cugraph +# To use a different cuGraph locally, set the CMake variable +# CPM_cugraph_SOURCE=/path/to/local/cugraph find_and_configure_cugraph(VERSION ${CUGRAPH_ETL_MIN_VERSION_cugraph} FORK rapidsai PINNED_TAG branch-${CUGRAPH_ETL_BRANCH_VERSION_cugraph} diff --git a/dependencies.yaml b/dependencies.yaml index e1a8cc065c7..56c0f9deba0 100755 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -26,6 +26,7 @@ files: - depends_on_ucx_py - docs - python_build_cythonize + - python_build_skbuild - python_build_rapids - python_build_wheel - python_run_cugraph @@ -77,6 +78,7 @@ files: includes: - python_build_cythonize - python_build_rapids + - python_build_skbuild py_rapids_build_cugraph: output: pyproject pyproject_dir: python/cugraph @@ -121,6 +123,7 @@ files: includes: - python_build_cythonize - python_build_rapids + - python_build_skbuild py_rapids_build_pylibcugraph: output: pyproject pyproject_dir: python/pylibcugraph @@ -370,6 +373,14 @@ dependencies: - output_types: [conda, pyproject, requirements] packages: - rapids-build-backend>=0.3.1,<0.4.0.dev0 + python_build_skbuild: + common: + - output_types: conda + packages: + - scikit-build-core>=0.10.0 + - output_types: [requirements, pyproject] + packages: + - scikit-build-core[pyproject]>=0.10.0 python_build_wheel: common: - output_types: [conda, pyproject, requirements] @@ -381,12 +392,6 @@ dependencies: - output_types: [conda, pyproject, requirements] packages: - cython>=3.0.0 - - output_types: conda - packages: - - scikit-build-core>=0.10.0 - - output_types: [pyproject, requirements] - packages: - - scikit-build-core[pyproject]>=0.10.0 python_run_cugraph: common: - output_types: [conda, pyproject] From a5679f0d0c246fe68c75fb4026600f589f843790 Mon Sep 17 00:00:00 2001 From: Don Acosta <97529984+acostadon@users.noreply.github.com> Date: Fri, 10 Jan 2025 17:12:16 -0500 Subject: [PATCH 5/7] added doxygen groups so docs can clean 
up the API (#4857) added doxygen groups so docs can clean up the API This PR won't do anything until corresponding sphinx files are created in cugraph-docs Authors: - Don Acosta (https://github.com/acostadon) Approvers: - Seunghwa Kang (https://github.com/seunghwak) - Chuck Hastings (https://github.com/ChuckHastings) URL: https://github.com/rapidsai/cugraph/pull/4857 --- cpp/include/cugraph/algorithms.hpp | 114 ++++++++++++++++++++++++++--- 1 file changed, 105 insertions(+), 9 deletions(-) diff --git a/cpp/include/cugraph/algorithms.hpp b/cpp/include/cugraph/algorithms.hpp index 60f4d21822f..5a0a835c617 100644 --- a/cpp/include/cugraph/algorithms.hpp +++ b/cpp/include/cugraph/algorithms.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, NVIDIA CORPORATION. + * Copyright (c) 2020-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -36,9 +36,46 @@ * @{ */ +/** @defgroup centrality_cpp C++ centrality algorithms + */ + +/** @defgroup community_cpp C++ community Algorithms + */ + +/** @defgroup sampling_cpp C++ sampling algorithms + */ + +/** @defgroup similarity_cpp C++ similarity algorithms + */ + +/** @defgroup traversal_cpp C++ traversal algorithms + */ + +/** @defgroup labeling_cpp C++ labeling algorithms + */ + +/** @defgroup linear_cpp C++ linear assignment algorithms + */ + +/** @defgroup link_analysis_cpp C++ link Analysis algorithms + */ + +/** @defgroup layout_cpp C++ layout algorithms + */ + +/** @defgroup component_cpp C++ component algorithms + */ + +/** @defgroup tree_cpp C++ tree algorithms + */ + +/** @defgroup utility_cpp C++ utility algorithms + */ + namespace cugraph { /** + * @ingroup similarity_cpp * @brief Compute jaccard similarity coefficient for all vertices * * Computes the Jaccard similarity coefficient for every pair of vertices in the graph @@ -60,6 +97,7 @@ template void jaccard(legacy::GraphCSRView const& graph, WT const* weights, WT* result); /** + * @ingroup similarity_cpp * @brief Compute jaccard similarity coefficient for selected vertex pairs * * Computes the Jaccard similarity coefficient for each pair of specified vertices. @@ -89,6 +127,7 @@ void jaccard_list(legacy::GraphCSRView const& graph, WT* result); /** +.* @ingroup similarity_cpp * @brief Compute overlap coefficient for all vertices in the graph * * Computes the Overlap Coefficient for every pair of vertices in the graph which are @@ -110,6 +149,7 @@ template void overlap(legacy::GraphCSRView const& graph, WT const* weights, WT* result); /** + * @ingroup similarity_cpp * @brief Compute overlap coefficient for select pairs of vertices * * Computes the overlap coefficient for each pair of specified vertices. @@ -139,7 +179,7 @@ void overlap_list(legacy::GraphCSRView const& graph, WT* result); /** - * + * @ingroup layout_cpp * @brief ForceAtlas2 is a continuous graph layout algorithm * for handy network visualization. 
* @@ -212,6 +252,7 @@ void force_atlas2(raft::handle_t const& handle, internals::GraphBasedDimRedCallback* callback = nullptr); /** + * @ingroup centrality_cpp * @brief Compute betweenness centrality for a graph * * Betweenness centrality for a vertex is the sum of the fraction of @@ -259,6 +300,7 @@ void betweenness_centrality(const raft::handle_t& handle, vertex_t const* vertices = nullptr); /** + * @ingroup centrality_cpp * @brief Compute edge betweenness centrality for a graph * * Betweenness centrality of an edge is the sum of the fraction of all-pairs shortest paths that @@ -302,6 +344,7 @@ void edge_betweenness_centrality(const raft::handle_t& handle, vertex_t const* vertices = nullptr); /** + * @ingroup centrality_cpp * @brief Compute betweenness centrality for a graph * * Betweenness centrality for a vertex is the sum of the fraction of @@ -346,6 +389,7 @@ rmm::device_uvector betweenness_centrality( bool const do_expensive_check = false); /** + * @ingroup centrality_cpp * @brief Compute edge betweenness centrality for a graph * * Betweenness centrality of an edge is the sum of the fraction of all-pairs shortest paths that @@ -392,6 +436,7 @@ enum class cugraph_cc_t { }; /** + * @ingroup components_cpp * @brief Compute connected components. * * The weak version (for undirected graphs, only) was imported from cuML. @@ -426,6 +471,7 @@ void connected_components(legacy::GraphCSRView const& graph, VT* labels); /** + * @ingroup linear_cpp * @brief Compute Hungarian algorithm on a weighted bipartite graph * * The Hungarian algorithm computes an assigment of "jobs" to "workers". This function accepts @@ -458,6 +504,7 @@ weight_t hungarian(raft::handle_t const& handle, vertex_t* assignments); /** + * @ingroup linear_cpp * @brief Compute Hungarian algorithm on a weighted bipartite graph * * The Hungarian algorithm computes an assigment of "jobs" to "workers". This function accepts @@ -492,6 +539,7 @@ weight_t hungarian(raft::handle_t const& handle, weight_t epsilon); /** + * @ingroup community_cpp * @brief Louvain implementation * * Compute a clustering of the graph by maximizing modularity @@ -541,6 +589,7 @@ std::pair louvain( weight_t resolution = weight_t{1}); /** + * @ingroup community_cpp * @brief Louvain implementation, returning dendrogram * * Compute a clustering of the graph by maximizing modularity @@ -587,6 +636,7 @@ std::pair>, weight_t> louvain( weight_t resolution = weight_t{1}); /** + * @ingroup community_cpp * @brief Flatten a Dendrogram at a particular level * * A Dendrogram represents a hierarchical clustering/partitioning of @@ -611,6 +661,7 @@ void flatten_dendrogram(raft::handle_t const& handle, typename graph_view_t::vertex_type* clustering); /** + * @ingroup community_cpp * @brief Leiden implementation * * Compute a clustering of the graph by maximizing modularity using the Leiden improvements @@ -663,6 +714,7 @@ std::pair>, weight_t> leiden( weight_t theta = weight_t{1}); /** +.* @ingroup community_cpp * @brief Leiden implementation * * Compute a clustering of the graph by maximizing modularity using the Leiden improvements @@ -716,6 +768,7 @@ std::pair leiden( weight_t theta = weight_t{1}); /** +.* @ingroup community_cpp * @brief Computes the ecg clustering of the given graph. * * ECG runs truncated Louvain on an ensemble of permutations of the input graph, @@ -765,6 +818,7 @@ std::tuple, size_t, weight_t> ecg( weight_t resolution = weight_t{1}); /** + * @ingroup tree_cpp * @brief Generate edges in a minimum spanning forest of an undirected weighted graph. 
* * A minimum spanning tree is a subgraph of the graph (a tree) with the minimum sum of edge weights. @@ -792,6 +846,7 @@ std::unique_ptr> minimum_spanning_t namespace subgraph { /** +.* @ingroup utility_cpp * @brief Extract subgraph by vertices * * This function will identify all edges that connect pairs of vertices @@ -817,6 +872,7 @@ std::unique_ptr> extract_subgraph_vertex( } // namespace subgraph /** + * @ingroup community_cpp * @brief Wrapper function for Nvgraph balanced cut clustering * * @throws cugraph::logic_error when an error occurs. @@ -850,6 +906,7 @@ void balancedCutClustering(legacy::GraphCSRView const& graph, VT* clustering); /** + * @ingroup community_cpp * @brief Wrapper function for Nvgraph spectral modularity maximization algorithm * * @throws cugraph::logic_error when an error occurs. @@ -881,6 +938,7 @@ void spectralModularityMaximization(legacy::GraphCSRView const& grap VT* clustering); /** + * @ingroup community_cpp * @brief Wrapper function for Nvgraph clustering modularity metric * * @throws cugraph::logic_error when an error occurs. @@ -903,6 +961,7 @@ void analyzeClustering_modularity(legacy::GraphCSRView const& graph, WT* score); /** + * @ingroup community_cpp * @brief Wrapper function for Nvgraph clustering edge cut metric * * @throws cugraph::logic_error when an error occurs. @@ -925,6 +984,7 @@ void analyzeClustering_edge_cut(legacy::GraphCSRView const& graph, WT* score); /** + * @ingroup community_cpp * @brief Wrapper function for Nvgraph clustering ratio cut metric * * @throws cugraph::logic_error when an error occurs. @@ -950,6 +1010,7 @@ void analyzeClustering_ratio_cut(legacy::GraphCSRView const& graph, namespace dense { /** + * @ingroup linear_cpp * @brief Compute Hungarian algorithm on a weighted bipartite graph * * The Hungarian algorithm computes an assigment of "jobs" to "workers". This function accepts @@ -979,6 +1040,7 @@ weight_t hungarian(raft::handle_t const& handle, vertex_t* assignments); /** + * @ingroup linear_cpp * @brief Compute Hungarian algorithm on a weighted bipartite graph * * The Hungarian algorithm computes an assigment of "jobs" to "workers". This function accepts @@ -1013,6 +1075,7 @@ weight_t hungarian(raft::handle_t const& handle, } // namespace dense /** + * @ingroup traversal_cpp * @brief Run breadth-first search to find the distances (and predecessors) from the source * vertex. * @@ -1055,6 +1118,7 @@ void bfs(raft::handle_t const& handle, bool do_expensive_check = false); /** + * @ingroup traversal_cpp * @brief Extract paths from breadth-first search output * * This function extracts paths from the BFS output. BFS outputs distances @@ -1092,6 +1156,7 @@ std::tuple, vertex_t> extract_bfs_paths( size_t n_destinations); /** + * @ingroup traversal_cpp * @brief Run single-source shortest-path to compute the minimum distances (and predecessors) from * the source vertex. * @@ -1128,7 +1193,8 @@ void sssp(raft::handle_t const& handle, weight_t cutoff = std::numeric_limits::max(), bool do_expensive_check = false); -/* +/** +.* @ingroup traversal_cpp * @brief Compute the shortest distances from the given origins to all the given destinations. * * This algorithm is designed for large diameter graphs. For small diameter graphs, running the @@ -1168,6 +1234,7 @@ rmm::device_uvector od_shortest_distances( bool do_expensive_check = false); /** + * @ingroup link_analysis_cpp * @brief Compute PageRank scores. 
* * @deprecated This API will be deprecated to replaced by the new version below @@ -1240,6 +1307,7 @@ struct centrality_algorithm_metadata_t { }; /** +.* @ingroup link_analysis_cpp * @brief Compute PageRank scores. * * This function computes general (if @p personalization_vertices is `nullptr`) or personalized (if @@ -1297,6 +1365,7 @@ std::tuple, centrality_algorithm_metadata_t> pager bool do_expensive_check = false); /** +.* @ingroup centrality_cpp * @brief Compute Eigenvector Centrality scores. * * This function computes eigenvector centrality scores using the power method. @@ -1334,6 +1403,7 @@ rmm::device_uvector eigenvector_centrality( bool do_expensive_check = false); /** +.* @ingroup link_analysis_cpp * @brief Compute HITS scores. * * This function computes HITS scores for the vertices of a graph @@ -1373,6 +1443,7 @@ std::tuple hits(raft::handle_t const& handle, bool do_expensive_check); /** +.* @ingroup centrality_cpp * @brief Compute Katz Centrality scores. * * This function computes Katz Centrality scores. @@ -1425,6 +1496,7 @@ void katz_centrality(raft::handle_t const& handle, bool do_expensive_check = false); /** +.* @ingroup community_cpp * @brief returns induced EgoNet subgraph(s) of neighbors centered at nodes in source_vertex within * a given radius. * @@ -1460,6 +1532,7 @@ extract_ego(raft::handle_t const& handle, vertex_t radius); /** +.* @ingroup community_cpp * @brief returns induced EgoNet subgraph(s) of neighbors centered at nodes in source_vertex within * a given radius. * @@ -1493,6 +1566,7 @@ extract_ego(raft::handle_t const& handle, bool do_expensive_check = false); /** +.* @ingroup sampling_cpp * @brief returns random walks (RW) from starting sources, where each path is of given maximum * length. Uniform distribution is assumed for the random engine. * @@ -1537,6 +1611,7 @@ std:: std::unique_ptr sampling_strategy = nullptr); /** +.* @ingroup sampling_cpp * @brief returns uniform random walks from starting sources, where each path is of given * maximum length. * @@ -1582,6 +1657,7 @@ uniform_random_walks(raft::handle_t const& handle, size_t max_length); /** +.* @ingroup sampling_cpp * @brief returns biased random walks from starting sources, where each path is of given * maximum length. * @@ -1626,6 +1702,7 @@ biased_random_walks(raft::handle_t const& handle, size_t max_length); /** +.* @ingroup sampling_cpp * @brief returns biased random walks with node2vec biases from starting sources, * where each path is of given maximum length. * @@ -1675,6 +1752,7 @@ node2vec_random_walks(raft::handle_t const& handle, weight_t q); /** +.* @ingroup components_cpp * @brief Finds (weakly-connected-)component IDs of each vertices in the input graph. * * The input graph must be symmetric. Component IDs can be arbitrary integers (they can be @@ -1697,12 +1775,14 @@ void weakly_connected_components(raft::handle_t const& handle, bool do_expensive_check = false); /** +.* @ingroup core_cpp * @brief Identify whether the core number computation should be based off incoming edges, * outgoing edges or both. */ enum class k_core_degree_type_t { IN = 0, OUT = 1, INOUT = 2 }; /** +.* @ingroup core_cpp * @brief Compute core numbers of individual vertices from K-Core decomposition. * * The input graph should not have self-loops nor multi-edges. 
Currently, only undirected graphs are @@ -1734,6 +1814,7 @@ void core_number(raft::handle_t const& handle, bool do_expensive_check = false); /** +.* @ingroup core_cpp * @brief Extract K-Core of a graph * * @throws cugraph::logic_error when an error occurs. @@ -1766,7 +1847,8 @@ k_core(raft::handle_t const& handle, std::optional> core_numbers, bool do_expensive_check = false); -/* +/** + * @ingroup community_cpp * @brief Compute triangle counts. * * Compute triangle counts for the entire set of vertices (if @p vertices is std::nullopt) or the @@ -1792,7 +1874,8 @@ void triangle_count(raft::handle_t const& handle, raft::device_span counts, bool do_expensive_check = false); -/* +/** +.* @ingroup community_cpp * @brief Compute edge triangle counts. * * Compute edge triangle counts for the entire set of edges. @@ -1814,7 +1897,8 @@ edge_property_t, edge_t> edge_t graph_view_t const& graph_view, bool do_expensive_check = false); -/* +/** +.* @ingroup community_cpp * @brief Compute K-Truss. * * Extract the K-Truss subgraph of a graph @@ -1841,6 +1925,7 @@ k_truss(raft::handle_t const& handle, bool do_expensive_check = false); /** +.* @ingroup similarity_cpp * @brief Compute Jaccard similarity coefficient * * Similarity is computed for every pair of vertices specified. Note that @@ -1872,6 +1957,7 @@ rmm::device_uvector jaccard_coefficients( bool do_expensive_check = false); /** +.* @ingroup similarity_cpp * @brief Compute Cosine similarity coefficient * * Similarity is computed for every pair of vertices specified. Note that @@ -1903,6 +1989,7 @@ rmm::device_uvector cosine_similarity_coefficients( bool do_expensive_check = false); /** +.* @ingroup similarity_cpp * @brief Compute Sorensen similarity coefficient * * Similarity is computed for every pair of vertices specified. Note that @@ -1935,6 +2022,7 @@ rmm::device_uvector sorensen_coefficients( bool do_expensive_check = false); /** +.* @ingroup similarity_cpp * @brief Compute overlap similarity coefficient * * Similarity is computed for every pair of vertices specified. Note that @@ -1967,6 +2055,7 @@ rmm::device_uvector overlap_coefficients( bool do_expensive_check = false); /** +.* @ingroup similarity_cpp * @brief Compute Jaccard all pairs similarity coefficient * * Similarity is computed for all pairs of vertices. Note that in a sparse @@ -2023,6 +2112,7 @@ std:: bool do_expensive_check = false); /** +.* @ingroup similarity_cpp * @brief Compute Consine all pairs similarity coefficient * * Similarity is computed for all pairs of vertices. Note that in a sparse @@ -2079,6 +2169,7 @@ std:: bool do_expensive_check = false); /** +.* @ingroup similarity_cpp * @brief Compute Sorensen similarity coefficient * * Similarity is computed for all pairs of vertices. Note that in a sparse @@ -2134,6 +2225,7 @@ std:: bool do_expensive_check = false); /** +.* @ingroup similarity_cpp * @brief Compute overlap similarity coefficient * * Similarity is computed for all pairs of vertices. Note that in a sparse @@ -2189,6 +2281,7 @@ std:: bool do_expensive_check = false); /* +.* @ingroup utility_cpp * @brief Enumerate K-hop neighbors * * Note that the number of K-hop neighbors (and memory footprint) can grow very fast if there are @@ -2217,7 +2310,8 @@ std::tuple, rmm::device_uvector> k_hop_nbr size_t k, bool do_expensive_check = false); -/* +/** + * @ingroup tree_cpp * @brief Find a Maximal Independent Set * * @tparam vertex_t Type of vertex identifiers. Needs to be an integral type. 
@@ -2235,7 +2329,8 @@ rmm::device_uvector maximal_independent_set( graph_view_t const& graph_view, raft::random::RngState& rng_state); -/* +/** + * @ingroup utility_cpp * @brief Find a Greedy Vertex Coloring * * A vertex coloring is an assignment of colors or labels to each vertex of a graph so that @@ -2262,7 +2357,8 @@ rmm::device_uvector vertex_coloring( graph_view_t const& graph_view, raft::random::RngState& rng_state); -/* +/** +.* @ingroup utility_cpp * @brief Approximate Weighted Matching * * A matching in an undirected graph G = (V, E) is a pairing of adjacent vertices From ed954dce25178f7b25237296bc97a072af36e5da Mon Sep 17 00:00:00 2001 From: Joseph Nke <76006812+jnke2016@users.noreply.github.com> Date: Sat, 11 Jan 2025 01:10:13 +0100 Subject: [PATCH 6/7] Address Leiden numbering issue (#4845) Our current implementation of Leiden can return non contiguous cluster IDs however, there is an unused utility function [relabel_cluster_ids](https://github.com/rapidsai/cugraph/blob/branch-25.02/cpp/src/community/leiden_impl.cuh#L601:L604) that serves the purpose of relabeling. This PR - Addresses the Leiden numbering issue from [4791](#4791) by calling `relabel_cluster_ids` after flattening the dendrogram. - Fixes a bug in the MG python API of Leiden which requires a different seed for each GPU in the C++ API - Add SG and MG C++ tests - Add a python SG and MG test capturing the numbering issue closes #4791 Authors: - Joseph Nke (https://github.com/jnke2016) Approvers: - Rick Ratzel (https://github.com/rlratzel) - Chuck Hastings (https://github.com/ChuckHastings) URL: https://github.com/rapidsai/cugraph/pull/4845 --- cpp/src/community/leiden_impl.cuh | 45 ++++++++++--------- cpp/tests/community/leiden_test.cpp | 19 +++++++- cpp/tests/community/mg_leiden_test.cpp | 38 +++++++++++++++- .../cugraph/cugraph/dask/community/leiden.py | 6 +-- .../cugraph/tests/community/test_leiden.py | 27 ++++++++++- .../cugraph/tests/community/test_leiden_mg.py | 17 ++++++- 6 files changed, 125 insertions(+), 27 deletions(-) diff --git a/cpp/src/community/leiden_impl.cuh b/cpp/src/community/leiden_impl.cuh index c3600ff12e0..0d2afc631c9 100644 --- a/cpp/src/community/leiden_impl.cuh +++ b/cpp/src/community/leiden_impl.cuh @@ -1,5 +1,5 @@ /* - * Copyright (c) 2022-2024, NVIDIA CORPORATION. + * Copyright (c) 2022-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -604,26 +604,20 @@ void relabel_cluster_ids(raft::handle_t const& handle, size_t num_nodes) { vertex_t local_cluster_id_first{0}; + + // Get unique cluster id and shuffle + remove_duplicates(handle, unique_cluster_ids); + if constexpr (multi_gpu) { - auto unique_cluster_range_lasts = cugraph::partition_manager::compute_partition_range_lasts( - handle, static_cast(unique_cluster_ids.size())); - - auto& comm = handle.get_comms(); - auto const comm_size = comm.get_size(); - auto const comm_rank = comm.get_rank(); - auto& major_comm = handle.get_subcomm(cugraph::partition_manager::major_comm_name()); - auto const major_comm_size = major_comm.get_size(); - auto const major_comm_rank = major_comm.get_rank(); - auto& minor_comm = handle.get_subcomm(cugraph::partition_manager::minor_comm_name()); - auto const minor_comm_size = minor_comm.get_size(); - auto const minor_comm_rank = minor_comm.get_rank(); - - auto vertex_partition_id = - partition_manager::compute_vertex_partition_id_from_graph_subcomm_ranks( - major_comm_size, minor_comm_size, major_comm_rank, minor_comm_rank); - - local_cluster_id_first = - vertex_partition_id == 0 ? vertex_t{0} : unique_cluster_range_lasts[vertex_partition_id - 1]; + auto cluster_ids_size_per_rank = cugraph::host_scalar_allgather( + handle.get_comms(), unique_cluster_ids.size(), handle.get_stream()); + + std::vector cluster_ids_starts(cluster_ids_size_per_rank.size()); + std::exclusive_scan(cluster_ids_size_per_rank.begin(), + cluster_ids_size_per_rank.end(), + cluster_ids_starts.begin(), + size_t{0}); + local_cluster_id_first = cluster_ids_starts[handle.get_comms().get_rank()]; } rmm::device_uvector numbering_indices(unique_cluster_ids.size(), handle.get_stream()); @@ -713,6 +707,17 @@ std::pair leiden( detail::flatten_leiden_dendrogram(handle, graph_view, *dendrogram, clustering); + size_t local_num_verts = (*dendrogram).get_level_size_nocheck(0); + rmm::device_uvector unique_cluster_ids(local_num_verts, handle.get_stream()); + + thrust::copy(handle.get_thrust_policy(), + clustering, + clustering + local_num_verts, + unique_cluster_ids.begin()); + + detail::relabel_cluster_ids( + handle, unique_cluster_ids, clustering, local_num_verts); + return std::make_pair(dendrogram->num_levels(), modularity); } diff --git a/cpp/tests/community/leiden_test.cpp b/cpp/tests/community/leiden_test.cpp index 5ce0903f723..ad2be59eee2 100644 --- a/cpp/tests/community/leiden_test.cpp +++ b/cpp/tests/community/leiden_test.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023-2024, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2023-2025, NVIDIA CORPORATION. All rights reserved. 
* * NVIDIA CORPORATION and its licensors retain all intellectual property * and proprietary rights in and to this software, related documentation @@ -9,6 +9,7 @@ * */ #include "utilities/base_fixture.hpp" +#include "utilities/conversion_utilities.hpp" #include "utilities/test_graphs.hpp" #include @@ -128,6 +129,22 @@ class Tests_Leiden : public ::testing::TestWithParam(handle, clustering_v); + + unique_clustering_v = cugraph::test::unique(handle, std::move(unique_clustering_v)); + + auto expected_unique_clustering_v = + cugraph::test::sequence(handle, unique_clustering_v.size(), size_t{1}, int32_t{0}); + + auto h_unique_clustering_v = cugraph::test::to_host(handle, unique_clustering_v); + auto h_expected_unique_clustering_v = + cugraph::test::to_host(handle, expected_unique_clustering_v); + + ASSERT_TRUE(std::equal(h_unique_clustering_v.begin(), + h_unique_clustering_v.end(), + h_expected_unique_clustering_v.begin())) + << "Returned cluster IDs are not numbered consecutively"; } }; diff --git a/cpp/tests/community/mg_leiden_test.cpp b/cpp/tests/community/mg_leiden_test.cpp index 6949ac8d170..081c5cb6dfe 100644 --- a/cpp/tests/community/mg_leiden_test.cpp +++ b/cpp/tests/community/mg_leiden_test.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, NVIDIA CORPORATION. + * Copyright (c) 2021-2025, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -175,6 +175,7 @@ class Tests_MGLeiden if (leiden_usecase.check_correctness_) { SCOPED_TRACE("compare modularity input"); + // FIXME: The dendrogram is unused compare_sg_results(*handle_, rng_state, mg_graph_view, @@ -184,6 +185,41 @@ class Tests_MGLeiden leiden_usecase.theta_, mg_modularity); } + + // Check numbering + vertex_t num_vertices = mg_graph_view.local_vertex_partition_range_size(); + rmm::device_uvector clustering_v(num_vertices, handle_->get_stream()); + cugraph::leiden(*handle_, + rng_state, + mg_graph_view, + mg_edge_weight_view, + clustering_v.data(), + leiden_usecase.max_level_, + leiden_usecase.resolution_); + + auto unique_clustering_v = cugraph::test::sort(*handle_, clustering_v); + + unique_clustering_v = cugraph::test::unique(*handle_, std::move(unique_clustering_v)); + + unique_clustering_v = cugraph::test::device_allgatherv( + *handle_, unique_clustering_v.data(), unique_clustering_v.size()); + + unique_clustering_v = cugraph::test::sort(*handle_, unique_clustering_v); + + unique_clustering_v = cugraph::test::unique(*handle_, std::move(unique_clustering_v)); + + auto h_unique_clustering_v = cugraph::test::to_host(*handle_, unique_clustering_v); + + auto expected_unique_clustering_v = cugraph::test::sequence( + *handle_, unique_clustering_v.size(), size_t{1}, h_unique_clustering_v[0]); + + auto h_expected_unique_clustering_v = + cugraph::test::to_host(*handle_, expected_unique_clustering_v); + + ASSERT_TRUE(std::equal(h_unique_clustering_v.begin(), + h_unique_clustering_v.end(), + h_expected_unique_clustering_v.begin())) + << "Returned cluster IDs are not numbered consecutively"; } private: diff --git a/python/cugraph/cugraph/dask/community/leiden.py b/python/cugraph/cugraph/dask/community/leiden.py index bdcf9edc7bb..adcb278928f 100644 --- a/python/cugraph/cugraph/dask/community/leiden.py +++ b/python/cugraph/cugraph/dask/community/leiden.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -156,13 +156,13 @@ def leiden( input_graph._plc_graph[w], max_iter, resolution, - random_state, + (random_state + i) if random_state is not None else random_state, theta, do_expensive_check, workers=[w], allow_other_workers=False, ) - for w in Comms.get_workers() + for i, w in enumerate(Comms.get_workers()) ] wait(result) diff --git a/python/cugraph/cugraph/tests/community/test_leiden.py b/python/cugraph/cugraph/tests/community/test_leiden.py index 48300b2201c..04ed855adbb 100644 --- a/python/cugraph/cugraph/tests/community/test_leiden.py +++ b/python/cugraph/cugraph/tests/community/test_leiden.py @@ -1,4 +1,4 @@ -# Copyright (c) 2019-2024, NVIDIA CORPORATION. +# Copyright (c) 2019-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -19,6 +19,7 @@ import cugraph import cudf +from cudf.testing.testing import assert_series_equal from cugraph.testing import utils, UNDIRECTED_DATASETS from cugraph.datasets import karate_asymmetric @@ -185,6 +186,18 @@ def test_leiden(graph_file): leiden_parts, leiden_mod = cugraph_leiden(G) louvain_parts, louvain_mod = cugraph_louvain(G) + unique_parts = ( + leiden_parts["partition"] + .drop_duplicates() + .sort_values(ascending=True) + .reset_index(drop=True) + ) + + idx_col = cudf.Series(unique_parts.index) + + # Ensure Leiden cluster's ID are numbered consecutively + assert_series_equal(unique_parts, idx_col, check_dtype=False, check_names=False) + # Leiden modularity score is smaller than Louvain's assert leiden_mod >= (0.75 * louvain_mod) @@ -202,6 +215,18 @@ def test_leiden_nx(graph_file): leiden_parts, leiden_mod = cugraph_leiden(G) louvain_parts, louvain_mod = cugraph_louvain(G) + unique_parts = ( + cudf.Series(leiden_parts.values()) + .drop_duplicates() + .sort_values(ascending=True) + .reset_index(drop=True) + ) + + idx_col = cudf.Series(unique_parts.index) + + # Ensure Leiden cluster's ID are numbered consecutively + assert_series_equal(unique_parts, idx_col, check_dtype=False, check_names=False) + # Calculating modularity scores for comparison # Leiden modularity score is smaller than Louvain's assert leiden_mod >= (0.75 * louvain_mod) diff --git a/python/cugraph/cugraph/tests/community/test_leiden_mg.py b/python/cugraph/cugraph/tests/community/test_leiden_mg.py index 2904ecd12a2..4f6fee029d7 100644 --- a/python/cugraph/cugraph/tests/community/test_leiden_mg.py +++ b/python/cugraph/cugraph/tests/community/test_leiden_mg.py @@ -1,4 +1,4 @@ -# Copyright (c) 2020-2024, NVIDIA CORPORATION. +# Copyright (c) 2020-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -16,6 +16,8 @@ import cugraph import cugraph.dask as dcg from cugraph.datasets import karate_asymmetric, karate, dolphins +import cudf +from cudf.testing.testing import assert_series_equal # ============================================================================= @@ -64,6 +66,19 @@ def test_mg_leiden_with_edgevals_undirected_graph(dask_client, dataset): dg = get_mg_graph(dataset, directed=False) parts, mod = dcg.leiden(dg) + unique_parts = ( + parts["partition"] + .compute() + .drop_duplicates() + .sort_values(ascending=True) + .reset_index(drop=True) + ) + + idx_col = cudf.Series(unique_parts.index) + + # Ensure Leiden cluster's ID are numbered consecutively + assert_series_equal(unique_parts, idx_col, check_dtype=False, check_names=False) + # FIXME: either call Nx with the same dataset and compare results, or # hardcode golden results to compare to. print() From dd228f9f1bea23b74b17dc0f939ff1b0b15cee4f Mon Sep 17 00:00:00 2001 From: James Lamb Date: Mon, 13 Jan 2025 08:03:35 -0600 Subject: [PATCH 7/7] pylibcugraph: declare cupy and numpy hard dependencies (#4854) While testing stuff for #4804, I found that `pylibcugraph` has a hard runtime dependency on `cupy` and `numpy`, but isn't declaring them ```shell docker run \ --rm \ --gpus 0 \ -it rapidsai/ci-wheel:latest \ bash python -m pip install 'pylibcugraph-cu12==25.2.*,>=0.0.0a0' python -c "import pylibcugraph" ``` ```text Traceback (most recent call last): File "", line 1, in File "/pyenv/versions/3.12.7/lib/python3.12/site-packages/pylibcugraph/__init__.py", line 18, in from pylibcugraph.graphs import SGGraph, MGGraph File "graphs.pyx", line 1, in init pylibcugraph.graphs File "utils.pyx", line 20, in init pylibcugraph.utils ModuleNotFoundError: No module named 'cupy' ``` This declares those dependencies. It also promotes `cugraph-service-server`'s `numpy` dependency to a hard runtime dependency. ## Notes for Reviewers ### Evidence that `pylibcugraph` *already* has a hard dependency on these libraries They're used unconditionally here: https://github.com/rapidsai/cugraph/blob/cddd69ea3f62cabdb3aa2b7b6676e0b74ab4eefc/python/pylibcugraph/pylibcugraph/utils.pyx#L19-L20 But have import guards in other places: https://github.com/rapidsai/cugraph/blob/cddd69ea3f62cabdb3aa2b7b6676e0b74ab4eefc/python/pylibcugraph/pylibcugraph/sssp.pyx#L127-L139 So this PR doesn't introduce new hard dependencies... it just makes them explicit, to make it easier to install and run `pylibcugraph`. ### How was this not caught in CI? Import tests aren't run here for conda packages, because conda builds happen on CPU-only nodes. https://github.com/rapidsai/cugraph/blob/cddd69ea3f62cabdb3aa2b7b6676e0b74ab4eefc/ci/build_python.sh#L27-L30 And `numpy` and `cupy` are probably getting pulled in by some of the wheels' test dependencies, like `cudf`, here: https://github.com/rapidsai/cugraph/blob/cddd69ea3f62cabdb3aa2b7b6676e0b74ab4eefc/ci/test_wheel.sh#L17 ### Should we just make the other unconditional cases conditional with try-catching? No. Talked with @rlratzel, @ChuckHastings, and @eriknw offline, and we agreed to declare these as hard runtime dependencies (and remove the try-catching in places that had it). 
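As a compact sketch of the import pattern this change applies across the Cython modules (illustrative only; the concrete per-module diffs are below):

```python
# Before: guarded, optional import that raised only when the function was called.
try:
    import cupy
except ModuleNotFoundError:
    raise RuntimeError("this function requires the cupy package, which could "
                       "not be imported")

# After: cupy and numpy are declared hard dependencies, so modules import them
# at the top level and a missing package fails immediately at import time.
import cupy
import numpy
```
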
Authors: - James Lamb (https://github.com/jameslamb) Approvers: - Rick Ratzel (https://github.com/rlratzel) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cugraph/pull/4854 --- conda/recipes/pylibcugraph/meta.yaml | 4 +++- dependencies.yaml | 14 +++++++++++++- python/cugraph-service/server/pyproject.toml | 4 ---- python/pylibcugraph/pylibcugraph/bfs.pyx | 9 +++------ python/pylibcugraph/pylibcugraph/node2vec.pyx | 12 +++--------- python/pylibcugraph/pylibcugraph/pagerank.pyx | 19 ++++--------------- .../pylibcugraph/personalized_pagerank.pyx | 19 ++++--------------- python/pylibcugraph/pylibcugraph/sssp.pyx | 19 ++++--------------- python/pylibcugraph/pyproject.toml | 2 ++ 9 files changed, 36 insertions(+), 66 deletions(-) diff --git a/conda/recipes/pylibcugraph/meta.yaml b/conda/recipes/pylibcugraph/meta.yaml index 54d29a68d91..e8a0286d2b8 100644 --- a/conda/recipes/pylibcugraph/meta.yaml +++ b/conda/recipes/pylibcugraph/meta.yaml @@ -1,4 +1,4 @@ -# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# Copyright (c) 2023-2025, NVIDIA CORPORATION. {% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') + environ.get('VERSION_SUFFIX', '') %} {% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %} @@ -74,7 +74,9 @@ requirements: {% else %} - cuda-cudart {% endif %} + - cupy >=12.0.0 - libcugraph ={{ version }} + - numpy>=1.23,<3.0a0 - pylibraft ={{ minor_version }} - python - rmm ={{ minor_version }} diff --git a/dependencies.yaml b/dependencies.yaml index 56c0f9deba0..906ccb24cb9 100755 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -141,8 +141,10 @@ files: table: project includes: - cuda_wheels + - depends_on_cupy - depends_on_pylibraft - depends_on_rmm + - python_run_pylibcugraph py_test_pylibcugraph: output: pyproject pyproject_dir: python/pylibcugraph @@ -199,7 +201,7 @@ files: key: test includes: - test_python_common - - test_python_cugraph + - test_python_cugraph_service_server channels: - rapidsai - rapidsai-nightly @@ -421,6 +423,11 @@ dependencies: - matrix: # All CUDA 11 versions packages: - cuda-python>=11.8.5,<12.0a0 + python_run_pylibcugraph: + common: + - output_types: [conda, pyproject, requirements] + packages: + - *numpy python_run_cugraph_service_client: common: - output_types: [conda, pyproject] @@ -468,6 +475,11 @@ dependencies: - output_types: [conda] packages: - *thrift + test_python_cugraph_service_server: + common: + - output_types: [conda, pyproject] + packages: + - *numpy test_python_pylibcugraph: common: - output_types: [conda, pyproject] diff --git a/python/cugraph-service/server/pyproject.toml b/python/cugraph-service/server/pyproject.toml index 29ee41854f8..ec75af55cb3 100644 --- a/python/cugraph-service/server/pyproject.toml +++ b/python/cugraph-service/server/pyproject.toml @@ -46,16 +46,12 @@ cugraph-service-server = "cugraph_service_server.__main__:main" [project.optional-dependencies] test = [ - "certifi", - "networkx>=2.5.1", "numpy>=1.23,<3.0a0", "pandas", "pytest", "pytest-benchmark", "pytest-cov", "pytest-xdist", - "python-louvain", - "scikit-learn>=0.23.1", "scipy", ] # This list was generated by `rapids-dependency-file-generator`. To make changes, edit ../../../dependencies.yaml and run `rapids-dependency-file-generator`. 
diff --git a/python/pylibcugraph/pylibcugraph/bfs.pyx b/python/pylibcugraph/pylibcugraph/bfs.pyx index b92afcfd7db..e6a22109b9e 100644 --- a/python/pylibcugraph/pylibcugraph/bfs.pyx +++ b/python/pylibcugraph/pylibcugraph/bfs.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -14,6 +14,8 @@ # Have cython use python 3 syntax # cython: language_level = 3 +import cupy + from libc.stdint cimport uintptr_t from libc.stdint cimport int32_t from libc.limits cimport INT_MAX @@ -141,11 +143,6 @@ def bfs(ResourceHandle handle, _GPUGraph graph, >>> }) """ - try: - import cupy - except ModuleNotFoundError: - raise RuntimeError("bfs requires the cupy package, which could not " - "be imported") assert_CAI_type(sources, "sources") if depth_limit <= 0: diff --git a/python/pylibcugraph/pylibcugraph/node2vec.pyx b/python/pylibcugraph/pylibcugraph/node2vec.pyx index 0e0fd73e6c8..e81afb58061 100644 --- a/python/pylibcugraph/pylibcugraph/node2vec.pyx +++ b/python/pylibcugraph/pylibcugraph/node2vec.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -14,6 +14,8 @@ # Have cython use python 3 syntax # cython: language_level = 3 +import cupy + from libc.stdint cimport uintptr_t from pylibcugraph._cugraph_c.types cimport ( @@ -124,14 +126,6 @@ def node2vec(ResourceHandle resource_handle, """ - # FIXME: import these modules here for now until a better pattern can be - # used for optional imports (perhaps 'import_optional()' from cugraph), or - # these are made hard dependencies. - try: - import cupy - except ModuleNotFoundError: - raise RuntimeError("node2vec requires the cupy package, which could not " - "be imported") assert_CAI_type(seed_array, "seed_array") cdef cugraph_resource_handle_t* c_resource_handle_ptr = \ diff --git a/python/pylibcugraph/pylibcugraph/pagerank.pyx b/python/pylibcugraph/pylibcugraph/pagerank.pyx index bcb8474ddfa..961e191e054 100644 --- a/python/pylibcugraph/pylibcugraph/pagerank.pyx +++ b/python/pylibcugraph/pylibcugraph/pagerank.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -14,6 +14,9 @@ # Have cython use python 3 syntax # cython: language_level = 3 +import cupy +import numpy + from pylibcugraph._cugraph_c.types cimport ( bool_t, ) @@ -167,20 +170,6 @@ def pagerank(ResourceHandle resource_handle, array([0.11615585, 0.21488841, 0.2988108 , 0.3701449 ], dtype=float32) """ - # FIXME: import these modules here for now until a better pattern can be - # used for optional imports (perhaps 'import_optional()' from cugraph), or - # these are made hard dependencies. 
- try: - import cupy - except ModuleNotFoundError: - raise RuntimeError("pagerank requires the cupy package, which could " - "not be imported") - try: - import numpy - except ModuleNotFoundError: - raise RuntimeError("pagerank requires the numpy package, which could " - "not be imported") - cdef cugraph_type_erased_device_array_view_t* \ initial_guess_vertices_view_ptr = \ create_cugraph_type_erased_device_array_view_from_py_obj( diff --git a/python/pylibcugraph/pylibcugraph/personalized_pagerank.pyx b/python/pylibcugraph/pylibcugraph/personalized_pagerank.pyx index 209d4054491..1926f41b13d 100644 --- a/python/pylibcugraph/pylibcugraph/personalized_pagerank.pyx +++ b/python/pylibcugraph/pylibcugraph/personalized_pagerank.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -14,6 +14,9 @@ # Have cython use python 3 syntax # cython: language_level = 3 +import cupy +import numpy + from pylibcugraph._cugraph_c.types cimport ( bool_t, ) @@ -177,20 +180,6 @@ def personalized_pagerank(ResourceHandle resource_handle, array([0.00446455, 0.00379487, 0.53607565, 0.45566472 ], dtype=float32) """ - # FIXME: import these modules here for now until a better pattern can be - # used for optional imports (perhaps 'import_optional()' from cugraph), or - # these are made hard dependencies. - try: - import cupy - except ModuleNotFoundError: - raise RuntimeError("pagerank requires the cupy package, which could " - "not be imported") - try: - import numpy - except ModuleNotFoundError: - raise RuntimeError("pagerank requires the numpy package, which could " - "not be imported") - cdef cugraph_type_erased_device_array_view_t* \ initial_guess_vertices_view_ptr = \ create_cugraph_type_erased_device_array_view_from_py_obj( diff --git a/python/pylibcugraph/pylibcugraph/sssp.pyx b/python/pylibcugraph/pylibcugraph/sssp.pyx index 7e40a801e94..2eff824d4d3 100644 --- a/python/pylibcugraph/pylibcugraph/sssp.pyx +++ b/python/pylibcugraph/pylibcugraph/sssp.pyx @@ -1,4 +1,4 @@ -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -14,6 +14,9 @@ # Have cython use python 3 syntax # cython: language_level = 3 +import cupy +import numpy + from pylibcugraph._cugraph_c.types cimport ( bool_t, ) @@ -124,20 +127,6 @@ def sssp(ResourceHandle resource_handle, array([-1, -1, 1, 2], dtype=int32) """ - # FIXME: import these modules here for now until a better pattern can be - # used for optional imports (perhaps 'import_optional()' from cugraph), or - # these are made hard dependencies. 
- try: - import cupy - except ModuleNotFoundError: - raise RuntimeError("sssp requires the cupy package, which could not " - "be imported") - try: - import numpy - except ModuleNotFoundError: - raise RuntimeError("sssp requires the numpy package, which could not " - "be imported") - if compute_predecessors is False: raise ValueError("compute_predecessors must be True for the current " "release.") diff --git a/python/pylibcugraph/pyproject.toml b/python/pylibcugraph/pyproject.toml index 72a5e19c702..cd98e37d327 100644 --- a/python/pylibcugraph/pyproject.toml +++ b/python/pylibcugraph/pyproject.toml @@ -23,6 +23,8 @@ authors = [ license = { text = "Apache 2.0" } requires-python = ">=3.10" dependencies = [ + "cupy-cuda11x>=12.0.0", + "numpy>=1.23,<3.0a0", "nvidia-cublas", "nvidia-curand", "nvidia-cusolver",