From 8fbcbab00ffaced3072c6036778e3a4b0ee6346b Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 7 Jan 2025 11:38:09 -0800 Subject: [PATCH 1/5] Check if nightlies have succeeded recently enough (#4840) Contributes to https://github.com/rapidsai/build-planning/issues/127 This PR cannot be merged unless nightly CI has passed within the past 7 days, so if it remains unmerged that will itself be an indication that nightly CI needs fixing. Authors: - Vyas Ramasubramani (https://github.com/vyasr) - Ralph Liu (https://github.com/nv-rliu) Approvers: - Rick Ratzel (https://github.com/rlratzel) - Bradley Dice (https://github.com/bdice) - James Lamb (https://github.com/jameslamb) URL: https://github.com/rapidsai/cugraph/pull/4840 --- .github/workflows/pr.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/pr.yaml b/.github/workflows/pr.yaml index aea81c152e..c167da995c 100644 --- a/.github/workflows/pr.yaml +++ b/.github/workflows/pr.yaml @@ -13,6 +13,7 @@ jobs: # Please keep pr-builder as the top job here pr-builder: needs: + - check-nightly-ci - changed-files - checks - conda-cpp-build @@ -42,6 +43,18 @@ jobs: - name: Telemetry setup if: ${{ vars.TELEMETRY_ENABLED == 'true' }} uses: rapidsai/shared-actions/telemetry-dispatch-stash-base-env-vars@main + check-nightly-ci: + # Switch to ubuntu-latest once it defaults to a version of Ubuntu that + # provides at least Python 3.11 (see + # https://docs.python.org/3/library/datetime.html#datetime.date.fromisoformat) + runs-on: ubuntu-24.04 + env: + RAPIDS_GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + steps: + - name: Check if nightly CI is passing + uses: rapidsai/shared-actions/check_nightly_success/dispatch@main + with: + repo: cugraph changed-files: secrets: inherit needs: telemetry-setup From b4f592e3fa15a714e6b7bde68c59c8a1b8e4011d Mon Sep 17 00:00:00 2001 From: Vyas Ramasubramani Date: Tue, 7 Jan 2025 14:27:21 -0800 Subject: [PATCH 2/5] Support raft's logger targets (#4848) https://github.com/rapidsai/raft/pull/2530 added new targets that we need to make global in cugraph's CMake as well. Authors: - Vyas Ramasubramani (https://github.com/vyasr) Approvers: - Chuck Hastings (https://github.com/ChuckHastings) URL: https://github.com/rapidsai/cugraph/pull/4848 --- cpp/cmake/thirdparty/get_raft.cmake | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cpp/cmake/thirdparty/get_raft.cmake b/cpp/cmake/thirdparty/get_raft.cmake index 8f56372c81..62633d95c6 100644 --- a/cpp/cmake/thirdparty/get_raft.cmake +++ b/cpp/cmake/thirdparty/get_raft.cmake @@ -1,5 +1,5 @@ #============================================================================= -# Copyright (c) 2022-2024, NVIDIA CORPORATION. +# Copyright (c) 2022-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ function(find_and_configure_raft) endif() rapids_cpm_find(raft ${PKG_VERSION} - GLOBAL_TARGETS raft::raft + GLOBAL_TARGETS raft::raft raft::raft_logger raft::raft_logger_impl BUILD_EXPORT_SET cugraph-exports INSTALL_EXPORT_SET cugraph-exports COMPONENTS ${RAFT_COMPONENTS} From cddd69ea3f62cabdb3aa2b7b6676e0b74ab4eefc Mon Sep 17 00:00:00 2001 From: Bradley Dice Date: Tue, 7 Jan 2025 17:18:57 -0600 Subject: [PATCH 3/5] Use cuda-python bindings for getting device properties. (#4830) This PR uses `cuda-python` for getting device properties. These APIs are more stable than getting this information via `numba.cuda`. 
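For reference, a minimal sketch of the calling convention these bindings use (illustrative only; it assumes the `cuda-python` package that provides `cuda.bindings` is installed). Every `cuda.bindings.runtime` call returns a `(status, value)` tuple rather than raising, so each call site checks the status explicitly:

```python
# Illustrative sketch of the (status, value) pattern used throughout this PR;
# not part of the diff below.
from cuda.bindings import runtime

status, device_id = runtime.cudaGetDevice()
if status != runtime.cudaError_t.cudaSuccess:
    raise RuntimeError("Could not get CUDA device.")

status, device_prop = runtime.cudaGetDeviceProperties(device_id)
if status != runtime.cudaError_t.cudaSuccess:
    raise RuntimeError("Could not get CUDA device properties.")

# cudaDeviceProp carries the fields the changes below rely on
# (compute capability and total device memory).
print(f"compute capability: {device_prop.major}.{device_prop.minor}")
print(f"total global memory: {device_prop.totalGlobalMem} bytes")
```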
Companion to #4829 (this is not dependent on that PR, though).

Authors:
  - Bradley Dice (https://github.com/bdice)
  - Ralph Liu (https://github.com/nv-rliu)
  - Kyle Edwards (https://github.com/KyleFromNVIDIA)

Approvers:
  - Kyle Edwards (https://github.com/KyleFromNVIDIA)
  - Rick Ratzel (https://github.com/rlratzel)

URL: https://github.com/rapidsai/cugraph/pull/4830
---
 ci/notebook_list.py                           | 46 ++++++++--------
 .../cugraph/cugraph/dask/common/mg_utils.py   | 13 +++--
 .../cugraph/tests/docs/test_doctests.py       | 14 +++--
 .../utilities/path_retrieval_wrapper.pyx      |  3 +-
 python/cugraph/cugraph/utilities/utils.py     | 54 +++++++++----------
 5 files changed, 64 insertions(+), 66 deletions(-)

diff --git a/ci/notebook_list.py b/ci/notebook_list.py
index f7a284beee..659ac4de75 100644
--- a/ci/notebook_list.py
+++ b/ci/notebook_list.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2021-2023, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -17,7 +17,7 @@
 import glob
 from pathlib import Path
 
-from numba import cuda
+from cuda.bindings import runtime
 
 # for adding another run type and skip file name add to this dictionary
 runtype_dict = {
@@ -30,20 +30,27 @@ def skip_book_dir(runtype):
     # Add all run types here, currently only CI supported
+    return runtype in runtype_dict and Path(runtype_dict.get(runtype)).is_file()
 
-    if runtype in runtype_dict.keys():
-        if Path(runtype_dict.get(runtype)).is_file():
-            return True
-    return False
 
+def _get_cuda_version_string():
+    status, version = runtime.getLocalRuntimeVersion()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA runtime version.")
+    major, minor = divmod(version, 1000)
+    minor //= 10
+    return f"{major}.{minor}"
+
+
+def _is_ampere_or_newer():
+    status, device_id = runtime.cudaGetDevice()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA device.")
+    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA device properties.")
+    return (device_prop.major, device_prop.minor) >= (8, 0)
 
-cuda_version_string = ".".join([str(n) for n in cuda.runtime.get_version()])
-#
-# Not strictly true... however what we mean is
-# Pascal or earlier
-#
-ampere = False
-device = cuda.get_current_device()
 
 parser = argparse.ArgumentParser(description="Condition for running the notebook tests")
 parser.add_argument("runtype", type=str)
@@ -52,19 +59,10 @@ def skip_book_dir(runtype):
 
 runtype = args.runtype
 
-if runtype not in runtype_dict.keys():
+if runtype not in runtype_dict:
     print(f"Unknown Run Type = {runtype}", file=sys.stderr)
     exit()
 
-
-# check for the attribute using both pre and post numba 0.53 names
-cc = getattr(device, "COMPUTE_CAPABILITY", None) or getattr(
-    device, "compute_capability"
-)
-if cc[0] >= 8:
-    ampere = True
-
-skip = False
 for filename in glob.iglob("**/*.ipynb", recursive=True):
     skip = False
     if skip_book_dir(runtype):
@@ -88,7 +86,7 @@ def skip_book_dir(runtype):
             )
             skip = True
             break
-        elif ampere and re.search("# Does not run on Ampere", line):
+        elif _is_ampere_or_newer() and re.search("# Does not run on Ampere", line):
             print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr)
             skip = True
             break
diff --git a/python/cugraph/cugraph/dask/common/mg_utils.py b/python/cugraph/cugraph/dask/common/mg_utils.py
index b04f293dc0..e4e3ac9a44 100644
--- a/python/cugraph/cugraph/dask/common/mg_utils.py
+++ b/python/cugraph/cugraph/dask/common/mg_utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2023, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -13,7 +13,7 @@
 
 import os
 import gc
-import numba.cuda
+from cuda.bindings import runtime
 
 
 # FIXME: this raft import breaks the library if ucx-py is
@@ -53,11 +53,10 @@ def prepare_worker_to_parts(data, client=None):
 
 
 def is_single_gpu():
-    ngpus = len(numba.cuda.gpus)
-    if ngpus > 1:
-        return False
-    else:
-        return True
+    status, count = runtime.cudaGetDeviceCount()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA device count.")
+    return count == 1
 
 
 def get_visible_devices():
diff --git a/python/cugraph/cugraph/tests/docs/test_doctests.py b/python/cugraph/cugraph/tests/docs/test_doctests.py
index 2095fd41fe..9d9f8436b9 100644
--- a/python/cugraph/cugraph/tests/docs/test_doctests.py
+++ b/python/cugraph/cugraph/tests/docs/test_doctests.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2022-2024, NVIDIA CORPORATION.
+# Copyright (c) 2022-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -25,14 +25,21 @@
 import cugraph
 import pylibcugraph
 import cudf
-from numba import cuda
+from cuda.bindings import runtime
 
 from cugraph.testing import utils
 
 modules_to_skip = ["dask", "proto", "raft"]
 datasets = utils.RAPIDS_DATASET_ROOT_DIR_PATH
 
-cuda_version_string = ".".join([str(n) for n in cuda.runtime.get_version()])
+
+def _get_cuda_version_string():
+    status, version = runtime.getLocalRuntimeVersion()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA runtime version.")
+    major = version // 1000
+    minor = (version % 1000) // 10
+    return f"{major}.{minor}"
 
 
 def _is_public_name(name):
@@ -131,6 +138,7 @@ def skip_docstring(docstring_obj):
         NOTE: this function is currently not available on CUDA 11.4 systems.
 
     """
     docstring = docstring_obj.docstring
+    cuda_version_string = _get_cuda_version_string()
     for line in docstring.splitlines():
         if f"currently not available on CUDA {cuda_version_string} systems" in line:
             return f"docstring example not supported on CUDA {cuda_version_string}"
diff --git a/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx b/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx
index 98d11ad07d..8e71c7aae4 100644
--- a/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx
+++ b/python/cugraph/cugraph/utilities/path_retrieval_wrapper.pyx
@@ -1,4 +1,4 @@
-# Copyright (c) 2021, NVIDIA CORPORATION.
+# Copyright (c) 2021-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -19,7 +19,6 @@
 from cugraph.utilities.path_retrieval cimport get_traversed_cost as c_get_traversed_cost
 from cugraph.structure.graph_primtypes cimport *
 from libc.stdint cimport uintptr_t
-from numba import cuda
 import cudf
 import numpy as np
 
diff --git a/python/cugraph/cugraph/utilities/utils.py b/python/cugraph/cugraph/utilities/utils.py
index 0257da4ffc..074503e2f6 100644
--- a/python/cugraph/cugraph/utilities/utils.py
+++ b/python/cugraph/cugraph/utilities/utils.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2024, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
@@ -15,13 +15,10 @@
 import os
 import shutil
 
-from numba import cuda
-
 import cudf
 from cudf.core.column import as_column
 
-from cuda.cudart import cudaDeviceAttr
-from rmm._cuda.gpu import getDeviceAttribute
+from cuda.bindings import runtime
 
 from warnings import warn
 
@@ -210,45 +207,42 @@ def get_traversed_path_list(df, id):
     return answer
 
 
-def is_cuda_version_less_than(min_version=(10, 2)):
+def is_cuda_version_less_than(min_version):
     """
     Returns True if the version of CUDA being used is less than min_version
     """
-    this_cuda_ver = cuda.runtime.get_version()  # returns (<major>, <minor>)
-    if this_cuda_ver[0] > min_version[0]:
-        return False
-    if this_cuda_ver[0] < min_version[0]:
-        return True
-    if this_cuda_ver[1] < min_version[1]:
-        return True
-    return False
+    status, version = runtime.getLocalRuntimeVersion()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA runtime version.")
+    major = version // 1000
+    minor = (version % 1000) // 10
+    return (major, minor) < min_version
 
 
-def is_device_version_less_than(min_version=(7, 0)):
+def is_device_version_less_than(min_version):
     """
     Returns True if the version of CUDA being used is less than min_version
    """
-    major_version = getDeviceAttribute(
-        cudaDeviceAttr.cudaDevAttrComputeCapabilityMajor, 0
-    )
-    minor_version = getDeviceAttribute(
-        cudaDeviceAttr.cudaDevAttrComputeCapabilityMinor, 0
-    )
-    if major_version > min_version[0]:
-        return False
-    if major_version < min_version[0]:
-        return True
-    if minor_version < min_version[1]:
-        return True
-    return False
+    status, device_id = runtime.cudaGetDevice()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA device.")
+    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA device properties.")
+    return (device_prop.major, device_prop.minor) < min_version
 
 
 def get_device_memory_info():
     """
     Returns the total amount of global memory on the device in bytes
     """
-    meminfo = cuda.current_context().get_memory_info()
-    return meminfo[1]
+    status, device_id = runtime.cudaGetDevice()
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA device.")
+    status, device_prop = runtime.cudaGetDeviceProperties(device_id)
+    if status != runtime.cudaError_t.cudaSuccess:
+        raise RuntimeError("Could not get CUDA device properties.")
+    return device_prop.totalGlobalMem
 
 
 # FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if

From e46ff65f58a9448ec62ce09591ee7b0707e2e82c Mon Sep 17 00:00:00 2001
From: Bradley Dice
Date: Thu, 9 Jan 2025 12:13:42 -0600
Subject: [PATCH 4/5] Remove unnecessary CUDA utilities (#4855)

This PR removes some utilities that were updated in
https://github.com/rapidsai/cugraph/pull/4830 but are no longer needed.

xref: https://github.com/rapidsai/build-planning/issues/117

Authors:
  - Bradley Dice (https://github.com/bdice)

Approvers:
  - James Lamb (https://github.com/jameslamb)
  - Rick Ratzel (https://github.com/rlratzel)

URL: https://github.com/rapidsai/cugraph/pull/4855
---
 .../cugraph/pytest-based/bench_algos.py    |  6 +--
 ci/notebook_list.py                        | 15 +------
 python/cugraph/cugraph/traversal/ms_bfs.py |  3 +-
 python/cugraph/cugraph/utilities/utils.py  | 40 -------------------
 4 files changed, 3 insertions(+), 61 deletions(-)

diff --git a/benchmarks/cugraph/pytest-based/bench_algos.py b/benchmarks/cugraph/pytest-based/bench_algos.py
index 04407d656d..1c988ea636 100644
--- a/benchmarks/cugraph/pytest-based/bench_algos.py
+++ b/benchmarks/cugraph/pytest-based/bench_algos.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2020-2024, NVIDIA CORPORATION.
+# Copyright (c) 2020-2025, NVIDIA CORPORATION.
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at @@ -42,7 +42,6 @@ def setFixtureParamNames(*args, **kwargs): from cugraph.structure.number_map import NumberMap from cugraph.generators import rmat from cugraph.testing import utils, mg_utils -from cugraph.utilities.utils import is_device_version_less_than from cugraph_benchmarking.params import ( directed_datasets, @@ -362,9 +361,6 @@ def bench_sorensen(gpubenchmark, unweighted_graph): gpubenchmark(sorensen, G, vert_pairs) -@pytest.mark.skipif( - is_device_version_less_than((7, 0)), reason="Not supported on Pascal" -) def bench_louvain(gpubenchmark, graph): louvain = dask_cugraph.louvain if is_graph_distributed(graph) else cugraph.louvain gpubenchmark(louvain, graph) diff --git a/ci/notebook_list.py b/ci/notebook_list.py index 659ac4de75..db26f2efa2 100644 --- a/ci/notebook_list.py +++ b/ci/notebook_list.py @@ -41,16 +41,7 @@ def _get_cuda_version_string(): minor //= 10 return f"{major}.{minor}" - -def _is_ampere_or_newer(): - status, device_id = runtime.cudaGetDevice() - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device.") - status, device_prop = runtime.cudaGetDeviceProperties(device_id) - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device properties.") - return (device_prop.major, device_prop.minor) >= (8, 0) - +cuda_version_string = _get_cuda_version_string() parser = argparse.ArgumentParser(description="Condition for running the notebook tests") parser.add_argument("runtype", type=str) @@ -86,10 +77,6 @@ def _is_ampere_or_newer(): ) skip = True break - elif _is_ampere_or_newer() and re.search("# Does not run on Ampere", line): - print(f"SKIPPING {filename} (does not run on Ampere)", file=sys.stderr) - skip = True - break elif re.search("# Does not run on CUDA ", line) and ( cuda_version_string in line ): diff --git a/python/cugraph/cugraph/traversal/ms_bfs.py b/python/cugraph/cugraph/traversal/ms_bfs.py index df624e453e..b80331d475 100644 --- a/python/cugraph/cugraph/traversal/ms_bfs.py +++ b/python/cugraph/cugraph/traversal/ms_bfs.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021-2023, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at @@ -56,7 +56,6 @@ def _get_feasibility(G, sources, components=None, depth_limit=None): # Fixme not implemented in RMM yet # using 96GB upper bound for now - # mem = get_device_memory_info() mem = 9.6e10 n_sources = sources.size V = G.number_of_vertices() diff --git a/python/cugraph/cugraph/utilities/utils.py b/python/cugraph/cugraph/utilities/utils.py index 074503e2f6..493a9850a0 100644 --- a/python/cugraph/cugraph/utilities/utils.py +++ b/python/cugraph/cugraph/utilities/utils.py @@ -18,8 +18,6 @@ import cudf from cudf.core.column import as_column -from cuda.bindings import runtime - from warnings import warn # optional dependencies @@ -207,44 +205,6 @@ def get_traversed_path_list(df, id): return answer -def is_cuda_version_less_than(min_version): - """ - Returns True if the version of CUDA being used is less than min_version - """ - status, version = runtime.getLocalRuntimeVersion() - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA runtime version.") - major = version // 1000 - minor = (version % 1000) // 10 - return (major, minor) < min_version - - -def is_device_version_less_than(min_version): - """ - Returns True if the version of CUDA being used is less than min_version - """ - status, device_id = runtime.cudaGetDevice() - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device.") - status, device_prop = runtime.cudaGetDeviceProperties(device_id) - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device properties.") - return (device_prop.major, device_prop.minor) < min_version - - -def get_device_memory_info(): - """ - Returns the total amount of global memory on the device in bytes - """ - status, device_id = runtime.cudaGetDevice() - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device.") - status, device_prop = runtime.cudaGetDeviceProperties(device_id) - if status != runtime.cudaError_t.cudaSuccess: - raise RuntimeError("Could not get CUDA device properties.") - return device_prop.totalGlobalMem - - # FIXME: if G is a Nx type, the weight attribute is assumed to be "weight", if # set. An additional optional parameter for the weight attr name when accepting # Nx graphs may be needed. From the Nx docs: From a6eea2f7cc40165e584e459351d1a92406de7415 Mon Sep 17 00:00:00 2001 From: James Lamb Date: Thu, 9 Jan 2025 13:16:43 -0600 Subject: [PATCH 5/5] move wheel installs into per-project test scripts, other packaging changes (#4847) Proposes some changes to small things I noticed while working on #4804. * CMake option cleanup: - adds `BUILD_PRIMS_BENCH OFF`, removes `BUILD_BENCH OFF` in `get_raft.cmake` (matching changes to RAFT from 23.04: https://github.com/rapidsai/raft/pull/1304) - adds `BUILD_BENCHMARKS OFF` in `get_cudf.cmake` ([this is the default](https://github.com/rapidsai/cudf/blob/b81d9e17fbffbb912e0128148f556bf7af41b6ab/cpp/CMakeLists.txt#L51), but better to be explicit) * consolidates some `.gitignore` rules, adds wheels and conda packages there * moves responsibility for installing CI artifacts into `ci/test_wheel_{package}.sh` and out of `ci/test_wheel.sh` * splits up Cython and `scikit-build-core` in `dependencies.yaml` - *every Python package here using Cython also uses `scikit-build-core`, but the reverse won't be true as of #4804 ... 
making that change here is harmless and reduces the size of the diff in #4804 a bit* Authors: - James Lamb (https://github.com/jameslamb) Approvers: - Kyle Edwards (https://github.com/KyleFromNVIDIA) - Bradley Dice (https://github.com/bdice) URL: https://github.com/rapidsai/cugraph/pull/4847 --- .gitignore | 10 +++------- ci/test_wheel.sh | 11 +---------- ci/test_wheel_cugraph.sh | 14 ++++++++++---- ci/test_wheel_pylibcugraph.sh | 11 ++++++++++- cpp/cmake/thirdparty/get_raft.cmake | 2 +- .../cmake/thirdparty/get_cudf.cmake | 10 ++++++---- .../cmake/thirdparty/get_cugraph.cmake | 6 +++--- dependencies.yaml | 17 +++++++++++------ 8 files changed, 45 insertions(+), 36 deletions(-) diff --git a/.gitignore b/.gitignore index 9480c2618b..7853526b22 100644 --- a/.gitignore +++ b/.gitignore @@ -32,6 +32,7 @@ test-results ## Python build directories & artifacts dask-worker-space/ htmlcov +*.conda dist/ *.egg-info/ python/build @@ -40,9 +41,7 @@ wheels/ wheelhouse/ _skbuild/ cufile.log - -## pylibcugraph build directories & artifacts -python/pylibcugraph/pylibcugraph.egg-info +*.whl ## Patching *.diff @@ -89,10 +88,7 @@ docs/cugraph/lib* docs/cugraph/api/* # created by Dask tests -python/dask-worker-space -python/cugraph/dask-worker-space -python/cugraph/cugraph/dask-worker-space -python/cugraph/cugraph/tests/dask-worker-space +dask-worker-space/ # Sphinx docs & build artifacts docs/cugraph/source/api_docs/api/* diff --git a/ci/test_wheel.sh b/ci/test_wheel.sh index b5cd90996c..c96e91b037 100755 --- a/ci/test_wheel.sh +++ b/ci/test_wheel.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# Copyright (c) 2023-2025, NVIDIA CORPORATION. set -eoxu pipefail @@ -7,15 +7,6 @@ package_name=$1 python_package_name=$(echo ${package_name}|sed 's/-/_/g') -mkdir -p ./dist -RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" - -RAPIDS_PY_WHEEL_NAME="${package_name}_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./dist - -# use 'ls' to expand wildcard before adding `[extra]` requires for pip -# pip creates wheels using python package names -python -m pip install $(ls ./dist/${python_package_name}*.whl)[test] - # Run smoke tests for aarch64 pull requests arch=$(uname -m) if [[ "${arch}" == "aarch64" && ${RAPIDS_BUILD_TYPE} == "pull-request" ]]; then diff --git a/ci/test_wheel_cugraph.sh b/ci/test_wheel_cugraph.sh index 295cec7cb1..4703ed6198 100755 --- a/ci/test_wheel_cugraph.sh +++ b/ci/test_wheel_cugraph.sh @@ -1,11 +1,17 @@ #!/bin/bash -# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# Copyright (c) 2023-2025, NVIDIA CORPORATION. 
set -eoxu pipefail -# Download the pylibcugraph built in the previous step +# Download the packages built in the previous step +mkdir -p ./dist RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" -RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 ./local-pylibcugraph-dep -python -m pip install --no-deps ./local-pylibcugraph-dep/pylibcugraph*.whl +RAPIDS_PY_WHEEL_NAME="cugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./dist +RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./local-pylibcugraph-dep + +# echo to expand wildcard before adding `[extra]` requires for pip +python -m pip install \ + "$(echo ./dist/cugraph*.whl)[test]" \ + ./local-pylibcugraph-dep/pylibcugraph*.whl ./ci/test_wheel.sh cugraph diff --git a/ci/test_wheel_pylibcugraph.sh b/ci/test_wheel_pylibcugraph.sh index ddc9976308..d0c97834a2 100755 --- a/ci/test_wheel_pylibcugraph.sh +++ b/ci/test_wheel_pylibcugraph.sh @@ -1,6 +1,15 @@ #!/bin/bash -# Copyright (c) 2023-2024, NVIDIA CORPORATION. +# Copyright (c) 2023-2025, NVIDIA CORPORATION. set -eoxu pipefail +# Download the packages built in the previous step +mkdir -p ./dist +RAPIDS_PY_CUDA_SUFFIX="$(rapids-wheel-ctk-name-gen ${RAPIDS_CUDA_VERSION})" +RAPIDS_PY_WHEEL_NAME="pylibcugraph_${RAPIDS_PY_CUDA_SUFFIX}" rapids-download-wheels-from-s3 python ./dist + +# echo to expand wildcard before adding `[extra]` requires for pip +python -m pip install \ + "$(echo ./dist/pylibcugraph*.whl)[test]" + ./ci/test_wheel.sh pylibcugraph diff --git a/cpp/cmake/thirdparty/get_raft.cmake b/cpp/cmake/thirdparty/get_raft.cmake index 62633d95c6..28e9ec0cda 100644 --- a/cpp/cmake/thirdparty/get_raft.cmake +++ b/cpp/cmake/thirdparty/get_raft.cmake @@ -51,7 +51,7 @@ function(find_and_configure_raft) OPTIONS "RAFT_COMPILE_LIBRARY ${PKG_COMPILE_RAFT_LIB}" "BUILD_TESTS OFF" - "BUILD_BENCH OFF" + "BUILD_PRIMS_BENCH OFF" "BUILD_CAGRA_HNSWLIB OFF" ) diff --git a/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake b/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake index 8d57bf570b..aab159d424 100644 --- a/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake +++ b/cpp/libcugraph_etl/cmake/thirdparty/get_cudf.cmake @@ -1,5 +1,5 @@ #============================================================================= -# Copyright (c) 2021, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -27,7 +27,9 @@ function(find_and_configure_cudf) GIT_REPOSITORY https://github.com/${PKG_FORK}/cudf.git GIT_TAG ${PKG_PINNED_TAG} SOURCE_SUBDIR cpp - OPTIONS "BUILD_TESTS OFF" + OPTIONS + "BUILD_BENCHMARKS OFF" + "BUILD_TESTS OFF" ) message(VERBOSE "CUGRAPH_ETL: Using CUDF located in ${cudf_SOURCE_DIR}") @@ -39,8 +41,8 @@ set(CUGRAPH_ETL_BRANCH_VERSION_cudf "${CUGRAPH_ETL_VERSION_MAJOR}.${CUGRAPH_ETL_ # Change pinned tag and fork here to test a commit in CI -# To use a different RAFT locally, set the CMake variable -# RPM_cudf_SOURCE=/path/to/local/cudf +# To use a different cuDF locally, set the CMake variable +# CPM_cudf_SOURCE=/path/to/local/cudf find_and_configure_cudf(VERSION ${CUGRAPH_ETL_MIN_VERSION_cudf} FORK rapidsai PINNED_TAG branch-${CUGRAPH_ETL_BRANCH_VERSION_cudf} diff --git a/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake b/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake index c551646d91..c42bd8d2ae 100644 --- a/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake +++ b/cpp/libcugraph_etl/cmake/thirdparty/get_cugraph.cmake @@ -1,5 +1,5 @@ #============================================================================= -# Copyright (c) 2021, NVIDIA CORPORATION. +# Copyright (c) 2021-2025, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,8 +39,8 @@ set(CUGRAPH_ETL_BRANCH_VERSION_cugraph "${CUGRAPH_ETL_VERSION_MAJOR}.${CUGRAPH_E # Change pinned tag and fork here to test a commit in CI -# To use a different RAFT locally, set the CMake variable -# RPM_cugraph_SOURCE=/path/to/local/cugraph +# To use a different cuGraph locally, set the CMake variable +# CPM_cugraph_SOURCE=/path/to/local/cugraph find_and_configure_cugraph(VERSION ${CUGRAPH_ETL_MIN_VERSION_cugraph} FORK rapidsai PINNED_TAG branch-${CUGRAPH_ETL_BRANCH_VERSION_cugraph} diff --git a/dependencies.yaml b/dependencies.yaml index e1a8cc065c..56c0f9deba 100755 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -26,6 +26,7 @@ files: - depends_on_ucx_py - docs - python_build_cythonize + - python_build_skbuild - python_build_rapids - python_build_wheel - python_run_cugraph @@ -77,6 +78,7 @@ files: includes: - python_build_cythonize - python_build_rapids + - python_build_skbuild py_rapids_build_cugraph: output: pyproject pyproject_dir: python/cugraph @@ -121,6 +123,7 @@ files: includes: - python_build_cythonize - python_build_rapids + - python_build_skbuild py_rapids_build_pylibcugraph: output: pyproject pyproject_dir: python/pylibcugraph @@ -370,6 +373,14 @@ dependencies: - output_types: [conda, pyproject, requirements] packages: - rapids-build-backend>=0.3.1,<0.4.0.dev0 + python_build_skbuild: + common: + - output_types: conda + packages: + - scikit-build-core>=0.10.0 + - output_types: [requirements, pyproject] + packages: + - scikit-build-core[pyproject]>=0.10.0 python_build_wheel: common: - output_types: [conda, pyproject, requirements] @@ -381,12 +392,6 @@ dependencies: - output_types: [conda, pyproject, requirements] packages: - cython>=3.0.0 - - output_types: conda - packages: - - scikit-build-core>=0.10.0 - - output_types: [pyproject, requirements] - packages: - - scikit-build-core[pyproject]>=0.10.0 python_run_cugraph: common: - output_types: [conda, pyproject]