From 4d0bdfde3c706028127b22ff27ce83ee66eb5540 Mon Sep 17 00:00:00 2001
From: Bradley Dice
Date: Fri, 10 Feb 2023 18:08:38 -0600
Subject: [PATCH] Remove gpuCI scripts. (#1111)

Since migrating to GitHub Actions, the gpuCI scripts are no longer needed. This PR removes those outdated gpuCI scripts.

Authors:
  - Bradley Dice (https://github.com/bdice)
  - AJ Schmidt (https://github.com/ajschmidt8)

Approvers:
  - Ray Douglass (https://github.com/raydouglass)
  - Mads R. B. Kristensen (https://github.com/madsbk)
  - AJ Schmidt (https://github.com/ajschmidt8)

URL: https://github.com/rapidsai/dask-cuda/pull/1111
---
 .github/labeler.yml          |   2 +-
 ci/checks/style.sh           |  16 ----
 ci/cpu/build.sh              | 108 --------------------------
 ci/cpu/upload-pypi.sh        |  16 ----
 ci/cpu/upload.sh             |  42 ----------
 ci/gpu/build.sh              | 129 -------------------------------
 ci/local/README.md           |  58 --------------
 ci/local/build.sh            | 145 -----------------------------------
 ci/release/update-version.sh |   3 -
 9 files changed, 1 insertion(+), 518 deletions(-)
 delete mode 100755 ci/checks/style.sh
 delete mode 100755 ci/cpu/build.sh
 delete mode 100755 ci/cpu/upload-pypi.sh
 delete mode 100755 ci/cpu/upload.sh
 delete mode 100755 ci/gpu/build.sh
 delete mode 100644 ci/local/README.md
 delete mode 100755 ci/local/build.sh

diff --git a/.github/labeler.yml b/.github/labeler.yml
index e8c3f14dc..89814c09a 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -5,7 +5,7 @@
 python:
   - 'dask_cuda/**'
 
-gpuCI:
+ci:
   - 'ci/**'
 
 conda:
diff --git a/ci/checks/style.sh b/ci/checks/style.sh
deleted file mode 100755
index 5d01f97d9..000000000
--- a/ci/checks/style.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2020, NVIDIA CORPORATION.
-################################################################################
-# dask-cuda Style Tester
-################################################################################
-
-# Ignore errors and set path
-set +e
-PATH=/opt/conda/bin:$PATH
-
-# Activate common conda env
-. /opt/conda/etc/profile.d/conda.sh
-conda activate rapids
-
-# Run pre-commit checks
-pre-commit run --hook-stage manual --all-files
diff --git a/ci/cpu/build.sh b/ci/cpu/build.sh
deleted file mode 100755
index bd819098f..000000000
--- a/ci/cpu/build.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) 2019-2022, NVIDIA CORPORATION.
-################################################################################
-# dask-cuda cpu build
-################################################################################
-set -e
-
-# Set path and build parallel level
-export PATH=/opt/conda/bin:/usr/local/cuda/bin:$PATH
-export PARALLEL_LEVEL=${PARALLEL_LEVEL:-4}
-
-# Set home to the job's workspace
-export HOME="$WORKSPACE"
-
-# Determine CUDA release version
-export CUDA_REL=${CUDA_VERSION%.*}
-
-# Setup 'gpuci_conda_retry' for build retries (results in 2 total attempts)
-export GPUCI_CONDA_RETRY_MAX=1
-export GPUCI_CONDA_RETRY_SLEEP=30
-
-# Whether to keep `dask/label/dev` channel in the env. If INSTALL_DASK_MAIN=0,
-# `dask/label/dev` channel is removed.
-export INSTALL_DASK_MAIN=1
-
-# Dask version to install when `INSTALL_DASK_MAIN=0`
-export DASK_STABLE_VERSION="2023.1.1"
-
-# Switch to project root; also root of repo checkout
-cd "$WORKSPACE"
-
-# While conda provides these during conda-build, they are also necessary during
-# the setup.py build for PyPI
-export GIT_DESCRIBE_TAG=`git describe --abbrev=0 --tags`
-export GIT_DESCRIBE_NUMBER=`git rev-list ${GIT_DESCRIBE_TAG}..HEAD --count`
-
-# If nightly build, append current YYMMDD to version
-if [[ "$BUILD_MODE" = "branch" && "$SOURCE_BRANCH" = branch-* ]] ; then
-  export VERSION_SUFFIX=`date +%y%m%d`
-fi
-
-################################################################################
-# SETUP - Check environment
-################################################################################
-
-gpuci_logger "Check environment variables"
-env
-
-gpuci_logger "Activate conda env"
-. /opt/conda/etc/profile.d/conda.sh
-conda activate rapids
-
-# Remove `rapidsai-nightly` & `dask/label/dev` channel if we are building main branch
-if [ "$SOURCE_BRANCH" = "main" ]; then
-  conda config --system --remove channels rapidsai-nightly
-  conda config --system --remove channels dask/label/dev
-elif [[ "${INSTALL_DASK_MAIN}" == 0 ]]; then
-# Remove `dask/label/dev` channel if INSTALL_DASK_MAIN=0
-  conda config --system --remove channels dask/label/dev
-fi
-
-gpuci_logger "Check compiler versions"
-python --version
-$CC --version
-$CXX --version
-
-gpuci_logger "Check conda environment"
-conda info
-conda config --show-sources
-conda list --show-channel-urls
-
-# FIX Added to deal with Anancoda SSL verification issues during conda builds
-conda config --set ssl_verify False
-
-# Install latest nightly version for dask and distributed if needed
-if [[ "${INSTALL_DASK_MAIN}" == 1 ]]; then
-  gpuci_logger "Installing dask and distributed from dask nightly channel"
-  gpuci_mamba_retry install -c dask/label/dev \
-    "dask/label/dev::dask" \
-    "dask/label/dev::distributed"
-else
-  gpuci_logger "gpuci_mamba_retry install conda-forge::dask==${DASK_STABLE_VERSION} conda-forge::distributed==${DASK_STABLE_VERSION} conda-forge::dask-core==${DASK_STABLE_VERSION} --force-reinstall"
-  gpuci_mamba_retry install conda-forge::dask==${DASK_STABLE_VERSION} conda-forge::distributed==${DASK_STABLE_VERSION} conda-forge::dask-core==${DASK_STABLE_VERSION} --force-reinstall
-fi
-
-
-################################################################################
-# BUILD - Package builds
-################################################################################
-
-# FIXME: Move boa install to gpuci/rapidsai
-gpuci_mamba_retry install -c conda-forge boa
-
-gpuci_logger "Build conda pkg for dask-cuda"
-gpuci_conda_retry mambabuild conda/recipes/dask-cuda --python=${PYTHON}
-
-rm -rf dist/
-python setup.py sdist bdist_wheel
-
-################################################################################
-# UPLOAD - Packages
-################################################################################
-
-gpuci_logger "Upload conda pkg..."
-source ci/cpu/upload.sh
-
-gpuci_logger "Upload pypi pkg..."
-source ci/cpu/upload-pypi.sh
diff --git a/ci/cpu/upload-pypi.sh b/ci/cpu/upload-pypi.sh
deleted file mode 100755
index 4eb6a2ad9..000000000
--- a/ci/cpu/upload-pypi.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-set -e
-
-
-if [ ${BUILD_MODE} != "branch" ]; then
-  echo "Skipping upload"
-  return 0
-fi
-
-if [ -z "$TWINE_PASSWORD" ]; then
-  echo "TWINE_PASSWORD not set"
-  return 0
-fi
-
-echo "Upload pypi"
-twine upload --skip-existing -u ${TWINE_USERNAME:-rapidsai} dist/*
diff --git a/ci/cpu/upload.sh b/ci/cpu/upload.sh
deleted file mode 100755
index 5a40e1f98..000000000
--- a/ci/cpu/upload.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-#
-# Adopted from https://github.com/tmcdonell/travis-scripts/blob/dfaac280ac2082cd6bcaba3217428347899f2975/update-accelerate-buildbot.sh
-
-set -e
-
-# Setup 'gpuci_retry' for upload retries (results in 4 total attempts)
-export GPUCI_RETRY_MAX=3
-export GPUCI_RETRY_SLEEP=30
-
-# Set default label options if they are not defined elsewhere
-export LABEL_OPTION=${LABEL_OPTION:-"--label main"}
-
-# Skip uploads unless BUILD_MODE == "branch"
-if [ ${BUILD_MODE} != "branch" ]; then
-  echo "Skipping upload"
-  return 0
-fi
-
-# Skip uploads if there is no upload key
-if [ -z "$MY_UPLOAD_KEY" ]; then
-  echo "No upload key"
-  return 0
-fi
-
-################################################################################
-# SETUP - Get conda file output locations
-################################################################################
-
-gpuci_logger "Get conda file output locations"
-export DASKCUDA_FILE=`conda build conda/recipes/dask-cuda --python=$PYTHON --output`
-
-################################################################################
-# UPLOAD - Conda packages
-################################################################################
-
-gpuci_logger "Starting conda uploads"
-
-test -e ${DASKCUDA_FILE}
-echo "Upload Dask-cuda"
-echo ${DASKCUDA_FILE}
-gpuci_retry anaconda -t ${MY_UPLOAD_KEY} upload -u ${CONDA_USERNAME:-rapidsai} ${LABEL_OPTION} --skip-existing ${DASKCUDA_FILE} --no-progress
diff --git a/ci/gpu/build.sh b/ci/gpu/build.sh
deleted file mode 100755
index 14b871818..000000000
--- a/ci/gpu/build.sh
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/bin/bash
-# Copyright (c) 2020, NVIDIA CORPORATION.
-##############################################
-# dask-cuda GPU build and test script for CI #
-##############################################
-set -e
-NUMARGS=$#
-ARGS=$*
-
-# Arg parsing function
-function hasArg {
-    (( ${NUMARGS} != 0 )) && (echo " ${ARGS} " | grep -q " $1 ")
-}
-
-# Set path and build parallel level
-export PATH=/opt/conda/bin:/usr/local/cuda/bin:$PATH
-export PARALLEL_LEVEL=${PARALLEL_LEVEL:-4}
-export CUDA_REL=${CUDA_VERSION%.*}
-export CUDA_REL2=${CUDA//./}
-
-# Set home to the job's workspace
-export HOME="$WORKSPACE"
-
-# Parse git describe
-cd "$WORKSPACE"
-export GIT_DESCRIBE_TAG=`git describe --tags`
-export MINOR_VERSION=`echo $GIT_DESCRIBE_TAG | grep -o -E '([0-9]+\.[0-9]+)'`
-export UCX_PATH=$CONDA_PREFIX
-export UCXPY_VERSION=0.31
-unset GIT_DESCRIBE_TAG
-
-# Enable NumPy's __array_function__ protocol (needed for NumPy 1.16.x,
-# will possibly be enabled by default starting on 1.17)
-export NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1
-
-# Install dask and distributed from main branch. Usually needed during
-# development time and disabled before a new dask-cuda release.
-export INSTALL_DASK_MAIN=1
-
-# Dask version to install when `INSTALL_DASK_MAIN=0`
-export DASK_STABLE_VERSION="2023.1.1"
-
-# Temporary workaround for Jupyter errors.
-# See https://github.com/rapidsai/dask-cuda/issues/1040
-export JUPYTER_PLATFORM_DIRS=1
-
-################################################################################
-# SETUP - Check environment
-################################################################################
-
-gpuci_logger "Check environment"
-env
-
-gpuci_logger "Check GPU usage"
-nvidia-smi
-
-gpuci_logger "Activate conda env"
-. /opt/conda/etc/profile.d/conda.sh
-conda activate rapids
-
-conda info
-conda config --show-sources
-conda list --show-channel-urls
-
-# Installing cucim in order to test GDS spilling
-gpuci_mamba_retry install "cudf=${MINOR_VERSION}" \
-              "dask-cudf=${MINOR_VERSION}" \
-              "ucx-py=${UCXPY_VERSION}" \
-              "ucx-proc=*=gpu" \
-              "cucim"
-
-
-gpuci_logger "Check versions"
-python --version
-$CC --version
-$CXX --version
-
-conda info
-conda config --show-sources
-conda list --show-channel-urls
-
-################################################################################
-# BUILD - Build dask-cuda
-################################################################################
-
-# TODO: Move boa install to gpuci/rapidsai
-gpuci_mamba_retry install boa
-
-gpuci_logger "Build and install dask-cuda"
-cd "${WORKSPACE}"
-CONDA_BLD_DIR="${WORKSPACE}/.conda-bld"
-gpuci_conda_retry mambabuild --croot "${CONDA_BLD_DIR}" conda/recipes/dask-cuda --python="${PYTHON}"
-gpuci_mamba_retry install -c "${CONDA_BLD_DIR}" dask-cuda
-
-################################################################################
-# DASK - Install latest nightly version for dask and distributed if needed.
-#        Done after everything else to ensure packages are not downgraded.
-################################################################################
-if [[ "${INSTALL_DASK_MAIN}" == 1 ]]; then
-    gpuci_logger "Installing dask and distributed from dask nightly channel"
-    gpuci_mamba_retry install -c dask/label/dev \
-        "dask/label/dev::dask" \
-        "dask/label/dev::distributed"
-else
-    gpuci_logger "gpuci_mamba_retry install conda-forge::dask==${DASK_STABLE_VERSION} conda-forge::distributed==${DASK_STABLE_VERSION} conda-forge::dask-core==${DASK_STABLE_VERSION} --force-reinstall"
-    gpuci_mamba_retry install conda-forge::dask==${DASK_STABLE_VERSION} conda-forge::distributed==${DASK_STABLE_VERSION} conda-forge::dask-core==${DASK_STABLE_VERSION} --force-reinstall
-    conda config --system --remove channels dask/label/dev
-fi
-
-################################################################################
-# TEST - Run pytests for ucx-py
-################################################################################
-
-if hasArg --skip-tests; then
-    gpuci_logger "Skipping Tests"
-else
-    gpuci_logger "Python pytest for dask-cuda"
-    cd "$WORKSPACE"
-    ls dask_cuda/tests/
-    DASK_CUDA_TEST_SINGLE_GPU=1 UCXPY_IFNAME=eth0 UCX_WARN_UNUSED_ENV_VARS=n UCX_MEMTYPE_CACHE=n pytest -vs --cache-clear --basetemp="$WORKSPACE/dask-cuda-tmp" --junitxml="$WORKSPACE/junit-dask-cuda.xml" --cov-config=.coveragerc --cov=dask_cuda --cov-report=xml:"$WORKSPACE/dask-cuda-coverage.xml" --cov-report term dask_cuda/tests/
-
-    logger "Run local benchmark..."
-    python dask_cuda/benchmarks/local_cudf_shuffle.py --partition-size="1 KiB" -d 0 --runs 1 --backend dask
-    python dask_cuda/benchmarks/local_cudf_shuffle.py --partition-size="1 KiB" -d 0 --runs 1 --backend explicit-comms
-fi
-
-if [ -n "${CODECOV_TOKEN}" ]; then
-    codecov -t $CODECOV_TOKEN
-fi
diff --git a/ci/local/README.md b/ci/local/README.md
deleted file mode 100644
index b121cfc05..000000000
--- a/ci/local/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-## Purpose
-
-This script is designed for developer and contributor use. This tool mimics the actions of gpuCI on your local machine. This allows you to test and even debug your code inside a gpuCI base container before pushing your code as a GitHub commit.
-The script can be helpful in locally triaging and debugging RAPIDS continuous integration failures.
-
-## Requirements
-
-```
-nvidia-docker
-```
-
-## Usage
-
-```
-bash build.sh [-h] [-H] [-s] [-r <repo_dir>] [-i <image_name>]
-Build and test your local repository using a base gpuCI Docker image
-
-where:
-    -H   Show this help text
-    -r   Path to repository (defaults to working directory)
-    -i   Use Docker image (default is gpuci/rapidsai-base:cuda10.0-ubuntu16.04-gcc5-py3.6)
-    -s   Skip building and testing and start an interactive shell in a container of the Docker image
-```
-
-Example Usage:
-`bash build.sh -r ~/rapids/dask-cuda -i gpuci/rapidsai-base:cuda10.1-ubuntu16.04-gcc5-py3.6`
-
-For a full list of available gpuCI docker images, visit our [DockerHub](https://hub.docker.com/r/gpuci/rapidsai-base/tags) page.
-
-Style Check:
-```bash
-$ bash ci/local/build.sh -r ~/rapids/dask-cuda -s
-$ . /opt/conda/etc/profile.d/conda.sh
-$ conda activate rapids #Activate gpuCI conda environment
-$ cd rapids
-$ flake8 python
-```
-
-## Information
-
-There are some caveats to be aware of when using this script, especially if you plan on developing from within the container itself.
-
-
-### Docker Image Build Repository
-
-The docker image will generate build artifacts in a folder on your machine located in the `root` directory of the repository you passed to the script. For the above example, the directory is named `~/rapids/dask-cuda/build_rapidsai-base_cuda10.1-ubuntu16.04-gcc5-py3.6/`. Feel free to remove this directory after the script is finished.
-
-*Note*: The script *will not* override your local build repository. Your local environment stays in tact.
-
-
-### Where The User is Dumped
-
-The script will build your repository and run all tests. If any tests fail, it dumps the user into the docker container itself to allow you to debug from within the container. If all the tests pass as expected the container exits and is automatically removed. Remember to exit the container if tests fail and you do not wish to debug within the container itself.
-
-
-### Container File Structure
-
-Your repository will be located in the `/rapids/` folder of the container. This folder is volume mounted from the local machine. Any changes to the code in this repository are replicated onto the local machine. The `cpp/build` and `python/build` directories within your repository is on a separate mount to avoid conflicting with your local build artifacts.
diff --git a/ci/local/build.sh b/ci/local/build.sh
deleted file mode 100755
index 20d867e8c..000000000
--- a/ci/local/build.sh
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/bin/bash
-
-GIT_DESCRIBE_TAG=`git describe --tags`
-MINOR_VERSION=`echo $GIT_DESCRIBE_TAG | grep -o -E '([0-9]+\.[0-9]+)'`
-
-DOCKER_IMAGE="gpuci/rapidsai:${MINOR_VERSION}-cuda10.1-devel-ubuntu16.04-py3.7"
-REPO_PATH=${PWD}
-RAPIDS_DIR_IN_CONTAINER="/rapids"
-CPP_BUILD_DIR="cpp/build"
-PYTHON_BUILD_DIR="python/build"
-CONTAINER_SHELL_ONLY=0
-
-SHORTHELP="$(basename "$0") [-h] [-H] [-s] [-r <repo_dir>] [-i <image_name>]"
-LONGHELP="${SHORTHELP}
-Build and test your local repository using a base gpuCI Docker image
-
-where:
-    -H   Show this help text
-    -r   Path to repository (defaults to working directory)
-    -i   Use Docker image (default is ${DOCKER_IMAGE})
-    -s   Skip building and testing and start an interactive shell in a container of the Docker image
-"
-
-# Limit GPUs available to container based on CUDA_VISIBLE_DEVICES
-if [[ -z "${CUDA_VISIBLE_DEVICES}" ]]; then
-    NVIDIA_VISIBLE_DEVICES="all"
-else
-    NVIDIA_VISIBLE_DEVICES=${CUDA_VISIBLE_DEVICES}
-fi
-
-while getopts ":hHr:i:s" option; do
-    case ${option} in
-        r)
-            REPO_PATH=${OPTARG}
-            ;;
-        i)
-            DOCKER_IMAGE=${OPTARG}
-            ;;
-        s)
-            CONTAINER_SHELL_ONLY=1
-            ;;
-        h)
-            echo "${SHORTHELP}"
-            exit 0
-            ;;
-        H)
-            echo "${LONGHELP}"
-            exit 0
-            ;;
-        *)
-            echo "ERROR: Invalid flag"
-            echo "${SHORTHELP}"
-            exit 1
-            ;;
-    esac
-done
-
-REPO_PATH_IN_CONTAINER="${RAPIDS_DIR_IN_CONTAINER}/$(basename "${REPO_PATH}")"
-CPP_BUILD_DIR_IN_CONTAINER="${RAPIDS_DIR_IN_CONTAINER}/$(basename "${REPO_PATH}")/${CPP_BUILD_DIR}"
-PYTHON_BUILD_DIR_IN_CONTAINER="${RAPIDS_DIR_IN_CONTAINER}/$(basename "${REPO_PATH}")/${PYTHON_BUILD_DIR}"
-
-
-# BASE_CONTAINER_BUILD_DIR is named after the image name, allowing for
-# multiple image builds to coexist on the local filesystem. This will
-# be mapped to the typical BUILD_DIR inside of the container. Builds
-# running in the container generate build artifacts just as they would
-# in a bare-metal environment, and the host filesystem is able to
-# maintain the host build in BUILD_DIR as well.
-# FIXME: Fix the shellcheck complaints
-# shellcheck disable=SC2001,SC2005,SC2046
-BASE_CONTAINER_BUILD_DIR=${REPO_PATH}/build_$(echo $(basename "${DOCKER_IMAGE}")|sed -e 's/:/_/g')
-CPP_CONTAINER_BUILD_DIR=${BASE_CONTAINER_BUILD_DIR}/cpp
-PYTHON_CONTAINER_BUILD_DIR=${BASE_CONTAINER_BUILD_DIR}/python
-# Create build directories. This is to ensure correct owner for directories. If
If -# directories don't exist there is side effect from docker volume mounting creating build -# directories owned by root(volume mount point(s)) -mkdir -p "${REPO_PATH}/${CPP_BUILD_DIR}" -mkdir -p "${REPO_PATH}/${PYTHON_BUILD_DIR}" - -BUILD_SCRIPT="#!/bin/bash -set -e -WORKSPACE=${REPO_PATH_IN_CONTAINER} -PREBUILD_SCRIPT=${REPO_PATH_IN_CONTAINER}/ci/gpu/prebuild.sh -BUILD_SCRIPT=${REPO_PATH_IN_CONTAINER}/ci/gpu/build.sh -cd "\$WORKSPACE" -if [ -f \${PREBUILD_SCRIPT} ]; then - source \${PREBUILD_SCRIPT} -fi -yes | source \${BUILD_SCRIPT} -" - -if (( CONTAINER_SHELL_ONLY == 0 )); then - COMMAND="${CPP_BUILD_DIR_IN_CONTAINER}/build.sh || bash" -else - COMMAND="bash" -fi - -# Create the build dir for the container to mount, generate the build script inside of it -mkdir -p "${BASE_CONTAINER_BUILD_DIR}" -mkdir -p "${CPP_CONTAINER_BUILD_DIR}" -mkdir -p "${PYTHON_CONTAINER_BUILD_DIR}" -echo "${BUILD_SCRIPT}" > "${CPP_CONTAINER_BUILD_DIR}/build.sh" -chmod ugo+x "${CPP_CONTAINER_BUILD_DIR}/build.sh" - -# Mount passwd and group files to docker. This allows docker to resolve username and group -# avoiding these nags: -# * groups: cannot find name for group ID ID -# * I have no name!@id:/$ -# For ldap user user information is not present in system /etc/passwd and /etc/group files. -# Hence we generate dummy files for ldap users which docker uses to resolve username and group - -PASSWD_FILE="/etc/passwd" -GROUP_FILE="/etc/group" - -USER_FOUND=$(grep -wc "$(whoami)" < "$PASSWD_FILE") -if [ "$USER_FOUND" == 0 ]; then - echo "Local User not found, LDAP WAR for docker mounts activated. Creating dummy passwd and group" - echo "files to allow docker resolve username and group" - cp "$PASSWD_FILE" /tmp/passwd - PASSWD_FILE="/tmp/passwd" - cp "$GROUP_FILE" /tmp/group - GROUP_FILE="/tmp/group" - echo "$(whoami):x:$(id -u):$(id -g):$(whoami),,,:$HOME:$SHELL" >> "$PASSWD_FILE" - echo "$(whoami):x:$(id -g):" >> "$GROUP_FILE" -fi - -# Run the generated build script in a container -docker pull "${DOCKER_IMAGE}" - -DOCKER_MAJOR=$(docker -v|sed 's/[^[0-9]*\([0-9]*\).*/\1/') -GPU_OPTS="--gpus device=${NVIDIA_VISIBLE_DEVICES}" -if [ "$DOCKER_MAJOR" -lt 19 ] -then - GPU_OPTS="--runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=${NVIDIA_VISIBLE_DEVICES}" -fi - -docker run --rm -it ${GPU_OPTS} \ - -u "$(id -u)":"$(id -g)" \ - -v "${REPO_PATH}":"${REPO_PATH_IN_CONTAINER}" \ - -v "${CPP_CONTAINER_BUILD_DIR}":"${CPP_BUILD_DIR_IN_CONTAINER}" \ - -v "${PYTHON_CONTAINER_BUILD_DIR}":"${PYTHON_BUILD_DIR_IN_CONTAINER}" \ - -v "$PASSWD_FILE":/etc/passwd:ro \ - -v "$GROUP_FILE":/etc/group:ro \ - --cap-add=SYS_PTRACE \ - "${DOCKER_IMAGE}" bash -c "${COMMAND}" diff --git a/ci/release/update-version.sh b/ci/release/update-version.sh index cab06b0ad..b73037951 100755 --- a/ci/release/update-version.sh +++ b/ci/release/update-version.sh @@ -31,9 +31,6 @@ function sed_runner() { sed -i.bak ''"$1"'' $2 && rm -f ${2}.bak } -# Update UCX-Py version -sed_runner "s/export UCXPY_VERSION=.*/export UCXPY_VERSION="${NEXT_UCXPY_VERSION}"/g" ci/gpu/build.sh - # Bump cudf and dask-cudf testing dependencies sed_runner "s/cudf=.*/cudf=${NEXT_SHORT_TAG}/g" dependencies.yaml sed_runner "s/dask-cudf=.*/dask-cudf=${NEXT_SHORT_TAG}/g" dependencies.yaml