From 914715d4a11357dd7c1a36cc702d11d459811803 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:05:50 -0800 Subject: [PATCH 01/18] Update changelog after release (#2782) changelog Signed-off-by: Yury-Fridlyand --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e52f9b2161..dd3059aaef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +#### Changes + +#### Breaking Changes + +#### Fixes + +#### Operational Enhancements + +## 1.2.0 (2024-11-27) + #### Changes * Node: Client API for retrieving internal statistics ([#2727](https://github.com/valkey-io/valkey-glide/pull/2727)) * Python: Client API for retrieving internal statistics ([#2707](https://github.com/valkey-io/valkey-glide/pull/2707)) From 385c160c9b47785d81d004d374eb1f4f160f473c Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:23:16 -0800 Subject: [PATCH 02/18] Fix license header in core files (#2648) Signed-off-by: Yury-Fridlyand --- benchmarks/rust/src/main.rs | 4 +--- csharp/lib/src/lib.rs | 5 ++--- glide-core/benches/connections_benchmark.rs | 5 ++--- glide-core/benches/memory_benchmark.rs | 5 ++--- glide-core/benches/rotating_buffer_benchmark.rs | 5 ++--- glide-core/build.rs | 4 +--- glide-core/src/client/mod.rs | 5 ++--- glide-core/src/client/reconnecting_connection.rs | 5 ++--- glide-core/src/client/standalone_client.rs | 5 ++--- glide-core/src/client/types.rs | 4 +--- glide-core/src/client/value_conversion.rs | 5 ++--- glide-core/src/cluster_scan_container.rs | 5 ++--- glide-core/src/errors.rs | 4 +--- glide-core/src/lib.rs | 4 +--- glide-core/src/request_type.rs | 5 ++--- glide-core/src/retry_strategies.rs | 5 ++--- glide-core/src/rotating_buffer.rs | 4 +--- glide-core/src/scripts_container.rs | 5 ++--- glide-core/src/socket_listener.rs | 5 ++--- glide-core/tests/test_client.rs | 5 ++--- glide-core/tests/test_cluster_client.rs | 5 ++--- glide-core/tests/test_socket_listener.rs | 4 +--- 
glide-core/tests/test_standalone_client.rs | 5 ++--- glide-core/tests/utilities/cluster.rs | 5 ++--- glide-core/tests/utilities/mocks.rs | 5 ++--- glide-core/tests/utilities/mod.rs | 4 +--- go/src/lib.rs | 4 +--- java/src/errors.rs | 5 ++--- java/src/ffi_test.rs | 5 ++--- java/src/lib.rs | 5 ++--- node/rust-client/build.rs | 5 ++--- node/rust-client/src/lib.rs | 5 ++--- python/src/lib.rs | 5 ++--- 33 files changed, 57 insertions(+), 99 deletions(-) diff --git a/benchmarks/rust/src/main.rs b/benchmarks/rust/src/main.rs index edace91a30..f454928180 100644 --- a/benchmarks/rust/src/main.rs +++ b/benchmarks/rust/src/main.rs @@ -1,6 +1,4 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[cfg(not(target_env = "msvc"))] use tikv_jemallocator::Jemalloc; diff --git a/csharp/lib/src/lib.rs b/csharp/lib/src/lib.rs index eac9254a53..c497410e31 100644 --- a/csharp/lib/src/lib.rs +++ b/csharp/lib/src/lib.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use glide_core::client; use glide_core::client::Client as GlideClient; use glide_core::request_type::RequestType; diff --git a/glide-core/benches/connections_benchmark.rs b/glide-core/benches/connections_benchmark.rs index c26d5bba45..5f6075dec6 100644 --- a/glide-core/benches/connections_benchmark.rs +++ b/glide-core/benches/connections_benchmark.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use criterion::{criterion_group, criterion_main, Criterion}; use futures::future::join_all; use redis::{ diff --git a/glide-core/benches/memory_benchmark.rs b/glide-core/benches/memory_benchmark.rs index 1948d9a2cd..923ffeaa1b 100644 --- 
a/glide-core/benches/memory_benchmark.rs +++ b/glide-core/benches/memory_benchmark.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use glide_core::{ client::Client, connection_request::{ConnectionRequest, NodeAddress, TlsMode}, diff --git a/glide-core/benches/rotating_buffer_benchmark.rs b/glide-core/benches/rotating_buffer_benchmark.rs index 224d1b702b..7f543a21d3 100644 --- a/glide-core/benches/rotating_buffer_benchmark.rs +++ b/glide-core/benches/rotating_buffer_benchmark.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use std::io::Write; use bytes::BufMut; diff --git a/glide-core/build.rs b/glide-core/build.rs index cd838c9a53..9ba9ca89a7 100644 --- a/glide-core/build.rs +++ b/glide-core/build.rs @@ -1,6 +1,4 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[cfg(feature = "socket-layer")] fn build_protobuf() { diff --git a/glide-core/src/client/mod.rs b/glide-core/src/client/mod.rs index cfe8d6dc05..29a3a05393 100644 --- a/glide-core/src/client/mod.rs +++ b/glide-core/src/client/mod.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + mod types; use crate::cluster_scan_container::insert_cluster_scan_cursor; diff --git a/glide-core/src/client/reconnecting_connection.rs b/glide-core/src/client/reconnecting_connection.rs index 39a4c1db62..c882dd29d6 100644 --- a/glide-core/src/client/reconnecting_connection.rs +++ b/glide-core/src/client/reconnecting_connection.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX 
Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use super::{NodeAddress, TlsMode}; use crate::retry_strategies::RetryStrategy; use async_trait::async_trait; diff --git a/glide-core/src/client/standalone_client.rs b/glide-core/src/client/standalone_client.rs index c5e69fd6dd..5bc26999a8 100644 --- a/glide-core/src/client/standalone_client.rs +++ b/glide-core/src/client/standalone_client.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use super::get_redis_connection_info; use super::reconnecting_connection::{ReconnectReason, ReconnectingConnection}; use super::{ConnectionRequest, NodeAddress, TlsMode}; diff --git a/glide-core/src/client/types.rs b/glide-core/src/client/types.rs index 0c7680b3a6..a0053587c8 100644 --- a/glide-core/src/client/types.rs +++ b/glide-core/src/client/types.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[allow(unused_imports)] use logger_core::log_warn; diff --git a/glide-core/src/client/value_conversion.rs b/glide-core/src/client/value_conversion.rs index 6ba9dc757c..48996fc76d 100644 --- a/glide-core/src/client/value_conversion.rs +++ b/glide-core/src/client/value_conversion.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use redis::{ cluster_routing::Routable, from_owned_redis_value, Cmd, ErrorKind, RedisResult, Value, }; diff --git a/glide-core/src/cluster_scan_container.rs b/glide-core/src/cluster_scan_container.rs index 38cb1ec76d..2733b49092 100644 --- a/glide-core/src/cluster_scan_container.rs +++ b/glide-core/src/cluster_scan_container.rs @@ -1,6 +1,5 @@ -/** - * 
Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use logger_core::log_debug; use nanoid::nanoid; use once_cell::sync::Lazy; diff --git a/glide-core/src/errors.rs b/glide-core/src/errors.rs index b5f9b1af9e..b2e71eceaf 100644 --- a/glide-core/src/errors.rs +++ b/glide-core/src/errors.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 use redis::RedisError; diff --git a/glide-core/src/lib.rs b/glide-core/src/lib.rs index 9b22a8bb55..9450afef2c 100644 --- a/glide-core/src/lib.rs +++ b/glide-core/src/lib.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[cfg(feature = "socket-layer")] include!(concat!(env!("OUT_DIR"), "/protobuf/mod.rs")); diff --git a/glide-core/src/request_type.rs b/glide-core/src/request_type.rs index 9ee4ac92a9..d6b43ae027 100644 --- a/glide-core/src/request_type.rs +++ b/glide-core/src/request_type.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use redis::{cmd, Cmd}; #[cfg(feature = "socket-layer")] diff --git a/glide-core/src/retry_strategies.rs b/glide-core/src/retry_strategies.rs index 1a5157d225..3b60cc402e 100644 --- a/glide-core/src/retry_strategies.rs +++ b/glide-core/src/retry_strategies.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use crate::client::ConnectionRetryStrategy; use std::time::Duration; use tokio_retry2::strategy::{jitter_range, ExponentialBackoff}; diff --git 
a/glide-core/src/rotating_buffer.rs b/glide-core/src/rotating_buffer.rs index b87f666605..1bebb33c65 100644 --- a/glide-core/src/rotating_buffer.rs +++ b/glide-core/src/rotating_buffer.rs @@ -1,6 +1,4 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[allow(unused_imports)] use bytes::{Bytes, BytesMut}; use integer_encoding::VarInt; diff --git a/glide-core/src/scripts_container.rs b/glide-core/src/scripts_container.rs index a039593f79..22f5237c2f 100644 --- a/glide-core/src/scripts_container.rs +++ b/glide-core/src/scripts_container.rs @@ -1,7 +1,6 @@ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use bytes::BytesMut; -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ use logger_core::log_info; use once_cell::sync::Lazy; use sha1_smol::Sha1; diff --git a/glide-core/src/socket_listener.rs b/glide-core/src/socket_listener.rs index b9db4e6d99..f148bbdede 100644 --- a/glide-core/src/socket_listener.rs +++ b/glide-core/src/socket_listener.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use super::rotating_buffer::RotatingBuffer; use crate::client::Client; use crate::cluster_scan_container::get_cluster_scan_cursor; diff --git a/glide-core/tests/test_client.rs b/glide-core/tests/test_client.rs index ffc672fee6..b62210ed9f 100644 --- a/glide-core/tests/test_client.rs +++ b/glide-core/tests/test_client.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + mod utilities; #[macro_export] diff --git a/glide-core/tests/test_cluster_client.rs b/glide-core/tests/test_cluster_client.rs index 1c60dc8c79..2943ab21bb 100644 --- 
a/glide-core/tests/test_cluster_client.rs +++ b/glide-core/tests/test_cluster_client.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + mod utilities; #[cfg(test)] diff --git a/glide-core/tests/test_socket_listener.rs b/glide-core/tests/test_socket_listener.rs index 6f2aa566b9..5921236e36 100644 --- a/glide-core/tests/test_socket_listener.rs +++ b/glide-core/tests/test_socket_listener.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #![cfg(feature = "socket-layer")] use glide_core::*; diff --git a/glide-core/tests/test_standalone_client.rs b/glide-core/tests/test_standalone_client.rs index c118d6d28f..77363b5c18 100644 --- a/glide-core/tests/test_standalone_client.rs +++ b/glide-core/tests/test_standalone_client.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + mod utilities; #[cfg(test)] diff --git a/glide-core/tests/utilities/cluster.rs b/glide-core/tests/utilities/cluster.rs index 8f7ed6aca0..3cb298ffef 100644 --- a/glide-core/tests/utilities/cluster.rs +++ b/glide-core/tests/utilities/cluster.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use super::{create_connection_request, ClusterMode, TestConfiguration}; use futures::future::{join_all, BoxFuture}; use futures::FutureExt; diff --git a/glide-core/tests/utilities/mocks.rs b/glide-core/tests/utilities/mocks.rs index 33b8ae4121..198abe716a 100644 --- a/glide-core/tests/utilities/mocks.rs +++ b/glide-core/tests/utilities/mocks.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey 
GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use futures_intrusive::sync::ManualResetEvent; use redis::{Cmd, ConnectionAddr, Value}; use std::collections::HashMap; diff --git a/glide-core/tests/utilities/mod.rs b/glide-core/tests/utilities/mod.rs index 7318d6e640..adae3aed07 100644 --- a/glide-core/tests/utilities/mod.rs +++ b/glide-core/tests/utilities/mod.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #![allow(dead_code)] use futures::Future; diff --git a/go/src/lib.rs b/go/src/lib.rs index 344dac6e45..1a04c7f756 100644 --- a/go/src/lib.rs +++ b/go/src/lib.rs @@ -1,6 +1,4 @@ -/* -* Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 -*/ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #![deny(unsafe_op_in_unsafe_fn)] use glide_core::client::Client as GlideClient; diff --git a/java/src/errors.rs b/java/src/errors.rs index 93372a7257..3137a9583e 100644 --- a/java/src/errors.rs +++ b/java/src/errors.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use jni::{errors::Error as JNIError, JNIEnv}; use log::error; use std::string::FromUtf8Error; diff --git a/java/src/ffi_test.rs b/java/src/ffi_test.rs index ee3bd4ad32..fb54fc3b5b 100644 --- a/java/src/ffi_test.rs +++ b/java/src/ffi_test.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use crate::errors::{handle_errors, handle_panics, throw_java_exception, ExceptionType, FFIError}; use jni::{ objects::{JByteArray, JClass, JLongArray, JString}, diff --git a/java/src/lib.rs 
b/java/src/lib.rs index 311d9a13dc..3b493aa61e 100644 --- a/java/src/lib.rs +++ b/java/src/lib.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use glide_core::start_socket_listener as start_socket_listener_core; // Protocol constants to expose to Java. diff --git a/node/rust-client/build.rs b/node/rust-client/build.rs index af38e8a35e..5284801493 100644 --- a/node/rust-client/build.rs +++ b/node/rust-client/build.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + extern crate napi_build; fn main() { diff --git a/node/rust-client/src/lib.rs b/node/rust-client/src/lib.rs index b15b18f521..963f966f24 100644 --- a/node/rust-client/src/lib.rs +++ b/node/rust-client/src/lib.rs @@ -1,8 +1,7 @@ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use glide_core::Telemetry; use redis::GlideConnectionOptions; -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ #[cfg(not(target_env = "msvc"))] use tikv_jemallocator::Jemalloc; diff --git a/python/src/lib.rs b/python/src/lib.rs index 6b41123dd3..e3b9a298cb 100644 --- a/python/src/lib.rs +++ b/python/src/lib.rs @@ -1,8 +1,7 @@ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use bytes::Bytes; use glide_core::client::FINISHED_SCAN_CURSOR; -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ use glide_core::start_socket_listener; use glide_core::Telemetry; use glide_core::MAX_REQUEST_ARGS_LENGTH; From 96f2597d5b3c42b9fe13c067dde0108a84f9ea37 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:31:25 -0800 Subject: [PATCH 03/18] Java: bump `netty` version (#2777) * bump netty Signed-off-by: Yury-Fridlyand --- 
CHANGELOG.md | 1 + java/client/build.gradle | 8 ++++---- java/integTest/build.gradle | 6 +++--- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd3059aaef..429093dec9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ #### Changes +* Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) #### Breaking Changes diff --git a/java/client/build.gradle b/java/client/build.gradle index cc2446251d..7337c16567 100644 --- a/java/client/build.gradle +++ b/java/client/build.gradle @@ -17,12 +17,12 @@ dependencies { implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.27.1' implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.13.0' - implementation group: 'io.netty', name: 'netty-handler', version: '4.1.100.Final' + implementation group: 'io.netty', name: 'netty-handler', version: '4.1.115.Final' // https://github.com/netty/netty/wiki/Native-transports // At the moment, Windows is not supported - implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.100.Final', classifier: 'linux-x86_64' - implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.100.Final', classifier: 'linux-aarch_64' - implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.100.Final', classifier: 'osx-aarch_64' + implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.115.Final', classifier: 'linux-x86_64' + implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.115.Final', classifier: 'linux-aarch_64' + implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.115.Final', classifier: 'osx-aarch_64' // junit testImplementation group: 'org.mockito', name: 'mockito-inline', version: '3.12.4' diff --git a/java/integTest/build.gradle b/java/integTest/build.gradle index 
53b690aa49..3e97f58f10 100644 --- a/java/integTest/build.gradle +++ b/java/integTest/build.gradle @@ -15,9 +15,9 @@ dependencies { // https://github.com/netty/netty/wiki/Native-transports // At the moment, Windows is not supported - implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.100.Final', classifier: 'linux-x86_64' - implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.100.Final', classifier: 'linux-aarch_64' - implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.100.Final', classifier: 'osx-aarch_64' + implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.115.Final', classifier: 'linux-x86_64' + implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.115.Final', classifier: 'osx-x86_64' + implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.115.Final', classifier: 'osx-aarch_64' // junit testImplementation 'org.mockito:mockito-junit-jupiter:3.12.4' From c99f30b63ba068bd4bed3747fc354c1d7b3f6ad4 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:48:24 -0800 Subject: [PATCH 04/18] [backport to 1.2] Bump protoc (#2561) * Bump protoc Signed-off-by: Yury-Fridlyand Co-authored-by: Andrew Carbonetto --- .github/workflows/java-cd.yml | 4 ++-- .github/workflows/java.yml | 4 ++-- CHANGELOG.md | 1 + java/DEVELOPER.md | 12 ++++++------ java/client/build.gradle | 10 +++++----- 5 files changed, 16 insertions(+), 15 deletions(-) diff --git a/.github/workflows/java-cd.yml b/.github/workflows/java-cd.yml index f4c0146342..7033141881 100644 --- a/.github/workflows/java-cd.yml +++ b/.github/workflows/java-cd.yml @@ -102,7 +102,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "28.2" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Create secret key ring file @@ -245,7 +245,7 @@ jobs: - name: Install 
protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "28.2" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Start standalone Valkey server diff --git a/.github/workflows/java.yml b/.github/workflows/java.yml index 66c99cca3e..449c9f1a65 100644 --- a/.github/workflows/java.yml +++ b/.github/workflows/java.yml @@ -103,7 +103,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "28.2" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Build java client @@ -184,7 +184,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "28.2" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Build java wrapper diff --git a/CHANGELOG.md b/CHANGELOG.md index 429093dec9..e0679a64d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ #### Changes +* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561)) * Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) #### Breaking Changes diff --git a/java/DEVELOPER.md b/java/DEVELOPER.md index d8bc19b99c..3a275425fc 100644 --- a/java/DEVELOPER.md +++ b/java/DEVELOPER.md @@ -17,7 +17,7 @@ The Valkey GLIDE Java wrapper consists of both Java and Rust code. Rust bindings - git - GCC - pkg-config -- protoc (protobuf compiler) >= 26.1 +- protoc (protobuf compiler) >= 28.2 - openssl - openssl-dev - rustup @@ -64,17 +64,17 @@ Continue with **Install protobuf compiler** below. 
To install protobuf for MacOS, run: ```bash brew install protobuf -# Check that the protobuf compiler version 26.1 or higher is installed +# Check that the protobuf compiler version 28.2 or higher is installed protoc --version ``` For the remaining systems, do the following: ```bash PB_REL="https://github.com/protocolbuffers/protobuf/releases" -curl -LO $PB_REL/download/v26.1/protoc-26.1-linux-x86_64.zip -unzip protoc-26.1-linux-x86_64.zip -d $HOME/.local +curl -LO $PB_REL/download/v28.2/protoc-28.2-linux-x86_64.zip +unzip protoc-28.2-linux-x86_64.zip -d $HOME/.local export PATH="$PATH:$HOME/.local/bin" -# Check that the protobuf compiler version 26.1 or higher is installed +# Check that the protobuf compiler version 28.2 or higher is installed protoc --version ``` @@ -165,7 +165,7 @@ Some troubleshooting issues: - Failed to find `cargo` after `rustup`. - No Protobuf compiler (protoc) found. - If build fails because of rust compiler fails, make sure submodules are updated using `git submodule update`. -- If protobuf 26.0 or earlier is detected, upgrade to the latest protobuf release. +- If protobuf 28.0 or earlier is detected, upgrade to the latest protobuf release. 
## Running Examples App diff --git a/java/client/build.gradle b/java/client/build.gradle index 7337c16567..fa617fb131 100644 --- a/java/client/build.gradle +++ b/java/client/build.gradle @@ -14,7 +14,7 @@ repositories { } dependencies { - implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.27.1' + implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.28.2' implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.13.0' implementation group: 'io.netty', name: 'netty-handler', version: '4.1.115.Final' @@ -38,11 +38,11 @@ dependencies { ext { checkProtocVersion = { String output -> - // Line in format like: libprotoc 26.1 + // Line in format like: libprotoc 28.1 int majorVersion = Integer.parseInt(output.split(" ")[1].split("\\.")[0].trim()); int minorVersion = Integer.parseInt(output.split(" ")[1].split("\\.")[1].trim()); - if (majorVersion < 26 || (majorVersion == 26 && minorVersion < 1)) { - throw new GradleException("Protobuf compiler (protoc) version 26.1 or newer is required. Current version: $output"); + if (majorVersion < 28) { + throw new GradleException("Protobuf compiler (protoc) version 28.0 or newer is required. Current version: $output"); } return output.split(" ")[1] } @@ -61,7 +61,7 @@ tasks.register('protobuf', Exec) { } } catch (Exception e) { if (e.getMessage().startsWith("A problem occurred starting process")) { - throw new GradleException("No Protobuf compiler (protoc) found. Protobuf compiler version 26.1 or newer is required."); + throw new GradleException("No Protobuf compiler (protoc) found. 
Protobuf compiler version 28.0 or newer is required."); } throw e } From 2dd48fe21bbd3691c2d6fa764815c306ee86f4d1 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:55:23 -0800 Subject: [PATCH 05/18] Apply linters to submodule (#2783) * lint Signed-off-by: Yury-Fridlyand --- glide-core/redis-rs/redis/src/cluster.rs | 2 +- .../redis-rs/redis/src/cluster_routing.rs | 2 +- glide-core/redis-rs/redis/src/cmd.rs | 2 +- .../redis/src/commands/cluster_scan.rs | 30 +++++++++---------- glide-core/redis-rs/redis/src/connection.rs | 4 +-- glide-core/redis-rs/redis/src/types.rs | 4 +-- .../test_async_cluster_connections_logic.rs | 8 +++++ .../redis/tests/test_cluster_async.rs | 12 ++++---- 8 files changed, 36 insertions(+), 28 deletions(-) diff --git a/glide-core/redis-rs/redis/src/cluster.rs b/glide-core/redis-rs/redis/src/cluster.rs index ffd537152a..57d3a7eae4 100644 --- a/glide-core/redis-rs/redis/src/cluster.rs +++ b/glide-core/redis-rs/redis/src/cluster.rs @@ -105,7 +105,7 @@ impl<'a> Input<'a> { } } -impl<'a> Routable for Input<'a> { +impl Routable for Input<'_> { fn arg_idx(&self, idx: usize) -> Option<&[u8]> { match self { Input::Slice { cmd: _, routable } => routable.arg_idx(idx), diff --git a/glide-core/redis-rs/redis/src/cluster_routing.rs b/glide-core/redis-rs/redis/src/cluster_routing.rs index eab3bf398a..8bf11d19d4 100644 --- a/glide-core/redis-rs/redis/src/cluster_routing.rs +++ b/glide-core/redis-rs/redis/src/cluster_routing.rs @@ -1378,7 +1378,7 @@ impl ShardAddrs { } } -impl<'a> IntoIterator for &'a ShardAddrs { +impl IntoIterator for &ShardAddrs { type Item = Arc; type IntoIter = std::iter::Chain>, std::vec::IntoIter>>; diff --git a/glide-core/redis-rs/redis/src/cmd.rs b/glide-core/redis-rs/redis/src/cmd.rs index 3e248dad6f..92e8aea989 100644 --- a/glide-core/redis-rs/redis/src/cmd.rs +++ b/glide-core/redis-rs/redis/src/cmd.rs @@ -40,7 +40,7 @@ pub struct Iter<'a, T: FromRedisValue> { cmd: Cmd, } -impl<'a, T: FromRedisValue> Iterator for 
Iter<'a, T> { +impl Iterator for Iter<'_, T> { type Item = T; #[inline] diff --git a/glide-core/redis-rs/redis/src/commands/cluster_scan.rs b/glide-core/redis-rs/redis/src/commands/cluster_scan.rs index 0fccb0e6f5..109aceca22 100644 --- a/glide-core/redis-rs/redis/src/commands/cluster_scan.rs +++ b/glide-core/redis-rs/redis/src/commands/cluster_scan.rs @@ -1,3 +1,18 @@ +//! This module contains the implementation of scanning operations in a Redis cluster. +//! +//! The [`ClusterScanArgs`] struct represents the arguments for a cluster scan operation, +//! including the scan state reference, match pattern, count, and object type. +//! +//! The [[`ScanStateRC`]] struct is a wrapper for managing the state of a scan operation in a cluster. +//! It holds a reference to the scan state and provides methods for accessing the state. +//! +//! The [[`ClusterInScan`]] trait defines the methods for interacting with a Redis cluster during scanning, +//! including retrieving address information, refreshing slot mapping, and routing commands to specific address. +//! +//! The [[`ScanState`]] struct represents the state of a scan operation in a Redis cluster. +//! It holds information about the current scan state, including the cursor position, scanned slots map, +//! address being scanned, and address's epoch. + use crate::aio::ConnectionLike; use crate::cluster_async::{ ClusterConnInner, Connect, Core, InternalRoutingInfo, InternalSingleNodeRouting, RefreshPolicy, @@ -10,21 +25,6 @@ use async_trait::async_trait; use std::sync::Arc; use strum_macros::Display; -/// This module contains the implementation of scanning operations in a Redis cluster. -/// -/// The [`ClusterScanArgs`] struct represents the arguments for a cluster scan operation, -/// including the scan state reference, match pattern, count, and object type. -/// -/// The [[`ScanStateRC`]] struct is a wrapper for managing the state of a scan operation in a cluster. 
-/// It holds a reference to the scan state and provides methods for accessing the state. -/// -/// The [[`ClusterInScan`]] trait defines the methods for interacting with a Redis cluster during scanning, -/// including retrieving address information, refreshing slot mapping, and routing commands to specific address. -/// -/// The [[`ScanState`]] struct represents the state of a scan operation in a Redis cluster. -/// It holds information about the current scan state, including the cursor position, scanned slots map, -/// address being scanned, and address's epoch. - const BITS_PER_U64: usize = u64::BITS as usize; const NUM_OF_SLOTS: usize = SLOT_SIZE as usize; const BITS_ARRAY_SIZE: usize = NUM_OF_SLOTS / BITS_PER_U64; diff --git a/glide-core/redis-rs/redis/src/connection.rs b/glide-core/redis-rs/redis/src/connection.rs index f75b9df494..527fb40fa0 100644 --- a/glide-core/redis-rs/redis/src/connection.rs +++ b/glide-core/redis-rs/redis/src/connection.rs @@ -287,7 +287,7 @@ impl IntoConnectionInfo for ConnectionInfo { /// - Specifying DB: `redis://127.0.0.1:6379/0` /// - Enabling TLS: `rediss://127.0.0.1:6379` /// - Enabling Insecure TLS: `rediss://127.0.0.1:6379/#insecure` -impl<'a> IntoConnectionInfo for &'a str { +impl IntoConnectionInfo for &str { fn into_connection_info(self) -> RedisResult { match parse_redis_url(self) { Some(u) => u.into_connection_info(), @@ -1578,7 +1578,7 @@ impl<'a> PubSub<'a> { } } -impl<'a> Drop for PubSub<'a> { +impl Drop for PubSub<'_> { fn drop(&mut self) { let _ = self.con.exit_pubsub(); } diff --git a/glide-core/redis-rs/redis/src/types.rs b/glide-core/redis-rs/redis/src/types.rs index 2d8035d697..f66be0291e 100644 --- a/glide-core/redis-rs/redis/src/types.rs +++ b/glide-core/redis-rs/redis/src/types.rs @@ -1443,7 +1443,7 @@ impl ToRedisArgs for String { } } -impl<'a> ToRedisArgs for &'a str { +impl ToRedisArgs for &str { fn write_redis_args(&self, out: &mut W) where W: ?Sized + RedisWrite, @@ -1465,7 +1465,7 @@ impl ToRedisArgs 
for Vec { } } -impl<'a, T: ToRedisArgs> ToRedisArgs for &'a [T] { +impl ToRedisArgs for &[T] { fn write_redis_args(&self, out: &mut W) where W: ?Sized + RedisWrite, diff --git a/glide-core/redis-rs/redis/tests/test_async_cluster_connections_logic.rs b/glide-core/redis-rs/redis/tests/test_async_cluster_connections_logic.rs index 356c5bfc8c..1b4acb3a20 100644 --- a/glide-core/redis-rs/redis/tests/test_async_cluster_connections_logic.rs +++ b/glide-core/redis-rs/redis/tests/test_async_cluster_connections_logic.rs @@ -236,6 +236,7 @@ mod test_connect_and_check { ConnectionDetails { conn: user_conn, ip: Some(ip), + az: None, } .into_future(), None, @@ -283,6 +284,7 @@ mod test_connect_and_check { ConnectionDetails { conn: user_conn, ip: prev_ip, + az: None, } .into_future(), None, @@ -339,12 +341,14 @@ mod test_connect_and_check { ConnectionDetails { conn: old_user_conn, ip: Some(prev_ip), + az: None, } .into_future(), Some( ConnectionDetails { conn: management_conn, ip: Some(prev_ip), + az: None, } .into_future(), ), @@ -380,12 +384,14 @@ mod test_check_node_connections { ConnectionDetails { conn: get_mock_connection_with_port(name, 1, 6380), ip, + az: None, } .into_future(), Some( ConnectionDetails { conn: get_mock_connection_with_port(name, 2, 6381), ip, + az: None, } .into_future(), ), @@ -463,6 +469,7 @@ mod test_check_node_connections { ConnectionDetails { conn: get_mock_connection(name, 1), ip, + az: None, } .into_future(), None, @@ -547,6 +554,7 @@ mod test_check_node_connections { ConnectionDetails { conn: get_mock_connection(name, 1), ip: None, + az: None, } .into_future(), None, diff --git a/glide-core/redis-rs/redis/tests/test_cluster_async.rs b/glide-core/redis-rs/redis/tests/test_cluster_async.rs index 7273f98702..c69a9f933f 100644 --- a/glide-core/redis-rs/redis/tests/test_cluster_async.rs +++ b/glide-core/redis-rs/redis/tests/test_cluster_async.rs @@ -23,7 +23,7 @@ async fn engine_version_less_than(min_version: &str) -> bool { ); return true; } - return 
false; + false } /// Static function to get the engine version. When version looks like 8.0.0 -> 80000 and 12.0.1 -> 120001. @@ -49,10 +49,10 @@ async fn get_cluster_version() -> usize { cluster_version.set( parse_version_from_info(info_result.clone()) - .expect(format!("Invalid version string in INFO : {info_result}").as_str()), + .unwrap_or_else(|| panic!("Invalid version string in INFO : {info_result}")), ); } - return cluster_version.get(); + cluster_version.get() } fn parse_version_from_info(info: String) -> Option { @@ -273,7 +273,7 @@ mod cluster_async { .unwrap(); let info_result = redis::from_owned_redis_value::>(info).unwrap(); - let get_cmdstat = format!("cmdstat_get:calls="); + let get_cmdstat = "cmdstat_get:calls=".to_string(); let n_get_cmdstat = format!("cmdstat_get:calls={}", n); let client_az = format!("availability_zone:{}", az); @@ -363,7 +363,7 @@ mod cluster_async { .unwrap(); let info_result = redis::from_owned_redis_value::>(info).unwrap(); - let get_cmdstat = format!("cmdstat_get:calls="); + let get_cmdstat = "cmdstat_get:calls=".to_string(); let n_get_cmdstat = format!("cmdstat_get:calls={}", n); let client_az = format!("availability_zone:{}", az); @@ -385,7 +385,7 @@ mod cluster_async { (matching_entries_count.try_into() as Result).unwrap(), replica_num, "Test failed: expected exactly '{}' entries with '{}' and '{}', found {}", - replica_num.to_string(), + replica_num, get_cmdstat, client_az, matching_entries_count From 36a5d89c458857aebe6473fb055469a6c7da928d Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 11:07:31 -0800 Subject: [PATCH 06/18] Node: Fix modules CI (#2768) Fix GHA Signed-off-by: Yury-Fridlyand --- node/DEVELOPER.md | 1 + node/package.json | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/node/DEVELOPER.md b/node/DEVELOPER.md index 8878fdd91d..9568e3be82 100644 --- a/node/DEVELOPER.md +++ b/node/DEVELOPER.md @@ -156,6 +156,7 @@ In order to run these tests, use: ```bash npm run 
test-modules -- --cluster-endpoints=
: ``` + Note: these tests don't run with standalone server as of now. ### REPL (interactive shell) diff --git a/node/package.json b/node/package.json index b581123576..5936805685 100644 --- a/node/package.json +++ b/node/package.json @@ -35,7 +35,7 @@ "test": "npm run build-test-utils && jest --verbose --testPathIgnorePatterns='ServerModules'", "test-dbg": "npm run build-test-utils && jest --runInBand", "test-minimum": "npm run build-test-utils && jest --verbose --testNamePattern='^(.(?!(GlideJson|GlideFt|pubsub|kill)))*$'", - "test-modules": "npm run build-test-utils && jest --verbose --testNamePattern='(GlideJson|GlideFt)'", + "test-modules": "npm run build-test-utils && jest --runInBand --verbose --testNamePattern='(GlideJson|GlideFt)'", "build-test-utils": "cd ../utils && npm i && npm run build", "lint:fix": "npm run install-linting && npx eslint -c ../eslint.config.mjs --fix && npm run prettier:format", "lint": "npm run install-linting && npx eslint -c ../eslint.config.mjs && npm run prettier:check:ci", From b69bba44815e520e3a35113b59ac2fabf7949250 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Tue, 10 Dec 2024 10:00:41 -0800 Subject: [PATCH 07/18] Backport #2793 to release-1.2 - Fix rust CI: bump idna (#2794) Fix rust CI: bump `idna`. (#2793) Bump `idna`. 
Signed-off-by: Yury-Fridlyand --- glide-core/redis-rs/redis/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/glide-core/redis-rs/redis/Cargo.toml b/glide-core/redis-rs/redis/Cargo.toml index 25b06f64c2..534b59fc81 100644 --- a/glide-core/redis-rs/redis/Cargo.toml +++ b/glide-core/redis-rs/redis/Cargo.toml @@ -33,7 +33,7 @@ strum_macros = "0.26" percent-encoding = "2.1" # We need this for redis url parsing -url = "= 2.5.0" +url = "2.5.4" # We need this for script support sha1_smol = { version = "1.0", optional = true } From 54d624c26ac289e06209f32acf1f7ed266be661c Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Tue, 10 Dec 2024 10:42:51 -0800 Subject: [PATCH 08/18] Fix value conversion for `CONFIG GET`. (#2381) Signed-off-by: Yury-Fridlyand --- CHANGELOG.md | 2 + glide-core/src/client/value_conversion.rs | 84 ++++++++++++----------- node/tests/GlideClusterClient.test.ts | 21 ++++++ python/python/tests/test_async_client.py | 12 ++++ 4 files changed, 80 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e0679a64d2..44a8ca101e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -108,6 +108,8 @@ #### Fixes * Core: UDS Socket Handling Rework ([#2482](https://github.com/valkey-io/valkey-glide/pull/2482)) +* Core: Fix RESP2 multi-node response from cluster ([#2381](https://github.com/valkey-io/valkey-glide/pull/2381)) + #### Operational Enhancements * Java: Add modules CI ([#2388](https://github.com/valkey-io/valkey-glide/pull/2388), [#2404](https://github.com/valkey-io/valkey-glide/pull/2404), [#2416](https://github.com/valkey-io/valkey-glide/pull/2416)) diff --git a/glide-core/src/client/value_conversion.rs b/glide-core/src/client/value_conversion.rs index 48996fc76d..7eafe3f373 100644 --- a/glide-core/src/client/value_conversion.rs +++ b/glide-core/src/client/value_conversion.rs @@ -10,6 +10,11 @@ pub(crate) enum ExpectedReturnType<'a> { key_type: &'a Option>, value_type: &'a Option>, }, + // Second parameter is a 
function which returns true if value needs to be converted + SingleOrMultiNode( + &'a Option>, + Option bool>, + ), MapOfStringToDouble, Double, Boolean, @@ -278,12 +283,6 @@ pub(crate) fn convert_to_expected_type( }, ExpectedReturnType::Lolwut => { match value { - // cluster (multi-node) response - go recursive - Value::Map(map) => convert_map_entries( - map, - Some(ExpectedReturnType::BulkString), - Some(ExpectedReturnType::Lolwut), - ), // RESP 2 response Value::BulkString(bytes) => { let text = std::str::from_utf8(&bytes).unwrap(); @@ -558,19 +557,7 @@ pub(crate) fn convert_to_expected_type( // Second part is converted as `Map[str, Map[str, int]]` ExpectedReturnType::FunctionStatsReturnType => match value { // TODO reuse https://github.com/Bit-Quill/glide-for-redis/pull/331 and https://github.com/valkey-io/valkey-glide/pull/1489 - Value::Map(map) => { - if map[0].0 == Value::BulkString(b"running_script".into()) { - // already a RESP3 response - do nothing - Ok(Value::Map(map)) - } else { - // cluster (multi-node) response - go recursive - convert_map_entries( - map, - Some(ExpectedReturnType::BulkString), - Some(ExpectedReturnType::FunctionStatsReturnType), - ) - } - } + Value::Map(map) => Ok(Value::Map(map)), Value::Array(mut array) if array.len() == 4 => { let mut result: Vec<(Value, Value)> = Vec::with_capacity(2); let running_script_info = array.remove(1); @@ -1144,6 +1131,19 @@ pub(crate) fn convert_to_expected_type( ) .into()) } + ExpectedReturnType::SingleOrMultiNode(value_type, value_checker) => match value { + Value::Map(ref map) => match value_checker { + Some(func) => { + if !map.is_empty() && func(map[0].clone().1) { + convert_to_expected_type(value, Some(ExpectedReturnType::Map { key_type: &None, value_type })) + } else { + Ok(value) + } + } + None => convert_to_expected_type(value, Some(ExpectedReturnType::Map { key_type: &None, value_type })), + } + _ => convert_to_expected_type(value, *value_type), + } } } @@ -1392,12 +1392,19 @@ pub(crate) fn 
expected_type_for_cmd(cmd: &Cmd) -> Option { // TODO use enum to avoid mistakes match command.as_slice() { - b"HGETALL" | b"CONFIG GET" | b"FT.CONFIG GET" | b"FT._ALIASLIST" | b"HELLO" => { + b"HGETALL" | b"FT.CONFIG GET" | b"FT._ALIASLIST" | b"HELLO" => { Some(ExpectedReturnType::Map { key_type: &None, value_type: &None, }) } + b"CONFIG GET" => Some(ExpectedReturnType::SingleOrMultiNode( + &Some(ExpectedReturnType::Map { + key_type: &None, + value_type: &None, + }), + Some(|val| matches!(val, Value::Array(_))), + )), b"XCLAIM" => { if cmd.position(b"JUSTID").is_some() { Some(ExpectedReturnType::ArrayOfStrings) @@ -1481,11 +1488,17 @@ pub(crate) fn expected_type_for_cmd(cmd: &Cmd) -> Option { None } } - b"LOLWUT" => Some(ExpectedReturnType::Lolwut), + b"LOLWUT" => Some(ExpectedReturnType::SingleOrMultiNode( + &Some(ExpectedReturnType::Lolwut), + None, + )), b"FUNCTION LIST" => Some(ExpectedReturnType::ArrayOfMaps(&Some( ExpectedReturnType::ArrayOfMaps(&Some(ExpectedReturnType::StringOrSet)), ))), - b"FUNCTION STATS" => Some(ExpectedReturnType::FunctionStatsReturnType), + b"FUNCTION STATS" => Some(ExpectedReturnType::SingleOrMultiNode( + &Some(ExpectedReturnType::FunctionStatsReturnType), + Some(|val| matches!(val, Value::Array(_))), + )), b"GEOSEARCH" => { if cmd.position(b"WITHDIST").is_some() || cmd.position(b"WITHHASH").is_some() @@ -1953,17 +1966,14 @@ mod tests { #[test] fn convert_lolwut() { - assert!(matches!( - expected_type_for_cmd(redis::cmd("LOLWUT").arg("version").arg("42")), - Some(ExpectedReturnType::Lolwut) - )); - let unconverted_string : String = "\x1b[0;97;107m \x1b[0m--\x1b[0;37;47m \x1b[0m--\x1b[0;90;100m \x1b[0m--\x1b[0;30;40m \x1b[0m".into(); let expected: String = "\u{2591}--\u{2592}--\u{2593}-- ".into(); + let mut cmd = redis::cmd("LOLWUT"); + let conversion_type = expected_type_for_cmd(cmd.arg("version").arg("42")); let converted_1 = convert_to_expected_type( Value::BulkString(unconverted_string.clone().into_bytes()), - 
Some(ExpectedReturnType::Lolwut), + conversion_type, ); assert_eq!( Value::BulkString(expected.clone().into_bytes()), @@ -1975,7 +1985,7 @@ mod tests { format: redis::VerbatimFormat::Text, text: unconverted_string.clone(), }, - Some(ExpectedReturnType::Lolwut), + conversion_type, ); assert_eq!( Value::BulkString(expected.clone().into_bytes()), @@ -1993,16 +2003,16 @@ mod tests { Value::BulkString(unconverted_string.clone().into_bytes()), ), ]), - Some(ExpectedReturnType::Lolwut), + conversion_type, ); assert_eq!( Value::Map(vec![ ( - Value::BulkString("node 1".into()), + Value::SimpleString("node 1".into()), Value::BulkString(expected.clone().into_bytes()) ), ( - Value::BulkString("node 2".into()), + Value::SimpleString("node 2".into()), Value::BulkString(expected.clone().into_bytes()) ), ]), @@ -2011,7 +2021,7 @@ mod tests { let converted_4 = convert_to_expected_type( Value::SimpleString(unconverted_string.clone()), - Some(ExpectedReturnType::Lolwut), + conversion_type, ); assert!(converted_4.is_err()); } @@ -2521,11 +2531,6 @@ mod tests { #[test] fn convert_function_stats() { - assert!(matches!( - expected_type_for_cmd(redis::cmd("FUNCTION").arg("STATS")), - Some(ExpectedReturnType::FunctionStatsReturnType) - )); - let resp2_response_non_empty_first_part_data = vec![ Value::BulkString(b"running_script".into()), Value::Array(vec![ @@ -2652,7 +2657,8 @@ mod tests { ), ]); - let conversion_type = Some(ExpectedReturnType::FunctionStatsReturnType); + let cmd = redis::cmd("FUNCTION STATS"); + let conversion_type = expected_type_for_cmd(&cmd); // resp2 -> resp3 conversion with non-empty `running_script` block assert_eq!( convert_to_expected_type( diff --git a/node/tests/GlideClusterClient.test.ts b/node/tests/GlideClusterClient.test.ts index f74e137cac..2c8325a013 100644 --- a/node/tests/GlideClusterClient.test.ts +++ b/node/tests/GlideClusterClient.test.ts @@ -328,6 +328,27 @@ describe("GlideClusterClient", () => { TIMEOUT, ); + it.each([ProtocolVersion.RESP2, 
ProtocolVersion.RESP3])( + `config get with wildcard and multi node route %p`, + async (protocol) => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption(cluster.getAddresses(), protocol), + ); + const result = await client.configGet(["*file"], { + route: "allPrimaries", + }); + Object.values( + result as Record>, + ).forEach((resp) => { + const keys = Object.keys(resp); + expect(keys.length).toBeGreaterThan(5); + expect(keys).toContain("pidfile"); + expect(keys).toContain("logfile"); + }); + }, + TIMEOUT, + ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `can send transactions_%p`, async (protocol) => { diff --git a/python/python/tests/test_async_client.py b/python/python/tests/test_async_client.py index b32aa6936d..d8d65d1ca7 100644 --- a/python/python/tests/test_async_client.py +++ b/python/python/tests/test_async_client.py @@ -770,6 +770,18 @@ async def test_config_get_set(self, glide_client: TGlideClient): == OK ) + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_config_get_with_wildcard_and_multi_node_route( + self, glide_client: GlideClusterClient + ): + result = await glide_client.config_get(["*file"], AllPrimaries()) + assert isinstance(result, Dict) + for resp in result.values(): + assert len(resp) > 5 + assert b"pidfile" in resp + assert b"logfile" in resp + @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_decr_decrby_existing_key(self, glide_client: TGlideClient): From 79c3bc9ecb959fcba642c297324f57c0a243cb03 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Tue, 10 Dec 2024 17:22:19 -0800 Subject: [PATCH 09/18] [Backport 1.2] Protobuf: bump to 29.1 (#2802) * Protobuf: bump to 29.1 (#2801) Signed-off-by: Yury-Fridlyand --- .github/workflows/java-cd.yml | 4 ++-- .github/workflows/java.yml | 6 +++--- 
CHANGELOG.md | 5 ++--- java/DEVELOPER.md | 27 +++++++-------------------- java/client/build.gradle | 10 +++++----- 5 files changed, 19 insertions(+), 33 deletions(-) diff --git a/.github/workflows/java-cd.yml b/.github/workflows/java-cd.yml index 7033141881..5e2fd5886e 100644 --- a/.github/workflows/java-cd.yml +++ b/.github/workflows/java-cd.yml @@ -102,7 +102,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "28.2" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Create secret key ring file @@ -245,7 +245,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "28.2" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Start standalone Valkey server diff --git a/.github/workflows/java.yml b/.github/workflows/java.yml index 449c9f1a65..c837b22dc6 100644 --- a/.github/workflows/java.yml +++ b/.github/workflows/java.yml @@ -103,7 +103,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "28.2" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Build java client @@ -184,7 +184,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "28.2" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Build java wrapper @@ -235,7 +235,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Test java wrapper diff --git a/CHANGELOG.md b/CHANGELOG.md index 44a8ca101e..9f4421333d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,11 @@ #### Changes -* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561)) +* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), [#2802](https://github.com/valkey-io/valkey-glide/pull/2802) * Java: bump `netty` version 
([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) #### Breaking Changes #### Fixes +* Core: Fix RESP2 multi-node response from cluster ([#2381](https://github.com/valkey-io/valkey-glide/pull/2381)) #### Operational Enhancements @@ -108,8 +109,6 @@ #### Fixes * Core: UDS Socket Handling Rework ([#2482](https://github.com/valkey-io/valkey-glide/pull/2482)) -* Core: Fix RESP2 multi-node response from cluster ([#2381](https://github.com/valkey-io/valkey-glide/pull/2381)) - #### Operational Enhancements * Java: Add modules CI ([#2388](https://github.com/valkey-io/valkey-glide/pull/2388), [#2404](https://github.com/valkey-io/valkey-glide/pull/2404), [#2416](https://github.com/valkey-io/valkey-glide/pull/2416)) diff --git a/java/DEVELOPER.md b/java/DEVELOPER.md index 3a275425fc..9304dc3802 100644 --- a/java/DEVELOPER.md +++ b/java/DEVELOPER.md @@ -17,7 +17,7 @@ The Valkey GLIDE Java wrapper consists of both Java and Rust code. Rust bindings - git - GCC - pkg-config -- protoc (protobuf compiler) >= 28.2 +- protoc (protobuf compiler) >= 29.1 - openssl - openssl-dev - rustup @@ -64,17 +64,17 @@ Continue with **Install protobuf compiler** below. 
To install protobuf for MacOS, run: ```bash brew install protobuf -# Check that the protobuf compiler version 28.2 or higher is installed +# Check that the protobuf compiler version 29.1 or higher is installed protoc --version ``` For the remaining systems, do the following: ```bash PB_REL="https://github.com/protocolbuffers/protobuf/releases" -curl -LO $PB_REL/download/v28.2/protoc-28.2-linux-x86_64.zip -unzip protoc-28.2-linux-x86_64.zip -d $HOME/.local +curl -LO $PB_REL/download/v29.1/protoc-29.1-linux-x86_64.zip +unzip protoc-29.1-linux-x86_64.zip -d $HOME/.local export PATH="$PATH:$HOME/.local/bin" -# Check that the protobuf compiler version 28.2 or higher is installed +# Check that the protobuf compiler version 29.1 or higher is installed protoc --version ``` @@ -87,11 +87,7 @@ Before starting this step, make sure you've installed all software dependencies. git clone https://github.com/valkey-io/valkey-glide.git cd valkey-glide/java ``` -2. Initialize git submodule: - ```bash - git submodule update --init --recursive - ``` -3. Build the Java wrapper (Choose a build option from the following and run it from the `java` folder): +2. Build the Java wrapper (Choose a build option from the following and run it from the `java` folder): 1. Enter the java directory: @@ -164,8 +160,7 @@ Some troubleshooting issues: you may need to restart your machine. In particular, this may solve the following problems: - Failed to find `cargo` after `rustup`. - No Protobuf compiler (protoc) found. -- If build fails because of rust compiler fails, make sure submodules are updated using `git submodule update`. -- If protobuf 28.0 or earlier is detected, upgrade to the latest protobuf release. +- If protobuf 29.0 or earlier is detected, upgrade to the latest protobuf release. 
## Running Examples App @@ -275,14 +270,6 @@ To (re)generate protobuf code, use the following command: ./gradlew protobuf ``` -### Submodules - -After pulling new changes, ensure that you update the submodules by running the following command: - -```bash -git submodule update -``` - ### Contributing new ValKey commands A Valkey command can either have a standalone or cluster implementation which is dependent on their specifications. diff --git a/java/client/build.gradle b/java/client/build.gradle index fa617fb131..0075b01f87 100644 --- a/java/client/build.gradle +++ b/java/client/build.gradle @@ -14,7 +14,7 @@ repositories { } dependencies { - implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.28.2' + implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.29.1' implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.13.0' implementation group: 'io.netty', name: 'netty-handler', version: '4.1.115.Final' @@ -38,11 +38,11 @@ dependencies { ext { checkProtocVersion = { String output -> - // Line in format like: libprotoc 28.1 + // Line in format like: libprotoc 29.1 int majorVersion = Integer.parseInt(output.split(" ")[1].split("\\.")[0].trim()); int minorVersion = Integer.parseInt(output.split(" ")[1].split("\\.")[1].trim()); - if (majorVersion < 28) { - throw new GradleException("Protobuf compiler (protoc) version 28.0 or newer is required. Current version: $output"); + if (majorVersion < 29) { + throw new GradleException("Protobuf compiler (protoc) version 29.0 or newer is required. Current version: $output"); } return output.split(" ")[1] } @@ -61,7 +61,7 @@ tasks.register('protobuf', Exec) { } } catch (Exception e) { if (e.getMessage().startsWith("A problem occurred starting process")) { - throw new GradleException("No Protobuf compiler (protoc) found. Protobuf compiler version 28.0 or newer is required."); + throw new GradleException("No Protobuf compiler (protoc) found. 
Protobuf compiler version 29.0 or newer is required."); } throw e } From b9d340644ef6b3f0a951177c6c9ac07a762b5823 Mon Sep 17 00:00:00 2001 From: Lior Sventitzky Date: Mon, 16 Dec 2024 14:36:16 +0200 Subject: [PATCH 10/18] Fix python memory leak (#2818) changes references to be bound Signed-off-by: lior sventitzky --- python/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/src/lib.rs b/python/src/lib.rs index e3b9a298cb..09914c2c59 100644 --- a/python/src/lib.rs +++ b/python/src/lib.rs @@ -84,7 +84,7 @@ impl Script { fn new(code: &Bound) -> PyResult { let hash = if let Ok(code_str) = code.extract::() { glide_core::scripts_container::add_script(code_str.as_bytes()) - } else if let Ok(code_bytes) = code.extract::<&PyBytes>() { + } else if let Ok(code_bytes) = code.extract::>() { glide_core::scripts_container::add_script(code_bytes.as_bytes()) } else { return Err(PyTypeError::new_err( @@ -267,7 +267,7 @@ fn glide(_py: Python, m: &Bound) -> PyResult<()> { } #[pyfunction] - pub fn create_leaked_bytes_vec(args_vec: Vec<&PyBytes>) -> usize { + pub fn create_leaked_bytes_vec(args_vec: Vec>) -> usize { // Convert the bytes vec -> Bytes vector let bytes_vec: Vec = args_vec .iter() From a86378590eb6553470bfda65f4ba03e767871ac5 Mon Sep 17 00:00:00 2001 From: ikolomi <152477505+ikolomi@users.noreply.github.com> Date: Tue, 17 Dec 2024 14:01:17 +0200 Subject: [PATCH 11/18] Ensure cluster client creation fail when engine is < 7.0 and sharded subscriptions are configured (#2819) Introduce engine version validation when sharded subscriptions are configured Signed-off-by: ikolomi --- CHANGELOG.md | 1 + glide-core/Cargo.toml | 1 + glide-core/src/client/mod.rs | 58 +++++++++++++++- glide-core/tests/test_cluster_client.rs | 89 ++++++++++++++++++++++++- 4 files changed, 145 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f4421333d..1f8ebb2985 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ #### Fixes * Core: 
Fix RESP2 multi-node response from cluster ([#2381](https://github.com/valkey-io/valkey-glide/pull/2381)) +* Core: Ensure cluster client creation fail when engine is < 7.0 and sharded subscriptions are configured ([#2819](https://github.com/valkey-io/valkey-glide/pull/2819)) #### Operational Enhancements diff --git a/glide-core/Cargo.toml b/glide-core/Cargo.toml index bd12bb09c9..394c953f49 100644 --- a/glide-core/Cargo.toml +++ b/glide-core/Cargo.toml @@ -41,6 +41,7 @@ nanoid = "0.4.0" async-trait = { version = "0.1.24" } serde_json = "1" serde = { version = "1", features = ["derive"] } +versions = "6.3" [features] socket-layer = [ diff --git a/glide-core/src/client/mod.rs b/glide-core/src/client/mod.rs index 29a3a05393..a560e17697 100644 --- a/glide-core/src/client/mod.rs +++ b/glide-core/src/client/mod.rs @@ -12,7 +12,10 @@ use redis::cluster_routing::{ MultipleNodeRoutingInfo, ResponsePolicy, Routable, RoutingInfo, SingleNodeRoutingInfo, }; use redis::cluster_slotmap::ReadFromReplicaStrategy; -use redis::{Cmd, ErrorKind, ObjectType, PushInfo, RedisError, RedisResult, ScanStateRC, Value}; +use redis::{ + Cmd, ErrorKind, FromRedisValue, InfoDict, ObjectType, PushInfo, RedisError, RedisResult, + ScanStateRC, Value, +}; pub use standalone_client::StandaloneClient; use std::io; use std::sync::atomic::{AtomicIsize, Ordering}; @@ -25,6 +28,7 @@ mod reconnecting_connection; mod standalone_client; mod value_conversion; use tokio::sync::mpsc; +use versions::Versioning; pub const HEARTBEAT_SLEEP_DURATION: Duration = Duration::from_secs(1); pub const DEFAULT_RETRIES: u32 = 3; @@ -607,7 +611,7 @@ async fn create_cluster_client( }; builder = builder.tls(tls); } - if let Some(pubsub_subscriptions) = redis_connection_info.pubsub_subscriptions { + if let Some(pubsub_subscriptions) = redis_connection_info.pubsub_subscriptions.clone() { builder = builder.pubsub_subscriptions(pubsub_subscriptions); } @@ -615,7 +619,55 @@ async fn create_cluster_client( builder = 
builder.periodic_connections_checks(CONNECTION_CHECKS_INTERVAL); let client = builder.build()?; - client.get_async_connection(push_sender).await + let mut con = client.get_async_connection(push_sender).await?; + + // This validation ensures that sharded subscriptions are not applied to Redis engines older than version 7.0, + // preventing scenarios where the client becomes inoperable or, worse, unaware that sharded pubsub messages are not being received. + // The issue arises because `client.get_async_connection()` might succeed even if the engine does not support sharded pubsub. + // For example, initial connections may exclude the target node for sharded subscriptions, allowing the creation to succeed, + // but subsequent resubscription tasks will fail when `setup_connection()` cannot establish a connection to the node. + // + // One approach to handle this would be to check the engine version inside `setup_connection()` and skip applying sharded subscriptions. + // However, this approach would leave the application unaware that the subscriptions were not applied, requiring the user to analyze logs to identify the issue. + // Instead, we explicitly check the engine version here and fail the connection creation if it is incompatible with sharded subscriptions. 
+ + if let Some(pubsub_subscriptions) = redis_connection_info.pubsub_subscriptions { + if pubsub_subscriptions.contains_key(&redis::PubSubSubscriptionKind::Sharded) { + let info_res = con + .route_command( + redis::cmd("INFO").arg("SERVER"), + RoutingInfo::SingleNode(SingleNodeRoutingInfo::Random), + ) + .await?; + let info_dict: InfoDict = FromRedisValue::from_redis_value(&info_res)?; + match info_dict.get::("redis_version") { + Some(version) => match (Versioning::new(version), Versioning::new("7.0")) { + (Some(server_ver), Some(min_ver)) => { + if server_ver < min_ver { + return Err(RedisError::from(( + ErrorKind::InvalidClientConfig, + "Sharded subscriptions provided, but the engine version is < 7.0", + ))); + } + } + _ => { + return Err(RedisError::from(( + ErrorKind::ResponseError, + "Failed to parse engine version", + ))) + } + }, + _ => { + return Err(RedisError::from(( + ErrorKind::ResponseError, + "Could not determine engine version from INFO result", + ))) + } + } + } + } + + Ok(con) } #[derive(thiserror::Error)] diff --git a/glide-core/tests/test_cluster_client.rs b/glide-core/tests/test_cluster_client.rs index 2943ab21bb..ec795481c6 100644 --- a/glide-core/tests/test_cluster_client.rs +++ b/glide-core/tests/test_cluster_client.rs @@ -7,13 +7,19 @@ mod cluster_client_tests { use std::collections::HashMap; use super::*; - use glide_core::connection_request::ReadFrom; + use cluster::{setup_cluster_with_replicas, LONG_CLUSTER_TEST_TIMEOUT}; + use glide_core::client::Client; + use glide_core::connection_request::{ + self, PubSubChannelsOrPatterns, PubSubSubscriptions, ReadFrom, + }; use redis::cluster_routing::{ MultipleNodeRoutingInfo, Route, RoutingInfo, SingleNodeRoutingInfo, SlotAddr, }; + use redis::InfoDict; use rstest::rstest; use utilities::cluster::{setup_test_basics_internal, SHORT_CLUSTER_TEST_TIMEOUT}; use utilities::*; + use versions::Versioning; fn count_primary_or_replica(value: &str) -> (u16, u16) { if value.contains("role:master") { @@ 
-214,4 +220,85 @@ mod cluster_client_tests { assert_eq!(replicas, 1); }); } + + #[rstest] + #[timeout(LONG_CLUSTER_TEST_TIMEOUT)] + fn test_fail_creation_with_unsupported_sharded_pubsub() { + block_on_all(async { + let mut test_basics = setup_cluster_with_replicas( + TestConfiguration { + cluster_mode: ClusterMode::Enabled, + shared_server: false, + ..Default::default() + }, + 0, + 3, + ) + .await; + + // get engine version + let cmd = redis::cmd("INFO"); + let info = test_basics + .client + .send_command( + &cmd, + Some(RoutingInfo::SingleNode(SingleNodeRoutingInfo::Random)), + ) + .await + .unwrap(); + + let info_dict: InfoDict = redis::from_owned_redis_value(info).unwrap(); + match info_dict.get::("redis_version") { + Some(version) => match (Versioning::new(version), Versioning::new("7.0")) { + (Some(server_ver), Some(min_ver)) => { + if server_ver < min_ver { + // try to create client with initial nodes lacking the target sharded subscription node + let cluster = test_basics.cluster.unwrap(); + let mut addresses = cluster.get_server_addresses(); + addresses.truncate(1); + + let mut connection_request = + connection_request::ConnectionRequest::new(); + connection_request.addresses = + addresses.iter().map(get_address_info).collect(); + + connection_request.cluster_mode_enabled = true; + // Assumes the current implementation of the test cluster, where slots are distributed across nodes + // in a monotonically increasing order. 
+ let mut last_slot_channel = PubSubChannelsOrPatterns::new(); + last_slot_channel + .channels_or_patterns + .push("last-slot-channel-{16383}".as_bytes().into()); + + let mut subs = PubSubSubscriptions::new(); + // First try to create a client with the Exact subscription + subs.channels_or_patterns_by_type + .insert(0, last_slot_channel.clone()); + connection_request.pubsub_subscriptions = + protobuf::MessageField::from_option(Some(subs.clone())); + + let _client = Client::new(connection_request.clone().into(), None) + .await + .unwrap(); + + // Now try to create a client with a Sharded subscription which should fail + subs.channels_or_patterns_by_type + .insert(2, last_slot_channel); + connection_request.pubsub_subscriptions = + protobuf::MessageField::from_option(Some(subs)); + + let client = Client::new(connection_request.into(), None).await; + assert!(client.is_err()); + } + } + _ => { + panic!("Failed to parse engine version"); + } + }, + _ => { + panic!("Could not determine engine version from INFO result"); + } + } + }); + } } From e0b4186796bccca7040af7d2c01c52548c19ac06 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Tue, 17 Dec 2024 16:18:20 -0800 Subject: [PATCH 12/18] Fix node ITs (#2826) Fix tests Signed-off-by: Yury-Fridlyand --- node/tests/GlideClusterClient.test.ts | 404 +++++++++----------------- 1 file changed, 145 insertions(+), 259 deletions(-) diff --git a/node/tests/GlideClusterClient.test.ts b/node/tests/GlideClusterClient.test.ts index 2c8325a013..43b039c61f 100644 --- a/node/tests/GlideClusterClient.test.ts +++ b/node/tests/GlideClusterClient.test.ts @@ -27,7 +27,6 @@ import { InfoOptions, ListDirection, ProtocolVersion, - ReadFrom, RequestError, Routes, ScoreFilter, @@ -2023,62 +2022,81 @@ describe("GlideClusterClient", () => { } }, ); - describe("GlideClusterClient - AZAffinity Read Strategy Test", () => { + + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "should return valid statistics using protocol %p", + async 
(protocol) => { + let glideClientForTesting; + + try { + // Create a GlideClusterClient instance for testing + glideClientForTesting = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + { + requestTimeout: 2000, + }, + ), + ); + + // Fetch statistics using get_statistics method + const stats = glideClientForTesting.getStatistics(); + + // Assertions to check if stats object has correct structure + expect(typeof stats).toBe("object"); + expect(stats).toHaveProperty("total_connections"); + expect(stats).toHaveProperty("total_clients"); + expect(Object.keys(stats)).toHaveLength(2); + } finally { + // Ensure the client is properly closed + glideClientForTesting?.close(); + } + }, + ); + + describe("AZAffinity Read Strategy Tests", () => { async function getNumberOfReplicas( azClient: GlideClusterClient, ): Promise { - const replicationInfo = await azClient.customCommand([ - "INFO", - "REPLICATION", - ]); - - if (Array.isArray(replicationInfo)) { - // Handle array response from cluster (CME Mode) - let totalReplicas = 0; - - for (const node of replicationInfo) { - const nodeInfo = node as { - key: string; - value: string | string[] | null; - }; - - if (typeof nodeInfo.value === "string") { - const lines = nodeInfo.value.split(/\r?\n/); - const connectedReplicasLine = lines.find( - (line) => - line.startsWith("connected_slaves:") || - line.startsWith("connected_replicas:"), - ); + const replicationInfo = (await azClient.info({ + sections: [InfoOptions.Replication], + })) as Record; + let totalReplicas = 0; + Object.values(replicationInfo).forEach((nodeInfo) => { + const lines = nodeInfo.split(/\r?\n/); + const connectedReplicasLine = lines.find( + (line) => + line.startsWith("connected_slaves:") || + line.startsWith("connected_replicas:"), + ); - if (connectedReplicasLine) { - const parts = connectedReplicasLine.split(":"); - const numReplicas = parseInt(parts[1], 10); + if (connectedReplicasLine) { + const 
parts = connectedReplicasLine.split(":"); + const numReplicas = parseInt(parts[1], 10); - if (!isNaN(numReplicas)) { - // Sum up replicas from each primary node - totalReplicas += numReplicas; - } - } + if (!isNaN(numReplicas)) { + // Sum up replicas from each primary node + totalReplicas += numReplicas; } } + }); - if (totalReplicas > 0) { - return totalReplicas; - } - - throw new Error( - "Could not find replica information in any node's response", - ); + if (totalReplicas > 0) { + return totalReplicas; } throw new Error( - "Unexpected response format from INFO REPLICATION command", + "Could not find replica information in any node's response", ); } it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "should route GET commands to all replicas with the same AZ using protocol %p", async (protocol) => { + // Skip test if version is below 8.0.0 + if (cluster.checkIfServerVersionLessThan("8.0.0")) return; + const az = "us-east-1a"; const GET_CALLS_PER_REPLICA = 3; @@ -2094,21 +2112,9 @@ describe("GlideClusterClient", () => { protocol, ), ); - - // Skip test if version is below 8.0.0 - if (cluster.checkIfServerVersionLessThan("8.0.0")) { - console.log( - "Skipping test: requires Valkey 8.0.0 or higher", - ); - return; - } - - await client_for_config_set.customCommand([ - "CONFIG", - "RESETSTAT", - ]); - await client_for_config_set.customCommand( - ["CONFIG", "SET", "availability-zone", az], + await client_for_config_set.configResetStat(); + await client_for_config_set.configSet( + { "availability-zone": az }, { route: "allNodes" }, ); @@ -2133,46 +2139,22 @@ describe("GlideClusterClient", () => { azCluster.getAddresses(), protocol, { - readFrom: "AZAffinity" as ReadFrom, + readFrom: "AZAffinity", clientAz: az, }, ), ); - const azs = await client_for_testing_az.customCommand( - ["CONFIG", "GET", "availability-zone"], + const azs = (await client_for_testing_az.configGet( + ["availability-zone"], { route: "allNodes" }, - ); + )) as Record>; - if (Array.isArray(azs)) 
{ - const allAZsMatch = azs.every((node) => { - const nodeResponse = node as { - key: string; - value: string | number; - }; - - if (protocol === ProtocolVersion.RESP2) { - // RESP2: Direct array format ["availability-zone", "us-east-1a"] - return ( - Array.isArray(nodeResponse.value) && - nodeResponse.value[1] === az - ); - } else { - // RESP3: Nested object format [{ key: "availability-zone", value: "us-east-1a" }] - return ( - Array.isArray(nodeResponse.value) && - nodeResponse.value[0]?.key === - "availability-zone" && - nodeResponse.value[0]?.value === az - ); - } - }); - expect(allAZsMatch).toBe(true); - } else { - throw new Error( - "Unexpected response format from CONFIG GET command", - ); - } + Object.values(azs).forEach((nodeResponse) => + expect(nodeResponse["availability-zone"]).toEqual( + "us-east-1a", + ), + ); // Stage 3: Set test data and perform GET operations await client_for_testing_az.set("foo", "testvalue"); @@ -2182,52 +2164,40 @@ describe("GlideClusterClient", () => { } // Stage 4: Verify GET commands were routed correctly - const info_result = - await client_for_testing_az.customCommand( - ["INFO", "ALL"], // Get both replication and commandstats info - { route: "allNodes" }, - ); - - if (Array.isArray(info_result)) { - const matching_entries_count = info_result.filter( - (node) => { - const nodeInfo = node as { - key: string; - value: string | string[] | null; - }; - const infoStr = - nodeInfo.value?.toString() || ""; - - // Check if this is a replica node AND it has the expected number of GET calls - const isReplicaNode = - infoStr.includes("role:slave") || - infoStr.includes("role:replica"); - - return ( - isReplicaNode && - infoStr.includes(get_cmdstat) - ); - }, - ).length; - - expect(matching_entries_count).toBe(n_replicas); // Should expect 12 as the cluster was created with 3 primary and 4 replicas, totalling 12 replica nodes - } else { - throw new Error( - "Unexpected response format from INFO command", - ); - } + const info_result 
= (await client_for_testing_az.info( + { sections: [InfoOptions.All], route: "allNodes" }, // Get both replication and commandstats info + )) as Record; + + const matching_entries_count = Object.values( + info_result, + ).filter((infoStr) => { + // Check if this is a replica node AND it has the expected number of GET calls + const isReplicaNode = + infoStr.includes("role:slave") || + infoStr.includes("role:replica"); + + return isReplicaNode && infoStr.includes(get_cmdstat); + }).length; + + expect(matching_entries_count).toBe(n_replicas); // Should expect 12 as the cluster was created with 3 primary and 4 replicas, totalling 12 replica nodes } finally { // Cleanup - await client_for_config_set?.close(); - await client_for_testing_az?.close(); + await client_for_config_set?.configSet( + { "availability-zone": "" }, + { route: "allNodes" }, + ); + client_for_config_set?.close(); + client_for_testing_az?.close(); } }, ); - }); - describe("GlideClusterClient - AZAffinity Routing to 1 replica", () => { + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "should route commands to single replica with AZ using protocol %p", async (protocol) => { + // Skip test if version is below 8.0.0 + if (cluster.checkIfServerVersionLessThan("8.0.0")) return; + const az = "us-east-1a"; const GET_CALLS = 3; const get_cmdstat = `calls=${GET_CALLS}`; @@ -2244,26 +2214,15 @@ describe("GlideClusterClient", () => { ), ); - // Skip test if version is below 8.0.0 - if (cluster.checkIfServerVersionLessThan("8.0.0")) { - console.log( - "Skipping test: requires Valkey 8.0.0 or higher", - ); - return; - } - - await client_for_config_set.customCommand( - ["CONFIG", "SET", "availability-zone", ""], + await client_for_config_set.configSet( + { "availability-zone": "" }, { route: "allNodes" }, ); - await client_for_config_set.customCommand([ - "CONFIG", - "RESETSTAT", - ]); + await client_for_config_set.configResetStat(); - await client_for_config_set.customCommand( - ["CONFIG", "SET", 
"availability-zone", az], + await client_for_config_set.configSet( + { "availability-zone": az }, { route: { type: "replicaSlotId", id: 12182 } }, ); @@ -2286,73 +2245,54 @@ describe("GlideClusterClient", () => { } // Stage 4: Verify GET commands were routed correctly - const info_result = - await client_for_testing_az.customCommand( - ["INFO", "ALL"], - { route: "allNodes" }, - ); + const info_result = (await client_for_testing_az.info({ + sections: [InfoOptions.All], + route: "allNodes", + })) as Record; // Process the info_result to check that only one replica has the GET calls - if (Array.isArray(info_result)) { - // Count the number of nodes where both get_cmdstat and az are present - const matching_entries_count = info_result.filter( - (node) => { - const nodeInfo = node as { - key: string; - value: string | string[] | null; - }; - const infoStr = - nodeInfo.value?.toString() || ""; - return ( - infoStr.includes(get_cmdstat) && - infoStr.includes(`availability_zone:${az}`) - ); - }, - ).length; - - expect(matching_entries_count).toBe(1); - - // Check that only one node has the availability zone set to az - const changed_az_count = info_result.filter((node) => { - const nodeInfo = node as { - key: string; - value: string | string[] | null; - }; - const infoStr = nodeInfo.value?.toString() || ""; + const matching_entries_count = Object.values( + info_result, + ).filter((infoStr) => { + return ( + infoStr.includes(get_cmdstat) && + infoStr.includes(`availability_zone:${az}`) + ); + }).length; + + expect(matching_entries_count).toBe(1); + + // Check that only one node has the availability zone set to az + const changed_az_count = Object.values(info_result).filter( + (infoStr) => { return infoStr.includes(`availability_zone:${az}`); - }).length; + }, + ).length; - expect(changed_az_count).toBe(1); - } else { - throw new Error( - "Unexpected response format from INFO command", - ); - } + expect(changed_az_count).toBe(1); } finally { - await 
client_for_config_set?.close(); - await client_for_testing_az?.close(); + await client_for_config_set?.configSet( + { "availability-zone": "" }, + { route: "allNodes" }, + ); + client_for_config_set?.close(); + client_for_testing_az?.close(); } }, ); - }); - describe("GlideClusterClient - AZAffinity with Non-existing AZ", () => { + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "should route commands to a replica when AZ does not exist using protocol %p", async (protocol) => { + // Skip test if version is below 8.0.0 + if (cluster.checkIfServerVersionLessThan("8.0.0")) return; + const GET_CALLS = 4; const replica_calls = 1; const get_cmdstat = `cmdstat_get:calls=${replica_calls}`; let client_for_testing_az; try { - // Skip test if server version is below 8.0.0 - if (azCluster.checkIfServerVersionLessThan("8.0.0")) { - console.log( - "Skipping test: requires Valkey 8.0.0 or higher", - ); - return; - } - // Create a client configured for AZAffinity with a non-existing AZ client_for_testing_az = await GlideClusterClient.createClient( @@ -2368,10 +2308,9 @@ describe("GlideClusterClient", () => { ); // Reset command stats on all nodes - await client_for_testing_az.customCommand( - ["CONFIG", "RESETSTAT"], - { route: "allNodes" }, - ); + await client_for_testing_az.configResetStat({ + route: "allNodes", + }); // Issue GET commands for (let i = 0; i < GET_CALLS; i++) { @@ -2379,76 +2318,23 @@ describe("GlideClusterClient", () => { } // Fetch command stats from all nodes - const info_result = - await client_for_testing_az.customCommand( - ["INFO", "COMMANDSTATS"], - { route: "allNodes" }, - ); + const info_result = (await client_for_testing_az.info({ + sections: [InfoOptions.Commandstats], + route: "allNodes", + })) as Record; // Inline matching logic - let matchingEntriesCount = 0; - - if ( - typeof info_result === "object" && - info_result !== null - ) { - const nodeResponses = Object.values(info_result); - - for (const response of nodeResponses) { - if ( - 
response && - typeof response === "object" && - "value" in response && - response.value.includes(get_cmdstat) - ) { - matchingEntriesCount++; - } - } - } else { - throw new Error( - "Unexpected response format from INFO command", - ); - } + const matchingEntriesCount = Object.values( + info_result, + ).filter((nodeResponses) => { + return nodeResponses.includes(get_cmdstat); + }).length; // Validate that only one replica handled the GET calls expect(matchingEntriesCount).toBe(4); } finally { // Cleanup: Close the client after test execution - await client_for_testing_az?.close(); - } - }, - ); - }); - describe("GlideClusterClient - Get Statistics", () => { - it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "should return valid statistics using protocol %p", - async (protocol) => { - let glideClientForTesting; - - try { - // Create a GlideClusterClient instance for testing - glideClientForTesting = - await GlideClusterClient.createClient( - getClientConfigurationOption( - cluster.getAddresses(), - protocol, - { - requestTimeout: 2000, - }, - ), - ); - - // Fetch statistics using get_statistics method - const stats = await glideClientForTesting.getStatistics(); - - // Assertions to check if stats object has correct structure - expect(typeof stats).toBe("object"); - expect(stats).toHaveProperty("total_connections"); - expect(stats).toHaveProperty("total_clients"); - expect(Object.keys(stats)).toHaveLength(2); - } finally { - // Ensure the client is properly closed - await glideClientForTesting?.close(); + client_for_testing_az?.close(); } }, ); From 36452b77ab2a0647efd9c87b89929ba4ef8ee441 Mon Sep 17 00:00:00 2001 From: Joseph Brinkman Date: Tue, 17 Dec 2024 20:12:12 -0500 Subject: [PATCH 13/18] remove native package references for MacOS x64 architecture (#2824) * remove native package references for MacOS x64 architecture Signed-off-by: jbrinkman --- CHANGELOG.md | 5 +++++ node/npm/glide/index.ts | 3 --- node/npm/glide/package.json | 1 - 3 files changed, 5 
insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f8ebb2985..f820f45e05 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ #### Changes + * Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), [#2802](https://github.com/valkey-io/valkey-glide/pull/2802) * Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) +* Node: Remove native package references for MacOs x64 architecture ([#2799](https://github.com/valkey-io/valkey-glide/issues/2799)) #### Breaking Changes @@ -13,6 +15,7 @@ ## 1.2.0 (2024-11-27) #### Changes + * Node: Client API for retrieving internal statistics ([#2727](https://github.com/valkey-io/valkey-glide/pull/2727)) * Python: Client API for retrieving internal statistics ([#2707](https://github.com/valkey-io/valkey-glide/pull/2707)) * Node, Python, Java: Adding support for replacing connection configured password ([#2651](https://github.com/valkey-io/valkey-glide/pull/2651), [#2659](https://github.com/valkey-io/valkey-glide/pull/2659), [#2677](https://github.com/valkey-io/valkey-glide/pull/2677)) @@ -107,7 +110,9 @@ #### Breaking Changes + #### Fixes + * Core: UDS Socket Handling Rework ([#2482](https://github.com/valkey-io/valkey-glide/pull/2482)) #### Operational Enhancements diff --git a/node/npm/glide/index.ts b/node/npm/glide/index.ts index da719c51c7..c4dab9795b 100644 --- a/node/npm/glide/index.ts +++ b/node/npm/glide/index.ts @@ -53,9 +53,6 @@ function loadNativeBinding() { break; case "darwin": switch (arch) { - case "x64": - nativeBinding = require("@scope/valkey-glide-darwin-x64"); - break; case "arm64": nativeBinding = require("@scope/valkey-glide-darwin-arm64"); break; diff --git a/node/npm/glide/package.json b/node/npm/glide/package.json index 78ec8d0821..8444de8ef9 100644 --- a/node/npm/glide/package.json +++ b/node/npm/glide/package.json @@ -41,7 +41,6 @@ }, "optionalDependencies": { 
"${scope}valkey-glide-darwin-arm64": "${package_version}", - "${scope}valkey-glide-darwin-x64": "${package_version}", "${scope}valkey-glide-linux-arm64": "${package_version}", "${scope}valkey-glide-linux-x64": "${package_version}", "${scope}valkey-glide-linux-musl-arm64": "${package_version}", From 4de48472cb3c29a3f04db2b18e18ec7e36f0f850 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Thu, 19 Dec 2024 09:54:10 -0800 Subject: [PATCH 14/18] Fix full matrix CI (#2784) Fix CI Signed-off-by: Yury-Fridlyand --- .github/workflows/create-test-matrices/action.yml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/.github/workflows/create-test-matrices/action.yml b/.github/workflows/create-test-matrices/action.yml index 5bd777b5a1..e1a7186521 100644 --- a/.github/workflows/create-test-matrices/action.yml +++ b/.github/workflows/create-test-matrices/action.yml @@ -41,12 +41,11 @@ runs: echo 'Select server engines to run tests against' if [[ "${{ github.event_name }}" == "pull_request" || "${{ github.event_name }}" == "push" || "${{ inputs.run-full-matrix }}" == "false" ]]; then echo 'Pick engines marked as `"run": "always"` only - on PR, push or manually triggered job which does not require full matrix' - jq -c '[.[] | select(.run == "always")]' < .github/json_matrices/engine-matrix.json | awk '{ printf "engine-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c '[.[] | select(.run == "always")]' < .github/json_matrices/engine-matrix.json | awk '{ printf "engine-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT else echo 'Pick all engines - on cron (schedule) or if manually triggered job requires a full matrix' - jq -c . < .github/json_matrices/engine-matrix.json | awk '{ printf "engine-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c . 
< .github/json_matrices/engine-matrix.json | awk '{ printf "engine-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT fi - cat $GITHUB_OUTPUT - name: Load host matrix id: load-host-matrix @@ -57,12 +56,11 @@ runs: echo 'Select runners (VMs) to run tests on' if [[ "${{ github.event_name }}" == "pull_request" || "${{ github.event_name }}" == "push" || "${{ inputs.run-full-matrix }}" == "false" ]]; then echo 'Pick runners marked as '"run": "always"' only - on PR, push or manually triggered job which does not require full matrix' - jq -c '[.[] | select(.run == "always")]' < .github/json_matrices/build-matrix.json | awk '{ printf "host-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c '[.[] | select(.run == "always")]' < .github/json_matrices/build-matrix.json | awk '{ printf "host-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT else echo 'Pick all runners assigned for the chosen client (language) - on cron (schedule) or if manually triggered job requires a full matrix' - jq -c "[.[] | select(.languages? and any(.languages[] == \"${{ inputs.language-name }}\"; .) and $CONDITION)]" < .github/json_matrices/build-matrix.json | awk '{ printf "host-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c "[.[] | select(.languages? and any(.languages[] == \"${{ inputs.language-name }}\"; .) 
and $CONDITION)]" < .github/json_matrices/build-matrix.json | awk '{ printf "host-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT fi - cat $GITHUB_OUTPUT - name: Create language version matrix id: create-lang-version-matrix @@ -72,9 +70,8 @@ runs: echo 'Select language (framework/SDK) versions to run tests on' if [[ "${{ github.event_name }}" == "pull_request" || "${{ github.event_name }}" == "push" || "${{ inputs.run-full-matrix }}" == "false" ]]; then echo 'Pick language versions listed in 'always-run-versions' only - on PR, push or manually triggered job which does not require full matrix' - jq -c '[.[] | select(.language == "${{ inputs.language-name }}") | .["always-run-versions"]][0] // []' < .github/json_matrices/supported-languages-versions.json | awk '{ printf "version-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c '[.[] | select(.language == "${{ inputs.language-name }}") | .["always-run-versions"]][0] // []' < .github/json_matrices/supported-languages-versions.json | awk '{ printf "version-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT else echo 'Pick language versions listed in 'versions' - on cron (schedule) or if manually triggered job requires a full matrix' - jq -c '[.[] | select(.language == "${{ inputs.language-name }}") | .versions][0]' < .github/json_matrices/supported-languages-versions.json | awk '{ printf "version-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c '[.[] | select(.language == "${{ inputs.language-name }}") | .versions][0]' < .github/json_matrices/supported-languages-versions.json | awk '{ printf "version-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT fi - cat $GITHUB_OUTPUT From 8bfa0d87b3ab4a4aec58dc34b08af6f6cab32801 Mon Sep 17 00:00:00 2001 From: Avi Fenesh <55848801+avifenesh@users.noreply.github.com> Date: Wed, 25 Dec 2024 19:01:31 +0200 Subject: [PATCH 15/18] Release 1.2 uncoverd slots (#2869) * Add allow_non_covered_slots option to cluster scan across Node, Python, and Java implementations Signed-off-by: avifenesh * Merge 
python requirement files. (#2597) * Merge python requirement files. --------- Signed-off-by: Yury-Fridlyand Signed-off-by: avifenesh --------- Signed-off-by: avifenesh Signed-off-by: Yury-Fridlyand Co-authored-by: Yury-Fridlyand --- .github/workflows/python.yml | 4 +- CHANGELOG.md | 3 +- Makefile | 1 - eslint.config.mjs | 8 + .../redis-rs/redis/src/cluster_async/mod.rs | 150 +-- .../redis-rs/redis/src/cluster_slotmap.rs | 2 +- .../redis/src/commands/cluster_scan.rs | 1095 +++++++++-------- glide-core/redis-rs/redis/src/commands/mod.rs | 3 + glide-core/redis-rs/redis/src/lib.rs | 3 + .../redis-rs/redis/tests/test_cluster_scan.rs | 549 ++++++++- glide-core/src/client/mod.rs | 28 +- glide-core/src/protobuf/command_request.proto | 1 + glide-core/src/socket_listener.rs | 39 +- .../api/models/commands/scan/ScanOptions.java | 15 + .../java/glide/managers/CommandManager.java | 4 + node/src/Commands.ts | 13 + node/src/GlideClusterClient.ts | 17 +- node/tests/ScanTest.test.ts | 162 +++ node/tests/TestUtilities.ts | 45 + package.json | 12 +- python/DEVELOPER.md | 3 +- python/dev_requirements.txt | 7 - .../glide/async_commands/cluster_commands.py | 89 +- python/python/glide/async_commands/core.py | 1 + python/python/glide/glide_client.py | 2 + python/python/tests/conftest.py | 8 +- python/python/tests/test_scan.py | 157 ++- python/python/tests/test_transaction.py | 5 +- python/requirements.txt | 7 +- utils/TestUtils.ts | 8 +- 30 files changed, 1683 insertions(+), 758 deletions(-) delete mode 100644 python/dev_requirements.txt diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 699033cf1a..11df78697a 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -115,7 +115,7 @@ jobs: working-directory: ./python run: | source .env/bin/activate - pip install -r dev_requirements.txt + pip install -r requirements.txt cd python/tests/ pytest --asyncio-mode=auto --html=pytest_report.html --self-contained-html @@ -178,7 +178,7 @@ jobs: 
working-directory: ./python run: | source .env/bin/activate - pip install -r dev_requirements.txt + pip install -r requirements.txt cd python/tests/ pytest --asyncio-mode=auto -k test_pubsub --html=pytest_report.html --self-contained-html diff --git a/CHANGELOG.md b/CHANGELOG.md index f820f45e05..a1bc2cd15f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,5 @@ #### Changes - +* Node, Python, Java: Add allow uncovered slots scanning flag option in cluster scan ([#2814](https://github.com/valkey-io/valkey-glide/pull/2814), [#2815](https://github.com/valkey-io/valkey-glide/pull/2815), [#2860](https://github.com/valkey-io/valkey-glide/pull/2860)) * Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), [#2802](https://github.com/valkey-io/valkey-glide/pull/2802) * Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) * Node: Remove native package references for MacOs x64 architecture ([#2799](https://github.com/valkey-io/valkey-glide/issues/2799)) @@ -535,4 +535,3 @@ Preview release of **GLIDE for Redis** a Polyglot Redis client. See the [README](README.md) for additional information. 
- diff --git a/Makefile b/Makefile index 877d78f6e9..d4088fb898 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,6 @@ python-test: .build/python_deps check-redis-server @cd python && python3 -m venv .env @echo "$(GREEN)Installing requirements...$(RESET)" @cd python && .env/bin/pip install -r requirements.txt - @cd python && .env/bin/pip install -r dev_requirements.txt @mkdir -p .build/ && touch .build/python_deps ## diff --git a/eslint.config.mjs b/eslint.config.mjs index 21995480f4..a96d4fdecd 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -2,6 +2,7 @@ import eslint from "@eslint/js"; import prettierConfig from "eslint-config-prettier"; import tseslint from "typescript-eslint"; +import jsdoc from "eslint-plugin-jsdoc"; export default tseslint.config( eslint.configs.recommended, @@ -54,6 +55,13 @@ export default tseslint.config( next: "*", }, ], + "@typescript-eslint/indent": ["error", 4, { + "SwitchCase": 1, + "ObjectExpression": 1, + "FunctionDeclaration": {"parameters": "first"}, + "FunctionExpression": {"parameters": "first"}, + "ignoredNodes": ["TSTypeParameterInstantiation"] + }], }, }, prettierConfig, diff --git a/glide-core/redis-rs/redis/src/cluster_async/mod.rs b/glide-core/redis-rs/redis/src/cluster_async/mod.rs index 3726d7a674..8164d09413 100644 --- a/glide-core/redis-rs/redis/src/cluster_async/mod.rs +++ b/glide-core/redis-rs/redis/src/cluster_async/mod.rs @@ -32,15 +32,13 @@ pub mod testing { use crate::{ client::GlideConnectionOptions, cluster_routing::{Routable, RoutingInfo, ShardUpdateResult}, - cluster_slotmap::SlotMap, cluster_topology::{ calculate_topology, get_slot, SlotRefreshState, DEFAULT_NUMBER_OF_REFRESH_SLOTS_RETRIES, DEFAULT_REFRESH_SLOTS_RETRY_BASE_DURATION_MILLIS, DEFAULT_REFRESH_SLOTS_RETRY_BASE_FACTOR, - SLOT_SIZE, }, cmd, - commands::cluster_scan::{cluster_scan, ClusterScanArgs, ObjectType, ScanStateRC}, - FromRedisValue, InfoDict, ToRedisArgs, + commands::cluster_scan::{cluster_scan, ClusterScanArgs, ScanStateRC}, + 
FromRedisValue, InfoDict, }; use dashmap::DashMap; use std::{ @@ -111,7 +109,7 @@ use crate::types::RetryMethod; pub(crate) const MUTEX_READ_ERR: &str = "Failed to obtain read lock. Poisoned mutex?"; const MUTEX_WRITE_ERR: &str = "Failed to obtain write lock. Poisoned mutex?"; -/// This represents an async Redis Cluster connection. It stores the +/// This represents an async Cluster connection. It stores the /// underlying connections maintained for each node in the cluster, as well /// as common parameters for connecting to nodes and executing commands. #[derive(Clone)] @@ -142,68 +140,9 @@ where }) } - /// Special handling for `SCAN` command, using `cluster_scan`. - /// If you wish to use a match pattern, use [`cluster_scan_with_pattern`]. - /// Perform a `SCAN` command on a Redis cluster, using scan state object in order to handle changes in topology - /// and make sure that all keys that were in the cluster from start to end of the scan are scanned. - /// In order to make sure all keys in the cluster scanned, topology refresh occurs more frequently and may affect performance. - /// - /// # Arguments - /// - /// * `scan_state_rc` - A reference to the scan state, For initiating new scan send [`ScanStateRC::new()`], - /// for each subsequent iteration use the returned [`ScanStateRC`]. - /// * `count` - An optional count of keys requested, - /// the amount returned can vary and not obligated to return exactly count. - /// * `object_type` - An optional [`ObjectType`] enum of requested key redis type. - /// - /// # Returns - /// - /// A [`ScanStateRC`] for the updated state of the scan and the vector of keys that were found in the scan. - /// structure of returned value: - /// `Ok((ScanStateRC, Vec))` - /// - /// When the scan is finished [`ScanStateRC`] will be None, and can be checked by calling `scan_state_wrapper.is_finished()`. 
- /// - /// # Example - /// ```rust,no_run - /// use redis::cluster::ClusterClient; - /// use redis::{ScanStateRC, FromRedisValue, from_redis_value, Value, ObjectType}; - /// - /// async fn scan_all_cluster() -> Vec { - /// let nodes = vec!["redis://127.0.0.1/"]; - /// let client = ClusterClient::new(nodes).unwrap(); - /// let mut connection = client.get_async_connection(None).await.unwrap(); - /// let mut scan_state_rc = ScanStateRC::new(); - /// let mut keys: Vec = vec![]; - /// loop { - /// let (next_cursor, scan_keys): (ScanStateRC, Vec) = - /// connection.cluster_scan(scan_state_rc, None, None).await.unwrap(); - /// scan_state_rc = next_cursor; - /// let mut scan_keys = scan_keys - /// .into_iter() - /// .map(|v| from_redis_value(&v).unwrap()) - /// .collect::>(); // Change the type of `keys` to `Vec` - /// keys.append(&mut scan_keys); - /// if scan_state_rc.is_finished() { - /// break; - /// } - /// } - /// keys - /// } - /// ``` - pub async fn cluster_scan( - &mut self, - scan_state_rc: ScanStateRC, - count: Option, - object_type: Option, - ) -> RedisResult<(ScanStateRC, Vec)> { - let cluster_scan_args = ClusterScanArgs::new(scan_state_rc, None, count, object_type); - self.route_cluster_scan(cluster_scan_args).await - } - /// Special handling for `SCAN` command, using `cluster_scan_with_pattern`. /// It is a special case of [`cluster_scan`], with an additional match pattern. - /// Perform a `SCAN` command on a Redis cluster, using scan state object in order to handle changes in topology + /// Perform a `SCAN` command on a cluster, using scan state object in order to handle changes in topology /// and make sure that all keys that were in the cluster from start to end of the scan are scanned. /// In order to make sure all keys in the cluster scanned, topology refresh occurs more frequently and may affect performance. 
/// @@ -211,10 +150,8 @@ where /// /// * `scan_state_rc` - A reference to the scan state, For initiating new scan send [`ScanStateRC::new()`], /// for each subsequent iteration use the returned [`ScanStateRC`]. - /// * `match_pattern` - A match pattern of requested keys. - /// * `count` - An optional count of keys requested, - /// the amount returned can vary and not obligated to return exactly count. - /// * `object_type` - An optional [`ObjectType`] enum of requested key redis type. + /// * `cluster_scan_args` - A [`ClusterScanArgs`] struct containing the arguments for the cluster scan command - match pattern, count, + /// object type and the allow_non_covered_slots flag. /// /// # Returns /// @@ -227,7 +164,7 @@ where /// # Example /// ```rust,no_run /// use redis::cluster::ClusterClient; - /// use redis::{ScanStateRC, FromRedisValue, from_redis_value, Value, ObjectType}; + /// use redis::{ScanStateRC, from_redis_value, Value, ObjectType, ClusterScanArgs}; /// /// async fn scan_all_cluster() -> Vec { /// let nodes = vec!["redis://127.0.0.1/"]; @@ -235,9 +172,10 @@ where /// let mut connection = client.get_async_connection(None).await.unwrap(); /// let mut scan_state_rc = ScanStateRC::new(); /// let mut keys: Vec = vec![]; + /// let cluster_scan_args = ClusterScanArgs::builder().with_count(1000).with_object_type(ObjectType::String).build(); /// loop { /// let (next_cursor, scan_keys): (ScanStateRC, Vec) = - /// connection.cluster_scan_with_pattern(scan_state_rc, b"my_key", None, None).await.unwrap(); + /// connection.cluster_scan(scan_state_rc, cluster_scan_args.clone()).await.unwrap(); /// scan_state_rc = next_cursor; /// let mut scan_keys = scan_keys /// .into_iter() @@ -251,19 +189,12 @@ where /// keys /// } /// ``` - pub async fn cluster_scan_with_pattern( + pub async fn cluster_scan( &mut self, scan_state_rc: ScanStateRC, - match_pattern: K, - count: Option, - object_type: Option, + mut cluster_scan_args: ClusterScanArgs, ) -> RedisResult<(ScanStateRC, Vec)> 
{ - let cluster_scan_args = ClusterScanArgs::new( - scan_state_rc, - Some(match_pattern.to_redis_args().concat()), - count, - object_type, - ); + cluster_scan_args.set_scan_state_cursor(scan_state_rc); self.route_cluster_scan(cluster_scan_args).await } @@ -279,18 +210,18 @@ where sender, }) .await - .map_err(|_| { + .map_err(|e| { RedisError::from(io::Error::new( io::ErrorKind::BrokenPipe, - "redis_cluster: Unable to send command", + format!("Cluster: Error occurred while trying to send SCAN command to internal send task. {e:?}"), )) })?; receiver .await - .unwrap_or_else(|_| { + .unwrap_or_else(|e| { Err(RedisError::from(io::Error::new( io::ErrorKind::BrokenPipe, - "redis_cluster: Unable to receive command", + format!("Cluster: Failed to receive SCAN command response from internal send task. {e:?}"), ))) }) .map(|response| match response { @@ -316,18 +247,20 @@ where sender, }) .await - .map_err(|_| { + .map_err(|e| { RedisError::from(io::Error::new( io::ErrorKind::BrokenPipe, - "redis_cluster: Unable to send command", + format!("Cluster: Error occurred while trying to send command to internal sender. {e:?}"), )) })?; receiver .await - .unwrap_or_else(|_| { + .unwrap_or_else(|e| { Err(RedisError::from(io::Error::new( io::ErrorKind::BrokenPipe, - "redis_cluster: Unable to receive command", + format!( + "Cluster: Failed to receive command response from internal sender. 
{e:?}" + ), ))) }) .map(|response| match response { @@ -489,21 +422,8 @@ where .map_err(|_| RedisError::from((ErrorKind::ClientError, MUTEX_WRITE_ERR))) } - // return address of node for slot - pub(crate) async fn get_address_from_slot( - &self, - slot: u16, - slot_addr: SlotAddr, - ) -> Option> { - self.conn_lock - .read() - .expect(MUTEX_READ_ERR) - .slot_map - .get_node_address_for_slot(slot, slot_addr) - } - // return epoch of node - pub(crate) async fn get_address_epoch(&self, node_address: &str) -> Result { + pub(crate) async fn address_epoch(&self, node_address: &str) -> Result { let command = cmd("CLUSTER").arg("INFO").to_owned(); let node_conn = self .conn_lock @@ -541,14 +461,26 @@ where } } - // return slots of node - pub(crate) async fn get_slots_of_address(&self, node_address: Arc) -> Vec { + /// return slots of node + pub(crate) async fn slots_of_address(&self, node_address: Arc) -> Vec { self.conn_lock .read() .expect(MUTEX_READ_ERR) .slot_map .get_slots_of_node(node_address) } + + /// Get connection for address + pub(crate) async fn connection_for_address( + &self, + address: &str, + ) -> Option> { + self.conn_lock + .read() + .expect(MUTEX_READ_ERR) + .connection_for_address(address) + .map(|(_, conn)| conn) + } } pub(crate) struct ClusterConnInner { @@ -1884,14 +1816,6 @@ where Self::refresh_slots_inner(inner, curr_retry).await } - pub(crate) fn check_if_all_slots_covered(slot_map: &SlotMap) -> bool { - let mut slots_covered = 0; - for (end, slots) in slot_map.slots.iter() { - slots_covered += end.saturating_sub(slots.start).saturating_add(1); - } - slots_covered == SLOT_SIZE - } - // Query a node to discover slot-> master mappings async fn refresh_slots_inner(inner: Arc>, curr_retry: usize) -> RedisResult<()> { let num_of_nodes = inner.conn_lock.read().expect(MUTEX_READ_ERR).len(); diff --git a/glide-core/redis-rs/redis/src/cluster_slotmap.rs b/glide-core/redis-rs/redis/src/cluster_slotmap.rs index 88e7549323..f2e43b4449 100644 --- 
a/glide-core/redis-rs/redis/src/cluster_slotmap.rs +++ b/glide-core/redis-rs/redis/src/cluster_slotmap.rs @@ -202,7 +202,7 @@ impl SlotMap { .collect() } - pub(crate) fn get_node_address_for_slot( + pub(crate) fn node_address_for_slot( &self, slot: u16, slot_addr: SlotAddr, diff --git a/glide-core/redis-rs/redis/src/commands/cluster_scan.rs b/glide-core/redis-rs/redis/src/commands/cluster_scan.rs index 109aceca22..517d43c6b3 100644 --- a/glide-core/redis-rs/redis/src/commands/cluster_scan.rs +++ b/glide-core/redis-rs/redis/src/commands/cluster_scan.rs @@ -1,73 +1,269 @@ -//! This module contains the implementation of scanning operations in a Redis cluster. +//! This module implements cluster-wide scanning operations for clusters. //! -//! The [`ClusterScanArgs`] struct represents the arguments for a cluster scan operation, -//! including the scan state reference, match pattern, count, and object type. +//! # Overview //! -//! The [[`ScanStateRC`]] struct is a wrapper for managing the state of a scan operation in a cluster. -//! It holds a reference to the scan state and provides methods for accessing the state. +//! The module provides functionality to scan keys across all nodes in a cluster, +//! handling topology changes, failovers, and partial cluster coverage scenarios. +//! It maintains state between scan iterations and ensures consistent scanning even +//! when cluster topology changes. //! -//! The [[`ClusterInScan`]] trait defines the methods for interacting with a Redis cluster during scanning, -//! including retrieving address information, refreshing slot mapping, and routing commands to specific address. +//! # Key Components //! -//! The [[`ScanState`]] struct represents the state of a scan operation in a Redis cluster. -//! It holds information about the current scan state, including the cursor position, scanned slots map, -//! address being scanned, and address's epoch. +//! 
- [`ClusterScanArgs`]: Configuration for scan operations including filtering and behavior options +//! - [`ScanStateRC`]: Thread-safe reference-counted wrapper for scan state management +//! - [`ScanState`]: Internal state tracking for cluster-wide scanning progress +//! - [`ObjectType`]: Supported data types for filtering scan results +//! +//! # Key Features +//! +//! - Resumable scanning across cluster nodes +//! - Automatic handling of cluster topology changes +//! - Support for all regular SCAN options +//! - Resilient to node failures and resharding +//! +//! # Implementation Details +//! +//! The scanning process is implemented using a bitmap to track scanned slots and +//! maintains epoch information to handle topology changes. The implementation: +//! +//! - Uses a 64-bit aligned bitmap for efficient slot tracking +//! - Maintains cursor position per node +//! - Handles partial cluster coverage scenarios +//! - Provides automatic recovery from node failures +//! - Ensures consistent scanning across topology changes +//! +//! # Error Handling +//! +//! The module handles various error scenarios including: +//! - Node failures during scanning +//! - Cluster topology changes +//! - Network connectivity issues +//! 
- Invalid routing scenarios use crate::aio::ConnectionLike; -use crate::cluster_async::{ - ClusterConnInner, Connect, Core, InternalRoutingInfo, InternalSingleNodeRouting, RefreshPolicy, - Response, MUTEX_READ_ERR, -}; +use crate::cluster_async::{ClusterConnInner, Connect, InnerCore, RefreshPolicy, MUTEX_READ_ERR}; use crate::cluster_routing::SlotAddr; use crate::cluster_topology::SLOT_SIZE; -use crate::{cmd, from_redis_value, Cmd, ErrorKind, RedisError, RedisResult, Value}; -use async_trait::async_trait; +use crate::{cmd, from_redis_value, ErrorKind, RedisError, RedisResult, Value}; use std::sync::Arc; -use strum_macros::Display; +use strum_macros::{Display, EnumString}; + +const BITS_PER_U64: u16 = u64::BITS as u16; +const NUM_OF_SLOTS: u16 = SLOT_SIZE; +const BITS_ARRAY_SIZE: u16 = NUM_OF_SLOTS / BITS_PER_U64; +const END_OF_SCAN: u16 = NUM_OF_SLOTS; +type SlotsBitsArray = [u64; BITS_ARRAY_SIZE as usize]; + +/// Holds configuration for a cluster scan operation. +/// +/// # Fields +/// - `scan_state_cursor`: Internal state tracking scan progress +/// - `match_pattern`: Optional pattern to filter keys +/// - `count`: Optional limit on number of keys returned per iteration +/// - `object_type`: Optional filter for specific data types +/// - `allow_non_covered_slots`: Whether to continue if some slots are uncovered +/// +/// See examples below for usage with the builder pattern. 
+/// # Examples +/// +/// Create a new `ClusterScanArgs` instance using the builder pattern: +/// +/// ```rust,no_run +/// use redis::ClusterScanArgs; +/// use redis::ObjectType; +/// +/// // Create basic scan args with defaults +/// let basic_scan = ClusterScanArgs::builder().build(); +/// +/// // Create scan args with custom options +/// let custom_scan = ClusterScanArgs::builder() +/// .with_match_pattern("user:*") // Match keys starting with "user:" +/// .with_count(100) // Return 100 keys per iteration +/// .with_object_type(ObjectType::Hash) // Only scan hash objects +/// .allow_non_covered_slots(true) // Continue scanning even if some slots aren't covered +/// .build(); +/// +/// // The builder can be used to create multiple configurations +/// let another_scan = ClusterScanArgs::builder() +/// .with_match_pattern("session:*") +/// .with_object_type(ObjectType::String) +/// .build(); +/// ``` -const BITS_PER_U64: usize = u64::BITS as usize; -const NUM_OF_SLOTS: usize = SLOT_SIZE as usize; -const BITS_ARRAY_SIZE: usize = NUM_OF_SLOTS / BITS_PER_U64; -const END_OF_SCAN: u16 = NUM_OF_SLOTS as u16 + 1; -type SlotsBitsArray = [u64; BITS_ARRAY_SIZE]; +#[derive(Clone, Default)] +pub struct ClusterScanArgs { + /// Reference-counted scan state cursor, managed internally. + pub scan_state_cursor: ScanStateRC, -#[derive(Clone)] -pub(crate) struct ClusterScanArgs { - pub(crate) scan_state_cursor: ScanStateRC, + /// Optional pattern to match keys during the scan. + pub match_pattern: Option>, + + /// A "hint" to the cluster of how much keys to return per scan iteration, if none is sent to the server, the default value is 10. + pub count: Option, + + /// Optional filter to include only keys of a specific data type. + pub object_type: Option, + + /// Flag indicating whether to allow scanning when there are slots not covered by the cluster, by default it is set to false and the scan will stop if some slots are not covered. 
+ pub allow_non_covered_slots: bool, +} + +impl ClusterScanArgs { + /// Creates a new [`ClusterScanArgsBuilder`] instance. + /// + /// # Returns + /// + /// A [`ClusterScanArgsBuilder`] instance for configuring cluster scan arguments. + pub fn builder() -> ClusterScanArgsBuilder { + ClusterScanArgsBuilder::default() + } + pub(crate) fn set_scan_state_cursor(&mut self, scan_state_cursor: ScanStateRC) { + self.scan_state_cursor = scan_state_cursor; + } +} + +#[derive(Default)] +/// Builder pattern for creating cluster scan arguments. +/// +/// This struct allows configuring various parameters for scanning keys in a cluster: +/// * Pattern matching for key filtering +/// * Count limit for returned keys +/// * Filtering by object type +/// * Control over scanning non-covered slots +/// +/// # Example +/// ``` +/// use redis::{ClusterScanArgs, ObjectType}; +/// +/// let args = ClusterScanArgs::builder() +/// .with_match_pattern(b"user:*") +/// .with_count(100) +/// .with_object_type(ObjectType::Hash) +/// .build(); +/// ``` +pub struct ClusterScanArgsBuilder { + /// By default, the match pattern is set to `None` and no filtering is applied. match_pattern: Option>, - count: Option, + /// A "hint" to the cluster of how much keys to return per scan iteration, by default none is sent to the server, the default value is 10. + count: Option, + /// By default, the object type is set to `None` and no filtering is applied. object_type: Option, + /// By default, the flag to allow scanning non-covered slots is set to `false`, meaning scanning will stop if some slots are not covered. + allow_non_covered_slots: Option, } -#[derive(Debug, Clone, Display)] -/// Represents the type of an object in Redis. +impl ClusterScanArgsBuilder { + /// Sets the match pattern for the scan operation. + /// + /// # Arguments + /// + /// * `pattern` - The pattern to match keys against. + /// + /// # Returns + /// + /// The updated [`ClusterScanArgsBuilder`] instance. 
+ pub fn with_match_pattern>>(mut self, pattern: T) -> Self { + self.match_pattern = Some(pattern.into()); + self + } + + /// Sets the count for the scan operation. + /// + /// # Arguments + /// + /// * `count` - A "hint" to the cluster of how much keys to return per scan iteration. + /// + /// The actual number of keys returned may be less or more than the count specified. + /// + /// 4,294,967,295 is the maximum keys possible in a cluster, so higher values will be capped. + /// Hence the count is represented as a `u32` instead of `usize`. + /// + /// The default value is 10, if nothing is sent to the server, meaning nothing set in the builder. + /// + /// # Returns + /// + /// The updated [`ClusterScanArgsBuilder`] instance. + pub fn with_count(mut self, count: u32) -> Self { + self.count = Some(count); + self + } + + /// Sets the object type for the scan operation. + /// + /// # Arguments + /// + /// * `object_type` - The type of object to filter keys by. + /// + /// See [`ObjectType`] for supported data types. + /// + /// # Returns + /// + /// The updated [`ClusterScanArgsBuilder`] instance. + pub fn with_object_type(mut self, object_type: ObjectType) -> Self { + self.object_type = Some(object_type); + self + } + + /// Sets the flag to allow scanning non-covered slots. + /// + /// # Arguments + /// + /// * `allow` - A boolean flag indicating whether to allow scanning non-covered slots. + /// + /// # Returns + /// + /// The updated [`ClusterScanArgsBuilder`] instance. + pub fn allow_non_covered_slots(mut self, allow: bool) -> Self { + self.allow_non_covered_slots = Some(allow); + self + } + + /// Builds the [`ClusterScanArgs`] instance with the provided configuration. + /// + /// # Returns + /// + /// A [`ClusterScanArgs`] instance with the configured options. 
+ pub fn build(self) -> ClusterScanArgs { + ClusterScanArgs { + scan_state_cursor: ScanStateRC::new(), + match_pattern: self.match_pattern, + count: self.count, + object_type: self.object_type, + allow_non_covered_slots: self.allow_non_covered_slots.unwrap_or(false), + } + } +} + +/// Represents the type of an object used to filter keys by data type. +/// +/// This enum is used with the `TYPE` option in the `SCAN` command to +/// filter keys by their data type. +#[derive(Debug, Clone, Display, PartialEq, EnumString)] pub enum ObjectType { - /// Represents a string object in Redis. + /// String data type. String, - /// Represents a list object in Redis. + /// List data type. List, - /// Represents a set object in Redis. + /// Set data type. Set, - /// Represents a sorted set object in Redis. + /// Sorted set data type. ZSet, - /// Represents a hash object in Redis. + /// Hash data type. Hash, - /// Represents a stream object in Redis. + /// Stream data type. Stream, } -impl ClusterScanArgs { - pub(crate) fn new( - scan_state_cursor: ScanStateRC, - match_pattern: Option>, - count: Option, - object_type: Option, - ) -> Self { - Self { - scan_state_cursor, - match_pattern, - count, - object_type, +impl From for ObjectType { + fn from(s: String) -> Self { + match s.to_lowercase().as_str() { + "string" => ObjectType::String, + "list" => ObjectType::List, + "set" => ObjectType::Set, + "zset" => ObjectType::ZSet, + "hash" => ObjectType::Hash, + "stream" => ObjectType::Stream, + _ => ObjectType::String, } } } @@ -80,10 +276,11 @@ pub enum ScanStateStage { Finished, } +/// Wrapper struct for managing the state of a cluster scan operation. +/// +/// This struct holds an `Arc` to the actual scan state and a status indicating +/// whether the scan is initiating, in progress, or finished. #[derive(Debug, Clone, Default)] -/// A wrapper struct for managing the state of a scan operation in a cluster. 
-/// It holds a reference to the scan state and provides methods for accessing the state. -/// The `status` field indicates the status of the scan operation. pub struct ScanStateRC { scan_state_rc: Arc>, status: ScanStateStage, @@ -121,7 +318,7 @@ impl ScanStateRC { } /// Returns a clone of the scan state, if it exist. - pub(crate) fn get_state_from_wrapper(&self) -> Option { + pub(crate) fn state_from_wrapper(&self) -> Option { if self.status == ScanStateStage::Initiating || self.status == ScanStateStage::Finished { None } else { @@ -130,33 +327,10 @@ impl ScanStateRC { } } -/// This trait defines the methods for interacting with a Redis cluster during scanning. -#[async_trait] -pub(crate) trait ClusterInScan { - /// Retrieves the address associated with a given slot in the cluster. - async fn get_address_by_slot(&self, slot: u16) -> RedisResult>; - - /// Retrieves the epoch of a given address in the cluster. - /// The epoch represents the version of the address, which is updated when a failover occurs or slots migrate in. - async fn get_address_epoch(&self, address: &str) -> Result; - - /// Retrieves the slots assigned to a given address in the cluster. - async fn get_slots_of_address(&self, address: Arc) -> Vec; - - /// Routes a Redis command to a specific address in the cluster. - async fn route_command(&self, cmd: Cmd, address: &str) -> RedisResult; - - /// Check if all slots are covered by the cluster - async fn are_all_slots_covered(&self) -> bool; - - /// Check if the topology of the cluster has changed and refresh the slots if needed - async fn refresh_if_topology_changed(&self) -> RedisResult; -} - -/// Represents the state of a scan operation in a Redis cluster. +/// Represents the state of a cluster scan operation. /// -/// This struct holds information about the current scan state, including the cursor position, -/// the scanned slots map, the address being scanned, and the address's epoch. 
+/// This struct keeps track of the current cursor, which slots have been scanned, +/// the address currently being scanned, and the epoch of that address. #[derive(PartialEq, Debug, Clone)] pub(crate) struct ScanState { // the real cursor in the scan operation @@ -205,7 +379,7 @@ impl ScanState { fn create_finished_state() -> Self { Self { cursor: 0, - scanned_slots_map: [0; BITS_ARRAY_SIZE], + scanned_slots_map: [0; BITS_ARRAY_SIZE as usize], address_in_scan: Default::default(), address_epoch: 0, scan_status: ScanStateStage::Finished, @@ -217,63 +391,68 @@ impl ScanState { /// and the address set to the address associated with slot 0. /// The address epoch is set to the epoch of the address. /// If the address epoch cannot be retrieved, the method returns an error. - async fn initiate_scan(connection: &C) -> RedisResult { - let new_scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE]; + async fn initiate_scan( + core: &InnerCore, + allow_non_covered_slots: bool, + ) -> RedisResult + where + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, + { + let mut new_scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE as usize]; let new_cursor = 0; - let address = connection.get_address_by_slot(0).await?; - let address_epoch = connection.get_address_epoch(&address).await.unwrap_or(0); - Ok(ScanState::new( - new_cursor, - new_scanned_slots_map, - address, - address_epoch, - ScanStateStage::InProgress, - )) - } + let address = + next_address_to_scan(core, 0, &mut new_scanned_slots_map, allow_non_covered_slots)?; - /// Get the next slot to be scanned based on the scanned slots map. - /// If all slots have been scanned, the method returns [`END_OF_SCAN`]. 
- fn get_next_slot(&self, scanned_slots_map: &SlotsBitsArray) -> Option { - let all_slots_scanned = scanned_slots_map.iter().all(|&word| word == u64::MAX); - if all_slots_scanned { - return Some(END_OF_SCAN); - } - for (i, slot) in scanned_slots_map.iter().enumerate() { - let mut mask = 1; - for j in 0..BITS_PER_U64 { - if (slot & mask) == 0 { - return Some((i * BITS_PER_U64 + j) as u16); - } - mask <<= 1; + match address { + NextNodeResult::AllSlotsCompleted => Ok(ScanState::create_finished_state()), + NextNodeResult::Address(address) => { + let address_epoch = core.address_epoch(&address).await.unwrap_or(0); + Ok(ScanState::new( + new_cursor, + new_scanned_slots_map, + address, + address_epoch, + ScanStateStage::InProgress, + )) } } - None } /// Update the scan state without updating the scanned slots map. /// This method is used when the address epoch has changed, and we can't determine which slots are new. /// In this case, we skip updating the scanned slots map and only update the address and cursor. - async fn creating_state_without_slot_changes( + async fn new_scan_state( &self, - connection: &C, - ) -> RedisResult { - let next_slot = self.get_next_slot(&self.scanned_slots_map).unwrap_or(0); - let new_address = if next_slot == END_OF_SCAN { - return Ok(ScanState::create_finished_state()); - } else { - connection.get_address_by_slot(next_slot).await - }; - match new_address { - Ok(address) => { - let new_epoch = connection.get_address_epoch(&address).await.unwrap_or(0); + core: Arc>, + allow_non_covered_slots: bool, + new_scanned_slots_map: Option, + ) -> RedisResult + where + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, + { + // If the new scanned slots map is not provided, use the current scanned slots map. + // The new scanned slots map is provided in the general case when the address epoch has not changed, + // meaning that we could safely update the scanned slots map with the slots owned by the node. 
+ // Epoch change means that some slots are new, and we can't determine which slots been there from the beginning and which are new. + let mut scanned_slots_map = new_scanned_slots_map.unwrap_or(self.scanned_slots_map); + let next_slot = next_slot(&scanned_slots_map).unwrap_or(0); + match next_address_to_scan( + &core, + next_slot, + &mut scanned_slots_map, + allow_non_covered_slots, + ) { + Ok(NextNodeResult::Address(new_address)) => { + let new_epoch = core.address_epoch(&new_address).await.unwrap_or(0); Ok(ScanState::new( 0, - self.scanned_slots_map, - address, + scanned_slots_map, + new_address, new_epoch, ScanStateStage::InProgress, )) } + Ok(NextNodeResult::AllSlotsCompleted) => Ok(ScanState::create_finished_state()), Err(err) => Err(err), } } @@ -284,210 +463,204 @@ impl ScanState { /// If the address epoch has changed, the method skips updating the scanned slots map and only updates the address and cursor. /// If the address epoch has not changed, the method updates the scanned slots map with the slots owned by the address. /// The method returns the new scan state with the updated cursor, scanned slots map, address, and epoch. 
- async fn create_updated_scan_state_for_completed_address( + async fn create_updated_scan_state_for_completed_address( &mut self, - connection: &C, - ) -> RedisResult { - connection - .refresh_if_topology_changed() - .await - .map_err(|err| { - RedisError::from(( - ErrorKind::ResponseError, - "Error during cluster scan: failed to refresh slots", - format!("{:?}", err), - )) - })?; + core: Arc>, + allow_non_covered_slots: bool, + ) -> RedisResult + where + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, + { + ClusterConnInner::check_topology_and_refresh_if_diff( + core.clone(), + &RefreshPolicy::NotThrottable, + ) + .await?; + let mut scanned_slots_map = self.scanned_slots_map; // If the address epoch changed it mean that some slots in the address are new, so we cant know which slots been there from the beginning and which are new, or out and in later. // In this case we will skip updating the scanned_slots_map and will just update the address and the cursor - let new_address_epoch = connection - .get_address_epoch(&self.address_in_scan) - .await - .unwrap_or(0); + let new_address_epoch = core.address_epoch(&self.address_in_scan).await.unwrap_or(0); if new_address_epoch != self.address_epoch { - return self.creating_state_without_slot_changes(connection).await; + return self + .new_scan_state(core, allow_non_covered_slots, None) + .await; } // If epoch wasn't changed, the slots owned by the address after the refresh are all valid as slots that been scanned // So we will update the scanned_slots_map with the slots owned by the address - let slots_scanned = connection - .get_slots_of_address(self.address_in_scan.clone()) - .await; + let slots_scanned = core.slots_of_address(self.address_in_scan.clone()).await; for slot in slots_scanned { - let slot_index = slot as usize / BITS_PER_U64; - let slot_bit = slot as usize % BITS_PER_U64; - scanned_slots_map[slot_index] |= 1 << slot_bit; + mark_slot_as_scanned(&mut scanned_slots_map, slot); } // Get the next 
address to scan and its param base on the next slot set to 0 in the scanned_slots_map - let next_slot = self.get_next_slot(&scanned_slots_map).unwrap_or(0); - let new_address = if next_slot == END_OF_SCAN { - return Ok(ScanState::create_finished_state()); - } else { - connection.get_address_by_slot(next_slot).await - }; - match new_address { - Ok(new_address) => { - let new_epoch = connection - .get_address_epoch(&new_address) - .await - .unwrap_or(0); - let new_cursor = 0; - Ok(ScanState::new( - new_cursor, - scanned_slots_map, - new_address, - new_epoch, - ScanStateStage::InProgress, - )) - } - Err(err) => Err(err), - } + self.new_scan_state(core, allow_non_covered_slots, Some(scanned_slots_map)) + .await } } -// Implement the [`ClusterInScan`] trait for [`InnerCore`] of async cluster connection. -#[async_trait] -impl ClusterInScan for Core +fn mark_slot_as_scanned(scanned_slots_map: &mut SlotsBitsArray, slot: u16) { + let slot_index = (slot as u64 / BITS_PER_U64 as u64) as usize; + let slot_bit = slot as u64 % (BITS_PER_U64 as u64); + scanned_slots_map[slot_index] |= 1 << slot_bit; +} + +#[derive(PartialEq, Debug, Clone)] +/// The address type representing a connection address +/// +/// # Fields +/// +/// * `Address` - A thread-safe shared string containing the server address +/// * `AllSlotsCompleted` - Indicates that all slots have been scanned +enum NextNodeResult { + Address(Arc), + AllSlotsCompleted, +} + +/// Determines the next node address to scan within the cluster. +/// +/// This asynchronous function iterates through cluster slots to find the next available +/// node responsible for scanning. If a slot is not covered and `allow_non_covered_slots` +/// is enabled, it marks the slot as scanned and proceeds to the next one. The process +/// continues until a valid address is found or all slots have been scanned. +/// +/// # Arguments +/// +/// * `core` - Reference to the cluster's inner core connection. +/// * `slot` - The current slot number to scan. 
+/// * `scanned_slots_map` - Mutable reference to the bitmap tracking scanned slots. +/// * `allow_non_covered_slots` - Flag indicating whether to allow scanning of uncovered slots. +/// +/// # Returns +/// +/// * `RedisResult` - Returns the next node address to scan or indicates completion. +/// +/// # Type Parameters +/// +/// * `C`: The connection type that must implement `ConnectionLike`, `Connect`, `Clone`, `Send`, `Sync`, and `'static`. +/// +fn next_address_to_scan( + core: &InnerCore, + mut slot: u16, + scanned_slots_map: &mut SlotsBitsArray, + allow_non_covered_slots: bool, +) -> RedisResult where C: ConnectionLike + Connect + Clone + Send + Sync + 'static, { - async fn get_address_by_slot(&self, slot: u16) -> RedisResult> { - let address = self - .get_address_from_slot(slot, SlotAddr::ReplicaRequired) - .await; - match address { - Some(addr) => Ok(addr), - None => { - if self.are_all_slots_covered().await { - Err(RedisError::from(( - ErrorKind::IoError, - "Failed to get connection to the node cover the slot, please check the cluster configuration ", - ))) - } else { - Err(RedisError::from(( - ErrorKind::NotAllSlotsCovered, - "All slots are not covered by the cluster, please check the cluster configuration ", - ))) - } - } + loop { + if slot == END_OF_SCAN { + return Ok(NextNodeResult::AllSlotsCompleted); } - } - async fn get_address_epoch(&self, address: &str) -> Result { - self.as_ref().get_address_epoch(address).await - } - async fn get_slots_of_address(&self, address: Arc) -> Vec { - self.as_ref().get_slots_of_address(address).await - } - async fn route_command(&self, cmd: Cmd, address: &str) -> RedisResult { - let routing = InternalRoutingInfo::SingleNode(InternalSingleNodeRouting::ByAddress( - address.to_string(), - )); - let core = self.to_owned(); - let response = ClusterConnInner::::try_cmd_request(Arc::new(cmd), routing, core) - .await - .map_err(|err| err.1)?; - match response { - Response::Single(value) => Ok(value), - _ => 
Err(RedisError::from(( - ErrorKind::ClientError, - "Expected single response, got unexpected response", - ))), + if let Some(addr) = core + .conn_lock + .read() + .expect(MUTEX_READ_ERR) + .slot_map + .node_address_for_slot(slot, SlotAddr::ReplicaRequired) + { + // Found a valid address for the slot + return Ok(NextNodeResult::Address(addr)); + } else if allow_non_covered_slots { + // Mark the current slot as scanned + mark_slot_as_scanned(scanned_slots_map, slot); + slot = next_slot(scanned_slots_map).unwrap(); + } else { + // Error if slots are not covered and scanning is not allowed + return Err(RedisError::from(( + ErrorKind::NotAllSlotsCovered, + "Could not find an address covering a slot, SCAN operation cannot continue \n + If you want to continue scanning even if some slots are not covered, set allow_non_covered_slots to true \n + Note that this may lead to incomplete scanning, and the SCAN operation lose its all guarantees ", + ))); } } - async fn are_all_slots_covered(&self) -> bool { - ClusterConnInner::::check_if_all_slots_covered( - &self.conn_lock.read().expect(MUTEX_READ_ERR).slot_map, - ) - } - async fn refresh_if_topology_changed(&self) -> RedisResult { - ClusterConnInner::check_topology_and_refresh_if_diff( - self.to_owned(), - // The cluster SCAN implementation must refresh the slots when a topology change is found - // to ensure the scan logic is correct. - &RefreshPolicy::NotThrottable, - ) - .await +} + +/// Get the next slot to be scanned based on the scanned slots map. +/// If all slots have been scanned, the method returns [`END_OF_SCAN`]. 
+fn next_slot(scanned_slots_map: &SlotsBitsArray) -> Option { + let all_slots_scanned = scanned_slots_map.iter().all(|&word| word == u64::MAX); + if all_slots_scanned { + return Some(END_OF_SCAN); + } + for (i, slot) in scanned_slots_map.iter().enumerate() { + let mut mask = 1; + for j in 0..BITS_PER_U64 { + if (slot & mask) == 0 { + return Some(i as u16 * BITS_PER_U64 + j); + } + mask <<= 1; + } } + None } -/// Perform a cluster scan operation. -/// This function performs a scan operation in a Redis cluster using the given [`ClusterInScan`] connection. -/// It scans the cluster for keys based on the given `ClusterScanArgs` arguments. -/// The function returns a tuple containing the new scan state cursor and the keys found in the scan operation. -/// If the scan operation fails, an error is returned. +/// Performs a cluster-wide `SCAN` operation. +/// +/// This function scans the cluster for keys based on the provided arguments. +/// It handles the initiation of a new scan or continues an existing scan, manages +/// scan state, handles routing failures, and ensures consistent scanning across +/// cluster topology changes. /// /// # Arguments -/// * `core` - The connection to the Redis cluster. -/// * `cluster_scan_args` - The arguments for the cluster scan operation. +/// +/// * `core` - An `Arc`-wrapped reference to the cluster connection (`InnerCore`). +/// * `cluster_scan_args` - Configuration and arguments for the scan operation. /// /// # Returns -/// A tuple containing the new scan state cursor and the keys found in the scan operation. -/// If the scan operation fails, an error is returned. +/// +/// * `RedisResult<(ScanStateRC, Vec)>` - +/// - On success: A tuple containing the updated scan state (`ScanStateRC`) and a vector of `Value`s representing the found keys. +/// - On failure: A `RedisError` detailing the reason for the failure. 
+/// +/// # Type Parameters +/// +/// * `C`: The connection type that must implement `ConnectionLike`, `Connect`, `Clone`, `Send`, `Sync`, and `'static`. +/// pub(crate) async fn cluster_scan( - core: C, + core: Arc>, cluster_scan_args: ClusterScanArgs, ) -> RedisResult<(ScanStateRC, Vec)> where - C: ClusterInScan, + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, { - let ClusterScanArgs { - scan_state_cursor, - match_pattern, - count, - object_type, - } = cluster_scan_args; - // If scan_state is None, meaning we start a new scan - let scan_state = match scan_state_cursor.get_state_from_wrapper() { + // Extract the current scan state cursor and the flag for non-covered slots + let scan_state_cursor = &cluster_scan_args.scan_state_cursor; + let allow_non_covered_slots = cluster_scan_args.allow_non_covered_slots; + + // Determine the current scan state: + // - If an existing scan state is present, use it. + // - Otherwise, initiate a new scan. + let scan_state = match scan_state_cursor.state_from_wrapper() { Some(state) => state, - None => match ScanState::initiate_scan(&core).await { + None => match ScanState::initiate_scan(&core, allow_non_covered_slots).await { Ok(state) => state, Err(err) => { + // Early return if initiating the scan fails return Err(err); } }, }; - // Send the actual scan command to the address in the scan_state - let scan_result = send_scan( - &scan_state, - &core, - match_pattern.clone(), - count, - object_type.clone(), - ) - .await; - let ((new_cursor, new_keys), mut scan_state): ((u64, Vec), ScanState) = match scan_result - { - Ok(scan_result) => (from_redis_value(&scan_result)?, scan_state.clone()), - Err(err) => match err.kind() { - // If the scan command failed to route to the address because the address is not found in the cluster or - // the connection to the address cant be reached from different reasons, we will check we want to check if - // the problem is problem that we can recover from like failover or scale down or 
some network issue - // that we can retry the scan command to an address that own the next slot we are at. - ErrorKind::IoError - | ErrorKind::AllConnectionsUnavailable - | ErrorKind::ConnectionNotFoundForRoute => { - let retry = - retry_scan(&scan_state, &core, match_pattern, count, object_type).await?; - (from_redis_value(&retry.0?)?, retry.1) - } - _ => return Err(err), - }, - }; + // Send the SCAN command using the current scan state and scan arguments + let ((new_cursor, new_keys), mut scan_state) = + try_scan(&scan_state, &cluster_scan_args, core.clone()).await?; - // If the cursor is 0, meaning we finished scanning the address - // we will update the scan state to get the next address to scan + // Check if the cursor indicates the end of the current scan segment if new_cursor == 0 { + // Update the scan state to move to the next address/node in the cluster scan_state = scan_state - .create_updated_scan_state_for_completed_address(&core) + .create_updated_scan_state_for_completed_address(core, allow_non_covered_slots) .await?; } - // If the address is empty, meaning we finished scanning all the address + // Verify if the entire cluster has been scanned if scan_state.scan_status == ScanStateStage::Finished { + // Return the final scan state and the collected keys return Ok((ScanStateRC::create_finished(), new_keys)); } + // Update the scan state with the new cursor and maintain the progress scan_state = ScanState::new( new_cursor, scan_state.scanned_slots_map, @@ -495,256 +668,214 @@ where scan_state.address_epoch, ScanStateStage::InProgress, ); + + // Return the updated scan state and the newly found keys Ok((ScanStateRC::from_scan_state(scan_state), new_keys)) } -// Send the scan command to the address in the scan_state +/// Sends the `SCAN` command to the specified address. +/// +/// # Arguments +/// +/// * `scan_state` - The current scan state. 
+/// * `cluster_scan_args` - Arguments for the scan operation, including match pattern, count, object type, and allow_non_covered_slots. +/// * `core` - The cluster connection. +/// +/// # Returns +/// +/// A `RedisResult` containing the response from the `SCAN` command. async fn send_scan( scan_state: &ScanState, - core: &C, - match_pattern: Option>, - count: Option, - object_type: Option, + cluster_scan_args: &ClusterScanArgs, + core: Arc>, ) -> RedisResult where - C: ClusterInScan, + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, { - let mut scan_command = cmd("SCAN"); - scan_command.arg(scan_state.cursor); - if let Some(match_pattern) = match_pattern { - scan_command.arg("MATCH").arg(match_pattern); - } - if let Some(count) = count { - scan_command.arg("COUNT").arg(count); - } - if let Some(object_type) = object_type { - scan_command.arg("TYPE").arg(object_type.to_string()); + if let Some(conn_future) = core + .connection_for_address(&scan_state.address_in_scan) + .await + { + let mut conn = conn_future.await; + let mut scan_command = cmd("SCAN"); + scan_command.arg(scan_state.cursor); + if let Some(match_pattern) = cluster_scan_args.match_pattern.as_ref() { + scan_command.arg("MATCH").arg(match_pattern); + } + if let Some(count) = cluster_scan_args.count { + scan_command.arg("COUNT").arg(count); + } + if let Some(object_type) = &cluster_scan_args.object_type { + scan_command.arg("TYPE").arg(object_type.to_string()); + } + conn.req_packed_command(&scan_command).await + } else { + Err(RedisError::from(( + ErrorKind::ConnectionNotFoundForRoute, + "Cluster scan failed. No connection available for address: ", + format!("{}", scan_state.address_in_scan), + ))) } +} - core.route_command(scan_command, &scan_state.address_in_scan) - .await +/// Checks if the error is retryable during scanning. +/// Retryable errors include network issues, cluster topology changes, and unavailable connections. 
+/// Scan operations are not keyspace operations, so they are not affected by keyspace errors like `MOVED`. +fn is_scanwise_retryable_error(err: &RedisError) -> bool { + matches!( + err.kind(), + ErrorKind::IoError + | ErrorKind::AllConnectionsUnavailable + | ErrorKind::ConnectionNotFoundForRoute + | ErrorKind::ClusterDown + | ErrorKind::FatalSendError + ) } -// If the scan command failed to route to the address we will check we will first refresh the slots, we will check if all slots are covered by cluster, -// and if so we will try to get a new address to scan for handling case of failover. -// if all slots are not covered by the cluster we will return an error indicating that the cluster is not well configured. -// if all slots are covered by cluster but we failed to get a new address to scan we will return an error indicating that we failed to get a new address to scan. -// if we got a new address to scan but the scan command failed to route to the address we will return an error indicating that we failed to route the command. -async fn retry_scan( +/// Gets the next scan state by finding the next address to scan. +/// The method updates the scanned slots map and retrieves the next address to scan. +/// If the address epoch has changed, the method creates a new scan state without updating the scanned slots map. +/// If the address epoch has not changed, the method updates the scanned slots map with the slots owned by the address. +/// The method returns the new scan state with the updated cursor, scanned slots map, address, and epoch. +/// The method is used to continue scanning the cluster after completing a scan segment. 
+async fn next_scan_state( + core: &Arc>, scan_state: &ScanState, - core: &C, - match_pattern: Option>, - count: Option, - object_type: Option, -) -> RedisResult<(RedisResult, ScanState)> + cluster_scan_args: &ClusterScanArgs, +) -> RedisResult> where - C: ClusterInScan, + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, { - // TODO: This mechanism of refreshing on failure to route to address should be part of the routing mechanism - // After the routing mechanism is updated to handle this case, this refresh in the case bellow should be removed - core.refresh_if_topology_changed().await.map_err(|err| { - RedisError::from(( - ErrorKind::ResponseError, - "Error during cluster scan: failed to refresh slots", - format!("{:?}", err), - )) - })?; - if !core.are_all_slots_covered().await { - return Err(RedisError::from(( - ErrorKind::NotAllSlotsCovered, - "Not all slots are covered by the cluster, please check the cluster configuration", - ))); + let next_slot = next_slot(&scan_state.scanned_slots_map).unwrap_or(0); + let mut scanned_slots_map = scan_state.scanned_slots_map; + match next_address_to_scan( + core, + next_slot, + &mut scanned_slots_map, + cluster_scan_args.allow_non_covered_slots, + ) { + Ok(NextNodeResult::Address(new_address)) => { + let new_epoch = core.address_epoch(&new_address).await.unwrap_or(0); + Ok(Some(ScanState::new( + 0, + scanned_slots_map, + new_address, + new_epoch, + ScanStateStage::InProgress, + ))) + } + Ok(NextNodeResult::AllSlotsCompleted) => Ok(None), + Err(err) => Err(err), + } +} + +/// Attempts to scan the cluster for keys based on the current scan state. +/// Sends the `SCAN` command to the current address and processes the response. +/// On retryable errors, refreshes the cluster topology and retries the scan. +/// Returns the new cursor and keys found upon success. 
+async fn try_scan( + scan_state: &ScanState, + cluster_scan_args: &ClusterScanArgs, + core: Arc>, +) -> RedisResult<((u64, Vec), ScanState)> +where + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, +{ + let mut new_scan_state = scan_state.clone(); + + loop { + match send_scan(&new_scan_state, cluster_scan_args, core.clone()).await { + Ok(scan_response) => { + let (new_cursor, new_keys) = from_redis_value::<(u64, Vec)>(&scan_response)?; + return Ok(((new_cursor, new_keys), new_scan_state)); + } + Err(err) if is_scanwise_retryable_error(&err) => { + ClusterConnInner::check_topology_and_refresh_if_diff( + core.clone(), + &RefreshPolicy::NotThrottable, + ) + .await?; + + if let Some(next_scan_state) = + next_scan_state(&core, &new_scan_state, cluster_scan_args).await? + { + new_scan_state = next_scan_state; + } else { + return Ok(((0, Vec::new()), ScanState::create_finished_state())); + } + } + Err(err) => return Err(err), + } } - // If for some reason we failed to reach the address we don't know if its a scale down or a failover. - // Since it might be scale down we cant just keep going with the current state we the same cursor as we are at - // the same point in the new address, so we need to get the new address own the next slot that haven't been scanned - // and start from the beginning of this address. 
- let next_slot = scan_state - .get_next_slot(&scan_state.scanned_slots_map) - .unwrap_or(0); - let address = core.get_address_by_slot(next_slot).await?; - - let new_epoch = core.get_address_epoch(&address).await.unwrap_or(0); - let scan_state = &ScanState::new( - 0, - scan_state.scanned_slots_map, - address, - new_epoch, - ScanStateStage::InProgress, - ); - let res = ( - send_scan(scan_state, core, match_pattern, count, object_type).await, - scan_state.clone(), - ); - Ok(res) } #[cfg(test)] mod tests { - use super::*; - #[test] - fn test_creation_of_empty_scan_wrapper() { - let scan_state_wrapper = ScanStateRC::new(); - assert!(scan_state_wrapper.status == ScanStateStage::Initiating); - } + #[tokio::test] + async fn test_cluster_scan_args_builder() { + let args = ClusterScanArgs::builder() + .with_match_pattern("user:*") + .with_count(100) + .with_object_type(ObjectType::Hash) + .allow_non_covered_slots(true) + .build(); - #[test] - fn test_creation_of_scan_state_wrapper_from() { - let scan_state = ScanState { - cursor: 0, - scanned_slots_map: [0; BITS_ARRAY_SIZE], - address_in_scan: String::from("address1").into(), - address_epoch: 1, - scan_status: ScanStateStage::InProgress, - }; - - let scan_state_wrapper = ScanStateRC::from_scan_state(scan_state); - assert!(!scan_state_wrapper.is_finished()); + assert_eq!(args.match_pattern, Some(b"user:*".to_vec())); + assert_eq!(args.count, Some(100)); + assert_eq!(args.object_type, Some(ObjectType::Hash)); + assert!(args.allow_non_covered_slots); } - #[test] - // Test the get_next_slot method - fn test_scan_state_get_next_slot() { - let scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE]; - let scan_state = ScanState { - cursor: 0, - scanned_slots_map, - address_in_scan: String::from("address1").into(), - address_epoch: 1, - scan_status: ScanStateStage::InProgress, - }; - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(0)); - // Set the first slot to 1 - let mut 
scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE]; - scanned_slots_map[0] = 1; - let scan_state = ScanState { - cursor: 0, - scanned_slots_map, - address_in_scan: String::from("address1").into(), - address_epoch: 1, - scan_status: ScanStateStage::InProgress, - }; - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(1)); - } - // Create a mock connection - struct MockConnection; - #[async_trait] - impl ClusterInScan for MockConnection { - async fn refresh_if_topology_changed(&self) -> RedisResult { - Ok(true) - } - async fn get_address_by_slot(&self, _slot: u16) -> RedisResult> { - Ok("mock_address".to_string().into()) - } - async fn get_address_epoch(&self, _address: &str) -> Result { - Ok(0) - } - async fn get_slots_of_address(&self, address: Arc) -> Vec { - if address.as_str() == "mock_address" { - vec![3, 4, 5] - } else { - vec![0, 1, 2] - } - } - async fn route_command(&self, _: Cmd, _: &str) -> RedisResult { - unimplemented!() - } - async fn are_all_slots_covered(&self) -> bool { - true - } - } - // Test the initiate_scan function #[tokio::test] - async fn test_initiate_scan() { - let connection = MockConnection; - let scan_state = ScanState::initiate_scan(&connection).await.unwrap(); - - // Assert that the scan state is initialized correctly - assert_eq!(scan_state.cursor, 0); - assert_eq!(scan_state.scanned_slots_map, [0; BITS_ARRAY_SIZE]); - assert_eq!( - scan_state.address_in_scan, - "mock_address".to_string().into() + async fn test_scan_state_new() { + let address = Arc::new("127.0.0.1:6379".to_string()); + let scan_state = ScanState::new( + 0, + [0; BITS_ARRAY_SIZE as usize], + address.clone(), + 1, + ScanStateStage::InProgress, ); - assert_eq!(scan_state.address_epoch, 0); - } - // Test the get_next_slot function - #[test] - fn test_get_next_slot() { - let scan_state = ScanState { - cursor: 0, - scanned_slots_map: [0; BITS_ARRAY_SIZE], - address_in_scan: "".to_string().into(), - address_epoch: 0, - 
scan_status: ScanStateStage::InProgress, - }; - // Test when all first bits of each u6 are set to 1, the next slots should be 1 - let scanned_slots_map: SlotsBitsArray = [1; BITS_ARRAY_SIZE]; - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(1)); - - // Test when all slots are scanned, the next slot should be 0 - let scanned_slots_map: SlotsBitsArray = [u64::MAX; BITS_ARRAY_SIZE]; - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(16385)); - - // Test when first, second, fourth, sixth and eighth slots scanned, the next slot should be 2 - let mut scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE]; - scanned_slots_map[0] = 171; // 10101011 - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(2)); + assert_eq!(scan_state.cursor, 0); + assert_eq!(scan_state.scanned_slots_map, [0; BITS_ARRAY_SIZE as usize]); + assert_eq!(scan_state.address_in_scan, address); + assert_eq!(scan_state.address_epoch, 1); + assert_eq!(scan_state.scan_status, ScanStateStage::InProgress); } - // Test the update_scan_state_and_get_next_address function #[tokio::test] - async fn test_update_scan_state_and_get_next_address() { - let connection = MockConnection; - let scan_state = ScanState::initiate_scan(&connection).await; - let updated_scan_state = scan_state - .unwrap() - .create_updated_scan_state_for_completed_address(&connection) - .await - .unwrap(); + async fn test_scan_state_create_finished() { + let scan_state = ScanState::create_finished_state(); - // cursor should be reset to 0 - assert_eq!(updated_scan_state.cursor, 0); + assert_eq!(scan_state.cursor, 0); + assert_eq!(scan_state.scanned_slots_map, [0; BITS_ARRAY_SIZE as usize]); + assert_eq!(scan_state.address_in_scan, Arc::new(String::new())); + assert_eq!(scan_state.address_epoch, 0); + assert_eq!(scan_state.scan_status, ScanStateStage::Finished); + } - // address_in_scan should be updated to the new 
address - assert_eq!( - updated_scan_state.address_in_scan, - "mock_address".to_string().into() - ); + #[tokio::test] + async fn test_mark_slot_as_scanned() { + let mut scanned_slots_map = [0; BITS_ARRAY_SIZE as usize]; + mark_slot_as_scanned(&mut scanned_slots_map, 5); - // address_epoch should be updated to the new address epoch - assert_eq!(updated_scan_state.address_epoch, 0); + assert_eq!(scanned_slots_map[0], 1 << 5); } #[tokio::test] - async fn test_update_scan_state_without_updating_scanned_map() { - let connection = MockConnection; + async fn test_next_slot() { let scan_state = ScanState::new( 0, - [0; BITS_ARRAY_SIZE], - "address".to_string().into(), - 0, + [0; BITS_ARRAY_SIZE as usize], + Arc::new("127.0.0.1:6379".to_string()), + 1, ScanStateStage::InProgress, ); - let scanned_slots_map = scan_state.scanned_slots_map; - let updated_scan_state = scan_state - .creating_state_without_slot_changes(&connection) - .await - .unwrap(); - assert_eq!(updated_scan_state.scanned_slots_map, scanned_slots_map); - assert_eq!(updated_scan_state.cursor, 0); - assert_eq!( - updated_scan_state.address_in_scan, - "mock_address".to_string().into() - ); - assert_eq!(updated_scan_state.address_epoch, 0); + let next_slot = next_slot(&scan_state.scanned_slots_map); + + assert_eq!(next_slot, Some(0)); } } diff --git a/glide-core/redis-rs/redis/src/commands/mod.rs b/glide-core/redis-rs/redis/src/commands/mod.rs index 22a68cc987..19dae750c2 100644 --- a/glide-core/redis-rs/redis/src/commands/mod.rs +++ b/glide-core/redis-rs/redis/src/commands/mod.rs @@ -16,6 +16,9 @@ mod json; #[cfg(feature = "cluster-async")] pub use cluster_scan::ScanStateRC; +#[cfg(feature = "cluster-async")] +pub use cluster_scan::ClusterScanArgs; + #[cfg(feature = "cluster-async")] pub(crate) mod cluster_scan; diff --git a/glide-core/redis-rs/redis/src/lib.rs b/glide-core/redis-rs/redis/src/lib.rs index 0c960f3b4e..7121ee03c7 100644 --- a/glide-core/redis-rs/redis/src/lib.rs +++ 
b/glide-core/redis-rs/redis/src/lib.rs @@ -457,6 +457,9 @@ pub use crate::commands::ScanStateRC; #[cfg(feature = "cluster-async")] pub use crate::commands::ObjectType; +#[cfg(feature = "cluster-async")] +pub use crate::commands::ClusterScanArgs; + #[cfg(feature = "cluster")] mod cluster_client; diff --git a/glide-core/redis-rs/redis/tests/test_cluster_scan.rs b/glide-core/redis-rs/redis/tests/test_cluster_scan.rs index cfc4bae594..96910fe7f8 100644 --- a/glide-core/redis-rs/redis/tests/test_cluster_scan.rs +++ b/glide-core/redis-rs/redis/tests/test_cluster_scan.rs @@ -5,9 +5,68 @@ mod support; mod test_cluster_scan_async { use crate::support::*; use rand::Rng; - use redis::cluster_routing::{RoutingInfo, SingleNodeRoutingInfo}; - use redis::{cmd, from_redis_value, ObjectType, RedisResult, ScanStateRC, Value}; + use redis::cluster_routing::{ + MultipleNodeRoutingInfo, ResponsePolicy, RoutingInfo, SingleNodeRoutingInfo, + }; + use redis::{ + cmd, from_redis_value, ClusterScanArgs, ObjectType, RedisResult, ScanStateRC, Value, + }; use std::time::Duration; + use tokio::time::{sleep, Instant}; + + async fn del_slots_range( + cluster: &TestClusterContext, + range: (u16, u16), + ) -> Result<(), &'static str> { + let mut cluster_conn = cluster.async_connection(None).await; + let mut del_slots_cmd = cmd("CLUSTER"); + let (start, end) = range; + del_slots_cmd.arg("DELSLOTSRANGE").arg(start).arg(end); + let _: RedisResult = cluster_conn + .route_command( + &del_slots_cmd, + RoutingInfo::MultiNode(( + MultipleNodeRoutingInfo::AllNodes, + Some(ResponsePolicy::AllSucceeded), + )), + ) + .await; + + let timeout = Duration::from_secs(10); + let mut invalid = false; + loop { + sleep(Duration::from_millis(500)).await; + + let now = Instant::now(); + if now.elapsed() > timeout { + return Err("Timeout while waiting for slots to be deleted"); + } + + let slot_distribution = + cluster.get_slots_ranges_distribution(&cluster.get_cluster_nodes().await); + for (_, _, _, slot_ranges) in 
slot_distribution { + println!("slot_ranges: {:?}", slot_ranges); + for slot_range in slot_ranges { + let (slot_start, slot_end) = (slot_range[0], slot_range[1]); + + println!("slot_start: {}, slot_end: {}", slot_start, slot_end); + if slot_start >= start && slot_start <= end { + invalid = true; + continue; + } + if slot_end >= start && slot_end <= end { + invalid = true; + continue; + } + } + } + + if invalid { + continue; + } + return Ok(()); + } + } async fn kill_one_node( cluster: &TestClusterContext, @@ -49,7 +108,12 @@ mod test_cluster_scan_async { #[tokio::test] #[serial_test::serial] async fn test_async_cluster_scan() { - let cluster = TestClusterContext::new(3, 0); + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(1), + false, + ); let mut connection = cluster.async_connection(None).await; // Set some keys @@ -67,14 +131,14 @@ mod test_cluster_scan_async { let mut keys: Vec = vec![]; loop { let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, None) + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) .await .unwrap(); scan_state_rc = next_cursor; let mut scan_keys = scan_keys .into_iter() .map(|v| from_redis_value(&v).unwrap()) - .collect::>(); // Change the type of `keys` to `Vec` + .collect::>(); keys.append(&mut scan_keys); if scan_state_rc.is_finished() { break; @@ -88,10 +152,114 @@ mod test_cluster_scan_async { } } + #[tokio::test] + #[serial_test::serial] + async fn test_async_cluster_scan_with_allow_non_covered_slots() { + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(1), + false, + ); + + let mut connection = cluster.async_connection(None).await; + let mut expected_keys: Vec = Vec::new(); + + for i in 0..1000 { + let key = format!("key{}", i); + let _: Result<(), redis::RedisError> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + 
expected_keys.push(key); + } + + let mut scan_state_rc = ScanStateRC::new(); + let mut keys: Vec = Vec::new(); + loop { + let cluster_scan_args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection + .cluster_scan(scan_state_rc, cluster_scan_args) + .await + .unwrap(); + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + + keys.sort(); + expected_keys.sort(); + assert_eq!(keys, expected_keys); + } + + #[tokio::test] + #[serial_test::serial] + async fn test_async_cluster_scan_with_delslots() { + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(1), + false, + ); + let mut connection = cluster.async_connection(None).await; + let mut expected_keys: Vec = Vec::new(); + + for i in 0..1000 { + let key = format!("key{}", i); + let _: Result<(), redis::RedisError> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + expected_keys.push(key); + } + + del_slots_range(&cluster, (1, 100)).await.unwrap(); + + let mut scan_state_rc = ScanStateRC::new(); + let mut keys: Vec = Vec::new(); + loop { + let cluster_scan_args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection + .cluster_scan(scan_state_rc, cluster_scan_args) + .await + .unwrap(); + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + + keys.sort(); + expected_keys.sort(); + assert_eq!(keys, expected_keys); + } + #[tokio::test] #[serial_test::serial] // test cluster scan with slot migration in the middle async fn 
test_async_cluster_scan_with_migration() { - let cluster = TestClusterContext::new(3, 0); + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(1), + false, + ); let mut connection = cluster.async_connection(None).await; // Set some keys @@ -114,7 +282,7 @@ mod test_cluster_scan_async { loop { count += 1; let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, None) + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) .await .unwrap(); scan_state_rc = next_cursor; @@ -189,18 +357,21 @@ mod test_cluster_scan_async { let mut keys: Vec = Vec::new(); let mut count = 0; let mut result: RedisResult = Ok(Value::Nil); + let mut next_cursor = ScanStateRC::new(); + let mut scan_keys; loop { count += 1; - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; - let (next_cursor, scan_keys) = match scan_response { - Ok((cursor, keys)) => (cursor, keys), + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; + (next_cursor, scan_keys) = match scan_response { + Ok((cursor, keys)) => (cursor.clone(), keys), Err(e) => { result = Err(e); break; } }; - scan_state_rc = next_cursor; + scan_state_rc = next_cursor.clone(); keys.extend(scan_keys.into_iter().map(|v| from_redis_value(&v).unwrap())); if scan_state_rc.is_finished() { break; @@ -225,6 +396,47 @@ mod test_cluster_scan_async { } // We expect an error of finding address assert!(result.is_err()); + + // Test we can continue scanning after the fail using allow_non_covered_slots=true + scan_state_rc = next_cursor; + // config cluster to allow missing slots + let mut config_cmd = cmd("CONFIG"); + config_cmd + .arg("SET") + .arg("cluster-require-full-coverage") + .arg("no"); + let res: RedisResult = connection + .route_command( + &config_cmd, + 
RoutingInfo::MultiNode((MultipleNodeRoutingInfo::AllNodes, None)), + ) + .await; + print!("config result: {:?}", res); + let args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + loop { + let res = connection + .cluster_scan(scan_state_rc.clone(), args.clone()) + .await; + let (next_cursor, scan_keys): (ScanStateRC, Vec) = match res { + Ok((cursor, keys)) => (cursor.clone(), keys), + Err(e) => { + println!("error: {:?}", e); + break; + } + }; + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + assert!(scan_state_rc.is_finished()); } #[tokio::test] @@ -268,8 +480,9 @@ mod test_cluster_scan_async { let mut count = 0; loop { count += 1; - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; if scan_response.is_err() { println!("error: {:?}", scan_response); } @@ -439,12 +652,13 @@ mod test_cluster_scan_async { } // Scan the keys let mut scan_state_rc = ScanStateRC::new(); - let mut keys: Vec = Vec::new(); + let mut keys: Vec = vec![]; let mut count = 0; loop { count += 1; - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; if scan_response.is_err() { println!("error: {:?}", scan_response); } @@ -513,7 +727,7 @@ mod test_cluster_scan_async { let mut keys: Vec = vec![]; loop { let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, None) + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) .await .unwrap(); scan_state_rc = next_cursor; @@ 
-574,7 +788,7 @@ mod test_cluster_scan_async { let mut keys: Vec = vec![]; loop { let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, None) + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) .await .unwrap(); scan_state_rc = next_cursor; @@ -642,15 +856,20 @@ mod test_cluster_scan_async { let mut scan_state_rc = ScanStateRC::new(); let mut keys: Vec = vec![]; loop { + let cluster_scan_args = ClusterScanArgs::builder() + .with_match_pattern("key:pattern:*") + .allow_non_covered_slots(false) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan_with_pattern(scan_state_rc, "key:pattern:*", None, None) + .cluster_scan(scan_state_rc, cluster_scan_args) .await .unwrap(); scan_state_rc = next_cursor; let mut scan_keys = scan_keys .into_iter() .map(|v| from_redis_value(&v).unwrap()) - .collect::>(); // Change the type of `keys` to `Vec` + .collect::>(); keys.append(&mut scan_keys); if scan_state_rc.is_finished() { break; @@ -665,7 +884,7 @@ mod test_cluster_scan_async { for key in expected_keys.iter() { assert!(keys.contains(key)); } - assert!(keys.len() == expected_keys.len()); + assert_eq!(keys.len(), expected_keys.len()); } #[tokio::test] @@ -701,15 +920,20 @@ mod test_cluster_scan_async { let mut scan_state_rc = ScanStateRC::new(); let mut keys: Vec = vec![]; loop { + let cluster_scan_args = ClusterScanArgs::builder() + .with_object_type(ObjectType::Set) + .allow_non_covered_slots(false) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, Some(ObjectType::Set)) + .cluster_scan(scan_state_rc, cluster_scan_args) .await .unwrap(); scan_state_rc = next_cursor; let mut scan_keys = scan_keys .into_iter() .map(|v| from_redis_value(&v).unwrap()) - .collect::>(); // Change the type of `keys` to `Vec` + .collect::>(); keys.append(&mut scan_keys); if scan_state_rc.is_finished() { break; @@ -724,7 +948,7 @@ mod 
test_cluster_scan_async { for key in expected_keys.iter() { assert!(keys.contains(key)); } - assert!(keys.len() == expected_keys.len()); + assert_eq!(keys.len(), expected_keys.len()); } #[tokio::test] @@ -755,24 +979,35 @@ mod test_cluster_scan_async { let mut keys: Vec = vec![]; let mut comparing_times = 0; loop { + let cluster_scan_args = ClusterScanArgs::builder() + .with_count(100) + .allow_non_covered_slots(false) + .build(); + + let cluster_scan_args_no_count = ClusterScanArgs::builder() + .allow_non_covered_slots(false) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc.clone(), Some(100), None) + .cluster_scan(scan_state_rc.clone(), cluster_scan_args) .await .unwrap(); + let (_, scan_without_count_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, Some(100), None) + .cluster_scan(scan_state_rc, cluster_scan_args_no_count) .await .unwrap(); + if !scan_keys.is_empty() && !scan_without_count_keys.is_empty() { assert!(scan_keys.len() >= scan_without_count_keys.len()); - comparing_times += 1; } + scan_state_rc = next_cursor; let mut scan_keys = scan_keys .into_iter() .map(|v| from_redis_value(&v).unwrap()) - .collect::>(); // Change the type of `keys` to `Vec` + .collect::>(); keys.append(&mut scan_keys); if scan_state_rc.is_finished() { break; @@ -788,7 +1023,7 @@ mod test_cluster_scan_async { for key in expected_keys.iter() { assert!(keys.contains(key)); } - assert!(keys.len() == expected_keys.len()); + assert_eq!(keys.len(), expected_keys.len()); } #[tokio::test] @@ -821,8 +1056,9 @@ mod test_cluster_scan_async { let mut count = 0; loop { count += 1; - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; if scan_response.is_err() { println!("error: {:?}", scan_response); } @@ -835,7 +1071,7 @@ 
mod test_cluster_scan_async { if count == 5 { drop(cluster); let scan_response: RedisResult<(ScanStateRC, Vec)> = connection - .cluster_scan(scan_state_rc.clone(), None, None) + .cluster_scan(scan_state_rc.clone(), ClusterScanArgs::default()) .await; assert!(scan_response.is_err()); break; @@ -844,8 +1080,9 @@ mod test_cluster_scan_async { cluster = TestClusterContext::new(3, 0); connection = cluster.async_connection(None).await; loop { - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; if scan_response.is_err() { println!("error: {:?}", scan_response); } @@ -857,4 +1094,246 @@ mod test_cluster_scan_async { } } } + + #[tokio::test] + #[serial_test::serial] + /// Test a case where a node is killed, key set into the cluster, and the client is still able to scan all keys + async fn test_async_cluster_scan_uncovered_slots_of_missing_node() { + // Create a cluster with 3 nodes + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(0), + false, + ); + let mut connection = cluster.async_connection(None).await; + + let mut config_cmd = cmd("CONFIG"); + config_cmd + .arg("SET") + .arg("cluster-require-full-coverage") + .arg("no"); + let _: RedisResult = connection + .route_command( + &config_cmd, + RoutingInfo::MultiNode((MultipleNodeRoutingInfo::AllNodes, None)), + ) + .await; + // Kill one node + let mut cluster_nodes = cluster.get_cluster_nodes().await; + let slot_distribution = cluster.get_slots_ranges_distribution(&cluster_nodes); + let killed_node_routing = kill_one_node(&cluster, slot_distribution.clone()).await; + let ready = cluster.wait_for_fail_to_finish(&killed_node_routing).await; + match ready { + Ok(_) => {} + Err(e) => { + println!("error: {:?}", e); + } + } + + // Compare slot distribution before and after 
killing a node + cluster_nodes = cluster.get_cluster_nodes().await; + let new_slot_distribution = cluster.get_slots_ranges_distribution(&cluster_nodes); + assert_ne!(slot_distribution, new_slot_distribution); + let mut excepted_keys: Vec = vec![]; + // Set some keys + for i in 0..100 { + let key = format!("key{}", i); + let res: Result<(), redis::RedisError> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + if res.is_ok() { + excepted_keys.push(key); + } + } + + // Scan the keys + let mut scan_state_rc = ScanStateRC::new(); + let mut keys: Vec = vec![]; + let args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + loop { + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection + .cluster_scan(scan_state_rc, args.clone()) + .await + .unwrap(); + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); // Change the type of `keys` to `Vec` + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + // Check if all keys available scanned + keys.sort(); + keys.dedup(); + excepted_keys.sort(); + excepted_keys.dedup(); + for key in excepted_keys.iter() { + assert!(keys.contains(key)); + } + assert!(keys.len() > 0); + } + + #[tokio::test] + #[serial_test::serial] + /// Test scanning after killing a node and compare with "KEYS *" from remaining nodes + async fn test_async_cluster_scan_after_node_killed() { + // Create a cluster with 3 nodes + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(0), + false, + ); + let mut connection = cluster.async_connection(None).await; + + // Set cluster-require-full-coverage to no + let mut config_cmd = cmd("CONFIG"); + config_cmd + .arg("SET") + .arg("cluster-require-full-coverage") + .arg("no"); + let _: RedisResult = connection + .route_command( + &config_cmd, + 
RoutingInfo::MultiNode((MultipleNodeRoutingInfo::AllNodes, None)), + ) + .await; + + for i in 0..100 { + let key = format!("key{}", i); + let _res: RedisResult<()> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + } + + // Kill one node + let cluster_nodes = cluster.get_cluster_nodes().await; + let slot_distribution = cluster.get_slots_ranges_distribution(&cluster_nodes); + let killed_node_routing = kill_one_node(&cluster, slot_distribution.clone()).await; + let ready = cluster.wait_for_fail_to_finish(&killed_node_routing).await; + match ready { + Ok(_) => {} + Err(e) => { + println!("error: {:?}", e); + } + } + + // Scan the keys + let mut scan_state_rc = ScanStateRC::new(); + let mut keys: Vec = vec![]; + let args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + loop { + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection + .cluster_scan(scan_state_rc, args.clone()) + .await + .unwrap(); + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); // Change the type of `keys` to `Vec` + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + + // Get keys from remaining nodes using "KEYS *" + let mut keys_from_keys_command: Vec = Vec::new(); + let key_res: RedisResult = connection + .route_command( + cmd("KEYS").arg("*"), + RoutingInfo::MultiNode(( + MultipleNodeRoutingInfo::AllNodes, + Some(ResponsePolicy::CombineArrays), + )), + ) + .await; + if let Ok(value) = key_res { + let values: Vec = from_redis_value(&value).unwrap(); + keys_from_keys_command + .extend(values.into_iter().map(|v| from_redis_value(&v).unwrap())); + } + + // Sort and dedup keys + keys.sort(); + keys.dedup(); + keys_from_keys_command.sort(); + keys_from_keys_command.dedup(); + + // Check if scanned keys match keys from "KEYS *" + assert_eq!(keys, keys_from_keys_command); + } + + #[tokio::test] + 
#[serial_test::serial] + /// Test scanning with allow_non_covered_slots as false after killing a node + async fn test_async_cluster_scan_uncovered_slots_fail() { + // Create a cluster with 3 nodes + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(0), + false, + ); + let mut connection = cluster.async_connection(None).await; + + // Kill one node + let cluster_nodes = cluster.get_cluster_nodes().await; + let slot_distribution = cluster.get_slots_ranges_distribution(&cluster_nodes); + let killed_node_routing = kill_one_node(&cluster, slot_distribution.clone()).await; + let ready = cluster.wait_for_fail_to_finish(&killed_node_routing).await; + match ready { + Ok(_) => {} + Err(e) => { + println!("error: {:?}", e); + } + } + + for i in 0..100 { + let key = format!("key{}", i); + let _res: RedisResult<()> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + } + + // Try scanning with allow_non_covered_slots as false + let mut scan_state_rc = ScanStateRC::new(); + let mut had_error = false; + loop { + let result = connection + .cluster_scan(scan_state_rc.clone(), ClusterScanArgs::default()) + .await; + + match result { + Ok((next_cursor, _)) => { + scan_state_rc = next_cursor; + if scan_state_rc.is_finished() { + break; + } + } + Err(e) => { + had_error = true; + assert_eq!(e.kind(), redis::ErrorKind::NotAllSlotsCovered); + break; + } + } + } + + assert!(had_error); + } } diff --git a/glide-core/src/client/mod.rs b/glide-core/src/client/mod.rs index a560e17697..73eee144b1 100644 --- a/glide-core/src/client/mod.rs +++ b/glide-core/src/client/mod.rs @@ -13,7 +13,7 @@ use redis::cluster_routing::{ }; use redis::cluster_slotmap::ReadFromReplicaStrategy; use redis::{ - Cmd, ErrorKind, FromRedisValue, InfoDict, ObjectType, PushInfo, RedisError, RedisResult, + ClusterScanArgs, Cmd, ErrorKind, FromRedisValue, PushInfo, RedisError, RedisResult, ScanStateRC, Value, }; pub use 
standalone_client::StandaloneClient; @@ -27,6 +27,7 @@ use self::value_conversion::{convert_to_expected_type, expected_type_for_cmd, ge mod reconnecting_connection; mod standalone_client; mod value_conversion; +use redis::InfoDict; use tokio::sync::mpsc; use versions::Versioning; @@ -310,33 +311,16 @@ impl Client { pub async fn cluster_scan<'a>( &'a mut self, scan_state_cursor: &'a ScanStateRC, - match_pattern: &'a Option>, - count: Option, - object_type: Option, + cluster_scan_args: ClusterScanArgs, ) -> RedisResult { match self.internal_client { ClientWrapper::Standalone(_) => { unreachable!("Cluster scan is not supported in standalone mode") } ClientWrapper::Cluster { ref mut client } => { - let (cursor, keys) = match match_pattern { - Some(pattern) => { - client - .cluster_scan_with_pattern( - scan_state_cursor.clone(), - pattern, - count, - object_type, - ) - .await? - } - None => { - client - .cluster_scan(scan_state_cursor.clone(), count, object_type) - .await? - } - }; - + let (cursor, keys) = client + .cluster_scan(scan_state_cursor.clone(), cluster_scan_args) + .await?; let cluster_cursor_id = if cursor.is_finished() { Value::BulkString(FINISHED_SCAN_CURSOR.into()) } else { diff --git a/glide-core/src/protobuf/command_request.proto b/glide-core/src/protobuf/command_request.proto index 30b33362af..d7c693cfd6 100644 --- a/glide-core/src/protobuf/command_request.proto +++ b/glide-core/src/protobuf/command_request.proto @@ -506,6 +506,7 @@ message ClusterScan { optional bytes match_pattern = 2; optional int64 count = 3; optional string object_type = 4; + bool allow_non_covered_slots = 5; } message UpdateConnectionPassword { diff --git a/glide-core/src/socket_listener.rs b/glide-core/src/socket_listener.rs index f148bbdede..4896f83565 100644 --- a/glide-core/src/socket_listener.rs +++ b/glide-core/src/socket_listener.rs @@ -19,7 +19,7 @@ use redis::cluster_routing::{ MultipleNodeRoutingInfo, Route, RoutingInfo, SingleNodeRoutingInfo, SlotAddr, }; use 
redis::cluster_routing::{ResponsePolicy, Routable}; -use redis::{Cmd, PushInfo, RedisError, ScanStateRC, Value}; +use redis::{ClusterScanArgs, Cmd, PushInfo, RedisError, ScanStateRC, Value}; use std::cell::Cell; use std::collections::HashSet; use std::rc::Rc; @@ -321,30 +321,23 @@ async fn cluster_scan(cluster_scan: ClusterScan, mut client: Client) -> ClientUs } else { get_cluster_scan_cursor(cursor)? }; - - let match_pattern = cluster_scan.match_pattern.map(|pattern| pattern.into()); - let count = cluster_scan.count.map(|count| count as usize); - - let object_type = match cluster_scan.object_type { - Some(char_object_type) => match char_object_type.to_string().to_lowercase().as_str() { - STRING => Some(redis::ObjectType::String), - LIST => Some(redis::ObjectType::List), - SET => Some(redis::ObjectType::Set), - ZSET => Some(redis::ObjectType::ZSet), - HASH => Some(redis::ObjectType::Hash), - STREAM => Some(redis::ObjectType::Stream), - _ => { - return Err(ClientUsageError::Internal(format!( - "Received invalid object type: {:?}", - char_object_type - ))) - } - }, - None => None, - }; + let mut cluster_scan_args_builder = + ClusterScanArgs::builder().allow_non_covered_slots(cluster_scan.allow_non_covered_slots); + if let Some(match_pattern) = cluster_scan.match_pattern { + cluster_scan_args_builder = + cluster_scan_args_builder.with_match_pattern::(match_pattern); + } + if let Some(count) = cluster_scan.count { + cluster_scan_args_builder = cluster_scan_args_builder.with_count(count as u32); + } + if let Some(object_type) = cluster_scan.object_type { + cluster_scan_args_builder = + cluster_scan_args_builder.with_object_type(object_type.to_string().into()); + } + let cluster_scan_args = cluster_scan_args_builder.build(); client - .cluster_scan(&cluster_scan_cursor, &match_pattern, count, object_type) + .cluster_scan(&cluster_scan_cursor, cluster_scan_args) .await .map_err(|err| err.into()) } diff --git 
a/java/client/src/main/java/glide/api/models/commands/scan/ScanOptions.java b/java/client/src/main/java/glide/api/models/commands/scan/ScanOptions.java index 6fffc46f35..2b7102bc2f 100644 --- a/java/client/src/main/java/glide/api/models/commands/scan/ScanOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/scan/ScanOptions.java @@ -6,6 +6,7 @@ import glide.api.models.GlideString; import glide.ffi.resolvers.ObjectTypeResolver; import glide.utils.ArrayTransformUtils; +import lombok.Builder; import lombok.EqualsAndHashCode; import lombok.experimental.SuperBuilder; @@ -28,6 +29,13 @@ public class ScanOptions extends BaseScanOptions { */ private final ObjectType type; + /** + * If set to true, the scan will perform even if some slots are not covered by any node. It's + * important to note that when set to true, the scan has no guarantee to cover all keys in the + * cluster, and the method loses its way to validate the progress of the scan. Defaults to false. + */ + @Builder.Default private final Boolean allowNonCoveredSlots = false; + /** Defines the complex data types available for a SCAN request. */ public enum ObjectType { STRING(ObjectTypeResolver.OBJECT_TYPE_STRING_NATIVE_NAME), @@ -86,4 +94,11 @@ public Long getCount() { public ObjectType getType() { return type; } + + /** + * @return whether non-covered slots are allowed. 
+ */ + public Boolean getAllowNonCoveredSlots() { + return allowNonCoveredSlots; + } } diff --git a/java/client/src/main/java/glide/managers/CommandManager.java b/java/client/src/main/java/glide/managers/CommandManager.java index d069c6bd72..47b0de7d75 100644 --- a/java/client/src/main/java/glide/managers/CommandManager.java +++ b/java/client/src/main/java/glide/managers/CommandManager.java @@ -428,6 +428,10 @@ protected CommandRequest.Builder prepareCursorRequest( clusterScanBuilder.setObjectType(options.getType().getNativeName()); } + if (options.getAllowNonCoveredSlots() != null) { + clusterScanBuilder.setAllowNonCoveredSlots(options.getAllowNonCoveredSlots()); + } + return CommandRequest.newBuilder().setClusterScan(clusterScanBuilder.build()); } diff --git a/node/src/Commands.ts b/node/src/Commands.ts index 8411cf212d..972c74777c 100644 --- a/node/src/Commands.ts +++ b/node/src/Commands.ts @@ -3842,6 +3842,19 @@ export interface ScanOptions extends BaseScanOptions { type?: ObjectType; } +/** + * Options for the SCAN command. + * `match`: The match filter is applied to the result of the command and will only include keys that match the pattern specified. + * `count`: `COUNT` is a just a hint for the command for how many elements to fetch from the server, the default is 10. + * `type`: The type of the object to scan. + * Types are the data types of Valkey: `string`, `list`, `set`, `zset`, `hash`, `stream`. + * `allowNonCoveredSlots`: If true, the scan will keep scanning even if slots are not covered by the cluster. + * By default, the scan will stop if slots are not covered by the cluster. + */ +export interface ClusterScanOptions extends ScanOptions { + allowNonCoveredSlots?: boolean; +} + /** * Options specific to the ZSCAN command, extending from the base scan options. 
*/ diff --git a/node/src/GlideClusterClient.ts b/node/src/GlideClusterClient.ts index 0524128dd5..4e9aee579d 100644 --- a/node/src/GlideClusterClient.ts +++ b/node/src/GlideClusterClient.ts @@ -23,7 +23,7 @@ import { FunctionStatsSingleResponse, InfoOptions, LolwutOptions, - ScanOptions, + ClusterScanOptions, createClientGetName, createClientId, createConfigGet, @@ -146,7 +146,7 @@ export namespace GlideClusterClientConfiguration { /** * Configuration options for creating a {@link GlideClusterClient | GlideClusterClient}. * - * Extends `BaseClientConfiguration` with properties specific to `GlideClusterClient`, such as periodic topology checks + * Extends {@link BaseClientConfiguration | BaseClientConfiguration} with properties specific to `GlideClusterClient`, such as periodic topology checks * and Pub/Sub subscription settings. * * @remarks @@ -579,7 +579,7 @@ export class GlideClusterClient extends BaseClient { */ protected scanOptionsToProto( cursor: string, - options?: ScanOptions, + options?: ClusterScanOptions, ): command_request.ClusterScan { const command = command_request.ClusterScan.create(); command.cursor = cursor; @@ -596,6 +596,7 @@ export class GlideClusterClient extends BaseClient { command.objectType = options.type; } + command.allowNonCoveredSlots = options?.allowNonCoveredSlots ?? false; return command; } @@ -604,7 +605,7 @@ export class GlideClusterClient extends BaseClient { */ protected createClusterScanPromise( cursor: ClusterScanCursor, - options?: ScanOptions & DecoderOption, + options?: ClusterScanOptions & DecoderOption, ): Promise<[ClusterScanCursor, GlideString[]]> { // separate decoder option from scan options const { decoder, ...scanOptions } = options || {}; @@ -633,7 +634,7 @@ export class GlideClusterClient extends BaseClient { * * @param cursor - The cursor object that wraps the scan state. * To start a new scan, create a new empty `ClusterScanCursor` using {@link ClusterScanCursor}. 
- * @param options - (Optional) The scan options, see {@link ScanOptions} and {@link DecoderOption}. + * @param options - (Optional) The scan options, see {@link ClusterScanOptions} and {@link DecoderOption}. * @returns A Promise resolving to an array containing the next cursor and an array of keys, * formatted as [`ClusterScanCursor`, `string[]`]. * @@ -651,14 +652,14 @@ export class GlideClusterClient extends BaseClient { * console.log(allKeys); // ["key1", "key2", "key3"] * * // Iterate over keys matching a pattern - * await client.mset([{key: "key1", value: "value1"}, {key: "key2", value: "value2"}, {key: "notMykey", value: "value3"}, {key: "somethingElse", value: "value4"}]); + * await client.mset([{key: "key1", value: "value1"}, {key: "key2", value: "value2"}, {key: "notMyKey", value: "value3"}, {key: "somethingElse", value: "value4"}]); * let cursor = new ClusterScanCursor(); * const matchedKeys: GlideString[] = []; * while (!cursor.isFinished()) { * const [cursor, keys] = await client.scan(cursor, { match: "*key*", count: 10 }); * matchedKeys.push(...keys); * } - * console.log(matchedKeys); // ["key1", "key2", "notMykey"] + * console.log(matchedKeys); // ["key1", "key2", "notMyKey"] * * // Iterate over keys of a specific type * await client.mset([{key: "key1", value: "value1"}, {key: "key2", value: "value2"}, {key: "key3", value: "value3"}]); @@ -674,7 +675,7 @@ export class GlideClusterClient extends BaseClient { */ public async scan( cursor: ClusterScanCursor, - options?: ScanOptions & DecoderOption, + options?: ClusterScanOptions & DecoderOption, ): Promise<[ClusterScanCursor, GlideString[]]> { return this.createClusterScanPromise(cursor, options); } diff --git a/node/tests/ScanTest.test.ts b/node/tests/ScanTest.test.ts index bff90bab36..bb370a81db 100644 --- a/node/tests/ScanTest.test.ts +++ b/node/tests/ScanTest.test.ts @@ -12,6 +12,7 @@ import { GlideString, ObjectType, ProtocolVersion, + GlideClusterClientConfiguration, } from ".."; import { 
ValkeyCluster } from "../../utils/TestUtils.js"; import { @@ -19,6 +20,7 @@ import { getClientConfigurationOption, getServerVersion, parseEndpoints, + waitForClusterReady as isClusterReadyWithExpectedNodeCount, } from "./TestUtilities"; const TIMEOUT = 50000; @@ -376,6 +378,166 @@ describe("Scan GlideClusterClient", () => { }, TIMEOUT, ); + + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + `GlideClusterClient scan with allowNonCoveredSlots %p`, + async (protocol) => { + const testCluster = await ValkeyCluster.createCluster( + true, + 3, + 0, + getServerVersion, + ); + const config: GlideClusterClientConfiguration = { + addresses: testCluster + .getAddresses() + .map(([host, port]) => ({ host, port })), + protocol, + }; + const testClient = await GlideClusterClient.createClient(config); + + try { + for (let i = 0; i < 10000; i++) { + const result = await testClient.set(`${uuidv4()}`, "value"); + expect(result).toBe("OK"); + } + + // Perform an initial scan to ensure all works as expected + let cursor = new ClusterScanCursor(); + let result = await testClient.scan(cursor); + cursor = result[0]; + expect(cursor.isFinished()).toBe(false); + + // Set 'cluster-require-full-coverage' to 'no' to allow operations with missing slots + await testClient.configSet({ + "cluster-require-full-coverage": "no", + }); + + // Forget one server to simulate a node failure + const addresses = testCluster.getAddresses(); + const addressToForget = addresses[0]; + const allOtherAddresses = addresses.slice(1); + const idToForget = await testClient.customCommand( + ["CLUSTER", "MYID"], + { + route: { + type: "routeByAddress", + host: addressToForget[0], + port: addressToForget[1], + }, + }, + ); + + for (const address of allOtherAddresses) { + await testClient.customCommand( + ["CLUSTER", "FORGET", idToForget as string], + { + route: { + type: "routeByAddress", + host: address[0], + port: address[1], + }, + }, + ); + } + + // Wait for the cluster to stabilize after forgetting a 
node + const ready = await isClusterReadyWithExpectedNodeCount( + testClient, + allOtherAddresses.length, + ); + expect(ready).toBe(true); + + // Attempt to scan without 'allowNonCoveredSlots', expecting an error + // Since it might take time for the inner core to forget the missing node, + // we retry the scan until the expected error is thrown. + + const maxRetries = 10; + let retries = 0; + let errorReceived = false; + + while (retries < maxRetries && !errorReceived) { + retries++; + cursor = new ClusterScanCursor(); + + try { + while (!cursor.isFinished()) { + result = await testClient.scan(cursor); + cursor = result[0]; + } + + // If scan completes without error, wait and retry + await new Promise((resolve) => + setTimeout(resolve, 1000), + ); + } catch (error) { + if ( + error instanceof Error && + error.message.includes( + "Could not find an address covering a slot, SCAN operation cannot continue", + ) + ) { + // Expected error occurred + errorReceived = true; + } else { + // Unexpected error, rethrow + throw error; + } + } + } + + expect(errorReceived).toBe(true); + + // Perform scan with 'allowNonCoveredSlots: true' + cursor = new ClusterScanCursor(); + + while (!cursor.isFinished()) { + result = await testClient.scan(cursor, { + allowNonCoveredSlots: true, + }); + cursor = result[0]; + } + + expect(cursor.isFinished()).toBe(true); + + // Get keys using 'KEYS *' from the remaining nodes + const keys: GlideString[] = []; + + for (const address of allOtherAddresses) { + const result = await testClient.customCommand( + ["KEYS", "*"], + { + route: { + type: "routeByAddress", + host: address[0], + port: address[1], + }, + }, + ); + keys.push(...(result as GlideString[])); + } + + // Scan again with 'allowNonCoveredSlots: true' and collect results + cursor = new ClusterScanCursor(); + const results: GlideString[] = []; + + while (!cursor.isFinished()) { + result = await testClient.scan(cursor, { + allowNonCoveredSlots: true, + }); + results.push(...result[1]); + 
cursor = result[0]; + } + + // Compare the sets of keys obtained from 'KEYS *' and 'SCAN' + expect(new Set(results)).toEqual(new Set(keys)); + } finally { + testClient.close(); + await testCluster.close(); + } + }, + TIMEOUT, + ); }); //standalone tests diff --git a/node/tests/TestUtilities.ts b/node/tests/TestUtilities.ts index 6d79c768fc..cd21d20367 100644 --- a/node/tests/TestUtilities.ts +++ b/node/tests/TestUtilities.ts @@ -83,6 +83,51 @@ function intoArrayInternal(obj: any, builder: string[]) { } } +// The function is used to check if the cluster is ready with the count nodes known command using the client supplied. +// The way it works is by parsing the response of the CLUSTER INFO command and checking if the cluster_state is ok and the cluster_known_nodes is equal to the count. +// If so, we know the cluster is ready, and it has the amount of nodes we expect. +export async function waitForClusterReady( + client: GlideClusterClient, + count: number, +): Promise { + const timeout = 20000; // 20 seconds timeout in milliseconds + const startTime = Date.now(); + + while (true) { + if (Date.now() - startTime > timeout) { + return false; + } + + const clusterInfo = await client.customCommand(["CLUSTER", "INFO"]); + // parse the response + const clusterInfoMap = new Map(); + + if (clusterInfo) { + const clusterInfoLines = clusterInfo + .toString() + .split("\n") + .filter((line) => line.length > 0); + + for (const line of clusterInfoLines) { + const [key, value] = line.split(":"); + + clusterInfoMap.set(key.trim(), value.trim()); + } + + if ( + clusterInfoMap.get("cluster_state") == "ok" && + Number(clusterInfoMap.get("cluster_known_nodes")) == count + ) { + break; + } + } + + await new Promise((resolve) => setTimeout(resolve, 2000)); + } + + return true; +} + /** * accept any variable `v` and convert it into String, recursively */ diff --git a/package.json b/package.json index 3f61298feb..c6676131a2 100644 --- a/package.json +++ b/package.json @@ -1,12 +1,14 @@ { 
"devDependencies": { - "@eslint/js": "^9.10.0", + "@eslint/js": "9.17.0", "@types/eslint__js": "^8.42.3", "@types/eslint-config-prettier": "^6.11.3", - "eslint": "9.14.0", + "eslint": "9.17.0", "eslint-config-prettier": "^9.1.0", - "prettier": "^3.3.3", - "typescript": "^5.6.2", - "typescript-eslint": "^8.13" + "eslint-plugin-jsdoc": "^50.6.1", + "prettier": "3.4.2", + "prettier-eslint": "16.3.0", + "typescript": "5.7.2", + "typescript-eslint": "8.18.1" } } diff --git a/python/DEVELOPER.md b/python/DEVELOPER.md index 66127913c3..ae945b5835 100644 --- a/python/DEVELOPER.md +++ b/python/DEVELOPER.md @@ -109,7 +109,6 @@ cd python python3 -m venv .env source .env/bin/activate pip install -r requirements.txt -pip install -r dev_requirements.txt ``` ## Build the package (in release mode): @@ -211,7 +210,7 @@ Run from the main `/python` folder ```bash cd $HOME/src/valkey-glide/python source .env/bin/activate - pip install -r dev_requirements.txt + pip install -r requirements.txt isort . --profile black --skip-glob python/glide/protobuf --skip-glob .env black . --exclude python/glide/protobuf --exclude .env flake8 . 
--count --select=E9,F63,F7,F82 --show-source --statistics \ diff --git a/python/dev_requirements.txt b/python/dev_requirements.txt deleted file mode 100644 index 02e9c4fd53..0000000000 --- a/python/dev_requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -black >= 24.3.0 -flake8 == 5.0 -isort == 5.10 -mypy == 1.2 -mypy-protobuf == 3.5 -packaging >= 22.0 -pyrsistent diff --git a/python/python/glide/async_commands/cluster_commands.py b/python/python/glide/async_commands/cluster_commands.py index 584091ab32..6167e1abaa 100644 --- a/python/python/glide/async_commands/cluster_commands.py +++ b/python/python/glide/async_commands/cluster_commands.py @@ -1076,67 +1076,78 @@ async def scan( match: Optional[TEncodable] = None, count: Optional[int] = None, type: Optional[ObjectType] = None, + allow_non_covered_slots: bool = False, ) -> List[Union[ClusterScanCursor, List[bytes]]]: """ - Incrementally iterates over the keys in the Cluster. + Incrementally iterates over the keys in the cluster. The method returns a list containing the next cursor and a list of keys. - This command is similar to the SCAN command, but it is designed to work in a Cluster environment. - For each iteration the new cursor object should be used to continue the scan. + This command is similar to the SCAN command but is designed to work in a cluster environment. + For each iteration, the new cursor object should be used to continue the scan. Using the same cursor object for multiple iterations will result in the same keys or unexpected behavior. - For more information about the Cluster Scan implementation, - see [Cluster Scan](https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#cluster-scan). + For more information about the Cluster Scan implementation, see [Cluster Scan](https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#cluster-scan). 
- As the SCAN command, the method can be used to iterate over the keys in the database, - to return all keys the database have from the time the scan started till the scan ends. - The same key can be returned in multiple scans iteration. + Like the SCAN command, the method can be used to iterate over the keys in the database, + returning all keys the database has from when the scan started until the scan ends. + The same key can be returned in multiple scan iterations. See https://valkey.io/commands/scan/ for more details. Args: cursor (ClusterScanCursor): The cursor object that wraps the scan state. - To start a new scan, create a new empty ClusterScanCursor using ClusterScanCursor(). + To start a new scan, create a new empty ClusterScanCursor using ClusterScanCursor(). match (Optional[TEncodable]): A pattern to match keys against. count (Optional[int]): The number of keys to return in a single iteration. - The actual number returned can vary and is not guaranteed to match this count exactly. - This parameter serves as a hint to the server on the number of steps to perform in each iteration. - The default value is 10. + The actual number returned can vary and is not guaranteed to match this count exactly. + This parameter serves as a hint to the server on the number of steps to perform in each iteration. + The default value is 10. type (Optional[ObjectType]): The type of object to scan for. + allow_non_covered_slots (bool): If set to True, the scan will perform even if some slots are not covered by any node. + It's important to note that when set to True, the scan has no guarantee to cover all keys in the cluster, + and the method loses its way to validate the progress of the scan. Defaults to False. Returns: List[Union[ClusterScanCursor, List[TEncodable]]]: A list containing the next cursor and a list of keys, - formatted as [ClusterScanCursor, [key1, key2, ...]]. + formatted as [ClusterScanCursor, [key1, key2, ...]]. 
Examples: - >>> # In the following example, we will iterate over the keys in the cluster. - await client.mset({b'key1': b'value1', b'key2': b'value2', b'key3': b'value3'}) - cursor = ClusterScanCursor() - all_keys = [] - while not cursor.is_finished(): - cursor, keys = await client.scan(cursor, count=10) - all_keys.extend(keys) - print(all_keys) # [b'key1', b'key2', b'key3'] - >>> # In the following example, we will iterate over the keys in the cluster that match the pattern "*key*". - await client.mset({b"key1": b"value1", b"key2": b"value2", b"not_my_key": b"value3", b"something_else": b"value4"}) - cursor = ClusterScanCursor() - all_keys = [] - while not cursor.is_finished(): - cursor, keys = await client.scan(cursor, match=b"*key*", count=10) - all_keys.extend(keys) - print(all_keys) # [b'my_key1', b'my_key2', b'not_my_key'] - >>> # In the following example, we will iterate over the keys in the cluster that are of type STRING. - await client.mset({b'key1': b'value1', b'key2': b'value2', b'key3': b'value3'}) - await client.sadd(b"this_is_a_set", [b"value4"]) - cursor = ClusterScanCursor() - all_keys = [] - while not cursor.is_finished(): - cursor, keys = await client.scan(cursor, type=ObjectType.STRING) - all_keys.extend(keys) - print(all_keys) # [b'key1', b'key2', b'key3'] + >>> # Iterate over all keys in the cluster. + >>> await client.mset({b'key1': b'value1', b'key2': b'value2', b'key3': b'value3'}) + >>> cursor = ClusterScanCursor() + >>> all_keys = [] + >>> while not cursor.is_finished(): + >>> cursor, keys = await client.scan(cursor, count=10, allow_non_covered_slots=False) + >>> all_keys.extend(keys) + >>> print(all_keys) # [b'key1', b'key2', b'key3'] + + >>> # Iterate over keys matching the pattern "*key*". 
+ >>> await client.mset({b"key1": b"value1", b"key2": b"value2", b"not_my_key": b"value3", b"something_else": b"value4"}) + >>> cursor = ClusterScanCursor() + >>> all_keys = [] + >>> while not cursor.is_finished(): + >>> cursor, keys = await client.scan(cursor, match=b"*key*", count=10, allow_non_covered_slots=False) + >>> all_keys.extend(keys) + >>> print(all_keys) # [b'key1', b'key2', b'not_my_key'] + + >>> # Iterate over keys of type STRING. + >>> await client.mset({b'key1': b'value1', b'key2': b'value2', b'key3': b'value3'}) + >>> await client.sadd(b"this_is_a_set", [b"value4"]) + >>> cursor = ClusterScanCursor() + >>> all_keys = [] + >>> while not cursor.is_finished(): + >>> cursor, keys = await client.scan(cursor, type=ObjectType.STRING, allow_non_covered_slots=False) + >>> all_keys.extend(keys) + >>> print(all_keys) # [b'key1', b'key2', b'key3'] """ return cast( List[Union[ClusterScanCursor, List[bytes]]], - await self._cluster_scan(cursor, match, count, type), + await self._cluster_scan( + cursor=cursor, + match=match, + count=count, + type=type, + allow_non_covered_slots=allow_non_covered_slots, + ), ) async def script_exists( diff --git a/python/python/glide/async_commands/core.py b/python/python/glide/async_commands/core.py index 6ebc8d2ab6..3f1be75d98 100644 --- a/python/python/glide/async_commands/core.py +++ b/python/python/glide/async_commands/core.py @@ -390,6 +390,7 @@ async def _cluster_scan( match: Optional[TEncodable] = ..., count: Optional[int] = ..., type: Optional[ObjectType] = ..., + allow_non_covered_slots: bool = ..., ) -> TResult: ... 
async def _update_connection_password( diff --git a/python/python/glide/glide_client.py b/python/python/glide/glide_client.py index 6178b997a7..5ed558e709 100644 --- a/python/python/glide/glide_client.py +++ b/python/python/glide/glide_client.py @@ -566,6 +566,7 @@ async def _cluster_scan( match: Optional[TEncodable] = None, count: Optional[int] = None, type: Optional[ObjectType] = None, + allow_non_covered_slots: bool = False, ) -> List[Union[ClusterScanCursor, List[bytes]]]: if self._is_closed: raise ClosingError( @@ -576,6 +577,7 @@ async def _cluster_scan( # Take out the id string from the wrapping object cursor_string = cursor.get_cursor() request.cluster_scan.cursor = cursor_string + request.cluster_scan.allow_non_covered_slots = allow_non_covered_slots if match is not None: request.cluster_scan.match_pattern = ( self._encode_arg(match) if isinstance(match, str) else match diff --git a/python/python/tests/conftest.py b/python/python/tests/conftest.py index 0937ca2067..0ab5c9d6e9 100644 --- a/python/python/tests/conftest.py +++ b/python/python/tests/conftest.py @@ -252,14 +252,16 @@ async def create_client( inflight_requests_limit: Optional[int] = None, read_from: ReadFrom = ReadFrom.PRIMARY, client_az: Optional[str] = None, + valkey_cluster: Optional[ValkeyCluster] = None, ) -> Union[GlideClient, GlideClusterClient]: # Create async socket client use_tls = request.config.getoption("--tls") if cluster_mode: - assert type(pytest.valkey_cluster) is ValkeyCluster + valkey_cluster = valkey_cluster or pytest.valkey_cluster + assert type(valkey_cluster) is ValkeyCluster assert database_id == 0 - k = min(3, len(pytest.valkey_cluster.nodes_addr)) - seed_nodes = random.sample(pytest.valkey_cluster.nodes_addr, k=k) + k = min(3, len(valkey_cluster.nodes_addr)) + seed_nodes = random.sample(valkey_cluster.nodes_addr, k=k) cluster_config = GlideClusterClientConfiguration( addresses=seed_nodes if addresses is None else addresses, use_tls=use_tls, diff --git 
a/python/python/tests/test_scan.py b/python/python/tests/test_scan.py index 907dc703d5..69d5243aaf 100644 --- a/python/python/tests/test_scan.py +++ b/python/python/tests/test_scan.py @@ -1,16 +1,96 @@ -from __future__ import annotations - -from typing import List, cast +import asyncio +from typing import AsyncGenerator, List, cast import pytest -from glide import ClusterScanCursor +from glide import ByAddressRoute from glide.async_commands.command_args import ObjectType from glide.config import ProtocolVersion from glide.exceptions import RequestError +from glide.glide import ClusterScanCursor from glide.glide_client import GlideClient, GlideClusterClient +from tests.conftest import create_client +from tests.utils.cluster import ValkeyCluster from tests.utils.utils import get_random_string +# Helper function to get a number of nodes, and ask the cluster till we get the number of nodes +async def is_cluster_ready(client: GlideClusterClient, count: int) -> bool: + # we allow max 20 seconds to get the nodes + timeout = 20 + start_time = asyncio.get_event_loop().time() + + while True: + if asyncio.get_event_loop().time() - start_time > timeout: + return False + + cluster_info = await client.custom_command(["CLUSTER", "INFO"]) + cluster_info_map = {} + + if cluster_info: + info_str = ( + cluster_info + if isinstance(cluster_info, str) + else ( + cluster_info.decode() + if isinstance(cluster_info, bytes) + else str(cluster_info) + ) + ) + cluster_info_lines = info_str.split("\n") + cluster_info_lines = [line for line in cluster_info_lines if line] + + for line in cluster_info_lines: + key, value = line.split(":") + cluster_info_map[key.strip()] = value.strip() + + if ( + cluster_info_map.get("cluster_state") == "ok" + and int(cluster_info_map.get("cluster_known_nodes", "0")) == count + ): + break + + await asyncio.sleep(2) + + return True + + +# The slots not covered testing is messing with the cluster by removing a node, and then scanning the cluster +# When a node is 
forgot its getting into a banned state for one minutes, so in order to bring back the cluster to normal state +# we need to wait for the node to be unbanned, and then we can continue with the tests +# In order to avoid the time wasting and the chance that the restoration will not happen, we will run the test on separate cluster +@pytest.fixture(scope="function") +async def function_scoped_cluster(): + """ + Function-scoped fixture to create a new cluster for each test invocation. + """ + cluster = ValkeyCluster( + tls=False, cluster_mode=True, shard_count=3, replica_count=0 + ) + yield cluster + del cluster + + +# Since the cluster for slots covered is created separately, we need to create a client for the specific cluster +# The client is created with 100 timeout so looping over the keys with scan will return the error before we finish the loop +# otherwise the test will be flaky +@pytest.fixture(scope="function") +async def glide_client_scoped( + request, function_scoped_cluster: ValkeyCluster, protocol: ProtocolVersion +) -> AsyncGenerator[GlideClusterClient, None]: + """ + Get client for tests, adjusted to use the function-scoped cluster. 
+ """ + client = await create_client( + request, + True, + valkey_cluster=function_scoped_cluster, + protocol=protocol, + ) + assert isinstance(client, GlideClusterClient) + yield client + await client.close() + + @pytest.mark.asyncio class TestScan: # Cluster scan tests @@ -251,6 +331,75 @@ async def test_cluster_scan_all_types(self, glide_client: GlideClusterClient): assert not set(encoded_list_keys).intersection(set(keys)) assert not set(encoded_zset_keys).intersection(set(keys)) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_cluster_scan_non_covered_slots( + self, + protocol: ProtocolVersion, + function_scoped_cluster: ValkeyCluster, + glide_client_scoped: GlideClusterClient, + ): + key = get_random_string(10) + for i in range(1000): + await glide_client_scoped.set(f"{key}:{i}", "value") + cursor = ClusterScanCursor() + result = await glide_client_scoped.scan(cursor) + cursor = cast(ClusterScanCursor, result[0]) + assert not cursor.is_finished() + await glide_client_scoped.config_set({"cluster-require-full-coverage": "no"}) + # forget one server + address_to_forget = glide_client_scoped.config.addresses[0] + all_other_addresses = glide_client_scoped.config.addresses[1:] + id_to_forget = await glide_client_scoped.custom_command( + ["CLUSTER", "MYID"], + ByAddressRoute(address_to_forget.host, address_to_forget.port), + ) + for address in all_other_addresses: + await glide_client_scoped.custom_command( + ["CLUSTER", "FORGET", cast(str, id_to_forget)], + ByAddressRoute(address.host, address.port), + ) + # now we let it few seconds gossip to get the new cluster configuration + await is_cluster_ready(glide_client_scoped, len(all_other_addresses)) + # Iterate scan until error is returned, as it might take time for the inner core to forget the missing node + cursor = ClusterScanCursor() + while True: + try: + while not cursor.is_finished(): + result = await glide_client_scoped.scan(cursor) + cursor = 
cast(ClusterScanCursor, result[0]) + # Reset cursor for next iteration + cursor = ClusterScanCursor() + except RequestError as e_info: + assert ( + "Could not find an address covering a slot, SCAN operation cannot continue" + in str(e_info) + ) + break + # Scan with allow_non_covered_slots=True + while not cursor.is_finished(): + result = await glide_client_scoped.scan( + cursor, allow_non_covered_slots=True + ) + cursor = cast(ClusterScanCursor, result[0]) + assert cursor.is_finished() + # check the keys we can get with keys command, and scan from the beginning + keys = [] + for address in all_other_addresses: + result = await glide_client_scoped.custom_command( + ["KEYS", "*"], ByAddressRoute(address.host, address.port) + ) + keys.extend(cast(List[bytes], result)) + + cursor = ClusterScanCursor() + results = [] + while not cursor.is_finished(): + result = await glide_client_scoped.scan( + cursor, allow_non_covered_slots=True + ) + results.extend(cast(List[bytes], result[1])) + cursor = cast(ClusterScanCursor, result[0]) + assert set(keys) == set(results) + # Standalone scan tests @pytest.mark.parametrize("cluster_mode", [False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) diff --git a/python/python/tests/test_transaction.py b/python/python/tests/test_transaction.py index ccdb309f58..c623f4e8c9 100644 --- a/python/python/tests/test_transaction.py +++ b/python/python/tests/test_transaction.py @@ -1033,10 +1033,7 @@ async def test_standalone_transaction(self, glide_client: GlideClient): assert result[5:13] == [2, 2, 2, [b"Bob", b"Alice"], 2, OK, None, 0] assert result[13:] == expected - @pytest.mark.filterwarnings( - action="ignore", message="The test " - ) - def test_transaction_clear(self): + async def test_transaction_clear(self): transaction = Transaction() transaction.info() transaction.select(1) diff --git a/python/requirements.txt b/python/requirements.txt index b39d1d96c8..b5880e6287 100644 --- 
a/python/requirements.txt +++ b/python/requirements.txt @@ -1,7 +1,12 @@ async-timeout==4.0.2;python_version<"3.11" maturin==0.14.17 # higher version break the needs structure changes, the name of the project is not the same as the package name, and the naming both glide create a circular dependency - TODO: fix this -protobuf==3.20.* pytest pytest-asyncio typing_extensions==4.8.0;python_version<"3.11" pytest-html +black >= 24.3.0 +flake8 == 5.0 +isort == 5.10 +mypy == 1.13.0 +mypy-protobuf == 3.5 +packaging >= 22.0 diff --git a/utils/TestUtils.ts b/utils/TestUtils.ts index 423bf8e9cb..9c89788528 100644 --- a/utils/TestUtils.ts +++ b/utils/TestUtils.ts @@ -21,9 +21,9 @@ function parseOutput(input: string): { .split(",") .map((address) => address.split(":")) .map((address) => [address[0], Number(address[1])]) as [ - string, - number, - ][]; + string, + number, + ][]; if (clusterFolder === undefined || ports === undefined) { throw new Error(`Insufficient data in input: ${input}`); @@ -82,7 +82,7 @@ export class ValkeyCluster { execFile( "python3", [PY_SCRIPT_PATH, ...command.split(" ")], - (error, stdout, stderr) => { + (error, stdout) => { if (error) { reject(error); } else { From 0bcca10f207a5958f3b7e6c228596396f62fd01e Mon Sep 17 00:00:00 2001 From: Shoham Elias <116083498+shohamazon@users.noreply.github.com> Date: Thu, 26 Dec 2024 14:57:34 +0200 Subject: [PATCH 16/18] Add connection timeout configuration (#2823) --------- Signed-off-by: Shoham Elias --- CHANGELOG.md | 4 +- glide-core/redis-rs/redis/src/client.rs | 5 + .../src/cluster_async/connections_logic.rs | 1 + .../redis-rs/redis/src/cluster_async/mod.rs | 1 + .../redis-rs/redis/src/cluster_routing.rs | 2 +- glide-core/src/client/mod.rs | 12 +- .../src/client/reconnecting_connection.rs | 20 ++- glide-core/src/client/standalone_client.rs | 10 ++ glide-core/src/client/types.rs | 3 + .../src/protobuf/connection_request.proto | 3 +- .../AdvancedBaseClientConfiguration.java | 22 ++++ 
.../AdvancedGlideClientConfiguration.java | 23 ++++ ...vancedGlideClusterClientConfiguration.java | 23 ++++ .../BaseClientConfiguration.java | 3 +- .../GlideClientConfiguration.java | 4 + .../GlideClusterClientConfiguration.java | 4 + .../glide/managers/ConnectionManager.java | 31 +++++ .../src/test/java/glide/ConnectionTests.java | 115 +++++++++++++++++ node/npm/glide/index.ts | 4 + node/rust-client/src/lib.rs | 6 +- node/src/BaseClient.ts | 46 ++++++- node/src/GlideClient.ts | 27 ++++ node/src/GlideClusterClient.ts | 27 ++++ node/tests/GlideClient.test.ts | 70 +++++++++++ node/tests/GlideClusterClient.test.ts | 62 +++++++++ python/python/glide/__init__.py | 4 + python/python/glide/config.py | 67 +++++++++- python/python/tests/conftest.py | 18 ++- python/python/tests/test_async_client.py | 119 +++++++++++++++--- python/python/tests/test_config.py | 27 ++++ python/python/tests/test_pubsub.py | 4 +- .../python/tests/test_read_from_strategy.py | 12 +- python/python/tests/test_transaction.py | 2 +- utils/cluster_manager.py | 16 ++- 34 files changed, 746 insertions(+), 51 deletions(-) create mode 100644 java/client/src/main/java/glide/api/models/configuration/AdvancedBaseClientConfiguration.java create mode 100644 java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClientConfiguration.java create mode 100644 java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClusterClientConfiguration.java diff --git a/CHANGELOG.md b/CHANGELOG.md index a1bc2cd15f..fdd6642c2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,9 @@ #### Changes * Node, Python, Java: Add allow uncovered slots scanning flag option in cluster scan ([#2814](https://github.com/valkey-io/valkey-glide/pull/2814), [#2815](https://github.com/valkey-io/valkey-glide/pull/2815), [#2860](https://github.com/valkey-io/valkey-glide/pull/2860)) -* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), 
[#2802](https://github.com/valkey-io/valkey-glide/pull/2802) +* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), [#2802](https://github.com/valkey-io/valkey-glide/pull/2802)) * Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) * Node: Remove native package references for MacOs x64 architecture ([#2799](https://github.com/valkey-io/valkey-glide/issues/2799)) - +* Node, Python, Java: Add connection timeout to client configuration ([#2823](https://github.com/valkey-io/valkey-glide/issues/2823)) #### Breaking Changes #### Fixes diff --git a/glide-core/redis-rs/redis/src/client.rs b/glide-core/redis-rs/redis/src/client.rs index 6ac3f40bcf..2b97671110 100644 --- a/glide-core/redis-rs/redis/src/client.rs +++ b/glide-core/redis-rs/redis/src/client.rs @@ -89,6 +89,11 @@ pub struct GlideConnectionOptions { /// If ReadFromReplica strategy is set to AZAffinity, this parameter will be set to 'true'. /// In this case, an INFO command will be triggered in the connection's setup to update the connection's 'availability_zone' property. pub discover_az: bool, + /// Connection timeout duration. + /// + /// This optional field sets the maximum duration to wait when attempting to establish + /// a connection. If `None`, the connection will use `DEFAULT_CONNECTION_TIMEOUT`. 
+ pub connection_timeout: Option, } /// To enable async support you need to enable the feature: `tokio-comp` diff --git a/glide-core/redis-rs/redis/src/cluster_async/connections_logic.rs b/glide-core/redis-rs/redis/src/cluster_async/connections_logic.rs index 4f9b3f0d4e..e5af8d1e50 100644 --- a/glide-core/redis-rs/redis/src/cluster_async/connections_logic.rs +++ b/glide-core/redis-rs/redis/src/cluster_async/connections_logic.rs @@ -193,6 +193,7 @@ where push_sender: None, disconnect_notifier, discover_az, + connection_timeout: Some(params.connection_timeout), }, ) .await diff --git a/glide-core/redis-rs/redis/src/cluster_async/mod.rs b/glide-core/redis-rs/redis/src/cluster_async/mod.rs index 8164d09413..534fdd429e 100644 --- a/glide-core/redis-rs/redis/src/cluster_async/mod.rs +++ b/glide-core/redis-rs/redis/src/cluster_async/mod.rs @@ -1094,6 +1094,7 @@ where push_sender, disconnect_notifier, discover_az, + connection_timeout: Some(cluster_params.connection_timeout), }; let connections = Self::create_initial_connections( diff --git a/glide-core/redis-rs/redis/src/cluster_routing.rs b/glide-core/redis-rs/redis/src/cluster_routing.rs index 8bf11d19d4..011f5e08e6 100644 --- a/glide-core/redis-rs/redis/src/cluster_routing.rs +++ b/glide-core/redis-rs/redis/src/cluster_routing.rs @@ -623,6 +623,7 @@ fn base_routing(cmd: &[u8]) -> RouteBy { | b"FUNCTION STATS" => RouteBy::AllNodes, b"DBSIZE" + | b"DEBUG" | b"FLUSHALL" | b"FLUSHDB" | b"FT._ALIASLIST" @@ -717,7 +718,6 @@ fn base_routing(cmd: &[u8]) -> RouteBy { | b"COMMAND LIST" | b"COMMAND" | b"CONFIG GET" - | b"DEBUG" | b"ECHO" | b"FUNCTION LIST" | b"LASTSAVE" diff --git a/glide-core/src/client/mod.rs b/glide-core/src/client/mod.rs index 73eee144b1..005a38a9ca 100644 --- a/glide-core/src/client/mod.rs +++ b/glide-core/src/client/mod.rs @@ -33,10 +33,11 @@ use versions::Versioning; pub const HEARTBEAT_SLEEP_DURATION: Duration = Duration::from_secs(1); pub const DEFAULT_RETRIES: u32 = 3; +/// Note: If you change the 
default value, make sure to change the documentation in *all* wrappers. pub const DEFAULT_RESPONSE_TIMEOUT: Duration = Duration::from_millis(250); -pub const DEFAULT_CONNECTION_ATTEMPT_TIMEOUT: Duration = Duration::from_millis(250); pub const DEFAULT_PERIODIC_TOPOLOGY_CHECKS_INTERVAL: Duration = Duration::from_secs(60); -pub const INTERNAL_CONNECTION_TIMEOUT: Duration = Duration::from_millis(250); +/// Note: If you change the default value, make sure to change the documentation in *all* wrappers. +pub const DEFAULT_CONNECTION_TIMEOUT: Duration = Duration::from_millis(250); pub const FINISHED_SCAN_CURSOR: &str = "finished"; /// The value of 1000 for the maximum number of inflight requests is determined based on Little's Law in queuing theory: @@ -571,8 +572,9 @@ async fn create_cluster_client( Some(PeriodicCheck::ManualInterval(interval)) => Some(interval), None => Some(DEFAULT_PERIODIC_TOPOLOGY_CHECKS_INTERVAL), }; + let connection_timeout = to_duration(request.connection_timeout, DEFAULT_CONNECTION_TIMEOUT); let mut builder = redis::cluster::ClusterClientBuilder::new(initial_nodes) - .connection_timeout(INTERNAL_CONNECTION_TIMEOUT) + .connection_timeout(connection_timeout) .retries(DEFAULT_RETRIES); let read_from_strategy = request.read_from.unwrap_or_default(); builder = builder.read_from(match read_from_strategy { @@ -718,6 +720,8 @@ fn sanitized_request_string(request: &ConnectionRequest) -> String { "\nStandalone mode" }; let request_timeout = format_optional_value("Request timeout", request.request_timeout); + let connection_timeout = + format_optional_value("Connection timeout", request.connection_timeout); let database_id = format!("\ndatabase ID: {}", request.database_id); let rfr_strategy = request .read_from @@ -774,7 +778,7 @@ fn sanitized_request_string(request: &ConnectionRequest) -> String { ); format!( - "\nAddresses: 
{addresses}{tls_mode}{cluster_mode}{request_timeout}{rfr_strategy}{connection_retry_strategy}{database_id}{protocol}{client_name}{periodic_checks}{pubsub_subscriptions}{inflight_requests_limit}", + "\nAddresses: {addresses}{tls_mode}{cluster_mode}{request_timeout}{connection_timeout}{rfr_strategy}{connection_retry_strategy}{database_id}{protocol}{client_name}{periodic_checks}{pubsub_subscriptions}{inflight_requests_limit}", ) } diff --git a/glide-core/src/client/reconnecting_connection.rs b/glide-core/src/client/reconnecting_connection.rs index c882dd29d6..197de503b9 100644 --- a/glide-core/src/client/reconnecting_connection.rs +++ b/glide-core/src/client/reconnecting_connection.rs @@ -18,7 +18,7 @@ use tokio::task; use tokio::time::timeout; use tokio_retry2::{Retry, RetryError}; -use super::{run_with_timeout, DEFAULT_CONNECTION_ATTEMPT_TIMEOUT}; +use super::{run_with_timeout, DEFAULT_CONNECTION_TIMEOUT}; /// The reason behind the call to `reconnect()` #[derive(PartialEq, Eq, Debug, Clone)] @@ -71,7 +71,11 @@ async fn get_multiplexed_connection( connection_options: &GlideConnectionOptions, ) -> RedisResult { run_with_timeout( - Some(DEFAULT_CONNECTION_ATTEMPT_TIMEOUT), + Some( + connection_options + .connection_timeout + .unwrap_or(DEFAULT_CONNECTION_TIMEOUT), + ), client.get_multiplexed_async_connection(connection_options.clone()), ) .await @@ -113,6 +117,7 @@ async fn create_connection( retry_strategy: RetryStrategy, push_sender: Option>, discover_az: bool, + connection_timeout: Duration, ) -> Result { let client = &connection_backend.connection_info; let connection_options = GlideConnectionOptions { @@ -121,6 +126,7 @@ async fn create_connection( TokioDisconnectNotifier::new(), )), discover_az, + connection_timeout: Some(connection_timeout), }; let action = || async { get_multiplexed_connection(client, &connection_options) @@ -206,6 +212,7 @@ impl ReconnectingConnection { tls_mode: TlsMode, push_sender: Option>, discover_az: bool, + connection_timeout: Duration, 
) -> Result { log_debug( "connection creation", @@ -218,7 +225,14 @@ impl ReconnectingConnection { connection_available_signal: ManualResetEvent::new(true), client_dropped_flagged: AtomicBool::new(false), }; - create_connection(backend, connection_retry_strategy, push_sender, discover_az).await + create_connection( + backend, + connection_retry_strategy, + push_sender, + discover_az, + connection_timeout, + ) + .await } pub(crate) fn node_address(&self) -> String { diff --git a/glide-core/src/client/standalone_client.rs b/glide-core/src/client/standalone_client.rs index 5bc26999a8..c2c541c763 100644 --- a/glide-core/src/client/standalone_client.rs +++ b/glide-core/src/client/standalone_client.rs @@ -2,6 +2,7 @@ use super::get_redis_connection_info; use super::reconnecting_connection::{ReconnectReason, ReconnectingConnection}; +use super::{to_duration, DEFAULT_CONNECTION_TIMEOUT}; use super::{ConnectionRequest, NodeAddress, TlsMode}; use crate::client::types::ReadFrom as ClientReadFrom; use crate::retry_strategies::RetryStrategy; @@ -15,6 +16,7 @@ use redis::{PushInfo, RedisError, RedisResult, Value}; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; +use std::time::Duration; use telemetrylib::Telemetry; use tokio::sync::mpsc; use tokio::task; @@ -130,6 +132,11 @@ impl StandaloneClient { Some(ClientReadFrom::AZAffinity(_)) ); + let connection_timeout = to_duration( + connection_request.connection_timeout, + DEFAULT_CONNECTION_TIMEOUT, + ); + let mut stream = stream::iter(connection_request.addresses.iter()) .map(|address| async { get_connection_and_replication_info( @@ -143,6 +150,7 @@ impl StandaloneClient { tls_mode.unwrap_or(TlsMode::NoTls), &push_sender, discover_az, + connection_timeout, ) .await .map_err(|err| (format!("{}:{}", address.host, address.port), err)) @@ -552,6 +560,7 @@ async fn get_connection_and_replication_info( tls_mode: TlsMode, push_sender: &Option>, discover_az: bool, + connection_timeout: Duration, ) 
-> Result<(ReconnectingConnection, Value), (ReconnectingConnection, RedisError)> { let result = ReconnectingConnection::new( address, @@ -560,6 +569,7 @@ async fn get_connection_and_replication_info( tls_mode, push_sender.clone(), discover_az, + connection_timeout, ) .await; let reconnecting_connection = match result { diff --git a/glide-core/src/client/types.rs b/glide-core/src/client/types.rs index a0053587c8..e2314a1ab6 100644 --- a/glide-core/src/client/types.rs +++ b/glide-core/src/client/types.rs @@ -20,6 +20,7 @@ pub struct ConnectionRequest { pub addresses: Vec, pub cluster_mode_enabled: bool, pub request_timeout: Option, + pub connection_timeout: Option, pub connection_retry_strategy: Option, pub periodic_checks: Option, pub pubsub_subscriptions: Option, @@ -147,6 +148,7 @@ impl From for ConnectionRequest { .collect(); let cluster_mode_enabled = value.cluster_mode_enabled; let request_timeout = none_if_zero(value.request_timeout); + let connection_timeout = none_if_zero(value.connection_timeout); let connection_retry_strategy = value .connection_retry_strategy @@ -214,6 +216,7 @@ impl From for ConnectionRequest { addresses, cluster_mode_enabled, request_timeout, + connection_timeout, connection_retry_strategy, periodic_checks, pubsub_subscriptions, diff --git a/glide-core/src/protobuf/connection_request.proto b/glide-core/src/protobuf/connection_request.proto index 5f4db44b00..8e33b39da3 100644 --- a/glide-core/src/protobuf/connection_request.proto +++ b/glide-core/src/protobuf/connection_request.proto @@ -26,7 +26,7 @@ message AuthenticationInfo { enum ProtocolVersion { RESP3 = 0; - RESP2 = 1; + RESP2 = 1; } message PeriodicChecksManualInterval { @@ -71,6 +71,7 @@ message ConnectionRequest { PubSubSubscriptions pubsub_subscriptions = 13; uint32 inflight_requests_limit = 14; string client_az = 15; + uint32 connection_timeout = 16; } message ConnectionRetryStrategy { diff --git 
a/java/client/src/main/java/glide/api/models/configuration/AdvancedBaseClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/AdvancedBaseClientConfiguration.java new file mode 100644 index 0000000000..5a28ee9fcf --- /dev/null +++ b/java/client/src/main/java/glide/api/models/configuration/AdvancedBaseClientConfiguration.java @@ -0,0 +1,22 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.configuration; + +import lombok.Getter; +import lombok.experimental.SuperBuilder; + +/** + * Advanced configuration settings class for creating a client. Shared settings for standalone and + * cluster clients. + */ +@Getter +@SuperBuilder +public abstract class AdvancedBaseClientConfiguration { + + /** + * The duration in milliseconds to wait for a TCP/TLS connection to complete. This applies both + * during initial client creation and any reconnections that may occur during request processing. + * **Note**: A high connection timeout may lead to prolonged blocking of the entire command + * pipeline. If not explicitly set, a default value of 250 milliseconds will be used. + */ + private final Integer connectionTimeout; +} diff --git a/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClientConfiguration.java new file mode 100644 index 0000000000..0ce5f85958 --- /dev/null +++ b/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClientConfiguration.java @@ -0,0 +1,23 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.configuration; + +import glide.api.GlideClient; +import lombok.Getter; +import lombok.ToString; +import lombok.experimental.SuperBuilder; + +/** + * Represents advanced configuration settings for a Standalone {@link GlideClient} used in {@link + * GlideClientConfiguration}. 
+ * + * @example + *
{@code
+ * AdvancedGlideClientConfiguration config = AdvancedGlideClientConfiguration.builder()
+ *     .connectionTimeout(500)
+ *     .build();
+ * }
+ */ +@Getter +@SuperBuilder +@ToString +public class AdvancedGlideClientConfiguration extends AdvancedBaseClientConfiguration {} diff --git a/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClusterClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClusterClientConfiguration.java new file mode 100644 index 0000000000..ff02b18c4f --- /dev/null +++ b/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClusterClientConfiguration.java @@ -0,0 +1,23 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.configuration; + +import glide.api.GlideClusterClient; +import lombok.Getter; +import lombok.ToString; +import lombok.experimental.SuperBuilder; + +/** + * Represents advanced configuration settings for a Standalone {@link GlideClusterClient} used in + * {@link GlideClusterClientConfiguration}. + * + * @example + *
{@code
+ * AdvancedGlideClusterClientConfiguration config = AdvancedGlideClusterClientConfiguration.builder()
+ *     .connectionTimeout(500)
+ *     .build();
+ * }
+ */ +@Getter +@SuperBuilder +@ToString +public class AdvancedGlideClusterClientConfiguration extends AdvancedBaseClientConfiguration {} diff --git a/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java index 7d9d5d5b68..7cd29a7cb8 100644 --- a/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java @@ -49,7 +49,8 @@ public abstract class BaseClientConfiguration { * The duration in milliseconds that the client should wait for a request to complete. This * duration encompasses sending the request, awaiting for a response from the server, and any * required reconnections or retries. If the specified timeout is exceeded for a pending request, - * it will result in a timeout error. If not set, a default value will be used. + * it will result in a timeout error. If not explicitly set, a default value of 250 milliseconds + * will be used. */ private final Integer requestTimeout; diff --git a/java/client/src/main/java/glide/api/models/configuration/GlideClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/GlideClientConfiguration.java index 83d84e7c1f..4321f1dd39 100644 --- a/java/client/src/main/java/glide/api/models/configuration/GlideClientConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/GlideClientConfiguration.java @@ -24,6 +24,7 @@ * .clientName("GLIDE") * .subscriptionConfiguration(subscriptionConfiguration) * .inflightRequestsLimit(1000) + * .advancedConfiguration(AdvancedGlideClientConfiguration.builder().connectionTimeout(500).build()) * .build(); * } */ @@ -39,4 +40,7 @@ public class GlideClientConfiguration extends BaseClientConfiguration { /** Subscription configuration for the current client. 
*/ private final StandaloneSubscriptionConfiguration subscriptionConfiguration; + + /** Advanced configuration settings for the client. */ + private final AdvancedGlideClientConfiguration advancedConfiguration; } diff --git a/java/client/src/main/java/glide/api/models/configuration/GlideClusterClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/GlideClusterClientConfiguration.java index b1d1c7590c..f0b6d7789d 100644 --- a/java/client/src/main/java/glide/api/models/configuration/GlideClusterClientConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/GlideClusterClientConfiguration.java @@ -23,6 +23,7 @@ * .clientName("GLIDE") * .subscriptionConfiguration(subscriptionConfiguration) * .inflightRequestsLimit(1000) + * .advancedConfiguration(AdvancedGlideClusterClientConfiguration.builder().connectionTimeout(500).build()) * .build(); * } */ @@ -32,4 +33,7 @@ public class GlideClusterClientConfiguration extends BaseClientConfiguration { /** Subscription configuration for the current client. */ private final ClusterSubscriptionConfiguration subscriptionConfiguration; + + /** Advanced configuration settings for the client. 
*/ + private final AdvancedGlideClusterClientConfiguration advancedConfiguration; } diff --git a/java/client/src/main/java/glide/managers/ConnectionManager.java b/java/client/src/main/java/glide/managers/ConnectionManager.java index 99b383a9ed..443384d5a6 100644 --- a/java/client/src/main/java/glide/managers/ConnectionManager.java +++ b/java/client/src/main/java/glide/managers/ConnectionManager.java @@ -8,6 +8,7 @@ import connection_request.ConnectionRequestOuterClass.PubSubChannelsOrPatterns; import connection_request.ConnectionRequestOuterClass.PubSubSubscriptions; import connection_request.ConnectionRequestOuterClass.TlsMode; +import glide.api.models.configuration.AdvancedBaseClientConfiguration; import glide.api.models.configuration.BaseClientConfiguration; import glide.api.models.configuration.GlideClientConfiguration; import glide.api.models.configuration.GlideClusterClientConfiguration; @@ -171,6 +172,30 @@ private ConnectionRequest.Builder setupConnectionRequestBuilderGlideClient( connectionRequestBuilder.setPubsubSubscriptions(subscriptionsBuilder.build()); } + if (configuration.getAdvancedConfiguration() != null) { + connectionRequestBuilder = + setupConnectionRequestBuilderAdvancedBaseConfiguration( + connectionRequestBuilder, configuration.getAdvancedConfiguration()); + } + + return connectionRequestBuilder; + } + + /** + * Configures the {@link ConnectionRequest.Builder} with settings from the provided {@link + * AdvancedBaseClientConfiguration}. + * + * @param connectionRequestBuilder The builder for the {@link ConnectionRequest}. + * @param configuration The advanced configuration settings. + * @return The updated {@link ConnectionRequest.Builder}. 
+ */ + private ConnectionRequest.Builder setupConnectionRequestBuilderAdvancedBaseConfiguration( + ConnectionRequest.Builder connectionRequestBuilder, + AdvancedBaseClientConfiguration configuration) { + if (configuration.getConnectionTimeout() != null) { + connectionRequestBuilder.setConnectionTimeout(configuration.getConnectionTimeout()); + } + return connectionRequestBuilder; } @@ -199,6 +224,12 @@ private ConnectionRequest.Builder setupConnectionRequestBuilderGlideClusterClien connectionRequestBuilder.setPubsubSubscriptions(subscriptionsBuilder.build()); } + if (configuration.getAdvancedConfiguration() != null) { + connectionRequestBuilder = + setupConnectionRequestBuilderAdvancedBaseConfiguration( + connectionRequestBuilder, configuration.getAdvancedConfiguration()); + } + return connectionRequestBuilder; } diff --git a/java/integTest/src/test/java/glide/ConnectionTests.java b/java/integTest/src/test/java/glide/ConnectionTests.java index 2aec2e4e6b..45fea7065a 100644 --- a/java/integTest/src/test/java/glide/ConnectionTests.java +++ b/java/integTest/src/test/java/glide/ConnectionTests.java @@ -10,19 +10,33 @@ import static glide.api.models.configuration.RequestRoutingConfiguration.SlotType.PRIMARY; import static glide.api.models.configuration.RequestRoutingConfiguration.SlotType.REPLICA; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assumptions.assumeTrue; +import glide.api.BaseClient; import glide.api.GlideClient; import glide.api.GlideClusterClient; import glide.api.models.ClusterValue; import glide.api.models.commands.InfoOptions; +import glide.api.models.configuration.AdvancedGlideClientConfiguration; +import glide.api.models.configuration.AdvancedGlideClusterClientConfiguration; +import 
glide.api.models.configuration.BackoffStrategy; import glide.api.models.configuration.ReadFrom; import glide.api.models.configuration.RequestRoutingConfiguration; +import glide.api.models.exceptions.ClosingException; +import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.stream.Stream; import lombok.SneakyThrows; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; @Timeout(10) // seconds public class ConnectionTests { @@ -52,6 +66,35 @@ public GlideClusterClient createAzTestClient(String az) { .get(); } + @SneakyThrows + public BaseClient createConnectionTimeoutClient( + Boolean clusterMode, + int connectionTimeout, + int requestTimeout, + BackoffStrategy backoffStrategy) { + if (clusterMode) { + var advancedConfiguration = + AdvancedGlideClusterClientConfiguration.builder() + .connectionTimeout(connectionTimeout) + .build(); + return GlideClusterClient.createClient( + commonClusterClientConfig() + .advancedConfiguration(advancedConfiguration) + .requestTimeout(requestTimeout) + .build()) + .get(); + } + var advancedConfiguration = + AdvancedGlideClientConfiguration.builder().connectionTimeout(connectionTimeout).build(); + return GlideClient.createClient( + commonClientConfig() + .advancedConfiguration(advancedConfiguration) + .requestTimeout(requestTimeout) + .reconnectStrategy(backoffStrategy) + .build()) + .get(); + } + /** * Test that the client with AZ affinity strategy routes in a round-robin manner to all replicas * within the specified AZ. 
@@ -202,4 +245,76 @@ public void test_az_affinity_non_existing_az() { assertEquals(4, matchingEntries); azTestClient.close(); } + + @SneakyThrows + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void test_connection_timeout(boolean clusterMode) { + var backoffStrategy = + BackoffStrategy.builder().exponentBase(2).factor(100).numOfRetries(1).build(); + var client = createConnectionTimeoutClient(clusterMode, 250, 20000, backoffStrategy); + + // Runnable for long-running DEBUG SLEEP command + Runnable debugSleepTask = + () -> { + try { + if (client instanceof GlideClusterClient) { + ((GlideClusterClient) client) + .customCommand(new String[] {"DEBUG", "sleep", "7"}, ALL_NODES) + .get(); + } else if (client instanceof GlideClient) { + ((GlideClient) client).customCommand(new String[] {"DEBUG", "sleep", "7"}).get(); + } + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException("Error during DEBUG SLEEP command", e); + } + }; + + // Runnable for testing connection failure due to timeout + Runnable failToConnectTask = + () -> { + try { + Thread.sleep(1000); // Wait to ensure the debug sleep command is running + ExecutionException executionException = + assertThrows( + ExecutionException.class, + () -> createConnectionTimeoutClient(clusterMode, 100, 250, backoffStrategy)); + assertInstanceOf(ClosingException.class, executionException.getCause()); + assertTrue(executionException.getMessage().toLowerCase().contains("timed out")); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException("Thread was interrupted", e); + } + }; + + // Runnable for testing successful connection + Runnable connectToClientTask = + () -> { + try { + Thread.sleep(1000); // Wait to ensure the debug sleep command is running + var timeoutClient = + createConnectionTimeoutClient(clusterMode, 10000, 250, backoffStrategy); + assertEquals(timeoutClient.set("key", "value").get(), "OK"); + timeoutClient.close(); 
+ } catch (Exception e) { + throw new RuntimeException("Error during successful connection attempt", e); + } + }; + + // Execute all tasks concurrently + ExecutorService executorService = Executors.newFixedThreadPool(3); + try { + executorService.invokeAll( + List.of( + Executors.callable(debugSleepTask), + Executors.callable(failToConnectTask), + Executors.callable(connectToClientTask))); + } finally { + executorService.shutdown(); + // Clean up the main client + if (client != null) { + client.close(); + } + } + } } diff --git a/node/npm/glide/index.ts b/node/npm/glide/index.ts index c4dab9795b..a7fad935c1 100644 --- a/node/npm/glide/index.ts +++ b/node/npm/glide/index.ts @@ -213,6 +213,8 @@ function initialize() { Script, ObjectType, ClusterScanCursor, + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, BaseClientConfiguration, GlideClusterClientConfiguration, LevelOptions, @@ -290,6 +292,8 @@ function initialize() { GlideClient, GlideClusterClient, GlideClientConfiguration, + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, FunctionListOptions, FunctionListResponse, FunctionStatsSingleResponse, diff --git a/node/rust-client/src/lib.rs b/node/rust-client/src/lib.rs index 963f966f24..ffa5b5c47f 100644 --- a/node/rust-client/src/lib.rs +++ b/node/rust-client/src/lib.rs @@ -40,9 +40,13 @@ pub enum Level { pub const MAX_REQUEST_ARGS_LEN: u32 = MAX_REQUEST_ARGS_LENGTH as u32; #[napi] -pub const DEFAULT_TIMEOUT_IN_MILLISECONDS: u32 = +pub const DEFAULT_REQUEST_TIMEOUT_IN_MILLISECONDS: u32 = glide_core::client::DEFAULT_RESPONSE_TIMEOUT.as_millis() as u32; +#[napi] +pub const DEFAULT_CONNECTION_TIMEOUT_IN_MILLISECONDS: u32 = + glide_core::client::DEFAULT_CONNECTION_TIMEOUT.as_millis() as u32; + #[napi] pub const DEFAULT_INFLIGHT_REQUESTS_LIMIT: u32 = glide_core::client::DEFAULT_MAX_INFLIGHT_REQUESTS; diff --git a/node/src/BaseClient.ts b/node/src/BaseClient.ts index 71b1ffd89c..e011daf8a6 100644 --- 
a/node/src/BaseClient.ts +++ b/node/src/BaseClient.ts @@ -3,8 +3,9 @@ */ import { ClusterScanCursor, + DEFAULT_CONNECTION_TIMEOUT_IN_MILLISECONDS, DEFAULT_INFLIGHT_REQUESTS_LIMIT, - DEFAULT_TIMEOUT_IN_MILLISECONDS, + DEFAULT_REQUEST_TIMEOUT_IN_MILLISECONDS, Script, StartSocketConnection, getStatistics, @@ -606,7 +607,7 @@ export interface BaseClientConfiguration { * The duration in milliseconds that the client should wait for a request to complete. * This duration encompasses sending the request, awaiting for a response from the server, and any required reconnections or retries. * If the specified timeout is exceeded for a pending request, it will result in a timeout error. - * If not set, a default value will be used. + * If not explicitly set, a default value of 250 milliseconds will be used. * Value must be an integer. */ requestTimeout?: number; @@ -650,6 +651,33 @@ export interface BaseClientConfiguration { clientAz?: string; } +/** + * Represents advanced configuration settings for a client, including connection-related options. + * + * @remarks + * The `AdvancedBaseClientConfiguration` interface defines advanced configuration settings for managing the client's connection behavior. + * + * ### Connection Timeout + * + * - **Connection Timeout**: The `connectionTimeout` property specifies the duration (in milliseconds) the client should wait for a connection to be established. + * + * @example + * ```typescript + * const config: AdvancedBaseClientConfiguration = { + * connectionTimeout: 5000, // 5 seconds + * }; + * ``` + */ +export interface AdvancedBaseClientConfiguration { + /** + * The duration in milliseconds to wait for a TCP/TLS connection to complete. + * This applies both during initial client creation and any reconnections that may occur during request processing. + * **Note**: A high connection timeout may lead to prolonged blocking of the entire command pipeline. + * If not explicitly set, a default value of 250 milliseconds will be used. 
+ */ + connectionTimeout?: number; +} + /** * Enum of Valkey data types * `STRING` @@ -951,7 +979,7 @@ export class BaseClient { Logger.log("info", "Client lifetime", `construct client`); this.config = options; this.requestTimeout = - options?.requestTimeout ?? DEFAULT_TIMEOUT_IN_MILLISECONDS; + options?.requestTimeout ?? DEFAULT_REQUEST_TIMEOUT_IN_MILLISECONDS; this.socket = socket; this.socket .on("data", (data) => this.handleReadData(data)) @@ -7657,6 +7685,18 @@ export class BaseClient { }; } + /** + * @internal + */ + protected configureAdvancedConfigurationBase( + options: AdvancedBaseClientConfiguration, + request: connection_request.IConnectionRequest, + ) { + request.connectionTimeout = + options.connectionTimeout ?? + DEFAULT_CONNECTION_TIMEOUT_IN_MILLISECONDS; + } + /** * @internal */ diff --git a/node/src/GlideClient.ts b/node/src/GlideClient.ts index acec0c377f..fc9301bd75 100644 --- a/node/src/GlideClient.ts +++ b/node/src/GlideClient.ts @@ -4,6 +4,7 @@ import * as net from "net"; import { + AdvancedBaseClientConfiguration, BaseClient, BaseClientConfiguration, convertGlideRecordToRecord, @@ -171,8 +172,26 @@ export type GlideClientConfiguration = BaseClientConfiguration & { * Will be applied via SUBSCRIBE/PSUBSCRIBE commands during connection establishment. */ pubsubSubscriptions?: GlideClientConfiguration.PubSubSubscriptions; + /** + * Advanced configuration settings for the client. + */ + advancedConfiguration?: AdvancedGlideClientConfiguration; }; +/** + * Represents advanced configuration settings for creating a {@link GlideClient | GlideClient} used in {@link GlideClientConfiguration | GlideClientConfiguration}. + * + * + * @example + * ```typescript + * const config: AdvancedGlideClientConfiguration = { + * connectionTimeout: 500, // Set the connection timeout to 500ms + * }; + * ``` + */ +export type AdvancedGlideClientConfiguration = + AdvancedBaseClientConfiguration & {}; + /** * Client used for connection to standalone servers. 
* @@ -189,6 +208,14 @@ export class GlideClient extends BaseClient { configuration.databaseId = options.databaseId; configuration.connectionRetryStrategy = options.connectionBackoff; this.configurePubsub(options, configuration); + + if (options.advancedConfiguration) { + this.configureAdvancedConfigurationBase( + options.advancedConfiguration, + configuration, + ); + } + return configuration; } /** diff --git a/node/src/GlideClusterClient.ts b/node/src/GlideClusterClient.ts index 4e9aee579d..c12264f078 100644 --- a/node/src/GlideClusterClient.ts +++ b/node/src/GlideClusterClient.ts @@ -5,6 +5,7 @@ import { ClusterScanCursor, Script } from "glide-rs"; import * as net from "net"; import { + AdvancedBaseClientConfiguration, BaseClient, BaseClientConfiguration, Decoder, @@ -190,8 +191,26 @@ export type GlideClusterClientConfiguration = BaseClientConfiguration & { * Will be applied via SUBSCRIBE/PSUBSCRIBE/SSUBSCRIBE commands during connection establishment. */ pubsubSubscriptions?: GlideClusterClientConfiguration.PubSubSubscriptions; + /** + * Advanced configuration settings for the client. + */ + advancedConfiguration?: AdvancedGlideClusterClientConfiguration; }; +/** + * Represents advanced configuration settings for creating a {@link GlideClusterClient | GlideClusterClient} used in {@link GlideClusterClientConfiguration | GlideClusterClientConfiguration}. + * + * + * @example + * ```typescript + * const config: AdvancedGlideClusterClientConfiguration = { + * connectionTimeout: 500, // Set the connection timeout to 500ms + * }; + * ``` + */ +export type AdvancedGlideClusterClientConfiguration = + AdvancedBaseClientConfiguration & {}; + /** * If the command's routing is to one node we will get T as a response type, * otherwise, we will get a dictionary of address: nodeResponse, address is of type string and nodeResponse is of type T. 
@@ -504,6 +523,14 @@ export class GlideClusterClient extends BaseClient { } this.configurePubsub(options, configuration); + + if (options.advancedConfiguration) { + this.configureAdvancedConfigurationBase( + options.advancedConfiguration, + configuration, + ); + } + return configuration; } /** diff --git a/node/tests/GlideClient.test.ts b/node/tests/GlideClient.test.ts index 0c77bde519..08f39e19d2 100644 --- a/node/tests/GlideClient.test.ts +++ b/node/tests/GlideClient.test.ts @@ -979,6 +979,76 @@ describe("GlideClient", () => { }, ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "should handle connection timeout when client is blocked by long-running command (protocol: %p)", + async (protocol) => { + // Create a client configuration with a generous request timeout + const config = getClientConfigurationOption( + cluster.getAddresses(), + protocol, + { requestTimeout: 20000 }, // Long timeout to allow debugging operations (sleep for 7 seconds) + ); + + // Initialize the primary client + const client = await GlideClient.createClient(config); + + try { + // Run a long-running DEBUG SLEEP command using the first client (client) + const debugCommandPromise = client.customCommand( + ["DEBUG", "sleep", "7"], // Sleep for 7 seconds + ); + + // Function that tries to create a client with a short connection timeout (100ms) + const failToCreateClient = async () => { + await new Promise((resolve) => setTimeout(resolve, 1000)); // Wait for 1 second before retry + await expect( + GlideClient.createClient({ + connectionBackoff: { + exponentBase: 2, + factor: 100, + numberOfRetries: 1, + }, + advancedConfiguration: { connectionTimeout: 100 }, // 100ms connection timeout + ...config, // Include the rest of the config + }), + ).rejects.toThrowError(/timed out/i); // Ensure it throws a timeout error + }; + + // Function that verifies that a larger connection timeout allows connection + const connectWithLargeTimeout = async () => { + await new Promise((resolve) => 
setTimeout(resolve, 1000)); // Wait for 1 second before retry + const longerTimeoutClient = await GlideClient.createClient({ + connectionBackoff: { + exponentBase: 2, + factor: 100, + numberOfRetries: 1, + }, + advancedConfiguration: { connectionTimeout: 10000 }, // 10s connection timeout + ...config, // Include the rest of the config + }); + expect(await client.set("x", "y")).toEqual("OK"); + longerTimeoutClient.close(); // Close the client after successful connection + }; + + // Run both the long-running DEBUG SLEEP command and the client creation attempt in parallel + await Promise.all([ + debugCommandPromise, // Run the long-running command + failToCreateClient(), // Attempt to create the client with a short timeout + ]); + + // Run all tasks: fail short timeout, succeed with large timeout, and run the debug command + await Promise.all([ + debugCommandPromise, // Run the long-running command + connectWithLargeTimeout(), // Attempt to create the client with a short timeout + ]); + } finally { + // Clean up the test client and ensure everything is flushed and closed + client.close(); + } + }, + TIMEOUT, + ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "function kill RW func %p", async (protocol) => { diff --git a/node/tests/GlideClusterClient.test.ts b/node/tests/GlideClusterClient.test.ts index 43b039c61f..d6c6474872 100644 --- a/node/tests/GlideClusterClient.test.ts +++ b/node/tests/GlideClusterClient.test.ts @@ -1975,6 +1975,68 @@ describe("GlideClusterClient", () => { TIMEOUT, ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "should handle connection timeout when client is blocked by long-running command (protocol: %p)", + async (protocol) => { + // Create a client configuration with a generous request timeout + const config = getClientConfigurationOption( + cluster.getAddresses(), + protocol, + { requestTimeout: 20000 }, // Long timeout to allow debugging operations (sleep for 7 seconds) + ); + + // Initialize the primary client 
+ const client = await GlideClusterClient.createClient(config); + + try { + // Run a long-running DEBUG SLEEP command using the first client (client) + const debugCommandPromise = client.customCommand( + ["DEBUG", "sleep", "7"], + { route: "allNodes" }, // Sleep for 7 seconds + ); + + // Function that tries to create a client with a short connection timeout (100ms) + const failToCreateClient = async () => { + await new Promise((resolve) => setTimeout(resolve, 1000)); // Wait for 1 second before retry + await expect( + GlideClusterClient.createClient({ + advancedConfiguration: { connectionTimeout: 100 }, // 100ms connection timeout + ...config, // Include the rest of the config + }), + ).rejects.toThrowError(/timed out/i); // Ensure it throws a timeout error + }; + + // Function that verifies that a larger connection timeout allows connection + const connectWithLargeTimeout = async () => { + await new Promise((resolve) => setTimeout(resolve, 1000)); // Wait for 1 second before retry + const longerTimeoutClient = + await GlideClusterClient.createClient({ + advancedConfiguration: { connectionTimeout: 10000 }, // 10s connection timeout + ...config, // Include the rest of the config + }); + expect(await client.set("x", "y")).toEqual("OK"); + longerTimeoutClient.close(); // Close the client after successful connection + }; + + // Run both the long-running DEBUG SLEEP command and the client creation attempt in parallel + await Promise.all([ + debugCommandPromise, // Run the long-running command + failToCreateClient(), // Attempt to create the client with a short timeout + ]); + + // Run all tasks: fail short timeout, succeed with large timeout, and run the debug command + await Promise.all([ + debugCommandPromise, // Run the long-running command + connectWithLargeTimeout(), // Attempt to create the client with a short timeout + ]); + } finally { + // Clean up the test client and ensure everything is flushed and closed + client.close(); + } + }, + TIMEOUT, + ); + it.each([ 
[ProtocolVersion.RESP2, 5], [ProtocolVersion.RESP2, 100], diff --git a/python/python/glide/__init__.py b/python/python/glide/__init__.py index 8f6ceac47b..f2ecc3da4e 100644 --- a/python/python/glide/__init__.py +++ b/python/python/glide/__init__.py @@ -111,6 +111,8 @@ TTransaction, ) from glide.config import ( + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, BackoffStrategy, GlideClientConfiguration, GlideClusterClientConfiguration, @@ -176,6 +178,8 @@ "TGlideClient", "TTransaction", # Config + "AdvancedGlideClientConfiguration", + "AdvancedGlideClusterClientConfiguration", "GlideClientConfiguration", "GlideClusterClientConfiguration", "BackoffStrategy", diff --git a/python/python/glide/config.py b/python/python/glide/config.py index b33c037cbf..fc1acda94c 100644 --- a/python/python/glide/config.py +++ b/python/python/glide/config.py @@ -129,6 +129,28 @@ class PeriodicChecksStatus(Enum): """ +class AdvancedBaseClientConfiguration: + """ + Represents the advanced configuration settings for a base Glide client. + + Args: + connection_timeout (Optional[int]): The duration in milliseconds to wait for a TCP/TLS connection to complete. + This applies both during initial client creation and any reconnections that may occur during request processing. + **Note**: A high connection timeout may lead to prolonged blocking of the entire command pipeline. + If not explicitly set, a default value of 250 milliseconds will be used. 
+ """ + + def __init__(self, connection_timeout: Optional[int] = None): + self.connection_timeout = connection_timeout + + def _create_a_protobuf_conn_request( + self, request: ConnectionRequest + ) -> ConnectionRequest: + if self.connection_timeout: + request.connection_timeout = self.connection_timeout + return request + + class BaseClientConfiguration: def __init__( self, @@ -141,6 +163,7 @@ def __init__( protocol: ProtocolVersion = ProtocolVersion.RESP3, inflight_requests_limit: Optional[int] = None, client_az: Optional[str] = None, + advanced_config: Optional[AdvancedBaseClientConfiguration] = None, ): """ Represents the configuration settings for a Glide client. @@ -163,12 +186,14 @@ def __init__( read_from (ReadFrom): If not set, `PRIMARY` will be used. request_timeout (Optional[int]): The duration in milliseconds that the client should wait for a request to complete. This duration encompasses sending the request, awaiting for a response from the server, and any required reconnections or retries. - If the specified timeout is exceeded for a pending request, it will result in a timeout error. If not set, a default value will be used. + If the specified timeout is exceeded for a pending request, it will result in a timeout error. If not explicitly set, a default value of 250 milliseconds will be used. client_name (Optional[str]): Client name to be used for the client. Will be used with CLIENT SETNAME command during connection establishment. inflight_requests_limit (Optional[int]): The maximum number of concurrent requests allowed to be in-flight (sent but not yet completed). This limit is used to control the memory usage and prevent the client from overwhelming the server or getting stuck in case of a queue backlog. If not set, a default value will be used. - + client_az (Optional[str]): Availability Zone of the client. + If ReadFrom strategy is AZAffinity, this setting ensures that readonly commands are directed to replicas within the specified AZ if exits. 
+ advanced_config (Optional[AdvancedBaseClientConfiguration]): Advanced configuration settings for the client. """ self.addresses = addresses self.use_tls = use_tls @@ -179,6 +204,7 @@ def __init__( self.protocol = protocol self.inflight_requests_limit = inflight_requests_limit self.client_az = client_az + self.advanced_config = advanced_config if read_from == ReadFrom.AZ_AFFINITY and not client_az: raise ValueError( @@ -218,6 +244,8 @@ def _create_a_protobuf_conn_request( request.inflight_requests_limit = self.inflight_requests_limit if self.client_az: request.client_az = self.client_az + if self.advanced_config: + self.advanced_config._create_a_protobuf_conn_request(request) return request @@ -230,6 +258,16 @@ def _get_pubsub_callback_and_context( return None, None +class AdvancedGlideClientConfiguration(AdvancedBaseClientConfiguration): + """ + Represents the advanced configuration settings for a Standalone Glide client. + """ + + def __init__(self, connection_timeout: Optional[int] = None): + + super().__init__(connection_timeout) + + class GlideClientConfiguration(BaseClientConfiguration): """ Represents the configuration settings for a Standalone Glide client. @@ -249,7 +287,7 @@ class GlideClientConfiguration(BaseClientConfiguration): request_timeout (Optional[int]): The duration in milliseconds that the client should wait for a request to complete. This duration encompasses sending the request, awaiting for a response from the server, and any required reconnections or retries. If the specified timeout is exceeded for a pending request, it will result in a timeout error. - If not set, a default value will be used. + If not explicitly set, a default value of 250 milliseconds will be used. reconnect_strategy (Optional[BackoffStrategy]): Strategy used to determine how and when to reconnect, in case of connection failures. If not set, a default backoff strategy will be used. 
@@ -261,7 +299,9 @@ class GlideClientConfiguration(BaseClientConfiguration): inflight_requests_limit (Optional[int]): The maximum number of concurrent requests allowed to be in-flight (sent but not yet completed). This limit is used to control the memory usage and prevent the client from overwhelming the server or getting stuck in case of a queue backlog. If not set, a default value will be used. - + client_az (Optional[str]): Availability Zone of the client. + If ReadFrom strategy is AZAffinity, this setting ensures that readonly commands are directed to replicas within the specified AZ if it exists. + advanced_config (Optional[AdvancedGlideClientConfiguration]): Advanced configuration settings for the client, see `AdvancedGlideClientConfiguration`. """ class PubSubChannelModes(IntEnum): @@ -308,6 +348,7 @@ def __init__( pubsub_subscriptions: Optional[PubSubSubscriptions] = None, inflight_requests_limit: Optional[int] = None, client_az: Optional[str] = None, + advanced_config: Optional[AdvancedGlideClientConfiguration] = None, ): super().__init__( addresses=addresses, @@ -319,6 +360,7 @@ def __init__( protocol=protocol, inflight_requests_limit=inflight_requests_limit, client_az=client_az, + advanced_config=advanced_config, ) self.reconnect_strategy = reconnect_strategy self.database_id = database_id @@ -375,6 +417,15 @@ def _get_pubsub_callback_and_context( return None, None +class AdvancedGlideClusterClientConfiguration(AdvancedBaseClientConfiguration): + """ + Represents the advanced configuration settings for a Glide Cluster client. + """ + + def __init__(self, connection_timeout: Optional[int] = None): + super().__init__(connection_timeout) + + class GlideClusterClientConfiguration(BaseClientConfiguration): """ Represents the configuration settings for a Cluster Glide client. @@ -392,7 +443,7 @@ class GlideClusterClientConfiguration(BaseClientConfiguration): read_from (ReadFrom): If not set, `PRIMARY` will be used.
request_timeout (Optional[int]): The duration in milliseconds that the client should wait for a request to complete. This duration encompasses sending the request, awaiting for a response from the server, and any required reconnections or retries. - If the specified timeout is exceeded for a pending request, it will result in a timeout error. If not set, a default value will be used. + If the specified timeout is exceeded for a pending request, it will result in a timeout error. If not explicitly set, a default value of 250 milliseconds will be used. client_name (Optional[str]): Client name to be used for the client. Will be used with CLIENT SETNAME command during connection establishment. protocol (ProtocolVersion): The version of the RESP protocol to communicate with the server. periodic_checks (Union[PeriodicChecksStatus, PeriodicChecksManualInterval]): Configure the periodic topology checks. @@ -404,7 +455,9 @@ class GlideClusterClientConfiguration(BaseClientConfiguration): inflight_requests_limit (Optional[int]): The maximum number of concurrent requests allowed to be in-flight (sent but not yet completed). This limit is used to control the memory usage and prevent the client from overwhelming the server or getting stuck in case of a queue backlog. If not set, a default value will be used. - + client_az (Optional[str]): Availability Zone of the client. + If ReadFrom strategy is AZAffinity, this setting ensures that readonly commands are directed to replicas within the specified AZ if it exists. + advanced_config (Optional[AdvancedGlideClusterClientConfiguration]): Advanced configuration settings for the client, see `AdvancedGlideClusterClientConfiguration`.
Notes: @@ -459,6 +512,7 @@ def __init__( pubsub_subscriptions: Optional[PubSubSubscriptions] = None, inflight_requests_limit: Optional[int] = None, client_az: Optional[str] = None, + advanced_config: Optional[AdvancedGlideClusterClientConfiguration] = None, ): super().__init__( addresses=addresses, @@ -470,6 +524,7 @@ def __init__( protocol=protocol, inflight_requests_limit=inflight_requests_limit, client_az=client_az, + advanced_config=advanced_config, ) self.periodic_checks = periodic_checks self.pubsub_subscriptions = pubsub_subscriptions diff --git a/python/python/tests/conftest.py b/python/python/tests/conftest.py index 0ab5c9d6e9..b2a97b4d0a 100644 --- a/python/python/tests/conftest.py +++ b/python/python/tests/conftest.py @@ -5,6 +5,9 @@ import pytest from glide.config import ( + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, + BackoffStrategy, GlideClientConfiguration, GlideClusterClientConfiguration, NodeAddress, @@ -242,7 +245,8 @@ async def create_client( addresses: Optional[List[NodeAddress]] = None, client_name: Optional[str] = None, protocol: ProtocolVersion = ProtocolVersion.RESP3, - timeout: Optional[int] = 1000, + request_timeout: Optional[int] = 1000, + connection_timeout: Optional[int] = 1000, cluster_mode_pubsub: Optional[ GlideClusterClientConfiguration.PubSubSubscriptions ] = None, @@ -252,6 +256,7 @@ async def create_client( inflight_requests_limit: Optional[int] = None, read_from: ReadFrom = ReadFrom.PRIMARY, client_az: Optional[str] = None, + reconnect_strategy: Optional[BackoffStrategy] = None, valkey_cluster: Optional[ValkeyCluster] = None, ) -> Union[GlideClient, GlideClusterClient]: # Create async socket client @@ -268,11 +273,12 @@ async def create_client( credentials=credentials, client_name=client_name, protocol=protocol, - request_timeout=timeout, + request_timeout=request_timeout, pubsub_subscriptions=cluster_mode_pubsub, inflight_requests_limit=inflight_requests_limit, read_from=read_from, 
client_az=client_az, + advanced_config=AdvancedGlideClusterClientConfiguration(connection_timeout), ) return await GlideClusterClient.create(cluster_config) else: @@ -286,11 +292,13 @@ async def create_client( database_id=database_id, client_name=client_name, protocol=protocol, - request_timeout=timeout, + request_timeout=request_timeout, pubsub_subscriptions=standalone_mode_pubsub, inflight_requests_limit=inflight_requests_limit, read_from=read_from, client_az=client_az, + advanced_config=AdvancedGlideClientConfiguration(connection_timeout), + reconnect_strategy=reconnect_strategy, ) return await GlideClient.create(config) @@ -343,7 +351,7 @@ async def test_teardown(request, cluster_mode: bool, protocol: ProtocolVersion): try: # Try connecting without credentials client = await create_client( - request, cluster_mode, protocol=protocol, timeout=2000 + request, cluster_mode, protocol=protocol, request_timeout=2000 ) await client.custom_command(["FLUSHALL"]) await client.close() @@ -356,7 +364,7 @@ async def test_teardown(request, cluster_mode: bool, protocol: ProtocolVersion): request, cluster_mode, protocol=protocol, - timeout=2000, + request_timeout=2000, credentials=credentials, ) try: diff --git a/python/python/tests/test_async_client.py b/python/python/tests/test_async_client.py index d8d65d1ca7..3b1a458548 100644 --- a/python/python/tests/test_async_client.py +++ b/python/python/tests/test_async_client.py @@ -72,6 +72,7 @@ ) from glide.async_commands.transaction import ClusterTransaction, Transaction from glide.config import ( + BackoffStrategy, GlideClientConfiguration, GlideClusterClientConfiguration, ProtocolVersion, @@ -128,7 +129,7 @@ async def test_register_client_name_and_version(self, glide_client: TGlideClient @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_send_and_receive_large_values(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, 
protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**25 # 33mb key = "0" * length @@ -302,6 +303,90 @@ async def test_statistics(self, glide_client: TGlideClient): assert "total_clients" in stats assert len(stats) == 2 + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_connection_timeout( + self, + request, + cluster_mode: bool, + protocol: ProtocolVersion, + ): + + client = await create_client( + request, + cluster_mode, + protocol=protocol, + request_timeout=2000, + connection_timeout=2000, + ) + assert isinstance(client, (GlideClient, GlideClusterClient)) + + assert await client.set("key", "value") == "OK" + + await client.close() + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_connection_timeout_when_client_is_blocked( + self, + request, + cluster_mode: bool, + protocol: ProtocolVersion, + ): + client = await create_client( + request, + cluster_mode, + protocol=protocol, + request_timeout=20000, # 20 seconds timeout + ) + + async def run_debug_sleep(): + """ + Run a long-running DEBUG SLEEP command. 
+ """ + command = ["DEBUG", "sleep", "7"] + if isinstance(client, GlideClusterClient): + await client.custom_command(command, AllNodes()) + else: + await client.custom_command(command) + + async def fail_to_connect_to_client(): + # try to connect with a small timeout connection + await asyncio.sleep(1) + with pytest.raises(ClosingError) as e: + await create_client( + request, + cluster_mode, + protocol=protocol, + connection_timeout=100, # 100 ms + reconnect_strategy=BackoffStrategy( + 1, 100, 2 + ), # needs to be configured so that we wont be connected within 7 seconds bc of default retries + ) + assert "timed out" in str(e) + + async def connect_to_client(): + # Create a second client with a connection timeout of 7 seconds + await asyncio.sleep(1) + timeout_client = await create_client( + request, + cluster_mode, + protocol=protocol, + connection_timeout=10000, # 10-second connection timeout + reconnect_strategy=BackoffStrategy(1, 100, 2), + ) + + # Ensure the second client can connect and perform a simple operation + assert await timeout_client.set("key", "value") == "OK" + await timeout_client.close() + + # Run tests + await asyncio.gather(run_debug_sleep(), fail_to_connect_to_client()) + await asyncio.gather(run_debug_sleep(), connect_to_client()) + + # Clean up the main client + await client.close() + @pytest.mark.asyncio class TestCommands: @@ -5424,7 +5509,10 @@ async def test_xread_edge_cases_and_failures( ) test_client = await create_client( - request=request, protocol=protocol, cluster_mode=cluster_mode, timeout=900 + request=request, + protocol=protocol, + cluster_mode=cluster_mode, + request_timeout=900, ) # ensure command doesn't time out even if timeout > request timeout assert ( @@ -5817,7 +5905,10 @@ async def test_xreadgroup_edge_cases_and_failures( ) test_client = await create_client( - request=request, protocol=protocol, cluster_mode=cluster_mode, timeout=900 + request=request, + protocol=protocol, + cluster_mode=cluster_mode, + 
request_timeout=900, ) timeout_key = f"{{testKey}}:{get_random_string(10)}" timeout_group_name = get_random_string(10) @@ -8337,11 +8428,11 @@ async def test_function_stats_running_script( # create a second client to run fcall test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) test_client2 = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) async def endless_fcall_route_call(): @@ -8466,7 +8557,7 @@ async def test_function_kill_no_write( # create a second client to run fcall test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=15000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=15000 ) async def endless_fcall_route_call(): @@ -8521,7 +8612,7 @@ async def test_function_kill_write_is_unkillable( # create a second client to run fcall - and give it a long timeout test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=15000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=15000 ) # call fcall to run the function loaded function @@ -10369,7 +10460,7 @@ async def test_script_binary(self, glide_client: TGlideClient): @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_script_large_keys_no_args(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**13 # 8kb key = "0" * length @@ -10381,7 +10472,7 @@ async def test_script_large_keys_no_args(self, request, cluster_mode, protocol): @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, 
ProtocolVersion.RESP3]) async def test_script_large_args_no_keys(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**12 # 4kb arg1 = "0" * length @@ -10397,7 +10488,7 @@ async def test_script_large_args_no_keys(self, request, cluster_mode, protocol): @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_script_large_keys_and_args(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**12 # 4kb key = "0" * length @@ -10481,7 +10572,7 @@ async def test_script_kill_route( # Create a second client to run the script test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) await script_kill_tests(glide_client, test_client, route) @@ -10497,7 +10588,7 @@ async def test_script_kill_no_route( ): # Create a second client to run the script test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) await script_kill_tests(glide_client, test_client) @@ -10509,12 +10600,12 @@ async def test_script_kill_unkillable( ): # Create a second client to run the script test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) # Create a second client to kill the script test_client2 = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=15000 + request, 
cluster_mode=cluster_mode, protocol=protocol, request_timeout=15000 ) # Add test for script_kill with writing script diff --git a/python/python/tests/test_config.py b/python/python/tests/test_config.py index 3b22adb09c..2476d8ec0f 100644 --- a/python/python/tests/test_config.py +++ b/python/python/tests/test_config.py @@ -1,16 +1,22 @@ # Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from glide.config import ( + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, BaseClientConfiguration, + GlideClientConfiguration, GlideClusterClientConfiguration, NodeAddress, PeriodicChecksManualInterval, PeriodicChecksStatus, + ProtocolVersion, ReadFrom, ) +from glide.glide_client import GlideClient, GlideClusterClient from glide.protobuf.connection_request_pb2 import ConnectionRequest from glide.protobuf.connection_request_pb2 import ReadFrom as ProtobufReadFrom from glide.protobuf.connection_request_pb2 import TlsMode +from tests.conftest import create_client def test_default_client_config(): @@ -67,3 +73,24 @@ def test_convert_config_with_azaffinity_to_protobuf(): assert request.tls_mode is TlsMode.SecureTls assert request.read_from == ProtobufReadFrom.AZAffinity assert request.client_az == az + + +def test_connection_timeout_in_protobuf_request(): + connection_timeout = 5000 # in milliseconds + config = GlideClientConfiguration( + [NodeAddress("127.0.0.1")], + advanced_config=AdvancedGlideClientConfiguration(connection_timeout), + ) + request = config._create_a_protobuf_conn_request() + + assert isinstance(request, ConnectionRequest) + assert request.connection_timeout == connection_timeout + + config = GlideClusterClientConfiguration( + [NodeAddress("127.0.0.1")], + advanced_config=AdvancedGlideClusterClientConfiguration(connection_timeout), + ) + request = config._create_a_protobuf_conn_request(cluster_mode=True) + + assert isinstance(request, ConnectionRequest) + assert request.connection_timeout == connection_timeout 
diff --git a/python/python/tests/test_pubsub.py b/python/python/tests/test_pubsub.py index 6069104ed7..60baf383b2 100644 --- a/python/python/tests/test_pubsub.py +++ b/python/python/tests/test_pubsub.py @@ -66,7 +66,7 @@ async def create_two_clients_with_pubsub( cluster_mode_pubsub=cluster_mode_pubsub1, standalone_mode_pubsub=standalone_mode_pubsub1, protocol=protocol, - timeout=timeout, + request_timeout=timeout, ) try: client2 = await create_client( @@ -75,7 +75,7 @@ async def create_two_clients_with_pubsub( cluster_mode_pubsub=cluster_mode_pubsub2, standalone_mode_pubsub=standalone_mode_pubsub2, protocol=protocol, - timeout=timeout, + request_timeout=timeout, ) except Exception as e: await client1.close() diff --git a/python/python/tests/test_read_from_strategy.py b/python/python/tests/test_read_from_strategy.py index 03f3f8e9ae..cddb1e6f10 100644 --- a/python/python/tests/test_read_from_strategy.py +++ b/python/python/tests/test_read_from_strategy.py @@ -46,7 +46,7 @@ async def test_routing_with_az_affinity_strategy_to_1_replica( cluster_mode, # addresses=multiple_replicas_cluster.nodes_addr, protocol=protocol, - timeout=2000, + request_timeout=2000, ) # Reset the availability zone for all nodes @@ -67,7 +67,7 @@ async def test_routing_with_az_affinity_strategy_to_1_replica( cluster_mode, protocol=protocol, read_from=ReadFrom.AZ_AFFINITY, - timeout=2000, + request_timeout=2000, client_az=az, ) @@ -113,7 +113,7 @@ async def test_routing_by_slot_to_replica_with_az_affinity_strategy_to_all_repli cluster_mode, # addresses=multiple_replicas_cluster.nodes_addr, protocol=protocol, - timeout=2000, + request_timeout=2000, ) assert await client_for_config_set.config_resetstat() == OK await client_for_config_set.custom_command( @@ -125,7 +125,7 @@ async def test_routing_by_slot_to_replica_with_az_affinity_strategy_to_all_repli cluster_mode, protocol=protocol, read_from=ReadFrom.AZ_AFFINITY, - timeout=2000, + request_timeout=2000, client_az=az, ) azs = await 
client_for_testing_az.custom_command( @@ -181,7 +181,7 @@ async def test_az_affinity_non_existing_az( # addresses=multiple_replicas_cluster.nodes_addr, protocol=protocol, read_from=ReadFrom.AZ_AFFINITY, - timeout=2000, + request_timeout=2000, client_az="non-existing-az", ) assert await client_for_testing_az.config_resetstat() == OK @@ -217,5 +217,5 @@ async def test_az_affinity_requires_client_az( cluster_mode=cluster_mode, protocol=protocol, read_from=ReadFrom.AZ_AFFINITY, - timeout=2000, + request_timeout=2000, ) diff --git a/python/python/tests/test_transaction.py b/python/python/tests/test_transaction.py index c623f4e8c9..e284bb1100 100644 --- a/python/python/tests/test_transaction.py +++ b/python/python/tests/test_transaction.py @@ -972,7 +972,7 @@ async def test_can_return_null_on_watch_transaction_failures( @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_transaction_large_values(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**25 # 33mb key = "0" * length diff --git a/utils/cluster_manager.py b/utils/cluster_manager.py index dc196bcd4f..a167da3464 100644 --- a/utils/cluster_manager.py +++ b/utils/cluster_manager.py @@ -341,10 +341,22 @@ def get_server_command() -> str: except Exception as e: logging.error(f"Error checking {server}: {e}") raise Exception( - "Neither valkey-server nor redis-server found in the system." 
+ "Neither valkey-server nor redis-server found in the system.") + + def get_server_version(server_name): + result = subprocess.run( + [server_name, "--version"], capture_output=True, text=True + ) + version_output = result.stdout + version_match = re.search( + r"(?:Redis|Valkey) server v=(\d+\.\d+\.\d+)", version_output, re.IGNORECASE ) + if version_match: + return tuple(map(int, version_match.group(1).split("."))) + raise Exception("Unable to determine server version.") server_name = get_server_command() + server_version = get_server_version(server_name) logfile = f"{node_folder}/redis.log" # Define command arguments cmd_args = [ @@ -360,6 +372,8 @@ def get_server_command() -> str: "--logfile", logfile, ] + if server_version >= (7, 0, 0): + cmd_args.extend(["--enable-debug-command", "yes"]) if load_module: if len(load_module) == 0: raise ValueError( From 151d9b13b3d25d55d9d44ae47c2dfee3255316ab Mon Sep 17 00:00:00 2001 From: Avi Fenesh <55848801+avifenesh@users.noreply.github.com> Date: Sun, 29 Dec 2024 12:34:52 +0200 Subject: [PATCH 17/18] CD fixes nedded for Release 1.2.1 (#2887) Refactor CI workflows to streamline engine installation and improve setup for self-hosted runners Signed-off-by: avifenesh --- .github/workflows/java-cd.yml | 4 ++++ .github/workflows/npm-cd.yml | 20 +++++++++++--------- .github/workflows/pypi-cd.yml | 29 +++++------------------------ 3 files changed, 20 insertions(+), 33 deletions(-) diff --git a/.github/workflows/java-cd.yml b/.github/workflows/java-cd.yml index 5e2fd5886e..ed618f1708 100644 --- a/.github/workflows/java-cd.yml +++ b/.github/workflows/java-cd.yml @@ -225,6 +225,10 @@ jobs: host: ${{ fromJson(needs.load-platform-matrix.outputs.PLATFORM_MATRIX) }} runs-on: ${{ matrix.host.RUNNER }} steps: + - name: Setup self-hosted runner access + if: ${{matrix.host.TARGET == 'aarch64-unknown-linux-gnu' }} + run: sudo chown -R $USER:$USER /home/ubuntu/action-runner-ilia/_work/valkey-glide + - name: Checkout uses: actions/checkout@v4 
diff --git a/.github/workflows/npm-cd.yml b/.github/workflows/npm-cd.yml index 24497e83cc..3e5f673a4c 100644 --- a/.github/workflows/npm-cd.yml +++ b/.github/workflows/npm-cd.yml @@ -308,23 +308,18 @@ jobs: if: ${{ matrix.build.TARGET == 'aarch64-unknown-linux-gnu' }} run: sudo chown -R $USER:$USER /home/ubuntu/actions-runner/_work/valkey-glide - - name: install Redis and git for alpine + - name: install redis and git for alpine if: ${{ contains(matrix.build.TARGET, 'musl') }} run: | apk update - apk add redis git + apk add git redis node -v - - name: install Redis and Python for ubuntu + - name: install Python for ubuntu if: ${{ contains(matrix.build.TARGET, 'linux-gnu') }} run: | sudo apt-get update - sudo apt-get install redis-server python3 - - - name: install Redis, Python for macos - if: ${{ contains(matrix.build.RUNNER, 'mac') }} - run: | - brew install redis python3 + sudo apt-get install python3 - name: Checkout if: ${{ matrix.build.TARGET != 'aarch64-unknown-linux-musl'}} @@ -339,6 +334,13 @@ jobs: npm-auth-token: ${{ secrets.NPM_AUTH_TOKEN }} arch: ${{ matrix.build.ARCH }} + - name: Install engine + if: ${{ !contains(matrix.build.TARGET, 'musl') }} + uses: ./.github/workflows/install-engine + with: + engine-version: "8.0" + target: ${{ matrix.build.target }} + - name: Setup node if: ${{ !contains(matrix.build.TARGET, 'musl') }} uses: actions/setup-node@v4 diff --git a/.github/workflows/pypi-cd.yml b/.github/workflows/pypi-cd.yml index 4ea517a818..28d8de8579 100644 --- a/.github/workflows/pypi-cd.yml +++ b/.github/workflows/pypi-cd.yml @@ -232,31 +232,12 @@ jobs: with: python-version: 3.12 - - name: Install engine Ubuntu ARM - if: ${{ matrix.build.TARGET == 'aarch64-unknown-linux-gnu' }} - shell: bash - # in self hosted runner we first want to check that engine is not already installed - run: | - if [[ $(`which redis-server`) == '' ]] - then - sudo apt-get update - sudo apt-get install -y redis-server - else - echo "Redis is already installed" - fi - - - 
name: Install engine Ubuntu x86 - if: ${{ matrix.build.TARGET == 'x86_64-unknown-linux-gnu' }} - shell: bash - run: | - sudo apt-get update - sudo apt-get install -y redis-server + - name: Install engine + uses: ./.github/workflows/install-engine + with: + engine-version: "8.0" + target: ${{ matrix.build.target }} - - name: Install engine MacOS - if: ${{ matrix.build.OS == 'macos' }} - shell: bash - run: | - brew install redis - name: Check if RC and set a distribution tag for the package shell: bash From e507c71f8f96afb032e8776740982d65dd96d96a Mon Sep 17 00:00:00 2001 From: ikolomi Date: Mon, 30 Dec 2024 17:09:18 +0200 Subject: [PATCH 18/18] Merge fixups Signed-off-by: ikolomi --- python/python/tests/test_api_export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/python/tests/test_api_export.py b/python/python/tests/test_api_export.py index 996a8821a3..e169950da0 100644 --- a/python/python/tests/test_api_export.py +++ b/python/python/tests/test_api_export.py @@ -55,6 +55,7 @@ def _get_export_rename_map(): "FtSearchKeywords", # ClassDef "FtAggregateKeywords", # ClassDef "FtProfileKeywords", # ClassDef + "AdvancedBaseClientConfiguration", # ClassDef ]