From 914715d4a11357dd7c1a36cc702d11d459811803 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:05:50 -0800 Subject: [PATCH 01/32] Update changelog after release (#2782) changelog Signed-off-by: Yury-Fridlyand --- CHANGELOG.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e52f9b2161..dd3059aaef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +#### Changes + +#### Breaking Changes + +#### Fixes + +#### Operational Enhancements + +## 1.2.0 (2024-11-27) + #### Changes * Node: Client API for retrieving internal statistics ([#2727](https://github.com/valkey-io/valkey-glide/pull/2727)) * Python: Client API for retrieving internal statistics ([#2707](https://github.com/valkey-io/valkey-glide/pull/2707)) From 385c160c9b47785d81d004d374eb1f4f160f473c Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:23:16 -0800 Subject: [PATCH 02/32] Fix license header in core files (#2648) Signed-off-by: Yury-Fridlyand --- benchmarks/rust/src/main.rs | 4 +--- csharp/lib/src/lib.rs | 5 ++--- glide-core/benches/connections_benchmark.rs | 5 ++--- glide-core/benches/memory_benchmark.rs | 5 ++--- glide-core/benches/rotating_buffer_benchmark.rs | 5 ++--- glide-core/build.rs | 4 +--- glide-core/src/client/mod.rs | 5 ++--- glide-core/src/client/reconnecting_connection.rs | 5 ++--- glide-core/src/client/standalone_client.rs | 5 ++--- glide-core/src/client/types.rs | 4 +--- glide-core/src/client/value_conversion.rs | 5 ++--- glide-core/src/cluster_scan_container.rs | 5 ++--- glide-core/src/errors.rs | 4 +--- glide-core/src/lib.rs | 4 +--- glide-core/src/request_type.rs | 5 ++--- glide-core/src/retry_strategies.rs | 5 ++--- glide-core/src/rotating_buffer.rs | 4 +--- glide-core/src/scripts_container.rs | 5 ++--- glide-core/src/socket_listener.rs | 5 ++--- glide-core/tests/test_client.rs | 5 ++--- glide-core/tests/test_cluster_client.rs | 5 ++--- glide-core/tests/test_socket_listener.rs | 4 +--- 
glide-core/tests/test_standalone_client.rs | 5 ++--- glide-core/tests/utilities/cluster.rs | 5 ++--- glide-core/tests/utilities/mocks.rs | 5 ++--- glide-core/tests/utilities/mod.rs | 4 +--- go/src/lib.rs | 4 +--- java/src/errors.rs | 5 ++--- java/src/ffi_test.rs | 5 ++--- java/src/lib.rs | 5 ++--- node/rust-client/build.rs | 5 ++--- node/rust-client/src/lib.rs | 5 ++--- python/src/lib.rs | 5 ++--- 33 files changed, 57 insertions(+), 99 deletions(-) diff --git a/benchmarks/rust/src/main.rs b/benchmarks/rust/src/main.rs index edace91a30..f454928180 100644 --- a/benchmarks/rust/src/main.rs +++ b/benchmarks/rust/src/main.rs @@ -1,6 +1,4 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[cfg(not(target_env = "msvc"))] use tikv_jemallocator::Jemalloc; diff --git a/csharp/lib/src/lib.rs b/csharp/lib/src/lib.rs index eac9254a53..c497410e31 100644 --- a/csharp/lib/src/lib.rs +++ b/csharp/lib/src/lib.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use glide_core::client; use glide_core::client::Client as GlideClient; use glide_core::request_type::RequestType; diff --git a/glide-core/benches/connections_benchmark.rs b/glide-core/benches/connections_benchmark.rs index c26d5bba45..5f6075dec6 100644 --- a/glide-core/benches/connections_benchmark.rs +++ b/glide-core/benches/connections_benchmark.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use criterion::{criterion_group, criterion_main, Criterion}; use futures::future::join_all; use redis::{ diff --git a/glide-core/benches/memory_benchmark.rs b/glide-core/benches/memory_benchmark.rs index 1948d9a2cd..923ffeaa1b 100644 --- 
a/glide-core/benches/memory_benchmark.rs +++ b/glide-core/benches/memory_benchmark.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use glide_core::{ client::Client, connection_request::{ConnectionRequest, NodeAddress, TlsMode}, diff --git a/glide-core/benches/rotating_buffer_benchmark.rs b/glide-core/benches/rotating_buffer_benchmark.rs index 224d1b702b..7f543a21d3 100644 --- a/glide-core/benches/rotating_buffer_benchmark.rs +++ b/glide-core/benches/rotating_buffer_benchmark.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use std::io::Write; use bytes::BufMut; diff --git a/glide-core/build.rs b/glide-core/build.rs index cd838c9a53..9ba9ca89a7 100644 --- a/glide-core/build.rs +++ b/glide-core/build.rs @@ -1,6 +1,4 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[cfg(feature = "socket-layer")] fn build_protobuf() { diff --git a/glide-core/src/client/mod.rs b/glide-core/src/client/mod.rs index cfe8d6dc05..29a3a05393 100644 --- a/glide-core/src/client/mod.rs +++ b/glide-core/src/client/mod.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + mod types; use crate::cluster_scan_container::insert_cluster_scan_cursor; diff --git a/glide-core/src/client/reconnecting_connection.rs b/glide-core/src/client/reconnecting_connection.rs index 39a4c1db62..c882dd29d6 100644 --- a/glide-core/src/client/reconnecting_connection.rs +++ b/glide-core/src/client/reconnecting_connection.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX 
Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use super::{NodeAddress, TlsMode}; use crate::retry_strategies::RetryStrategy; use async_trait::async_trait; diff --git a/glide-core/src/client/standalone_client.rs b/glide-core/src/client/standalone_client.rs index c5e69fd6dd..5bc26999a8 100644 --- a/glide-core/src/client/standalone_client.rs +++ b/glide-core/src/client/standalone_client.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use super::get_redis_connection_info; use super::reconnecting_connection::{ReconnectReason, ReconnectingConnection}; use super::{ConnectionRequest, NodeAddress, TlsMode}; diff --git a/glide-core/src/client/types.rs b/glide-core/src/client/types.rs index 0c7680b3a6..a0053587c8 100644 --- a/glide-core/src/client/types.rs +++ b/glide-core/src/client/types.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[allow(unused_imports)] use logger_core::log_warn; diff --git a/glide-core/src/client/value_conversion.rs b/glide-core/src/client/value_conversion.rs index 6ba9dc757c..48996fc76d 100644 --- a/glide-core/src/client/value_conversion.rs +++ b/glide-core/src/client/value_conversion.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use redis::{ cluster_routing::Routable, from_owned_redis_value, Cmd, ErrorKind, RedisResult, Value, }; diff --git a/glide-core/src/cluster_scan_container.rs b/glide-core/src/cluster_scan_container.rs index 38cb1ec76d..2733b49092 100644 --- a/glide-core/src/cluster_scan_container.rs +++ b/glide-core/src/cluster_scan_container.rs @@ -1,6 +1,5 @@ -/** - * 
Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use logger_core::log_debug; use nanoid::nanoid; use once_cell::sync::Lazy; diff --git a/glide-core/src/errors.rs b/glide-core/src/errors.rs index b5f9b1af9e..b2e71eceaf 100644 --- a/glide-core/src/errors.rs +++ b/glide-core/src/errors.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 use redis::RedisError; diff --git a/glide-core/src/lib.rs b/glide-core/src/lib.rs index 9b22a8bb55..9450afef2c 100644 --- a/glide-core/src/lib.rs +++ b/glide-core/src/lib.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[cfg(feature = "socket-layer")] include!(concat!(env!("OUT_DIR"), "/protobuf/mod.rs")); diff --git a/glide-core/src/request_type.rs b/glide-core/src/request_type.rs index 9ee4ac92a9..d6b43ae027 100644 --- a/glide-core/src/request_type.rs +++ b/glide-core/src/request_type.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use redis::{cmd, Cmd}; #[cfg(feature = "socket-layer")] diff --git a/glide-core/src/retry_strategies.rs b/glide-core/src/retry_strategies.rs index 1a5157d225..3b60cc402e 100644 --- a/glide-core/src/retry_strategies.rs +++ b/glide-core/src/retry_strategies.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use crate::client::ConnectionRetryStrategy; use std::time::Duration; use tokio_retry2::strategy::{jitter_range, ExponentialBackoff}; diff --git 
a/glide-core/src/rotating_buffer.rs b/glide-core/src/rotating_buffer.rs index b87f666605..1bebb33c65 100644 --- a/glide-core/src/rotating_buffer.rs +++ b/glide-core/src/rotating_buffer.rs @@ -1,6 +1,4 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #[allow(unused_imports)] use bytes::{Bytes, BytesMut}; use integer_encoding::VarInt; diff --git a/glide-core/src/scripts_container.rs b/glide-core/src/scripts_container.rs index a039593f79..22f5237c2f 100644 --- a/glide-core/src/scripts_container.rs +++ b/glide-core/src/scripts_container.rs @@ -1,7 +1,6 @@ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use bytes::BytesMut; -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ use logger_core::log_info; use once_cell::sync::Lazy; use sha1_smol::Sha1; diff --git a/glide-core/src/socket_listener.rs b/glide-core/src/socket_listener.rs index b9db4e6d99..f148bbdede 100644 --- a/glide-core/src/socket_listener.rs +++ b/glide-core/src/socket_listener.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use super::rotating_buffer::RotatingBuffer; use crate::client::Client; use crate::cluster_scan_container::get_cluster_scan_cursor; diff --git a/glide-core/tests/test_client.rs b/glide-core/tests/test_client.rs index ffc672fee6..b62210ed9f 100644 --- a/glide-core/tests/test_client.rs +++ b/glide-core/tests/test_client.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + mod utilities; #[macro_export] diff --git a/glide-core/tests/test_cluster_client.rs b/glide-core/tests/test_cluster_client.rs index 1c60dc8c79..2943ab21bb 100644 --- 
a/glide-core/tests/test_cluster_client.rs +++ b/glide-core/tests/test_cluster_client.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + mod utilities; #[cfg(test)] diff --git a/glide-core/tests/test_socket_listener.rs b/glide-core/tests/test_socket_listener.rs index 6f2aa566b9..5921236e36 100644 --- a/glide-core/tests/test_socket_listener.rs +++ b/glide-core/tests/test_socket_listener.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #![cfg(feature = "socket-layer")] use glide_core::*; diff --git a/glide-core/tests/test_standalone_client.rs b/glide-core/tests/test_standalone_client.rs index c118d6d28f..77363b5c18 100644 --- a/glide-core/tests/test_standalone_client.rs +++ b/glide-core/tests/test_standalone_client.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + mod utilities; #[cfg(test)] diff --git a/glide-core/tests/utilities/cluster.rs b/glide-core/tests/utilities/cluster.rs index 8f7ed6aca0..3cb298ffef 100644 --- a/glide-core/tests/utilities/cluster.rs +++ b/glide-core/tests/utilities/cluster.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use super::{create_connection_request, ClusterMode, TestConfiguration}; use futures::future::{join_all, BoxFuture}; use futures::FutureExt; diff --git a/glide-core/tests/utilities/mocks.rs b/glide-core/tests/utilities/mocks.rs index 33b8ae4121..198abe716a 100644 --- a/glide-core/tests/utilities/mocks.rs +++ b/glide-core/tests/utilities/mocks.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey 
GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use futures_intrusive::sync::ManualResetEvent; use redis::{Cmd, ConnectionAddr, Value}; use std::collections::HashMap; diff --git a/glide-core/tests/utilities/mod.rs b/glide-core/tests/utilities/mod.rs index 7318d6e640..adae3aed07 100644 --- a/glide-core/tests/utilities/mod.rs +++ b/glide-core/tests/utilities/mod.rs @@ -1,6 +1,4 @@ -/* - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #![allow(dead_code)] use futures::Future; diff --git a/go/src/lib.rs b/go/src/lib.rs index 344dac6e45..1a04c7f756 100644 --- a/go/src/lib.rs +++ b/go/src/lib.rs @@ -1,6 +1,4 @@ -/* -* Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 -*/ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 #![deny(unsafe_op_in_unsafe_fn)] use glide_core::client::Client as GlideClient; diff --git a/java/src/errors.rs b/java/src/errors.rs index 93372a7257..3137a9583e 100644 --- a/java/src/errors.rs +++ b/java/src/errors.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use jni::{errors::Error as JNIError, JNIEnv}; use log::error; use std::string::FromUtf8Error; diff --git a/java/src/ffi_test.rs b/java/src/ffi_test.rs index ee3bd4ad32..fb54fc3b5b 100644 --- a/java/src/ffi_test.rs +++ b/java/src/ffi_test.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use crate::errors::{handle_errors, handle_panics, throw_java_exception, ExceptionType, FFIError}; use jni::{ objects::{JByteArray, JClass, JLongArray, JString}, diff --git a/java/src/lib.rs 
b/java/src/lib.rs index 311d9a13dc..3b493aa61e 100644 --- a/java/src/lib.rs +++ b/java/src/lib.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use glide_core::start_socket_listener as start_socket_listener_core; // Protocol constants to expose to Java. diff --git a/node/rust-client/build.rs b/node/rust-client/build.rs index af38e8a35e..5284801493 100644 --- a/node/rust-client/build.rs +++ b/node/rust-client/build.rs @@ -1,6 +1,5 @@ -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + extern crate napi_build; fn main() { diff --git a/node/rust-client/src/lib.rs b/node/rust-client/src/lib.rs index b15b18f521..963f966f24 100644 --- a/node/rust-client/src/lib.rs +++ b/node/rust-client/src/lib.rs @@ -1,8 +1,7 @@ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use glide_core::Telemetry; use redis::GlideConnectionOptions; -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ #[cfg(not(target_env = "msvc"))] use tikv_jemallocator::Jemalloc; diff --git a/python/src/lib.rs b/python/src/lib.rs index 6b41123dd3..e3b9a298cb 100644 --- a/python/src/lib.rs +++ b/python/src/lib.rs @@ -1,8 +1,7 @@ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + use bytes::Bytes; use glide_core::client::FINISHED_SCAN_CURSOR; -/** - * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 - */ use glide_core::start_socket_listener; use glide_core::Telemetry; use glide_core::MAX_REQUEST_ARGS_LENGTH; From 96f2597d5b3c42b9fe13c067dde0108a84f9ea37 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:31:25 -0800 Subject: [PATCH 03/32] Java: bump `netty` version (#2777) * bump netty Signed-off-by: Yury-Fridlyand --- 
CHANGELOG.md | 1 + java/client/build.gradle | 8 ++++---- java/integTest/build.gradle | 6 +++--- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index dd3059aaef..429093dec9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ #### Changes +* Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) #### Breaking Changes diff --git a/java/client/build.gradle b/java/client/build.gradle index cc2446251d..7337c16567 100644 --- a/java/client/build.gradle +++ b/java/client/build.gradle @@ -17,12 +17,12 @@ dependencies { implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.27.1' implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.13.0' - implementation group: 'io.netty', name: 'netty-handler', version: '4.1.100.Final' + implementation group: 'io.netty', name: 'netty-handler', version: '4.1.115.Final' // https://github.com/netty/netty/wiki/Native-transports // At the moment, Windows is not supported - implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.100.Final', classifier: 'linux-x86_64' - implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.100.Final', classifier: 'linux-aarch_64' - implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.100.Final', classifier: 'osx-aarch_64' + implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.115.Final', classifier: 'linux-x86_64' + implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.115.Final', classifier: 'linux-aarch_64' + implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.115.Final', classifier: 'osx-aarch_64' // junit testImplementation group: 'org.mockito', name: 'mockito-inline', version: '3.12.4' diff --git a/java/integTest/build.gradle b/java/integTest/build.gradle index 
53b690aa49..3e97f58f10 100644 --- a/java/integTest/build.gradle +++ b/java/integTest/build.gradle @@ -15,9 +15,9 @@ dependencies { // https://github.com/netty/netty/wiki/Native-transports // At the moment, Windows is not supported - implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.100.Final', classifier: 'linux-x86_64' - implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.100.Final', classifier: 'linux-aarch_64' - implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.100.Final', classifier: 'osx-aarch_64' + implementation group: 'io.netty', name: 'netty-transport-native-epoll', version: '4.1.115.Final', classifier: 'linux-x86_64' + implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.115.Final', classifier: 'osx-x86_64' + implementation group: 'io.netty', name: 'netty-transport-native-kqueue', version: '4.1.115.Final', classifier: 'osx-aarch_64' // junit testImplementation 'org.mockito:mockito-junit-jupiter:3.12.4' From c99f30b63ba068bd4bed3747fc354c1d7b3f6ad4 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:48:24 -0800 Subject: [PATCH 04/32] [backport to 1.2] Bump protoc (#2561) * Bump protoc Signed-off-by: Yury-Fridlyand Co-authored-by: Andrew Carbonetto --- .github/workflows/java-cd.yml | 4 ++-- .github/workflows/java.yml | 4 ++-- CHANGELOG.md | 1 + java/DEVELOPER.md | 12 ++++++------ java/client/build.gradle | 10 +++++----- 5 files changed, 16 insertions(+), 15 deletions(-) diff --git a/.github/workflows/java-cd.yml b/.github/workflows/java-cd.yml index f4c0146342..7033141881 100644 --- a/.github/workflows/java-cd.yml +++ b/.github/workflows/java-cd.yml @@ -102,7 +102,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "28.2" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Create secret key ring file @@ -245,7 +245,7 @@ jobs: - name: Install 
protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "28.2" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Start standalone Valkey server diff --git a/.github/workflows/java.yml b/.github/workflows/java.yml index 66c99cca3e..449c9f1a65 100644 --- a/.github/workflows/java.yml +++ b/.github/workflows/java.yml @@ -103,7 +103,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "28.2" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Build java client @@ -184,7 +184,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "28.2" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Build java wrapper diff --git a/CHANGELOG.md b/CHANGELOG.md index 429093dec9..e0679a64d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ #### Changes +* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561)) * Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) #### Breaking Changes diff --git a/java/DEVELOPER.md b/java/DEVELOPER.md index d8bc19b99c..3a275425fc 100644 --- a/java/DEVELOPER.md +++ b/java/DEVELOPER.md @@ -17,7 +17,7 @@ The Valkey GLIDE Java wrapper consists of both Java and Rust code. Rust bindings - git - GCC - pkg-config -- protoc (protobuf compiler) >= 26.1 +- protoc (protobuf compiler) >= 28.2 - openssl - openssl-dev - rustup @@ -64,17 +64,17 @@ Continue with **Install protobuf compiler** below. 
To install protobuf for MacOS, run: ```bash brew install protobuf -# Check that the protobuf compiler version 26.1 or higher is installed +# Check that the protobuf compiler version 28.2 or higher is installed protoc --version ``` For the remaining systems, do the following: ```bash PB_REL="https://github.com/protocolbuffers/protobuf/releases" -curl -LO $PB_REL/download/v26.1/protoc-26.1-linux-x86_64.zip -unzip protoc-26.1-linux-x86_64.zip -d $HOME/.local +curl -LO $PB_REL/download/v28.2/protoc-28.2-linux-x86_64.zip +unzip protoc-28.2-linux-x86_64.zip -d $HOME/.local export PATH="$PATH:$HOME/.local/bin" -# Check that the protobuf compiler version 26.1 or higher is installed +# Check that the protobuf compiler version 28.2 or higher is installed protoc --version ``` @@ -165,7 +165,7 @@ Some troubleshooting issues: - Failed to find `cargo` after `rustup`. - No Protobuf compiler (protoc) found. - If build fails because of rust compiler fails, make sure submodules are updated using `git submodule update`. -- If protobuf 26.0 or earlier is detected, upgrade to the latest protobuf release. +- If protobuf 28.0 or earlier is detected, upgrade to the latest protobuf release. 
## Running Examples App diff --git a/java/client/build.gradle b/java/client/build.gradle index 7337c16567..fa617fb131 100644 --- a/java/client/build.gradle +++ b/java/client/build.gradle @@ -14,7 +14,7 @@ repositories { } dependencies { - implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.27.1' + implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.28.2' implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.13.0' implementation group: 'io.netty', name: 'netty-handler', version: '4.1.115.Final' @@ -38,11 +38,11 @@ dependencies { ext { checkProtocVersion = { String output -> - // Line in format like: libprotoc 26.1 + // Line in format like: libprotoc 28.1 int majorVersion = Integer.parseInt(output.split(" ")[1].split("\\.")[0].trim()); int minorVersion = Integer.parseInt(output.split(" ")[1].split("\\.")[1].trim()); - if (majorVersion < 26 || (majorVersion == 26 && minorVersion < 1)) { - throw new GradleException("Protobuf compiler (protoc) version 26.1 or newer is required. Current version: $output"); + if (majorVersion < 28) { + throw new GradleException("Protobuf compiler (protoc) version 28.0 or newer is required. Current version: $output"); } return output.split(" ")[1] } @@ -61,7 +61,7 @@ tasks.register('protobuf', Exec) { } } catch (Exception e) { if (e.getMessage().startsWith("A problem occurred starting process")) { - throw new GradleException("No Protobuf compiler (protoc) found. Protobuf compiler version 26.1 or newer is required."); + throw new GradleException("No Protobuf compiler (protoc) found. 
Protobuf compiler version 28.0 or newer is required."); } throw e } From 2dd48fe21bbd3691c2d6fa764815c306ee86f4d1 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 10:55:23 -0800 Subject: [PATCH 05/32] Apply linters to submodule (#2783) * lint Signed-off-by: Yury-Fridlyand --- glide-core/redis-rs/redis/src/cluster.rs | 2 +- .../redis-rs/redis/src/cluster_routing.rs | 2 +- glide-core/redis-rs/redis/src/cmd.rs | 2 +- .../redis/src/commands/cluster_scan.rs | 30 +++++++++---------- glide-core/redis-rs/redis/src/connection.rs | 4 +-- glide-core/redis-rs/redis/src/types.rs | 4 +-- .../test_async_cluster_connections_logic.rs | 8 +++++ .../redis/tests/test_cluster_async.rs | 12 ++++---- 8 files changed, 36 insertions(+), 28 deletions(-) diff --git a/glide-core/redis-rs/redis/src/cluster.rs b/glide-core/redis-rs/redis/src/cluster.rs index ffd537152a..57d3a7eae4 100644 --- a/glide-core/redis-rs/redis/src/cluster.rs +++ b/glide-core/redis-rs/redis/src/cluster.rs @@ -105,7 +105,7 @@ impl<'a> Input<'a> { } } -impl<'a> Routable for Input<'a> { +impl Routable for Input<'_> { fn arg_idx(&self, idx: usize) -> Option<&[u8]> { match self { Input::Slice { cmd: _, routable } => routable.arg_idx(idx), diff --git a/glide-core/redis-rs/redis/src/cluster_routing.rs b/glide-core/redis-rs/redis/src/cluster_routing.rs index eab3bf398a..8bf11d19d4 100644 --- a/glide-core/redis-rs/redis/src/cluster_routing.rs +++ b/glide-core/redis-rs/redis/src/cluster_routing.rs @@ -1378,7 +1378,7 @@ impl ShardAddrs { } } -impl<'a> IntoIterator for &'a ShardAddrs { +impl IntoIterator for &ShardAddrs { type Item = Arc; type IntoIter = std::iter::Chain>, std::vec::IntoIter>>; diff --git a/glide-core/redis-rs/redis/src/cmd.rs b/glide-core/redis-rs/redis/src/cmd.rs index 3e248dad6f..92e8aea989 100644 --- a/glide-core/redis-rs/redis/src/cmd.rs +++ b/glide-core/redis-rs/redis/src/cmd.rs @@ -40,7 +40,7 @@ pub struct Iter<'a, T: FromRedisValue> { cmd: Cmd, } -impl<'a, T: FromRedisValue> Iterator for 
Iter<'a, T> { +impl Iterator for Iter<'_, T> { type Item = T; #[inline] diff --git a/glide-core/redis-rs/redis/src/commands/cluster_scan.rs b/glide-core/redis-rs/redis/src/commands/cluster_scan.rs index 0fccb0e6f5..109aceca22 100644 --- a/glide-core/redis-rs/redis/src/commands/cluster_scan.rs +++ b/glide-core/redis-rs/redis/src/commands/cluster_scan.rs @@ -1,3 +1,18 @@ +//! This module contains the implementation of scanning operations in a Redis cluster. +//! +//! The [`ClusterScanArgs`] struct represents the arguments for a cluster scan operation, +//! including the scan state reference, match pattern, count, and object type. +//! +//! The [[`ScanStateRC`]] struct is a wrapper for managing the state of a scan operation in a cluster. +//! It holds a reference to the scan state and provides methods for accessing the state. +//! +//! The [[`ClusterInScan`]] trait defines the methods for interacting with a Redis cluster during scanning, +//! including retrieving address information, refreshing slot mapping, and routing commands to specific address. +//! +//! The [[`ScanState`]] struct represents the state of a scan operation in a Redis cluster. +//! It holds information about the current scan state, including the cursor position, scanned slots map, +//! address being scanned, and address's epoch. + use crate::aio::ConnectionLike; use crate::cluster_async::{ ClusterConnInner, Connect, Core, InternalRoutingInfo, InternalSingleNodeRouting, RefreshPolicy, @@ -10,21 +25,6 @@ use async_trait::async_trait; use std::sync::Arc; use strum_macros::Display; -/// This module contains the implementation of scanning operations in a Redis cluster. -/// -/// The [`ClusterScanArgs`] struct represents the arguments for a cluster scan operation, -/// including the scan state reference, match pattern, count, and object type. -/// -/// The [[`ScanStateRC`]] struct is a wrapper for managing the state of a scan operation in a cluster. 
-/// It holds a reference to the scan state and provides methods for accessing the state. -/// -/// The [[`ClusterInScan`]] trait defines the methods for interacting with a Redis cluster during scanning, -/// including retrieving address information, refreshing slot mapping, and routing commands to specific address. -/// -/// The [[`ScanState`]] struct represents the state of a scan operation in a Redis cluster. -/// It holds information about the current scan state, including the cursor position, scanned slots map, -/// address being scanned, and address's epoch. - const BITS_PER_U64: usize = u64::BITS as usize; const NUM_OF_SLOTS: usize = SLOT_SIZE as usize; const BITS_ARRAY_SIZE: usize = NUM_OF_SLOTS / BITS_PER_U64; diff --git a/glide-core/redis-rs/redis/src/connection.rs b/glide-core/redis-rs/redis/src/connection.rs index f75b9df494..527fb40fa0 100644 --- a/glide-core/redis-rs/redis/src/connection.rs +++ b/glide-core/redis-rs/redis/src/connection.rs @@ -287,7 +287,7 @@ impl IntoConnectionInfo for ConnectionInfo { /// - Specifying DB: `redis://127.0.0.1:6379/0` /// - Enabling TLS: `rediss://127.0.0.1:6379` /// - Enabling Insecure TLS: `rediss://127.0.0.1:6379/#insecure` -impl<'a> IntoConnectionInfo for &'a str { +impl IntoConnectionInfo for &str { fn into_connection_info(self) -> RedisResult { match parse_redis_url(self) { Some(u) => u.into_connection_info(), @@ -1578,7 +1578,7 @@ impl<'a> PubSub<'a> { } } -impl<'a> Drop for PubSub<'a> { +impl Drop for PubSub<'_> { fn drop(&mut self) { let _ = self.con.exit_pubsub(); } diff --git a/glide-core/redis-rs/redis/src/types.rs b/glide-core/redis-rs/redis/src/types.rs index 2d8035d697..f66be0291e 100644 --- a/glide-core/redis-rs/redis/src/types.rs +++ b/glide-core/redis-rs/redis/src/types.rs @@ -1443,7 +1443,7 @@ impl ToRedisArgs for String { } } -impl<'a> ToRedisArgs for &'a str { +impl ToRedisArgs for &str { fn write_redis_args(&self, out: &mut W) where W: ?Sized + RedisWrite, @@ -1465,7 +1465,7 @@ impl ToRedisArgs 
for Vec { } } -impl<'a, T: ToRedisArgs> ToRedisArgs for &'a [T] { +impl ToRedisArgs for &[T] { fn write_redis_args(&self, out: &mut W) where W: ?Sized + RedisWrite, diff --git a/glide-core/redis-rs/redis/tests/test_async_cluster_connections_logic.rs b/glide-core/redis-rs/redis/tests/test_async_cluster_connections_logic.rs index 356c5bfc8c..1b4acb3a20 100644 --- a/glide-core/redis-rs/redis/tests/test_async_cluster_connections_logic.rs +++ b/glide-core/redis-rs/redis/tests/test_async_cluster_connections_logic.rs @@ -236,6 +236,7 @@ mod test_connect_and_check { ConnectionDetails { conn: user_conn, ip: Some(ip), + az: None, } .into_future(), None, @@ -283,6 +284,7 @@ mod test_connect_and_check { ConnectionDetails { conn: user_conn, ip: prev_ip, + az: None, } .into_future(), None, @@ -339,12 +341,14 @@ mod test_connect_and_check { ConnectionDetails { conn: old_user_conn, ip: Some(prev_ip), + az: None, } .into_future(), Some( ConnectionDetails { conn: management_conn, ip: Some(prev_ip), + az: None, } .into_future(), ), @@ -380,12 +384,14 @@ mod test_check_node_connections { ConnectionDetails { conn: get_mock_connection_with_port(name, 1, 6380), ip, + az: None, } .into_future(), Some( ConnectionDetails { conn: get_mock_connection_with_port(name, 2, 6381), ip, + az: None, } .into_future(), ), @@ -463,6 +469,7 @@ mod test_check_node_connections { ConnectionDetails { conn: get_mock_connection(name, 1), ip, + az: None, } .into_future(), None, @@ -547,6 +554,7 @@ mod test_check_node_connections { ConnectionDetails { conn: get_mock_connection(name, 1), ip: None, + az: None, } .into_future(), None, diff --git a/glide-core/redis-rs/redis/tests/test_cluster_async.rs b/glide-core/redis-rs/redis/tests/test_cluster_async.rs index 7273f98702..c69a9f933f 100644 --- a/glide-core/redis-rs/redis/tests/test_cluster_async.rs +++ b/glide-core/redis-rs/redis/tests/test_cluster_async.rs @@ -23,7 +23,7 @@ async fn engine_version_less_than(min_version: &str) -> bool { ); return true; } - return 
false; + false } /// Static function to get the engine version. When version looks like 8.0.0 -> 80000 and 12.0.1 -> 120001. @@ -49,10 +49,10 @@ async fn get_cluster_version() -> usize { cluster_version.set( parse_version_from_info(info_result.clone()) - .expect(format!("Invalid version string in INFO : {info_result}").as_str()), + .unwrap_or_else(|| panic!("Invalid version string in INFO : {info_result}")), ); } - return cluster_version.get(); + cluster_version.get() } fn parse_version_from_info(info: String) -> Option { @@ -273,7 +273,7 @@ mod cluster_async { .unwrap(); let info_result = redis::from_owned_redis_value::>(info).unwrap(); - let get_cmdstat = format!("cmdstat_get:calls="); + let get_cmdstat = "cmdstat_get:calls=".to_string(); let n_get_cmdstat = format!("cmdstat_get:calls={}", n); let client_az = format!("availability_zone:{}", az); @@ -363,7 +363,7 @@ mod cluster_async { .unwrap(); let info_result = redis::from_owned_redis_value::>(info).unwrap(); - let get_cmdstat = format!("cmdstat_get:calls="); + let get_cmdstat = "cmdstat_get:calls=".to_string(); let n_get_cmdstat = format!("cmdstat_get:calls={}", n); let client_az = format!("availability_zone:{}", az); @@ -385,7 +385,7 @@ mod cluster_async { (matching_entries_count.try_into() as Result).unwrap(), replica_num, "Test failed: expected exactly '{}' entries with '{}' and '{}', found {}", - replica_num.to_string(), + replica_num, get_cmdstat, client_az, matching_entries_count From 36a5d89c458857aebe6473fb055469a6c7da928d Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Fri, 6 Dec 2024 11:07:31 -0800 Subject: [PATCH 06/32] Node: Fix modules CI (#2768) Fix GHA Signed-off-by: Yury-Fridlyand --- node/DEVELOPER.md | 1 + node/package.json | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/node/DEVELOPER.md b/node/DEVELOPER.md index 8878fdd91d..9568e3be82 100644 --- a/node/DEVELOPER.md +++ b/node/DEVELOPER.md @@ -156,6 +156,7 @@ In order to run these tests, use: ```bash npm run 
test-modules -- --cluster-endpoints=
: ``` + Note: these tests don't run with standalone server as of now. ### REPL (interactive shell) diff --git a/node/package.json b/node/package.json index b581123576..5936805685 100644 --- a/node/package.json +++ b/node/package.json @@ -35,7 +35,7 @@ "test": "npm run build-test-utils && jest --verbose --testPathIgnorePatterns='ServerModules'", "test-dbg": "npm run build-test-utils && jest --runInBand", "test-minimum": "npm run build-test-utils && jest --verbose --testNamePattern='^(.(?!(GlideJson|GlideFt|pubsub|kill)))*$'", - "test-modules": "npm run build-test-utils && jest --verbose --testNamePattern='(GlideJson|GlideFt)'", + "test-modules": "npm run build-test-utils && jest --runInBand --verbose --testNamePattern='(GlideJson|GlideFt)'", "build-test-utils": "cd ../utils && npm i && npm run build", "lint:fix": "npm run install-linting && npx eslint -c ../eslint.config.mjs --fix && npm run prettier:format", "lint": "npm run install-linting && npx eslint -c ../eslint.config.mjs && npm run prettier:check:ci", From b69bba44815e520e3a35113b59ac2fabf7949250 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Tue, 10 Dec 2024 10:00:41 -0800 Subject: [PATCH 07/32] Backport #2793 to release-1.2 - Fix rust CI: bump idna (#2794) Fix rust CI: bump `idna`. (#2793) Bump `idna`. 
Signed-off-by: Yury-Fridlyand --- glide-core/redis-rs/redis/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/glide-core/redis-rs/redis/Cargo.toml b/glide-core/redis-rs/redis/Cargo.toml index 25b06f64c2..534b59fc81 100644 --- a/glide-core/redis-rs/redis/Cargo.toml +++ b/glide-core/redis-rs/redis/Cargo.toml @@ -33,7 +33,7 @@ strum_macros = "0.26" percent-encoding = "2.1" # We need this for redis url parsing -url = "= 2.5.0" +url = "2.5.4" # We need this for script support sha1_smol = { version = "1.0", optional = true } From 54d624c26ac289e06209f32acf1f7ed266be661c Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Tue, 10 Dec 2024 10:42:51 -0800 Subject: [PATCH 08/32] Fix value conversion for `CONFIG GET`. (#2381) Signed-off-by: Yury-Fridlyand --- CHANGELOG.md | 2 + glide-core/src/client/value_conversion.rs | 84 ++++++++++++----------- node/tests/GlideClusterClient.test.ts | 21 ++++++ python/python/tests/test_async_client.py | 12 ++++ 4 files changed, 80 insertions(+), 39 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e0679a64d2..44a8ca101e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -108,6 +108,8 @@ #### Fixes * Core: UDS Socket Handling Rework ([#2482](https://github.com/valkey-io/valkey-glide/pull/2482)) +* Core: Fix RESP2 multi-node response from cluster ([#2381](https://github.com/valkey-io/valkey-glide/pull/2381)) + #### Operational Enhancements * Java: Add modules CI ([#2388](https://github.com/valkey-io/valkey-glide/pull/2388), [#2404](https://github.com/valkey-io/valkey-glide/pull/2404), [#2416](https://github.com/valkey-io/valkey-glide/pull/2416)) diff --git a/glide-core/src/client/value_conversion.rs b/glide-core/src/client/value_conversion.rs index 48996fc76d..7eafe3f373 100644 --- a/glide-core/src/client/value_conversion.rs +++ b/glide-core/src/client/value_conversion.rs @@ -10,6 +10,11 @@ pub(crate) enum ExpectedReturnType<'a> { key_type: &'a Option>, value_type: &'a Option>, }, + // Second parameter is a 
function which returns true if value needs to be converted + SingleOrMultiNode( + &'a Option>, + Option bool>, + ), MapOfStringToDouble, Double, Boolean, @@ -278,12 +283,6 @@ pub(crate) fn convert_to_expected_type( }, ExpectedReturnType::Lolwut => { match value { - // cluster (multi-node) response - go recursive - Value::Map(map) => convert_map_entries( - map, - Some(ExpectedReturnType::BulkString), - Some(ExpectedReturnType::Lolwut), - ), // RESP 2 response Value::BulkString(bytes) => { let text = std::str::from_utf8(&bytes).unwrap(); @@ -558,19 +557,7 @@ pub(crate) fn convert_to_expected_type( // Second part is converted as `Map[str, Map[str, int]]` ExpectedReturnType::FunctionStatsReturnType => match value { // TODO reuse https://github.com/Bit-Quill/glide-for-redis/pull/331 and https://github.com/valkey-io/valkey-glide/pull/1489 - Value::Map(map) => { - if map[0].0 == Value::BulkString(b"running_script".into()) { - // already a RESP3 response - do nothing - Ok(Value::Map(map)) - } else { - // cluster (multi-node) response - go recursive - convert_map_entries( - map, - Some(ExpectedReturnType::BulkString), - Some(ExpectedReturnType::FunctionStatsReturnType), - ) - } - } + Value::Map(map) => Ok(Value::Map(map)), Value::Array(mut array) if array.len() == 4 => { let mut result: Vec<(Value, Value)> = Vec::with_capacity(2); let running_script_info = array.remove(1); @@ -1144,6 +1131,19 @@ pub(crate) fn convert_to_expected_type( ) .into()) } + ExpectedReturnType::SingleOrMultiNode(value_type, value_checker) => match value { + Value::Map(ref map) => match value_checker { + Some(func) => { + if !map.is_empty() && func(map[0].clone().1) { + convert_to_expected_type(value, Some(ExpectedReturnType::Map { key_type: &None, value_type })) + } else { + Ok(value) + } + } + None => convert_to_expected_type(value, Some(ExpectedReturnType::Map { key_type: &None, value_type })), + } + _ => convert_to_expected_type(value, *value_type), + } } } @@ -1392,12 +1392,19 @@ pub(crate) fn 
expected_type_for_cmd(cmd: &Cmd) -> Option { // TODO use enum to avoid mistakes match command.as_slice() { - b"HGETALL" | b"CONFIG GET" | b"FT.CONFIG GET" | b"FT._ALIASLIST" | b"HELLO" => { + b"HGETALL" | b"FT.CONFIG GET" | b"FT._ALIASLIST" | b"HELLO" => { Some(ExpectedReturnType::Map { key_type: &None, value_type: &None, }) } + b"CONFIG GET" => Some(ExpectedReturnType::SingleOrMultiNode( + &Some(ExpectedReturnType::Map { + key_type: &None, + value_type: &None, + }), + Some(|val| matches!(val, Value::Array(_))), + )), b"XCLAIM" => { if cmd.position(b"JUSTID").is_some() { Some(ExpectedReturnType::ArrayOfStrings) @@ -1481,11 +1488,17 @@ pub(crate) fn expected_type_for_cmd(cmd: &Cmd) -> Option { None } } - b"LOLWUT" => Some(ExpectedReturnType::Lolwut), + b"LOLWUT" => Some(ExpectedReturnType::SingleOrMultiNode( + &Some(ExpectedReturnType::Lolwut), + None, + )), b"FUNCTION LIST" => Some(ExpectedReturnType::ArrayOfMaps(&Some( ExpectedReturnType::ArrayOfMaps(&Some(ExpectedReturnType::StringOrSet)), ))), - b"FUNCTION STATS" => Some(ExpectedReturnType::FunctionStatsReturnType), + b"FUNCTION STATS" => Some(ExpectedReturnType::SingleOrMultiNode( + &Some(ExpectedReturnType::FunctionStatsReturnType), + Some(|val| matches!(val, Value::Array(_))), + )), b"GEOSEARCH" => { if cmd.position(b"WITHDIST").is_some() || cmd.position(b"WITHHASH").is_some() @@ -1953,17 +1966,14 @@ mod tests { #[test] fn convert_lolwut() { - assert!(matches!( - expected_type_for_cmd(redis::cmd("LOLWUT").arg("version").arg("42")), - Some(ExpectedReturnType::Lolwut) - )); - let unconverted_string : String = "\x1b[0;97;107m \x1b[0m--\x1b[0;37;47m \x1b[0m--\x1b[0;90;100m \x1b[0m--\x1b[0;30;40m \x1b[0m".into(); let expected: String = "\u{2591}--\u{2592}--\u{2593}-- ".into(); + let mut cmd = redis::cmd("LOLWUT"); + let conversion_type = expected_type_for_cmd(cmd.arg("version").arg("42")); let converted_1 = convert_to_expected_type( Value::BulkString(unconverted_string.clone().into_bytes()), - 
Some(ExpectedReturnType::Lolwut), + conversion_type, ); assert_eq!( Value::BulkString(expected.clone().into_bytes()), @@ -1975,7 +1985,7 @@ mod tests { format: redis::VerbatimFormat::Text, text: unconverted_string.clone(), }, - Some(ExpectedReturnType::Lolwut), + conversion_type, ); assert_eq!( Value::BulkString(expected.clone().into_bytes()), @@ -1993,16 +2003,16 @@ mod tests { Value::BulkString(unconverted_string.clone().into_bytes()), ), ]), - Some(ExpectedReturnType::Lolwut), + conversion_type, ); assert_eq!( Value::Map(vec![ ( - Value::BulkString("node 1".into()), + Value::SimpleString("node 1".into()), Value::BulkString(expected.clone().into_bytes()) ), ( - Value::BulkString("node 2".into()), + Value::SimpleString("node 2".into()), Value::BulkString(expected.clone().into_bytes()) ), ]), @@ -2011,7 +2021,7 @@ mod tests { let converted_4 = convert_to_expected_type( Value::SimpleString(unconverted_string.clone()), - Some(ExpectedReturnType::Lolwut), + conversion_type, ); assert!(converted_4.is_err()); } @@ -2521,11 +2531,6 @@ mod tests { #[test] fn convert_function_stats() { - assert!(matches!( - expected_type_for_cmd(redis::cmd("FUNCTION").arg("STATS")), - Some(ExpectedReturnType::FunctionStatsReturnType) - )); - let resp2_response_non_empty_first_part_data = vec![ Value::BulkString(b"running_script".into()), Value::Array(vec![ @@ -2652,7 +2657,8 @@ mod tests { ), ]); - let conversion_type = Some(ExpectedReturnType::FunctionStatsReturnType); + let cmd = redis::cmd("FUNCTION STATS"); + let conversion_type = expected_type_for_cmd(&cmd); // resp2 -> resp3 conversion with non-empty `running_script` block assert_eq!( convert_to_expected_type( diff --git a/node/tests/GlideClusterClient.test.ts b/node/tests/GlideClusterClient.test.ts index f74e137cac..2c8325a013 100644 --- a/node/tests/GlideClusterClient.test.ts +++ b/node/tests/GlideClusterClient.test.ts @@ -328,6 +328,27 @@ describe("GlideClusterClient", () => { TIMEOUT, ); + it.each([ProtocolVersion.RESP2, 
ProtocolVersion.RESP3])( + `config get with wildcard and multi node route %p`, + async (protocol) => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption(cluster.getAddresses(), protocol), + ); + const result = await client.configGet(["*file"], { + route: "allPrimaries", + }); + Object.values( + result as Record>, + ).forEach((resp) => { + const keys = Object.keys(resp); + expect(keys.length).toBeGreaterThan(5); + expect(keys).toContain("pidfile"); + expect(keys).toContain("logfile"); + }); + }, + TIMEOUT, + ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `can send transactions_%p`, async (protocol) => { diff --git a/python/python/tests/test_async_client.py b/python/python/tests/test_async_client.py index b32aa6936d..d8d65d1ca7 100644 --- a/python/python/tests/test_async_client.py +++ b/python/python/tests/test_async_client.py @@ -770,6 +770,18 @@ async def test_config_get_set(self, glide_client: TGlideClient): == OK ) + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_config_get_with_wildcard_and_multi_node_route( + self, glide_client: GlideClusterClient + ): + result = await glide_client.config_get(["*file"], AllPrimaries()) + assert isinstance(result, Dict) + for resp in result.values(): + assert len(resp) > 5 + assert b"pidfile" in resp + assert b"logfile" in resp + @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_decr_decrby_existing_key(self, glide_client: TGlideClient): From 79c3bc9ecb959fcba642c297324f57c0a243cb03 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Tue, 10 Dec 2024 17:22:19 -0800 Subject: [PATCH 09/32] [Backport 1.2] Protobuf: bump to 29.1 (#2802) * Protobuf: bump to 29.1 (#2801) Signed-off-by: Yury-Fridlyand --- .github/workflows/java-cd.yml | 4 ++-- .github/workflows/java.yml | 6 +++--- 
CHANGELOG.md | 5 ++--- java/DEVELOPER.md | 27 +++++++-------------------- java/client/build.gradle | 10 +++++----- 5 files changed, 19 insertions(+), 33 deletions(-) diff --git a/.github/workflows/java-cd.yml b/.github/workflows/java-cd.yml index 7033141881..5e2fd5886e 100644 --- a/.github/workflows/java-cd.yml +++ b/.github/workflows/java-cd.yml @@ -102,7 +102,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "28.2" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Create secret key ring file @@ -245,7 +245,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "28.2" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Start standalone Valkey server diff --git a/.github/workflows/java.yml b/.github/workflows/java.yml index 449c9f1a65..c837b22dc6 100644 --- a/.github/workflows/java.yml +++ b/.github/workflows/java.yml @@ -103,7 +103,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "28.2" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Build java client @@ -184,7 +184,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "28.2" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Build java wrapper @@ -235,7 +235,7 @@ jobs: - name: Install protoc (protobuf) uses: arduino/setup-protoc@v3 with: - version: "26.1" + version: "29.1" repo-token: ${{ secrets.GITHUB_TOKEN }} - name: Test java wrapper diff --git a/CHANGELOG.md b/CHANGELOG.md index 44a8ca101e..9f4421333d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,11 @@ #### Changes -* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561)) +* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), [#2802](https://github.com/valkey-io/valkey-glide/pull/2802) * Java: bump `netty` version 
([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) #### Breaking Changes #### Fixes +* Core: Fix RESP2 multi-node response from cluster ([#2381](https://github.com/valkey-io/valkey-glide/pull/2381)) #### Operational Enhancements @@ -108,8 +109,6 @@ #### Fixes * Core: UDS Socket Handling Rework ([#2482](https://github.com/valkey-io/valkey-glide/pull/2482)) -* Core: Fix RESP2 multi-node response from cluster ([#2381](https://github.com/valkey-io/valkey-glide/pull/2381)) - #### Operational Enhancements * Java: Add modules CI ([#2388](https://github.com/valkey-io/valkey-glide/pull/2388), [#2404](https://github.com/valkey-io/valkey-glide/pull/2404), [#2416](https://github.com/valkey-io/valkey-glide/pull/2416)) diff --git a/java/DEVELOPER.md b/java/DEVELOPER.md index 3a275425fc..9304dc3802 100644 --- a/java/DEVELOPER.md +++ b/java/DEVELOPER.md @@ -17,7 +17,7 @@ The Valkey GLIDE Java wrapper consists of both Java and Rust code. Rust bindings - git - GCC - pkg-config -- protoc (protobuf compiler) >= 28.2 +- protoc (protobuf compiler) >= 29.1 - openssl - openssl-dev - rustup @@ -64,17 +64,17 @@ Continue with **Install protobuf compiler** below. 
To install protobuf for MacOS, run: ```bash brew install protobuf -# Check that the protobuf compiler version 28.2 or higher is installed +# Check that the protobuf compiler version 29.1 or higher is installed protoc --version ``` For the remaining systems, do the following: ```bash PB_REL="https://github.com/protocolbuffers/protobuf/releases" -curl -LO $PB_REL/download/v28.2/protoc-28.2-linux-x86_64.zip -unzip protoc-28.2-linux-x86_64.zip -d $HOME/.local +curl -LO $PB_REL/download/v29.1/protoc-29.1-linux-x86_64.zip +unzip protoc-29.1-linux-x86_64.zip -d $HOME/.local export PATH="$PATH:$HOME/.local/bin" -# Check that the protobuf compiler version 28.2 or higher is installed +# Check that the protobuf compiler version 29.1 or higher is installed protoc --version ``` @@ -87,11 +87,7 @@ Before starting this step, make sure you've installed all software dependencies. git clone https://github.com/valkey-io/valkey-glide.git cd valkey-glide/java ``` -2. Initialize git submodule: - ```bash - git submodule update --init --recursive - ``` -3. Build the Java wrapper (Choose a build option from the following and run it from the `java` folder): +2. Build the Java wrapper (Choose a build option from the following and run it from the `java` folder): 1. Enter the java directory: @@ -164,8 +160,7 @@ Some troubleshooting issues: you may need to restart your machine. In particular, this may solve the following problems: - Failed to find `cargo` after `rustup`. - No Protobuf compiler (protoc) found. -- If build fails because of rust compiler fails, make sure submodules are updated using `git submodule update`. -- If protobuf 28.0 or earlier is detected, upgrade to the latest protobuf release. +- If protobuf 29.0 or earlier is detected, upgrade to the latest protobuf release. 
## Running Examples App @@ -275,14 +270,6 @@ To (re)generate protobuf code, use the following command: ./gradlew protobuf ``` -### Submodules - -After pulling new changes, ensure that you update the submodules by running the following command: - -```bash -git submodule update -``` - ### Contributing new ValKey commands A Valkey command can either have a standalone or cluster implementation which is dependent on their specifications. diff --git a/java/client/build.gradle b/java/client/build.gradle index fa617fb131..0075b01f87 100644 --- a/java/client/build.gradle +++ b/java/client/build.gradle @@ -14,7 +14,7 @@ repositories { } dependencies { - implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.28.2' + implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '4.29.1' implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.13.0' implementation group: 'io.netty', name: 'netty-handler', version: '4.1.115.Final' @@ -38,11 +38,11 @@ dependencies { ext { checkProtocVersion = { String output -> - // Line in format like: libprotoc 28.1 + // Line in format like: libprotoc 29.1 int majorVersion = Integer.parseInt(output.split(" ")[1].split("\\.")[0].trim()); int minorVersion = Integer.parseInt(output.split(" ")[1].split("\\.")[1].trim()); - if (majorVersion < 28) { - throw new GradleException("Protobuf compiler (protoc) version 28.0 or newer is required. Current version: $output"); + if (majorVersion < 29) { + throw new GradleException("Protobuf compiler (protoc) version 29.0 or newer is required. Current version: $output"); } return output.split(" ")[1] } @@ -61,7 +61,7 @@ tasks.register('protobuf', Exec) { } } catch (Exception e) { if (e.getMessage().startsWith("A problem occurred starting process")) { - throw new GradleException("No Protobuf compiler (protoc) found. Protobuf compiler version 28.0 or newer is required."); + throw new GradleException("No Protobuf compiler (protoc) found. 
Protobuf compiler version 29.0 or newer is required."); } throw e } From b9d340644ef6b3f0a951177c6c9ac07a762b5823 Mon Sep 17 00:00:00 2001 From: Lior Sventitzky Date: Mon, 16 Dec 2024 14:36:16 +0200 Subject: [PATCH 10/32] Fix python memory leak (#2818) changes references to be bound Signed-off-by: lior sventitzky --- python/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/src/lib.rs b/python/src/lib.rs index e3b9a298cb..09914c2c59 100644 --- a/python/src/lib.rs +++ b/python/src/lib.rs @@ -84,7 +84,7 @@ impl Script { fn new(code: &Bound) -> PyResult { let hash = if let Ok(code_str) = code.extract::() { glide_core::scripts_container::add_script(code_str.as_bytes()) - } else if let Ok(code_bytes) = code.extract::<&PyBytes>() { + } else if let Ok(code_bytes) = code.extract::>() { glide_core::scripts_container::add_script(code_bytes.as_bytes()) } else { return Err(PyTypeError::new_err( @@ -267,7 +267,7 @@ fn glide(_py: Python, m: &Bound) -> PyResult<()> { } #[pyfunction] - pub fn create_leaked_bytes_vec(args_vec: Vec<&PyBytes>) -> usize { + pub fn create_leaked_bytes_vec(args_vec: Vec>) -> usize { // Convert the bytes vec -> Bytes vector let bytes_vec: Vec = args_vec .iter() From a86378590eb6553470bfda65f4ba03e767871ac5 Mon Sep 17 00:00:00 2001 From: ikolomi <152477505+ikolomi@users.noreply.github.com> Date: Tue, 17 Dec 2024 14:01:17 +0200 Subject: [PATCH 11/32] Ensure cluster client creation fail when engine is < 7.0 and sharded subscriptions are configured (#2819) Introduce engine version validation when sharded subscriptions are configured Signed-off-by: ikolomi --- CHANGELOG.md | 1 + glide-core/Cargo.toml | 1 + glide-core/src/client/mod.rs | 58 +++++++++++++++- glide-core/tests/test_cluster_client.rs | 89 ++++++++++++++++++++++++- 4 files changed, 145 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9f4421333d..1f8ebb2985 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ #### Fixes * Core: 
Fix RESP2 multi-node response from cluster ([#2381](https://github.com/valkey-io/valkey-glide/pull/2381)) +* Core: Ensure cluster client creation fail when engine is < 7.0 and sharded subscriptions are configured ([#2819](https://github.com/valkey-io/valkey-glide/pull/2819)) #### Operational Enhancements diff --git a/glide-core/Cargo.toml b/glide-core/Cargo.toml index bd12bb09c9..394c953f49 100644 --- a/glide-core/Cargo.toml +++ b/glide-core/Cargo.toml @@ -41,6 +41,7 @@ nanoid = "0.4.0" async-trait = { version = "0.1.24" } serde_json = "1" serde = { version = "1", features = ["derive"] } +versions = "6.3" [features] socket-layer = [ diff --git a/glide-core/src/client/mod.rs b/glide-core/src/client/mod.rs index 29a3a05393..a560e17697 100644 --- a/glide-core/src/client/mod.rs +++ b/glide-core/src/client/mod.rs @@ -12,7 +12,10 @@ use redis::cluster_routing::{ MultipleNodeRoutingInfo, ResponsePolicy, Routable, RoutingInfo, SingleNodeRoutingInfo, }; use redis::cluster_slotmap::ReadFromReplicaStrategy; -use redis::{Cmd, ErrorKind, ObjectType, PushInfo, RedisError, RedisResult, ScanStateRC, Value}; +use redis::{ + Cmd, ErrorKind, FromRedisValue, InfoDict, ObjectType, PushInfo, RedisError, RedisResult, + ScanStateRC, Value, +}; pub use standalone_client::StandaloneClient; use std::io; use std::sync::atomic::{AtomicIsize, Ordering}; @@ -25,6 +28,7 @@ mod reconnecting_connection; mod standalone_client; mod value_conversion; use tokio::sync::mpsc; +use versions::Versioning; pub const HEARTBEAT_SLEEP_DURATION: Duration = Duration::from_secs(1); pub const DEFAULT_RETRIES: u32 = 3; @@ -607,7 +611,7 @@ async fn create_cluster_client( }; builder = builder.tls(tls); } - if let Some(pubsub_subscriptions) = redis_connection_info.pubsub_subscriptions { + if let Some(pubsub_subscriptions) = redis_connection_info.pubsub_subscriptions.clone() { builder = builder.pubsub_subscriptions(pubsub_subscriptions); } @@ -615,7 +619,55 @@ async fn create_cluster_client( builder = 
builder.periodic_connections_checks(CONNECTION_CHECKS_INTERVAL); let client = builder.build()?; - client.get_async_connection(push_sender).await + let mut con = client.get_async_connection(push_sender).await?; + + // This validation ensures that sharded subscriptions are not applied to Redis engines older than version 7.0, + // preventing scenarios where the client becomes inoperable or, worse, unaware that sharded pubsub messages are not being received. + // The issue arises because `client.get_async_connection()` might succeed even if the engine does not support sharded pubsub. + // For example, initial connections may exclude the target node for sharded subscriptions, allowing the creation to succeed, + // but subsequent resubscription tasks will fail when `setup_connection()` cannot establish a connection to the node. + // + // One approach to handle this would be to check the engine version inside `setup_connection()` and skip applying sharded subscriptions. + // However, this approach would leave the application unaware that the subscriptions were not applied, requiring the user to analyze logs to identify the issue. + // Instead, we explicitly check the engine version here and fail the connection creation if it is incompatible with sharded subscriptions. 
+ + if let Some(pubsub_subscriptions) = redis_connection_info.pubsub_subscriptions { + if pubsub_subscriptions.contains_key(&redis::PubSubSubscriptionKind::Sharded) { + let info_res = con + .route_command( + redis::cmd("INFO").arg("SERVER"), + RoutingInfo::SingleNode(SingleNodeRoutingInfo::Random), + ) + .await?; + let info_dict: InfoDict = FromRedisValue::from_redis_value(&info_res)?; + match info_dict.get::("redis_version") { + Some(version) => match (Versioning::new(version), Versioning::new("7.0")) { + (Some(server_ver), Some(min_ver)) => { + if server_ver < min_ver { + return Err(RedisError::from(( + ErrorKind::InvalidClientConfig, + "Sharded subscriptions provided, but the engine version is < 7.0", + ))); + } + } + _ => { + return Err(RedisError::from(( + ErrorKind::ResponseError, + "Failed to parse engine version", + ))) + } + }, + _ => { + return Err(RedisError::from(( + ErrorKind::ResponseError, + "Could not determine engine version from INFO result", + ))) + } + } + } + } + + Ok(con) } #[derive(thiserror::Error)] diff --git a/glide-core/tests/test_cluster_client.rs b/glide-core/tests/test_cluster_client.rs index 2943ab21bb..ec795481c6 100644 --- a/glide-core/tests/test_cluster_client.rs +++ b/glide-core/tests/test_cluster_client.rs @@ -7,13 +7,19 @@ mod cluster_client_tests { use std::collections::HashMap; use super::*; - use glide_core::connection_request::ReadFrom; + use cluster::{setup_cluster_with_replicas, LONG_CLUSTER_TEST_TIMEOUT}; + use glide_core::client::Client; + use glide_core::connection_request::{ + self, PubSubChannelsOrPatterns, PubSubSubscriptions, ReadFrom, + }; use redis::cluster_routing::{ MultipleNodeRoutingInfo, Route, RoutingInfo, SingleNodeRoutingInfo, SlotAddr, }; + use redis::InfoDict; use rstest::rstest; use utilities::cluster::{setup_test_basics_internal, SHORT_CLUSTER_TEST_TIMEOUT}; use utilities::*; + use versions::Versioning; fn count_primary_or_replica(value: &str) -> (u16, u16) { if value.contains("role:master") { @@ 
-214,4 +220,85 @@ mod cluster_client_tests { assert_eq!(replicas, 1); }); } + + #[rstest] + #[timeout(LONG_CLUSTER_TEST_TIMEOUT)] + fn test_fail_creation_with_unsupported_sharded_pubsub() { + block_on_all(async { + let mut test_basics = setup_cluster_with_replicas( + TestConfiguration { + cluster_mode: ClusterMode::Enabled, + shared_server: false, + ..Default::default() + }, + 0, + 3, + ) + .await; + + // get engine version + let cmd = redis::cmd("INFO"); + let info = test_basics + .client + .send_command( + &cmd, + Some(RoutingInfo::SingleNode(SingleNodeRoutingInfo::Random)), + ) + .await + .unwrap(); + + let info_dict: InfoDict = redis::from_owned_redis_value(info).unwrap(); + match info_dict.get::("redis_version") { + Some(version) => match (Versioning::new(version), Versioning::new("7.0")) { + (Some(server_ver), Some(min_ver)) => { + if server_ver < min_ver { + // try to create client with initial nodes lacking the target sharded subscription node + let cluster = test_basics.cluster.unwrap(); + let mut addresses = cluster.get_server_addresses(); + addresses.truncate(1); + + let mut connection_request = + connection_request::ConnectionRequest::new(); + connection_request.addresses = + addresses.iter().map(get_address_info).collect(); + + connection_request.cluster_mode_enabled = true; + // Assumes the current implementation of the test cluster, where slots are distributed across nodes + // in a monotonically increasing order. 
+ let mut last_slot_channel = PubSubChannelsOrPatterns::new(); + last_slot_channel + .channels_or_patterns + .push("last-slot-channel-{16383}".as_bytes().into()); + + let mut subs = PubSubSubscriptions::new(); + // First try to create a client with the Exact subscription + subs.channels_or_patterns_by_type + .insert(0, last_slot_channel.clone()); + connection_request.pubsub_subscriptions = + protobuf::MessageField::from_option(Some(subs.clone())); + + let _client = Client::new(connection_request.clone().into(), None) + .await + .unwrap(); + + // Now try to create a client with a Sharded subscription which should fail + subs.channels_or_patterns_by_type + .insert(2, last_slot_channel); + connection_request.pubsub_subscriptions = + protobuf::MessageField::from_option(Some(subs)); + + let client = Client::new(connection_request.into(), None).await; + assert!(client.is_err()); + } + } + _ => { + panic!("Failed to parse engine version"); + } + }, + _ => { + panic!("Could not determine engine version from INFO result"); + } + } + }); + } } From e0b4186796bccca7040af7d2c01c52548c19ac06 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Tue, 17 Dec 2024 16:18:20 -0800 Subject: [PATCH 12/32] Fix node ITs (#2826) Fix tests Signed-off-by: Yury-Fridlyand --- node/tests/GlideClusterClient.test.ts | 404 +++++++++----------------- 1 file changed, 145 insertions(+), 259 deletions(-) diff --git a/node/tests/GlideClusterClient.test.ts b/node/tests/GlideClusterClient.test.ts index 2c8325a013..43b039c61f 100644 --- a/node/tests/GlideClusterClient.test.ts +++ b/node/tests/GlideClusterClient.test.ts @@ -27,7 +27,6 @@ import { InfoOptions, ListDirection, ProtocolVersion, - ReadFrom, RequestError, Routes, ScoreFilter, @@ -2023,62 +2022,81 @@ describe("GlideClusterClient", () => { } }, ); - describe("GlideClusterClient - AZAffinity Read Strategy Test", () => { + + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "should return valid statistics using protocol %p", + async 
(protocol) => { + let glideClientForTesting; + + try { + // Create a GlideClusterClient instance for testing + glideClientForTesting = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + { + requestTimeout: 2000, + }, + ), + ); + + // Fetch statistics using get_statistics method + const stats = glideClientForTesting.getStatistics(); + + // Assertions to check if stats object has correct structure + expect(typeof stats).toBe("object"); + expect(stats).toHaveProperty("total_connections"); + expect(stats).toHaveProperty("total_clients"); + expect(Object.keys(stats)).toHaveLength(2); + } finally { + // Ensure the client is properly closed + glideClientForTesting?.close(); + } + }, + ); + + describe("AZAffinity Read Strategy Tests", () => { async function getNumberOfReplicas( azClient: GlideClusterClient, ): Promise { - const replicationInfo = await azClient.customCommand([ - "INFO", - "REPLICATION", - ]); - - if (Array.isArray(replicationInfo)) { - // Handle array response from cluster (CME Mode) - let totalReplicas = 0; - - for (const node of replicationInfo) { - const nodeInfo = node as { - key: string; - value: string | string[] | null; - }; - - if (typeof nodeInfo.value === "string") { - const lines = nodeInfo.value.split(/\r?\n/); - const connectedReplicasLine = lines.find( - (line) => - line.startsWith("connected_slaves:") || - line.startsWith("connected_replicas:"), - ); + const replicationInfo = (await azClient.info({ + sections: [InfoOptions.Replication], + })) as Record; + let totalReplicas = 0; + Object.values(replicationInfo).forEach((nodeInfo) => { + const lines = nodeInfo.split(/\r?\n/); + const connectedReplicasLine = lines.find( + (line) => + line.startsWith("connected_slaves:") || + line.startsWith("connected_replicas:"), + ); - if (connectedReplicasLine) { - const parts = connectedReplicasLine.split(":"); - const numReplicas = parseInt(parts[1], 10); + if (connectedReplicasLine) { + const 
parts = connectedReplicasLine.split(":"); + const numReplicas = parseInt(parts[1], 10); - if (!isNaN(numReplicas)) { - // Sum up replicas from each primary node - totalReplicas += numReplicas; - } - } + if (!isNaN(numReplicas)) { + // Sum up replicas from each primary node + totalReplicas += numReplicas; } } + }); - if (totalReplicas > 0) { - return totalReplicas; - } - - throw new Error( - "Could not find replica information in any node's response", - ); + if (totalReplicas > 0) { + return totalReplicas; } throw new Error( - "Unexpected response format from INFO REPLICATION command", + "Could not find replica information in any node's response", ); } it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "should route GET commands to all replicas with the same AZ using protocol %p", async (protocol) => { + // Skip test if version is below 8.0.0 + if (cluster.checkIfServerVersionLessThan("8.0.0")) return; + const az = "us-east-1a"; const GET_CALLS_PER_REPLICA = 3; @@ -2094,21 +2112,9 @@ describe("GlideClusterClient", () => { protocol, ), ); - - // Skip test if version is below 8.0.0 - if (cluster.checkIfServerVersionLessThan("8.0.0")) { - console.log( - "Skipping test: requires Valkey 8.0.0 or higher", - ); - return; - } - - await client_for_config_set.customCommand([ - "CONFIG", - "RESETSTAT", - ]); - await client_for_config_set.customCommand( - ["CONFIG", "SET", "availability-zone", az], + await client_for_config_set.configResetStat(); + await client_for_config_set.configSet( + { "availability-zone": az }, { route: "allNodes" }, ); @@ -2133,46 +2139,22 @@ describe("GlideClusterClient", () => { azCluster.getAddresses(), protocol, { - readFrom: "AZAffinity" as ReadFrom, + readFrom: "AZAffinity", clientAz: az, }, ), ); - const azs = await client_for_testing_az.customCommand( - ["CONFIG", "GET", "availability-zone"], + const azs = (await client_for_testing_az.configGet( + ["availability-zone"], { route: "allNodes" }, - ); + )) as Record>; - if (Array.isArray(azs)) 
{ - const allAZsMatch = azs.every((node) => { - const nodeResponse = node as { - key: string; - value: string | number; - }; - - if (protocol === ProtocolVersion.RESP2) { - // RESP2: Direct array format ["availability-zone", "us-east-1a"] - return ( - Array.isArray(nodeResponse.value) && - nodeResponse.value[1] === az - ); - } else { - // RESP3: Nested object format [{ key: "availability-zone", value: "us-east-1a" }] - return ( - Array.isArray(nodeResponse.value) && - nodeResponse.value[0]?.key === - "availability-zone" && - nodeResponse.value[0]?.value === az - ); - } - }); - expect(allAZsMatch).toBe(true); - } else { - throw new Error( - "Unexpected response format from CONFIG GET command", - ); - } + Object.values(azs).forEach((nodeResponse) => + expect(nodeResponse["availability-zone"]).toEqual( + "us-east-1a", + ), + ); // Stage 3: Set test data and perform GET operations await client_for_testing_az.set("foo", "testvalue"); @@ -2182,52 +2164,40 @@ describe("GlideClusterClient", () => { } // Stage 4: Verify GET commands were routed correctly - const info_result = - await client_for_testing_az.customCommand( - ["INFO", "ALL"], // Get both replication and commandstats info - { route: "allNodes" }, - ); - - if (Array.isArray(info_result)) { - const matching_entries_count = info_result.filter( - (node) => { - const nodeInfo = node as { - key: string; - value: string | string[] | null; - }; - const infoStr = - nodeInfo.value?.toString() || ""; - - // Check if this is a replica node AND it has the expected number of GET calls - const isReplicaNode = - infoStr.includes("role:slave") || - infoStr.includes("role:replica"); - - return ( - isReplicaNode && - infoStr.includes(get_cmdstat) - ); - }, - ).length; - - expect(matching_entries_count).toBe(n_replicas); // Should expect 12 as the cluster was created with 3 primary and 4 replicas, totalling 12 replica nodes - } else { - throw new Error( - "Unexpected response format from INFO command", - ); - } + const info_result 
= (await client_for_testing_az.info( + { sections: [InfoOptions.All], route: "allNodes" }, // Get both replication and commandstats info + )) as Record; + + const matching_entries_count = Object.values( + info_result, + ).filter((infoStr) => { + // Check if this is a replica node AND it has the expected number of GET calls + const isReplicaNode = + infoStr.includes("role:slave") || + infoStr.includes("role:replica"); + + return isReplicaNode && infoStr.includes(get_cmdstat); + }).length; + + expect(matching_entries_count).toBe(n_replicas); // Should expect 12 as the cluster was created with 3 primary and 4 replicas, totalling 12 replica nodes } finally { // Cleanup - await client_for_config_set?.close(); - await client_for_testing_az?.close(); + await client_for_config_set?.configSet( + { "availability-zone": "" }, + { route: "allNodes" }, + ); + client_for_config_set?.close(); + client_for_testing_az?.close(); } }, ); - }); - describe("GlideClusterClient - AZAffinity Routing to 1 replica", () => { + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "should route commands to single replica with AZ using protocol %p", async (protocol) => { + // Skip test if version is below 8.0.0 + if (cluster.checkIfServerVersionLessThan("8.0.0")) return; + const az = "us-east-1a"; const GET_CALLS = 3; const get_cmdstat = `calls=${GET_CALLS}`; @@ -2244,26 +2214,15 @@ describe("GlideClusterClient", () => { ), ); - // Skip test if version is below 8.0.0 - if (cluster.checkIfServerVersionLessThan("8.0.0")) { - console.log( - "Skipping test: requires Valkey 8.0.0 or higher", - ); - return; - } - - await client_for_config_set.customCommand( - ["CONFIG", "SET", "availability-zone", ""], + await client_for_config_set.configSet( + { "availability-zone": "" }, { route: "allNodes" }, ); - await client_for_config_set.customCommand([ - "CONFIG", - "RESETSTAT", - ]); + await client_for_config_set.configResetStat(); - await client_for_config_set.customCommand( - ["CONFIG", "SET", 
"availability-zone", az], + await client_for_config_set.configSet( + { "availability-zone": az }, { route: { type: "replicaSlotId", id: 12182 } }, ); @@ -2286,73 +2245,54 @@ describe("GlideClusterClient", () => { } // Stage 4: Verify GET commands were routed correctly - const info_result = - await client_for_testing_az.customCommand( - ["INFO", "ALL"], - { route: "allNodes" }, - ); + const info_result = (await client_for_testing_az.info({ + sections: [InfoOptions.All], + route: "allNodes", + })) as Record; // Process the info_result to check that only one replica has the GET calls - if (Array.isArray(info_result)) { - // Count the number of nodes where both get_cmdstat and az are present - const matching_entries_count = info_result.filter( - (node) => { - const nodeInfo = node as { - key: string; - value: string | string[] | null; - }; - const infoStr = - nodeInfo.value?.toString() || ""; - return ( - infoStr.includes(get_cmdstat) && - infoStr.includes(`availability_zone:${az}`) - ); - }, - ).length; - - expect(matching_entries_count).toBe(1); - - // Check that only one node has the availability zone set to az - const changed_az_count = info_result.filter((node) => { - const nodeInfo = node as { - key: string; - value: string | string[] | null; - }; - const infoStr = nodeInfo.value?.toString() || ""; + const matching_entries_count = Object.values( + info_result, + ).filter((infoStr) => { + return ( + infoStr.includes(get_cmdstat) && + infoStr.includes(`availability_zone:${az}`) + ); + }).length; + + expect(matching_entries_count).toBe(1); + + // Check that only one node has the availability zone set to az + const changed_az_count = Object.values(info_result).filter( + (infoStr) => { return infoStr.includes(`availability_zone:${az}`); - }).length; + }, + ).length; - expect(changed_az_count).toBe(1); - } else { - throw new Error( - "Unexpected response format from INFO command", - ); - } + expect(changed_az_count).toBe(1); } finally { - await 
client_for_config_set?.close(); - await client_for_testing_az?.close(); + await client_for_config_set?.configSet( + { "availability-zone": "" }, + { route: "allNodes" }, + ); + client_for_config_set?.close(); + client_for_testing_az?.close(); } }, ); - }); - describe("GlideClusterClient - AZAffinity with Non-existing AZ", () => { + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "should route commands to a replica when AZ does not exist using protocol %p", async (protocol) => { + // Skip test if version is below 8.0.0 + if (cluster.checkIfServerVersionLessThan("8.0.0")) return; + const GET_CALLS = 4; const replica_calls = 1; const get_cmdstat = `cmdstat_get:calls=${replica_calls}`; let client_for_testing_az; try { - // Skip test if server version is below 8.0.0 - if (azCluster.checkIfServerVersionLessThan("8.0.0")) { - console.log( - "Skipping test: requires Valkey 8.0.0 or higher", - ); - return; - } - // Create a client configured for AZAffinity with a non-existing AZ client_for_testing_az = await GlideClusterClient.createClient( @@ -2368,10 +2308,9 @@ describe("GlideClusterClient", () => { ); // Reset command stats on all nodes - await client_for_testing_az.customCommand( - ["CONFIG", "RESETSTAT"], - { route: "allNodes" }, - ); + await client_for_testing_az.configResetStat({ + route: "allNodes", + }); // Issue GET commands for (let i = 0; i < GET_CALLS; i++) { @@ -2379,76 +2318,23 @@ describe("GlideClusterClient", () => { } // Fetch command stats from all nodes - const info_result = - await client_for_testing_az.customCommand( - ["INFO", "COMMANDSTATS"], - { route: "allNodes" }, - ); + const info_result = (await client_for_testing_az.info({ + sections: [InfoOptions.Commandstats], + route: "allNodes", + })) as Record; // Inline matching logic - let matchingEntriesCount = 0; - - if ( - typeof info_result === "object" && - info_result !== null - ) { - const nodeResponses = Object.values(info_result); - - for (const response of nodeResponses) { - if ( - 
response && - typeof response === "object" && - "value" in response && - response.value.includes(get_cmdstat) - ) { - matchingEntriesCount++; - } - } - } else { - throw new Error( - "Unexpected response format from INFO command", - ); - } + const matchingEntriesCount = Object.values( + info_result, + ).filter((nodeResponses) => { + return nodeResponses.includes(get_cmdstat); + }).length; // Validate that only one replica handled the GET calls expect(matchingEntriesCount).toBe(4); } finally { // Cleanup: Close the client after test execution - await client_for_testing_az?.close(); - } - }, - ); - }); - describe("GlideClusterClient - Get Statistics", () => { - it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "should return valid statistics using protocol %p", - async (protocol) => { - let glideClientForTesting; - - try { - // Create a GlideClusterClient instance for testing - glideClientForTesting = - await GlideClusterClient.createClient( - getClientConfigurationOption( - cluster.getAddresses(), - protocol, - { - requestTimeout: 2000, - }, - ), - ); - - // Fetch statistics using get_statistics method - const stats = await glideClientForTesting.getStatistics(); - - // Assertions to check if stats object has correct structure - expect(typeof stats).toBe("object"); - expect(stats).toHaveProperty("total_connections"); - expect(stats).toHaveProperty("total_clients"); - expect(Object.keys(stats)).toHaveLength(2); - } finally { - // Ensure the client is properly closed - await glideClientForTesting?.close(); + client_for_testing_az?.close(); } }, ); From 36452b77ab2a0647efd9c87b89929ba4ef8ee441 Mon Sep 17 00:00:00 2001 From: Joseph Brinkman Date: Tue, 17 Dec 2024 20:12:12 -0500 Subject: [PATCH 13/32] remove native package references for MacOS x64 architecture (#2824) * remove native package references for MacOS x64 architecture Signed-off-by: jbrinkman --- CHANGELOG.md | 5 +++++ node/npm/glide/index.ts | 3 --- node/npm/glide/package.json | 1 - 3 files changed, 5 
insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1f8ebb2985..f820f45e05 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,8 @@ #### Changes + * Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), [#2802](https://github.com/valkey-io/valkey-glide/pull/2802) * Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) +* Node: Remove native package references for MacOs x64 architecture ([#2799](https://github.com/valkey-io/valkey-glide/issues/2799)) #### Breaking Changes @@ -13,6 +15,7 @@ ## 1.2.0 (2024-11-27) #### Changes + * Node: Client API for retrieving internal statistics ([#2727](https://github.com/valkey-io/valkey-glide/pull/2727)) * Python: Client API for retrieving internal statistics ([#2707](https://github.com/valkey-io/valkey-glide/pull/2707)) * Node, Python, Java: Adding support for replacing connection configured password ([#2651](https://github.com/valkey-io/valkey-glide/pull/2651), [#2659](https://github.com/valkey-io/valkey-glide/pull/2659), [#2677](https://github.com/valkey-io/valkey-glide/pull/2677)) @@ -107,7 +110,9 @@ #### Breaking Changes + #### Fixes + * Core: UDS Socket Handling Rework ([#2482](https://github.com/valkey-io/valkey-glide/pull/2482)) #### Operational Enhancements diff --git a/node/npm/glide/index.ts b/node/npm/glide/index.ts index da719c51c7..c4dab9795b 100644 --- a/node/npm/glide/index.ts +++ b/node/npm/glide/index.ts @@ -53,9 +53,6 @@ function loadNativeBinding() { break; case "darwin": switch (arch) { - case "x64": - nativeBinding = require("@scope/valkey-glide-darwin-x64"); - break; case "arm64": nativeBinding = require("@scope/valkey-glide-darwin-arm64"); break; diff --git a/node/npm/glide/package.json b/node/npm/glide/package.json index 78ec8d0821..8444de8ef9 100644 --- a/node/npm/glide/package.json +++ b/node/npm/glide/package.json @@ -41,7 +41,6 @@ }, "optionalDependencies": { 
"${scope}valkey-glide-darwin-arm64": "${package_version}", - "${scope}valkey-glide-darwin-x64": "${package_version}", "${scope}valkey-glide-linux-arm64": "${package_version}", "${scope}valkey-glide-linux-x64": "${package_version}", "${scope}valkey-glide-linux-musl-arm64": "${package_version}", From 4de48472cb3c29a3f04db2b18e18ec7e36f0f850 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Thu, 19 Dec 2024 09:54:10 -0800 Subject: [PATCH 14/32] Fix full matrix CI (#2784) Fix CI Signed-off-by: Yury-Fridlyand --- .github/workflows/create-test-matrices/action.yml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/.github/workflows/create-test-matrices/action.yml b/.github/workflows/create-test-matrices/action.yml index 5bd777b5a1..e1a7186521 100644 --- a/.github/workflows/create-test-matrices/action.yml +++ b/.github/workflows/create-test-matrices/action.yml @@ -41,12 +41,11 @@ runs: echo 'Select server engines to run tests against' if [[ "${{ github.event_name }}" == "pull_request" || "${{ github.event_name }}" == "push" || "${{ inputs.run-full-matrix }}" == "false" ]]; then echo 'Pick engines marked as `"run": "always"` only - on PR, push or manually triggered job which does not require full matrix' - jq -c '[.[] | select(.run == "always")]' < .github/json_matrices/engine-matrix.json | awk '{ printf "engine-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c '[.[] | select(.run == "always")]' < .github/json_matrices/engine-matrix.json | awk '{ printf "engine-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT else echo 'Pick all engines - on cron (schedule) or if manually triggered job requires a full matrix' - jq -c . < .github/json_matrices/engine-matrix.json | awk '{ printf "engine-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c . 
< .github/json_matrices/engine-matrix.json | awk '{ printf "engine-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT fi - cat $GITHUB_OUTPUT - name: Load host matrix id: load-host-matrix @@ -57,12 +56,11 @@ runs: echo 'Select runners (VMs) to run tests on' if [[ "${{ github.event_name }}" == "pull_request" || "${{ github.event_name }}" == "push" || "${{ inputs.run-full-matrix }}" == "false" ]]; then echo 'Pick runners marked as '"run": "always"' only - on PR, push or manually triggered job which does not require full matrix' - jq -c '[.[] | select(.run == "always")]' < .github/json_matrices/build-matrix.json | awk '{ printf "host-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c '[.[] | select(.run == "always")]' < .github/json_matrices/build-matrix.json | awk '{ printf "host-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT else echo 'Pick all runners assigned for the chosen client (language) - on cron (schedule) or if manually triggered job requires a full matrix' - jq -c "[.[] | select(.languages? and any(.languages[] == \"${{ inputs.language-name }}\"; .) and $CONDITION)]" < .github/json_matrices/build-matrix.json | awk '{ printf "host-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c "[.[] | select(.languages? and any(.languages[] == \"${{ inputs.language-name }}\"; .) 
and $CONDITION)]" < .github/json_matrices/build-matrix.json | awk '{ printf "host-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT fi - cat $GITHUB_OUTPUT - name: Create language version matrix id: create-lang-version-matrix @@ -72,9 +70,8 @@ runs: echo 'Select language (framework/SDK) versions to run tests on' if [[ "${{ github.event_name }}" == "pull_request" || "${{ github.event_name }}" == "push" || "${{ inputs.run-full-matrix }}" == "false" ]]; then echo 'Pick language versions listed in 'always-run-versions' only - on PR, push or manually triggered job which does not require full matrix' - jq -c '[.[] | select(.language == "${{ inputs.language-name }}") | .["always-run-versions"]][0] // []' < .github/json_matrices/supported-languages-versions.json | awk '{ printf "version-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c '[.[] | select(.language == "${{ inputs.language-name }}") | .["always-run-versions"]][0] // []' < .github/json_matrices/supported-languages-versions.json | awk '{ printf "version-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT else echo 'Pick language versions listed in 'versions' - on cron (schedule) or if manually triggered job requires a full matrix' - jq -c '[.[] | select(.language == "${{ inputs.language-name }}") | .versions][0]' < .github/json_matrices/supported-languages-versions.json | awk '{ printf "version-matrix=%s\n", $1 }' | tee -a $GITHUB_OUTPUT + jq -c '[.[] | select(.language == "${{ inputs.language-name }}") | .versions][0]' < .github/json_matrices/supported-languages-versions.json | awk '{ printf "version-matrix=%s\n", $0 }' | tee -a $GITHUB_OUTPUT fi - cat $GITHUB_OUTPUT From 8bfa0d87b3ab4a4aec58dc34b08af6f6cab32801 Mon Sep 17 00:00:00 2001 From: Avi Fenesh <55848801+avifenesh@users.noreply.github.com> Date: Wed, 25 Dec 2024 19:01:31 +0200 Subject: [PATCH 15/32] Release 1.2 uncoverd slots (#2869) * Add allow_non_covered_slots option to cluster scan across Node, Python, and Java implementations Signed-off-by: avifenesh * Merge 
python requirement files. (#2597) * Merge python requirement files. --------- Signed-off-by: Yury-Fridlyand Signed-off-by: avifenesh --------- Signed-off-by: avifenesh Signed-off-by: Yury-Fridlyand Co-authored-by: Yury-Fridlyand --- .github/workflows/python.yml | 4 +- CHANGELOG.md | 3 +- Makefile | 1 - eslint.config.mjs | 8 + .../redis-rs/redis/src/cluster_async/mod.rs | 150 +-- .../redis-rs/redis/src/cluster_slotmap.rs | 2 +- .../redis/src/commands/cluster_scan.rs | 1095 +++++++++-------- glide-core/redis-rs/redis/src/commands/mod.rs | 3 + glide-core/redis-rs/redis/src/lib.rs | 3 + .../redis-rs/redis/tests/test_cluster_scan.rs | 549 ++++++++- glide-core/src/client/mod.rs | 28 +- glide-core/src/protobuf/command_request.proto | 1 + glide-core/src/socket_listener.rs | 39 +- .../api/models/commands/scan/ScanOptions.java | 15 + .../java/glide/managers/CommandManager.java | 4 + node/src/Commands.ts | 13 + node/src/GlideClusterClient.ts | 17 +- node/tests/ScanTest.test.ts | 162 +++ node/tests/TestUtilities.ts | 45 + package.json | 12 +- python/DEVELOPER.md | 3 +- python/dev_requirements.txt | 7 - .../glide/async_commands/cluster_commands.py | 89 +- python/python/glide/async_commands/core.py | 1 + python/python/glide/glide_client.py | 2 + python/python/tests/conftest.py | 8 +- python/python/tests/test_scan.py | 157 ++- python/python/tests/test_transaction.py | 5 +- python/requirements.txt | 7 +- utils/TestUtils.ts | 8 +- 30 files changed, 1683 insertions(+), 758 deletions(-) delete mode 100644 python/dev_requirements.txt diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 699033cf1a..11df78697a 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -115,7 +115,7 @@ jobs: working-directory: ./python run: | source .env/bin/activate - pip install -r dev_requirements.txt + pip install -r requirements.txt cd python/tests/ pytest --asyncio-mode=auto --html=pytest_report.html --self-contained-html @@ -178,7 +178,7 @@ jobs: 
working-directory: ./python run: | source .env/bin/activate - pip install -r dev_requirements.txt + pip install -r requirements.txt cd python/tests/ pytest --asyncio-mode=auto -k test_pubsub --html=pytest_report.html --self-contained-html diff --git a/CHANGELOG.md b/CHANGELOG.md index f820f45e05..a1bc2cd15f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,5 @@ #### Changes - +* Node, Python, Java: Add allow uncovered slots scanning flag option in cluster scan ([#2814](https://github.com/valkey-io/valkey-glide/pull/2814), [#2815](https://github.com/valkey-io/valkey-glide/pull/2815), [#2860](https://github.com/valkey-io/valkey-glide/pull/2860)) * Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), [#2802](https://github.com/valkey-io/valkey-glide/pull/2802) * Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) * Node: Remove native package references for MacOs x64 architecture ([#2799](https://github.com/valkey-io/valkey-glide/issues/2799)) @@ -535,4 +535,3 @@ Preview release of **GLIDE for Redis** a Polyglot Redis client. See the [README](README.md) for additional information. 
- diff --git a/Makefile b/Makefile index 877d78f6e9..d4088fb898 100644 --- a/Makefile +++ b/Makefile @@ -58,7 +58,6 @@ python-test: .build/python_deps check-redis-server @cd python && python3 -m venv .env @echo "$(GREEN)Installing requirements...$(RESET)" @cd python && .env/bin/pip install -r requirements.txt - @cd python && .env/bin/pip install -r dev_requirements.txt @mkdir -p .build/ && touch .build/python_deps ## diff --git a/eslint.config.mjs b/eslint.config.mjs index 21995480f4..a96d4fdecd 100644 --- a/eslint.config.mjs +++ b/eslint.config.mjs @@ -2,6 +2,7 @@ import eslint from "@eslint/js"; import prettierConfig from "eslint-config-prettier"; import tseslint from "typescript-eslint"; +import jsdoc from "eslint-plugin-jsdoc"; export default tseslint.config( eslint.configs.recommended, @@ -54,6 +55,13 @@ export default tseslint.config( next: "*", }, ], + "@typescript-eslint/indent": ["error", 4, { + "SwitchCase": 1, + "ObjectExpression": 1, + "FunctionDeclaration": {"parameters": "first"}, + "FunctionExpression": {"parameters": "first"}, + "ignoredNodes": ["TSTypeParameterInstantiation"] + }], }, }, prettierConfig, diff --git a/glide-core/redis-rs/redis/src/cluster_async/mod.rs b/glide-core/redis-rs/redis/src/cluster_async/mod.rs index 3726d7a674..8164d09413 100644 --- a/glide-core/redis-rs/redis/src/cluster_async/mod.rs +++ b/glide-core/redis-rs/redis/src/cluster_async/mod.rs @@ -32,15 +32,13 @@ pub mod testing { use crate::{ client::GlideConnectionOptions, cluster_routing::{Routable, RoutingInfo, ShardUpdateResult}, - cluster_slotmap::SlotMap, cluster_topology::{ calculate_topology, get_slot, SlotRefreshState, DEFAULT_NUMBER_OF_REFRESH_SLOTS_RETRIES, DEFAULT_REFRESH_SLOTS_RETRY_BASE_DURATION_MILLIS, DEFAULT_REFRESH_SLOTS_RETRY_BASE_FACTOR, - SLOT_SIZE, }, cmd, - commands::cluster_scan::{cluster_scan, ClusterScanArgs, ObjectType, ScanStateRC}, - FromRedisValue, InfoDict, ToRedisArgs, + commands::cluster_scan::{cluster_scan, ClusterScanArgs, ScanStateRC}, + 
FromRedisValue, InfoDict, }; use dashmap::DashMap; use std::{ @@ -111,7 +109,7 @@ use crate::types::RetryMethod; pub(crate) const MUTEX_READ_ERR: &str = "Failed to obtain read lock. Poisoned mutex?"; const MUTEX_WRITE_ERR: &str = "Failed to obtain write lock. Poisoned mutex?"; -/// This represents an async Redis Cluster connection. It stores the +/// This represents an async Cluster connection. It stores the /// underlying connections maintained for each node in the cluster, as well /// as common parameters for connecting to nodes and executing commands. #[derive(Clone)] @@ -142,68 +140,9 @@ where }) } - /// Special handling for `SCAN` command, using `cluster_scan`. - /// If you wish to use a match pattern, use [`cluster_scan_with_pattern`]. - /// Perform a `SCAN` command on a Redis cluster, using scan state object in order to handle changes in topology - /// and make sure that all keys that were in the cluster from start to end of the scan are scanned. - /// In order to make sure all keys in the cluster scanned, topology refresh occurs more frequently and may affect performance. - /// - /// # Arguments - /// - /// * `scan_state_rc` - A reference to the scan state, For initiating new scan send [`ScanStateRC::new()`], - /// for each subsequent iteration use the returned [`ScanStateRC`]. - /// * `count` - An optional count of keys requested, - /// the amount returned can vary and not obligated to return exactly count. - /// * `object_type` - An optional [`ObjectType`] enum of requested key redis type. - /// - /// # Returns - /// - /// A [`ScanStateRC`] for the updated state of the scan and the vector of keys that were found in the scan. - /// structure of returned value: - /// `Ok((ScanStateRC, Vec))` - /// - /// When the scan is finished [`ScanStateRC`] will be None, and can be checked by calling `scan_state_wrapper.is_finished()`. 
- /// - /// # Example - /// ```rust,no_run - /// use redis::cluster::ClusterClient; - /// use redis::{ScanStateRC, FromRedisValue, from_redis_value, Value, ObjectType}; - /// - /// async fn scan_all_cluster() -> Vec { - /// let nodes = vec!["redis://127.0.0.1/"]; - /// let client = ClusterClient::new(nodes).unwrap(); - /// let mut connection = client.get_async_connection(None).await.unwrap(); - /// let mut scan_state_rc = ScanStateRC::new(); - /// let mut keys: Vec = vec![]; - /// loop { - /// let (next_cursor, scan_keys): (ScanStateRC, Vec) = - /// connection.cluster_scan(scan_state_rc, None, None).await.unwrap(); - /// scan_state_rc = next_cursor; - /// let mut scan_keys = scan_keys - /// .into_iter() - /// .map(|v| from_redis_value(&v).unwrap()) - /// .collect::>(); // Change the type of `keys` to `Vec` - /// keys.append(&mut scan_keys); - /// if scan_state_rc.is_finished() { - /// break; - /// } - /// } - /// keys - /// } - /// ``` - pub async fn cluster_scan( - &mut self, - scan_state_rc: ScanStateRC, - count: Option, - object_type: Option, - ) -> RedisResult<(ScanStateRC, Vec)> { - let cluster_scan_args = ClusterScanArgs::new(scan_state_rc, None, count, object_type); - self.route_cluster_scan(cluster_scan_args).await - } - /// Special handling for `SCAN` command, using `cluster_scan_with_pattern`. /// It is a special case of [`cluster_scan`], with an additional match pattern. - /// Perform a `SCAN` command on a Redis cluster, using scan state object in order to handle changes in topology + /// Perform a `SCAN` command on a cluster, using scan state object in order to handle changes in topology /// and make sure that all keys that were in the cluster from start to end of the scan are scanned. /// In order to make sure all keys in the cluster scanned, topology refresh occurs more frequently and may affect performance. 
/// @@ -211,10 +150,8 @@ where /// /// * `scan_state_rc` - A reference to the scan state, For initiating new scan send [`ScanStateRC::new()`], /// for each subsequent iteration use the returned [`ScanStateRC`]. - /// * `match_pattern` - A match pattern of requested keys. - /// * `count` - An optional count of keys requested, - /// the amount returned can vary and not obligated to return exactly count. - /// * `object_type` - An optional [`ObjectType`] enum of requested key redis type. + /// * `cluster_scan_args` - A [`ClusterScanArgs`] struct containing the arguments for the cluster scan command - match pattern, count, + /// object type and the allow_non_covered_slots flag. /// /// # Returns /// @@ -227,7 +164,7 @@ where /// # Example /// ```rust,no_run /// use redis::cluster::ClusterClient; - /// use redis::{ScanStateRC, FromRedisValue, from_redis_value, Value, ObjectType}; + /// use redis::{ScanStateRC, from_redis_value, Value, ObjectType, ClusterScanArgs}; /// /// async fn scan_all_cluster() -> Vec { /// let nodes = vec!["redis://127.0.0.1/"]; @@ -235,9 +172,10 @@ where /// let mut connection = client.get_async_connection(None).await.unwrap(); /// let mut scan_state_rc = ScanStateRC::new(); /// let mut keys: Vec = vec![]; + /// let cluster_scan_args = ClusterScanArgs::builder().with_count(1000).with_object_type(ObjectType::String).build(); /// loop { /// let (next_cursor, scan_keys): (ScanStateRC, Vec) = - /// connection.cluster_scan_with_pattern(scan_state_rc, b"my_key", None, None).await.unwrap(); + /// connection.cluster_scan(scan_state_rc, cluster_scan_args.clone()).await.unwrap(); /// scan_state_rc = next_cursor; /// let mut scan_keys = scan_keys /// .into_iter() @@ -251,19 +189,12 @@ where /// keys /// } /// ``` - pub async fn cluster_scan_with_pattern( + pub async fn cluster_scan( &mut self, scan_state_rc: ScanStateRC, - match_pattern: K, - count: Option, - object_type: Option, + mut cluster_scan_args: ClusterScanArgs, ) -> RedisResult<(ScanStateRC, Vec)> 
{ - let cluster_scan_args = ClusterScanArgs::new( - scan_state_rc, - Some(match_pattern.to_redis_args().concat()), - count, - object_type, - ); + cluster_scan_args.set_scan_state_cursor(scan_state_rc); self.route_cluster_scan(cluster_scan_args).await } @@ -279,18 +210,18 @@ where sender, }) .await - .map_err(|_| { + .map_err(|e| { RedisError::from(io::Error::new( io::ErrorKind::BrokenPipe, - "redis_cluster: Unable to send command", + format!("Cluster: Error occurred while trying to send SCAN command to internal send task. {e:?}"), )) })?; receiver .await - .unwrap_or_else(|_| { + .unwrap_or_else(|e| { Err(RedisError::from(io::Error::new( io::ErrorKind::BrokenPipe, - "redis_cluster: Unable to receive command", + format!("Cluster: Failed to receive SCAN command response from internal send task. {e:?}"), ))) }) .map(|response| match response { @@ -316,18 +247,20 @@ where sender, }) .await - .map_err(|_| { + .map_err(|e| { RedisError::from(io::Error::new( io::ErrorKind::BrokenPipe, - "redis_cluster: Unable to send command", + format!("Cluster: Error occurred while trying to send command to internal sender. {e:?}"), )) })?; receiver .await - .unwrap_or_else(|_| { + .unwrap_or_else(|e| { Err(RedisError::from(io::Error::new( io::ErrorKind::BrokenPipe, - "redis_cluster: Unable to receive command", + format!( + "Cluster: Failed to receive command response from internal sender. 
{e:?}" + ), ))) }) .map(|response| match response { @@ -489,21 +422,8 @@ where .map_err(|_| RedisError::from((ErrorKind::ClientError, MUTEX_WRITE_ERR))) } - // return address of node for slot - pub(crate) async fn get_address_from_slot( - &self, - slot: u16, - slot_addr: SlotAddr, - ) -> Option> { - self.conn_lock - .read() - .expect(MUTEX_READ_ERR) - .slot_map - .get_node_address_for_slot(slot, slot_addr) - } - // return epoch of node - pub(crate) async fn get_address_epoch(&self, node_address: &str) -> Result { + pub(crate) async fn address_epoch(&self, node_address: &str) -> Result { let command = cmd("CLUSTER").arg("INFO").to_owned(); let node_conn = self .conn_lock @@ -541,14 +461,26 @@ where } } - // return slots of node - pub(crate) async fn get_slots_of_address(&self, node_address: Arc) -> Vec { + /// return slots of node + pub(crate) async fn slots_of_address(&self, node_address: Arc) -> Vec { self.conn_lock .read() .expect(MUTEX_READ_ERR) .slot_map .get_slots_of_node(node_address) } + + /// Get connection for address + pub(crate) async fn connection_for_address( + &self, + address: &str, + ) -> Option> { + self.conn_lock + .read() + .expect(MUTEX_READ_ERR) + .connection_for_address(address) + .map(|(_, conn)| conn) + } } pub(crate) struct ClusterConnInner { @@ -1884,14 +1816,6 @@ where Self::refresh_slots_inner(inner, curr_retry).await } - pub(crate) fn check_if_all_slots_covered(slot_map: &SlotMap) -> bool { - let mut slots_covered = 0; - for (end, slots) in slot_map.slots.iter() { - slots_covered += end.saturating_sub(slots.start).saturating_add(1); - } - slots_covered == SLOT_SIZE - } - // Query a node to discover slot-> master mappings async fn refresh_slots_inner(inner: Arc>, curr_retry: usize) -> RedisResult<()> { let num_of_nodes = inner.conn_lock.read().expect(MUTEX_READ_ERR).len(); diff --git a/glide-core/redis-rs/redis/src/cluster_slotmap.rs b/glide-core/redis-rs/redis/src/cluster_slotmap.rs index 88e7549323..f2e43b4449 100644 --- 
a/glide-core/redis-rs/redis/src/cluster_slotmap.rs +++ b/glide-core/redis-rs/redis/src/cluster_slotmap.rs @@ -202,7 +202,7 @@ impl SlotMap { .collect() } - pub(crate) fn get_node_address_for_slot( + pub(crate) fn node_address_for_slot( &self, slot: u16, slot_addr: SlotAddr, diff --git a/glide-core/redis-rs/redis/src/commands/cluster_scan.rs b/glide-core/redis-rs/redis/src/commands/cluster_scan.rs index 109aceca22..517d43c6b3 100644 --- a/glide-core/redis-rs/redis/src/commands/cluster_scan.rs +++ b/glide-core/redis-rs/redis/src/commands/cluster_scan.rs @@ -1,73 +1,269 @@ -//! This module contains the implementation of scanning operations in a Redis cluster. +//! This module implements cluster-wide scanning operations for clusters. //! -//! The [`ClusterScanArgs`] struct represents the arguments for a cluster scan operation, -//! including the scan state reference, match pattern, count, and object type. +//! # Overview //! -//! The [[`ScanStateRC`]] struct is a wrapper for managing the state of a scan operation in a cluster. -//! It holds a reference to the scan state and provides methods for accessing the state. +//! The module provides functionality to scan keys across all nodes in a cluster, +//! handling topology changes, failovers, and partial cluster coverage scenarios. +//! It maintains state between scan iterations and ensures consistent scanning even +//! when cluster topology changes. //! -//! The [[`ClusterInScan`]] trait defines the methods for interacting with a Redis cluster during scanning, -//! including retrieving address information, refreshing slot mapping, and routing commands to specific address. +//! # Key Components //! -//! The [[`ScanState`]] struct represents the state of a scan operation in a Redis cluster. -//! It holds information about the current scan state, including the cursor position, scanned slots map, -//! address being scanned, and address's epoch. +//! 
- [`ClusterScanArgs`]: Configuration for scan operations including filtering and behavior options +//! - [`ScanStateRC`]: Thread-safe reference-counted wrapper for scan state management +//! - [`ScanState`]: Internal state tracking for cluster-wide scanning progress +//! - [`ObjectType`]: Supported data types for filtering scan results +//! +//! # Key Features +//! +//! - Resumable scanning across cluster nodes +//! - Automatic handling of cluster topology changes +//! - Support for all regular SCAN options +//! - Resilient to node failures and resharding +//! +//! # Implementation Details +//! +//! The scanning process is implemented using a bitmap to track scanned slots and +//! maintains epoch information to handle topology changes. The implementation: +//! +//! - Uses a 64-bit aligned bitmap for efficient slot tracking +//! - Maintains cursor position per node +//! - Handles partial cluster coverage scenarios +//! - Provides automatic recovery from node failures +//! - Ensures consistent scanning across topology changes +//! +//! # Error Handling +//! +//! The module handles various error scenarios including: +//! - Node failures during scanning +//! - Cluster topology changes +//! - Network connectivity issues +//! 
- Invalid routing scenarios use crate::aio::ConnectionLike; -use crate::cluster_async::{ - ClusterConnInner, Connect, Core, InternalRoutingInfo, InternalSingleNodeRouting, RefreshPolicy, - Response, MUTEX_READ_ERR, -}; +use crate::cluster_async::{ClusterConnInner, Connect, InnerCore, RefreshPolicy, MUTEX_READ_ERR}; use crate::cluster_routing::SlotAddr; use crate::cluster_topology::SLOT_SIZE; -use crate::{cmd, from_redis_value, Cmd, ErrorKind, RedisError, RedisResult, Value}; -use async_trait::async_trait; +use crate::{cmd, from_redis_value, ErrorKind, RedisError, RedisResult, Value}; use std::sync::Arc; -use strum_macros::Display; +use strum_macros::{Display, EnumString}; + +const BITS_PER_U64: u16 = u64::BITS as u16; +const NUM_OF_SLOTS: u16 = SLOT_SIZE; +const BITS_ARRAY_SIZE: u16 = NUM_OF_SLOTS / BITS_PER_U64; +const END_OF_SCAN: u16 = NUM_OF_SLOTS; +type SlotsBitsArray = [u64; BITS_ARRAY_SIZE as usize]; + +/// Holds configuration for a cluster scan operation. +/// +/// # Fields +/// - `scan_state_cursor`: Internal state tracking scan progress +/// - `match_pattern`: Optional pattern to filter keys +/// - `count`: Optional limit on number of keys returned per iteration +/// - `object_type`: Optional filter for specific data types +/// - `allow_non_covered_slots`: Whether to continue if some slots are uncovered +/// +/// See examples below for usage with the builder pattern. 
+/// # Examples +/// +/// Create a new `ClusterScanArgs` instance using the builder pattern: +/// +/// ```rust,no_run +/// use redis::ClusterScanArgs; +/// use redis::ObjectType; +/// +/// // Create basic scan args with defaults +/// let basic_scan = ClusterScanArgs::builder().build(); +/// +/// // Create scan args with custom options +/// let custom_scan = ClusterScanArgs::builder() +/// .with_match_pattern("user:*") // Match keys starting with "user:" +/// .with_count(100) // Return 100 keys per iteration +/// .with_object_type(ObjectType::Hash) // Only scan hash objects +/// .allow_non_covered_slots(true) // Continue scanning even if some slots aren't covered +/// .build(); +/// +/// // The builder can be used to create multiple configurations +/// let another_scan = ClusterScanArgs::builder() +/// .with_match_pattern("session:*") +/// .with_object_type(ObjectType::String) +/// .build(); +/// ``` -const BITS_PER_U64: usize = u64::BITS as usize; -const NUM_OF_SLOTS: usize = SLOT_SIZE as usize; -const BITS_ARRAY_SIZE: usize = NUM_OF_SLOTS / BITS_PER_U64; -const END_OF_SCAN: u16 = NUM_OF_SLOTS as u16 + 1; -type SlotsBitsArray = [u64; BITS_ARRAY_SIZE]; +#[derive(Clone, Default)] +pub struct ClusterScanArgs { + /// Reference-counted scan state cursor, managed internally. + pub scan_state_cursor: ScanStateRC, -#[derive(Clone)] -pub(crate) struct ClusterScanArgs { - pub(crate) scan_state_cursor: ScanStateRC, + /// Optional pattern to match keys during the scan. + pub match_pattern: Option>, + + /// A "hint" to the cluster of how much keys to return per scan iteration, if none is sent to the server, the default value is 10. + pub count: Option, + + /// Optional filter to include only keys of a specific data type. + pub object_type: Option, + + /// Flag indicating whether to allow scanning when there are slots not covered by the cluster, by default it is set to false and the scan will stop if some slots are not covered. 
+ pub allow_non_covered_slots: bool, +} + +impl ClusterScanArgs { + /// Creates a new [`ClusterScanArgsBuilder`] instance. + /// + /// # Returns + /// + /// A [`ClusterScanArgsBuilder`] instance for configuring cluster scan arguments. + pub fn builder() -> ClusterScanArgsBuilder { + ClusterScanArgsBuilder::default() + } + pub(crate) fn set_scan_state_cursor(&mut self, scan_state_cursor: ScanStateRC) { + self.scan_state_cursor = scan_state_cursor; + } +} + +#[derive(Default)] +/// Builder pattern for creating cluster scan arguments. +/// +/// This struct allows configuring various parameters for scanning keys in a cluster: +/// * Pattern matching for key filtering +/// * Count limit for returned keys +/// * Filtering by object type +/// * Control over scanning non-covered slots +/// +/// # Example +/// ``` +/// use redis::{ClusterScanArgs, ObjectType}; +/// +/// let args = ClusterScanArgs::builder() +/// .with_match_pattern(b"user:*") +/// .with_count(100) +/// .with_object_type(ObjectType::Hash) +/// .build(); +/// ``` +pub struct ClusterScanArgsBuilder { + /// By default, the match pattern is set to `None` and no filtering is applied. match_pattern: Option>, - count: Option, + /// A "hint" to the cluster of how much keys to return per scan iteration, by default none is sent to the server, the default value is 10. + count: Option, + /// By default, the object type is set to `None` and no filtering is applied. object_type: Option, + /// By default, the flag to allow scanning non-covered slots is set to `false`, meaning scanning will stop if some slots are not covered. + allow_non_covered_slots: Option, } -#[derive(Debug, Clone, Display)] -/// Represents the type of an object in Redis. +impl ClusterScanArgsBuilder { + /// Sets the match pattern for the scan operation. + /// + /// # Arguments + /// + /// * `pattern` - The pattern to match keys against. + /// + /// # Returns + /// + /// The updated [`ClusterScanArgsBuilder`] instance. 
+ pub fn with_match_pattern>>(mut self, pattern: T) -> Self { + self.match_pattern = Some(pattern.into()); + self + } + + /// Sets the count for the scan operation. + /// + /// # Arguments + /// + /// * `count` - A "hint" to the cluster of how much keys to return per scan iteration. + /// + /// The actual number of keys returned may be less or more than the count specified. + /// + /// 4,294,967,295 is the maximum keys possible in a cluster, so higher values will be capped. + /// Hence the count is represented as a `u32` instead of `usize`. + /// + /// The default value is 10, if nothing is sent to the server, meaning nothing set in the builder. + /// + /// # Returns + /// + /// The updated [`ClusterScanArgsBuilder`] instance. + pub fn with_count(mut self, count: u32) -> Self { + self.count = Some(count); + self + } + + /// Sets the object type for the scan operation. + /// + /// # Arguments + /// + /// * `object_type` - The type of object to filter keys by. + /// + /// See [`ObjectType`] for supported data types. + /// + /// # Returns + /// + /// The updated [`ClusterScanArgsBuilder`] instance. + pub fn with_object_type(mut self, object_type: ObjectType) -> Self { + self.object_type = Some(object_type); + self + } + + /// Sets the flag to allow scanning non-covered slots. + /// + /// # Arguments + /// + /// * `allow` - A boolean flag indicating whether to allow scanning non-covered slots. + /// + /// # Returns + /// + /// The updated [`ClusterScanArgsBuilder`] instance. + pub fn allow_non_covered_slots(mut self, allow: bool) -> Self { + self.allow_non_covered_slots = Some(allow); + self + } + + /// Builds the [`ClusterScanArgs`] instance with the provided configuration. + /// + /// # Returns + /// + /// A [`ClusterScanArgs`] instance with the configured options. 
+ pub fn build(self) -> ClusterScanArgs { + ClusterScanArgs { + scan_state_cursor: ScanStateRC::new(), + match_pattern: self.match_pattern, + count: self.count, + object_type: self.object_type, + allow_non_covered_slots: self.allow_non_covered_slots.unwrap_or(false), + } + } +} + +/// Represents the type of an object used to filter keys by data type. +/// +/// This enum is used with the `TYPE` option in the `SCAN` command to +/// filter keys by their data type. +#[derive(Debug, Clone, Display, PartialEq, EnumString)] pub enum ObjectType { - /// Represents a string object in Redis. + /// String data type. String, - /// Represents a list object in Redis. + /// List data type. List, - /// Represents a set object in Redis. + /// Set data type. Set, - /// Represents a sorted set object in Redis. + /// Sorted set data type. ZSet, - /// Represents a hash object in Redis. + /// Hash data type. Hash, - /// Represents a stream object in Redis. + /// Stream data type. Stream, } -impl ClusterScanArgs { - pub(crate) fn new( - scan_state_cursor: ScanStateRC, - match_pattern: Option>, - count: Option, - object_type: Option, - ) -> Self { - Self { - scan_state_cursor, - match_pattern, - count, - object_type, +impl From for ObjectType { + fn from(s: String) -> Self { + match s.to_lowercase().as_str() { + "string" => ObjectType::String, + "list" => ObjectType::List, + "set" => ObjectType::Set, + "zset" => ObjectType::ZSet, + "hash" => ObjectType::Hash, + "stream" => ObjectType::Stream, + _ => ObjectType::String, } } } @@ -80,10 +276,11 @@ pub enum ScanStateStage { Finished, } +/// Wrapper struct for managing the state of a cluster scan operation. +/// +/// This struct holds an `Arc` to the actual scan state and a status indicating +/// whether the scan is initiating, in progress, or finished. #[derive(Debug, Clone, Default)] -/// A wrapper struct for managing the state of a scan operation in a cluster. 
-/// It holds a reference to the scan state and provides methods for accessing the state. -/// The `status` field indicates the status of the scan operation. pub struct ScanStateRC { scan_state_rc: Arc>, status: ScanStateStage, @@ -121,7 +318,7 @@ impl ScanStateRC { } /// Returns a clone of the scan state, if it exist. - pub(crate) fn get_state_from_wrapper(&self) -> Option { + pub(crate) fn state_from_wrapper(&self) -> Option { if self.status == ScanStateStage::Initiating || self.status == ScanStateStage::Finished { None } else { @@ -130,33 +327,10 @@ impl ScanStateRC { } } -/// This trait defines the methods for interacting with a Redis cluster during scanning. -#[async_trait] -pub(crate) trait ClusterInScan { - /// Retrieves the address associated with a given slot in the cluster. - async fn get_address_by_slot(&self, slot: u16) -> RedisResult>; - - /// Retrieves the epoch of a given address in the cluster. - /// The epoch represents the version of the address, which is updated when a failover occurs or slots migrate in. - async fn get_address_epoch(&self, address: &str) -> Result; - - /// Retrieves the slots assigned to a given address in the cluster. - async fn get_slots_of_address(&self, address: Arc) -> Vec; - - /// Routes a Redis command to a specific address in the cluster. - async fn route_command(&self, cmd: Cmd, address: &str) -> RedisResult; - - /// Check if all slots are covered by the cluster - async fn are_all_slots_covered(&self) -> bool; - - /// Check if the topology of the cluster has changed and refresh the slots if needed - async fn refresh_if_topology_changed(&self) -> RedisResult; -} - -/// Represents the state of a scan operation in a Redis cluster. +/// Represents the state of a cluster scan operation. /// -/// This struct holds information about the current scan state, including the cursor position, -/// the scanned slots map, the address being scanned, and the address's epoch. 
+/// This struct keeps track of the current cursor, which slots have been scanned, +/// the address currently being scanned, and the epoch of that address. #[derive(PartialEq, Debug, Clone)] pub(crate) struct ScanState { // the real cursor in the scan operation @@ -205,7 +379,7 @@ impl ScanState { fn create_finished_state() -> Self { Self { cursor: 0, - scanned_slots_map: [0; BITS_ARRAY_SIZE], + scanned_slots_map: [0; BITS_ARRAY_SIZE as usize], address_in_scan: Default::default(), address_epoch: 0, scan_status: ScanStateStage::Finished, @@ -217,63 +391,68 @@ impl ScanState { /// and the address set to the address associated with slot 0. /// The address epoch is set to the epoch of the address. /// If the address epoch cannot be retrieved, the method returns an error. - async fn initiate_scan(connection: &C) -> RedisResult { - let new_scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE]; + async fn initiate_scan( + core: &InnerCore, + allow_non_covered_slots: bool, + ) -> RedisResult + where + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, + { + let mut new_scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE as usize]; let new_cursor = 0; - let address = connection.get_address_by_slot(0).await?; - let address_epoch = connection.get_address_epoch(&address).await.unwrap_or(0); - Ok(ScanState::new( - new_cursor, - new_scanned_slots_map, - address, - address_epoch, - ScanStateStage::InProgress, - )) - } + let address = + next_address_to_scan(core, 0, &mut new_scanned_slots_map, allow_non_covered_slots)?; - /// Get the next slot to be scanned based on the scanned slots map. - /// If all slots have been scanned, the method returns [`END_OF_SCAN`]. 
- fn get_next_slot(&self, scanned_slots_map: &SlotsBitsArray) -> Option { - let all_slots_scanned = scanned_slots_map.iter().all(|&word| word == u64::MAX); - if all_slots_scanned { - return Some(END_OF_SCAN); - } - for (i, slot) in scanned_slots_map.iter().enumerate() { - let mut mask = 1; - for j in 0..BITS_PER_U64 { - if (slot & mask) == 0 { - return Some((i * BITS_PER_U64 + j) as u16); - } - mask <<= 1; + match address { + NextNodeResult::AllSlotsCompleted => Ok(ScanState::create_finished_state()), + NextNodeResult::Address(address) => { + let address_epoch = core.address_epoch(&address).await.unwrap_or(0); + Ok(ScanState::new( + new_cursor, + new_scanned_slots_map, + address, + address_epoch, + ScanStateStage::InProgress, + )) } } - None } /// Update the scan state without updating the scanned slots map. /// This method is used when the address epoch has changed, and we can't determine which slots are new. /// In this case, we skip updating the scanned slots map and only update the address and cursor. - async fn creating_state_without_slot_changes( + async fn new_scan_state( &self, - connection: &C, - ) -> RedisResult { - let next_slot = self.get_next_slot(&self.scanned_slots_map).unwrap_or(0); - let new_address = if next_slot == END_OF_SCAN { - return Ok(ScanState::create_finished_state()); - } else { - connection.get_address_by_slot(next_slot).await - }; - match new_address { - Ok(address) => { - let new_epoch = connection.get_address_epoch(&address).await.unwrap_or(0); + core: Arc>, + allow_non_covered_slots: bool, + new_scanned_slots_map: Option, + ) -> RedisResult + where + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, + { + // If the new scanned slots map is not provided, use the current scanned slots map. + // The new scanned slots map is provided in the general case when the address epoch has not changed, + // meaning that we could safely update the scanned slots map with the slots owned by the node. 
+ // Epoch change means that some slots are new, and we can't determine which slots been there from the beginning and which are new. + let mut scanned_slots_map = new_scanned_slots_map.unwrap_or(self.scanned_slots_map); + let next_slot = next_slot(&scanned_slots_map).unwrap_or(0); + match next_address_to_scan( + &core, + next_slot, + &mut scanned_slots_map, + allow_non_covered_slots, + ) { + Ok(NextNodeResult::Address(new_address)) => { + let new_epoch = core.address_epoch(&new_address).await.unwrap_or(0); Ok(ScanState::new( 0, - self.scanned_slots_map, - address, + scanned_slots_map, + new_address, new_epoch, ScanStateStage::InProgress, )) } + Ok(NextNodeResult::AllSlotsCompleted) => Ok(ScanState::create_finished_state()), Err(err) => Err(err), } } @@ -284,210 +463,204 @@ impl ScanState { /// If the address epoch has changed, the method skips updating the scanned slots map and only updates the address and cursor. /// If the address epoch has not changed, the method updates the scanned slots map with the slots owned by the address. /// The method returns the new scan state with the updated cursor, scanned slots map, address, and epoch. 
- async fn create_updated_scan_state_for_completed_address( + async fn create_updated_scan_state_for_completed_address( &mut self, - connection: &C, - ) -> RedisResult { - connection - .refresh_if_topology_changed() - .await - .map_err(|err| { - RedisError::from(( - ErrorKind::ResponseError, - "Error during cluster scan: failed to refresh slots", - format!("{:?}", err), - )) - })?; + core: Arc>, + allow_non_covered_slots: bool, + ) -> RedisResult + where + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, + { + ClusterConnInner::check_topology_and_refresh_if_diff( + core.clone(), + &RefreshPolicy::NotThrottable, + ) + .await?; + let mut scanned_slots_map = self.scanned_slots_map; // If the address epoch changed it mean that some slots in the address are new, so we cant know which slots been there from the beginning and which are new, or out and in later. // In this case we will skip updating the scanned_slots_map and will just update the address and the cursor - let new_address_epoch = connection - .get_address_epoch(&self.address_in_scan) - .await - .unwrap_or(0); + let new_address_epoch = core.address_epoch(&self.address_in_scan).await.unwrap_or(0); if new_address_epoch != self.address_epoch { - return self.creating_state_without_slot_changes(connection).await; + return self + .new_scan_state(core, allow_non_covered_slots, None) + .await; } // If epoch wasn't changed, the slots owned by the address after the refresh are all valid as slots that been scanned // So we will update the scanned_slots_map with the slots owned by the address - let slots_scanned = connection - .get_slots_of_address(self.address_in_scan.clone()) - .await; + let slots_scanned = core.slots_of_address(self.address_in_scan.clone()).await; for slot in slots_scanned { - let slot_index = slot as usize / BITS_PER_U64; - let slot_bit = slot as usize % BITS_PER_U64; - scanned_slots_map[slot_index] |= 1 << slot_bit; + mark_slot_as_scanned(&mut scanned_slots_map, slot); } // Get the next 
address to scan and its param base on the next slot set to 0 in the scanned_slots_map - let next_slot = self.get_next_slot(&scanned_slots_map).unwrap_or(0); - let new_address = if next_slot == END_OF_SCAN { - return Ok(ScanState::create_finished_state()); - } else { - connection.get_address_by_slot(next_slot).await - }; - match new_address { - Ok(new_address) => { - let new_epoch = connection - .get_address_epoch(&new_address) - .await - .unwrap_or(0); - let new_cursor = 0; - Ok(ScanState::new( - new_cursor, - scanned_slots_map, - new_address, - new_epoch, - ScanStateStage::InProgress, - )) - } - Err(err) => Err(err), - } + self.new_scan_state(core, allow_non_covered_slots, Some(scanned_slots_map)) + .await } } -// Implement the [`ClusterInScan`] trait for [`InnerCore`] of async cluster connection. -#[async_trait] -impl ClusterInScan for Core +fn mark_slot_as_scanned(scanned_slots_map: &mut SlotsBitsArray, slot: u16) { + let slot_index = (slot as u64 / BITS_PER_U64 as u64) as usize; + let slot_bit = slot as u64 % (BITS_PER_U64 as u64); + scanned_slots_map[slot_index] |= 1 << slot_bit; +} + +#[derive(PartialEq, Debug, Clone)] +/// The address type representing a connection address +/// +/// # Fields +/// +/// * `Address` - A thread-safe shared string containing the server address +/// * `AllSlotsCompleted` - Indicates that all slots have been scanned +enum NextNodeResult { + Address(Arc), + AllSlotsCompleted, +} + +/// Determines the next node address to scan within the cluster. +/// +/// This asynchronous function iterates through cluster slots to find the next available +/// node responsible for scanning. If a slot is not covered and `allow_non_covered_slots` +/// is enabled, it marks the slot as scanned and proceeds to the next one. The process +/// continues until a valid address is found or all slots have been scanned. +/// +/// # Arguments +/// +/// * `core` - Reference to the cluster's inner core connection. +/// * `slot` - The current slot number to scan. 
+/// * `scanned_slots_map` - Mutable reference to the bitmap tracking scanned slots. +/// * `allow_non_covered_slots` - Flag indicating whether to allow scanning of uncovered slots. +/// +/// # Returns +/// +/// * `RedisResult` - Returns the next node address to scan or indicates completion. +/// +/// # Type Parameters +/// +/// * `C`: The connection type that must implement `ConnectionLike`, `Connect`, `Clone`, `Send`, `Sync`, and `'static`. +/// +fn next_address_to_scan( + core: &InnerCore, + mut slot: u16, + scanned_slots_map: &mut SlotsBitsArray, + allow_non_covered_slots: bool, +) -> RedisResult where C: ConnectionLike + Connect + Clone + Send + Sync + 'static, { - async fn get_address_by_slot(&self, slot: u16) -> RedisResult> { - let address = self - .get_address_from_slot(slot, SlotAddr::ReplicaRequired) - .await; - match address { - Some(addr) => Ok(addr), - None => { - if self.are_all_slots_covered().await { - Err(RedisError::from(( - ErrorKind::IoError, - "Failed to get connection to the node cover the slot, please check the cluster configuration ", - ))) - } else { - Err(RedisError::from(( - ErrorKind::NotAllSlotsCovered, - "All slots are not covered by the cluster, please check the cluster configuration ", - ))) - } - } + loop { + if slot == END_OF_SCAN { + return Ok(NextNodeResult::AllSlotsCompleted); } - } - async fn get_address_epoch(&self, address: &str) -> Result { - self.as_ref().get_address_epoch(address).await - } - async fn get_slots_of_address(&self, address: Arc) -> Vec { - self.as_ref().get_slots_of_address(address).await - } - async fn route_command(&self, cmd: Cmd, address: &str) -> RedisResult { - let routing = InternalRoutingInfo::SingleNode(InternalSingleNodeRouting::ByAddress( - address.to_string(), - )); - let core = self.to_owned(); - let response = ClusterConnInner::::try_cmd_request(Arc::new(cmd), routing, core) - .await - .map_err(|err| err.1)?; - match response { - Response::Single(value) => Ok(value), - _ => 
Err(RedisError::from(( - ErrorKind::ClientError, - "Expected single response, got unexpected response", - ))), + if let Some(addr) = core + .conn_lock + .read() + .expect(MUTEX_READ_ERR) + .slot_map + .node_address_for_slot(slot, SlotAddr::ReplicaRequired) + { + // Found a valid address for the slot + return Ok(NextNodeResult::Address(addr)); + } else if allow_non_covered_slots { + // Mark the current slot as scanned + mark_slot_as_scanned(scanned_slots_map, slot); + slot = next_slot(scanned_slots_map).unwrap(); + } else { + // Error if slots are not covered and scanning is not allowed + return Err(RedisError::from(( + ErrorKind::NotAllSlotsCovered, + "Could not find an address covering a slot, SCAN operation cannot continue \n + If you want to continue scanning even if some slots are not covered, set allow_non_covered_slots to true \n + Note that this may lead to incomplete scanning, and the SCAN operation lose its all guarantees ", + ))); } } - async fn are_all_slots_covered(&self) -> bool { - ClusterConnInner::::check_if_all_slots_covered( - &self.conn_lock.read().expect(MUTEX_READ_ERR).slot_map, - ) - } - async fn refresh_if_topology_changed(&self) -> RedisResult { - ClusterConnInner::check_topology_and_refresh_if_diff( - self.to_owned(), - // The cluster SCAN implementation must refresh the slots when a topology change is found - // to ensure the scan logic is correct. - &RefreshPolicy::NotThrottable, - ) - .await +} + +/// Get the next slot to be scanned based on the scanned slots map. +/// If all slots have been scanned, the method returns [`END_OF_SCAN`]. 
+fn next_slot(scanned_slots_map: &SlotsBitsArray) -> Option { + let all_slots_scanned = scanned_slots_map.iter().all(|&word| word == u64::MAX); + if all_slots_scanned { + return Some(END_OF_SCAN); + } + for (i, slot) in scanned_slots_map.iter().enumerate() { + let mut mask = 1; + for j in 0..BITS_PER_U64 { + if (slot & mask) == 0 { + return Some(i as u16 * BITS_PER_U64 + j); + } + mask <<= 1; + } } + None } -/// Perform a cluster scan operation. -/// This function performs a scan operation in a Redis cluster using the given [`ClusterInScan`] connection. -/// It scans the cluster for keys based on the given `ClusterScanArgs` arguments. -/// The function returns a tuple containing the new scan state cursor and the keys found in the scan operation. -/// If the scan operation fails, an error is returned. +/// Performs a cluster-wide `SCAN` operation. +/// +/// This function scans the cluster for keys based on the provided arguments. +/// It handles the initiation of a new scan or continues an existing scan, manages +/// scan state, handles routing failures, and ensures consistent scanning across +/// cluster topology changes. /// /// # Arguments -/// * `core` - The connection to the Redis cluster. -/// * `cluster_scan_args` - The arguments for the cluster scan operation. +/// +/// * `core` - An `Arc`-wrapped reference to the cluster connection (`InnerCore`). +/// * `cluster_scan_args` - Configuration and arguments for the scan operation. /// /// # Returns -/// A tuple containing the new scan state cursor and the keys found in the scan operation. -/// If the scan operation fails, an error is returned. +/// +/// * `RedisResult<(ScanStateRC, Vec)>` - +/// - On success: A tuple containing the updated scan state (`ScanStateRC`) and a vector of `Value`s representing the found keys. +/// - On failure: A `RedisError` detailing the reason for the failure. 
+/// +/// # Type Parameters +/// +/// * `C`: The connection type that must implement `ConnectionLike`, `Connect`, `Clone`, `Send`, `Sync`, and `'static`. +/// pub(crate) async fn cluster_scan( - core: C, + core: Arc>, cluster_scan_args: ClusterScanArgs, ) -> RedisResult<(ScanStateRC, Vec)> where - C: ClusterInScan, + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, { - let ClusterScanArgs { - scan_state_cursor, - match_pattern, - count, - object_type, - } = cluster_scan_args; - // If scan_state is None, meaning we start a new scan - let scan_state = match scan_state_cursor.get_state_from_wrapper() { + // Extract the current scan state cursor and the flag for non-covered slots + let scan_state_cursor = &cluster_scan_args.scan_state_cursor; + let allow_non_covered_slots = cluster_scan_args.allow_non_covered_slots; + + // Determine the current scan state: + // - If an existing scan state is present, use it. + // - Otherwise, initiate a new scan. + let scan_state = match scan_state_cursor.state_from_wrapper() { Some(state) => state, - None => match ScanState::initiate_scan(&core).await { + None => match ScanState::initiate_scan(&core, allow_non_covered_slots).await { Ok(state) => state, Err(err) => { + // Early return if initiating the scan fails return Err(err); } }, }; - // Send the actual scan command to the address in the scan_state - let scan_result = send_scan( - &scan_state, - &core, - match_pattern.clone(), - count, - object_type.clone(), - ) - .await; - let ((new_cursor, new_keys), mut scan_state): ((u64, Vec), ScanState) = match scan_result - { - Ok(scan_result) => (from_redis_value(&scan_result)?, scan_state.clone()), - Err(err) => match err.kind() { - // If the scan command failed to route to the address because the address is not found in the cluster or - // the connection to the address cant be reached from different reasons, we will check we want to check if - // the problem is problem that we can recover from like failover or scale down or 
some network issue - // that we can retry the scan command to an address that own the next slot we are at. - ErrorKind::IoError - | ErrorKind::AllConnectionsUnavailable - | ErrorKind::ConnectionNotFoundForRoute => { - let retry = - retry_scan(&scan_state, &core, match_pattern, count, object_type).await?; - (from_redis_value(&retry.0?)?, retry.1) - } - _ => return Err(err), - }, - }; + // Send the SCAN command using the current scan state and scan arguments + let ((new_cursor, new_keys), mut scan_state) = + try_scan(&scan_state, &cluster_scan_args, core.clone()).await?; - // If the cursor is 0, meaning we finished scanning the address - // we will update the scan state to get the next address to scan + // Check if the cursor indicates the end of the current scan segment if new_cursor == 0 { + // Update the scan state to move to the next address/node in the cluster scan_state = scan_state - .create_updated_scan_state_for_completed_address(&core) + .create_updated_scan_state_for_completed_address(core, allow_non_covered_slots) .await?; } - // If the address is empty, meaning we finished scanning all the address + // Verify if the entire cluster has been scanned if scan_state.scan_status == ScanStateStage::Finished { + // Return the final scan state and the collected keys return Ok((ScanStateRC::create_finished(), new_keys)); } + // Update the scan state with the new cursor and maintain the progress scan_state = ScanState::new( new_cursor, scan_state.scanned_slots_map, @@ -495,256 +668,214 @@ where scan_state.address_epoch, ScanStateStage::InProgress, ); + + // Return the updated scan state and the newly found keys Ok((ScanStateRC::from_scan_state(scan_state), new_keys)) } -// Send the scan command to the address in the scan_state +/// Sends the `SCAN` command to the specified address. +/// +/// # Arguments +/// +/// * `scan_state` - The current scan state. 
+/// * `cluster_scan_args` - Arguments for the scan operation, including match pattern, count, object type, and allow_non_covered_slots. +/// * `core` - The cluster connection. +/// +/// # Returns +/// +/// A `RedisResult` containing the response from the `SCAN` command. async fn send_scan( scan_state: &ScanState, - core: &C, - match_pattern: Option>, - count: Option, - object_type: Option, + cluster_scan_args: &ClusterScanArgs, + core: Arc>, ) -> RedisResult where - C: ClusterInScan, + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, { - let mut scan_command = cmd("SCAN"); - scan_command.arg(scan_state.cursor); - if let Some(match_pattern) = match_pattern { - scan_command.arg("MATCH").arg(match_pattern); - } - if let Some(count) = count { - scan_command.arg("COUNT").arg(count); - } - if let Some(object_type) = object_type { - scan_command.arg("TYPE").arg(object_type.to_string()); + if let Some(conn_future) = core + .connection_for_address(&scan_state.address_in_scan) + .await + { + let mut conn = conn_future.await; + let mut scan_command = cmd("SCAN"); + scan_command.arg(scan_state.cursor); + if let Some(match_pattern) = cluster_scan_args.match_pattern.as_ref() { + scan_command.arg("MATCH").arg(match_pattern); + } + if let Some(count) = cluster_scan_args.count { + scan_command.arg("COUNT").arg(count); + } + if let Some(object_type) = &cluster_scan_args.object_type { + scan_command.arg("TYPE").arg(object_type.to_string()); + } + conn.req_packed_command(&scan_command).await + } else { + Err(RedisError::from(( + ErrorKind::ConnectionNotFoundForRoute, + "Cluster scan failed. No connection available for address: ", + format!("{}", scan_state.address_in_scan), + ))) } +} - core.route_command(scan_command, &scan_state.address_in_scan) - .await +/// Checks if the error is retryable during scanning. +/// Retryable errors include network issues, cluster topology changes, and unavailable connections. 
+/// Scan operations are not keyspace operations, so they are not affected by keyspace errors like `MOVED`. +fn is_scanwise_retryable_error(err: &RedisError) -> bool { + matches!( + err.kind(), + ErrorKind::IoError + | ErrorKind::AllConnectionsUnavailable + | ErrorKind::ConnectionNotFoundForRoute + | ErrorKind::ClusterDown + | ErrorKind::FatalSendError + ) } -// If the scan command failed to route to the address we will check we will first refresh the slots, we will check if all slots are covered by cluster, -// and if so we will try to get a new address to scan for handling case of failover. -// if all slots are not covered by the cluster we will return an error indicating that the cluster is not well configured. -// if all slots are covered by cluster but we failed to get a new address to scan we will return an error indicating that we failed to get a new address to scan. -// if we got a new address to scan but the scan command failed to route to the address we will return an error indicating that we failed to route the command. -async fn retry_scan( +/// Gets the next scan state by finding the next address to scan. +/// The method updates the scanned slots map and retrieves the next address to scan. +/// If the address epoch has changed, the method creates a new scan state without updating the scanned slots map. +/// If the address epoch has not changed, the method updates the scanned slots map with the slots owned by the address. +/// The method returns the new scan state with the updated cursor, scanned slots map, address, and epoch. +/// The method is used to continue scanning the cluster after completing a scan segment. 
+async fn next_scan_state( + core: &Arc>, scan_state: &ScanState, - core: &C, - match_pattern: Option>, - count: Option, - object_type: Option, -) -> RedisResult<(RedisResult, ScanState)> + cluster_scan_args: &ClusterScanArgs, +) -> RedisResult> where - C: ClusterInScan, + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, { - // TODO: This mechanism of refreshing on failure to route to address should be part of the routing mechanism - // After the routing mechanism is updated to handle this case, this refresh in the case bellow should be removed - core.refresh_if_topology_changed().await.map_err(|err| { - RedisError::from(( - ErrorKind::ResponseError, - "Error during cluster scan: failed to refresh slots", - format!("{:?}", err), - )) - })?; - if !core.are_all_slots_covered().await { - return Err(RedisError::from(( - ErrorKind::NotAllSlotsCovered, - "Not all slots are covered by the cluster, please check the cluster configuration", - ))); + let next_slot = next_slot(&scan_state.scanned_slots_map).unwrap_or(0); + let mut scanned_slots_map = scan_state.scanned_slots_map; + match next_address_to_scan( + core, + next_slot, + &mut scanned_slots_map, + cluster_scan_args.allow_non_covered_slots, + ) { + Ok(NextNodeResult::Address(new_address)) => { + let new_epoch = core.address_epoch(&new_address).await.unwrap_or(0); + Ok(Some(ScanState::new( + 0, + scanned_slots_map, + new_address, + new_epoch, + ScanStateStage::InProgress, + ))) + } + Ok(NextNodeResult::AllSlotsCompleted) => Ok(None), + Err(err) => Err(err), + } +} + +/// Attempts to scan the cluster for keys based on the current scan state. +/// Sends the `SCAN` command to the current address and processes the response. +/// On retryable errors, refreshes the cluster topology and retries the scan. +/// Returns the new cursor and keys found upon success. 
+async fn try_scan( + scan_state: &ScanState, + cluster_scan_args: &ClusterScanArgs, + core: Arc>, +) -> RedisResult<((u64, Vec), ScanState)> +where + C: ConnectionLike + Connect + Clone + Send + Sync + 'static, +{ + let mut new_scan_state = scan_state.clone(); + + loop { + match send_scan(&new_scan_state, cluster_scan_args, core.clone()).await { + Ok(scan_response) => { + let (new_cursor, new_keys) = from_redis_value::<(u64, Vec)>(&scan_response)?; + return Ok(((new_cursor, new_keys), new_scan_state)); + } + Err(err) if is_scanwise_retryable_error(&err) => { + ClusterConnInner::check_topology_and_refresh_if_diff( + core.clone(), + &RefreshPolicy::NotThrottable, + ) + .await?; + + if let Some(next_scan_state) = + next_scan_state(&core, &new_scan_state, cluster_scan_args).await? + { + new_scan_state = next_scan_state; + } else { + return Ok(((0, Vec::new()), ScanState::create_finished_state())); + } + } + Err(err) => return Err(err), + } } - // If for some reason we failed to reach the address we don't know if its a scale down or a failover. - // Since it might be scale down we cant just keep going with the current state we the same cursor as we are at - // the same point in the new address, so we need to get the new address own the next slot that haven't been scanned - // and start from the beginning of this address. 
- let next_slot = scan_state - .get_next_slot(&scan_state.scanned_slots_map) - .unwrap_or(0); - let address = core.get_address_by_slot(next_slot).await?; - - let new_epoch = core.get_address_epoch(&address).await.unwrap_or(0); - let scan_state = &ScanState::new( - 0, - scan_state.scanned_slots_map, - address, - new_epoch, - ScanStateStage::InProgress, - ); - let res = ( - send_scan(scan_state, core, match_pattern, count, object_type).await, - scan_state.clone(), - ); - Ok(res) } #[cfg(test)] mod tests { - use super::*; - #[test] - fn test_creation_of_empty_scan_wrapper() { - let scan_state_wrapper = ScanStateRC::new(); - assert!(scan_state_wrapper.status == ScanStateStage::Initiating); - } + #[tokio::test] + async fn test_cluster_scan_args_builder() { + let args = ClusterScanArgs::builder() + .with_match_pattern("user:*") + .with_count(100) + .with_object_type(ObjectType::Hash) + .allow_non_covered_slots(true) + .build(); - #[test] - fn test_creation_of_scan_state_wrapper_from() { - let scan_state = ScanState { - cursor: 0, - scanned_slots_map: [0; BITS_ARRAY_SIZE], - address_in_scan: String::from("address1").into(), - address_epoch: 1, - scan_status: ScanStateStage::InProgress, - }; - - let scan_state_wrapper = ScanStateRC::from_scan_state(scan_state); - assert!(!scan_state_wrapper.is_finished()); + assert_eq!(args.match_pattern, Some(b"user:*".to_vec())); + assert_eq!(args.count, Some(100)); + assert_eq!(args.object_type, Some(ObjectType::Hash)); + assert!(args.allow_non_covered_slots); } - #[test] - // Test the get_next_slot method - fn test_scan_state_get_next_slot() { - let scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE]; - let scan_state = ScanState { - cursor: 0, - scanned_slots_map, - address_in_scan: String::from("address1").into(), - address_epoch: 1, - scan_status: ScanStateStage::InProgress, - }; - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(0)); - // Set the first slot to 1 - let mut 
scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE]; - scanned_slots_map[0] = 1; - let scan_state = ScanState { - cursor: 0, - scanned_slots_map, - address_in_scan: String::from("address1").into(), - address_epoch: 1, - scan_status: ScanStateStage::InProgress, - }; - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(1)); - } - // Create a mock connection - struct MockConnection; - #[async_trait] - impl ClusterInScan for MockConnection { - async fn refresh_if_topology_changed(&self) -> RedisResult { - Ok(true) - } - async fn get_address_by_slot(&self, _slot: u16) -> RedisResult> { - Ok("mock_address".to_string().into()) - } - async fn get_address_epoch(&self, _address: &str) -> Result { - Ok(0) - } - async fn get_slots_of_address(&self, address: Arc) -> Vec { - if address.as_str() == "mock_address" { - vec![3, 4, 5] - } else { - vec![0, 1, 2] - } - } - async fn route_command(&self, _: Cmd, _: &str) -> RedisResult { - unimplemented!() - } - async fn are_all_slots_covered(&self) -> bool { - true - } - } - // Test the initiate_scan function #[tokio::test] - async fn test_initiate_scan() { - let connection = MockConnection; - let scan_state = ScanState::initiate_scan(&connection).await.unwrap(); - - // Assert that the scan state is initialized correctly - assert_eq!(scan_state.cursor, 0); - assert_eq!(scan_state.scanned_slots_map, [0; BITS_ARRAY_SIZE]); - assert_eq!( - scan_state.address_in_scan, - "mock_address".to_string().into() + async fn test_scan_state_new() { + let address = Arc::new("127.0.0.1:6379".to_string()); + let scan_state = ScanState::new( + 0, + [0; BITS_ARRAY_SIZE as usize], + address.clone(), + 1, + ScanStateStage::InProgress, ); - assert_eq!(scan_state.address_epoch, 0); - } - // Test the get_next_slot function - #[test] - fn test_get_next_slot() { - let scan_state = ScanState { - cursor: 0, - scanned_slots_map: [0; BITS_ARRAY_SIZE], - address_in_scan: "".to_string().into(), - address_epoch: 0, - 
scan_status: ScanStateStage::InProgress, - }; - // Test when all first bits of each u6 are set to 1, the next slots should be 1 - let scanned_slots_map: SlotsBitsArray = [1; BITS_ARRAY_SIZE]; - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(1)); - - // Test when all slots are scanned, the next slot should be 0 - let scanned_slots_map: SlotsBitsArray = [u64::MAX; BITS_ARRAY_SIZE]; - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(16385)); - - // Test when first, second, fourth, sixth and eighth slots scanned, the next slot should be 2 - let mut scanned_slots_map: SlotsBitsArray = [0; BITS_ARRAY_SIZE]; - scanned_slots_map[0] = 171; // 10101011 - let next_slot = scan_state.get_next_slot(&scanned_slots_map); - assert_eq!(next_slot, Some(2)); + assert_eq!(scan_state.cursor, 0); + assert_eq!(scan_state.scanned_slots_map, [0; BITS_ARRAY_SIZE as usize]); + assert_eq!(scan_state.address_in_scan, address); + assert_eq!(scan_state.address_epoch, 1); + assert_eq!(scan_state.scan_status, ScanStateStage::InProgress); } - // Test the update_scan_state_and_get_next_address function #[tokio::test] - async fn test_update_scan_state_and_get_next_address() { - let connection = MockConnection; - let scan_state = ScanState::initiate_scan(&connection).await; - let updated_scan_state = scan_state - .unwrap() - .create_updated_scan_state_for_completed_address(&connection) - .await - .unwrap(); + async fn test_scan_state_create_finished() { + let scan_state = ScanState::create_finished_state(); - // cursor should be reset to 0 - assert_eq!(updated_scan_state.cursor, 0); + assert_eq!(scan_state.cursor, 0); + assert_eq!(scan_state.scanned_slots_map, [0; BITS_ARRAY_SIZE as usize]); + assert_eq!(scan_state.address_in_scan, Arc::new(String::new())); + assert_eq!(scan_state.address_epoch, 0); + assert_eq!(scan_state.scan_status, ScanStateStage::Finished); + } - // address_in_scan should be updated to the new 
address - assert_eq!( - updated_scan_state.address_in_scan, - "mock_address".to_string().into() - ); + #[tokio::test] + async fn test_mark_slot_as_scanned() { + let mut scanned_slots_map = [0; BITS_ARRAY_SIZE as usize]; + mark_slot_as_scanned(&mut scanned_slots_map, 5); - // address_epoch should be updated to the new address epoch - assert_eq!(updated_scan_state.address_epoch, 0); + assert_eq!(scanned_slots_map[0], 1 << 5); } #[tokio::test] - async fn test_update_scan_state_without_updating_scanned_map() { - let connection = MockConnection; + async fn test_next_slot() { let scan_state = ScanState::new( 0, - [0; BITS_ARRAY_SIZE], - "address".to_string().into(), - 0, + [0; BITS_ARRAY_SIZE as usize], + Arc::new("127.0.0.1:6379".to_string()), + 1, ScanStateStage::InProgress, ); - let scanned_slots_map = scan_state.scanned_slots_map; - let updated_scan_state = scan_state - .creating_state_without_slot_changes(&connection) - .await - .unwrap(); - assert_eq!(updated_scan_state.scanned_slots_map, scanned_slots_map); - assert_eq!(updated_scan_state.cursor, 0); - assert_eq!( - updated_scan_state.address_in_scan, - "mock_address".to_string().into() - ); - assert_eq!(updated_scan_state.address_epoch, 0); + let next_slot = next_slot(&scan_state.scanned_slots_map); + + assert_eq!(next_slot, Some(0)); } } diff --git a/glide-core/redis-rs/redis/src/commands/mod.rs b/glide-core/redis-rs/redis/src/commands/mod.rs index 22a68cc987..19dae750c2 100644 --- a/glide-core/redis-rs/redis/src/commands/mod.rs +++ b/glide-core/redis-rs/redis/src/commands/mod.rs @@ -16,6 +16,9 @@ mod json; #[cfg(feature = "cluster-async")] pub use cluster_scan::ScanStateRC; +#[cfg(feature = "cluster-async")] +pub use cluster_scan::ClusterScanArgs; + #[cfg(feature = "cluster-async")] pub(crate) mod cluster_scan; diff --git a/glide-core/redis-rs/redis/src/lib.rs b/glide-core/redis-rs/redis/src/lib.rs index 0c960f3b4e..7121ee03c7 100644 --- a/glide-core/redis-rs/redis/src/lib.rs +++ 
b/glide-core/redis-rs/redis/src/lib.rs @@ -457,6 +457,9 @@ pub use crate::commands::ScanStateRC; #[cfg(feature = "cluster-async")] pub use crate::commands::ObjectType; +#[cfg(feature = "cluster-async")] +pub use crate::commands::ClusterScanArgs; + #[cfg(feature = "cluster")] mod cluster_client; diff --git a/glide-core/redis-rs/redis/tests/test_cluster_scan.rs b/glide-core/redis-rs/redis/tests/test_cluster_scan.rs index cfc4bae594..96910fe7f8 100644 --- a/glide-core/redis-rs/redis/tests/test_cluster_scan.rs +++ b/glide-core/redis-rs/redis/tests/test_cluster_scan.rs @@ -5,9 +5,68 @@ mod support; mod test_cluster_scan_async { use crate::support::*; use rand::Rng; - use redis::cluster_routing::{RoutingInfo, SingleNodeRoutingInfo}; - use redis::{cmd, from_redis_value, ObjectType, RedisResult, ScanStateRC, Value}; + use redis::cluster_routing::{ + MultipleNodeRoutingInfo, ResponsePolicy, RoutingInfo, SingleNodeRoutingInfo, + }; + use redis::{ + cmd, from_redis_value, ClusterScanArgs, ObjectType, RedisResult, ScanStateRC, Value, + }; use std::time::Duration; + use tokio::time::{sleep, Instant}; + + async fn del_slots_range( + cluster: &TestClusterContext, + range: (u16, u16), + ) -> Result<(), &'static str> { + let mut cluster_conn = cluster.async_connection(None).await; + let mut del_slots_cmd = cmd("CLUSTER"); + let (start, end) = range; + del_slots_cmd.arg("DELSLOTSRANGE").arg(start).arg(end); + let _: RedisResult = cluster_conn + .route_command( + &del_slots_cmd, + RoutingInfo::MultiNode(( + MultipleNodeRoutingInfo::AllNodes, + Some(ResponsePolicy::AllSucceeded), + )), + ) + .await; + + let timeout = Duration::from_secs(10); + let mut invalid = false; + loop { + sleep(Duration::from_millis(500)).await; + + let now = Instant::now(); + if now.elapsed() > timeout { + return Err("Timeout while waiting for slots to be deleted"); + } + + let slot_distribution = + cluster.get_slots_ranges_distribution(&cluster.get_cluster_nodes().await); + for (_, _, _, slot_ranges) in 
slot_distribution { + println!("slot_ranges: {:?}", slot_ranges); + for slot_range in slot_ranges { + let (slot_start, slot_end) = (slot_range[0], slot_range[1]); + + println!("slot_start: {}, slot_end: {}", slot_start, slot_end); + if slot_start >= start && slot_start <= end { + invalid = true; + continue; + } + if slot_end >= start && slot_end <= end { + invalid = true; + continue; + } + } + } + + if invalid { + continue; + } + return Ok(()); + } + } async fn kill_one_node( cluster: &TestClusterContext, @@ -49,7 +108,12 @@ mod test_cluster_scan_async { #[tokio::test] #[serial_test::serial] async fn test_async_cluster_scan() { - let cluster = TestClusterContext::new(3, 0); + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(1), + false, + ); let mut connection = cluster.async_connection(None).await; // Set some keys @@ -67,14 +131,14 @@ mod test_cluster_scan_async { let mut keys: Vec = vec![]; loop { let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, None) + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) .await .unwrap(); scan_state_rc = next_cursor; let mut scan_keys = scan_keys .into_iter() .map(|v| from_redis_value(&v).unwrap()) - .collect::>(); // Change the type of `keys` to `Vec` + .collect::>(); keys.append(&mut scan_keys); if scan_state_rc.is_finished() { break; @@ -88,10 +152,114 @@ mod test_cluster_scan_async { } } + #[tokio::test] + #[serial_test::serial] + async fn test_async_cluster_scan_with_allow_non_covered_slots() { + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(1), + false, + ); + + let mut connection = cluster.async_connection(None).await; + let mut expected_keys: Vec = Vec::new(); + + for i in 0..1000 { + let key = format!("key{}", i); + let _: Result<(), redis::RedisError> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + 
expected_keys.push(key); + } + + let mut scan_state_rc = ScanStateRC::new(); + let mut keys: Vec = Vec::new(); + loop { + let cluster_scan_args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection + .cluster_scan(scan_state_rc, cluster_scan_args) + .await + .unwrap(); + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + + keys.sort(); + expected_keys.sort(); + assert_eq!(keys, expected_keys); + } + + #[tokio::test] + #[serial_test::serial] + async fn test_async_cluster_scan_with_delslots() { + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(1), + false, + ); + let mut connection = cluster.async_connection(None).await; + let mut expected_keys: Vec = Vec::new(); + + for i in 0..1000 { + let key = format!("key{}", i); + let _: Result<(), redis::RedisError> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + expected_keys.push(key); + } + + del_slots_range(&cluster, (1, 100)).await.unwrap(); + + let mut scan_state_rc = ScanStateRC::new(); + let mut keys: Vec = Vec::new(); + loop { + let cluster_scan_args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection + .cluster_scan(scan_state_rc, cluster_scan_args) + .await + .unwrap(); + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + + keys.sort(); + expected_keys.sort(); + assert_eq!(keys, expected_keys); + } + #[tokio::test] #[serial_test::serial] // test cluster scan with slot migration in the middle async fn 
test_async_cluster_scan_with_migration() { - let cluster = TestClusterContext::new(3, 0); + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(1), + false, + ); let mut connection = cluster.async_connection(None).await; // Set some keys @@ -114,7 +282,7 @@ mod test_cluster_scan_async { loop { count += 1; let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, None) + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) .await .unwrap(); scan_state_rc = next_cursor; @@ -189,18 +357,21 @@ mod test_cluster_scan_async { let mut keys: Vec = Vec::new(); let mut count = 0; let mut result: RedisResult = Ok(Value::Nil); + let mut next_cursor = ScanStateRC::new(); + let mut scan_keys; loop { count += 1; - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; - let (next_cursor, scan_keys) = match scan_response { - Ok((cursor, keys)) => (cursor, keys), + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; + (next_cursor, scan_keys) = match scan_response { + Ok((cursor, keys)) => (cursor.clone(), keys), Err(e) => { result = Err(e); break; } }; - scan_state_rc = next_cursor; + scan_state_rc = next_cursor.clone(); keys.extend(scan_keys.into_iter().map(|v| from_redis_value(&v).unwrap())); if scan_state_rc.is_finished() { break; @@ -225,6 +396,47 @@ mod test_cluster_scan_async { } // We expect an error of finding address assert!(result.is_err()); + + // Test we can continue scanning after the fail using allow_non_covered_slots=true + scan_state_rc = next_cursor; + // config cluster to allow missing slots + let mut config_cmd = cmd("CONFIG"); + config_cmd + .arg("SET") + .arg("cluster-require-full-coverage") + .arg("no"); + let res: RedisResult = connection + .route_command( + &config_cmd, + 
RoutingInfo::MultiNode((MultipleNodeRoutingInfo::AllNodes, None)), + ) + .await; + print!("config result: {:?}", res); + let args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + loop { + let res = connection + .cluster_scan(scan_state_rc.clone(), args.clone()) + .await; + let (next_cursor, scan_keys): (ScanStateRC, Vec) = match res { + Ok((cursor, keys)) => (cursor.clone(), keys), + Err(e) => { + println!("error: {:?}", e); + break; + } + }; + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + assert!(scan_state_rc.is_finished()); } #[tokio::test] @@ -268,8 +480,9 @@ mod test_cluster_scan_async { let mut count = 0; loop { count += 1; - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; if scan_response.is_err() { println!("error: {:?}", scan_response); } @@ -439,12 +652,13 @@ mod test_cluster_scan_async { } // Scan the keys let mut scan_state_rc = ScanStateRC::new(); - let mut keys: Vec = Vec::new(); + let mut keys: Vec = vec![]; let mut count = 0; loop { count += 1; - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; if scan_response.is_err() { println!("error: {:?}", scan_response); } @@ -513,7 +727,7 @@ mod test_cluster_scan_async { let mut keys: Vec = vec![]; loop { let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, None) + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) .await .unwrap(); scan_state_rc = next_cursor; @@ 
-574,7 +788,7 @@ mod test_cluster_scan_async { let mut keys: Vec = vec![]; loop { let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, None) + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) .await .unwrap(); scan_state_rc = next_cursor; @@ -642,15 +856,20 @@ mod test_cluster_scan_async { let mut scan_state_rc = ScanStateRC::new(); let mut keys: Vec = vec![]; loop { + let cluster_scan_args = ClusterScanArgs::builder() + .with_match_pattern("key:pattern:*") + .allow_non_covered_slots(false) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan_with_pattern(scan_state_rc, "key:pattern:*", None, None) + .cluster_scan(scan_state_rc, cluster_scan_args) .await .unwrap(); scan_state_rc = next_cursor; let mut scan_keys = scan_keys .into_iter() .map(|v| from_redis_value(&v).unwrap()) - .collect::>(); // Change the type of `keys` to `Vec` + .collect::>(); keys.append(&mut scan_keys); if scan_state_rc.is_finished() { break; @@ -665,7 +884,7 @@ mod test_cluster_scan_async { for key in expected_keys.iter() { assert!(keys.contains(key)); } - assert!(keys.len() == expected_keys.len()); + assert_eq!(keys.len(), expected_keys.len()); } #[tokio::test] @@ -701,15 +920,20 @@ mod test_cluster_scan_async { let mut scan_state_rc = ScanStateRC::new(); let mut keys: Vec = vec![]; loop { + let cluster_scan_args = ClusterScanArgs::builder() + .with_object_type(ObjectType::Set) + .allow_non_covered_slots(false) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, None, Some(ObjectType::Set)) + .cluster_scan(scan_state_rc, cluster_scan_args) .await .unwrap(); scan_state_rc = next_cursor; let mut scan_keys = scan_keys .into_iter() .map(|v| from_redis_value(&v).unwrap()) - .collect::>(); // Change the type of `keys` to `Vec` + .collect::>(); keys.append(&mut scan_keys); if scan_state_rc.is_finished() { break; @@ -724,7 +948,7 @@ mod 
test_cluster_scan_async { for key in expected_keys.iter() { assert!(keys.contains(key)); } - assert!(keys.len() == expected_keys.len()); + assert_eq!(keys.len(), expected_keys.len()); } #[tokio::test] @@ -755,24 +979,35 @@ mod test_cluster_scan_async { let mut keys: Vec = vec![]; let mut comparing_times = 0; loop { + let cluster_scan_args = ClusterScanArgs::builder() + .with_count(100) + .allow_non_covered_slots(false) + .build(); + + let cluster_scan_args_no_count = ClusterScanArgs::builder() + .allow_non_covered_slots(false) + .build(); + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc.clone(), Some(100), None) + .cluster_scan(scan_state_rc.clone(), cluster_scan_args) .await .unwrap(); + let (_, scan_without_count_keys): (ScanStateRC, Vec) = connection - .cluster_scan(scan_state_rc, Some(100), None) + .cluster_scan(scan_state_rc, cluster_scan_args_no_count) .await .unwrap(); + if !scan_keys.is_empty() && !scan_without_count_keys.is_empty() { assert!(scan_keys.len() >= scan_without_count_keys.len()); - comparing_times += 1; } + scan_state_rc = next_cursor; let mut scan_keys = scan_keys .into_iter() .map(|v| from_redis_value(&v).unwrap()) - .collect::>(); // Change the type of `keys` to `Vec` + .collect::>(); keys.append(&mut scan_keys); if scan_state_rc.is_finished() { break; @@ -788,7 +1023,7 @@ mod test_cluster_scan_async { for key in expected_keys.iter() { assert!(keys.contains(key)); } - assert!(keys.len() == expected_keys.len()); + assert_eq!(keys.len(), expected_keys.len()); } #[tokio::test] @@ -821,8 +1056,9 @@ mod test_cluster_scan_async { let mut count = 0; loop { count += 1; - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; if scan_response.is_err() { println!("error: {:?}", scan_response); } @@ -835,7 +1071,7 @@ 
mod test_cluster_scan_async { if count == 5 { drop(cluster); let scan_response: RedisResult<(ScanStateRC, Vec)> = connection - .cluster_scan(scan_state_rc.clone(), None, None) + .cluster_scan(scan_state_rc.clone(), ClusterScanArgs::default()) .await; assert!(scan_response.is_err()); break; @@ -844,8 +1080,9 @@ mod test_cluster_scan_async { cluster = TestClusterContext::new(3, 0); connection = cluster.async_connection(None).await; loop { - let scan_response: RedisResult<(ScanStateRC, Vec)> = - connection.cluster_scan(scan_state_rc, None, None).await; + let scan_response: RedisResult<(ScanStateRC, Vec)> = connection + .cluster_scan(scan_state_rc, ClusterScanArgs::default()) + .await; if scan_response.is_err() { println!("error: {:?}", scan_response); } @@ -857,4 +1094,246 @@ mod test_cluster_scan_async { } } } + + #[tokio::test] + #[serial_test::serial] + /// Test a case where a node is killed, key set into the cluster, and the client is still able to scan all keys + async fn test_async_cluster_scan_uncovered_slots_of_missing_node() { + // Create a cluster with 3 nodes + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(0), + false, + ); + let mut connection = cluster.async_connection(None).await; + + let mut config_cmd = cmd("CONFIG"); + config_cmd + .arg("SET") + .arg("cluster-require-full-coverage") + .arg("no"); + let _: RedisResult = connection + .route_command( + &config_cmd, + RoutingInfo::MultiNode((MultipleNodeRoutingInfo::AllNodes, None)), + ) + .await; + // Kill one node + let mut cluster_nodes = cluster.get_cluster_nodes().await; + let slot_distribution = cluster.get_slots_ranges_distribution(&cluster_nodes); + let killed_node_routing = kill_one_node(&cluster, slot_distribution.clone()).await; + let ready = cluster.wait_for_fail_to_finish(&killed_node_routing).await; + match ready { + Ok(_) => {} + Err(e) => { + println!("error: {:?}", e); + } + } + + // Compare slot distribution before and after 
killing a node + cluster_nodes = cluster.get_cluster_nodes().await; + let new_slot_distribution = cluster.get_slots_ranges_distribution(&cluster_nodes); + assert_ne!(slot_distribution, new_slot_distribution); + let mut excepted_keys: Vec = vec![]; + // Set some keys + for i in 0..100 { + let key = format!("key{}", i); + let res: Result<(), redis::RedisError> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + if res.is_ok() { + excepted_keys.push(key); + } + } + + // Scan the keys + let mut scan_state_rc = ScanStateRC::new(); + let mut keys: Vec = vec![]; + let args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + loop { + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection + .cluster_scan(scan_state_rc, args.clone()) + .await + .unwrap(); + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); // Change the type of `keys` to `Vec` + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + // Check if all keys available scanned + keys.sort(); + keys.dedup(); + excepted_keys.sort(); + excepted_keys.dedup(); + for key in excepted_keys.iter() { + assert!(keys.contains(key)); + } + assert!(keys.len() > 0); + } + + #[tokio::test] + #[serial_test::serial] + /// Test scanning after killing a node and compare with "KEYS *" from remaining nodes + async fn test_async_cluster_scan_after_node_killed() { + // Create a cluster with 3 nodes + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(0), + false, + ); + let mut connection = cluster.async_connection(None).await; + + // Set cluster-require-full-coverage to no + let mut config_cmd = cmd("CONFIG"); + config_cmd + .arg("SET") + .arg("cluster-require-full-coverage") + .arg("no"); + let _: RedisResult = connection + .route_command( + &config_cmd, + 
RoutingInfo::MultiNode((MultipleNodeRoutingInfo::AllNodes, None)), + ) + .await; + + for i in 0..100 { + let key = format!("key{}", i); + let _res: RedisResult<()> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + } + + // Kill one node + let cluster_nodes = cluster.get_cluster_nodes().await; + let slot_distribution = cluster.get_slots_ranges_distribution(&cluster_nodes); + let killed_node_routing = kill_one_node(&cluster, slot_distribution.clone()).await; + let ready = cluster.wait_for_fail_to_finish(&killed_node_routing).await; + match ready { + Ok(_) => {} + Err(e) => { + println!("error: {:?}", e); + } + } + + // Scan the keys + let mut scan_state_rc = ScanStateRC::new(); + let mut keys: Vec = vec![]; + let args = ClusterScanArgs::builder() + .allow_non_covered_slots(true) + .build(); + loop { + let (next_cursor, scan_keys): (ScanStateRC, Vec) = connection + .cluster_scan(scan_state_rc, args.clone()) + .await + .unwrap(); + scan_state_rc = next_cursor; + let mut scan_keys = scan_keys + .into_iter() + .map(|v| from_redis_value(&v).unwrap()) + .collect::>(); // Change the type of `keys` to `Vec` + keys.append(&mut scan_keys); + if scan_state_rc.is_finished() { + break; + } + } + + // Get keys from remaining nodes using "KEYS *" + let mut keys_from_keys_command: Vec = Vec::new(); + let key_res: RedisResult = connection + .route_command( + cmd("KEYS").arg("*"), + RoutingInfo::MultiNode(( + MultipleNodeRoutingInfo::AllNodes, + Some(ResponsePolicy::CombineArrays), + )), + ) + .await; + if let Ok(value) = key_res { + let values: Vec = from_redis_value(&value).unwrap(); + keys_from_keys_command + .extend(values.into_iter().map(|v| from_redis_value(&v).unwrap())); + } + + // Sort and dedup keys + keys.sort(); + keys.dedup(); + keys_from_keys_command.sort(); + keys_from_keys_command.dedup(); + + // Check if scanned keys match keys from "KEYS *" + assert_eq!(keys, keys_from_keys_command); + } + + #[tokio::test] + 
#[serial_test::serial] + /// Test scanning with allow_non_covered_slots as false after killing a node + async fn test_async_cluster_scan_uncovered_slots_fail() { + // Create a cluster with 3 nodes + let cluster = TestClusterContext::new_with_cluster_client_builder( + 3, + 0, + |builder| builder.retries(0), + false, + ); + let mut connection = cluster.async_connection(None).await; + + // Kill one node + let cluster_nodes = cluster.get_cluster_nodes().await; + let slot_distribution = cluster.get_slots_ranges_distribution(&cluster_nodes); + let killed_node_routing = kill_one_node(&cluster, slot_distribution.clone()).await; + let ready = cluster.wait_for_fail_to_finish(&killed_node_routing).await; + match ready { + Ok(_) => {} + Err(e) => { + println!("error: {:?}", e); + } + } + + for i in 0..100 { + let key = format!("key{}", i); + let _res: RedisResult<()> = redis::cmd("SET") + .arg(&key) + .arg("value") + .query_async(&mut connection) + .await; + } + + // Try scanning with allow_non_covered_slots as false + let mut scan_state_rc = ScanStateRC::new(); + let mut had_error = false; + loop { + let result = connection + .cluster_scan(scan_state_rc.clone(), ClusterScanArgs::default()) + .await; + + match result { + Ok((next_cursor, _)) => { + scan_state_rc = next_cursor; + if scan_state_rc.is_finished() { + break; + } + } + Err(e) => { + had_error = true; + assert_eq!(e.kind(), redis::ErrorKind::NotAllSlotsCovered); + break; + } + } + } + + assert!(had_error); + } } diff --git a/glide-core/src/client/mod.rs b/glide-core/src/client/mod.rs index a560e17697..73eee144b1 100644 --- a/glide-core/src/client/mod.rs +++ b/glide-core/src/client/mod.rs @@ -13,7 +13,7 @@ use redis::cluster_routing::{ }; use redis::cluster_slotmap::ReadFromReplicaStrategy; use redis::{ - Cmd, ErrorKind, FromRedisValue, InfoDict, ObjectType, PushInfo, RedisError, RedisResult, + ClusterScanArgs, Cmd, ErrorKind, FromRedisValue, PushInfo, RedisError, RedisResult, ScanStateRC, Value, }; pub use 
standalone_client::StandaloneClient; @@ -27,6 +27,7 @@ use self::value_conversion::{convert_to_expected_type, expected_type_for_cmd, ge mod reconnecting_connection; mod standalone_client; mod value_conversion; +use redis::InfoDict; use tokio::sync::mpsc; use versions::Versioning; @@ -310,33 +311,16 @@ impl Client { pub async fn cluster_scan<'a>( &'a mut self, scan_state_cursor: &'a ScanStateRC, - match_pattern: &'a Option>, - count: Option, - object_type: Option, + cluster_scan_args: ClusterScanArgs, ) -> RedisResult { match self.internal_client { ClientWrapper::Standalone(_) => { unreachable!("Cluster scan is not supported in standalone mode") } ClientWrapper::Cluster { ref mut client } => { - let (cursor, keys) = match match_pattern { - Some(pattern) => { - client - .cluster_scan_with_pattern( - scan_state_cursor.clone(), - pattern, - count, - object_type, - ) - .await? - } - None => { - client - .cluster_scan(scan_state_cursor.clone(), count, object_type) - .await? - } - }; - + let (cursor, keys) = client + .cluster_scan(scan_state_cursor.clone(), cluster_scan_args) + .await?; let cluster_cursor_id = if cursor.is_finished() { Value::BulkString(FINISHED_SCAN_CURSOR.into()) } else { diff --git a/glide-core/src/protobuf/command_request.proto b/glide-core/src/protobuf/command_request.proto index 30b33362af..d7c693cfd6 100644 --- a/glide-core/src/protobuf/command_request.proto +++ b/glide-core/src/protobuf/command_request.proto @@ -506,6 +506,7 @@ message ClusterScan { optional bytes match_pattern = 2; optional int64 count = 3; optional string object_type = 4; + bool allow_non_covered_slots = 5; } message UpdateConnectionPassword { diff --git a/glide-core/src/socket_listener.rs b/glide-core/src/socket_listener.rs index f148bbdede..4896f83565 100644 --- a/glide-core/src/socket_listener.rs +++ b/glide-core/src/socket_listener.rs @@ -19,7 +19,7 @@ use redis::cluster_routing::{ MultipleNodeRoutingInfo, Route, RoutingInfo, SingleNodeRoutingInfo, SlotAddr, }; use 
redis::cluster_routing::{ResponsePolicy, Routable}; -use redis::{Cmd, PushInfo, RedisError, ScanStateRC, Value}; +use redis::{ClusterScanArgs, Cmd, PushInfo, RedisError, ScanStateRC, Value}; use std::cell::Cell; use std::collections::HashSet; use std::rc::Rc; @@ -321,30 +321,23 @@ async fn cluster_scan(cluster_scan: ClusterScan, mut client: Client) -> ClientUs } else { get_cluster_scan_cursor(cursor)? }; - - let match_pattern = cluster_scan.match_pattern.map(|pattern| pattern.into()); - let count = cluster_scan.count.map(|count| count as usize); - - let object_type = match cluster_scan.object_type { - Some(char_object_type) => match char_object_type.to_string().to_lowercase().as_str() { - STRING => Some(redis::ObjectType::String), - LIST => Some(redis::ObjectType::List), - SET => Some(redis::ObjectType::Set), - ZSET => Some(redis::ObjectType::ZSet), - HASH => Some(redis::ObjectType::Hash), - STREAM => Some(redis::ObjectType::Stream), - _ => { - return Err(ClientUsageError::Internal(format!( - "Received invalid object type: {:?}", - char_object_type - ))) - } - }, - None => None, - }; + let mut cluster_scan_args_builder = + ClusterScanArgs::builder().allow_non_covered_slots(cluster_scan.allow_non_covered_slots); + if let Some(match_pattern) = cluster_scan.match_pattern { + cluster_scan_args_builder = + cluster_scan_args_builder.with_match_pattern::(match_pattern); + } + if let Some(count) = cluster_scan.count { + cluster_scan_args_builder = cluster_scan_args_builder.with_count(count as u32); + } + if let Some(object_type) = cluster_scan.object_type { + cluster_scan_args_builder = + cluster_scan_args_builder.with_object_type(object_type.to_string().into()); + } + let cluster_scan_args = cluster_scan_args_builder.build(); client - .cluster_scan(&cluster_scan_cursor, &match_pattern, count, object_type) + .cluster_scan(&cluster_scan_cursor, cluster_scan_args) .await .map_err(|err| err.into()) } diff --git 
a/java/client/src/main/java/glide/api/models/commands/scan/ScanOptions.java b/java/client/src/main/java/glide/api/models/commands/scan/ScanOptions.java index 6fffc46f35..2b7102bc2f 100644 --- a/java/client/src/main/java/glide/api/models/commands/scan/ScanOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/scan/ScanOptions.java @@ -6,6 +6,7 @@ import glide.api.models.GlideString; import glide.ffi.resolvers.ObjectTypeResolver; import glide.utils.ArrayTransformUtils; +import lombok.Builder; import lombok.EqualsAndHashCode; import lombok.experimental.SuperBuilder; @@ -28,6 +29,13 @@ public class ScanOptions extends BaseScanOptions { */ private final ObjectType type; + /** + * If set to true, the scan will perform even if some slots are not covered by any node. It's + * important to note that when set to true, the scan has no guarantee to cover all keys in the + * cluster, and the method loses its way to validate the progress of the scan. Defaults to false. + */ + @Builder.Default private final Boolean allowNonCoveredSlots = false; + /** Defines the complex data types available for a SCAN request. */ public enum ObjectType { STRING(ObjectTypeResolver.OBJECT_TYPE_STRING_NATIVE_NAME), @@ -86,4 +94,11 @@ public Long getCount() { public ObjectType getType() { return type; } + + /** + * @return whether non-covered slots are allowed. 
+ */ + public Boolean getAllowNonCoveredSlots() { + return allowNonCoveredSlots; + } } diff --git a/java/client/src/main/java/glide/managers/CommandManager.java b/java/client/src/main/java/glide/managers/CommandManager.java index d069c6bd72..47b0de7d75 100644 --- a/java/client/src/main/java/glide/managers/CommandManager.java +++ b/java/client/src/main/java/glide/managers/CommandManager.java @@ -428,6 +428,10 @@ protected CommandRequest.Builder prepareCursorRequest( clusterScanBuilder.setObjectType(options.getType().getNativeName()); } + if (options.getAllowNonCoveredSlots() != null) { + clusterScanBuilder.setAllowNonCoveredSlots(options.getAllowNonCoveredSlots()); + } + return CommandRequest.newBuilder().setClusterScan(clusterScanBuilder.build()); } diff --git a/node/src/Commands.ts b/node/src/Commands.ts index 8411cf212d..972c74777c 100644 --- a/node/src/Commands.ts +++ b/node/src/Commands.ts @@ -3842,6 +3842,19 @@ export interface ScanOptions extends BaseScanOptions { type?: ObjectType; } +/** + * Options for the SCAN command. + * `match`: The match filter is applied to the result of the command and will only include keys that match the pattern specified. + * `count`: `COUNT` is a just a hint for the command for how many elements to fetch from the server, the default is 10. + * `type`: The type of the object to scan. + * Types are the data types of Valkey: `string`, `list`, `set`, `zset`, `hash`, `stream`. + * `allowNonCoveredSlots`: If true, the scan will keep scanning even if slots are not covered by the cluster. + * By default, the scan will stop if slots are not covered by the cluster. + */ +export interface ClusterScanOptions extends ScanOptions { + allowNonCoveredSlots?: boolean; +} + /** * Options specific to the ZSCAN command, extending from the base scan options. 
*/ diff --git a/node/src/GlideClusterClient.ts b/node/src/GlideClusterClient.ts index 0524128dd5..4e9aee579d 100644 --- a/node/src/GlideClusterClient.ts +++ b/node/src/GlideClusterClient.ts @@ -23,7 +23,7 @@ import { FunctionStatsSingleResponse, InfoOptions, LolwutOptions, - ScanOptions, + ClusterScanOptions, createClientGetName, createClientId, createConfigGet, @@ -146,7 +146,7 @@ export namespace GlideClusterClientConfiguration { /** * Configuration options for creating a {@link GlideClusterClient | GlideClusterClient}. * - * Extends `BaseClientConfiguration` with properties specific to `GlideClusterClient`, such as periodic topology checks + * Extends {@link BaseClientConfiguration | BaseClientConfiguration} with properties specific to `GlideClusterClient`, such as periodic topology checks * and Pub/Sub subscription settings. * * @remarks @@ -579,7 +579,7 @@ export class GlideClusterClient extends BaseClient { */ protected scanOptionsToProto( cursor: string, - options?: ScanOptions, + options?: ClusterScanOptions, ): command_request.ClusterScan { const command = command_request.ClusterScan.create(); command.cursor = cursor; @@ -596,6 +596,7 @@ export class GlideClusterClient extends BaseClient { command.objectType = options.type; } + command.allowNonCoveredSlots = options?.allowNonCoveredSlots ?? false; return command; } @@ -604,7 +605,7 @@ export class GlideClusterClient extends BaseClient { */ protected createClusterScanPromise( cursor: ClusterScanCursor, - options?: ScanOptions & DecoderOption, + options?: ClusterScanOptions & DecoderOption, ): Promise<[ClusterScanCursor, GlideString[]]> { // separate decoder option from scan options const { decoder, ...scanOptions } = options || {}; @@ -633,7 +634,7 @@ export class GlideClusterClient extends BaseClient { * * @param cursor - The cursor object that wraps the scan state. * To start a new scan, create a new empty `ClusterScanCursor` using {@link ClusterScanCursor}. 
- * @param options - (Optional) The scan options, see {@link ScanOptions} and {@link DecoderOption}. + * @param options - (Optional) The scan options, see {@link ClusterScanOptions} and {@link DecoderOption}. * @returns A Promise resolving to an array containing the next cursor and an array of keys, * formatted as [`ClusterScanCursor`, `string[]`]. * @@ -651,14 +652,14 @@ export class GlideClusterClient extends BaseClient { * console.log(allKeys); // ["key1", "key2", "key3"] * * // Iterate over keys matching a pattern - * await client.mset([{key: "key1", value: "value1"}, {key: "key2", value: "value2"}, {key: "notMykey", value: "value3"}, {key: "somethingElse", value: "value4"}]); + * await client.mset([{key: "key1", value: "value1"}, {key: "key2", value: "value2"}, {key: "notMyKey", value: "value3"}, {key: "somethingElse", value: "value4"}]); * let cursor = new ClusterScanCursor(); * const matchedKeys: GlideString[] = []; * while (!cursor.isFinished()) { * const [cursor, keys] = await client.scan(cursor, { match: "*key*", count: 10 }); * matchedKeys.push(...keys); * } - * console.log(matchedKeys); // ["key1", "key2", "notMykey"] + * console.log(matchedKeys); // ["key1", "key2", "notMyKey"] * * // Iterate over keys of a specific type * await client.mset([{key: "key1", value: "value1"}, {key: "key2", value: "value2"}, {key: "key3", value: "value3"}]); @@ -674,7 +675,7 @@ export class GlideClusterClient extends BaseClient { */ public async scan( cursor: ClusterScanCursor, - options?: ScanOptions & DecoderOption, + options?: ClusterScanOptions & DecoderOption, ): Promise<[ClusterScanCursor, GlideString[]]> { return this.createClusterScanPromise(cursor, options); } diff --git a/node/tests/ScanTest.test.ts b/node/tests/ScanTest.test.ts index bff90bab36..bb370a81db 100644 --- a/node/tests/ScanTest.test.ts +++ b/node/tests/ScanTest.test.ts @@ -12,6 +12,7 @@ import { GlideString, ObjectType, ProtocolVersion, + GlideClusterClientConfiguration, } from ".."; import { 
ValkeyCluster } from "../../utils/TestUtils.js"; import { @@ -19,6 +20,7 @@ import { getClientConfigurationOption, getServerVersion, parseEndpoints, + waitForClusterReady as isClusterReadyWithExpectedNodeCount, } from "./TestUtilities"; const TIMEOUT = 50000; @@ -376,6 +378,166 @@ describe("Scan GlideClusterClient", () => { }, TIMEOUT, ); + + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + `GlideClusterClient scan with allowNonCoveredSlots %p`, + async (protocol) => { + const testCluster = await ValkeyCluster.createCluster( + true, + 3, + 0, + getServerVersion, + ); + const config: GlideClusterClientConfiguration = { + addresses: testCluster + .getAddresses() + .map(([host, port]) => ({ host, port })), + protocol, + }; + const testClient = await GlideClusterClient.createClient(config); + + try { + for (let i = 0; i < 10000; i++) { + const result = await testClient.set(`${uuidv4()}`, "value"); + expect(result).toBe("OK"); + } + + // Perform an initial scan to ensure all works as expected + let cursor = new ClusterScanCursor(); + let result = await testClient.scan(cursor); + cursor = result[0]; + expect(cursor.isFinished()).toBe(false); + + // Set 'cluster-require-full-coverage' to 'no' to allow operations with missing slots + await testClient.configSet({ + "cluster-require-full-coverage": "no", + }); + + // Forget one server to simulate a node failure + const addresses = testCluster.getAddresses(); + const addressToForget = addresses[0]; + const allOtherAddresses = addresses.slice(1); + const idToForget = await testClient.customCommand( + ["CLUSTER", "MYID"], + { + route: { + type: "routeByAddress", + host: addressToForget[0], + port: addressToForget[1], + }, + }, + ); + + for (const address of allOtherAddresses) { + await testClient.customCommand( + ["CLUSTER", "FORGET", idToForget as string], + { + route: { + type: "routeByAddress", + host: address[0], + port: address[1], + }, + }, + ); + } + + // Wait for the cluster to stabilize after forgetting a 
node + const ready = await isClusterReadyWithExpectedNodeCount( + testClient, + allOtherAddresses.length, + ); + expect(ready).toBe(true); + + // Attempt to scan without 'allowNonCoveredSlots', expecting an error + // Since it might take time for the inner core to forget the missing node, + // we retry the scan until the expected error is thrown. + + const maxRetries = 10; + let retries = 0; + let errorReceived = false; + + while (retries < maxRetries && !errorReceived) { + retries++; + cursor = new ClusterScanCursor(); + + try { + while (!cursor.isFinished()) { + result = await testClient.scan(cursor); + cursor = result[0]; + } + + // If scan completes without error, wait and retry + await new Promise((resolve) => + setTimeout(resolve, 1000), + ); + } catch (error) { + if ( + error instanceof Error && + error.message.includes( + "Could not find an address covering a slot, SCAN operation cannot continue", + ) + ) { + // Expected error occurred + errorReceived = true; + } else { + // Unexpected error, rethrow + throw error; + } + } + } + + expect(errorReceived).toBe(true); + + // Perform scan with 'allowNonCoveredSlots: true' + cursor = new ClusterScanCursor(); + + while (!cursor.isFinished()) { + result = await testClient.scan(cursor, { + allowNonCoveredSlots: true, + }); + cursor = result[0]; + } + + expect(cursor.isFinished()).toBe(true); + + // Get keys using 'KEYS *' from the remaining nodes + const keys: GlideString[] = []; + + for (const address of allOtherAddresses) { + const result = await testClient.customCommand( + ["KEYS", "*"], + { + route: { + type: "routeByAddress", + host: address[0], + port: address[1], + }, + }, + ); + keys.push(...(result as GlideString[])); + } + + // Scan again with 'allowNonCoveredSlots: true' and collect results + cursor = new ClusterScanCursor(); + const results: GlideString[] = []; + + while (!cursor.isFinished()) { + result = await testClient.scan(cursor, { + allowNonCoveredSlots: true, + }); + results.push(...result[1]); + 
cursor = result[0]; + } + + // Compare the sets of keys obtained from 'KEYS *' and 'SCAN' + expect(new Set(results)).toEqual(new Set(keys)); + } finally { + testClient.close(); + await testCluster.close(); + } + }, + TIMEOUT, + ); }); //standalone tests diff --git a/node/tests/TestUtilities.ts b/node/tests/TestUtilities.ts index 6d79c768fc..cd21d20367 100644 --- a/node/tests/TestUtilities.ts +++ b/node/tests/TestUtilities.ts @@ -83,6 +83,51 @@ function intoArrayInternal(obj: any, builder: string[]) { } } +// The function is used to check if the cluster is ready with the count nodes known command using the client supplied. +// The way it works is by parsing the response of the CLUSTER INFO command and checking if the cluster_state is ok and the cluster_known_nodes is equal to the count. +// If so, we know the cluster is ready, and it has the amount of nodes we expect. +export async function waitForClusterReady( + client: GlideClusterClient, + count: number, +): Promise { + const timeout = 20000; // 20 seconds timeout in milliseconds + const startTime = Date.now(); + + while (true) { + if (Date.now() - startTime > timeout) { + return false; + } + + const clusterInfo = await client.customCommand(["CLUSTER", "INFO"]); + // parse the response + const clusterInfoMap = new Map(); + + if (clusterInfo) { + const clusterInfoLines = clusterInfo + .toString() + .split("\n") + .filter((line) => line.length > 0); + + for (const line of clusterInfoLines) { + const [key, value] = line.split(":"); + + clusterInfoMap.set(key.trim(), value.trim()); + } + + if ( + clusterInfoMap.get("cluster_state") == "ok" && + Number(clusterInfoMap.get("cluster_known_nodes")) == count + ) { + break; + } + } + + await new Promise((resolve) => setTimeout(resolve, 2000)); + } + + return true; +} + /** * accept any variable `v` and convert it into String, recursively */ diff --git a/package.json b/package.json index 3f61298feb..c6676131a2 100644 --- a/package.json +++ b/package.json @@ -1,12 +1,14 @@ { 
"devDependencies": { - "@eslint/js": "^9.10.0", + "@eslint/js": "9.17.0", "@types/eslint__js": "^8.42.3", "@types/eslint-config-prettier": "^6.11.3", - "eslint": "9.14.0", + "eslint": "9.17.0", "eslint-config-prettier": "^9.1.0", - "prettier": "^3.3.3", - "typescript": "^5.6.2", - "typescript-eslint": "^8.13" + "eslint-plugin-jsdoc": "^50.6.1", + "prettier": "3.4.2", + "prettier-eslint": "16.3.0", + "typescript": "5.7.2", + "typescript-eslint": "8.18.1" } } diff --git a/python/DEVELOPER.md b/python/DEVELOPER.md index 66127913c3..ae945b5835 100644 --- a/python/DEVELOPER.md +++ b/python/DEVELOPER.md @@ -109,7 +109,6 @@ cd python python3 -m venv .env source .env/bin/activate pip install -r requirements.txt -pip install -r dev_requirements.txt ``` ## Build the package (in release mode): @@ -211,7 +210,7 @@ Run from the main `/python` folder ```bash cd $HOME/src/valkey-glide/python source .env/bin/activate - pip install -r dev_requirements.txt + pip install -r requirements.txt isort . --profile black --skip-glob python/glide/protobuf --skip-glob .env black . --exclude python/glide/protobuf --exclude .env flake8 . 
--count --select=E9,F63,F7,F82 --show-source --statistics \ diff --git a/python/dev_requirements.txt b/python/dev_requirements.txt deleted file mode 100644 index 02e9c4fd53..0000000000 --- a/python/dev_requirements.txt +++ /dev/null @@ -1,7 +0,0 @@ -black >= 24.3.0 -flake8 == 5.0 -isort == 5.10 -mypy == 1.2 -mypy-protobuf == 3.5 -packaging >= 22.0 -pyrsistent diff --git a/python/python/glide/async_commands/cluster_commands.py b/python/python/glide/async_commands/cluster_commands.py index 584091ab32..6167e1abaa 100644 --- a/python/python/glide/async_commands/cluster_commands.py +++ b/python/python/glide/async_commands/cluster_commands.py @@ -1076,67 +1076,78 @@ async def scan( match: Optional[TEncodable] = None, count: Optional[int] = None, type: Optional[ObjectType] = None, + allow_non_covered_slots: bool = False, ) -> List[Union[ClusterScanCursor, List[bytes]]]: """ - Incrementally iterates over the keys in the Cluster. + Incrementally iterates over the keys in the cluster. The method returns a list containing the next cursor and a list of keys. - This command is similar to the SCAN command, but it is designed to work in a Cluster environment. - For each iteration the new cursor object should be used to continue the scan. + This command is similar to the SCAN command but is designed to work in a cluster environment. + For each iteration, the new cursor object should be used to continue the scan. Using the same cursor object for multiple iterations will result in the same keys or unexpected behavior. - For more information about the Cluster Scan implementation, - see [Cluster Scan](https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#cluster-scan). + For more information about the Cluster Scan implementation, see [Cluster Scan](https://github.com/valkey-io/valkey-glide/wiki/General-Concepts#cluster-scan). 
- As the SCAN command, the method can be used to iterate over the keys in the database, - to return all keys the database have from the time the scan started till the scan ends. - The same key can be returned in multiple scans iteration. + Like the SCAN command, the method can be used to iterate over the keys in the database, + returning all keys the database has from when the scan started until the scan ends. + The same key can be returned in multiple scan iterations. See https://valkey.io/commands/scan/ for more details. Args: cursor (ClusterScanCursor): The cursor object that wraps the scan state. - To start a new scan, create a new empty ClusterScanCursor using ClusterScanCursor(). + To start a new scan, create a new empty ClusterScanCursor using ClusterScanCursor(). match (Optional[TEncodable]): A pattern to match keys against. count (Optional[int]): The number of keys to return in a single iteration. - The actual number returned can vary and is not guaranteed to match this count exactly. - This parameter serves as a hint to the server on the number of steps to perform in each iteration. - The default value is 10. + The actual number returned can vary and is not guaranteed to match this count exactly. + This parameter serves as a hint to the server on the number of steps to perform in each iteration. + The default value is 10. type (Optional[ObjectType]): The type of object to scan for. + allow_non_covered_slots (bool): If set to True, the scan will perform even if some slots are not covered by any node. + It's important to note that when set to True, the scan has no guarantee to cover all keys in the cluster, + and the method loses its way to validate the progress of the scan. Defaults to False. Returns: List[Union[ClusterScanCursor, List[TEncodable]]]: A list containing the next cursor and a list of keys, - formatted as [ClusterScanCursor, [key1, key2, ...]]. + formatted as [ClusterScanCursor, [key1, key2, ...]]. 
Examples: - >>> # In the following example, we will iterate over the keys in the cluster. - await client.mset({b'key1': b'value1', b'key2': b'value2', b'key3': b'value3'}) - cursor = ClusterScanCursor() - all_keys = [] - while not cursor.is_finished(): - cursor, keys = await client.scan(cursor, count=10) - all_keys.extend(keys) - print(all_keys) # [b'key1', b'key2', b'key3'] - >>> # In the following example, we will iterate over the keys in the cluster that match the pattern "*key*". - await client.mset({b"key1": b"value1", b"key2": b"value2", b"not_my_key": b"value3", b"something_else": b"value4"}) - cursor = ClusterScanCursor() - all_keys = [] - while not cursor.is_finished(): - cursor, keys = await client.scan(cursor, match=b"*key*", count=10) - all_keys.extend(keys) - print(all_keys) # [b'my_key1', b'my_key2', b'not_my_key'] - >>> # In the following example, we will iterate over the keys in the cluster that are of type STRING. - await client.mset({b'key1': b'value1', b'key2': b'value2', b'key3': b'value3'}) - await client.sadd(b"this_is_a_set", [b"value4"]) - cursor = ClusterScanCursor() - all_keys = [] - while not cursor.is_finished(): - cursor, keys = await client.scan(cursor, type=ObjectType.STRING) - all_keys.extend(keys) - print(all_keys) # [b'key1', b'key2', b'key3'] + >>> # Iterate over all keys in the cluster. + >>> await client.mset({b'key1': b'value1', b'key2': b'value2', b'key3': b'value3'}) + >>> cursor = ClusterScanCursor() + >>> all_keys = [] + >>> while not cursor.is_finished(): + >>> cursor, keys = await client.scan(cursor, count=10, allow_non_covered_slots=False) + >>> all_keys.extend(keys) + >>> print(all_keys) # [b'key1', b'key2', b'key3'] + + >>> # Iterate over keys matching the pattern "*key*". 
+ >>> await client.mset({b"key1": b"value1", b"key2": b"value2", b"not_my_key": b"value3", b"something_else": b"value4"}) + >>> cursor = ClusterScanCursor() + >>> all_keys = [] + >>> while not cursor.is_finished(): + >>> cursor, keys = await client.scan(cursor, match=b"*key*", count=10, allow_non_covered_slots=False) + >>> all_keys.extend(keys) + >>> print(all_keys) # [b'key1', b'key2', b'not_my_key'] + + >>> # Iterate over keys of type STRING. + >>> await client.mset({b'key1': b'value1', b'key2': b'value2', b'key3': b'value3'}) + >>> await client.sadd(b"this_is_a_set", [b"value4"]) + >>> cursor = ClusterScanCursor() + >>> all_keys = [] + >>> while not cursor.is_finished(): + >>> cursor, keys = await client.scan(cursor, type=ObjectType.STRING, allow_non_covered_slots=False) + >>> all_keys.extend(keys) + >>> print(all_keys) # [b'key1', b'key2', b'key3'] """ return cast( List[Union[ClusterScanCursor, List[bytes]]], - await self._cluster_scan(cursor, match, count, type), + await self._cluster_scan( + cursor=cursor, + match=match, + count=count, + type=type, + allow_non_covered_slots=allow_non_covered_slots, + ), ) async def script_exists( diff --git a/python/python/glide/async_commands/core.py b/python/python/glide/async_commands/core.py index 6ebc8d2ab6..3f1be75d98 100644 --- a/python/python/glide/async_commands/core.py +++ b/python/python/glide/async_commands/core.py @@ -390,6 +390,7 @@ async def _cluster_scan( match: Optional[TEncodable] = ..., count: Optional[int] = ..., type: Optional[ObjectType] = ..., + allow_non_covered_slots: bool = ..., ) -> TResult: ... 
async def _update_connection_password( diff --git a/python/python/glide/glide_client.py b/python/python/glide/glide_client.py index 6178b997a7..5ed558e709 100644 --- a/python/python/glide/glide_client.py +++ b/python/python/glide/glide_client.py @@ -566,6 +566,7 @@ async def _cluster_scan( match: Optional[TEncodable] = None, count: Optional[int] = None, type: Optional[ObjectType] = None, + allow_non_covered_slots: bool = False, ) -> List[Union[ClusterScanCursor, List[bytes]]]: if self._is_closed: raise ClosingError( @@ -576,6 +577,7 @@ async def _cluster_scan( # Take out the id string from the wrapping object cursor_string = cursor.get_cursor() request.cluster_scan.cursor = cursor_string + request.cluster_scan.allow_non_covered_slots = allow_non_covered_slots if match is not None: request.cluster_scan.match_pattern = ( self._encode_arg(match) if isinstance(match, str) else match diff --git a/python/python/tests/conftest.py b/python/python/tests/conftest.py index 0937ca2067..0ab5c9d6e9 100644 --- a/python/python/tests/conftest.py +++ b/python/python/tests/conftest.py @@ -252,14 +252,16 @@ async def create_client( inflight_requests_limit: Optional[int] = None, read_from: ReadFrom = ReadFrom.PRIMARY, client_az: Optional[str] = None, + valkey_cluster: Optional[ValkeyCluster] = None, ) -> Union[GlideClient, GlideClusterClient]: # Create async socket client use_tls = request.config.getoption("--tls") if cluster_mode: - assert type(pytest.valkey_cluster) is ValkeyCluster + valkey_cluster = valkey_cluster or pytest.valkey_cluster + assert type(valkey_cluster) is ValkeyCluster assert database_id == 0 - k = min(3, len(pytest.valkey_cluster.nodes_addr)) - seed_nodes = random.sample(pytest.valkey_cluster.nodes_addr, k=k) + k = min(3, len(valkey_cluster.nodes_addr)) + seed_nodes = random.sample(valkey_cluster.nodes_addr, k=k) cluster_config = GlideClusterClientConfiguration( addresses=seed_nodes if addresses is None else addresses, use_tls=use_tls, diff --git 
a/python/python/tests/test_scan.py b/python/python/tests/test_scan.py index 907dc703d5..69d5243aaf 100644 --- a/python/python/tests/test_scan.py +++ b/python/python/tests/test_scan.py @@ -1,16 +1,96 @@ -from __future__ import annotations - -from typing import List, cast +import asyncio +from typing import AsyncGenerator, List, cast import pytest -from glide import ClusterScanCursor +from glide import ByAddressRoute from glide.async_commands.command_args import ObjectType from glide.config import ProtocolVersion from glide.exceptions import RequestError +from glide.glide import ClusterScanCursor from glide.glide_client import GlideClient, GlideClusterClient +from tests.conftest import create_client +from tests.utils.cluster import ValkeyCluster from tests.utils.utils import get_random_string +# Helper function to get a number of nodes, and ask the cluster till we get the number of nodes +async def is_cluster_ready(client: GlideClusterClient, count: int) -> bool: + # we allow max 20 seconds to get the nodes + timeout = 20 + start_time = asyncio.get_event_loop().time() + + while True: + if asyncio.get_event_loop().time() - start_time > timeout: + return False + + cluster_info = await client.custom_command(["CLUSTER", "INFO"]) + cluster_info_map = {} + + if cluster_info: + info_str = ( + cluster_info + if isinstance(cluster_info, str) + else ( + cluster_info.decode() + if isinstance(cluster_info, bytes) + else str(cluster_info) + ) + ) + cluster_info_lines = info_str.split("\n") + cluster_info_lines = [line for line in cluster_info_lines if line] + + for line in cluster_info_lines: + key, value = line.split(":") + cluster_info_map[key.strip()] = value.strip() + + if ( + cluster_info_map.get("cluster_state") == "ok" + and int(cluster_info_map.get("cluster_known_nodes", "0")) == count + ): + break + + await asyncio.sleep(2) + + return True + + +# The slots not covered testing is messing with the cluster by removing a node, and then scanning the cluster +# When a node is 
forgot its getting into a banned state for one minutes, so in order to bring back the cluster to normal state +# we need to wait for the node to be unbanned, and then we can continue with the tests +# In order to avoid the time wasting and the chance that the restoration will not happen, we will run the test on separate cluster +@pytest.fixture(scope="function") +async def function_scoped_cluster(): + """ + Function-scoped fixture to create a new cluster for each test invocation. + """ + cluster = ValkeyCluster( + tls=False, cluster_mode=True, shard_count=3, replica_count=0 + ) + yield cluster + del cluster + + +# Since the cluster for slots covered is created separately, we need to create a client for the specific cluster +# The client is created with 100 timeout so looping over the keys with scan will return the error before we finish the loop +# otherwise the test will be flaky +@pytest.fixture(scope="function") +async def glide_client_scoped( + request, function_scoped_cluster: ValkeyCluster, protocol: ProtocolVersion +) -> AsyncGenerator[GlideClusterClient, None]: + """ + Get client for tests, adjusted to use the function-scoped cluster. 
+ """ + client = await create_client( + request, + True, + valkey_cluster=function_scoped_cluster, + protocol=protocol, + ) + assert isinstance(client, GlideClusterClient) + yield client + await client.close() + + @pytest.mark.asyncio class TestScan: # Cluster scan tests @@ -251,6 +331,75 @@ async def test_cluster_scan_all_types(self, glide_client: GlideClusterClient): assert not set(encoded_list_keys).intersection(set(keys)) assert not set(encoded_zset_keys).intersection(set(keys)) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_cluster_scan_non_covered_slots( + self, + protocol: ProtocolVersion, + function_scoped_cluster: ValkeyCluster, + glide_client_scoped: GlideClusterClient, + ): + key = get_random_string(10) + for i in range(1000): + await glide_client_scoped.set(f"{key}:{i}", "value") + cursor = ClusterScanCursor() + result = await glide_client_scoped.scan(cursor) + cursor = cast(ClusterScanCursor, result[0]) + assert not cursor.is_finished() + await glide_client_scoped.config_set({"cluster-require-full-coverage": "no"}) + # forget one server + address_to_forget = glide_client_scoped.config.addresses[0] + all_other_addresses = glide_client_scoped.config.addresses[1:] + id_to_forget = await glide_client_scoped.custom_command( + ["CLUSTER", "MYID"], + ByAddressRoute(address_to_forget.host, address_to_forget.port), + ) + for address in all_other_addresses: + await glide_client_scoped.custom_command( + ["CLUSTER", "FORGET", cast(str, id_to_forget)], + ByAddressRoute(address.host, address.port), + ) + # now we let it few seconds gossip to get the new cluster configuration + await is_cluster_ready(glide_client_scoped, len(all_other_addresses)) + # Iterate scan until error is returned, as it might take time for the inner core to forget the missing node + cursor = ClusterScanCursor() + while True: + try: + while not cursor.is_finished(): + result = await glide_client_scoped.scan(cursor) + cursor = 
cast(ClusterScanCursor, result[0]) + # Reset cursor for next iteration + cursor = ClusterScanCursor() + except RequestError as e_info: + assert ( + "Could not find an address covering a slot, SCAN operation cannot continue" + in str(e_info) + ) + break + # Scan with allow_non_covered_slots=True + while not cursor.is_finished(): + result = await glide_client_scoped.scan( + cursor, allow_non_covered_slots=True + ) + cursor = cast(ClusterScanCursor, result[0]) + assert cursor.is_finished() + # check the keys we can get with keys command, and scan from the beginning + keys = [] + for address in all_other_addresses: + result = await glide_client_scoped.custom_command( + ["KEYS", "*"], ByAddressRoute(address.host, address.port) + ) + keys.extend(cast(List[bytes], result)) + + cursor = ClusterScanCursor() + results = [] + while not cursor.is_finished(): + result = await glide_client_scoped.scan( + cursor, allow_non_covered_slots=True + ) + results.extend(cast(List[bytes], result[1])) + cursor = cast(ClusterScanCursor, result[0]) + assert set(keys) == set(results) + # Standalone scan tests @pytest.mark.parametrize("cluster_mode", [False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) diff --git a/python/python/tests/test_transaction.py b/python/python/tests/test_transaction.py index ccdb309f58..c623f4e8c9 100644 --- a/python/python/tests/test_transaction.py +++ b/python/python/tests/test_transaction.py @@ -1033,10 +1033,7 @@ async def test_standalone_transaction(self, glide_client: GlideClient): assert result[5:13] == [2, 2, 2, [b"Bob", b"Alice"], 2, OK, None, 0] assert result[13:] == expected - @pytest.mark.filterwarnings( - action="ignore", message="The test " - ) - def test_transaction_clear(self): + async def test_transaction_clear(self): transaction = Transaction() transaction.info() transaction.select(1) diff --git a/python/requirements.txt b/python/requirements.txt index b39d1d96c8..b5880e6287 100644 --- 
a/python/requirements.txt +++ b/python/requirements.txt @@ -1,7 +1,12 @@ async-timeout==4.0.2;python_version<"3.11" maturin==0.14.17 # higher version break the needs structure changes, the name of the project is not the same as the package name, and the naming both glide create a circular dependency - TODO: fix this -protobuf==3.20.* pytest pytest-asyncio typing_extensions==4.8.0;python_version<"3.11" pytest-html +black >= 24.3.0 +flake8 == 5.0 +isort == 5.10 +mypy == 1.13.0 +mypy-protobuf == 3.5 +packaging >= 22.0 diff --git a/utils/TestUtils.ts b/utils/TestUtils.ts index 423bf8e9cb..9c89788528 100644 --- a/utils/TestUtils.ts +++ b/utils/TestUtils.ts @@ -21,9 +21,9 @@ function parseOutput(input: string): { .split(",") .map((address) => address.split(":")) .map((address) => [address[0], Number(address[1])]) as [ - string, - number, - ][]; + string, + number, + ][]; if (clusterFolder === undefined || ports === undefined) { throw new Error(`Insufficient data in input: ${input}`); @@ -82,7 +82,7 @@ export class ValkeyCluster { execFile( "python3", [PY_SCRIPT_PATH, ...command.split(" ")], - (error, stdout, stderr) => { + (error, stdout) => { if (error) { reject(error); } else { From 0bcca10f207a5958f3b7e6c228596396f62fd01e Mon Sep 17 00:00:00 2001 From: Shoham Elias <116083498+shohamazon@users.noreply.github.com> Date: Thu, 26 Dec 2024 14:57:34 +0200 Subject: [PATCH 16/32] Add connection timeout configuration (#2823) --------- Signed-off-by: Shoham Elias --- CHANGELOG.md | 4 +- glide-core/redis-rs/redis/src/client.rs | 5 + .../src/cluster_async/connections_logic.rs | 1 + .../redis-rs/redis/src/cluster_async/mod.rs | 1 + .../redis-rs/redis/src/cluster_routing.rs | 2 +- glide-core/src/client/mod.rs | 12 +- .../src/client/reconnecting_connection.rs | 20 ++- glide-core/src/client/standalone_client.rs | 10 ++ glide-core/src/client/types.rs | 3 + .../src/protobuf/connection_request.proto | 3 +- .../AdvancedBaseClientConfiguration.java | 22 ++++ 
.../AdvancedGlideClientConfiguration.java | 23 ++++ ...vancedGlideClusterClientConfiguration.java | 23 ++++ .../BaseClientConfiguration.java | 3 +- .../GlideClientConfiguration.java | 4 + .../GlideClusterClientConfiguration.java | 4 + .../glide/managers/ConnectionManager.java | 31 +++++ .../src/test/java/glide/ConnectionTests.java | 115 +++++++++++++++++ node/npm/glide/index.ts | 4 + node/rust-client/src/lib.rs | 6 +- node/src/BaseClient.ts | 46 ++++++- node/src/GlideClient.ts | 27 ++++ node/src/GlideClusterClient.ts | 27 ++++ node/tests/GlideClient.test.ts | 70 +++++++++++ node/tests/GlideClusterClient.test.ts | 62 +++++++++ python/python/glide/__init__.py | 4 + python/python/glide/config.py | 67 +++++++++- python/python/tests/conftest.py | 18 ++- python/python/tests/test_async_client.py | 119 +++++++++++++++--- python/python/tests/test_config.py | 27 ++++ python/python/tests/test_pubsub.py | 4 +- .../python/tests/test_read_from_strategy.py | 12 +- python/python/tests/test_transaction.py | 2 +- utils/cluster_manager.py | 16 ++- 34 files changed, 746 insertions(+), 51 deletions(-) create mode 100644 java/client/src/main/java/glide/api/models/configuration/AdvancedBaseClientConfiguration.java create mode 100644 java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClientConfiguration.java create mode 100644 java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClusterClientConfiguration.java diff --git a/CHANGELOG.md b/CHANGELOG.md index a1bc2cd15f..fdd6642c2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,9 @@ #### Changes * Node, Python, Java: Add allow uncovered slots scanning flag option in cluster scan ([#2814](https://github.com/valkey-io/valkey-glide/pull/2814), [#2815](https://github.com/valkey-io/valkey-glide/pull/2815), [#2860](https://github.com/valkey-io/valkey-glide/pull/2860)) -* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), 
[#2802](https://github.com/valkey-io/valkey-glide/pull/2802) +* Java: Bump protobuf (protoc) version ([#2561](https://github.com/valkey-io/valkey-glide/pull/2561), [#2802](https://github.com/valkey-io/valkey-glide/pull/2802)) * Java: bump `netty` version ([#2777](https://github.com/valkey-io/valkey-glide/pull/2777)) * Node: Remove native package references for MacOs x64 architecture ([#2799](https://github.com/valkey-io/valkey-glide/issues/2799)) - +* Node, Python, Java: Add connection timeout to client configuration ([#2823](https://github.com/valkey-io/valkey-glide/issues/2823)) #### Breaking Changes #### Fixes diff --git a/glide-core/redis-rs/redis/src/client.rs b/glide-core/redis-rs/redis/src/client.rs index 6ac3f40bcf..2b97671110 100644 --- a/glide-core/redis-rs/redis/src/client.rs +++ b/glide-core/redis-rs/redis/src/client.rs @@ -89,6 +89,11 @@ pub struct GlideConnectionOptions { /// If ReadFromReplica strategy is set to AZAffinity, this parameter will be set to 'true'. /// In this case, an INFO command will be triggered in the connection's setup to update the connection's 'availability_zone' property. pub discover_az: bool, + /// Connection timeout duration. + /// + /// This optional field sets the maximum duration to wait when attempting to establish + /// a connection. If `None`, the connection will use `DEFAULT_CONNECTION_TIMEOUT`. 
+ pub connection_timeout: Option, } /// To enable async support you need to enable the feature: `tokio-comp` diff --git a/glide-core/redis-rs/redis/src/cluster_async/connections_logic.rs b/glide-core/redis-rs/redis/src/cluster_async/connections_logic.rs index 4f9b3f0d4e..e5af8d1e50 100644 --- a/glide-core/redis-rs/redis/src/cluster_async/connections_logic.rs +++ b/glide-core/redis-rs/redis/src/cluster_async/connections_logic.rs @@ -193,6 +193,7 @@ where push_sender: None, disconnect_notifier, discover_az, + connection_timeout: Some(params.connection_timeout), }, ) .await diff --git a/glide-core/redis-rs/redis/src/cluster_async/mod.rs b/glide-core/redis-rs/redis/src/cluster_async/mod.rs index 8164d09413..534fdd429e 100644 --- a/glide-core/redis-rs/redis/src/cluster_async/mod.rs +++ b/glide-core/redis-rs/redis/src/cluster_async/mod.rs @@ -1094,6 +1094,7 @@ where push_sender, disconnect_notifier, discover_az, + connection_timeout: Some(cluster_params.connection_timeout), }; let connections = Self::create_initial_connections( diff --git a/glide-core/redis-rs/redis/src/cluster_routing.rs b/glide-core/redis-rs/redis/src/cluster_routing.rs index 8bf11d19d4..011f5e08e6 100644 --- a/glide-core/redis-rs/redis/src/cluster_routing.rs +++ b/glide-core/redis-rs/redis/src/cluster_routing.rs @@ -623,6 +623,7 @@ fn base_routing(cmd: &[u8]) -> RouteBy { | b"FUNCTION STATS" => RouteBy::AllNodes, b"DBSIZE" + | b"DEBUG" | b"FLUSHALL" | b"FLUSHDB" | b"FT._ALIASLIST" @@ -717,7 +718,6 @@ fn base_routing(cmd: &[u8]) -> RouteBy { | b"COMMAND LIST" | b"COMMAND" | b"CONFIG GET" - | b"DEBUG" | b"ECHO" | b"FUNCTION LIST" | b"LASTSAVE" diff --git a/glide-core/src/client/mod.rs b/glide-core/src/client/mod.rs index 73eee144b1..005a38a9ca 100644 --- a/glide-core/src/client/mod.rs +++ b/glide-core/src/client/mod.rs @@ -33,10 +33,11 @@ use versions::Versioning; pub const HEARTBEAT_SLEEP_DURATION: Duration = Duration::from_secs(1); pub const DEFAULT_RETRIES: u32 = 3; +/// Note: If you change the 
default value, make sure to change the documentation in *all* wrappers. pub const DEFAULT_RESPONSE_TIMEOUT: Duration = Duration::from_millis(250); -pub const DEFAULT_CONNECTION_ATTEMPT_TIMEOUT: Duration = Duration::from_millis(250); pub const DEFAULT_PERIODIC_TOPOLOGY_CHECKS_INTERVAL: Duration = Duration::from_secs(60); -pub const INTERNAL_CONNECTION_TIMEOUT: Duration = Duration::from_millis(250); +/// Note: If you change the default value, make sure to change the documentation in *all* wrappers. +pub const DEFAULT_CONNECTION_TIMEOUT: Duration = Duration::from_millis(250); pub const FINISHED_SCAN_CURSOR: &str = "finished"; /// The value of 1000 for the maximum number of inflight requests is determined based on Little's Law in queuing theory: @@ -571,8 +572,9 @@ async fn create_cluster_client( Some(PeriodicCheck::ManualInterval(interval)) => Some(interval), None => Some(DEFAULT_PERIODIC_TOPOLOGY_CHECKS_INTERVAL), }; + let connection_timeout = to_duration(request.connection_timeout, DEFAULT_CONNECTION_TIMEOUT); let mut builder = redis::cluster::ClusterClientBuilder::new(initial_nodes) - .connection_timeout(INTERNAL_CONNECTION_TIMEOUT) + .connection_timeout(connection_timeout) .retries(DEFAULT_RETRIES); let read_from_strategy = request.read_from.unwrap_or_default(); builder = builder.read_from(match read_from_strategy { @@ -718,6 +720,8 @@ fn sanitized_request_string(request: &ConnectionRequest) -> String { "\nStandalone mode" }; let request_timeout = format_optional_value("Request timeout", request.request_timeout); + let connection_timeout = + format_optional_value("Connection timeout", request.connection_timeout); let database_id = format!("\ndatabase ID: {}", request.database_id); let rfr_strategy = request .read_from @@ -774,7 +778,7 @@ fn sanitized_request_string(request: &ConnectionRequest) -> String { ); format!( - "\nAddresses: 
{addresses}{tls_mode}{cluster_mode}{request_timeout}{rfr_strategy}{connection_retry_strategy}{database_id}{protocol}{client_name}{periodic_checks}{pubsub_subscriptions}{inflight_requests_limit}", + "\nAddresses: {addresses}{tls_mode}{cluster_mode}{request_timeout}{connection_timeout}{rfr_strategy}{connection_retry_strategy}{database_id}{protocol}{client_name}{periodic_checks}{pubsub_subscriptions}{inflight_requests_limit}", ) } diff --git a/glide-core/src/client/reconnecting_connection.rs b/glide-core/src/client/reconnecting_connection.rs index c882dd29d6..197de503b9 100644 --- a/glide-core/src/client/reconnecting_connection.rs +++ b/glide-core/src/client/reconnecting_connection.rs @@ -18,7 +18,7 @@ use tokio::task; use tokio::time::timeout; use tokio_retry2::{Retry, RetryError}; -use super::{run_with_timeout, DEFAULT_CONNECTION_ATTEMPT_TIMEOUT}; +use super::{run_with_timeout, DEFAULT_CONNECTION_TIMEOUT}; /// The reason behind the call to `reconnect()` #[derive(PartialEq, Eq, Debug, Clone)] @@ -71,7 +71,11 @@ async fn get_multiplexed_connection( connection_options: &GlideConnectionOptions, ) -> RedisResult { run_with_timeout( - Some(DEFAULT_CONNECTION_ATTEMPT_TIMEOUT), + Some( + connection_options + .connection_timeout + .unwrap_or(DEFAULT_CONNECTION_TIMEOUT), + ), client.get_multiplexed_async_connection(connection_options.clone()), ) .await @@ -113,6 +117,7 @@ async fn create_connection( retry_strategy: RetryStrategy, push_sender: Option>, discover_az: bool, + connection_timeout: Duration, ) -> Result { let client = &connection_backend.connection_info; let connection_options = GlideConnectionOptions { @@ -121,6 +126,7 @@ async fn create_connection( TokioDisconnectNotifier::new(), )), discover_az, + connection_timeout: Some(connection_timeout), }; let action = || async { get_multiplexed_connection(client, &connection_options) @@ -206,6 +212,7 @@ impl ReconnectingConnection { tls_mode: TlsMode, push_sender: Option>, discover_az: bool, + connection_timeout: Duration, 
) -> Result { log_debug( "connection creation", @@ -218,7 +225,14 @@ impl ReconnectingConnection { connection_available_signal: ManualResetEvent::new(true), client_dropped_flagged: AtomicBool::new(false), }; - create_connection(backend, connection_retry_strategy, push_sender, discover_az).await + create_connection( + backend, + connection_retry_strategy, + push_sender, + discover_az, + connection_timeout, + ) + .await } pub(crate) fn node_address(&self) -> String { diff --git a/glide-core/src/client/standalone_client.rs b/glide-core/src/client/standalone_client.rs index 5bc26999a8..c2c541c763 100644 --- a/glide-core/src/client/standalone_client.rs +++ b/glide-core/src/client/standalone_client.rs @@ -2,6 +2,7 @@ use super::get_redis_connection_info; use super::reconnecting_connection::{ReconnectReason, ReconnectingConnection}; +use super::{to_duration, DEFAULT_CONNECTION_TIMEOUT}; use super::{ConnectionRequest, NodeAddress, TlsMode}; use crate::client::types::ReadFrom as ClientReadFrom; use crate::retry_strategies::RetryStrategy; @@ -15,6 +16,7 @@ use redis::{PushInfo, RedisError, RedisResult, Value}; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering; use std::sync::Arc; +use std::time::Duration; use telemetrylib::Telemetry; use tokio::sync::mpsc; use tokio::task; @@ -130,6 +132,11 @@ impl StandaloneClient { Some(ClientReadFrom::AZAffinity(_)) ); + let connection_timeout = to_duration( + connection_request.connection_timeout, + DEFAULT_CONNECTION_TIMEOUT, + ); + let mut stream = stream::iter(connection_request.addresses.iter()) .map(|address| async { get_connection_and_replication_info( @@ -143,6 +150,7 @@ impl StandaloneClient { tls_mode.unwrap_or(TlsMode::NoTls), &push_sender, discover_az, + connection_timeout, ) .await .map_err(|err| (format!("{}:{}", address.host, address.port), err)) @@ -552,6 +560,7 @@ async fn get_connection_and_replication_info( tls_mode: TlsMode, push_sender: &Option>, discover_az: bool, + connection_timeout: Duration, ) 
-> Result<(ReconnectingConnection, Value), (ReconnectingConnection, RedisError)> { let result = ReconnectingConnection::new( address, @@ -560,6 +569,7 @@ async fn get_connection_and_replication_info( tls_mode, push_sender.clone(), discover_az, + connection_timeout, ) .await; let reconnecting_connection = match result { diff --git a/glide-core/src/client/types.rs b/glide-core/src/client/types.rs index a0053587c8..e2314a1ab6 100644 --- a/glide-core/src/client/types.rs +++ b/glide-core/src/client/types.rs @@ -20,6 +20,7 @@ pub struct ConnectionRequest { pub addresses: Vec, pub cluster_mode_enabled: bool, pub request_timeout: Option, + pub connection_timeout: Option, pub connection_retry_strategy: Option, pub periodic_checks: Option, pub pubsub_subscriptions: Option, @@ -147,6 +148,7 @@ impl From for ConnectionRequest { .collect(); let cluster_mode_enabled = value.cluster_mode_enabled; let request_timeout = none_if_zero(value.request_timeout); + let connection_timeout = none_if_zero(value.connection_timeout); let connection_retry_strategy = value .connection_retry_strategy @@ -214,6 +216,7 @@ impl From for ConnectionRequest { addresses, cluster_mode_enabled, request_timeout, + connection_timeout, connection_retry_strategy, periodic_checks, pubsub_subscriptions, diff --git a/glide-core/src/protobuf/connection_request.proto b/glide-core/src/protobuf/connection_request.proto index 5f4db44b00..8e33b39da3 100644 --- a/glide-core/src/protobuf/connection_request.proto +++ b/glide-core/src/protobuf/connection_request.proto @@ -26,7 +26,7 @@ message AuthenticationInfo { enum ProtocolVersion { RESP3 = 0; - RESP2 = 1; + RESP2 = 1; } message PeriodicChecksManualInterval { @@ -71,6 +71,7 @@ message ConnectionRequest { PubSubSubscriptions pubsub_subscriptions = 13; uint32 inflight_requests_limit = 14; string client_az = 15; + uint32 connection_timeout = 16; } message ConnectionRetryStrategy { diff --git 
a/java/client/src/main/java/glide/api/models/configuration/AdvancedBaseClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/AdvancedBaseClientConfiguration.java new file mode 100644 index 0000000000..5a28ee9fcf --- /dev/null +++ b/java/client/src/main/java/glide/api/models/configuration/AdvancedBaseClientConfiguration.java @@ -0,0 +1,22 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.configuration; + +import lombok.Getter; +import lombok.experimental.SuperBuilder; + +/** + * Advanced configuration settings class for creating a client. Shared settings for standalone and + * cluster clients. + */ +@Getter +@SuperBuilder +public abstract class AdvancedBaseClientConfiguration { + + /** + * The duration in milliseconds to wait for a TCP/TLS connection to complete. This applies both + * during initial client creation and any reconnections that may occur during request processing. + * **Note**: A high connection timeout may lead to prolonged blocking of the entire command + * pipeline. If not explicitly set, a default value of 250 milliseconds will be used. + */ + private final Integer connectionTimeout; +} diff --git a/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClientConfiguration.java new file mode 100644 index 0000000000..0ce5f85958 --- /dev/null +++ b/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClientConfiguration.java @@ -0,0 +1,23 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.configuration; + +import glide.api.GlideClient; +import lombok.Getter; +import lombok.ToString; +import lombok.experimental.SuperBuilder; + +/** + * Represents advanced configuration settings for a Standalone {@link GlideClient} used in {@link + * GlideClientConfiguration}. 
+ * + * @example + *
{@code
+ * AdvancedGlideClientConfiguration config = AdvancedGlideClientConfiguration.builder()
+ *     .connectionTimeout(500)
+ *     .build();
+ * }
+ */ +@Getter +@SuperBuilder +@ToString +public class AdvancedGlideClientConfiguration extends AdvancedBaseClientConfiguration {} diff --git a/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClusterClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClusterClientConfiguration.java new file mode 100644 index 0000000000..ff02b18c4f --- /dev/null +++ b/java/client/src/main/java/glide/api/models/configuration/AdvancedGlideClusterClientConfiguration.java @@ -0,0 +1,23 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.configuration; + +import glide.api.GlideClusterClient; +import lombok.Getter; +import lombok.ToString; +import lombok.experimental.SuperBuilder; + +/** + * Represents advanced configuration settings for a Standalone {@link GlideClusterClient} used in + * {@link GlideClusterClientConfiguration}. + * + * @example + *
{@code
+ * AdvancedGlideClusterClientConfiguration config = AdvancedGlideClusterClientConfiguration.builder()
+ *     .connectionTimeout(500)
+ *     .build();
+ * }
+ */ +@Getter +@SuperBuilder +@ToString +public class AdvancedGlideClusterClientConfiguration extends AdvancedBaseClientConfiguration {} diff --git a/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java index 7d9d5d5b68..7cd29a7cb8 100644 --- a/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java @@ -49,7 +49,8 @@ public abstract class BaseClientConfiguration { * The duration in milliseconds that the client should wait for a request to complete. This * duration encompasses sending the request, awaiting for a response from the server, and any * required reconnections or retries. If the specified timeout is exceeded for a pending request, - * it will result in a timeout error. If not set, a default value will be used. + * it will result in a timeout error. If not explicitly set, a default value of 250 milliseconds + * will be used. */ private final Integer requestTimeout; diff --git a/java/client/src/main/java/glide/api/models/configuration/GlideClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/GlideClientConfiguration.java index 83d84e7c1f..4321f1dd39 100644 --- a/java/client/src/main/java/glide/api/models/configuration/GlideClientConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/GlideClientConfiguration.java @@ -24,6 +24,7 @@ * .clientName("GLIDE") * .subscriptionConfiguration(subscriptionConfiguration) * .inflightRequestsLimit(1000) + * .advancedConfiguration(AdvancedGlideClientConfiguration.builder().connectionTimeout(500).build()) * .build(); * } */ @@ -39,4 +40,7 @@ public class GlideClientConfiguration extends BaseClientConfiguration { /** Subscription configuration for the current client. 
*/ private final StandaloneSubscriptionConfiguration subscriptionConfiguration; + + /** Advanced configuration settings for the client. */ + private final AdvancedGlideClientConfiguration advancedConfiguration; } diff --git a/java/client/src/main/java/glide/api/models/configuration/GlideClusterClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/GlideClusterClientConfiguration.java index b1d1c7590c..f0b6d7789d 100644 --- a/java/client/src/main/java/glide/api/models/configuration/GlideClusterClientConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/GlideClusterClientConfiguration.java @@ -23,6 +23,7 @@ * .clientName("GLIDE") * .subscriptionConfiguration(subscriptionConfiguration) * .inflightRequestsLimit(1000) + * .advancedConfiguration(AdvancedGlideClusterClientConfiguration.builder().connectionTimeout(500).build()) * .build(); * } */ @@ -32,4 +33,7 @@ public class GlideClusterClientConfiguration extends BaseClientConfiguration { /** Subscription configuration for the current client. */ private final ClusterSubscriptionConfiguration subscriptionConfiguration; + + /** Advanced configuration settings for the client. 
*/ + private final AdvancedGlideClusterClientConfiguration advancedConfiguration; } diff --git a/java/client/src/main/java/glide/managers/ConnectionManager.java b/java/client/src/main/java/glide/managers/ConnectionManager.java index 99b383a9ed..443384d5a6 100644 --- a/java/client/src/main/java/glide/managers/ConnectionManager.java +++ b/java/client/src/main/java/glide/managers/ConnectionManager.java @@ -8,6 +8,7 @@ import connection_request.ConnectionRequestOuterClass.PubSubChannelsOrPatterns; import connection_request.ConnectionRequestOuterClass.PubSubSubscriptions; import connection_request.ConnectionRequestOuterClass.TlsMode; +import glide.api.models.configuration.AdvancedBaseClientConfiguration; import glide.api.models.configuration.BaseClientConfiguration; import glide.api.models.configuration.GlideClientConfiguration; import glide.api.models.configuration.GlideClusterClientConfiguration; @@ -171,6 +172,30 @@ private ConnectionRequest.Builder setupConnectionRequestBuilderGlideClient( connectionRequestBuilder.setPubsubSubscriptions(subscriptionsBuilder.build()); } + if (configuration.getAdvancedConfiguration() != null) { + connectionRequestBuilder = + setupConnectionRequestBuilderAdvancedBaseConfiguration( + connectionRequestBuilder, configuration.getAdvancedConfiguration()); + } + + return connectionRequestBuilder; + } + + /** + * Configures the {@link ConnectionRequest.Builder} with settings from the provided {@link + * AdvancedBaseClientConfiguration}. + * + * @param connectionRequestBuilder The builder for the {@link ConnectionRequest}. + * @param configuration The advanced configuration settings. + * @return The updated {@link ConnectionRequest.Builder}. 
+ */ + private ConnectionRequest.Builder setupConnectionRequestBuilderAdvancedBaseConfiguration( + ConnectionRequest.Builder connectionRequestBuilder, + AdvancedBaseClientConfiguration configuration) { + if (configuration.getConnectionTimeout() != null) { + connectionRequestBuilder.setConnectionTimeout(configuration.getConnectionTimeout()); + } + return connectionRequestBuilder; } @@ -199,6 +224,12 @@ private ConnectionRequest.Builder setupConnectionRequestBuilderGlideClusterClien connectionRequestBuilder.setPubsubSubscriptions(subscriptionsBuilder.build()); } + if (configuration.getAdvancedConfiguration() != null) { + connectionRequestBuilder = + setupConnectionRequestBuilderAdvancedBaseConfiguration( + connectionRequestBuilder, configuration.getAdvancedConfiguration()); + } + return connectionRequestBuilder; } diff --git a/java/integTest/src/test/java/glide/ConnectionTests.java b/java/integTest/src/test/java/glide/ConnectionTests.java index 2aec2e4e6b..45fea7065a 100644 --- a/java/integTest/src/test/java/glide/ConnectionTests.java +++ b/java/integTest/src/test/java/glide/ConnectionTests.java @@ -10,19 +10,33 @@ import static glide.api.models.configuration.RequestRoutingConfiguration.SlotType.PRIMARY; import static glide.api.models.configuration.RequestRoutingConfiguration.SlotType.REPLICA; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assumptions.assumeTrue; +import glide.api.BaseClient; import glide.api.GlideClient; import glide.api.GlideClusterClient; import glide.api.models.ClusterValue; import glide.api.models.commands.InfoOptions; +import glide.api.models.configuration.AdvancedGlideClientConfiguration; +import glide.api.models.configuration.AdvancedGlideClusterClientConfiguration; +import 
glide.api.models.configuration.BackoffStrategy; import glide.api.models.configuration.ReadFrom; import glide.api.models.configuration.RequestRoutingConfiguration; +import glide.api.models.exceptions.ClosingException; +import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.stream.Stream; import lombok.SneakyThrows; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; @Timeout(10) // seconds public class ConnectionTests { @@ -52,6 +66,35 @@ public GlideClusterClient createAzTestClient(String az) { .get(); } + @SneakyThrows + public BaseClient createConnectionTimeoutClient( + Boolean clusterMode, + int connectionTimeout, + int requestTimeout, + BackoffStrategy backoffStrategy) { + if (clusterMode) { + var advancedConfiguration = + AdvancedGlideClusterClientConfiguration.builder() + .connectionTimeout(connectionTimeout) + .build(); + return GlideClusterClient.createClient( + commonClusterClientConfig() + .advancedConfiguration(advancedConfiguration) + .requestTimeout(requestTimeout) + .build()) + .get(); + } + var advancedConfiguration = + AdvancedGlideClientConfiguration.builder().connectionTimeout(connectionTimeout).build(); + return GlideClient.createClient( + commonClientConfig() + .advancedConfiguration(advancedConfiguration) + .requestTimeout(requestTimeout) + .reconnectStrategy(backoffStrategy) + .build()) + .get(); + } + /** * Test that the client with AZ affinity strategy routes in a round-robin manner to all replicas * within the specified AZ. 
@@ -202,4 +245,76 @@ public void test_az_affinity_non_existing_az() { assertEquals(4, matchingEntries); azTestClient.close(); } + + @SneakyThrows + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void test_connection_timeout(boolean clusterMode) { + var backoffStrategy = + BackoffStrategy.builder().exponentBase(2).factor(100).numOfRetries(1).build(); + var client = createConnectionTimeoutClient(clusterMode, 250, 20000, backoffStrategy); + + // Runnable for long-running DEBUG SLEEP command + Runnable debugSleepTask = + () -> { + try { + if (client instanceof GlideClusterClient) { + ((GlideClusterClient) client) + .customCommand(new String[] {"DEBUG", "sleep", "7"}, ALL_NODES) + .get(); + } else if (client instanceof GlideClient) { + ((GlideClient) client).customCommand(new String[] {"DEBUG", "sleep", "7"}).get(); + } + } catch (InterruptedException | ExecutionException e) { + throw new RuntimeException("Error during DEBUG SLEEP command", e); + } + }; + + // Runnable for testing connection failure due to timeout + Runnable failToConnectTask = + () -> { + try { + Thread.sleep(1000); // Wait to ensure the debug sleep command is running + ExecutionException executionException = + assertThrows( + ExecutionException.class, + () -> createConnectionTimeoutClient(clusterMode, 100, 250, backoffStrategy)); + assertInstanceOf(ClosingException.class, executionException.getCause()); + assertTrue(executionException.getMessage().toLowerCase().contains("timed out")); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException("Thread was interrupted", e); + } + }; + + // Runnable for testing successful connection + Runnable connectToClientTask = + () -> { + try { + Thread.sleep(1000); // Wait to ensure the debug sleep command is running + var timeoutClient = + createConnectionTimeoutClient(clusterMode, 10000, 250, backoffStrategy); + assertEquals(timeoutClient.set("key", "value").get(), "OK"); + timeoutClient.close(); 
+ } catch (Exception e) { + throw new RuntimeException("Error during successful connection attempt", e); + } + }; + + // Execute all tasks concurrently + ExecutorService executorService = Executors.newFixedThreadPool(3); + try { + executorService.invokeAll( + List.of( + Executors.callable(debugSleepTask), + Executors.callable(failToConnectTask), + Executors.callable(connectToClientTask))); + } finally { + executorService.shutdown(); + // Clean up the main client + if (client != null) { + client.close(); + } + } + } } diff --git a/node/npm/glide/index.ts b/node/npm/glide/index.ts index c4dab9795b..a7fad935c1 100644 --- a/node/npm/glide/index.ts +++ b/node/npm/glide/index.ts @@ -213,6 +213,8 @@ function initialize() { Script, ObjectType, ClusterScanCursor, + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, BaseClientConfiguration, GlideClusterClientConfiguration, LevelOptions, @@ -290,6 +292,8 @@ function initialize() { GlideClient, GlideClusterClient, GlideClientConfiguration, + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, FunctionListOptions, FunctionListResponse, FunctionStatsSingleResponse, diff --git a/node/rust-client/src/lib.rs b/node/rust-client/src/lib.rs index 963f966f24..ffa5b5c47f 100644 --- a/node/rust-client/src/lib.rs +++ b/node/rust-client/src/lib.rs @@ -40,9 +40,13 @@ pub enum Level { pub const MAX_REQUEST_ARGS_LEN: u32 = MAX_REQUEST_ARGS_LENGTH as u32; #[napi] -pub const DEFAULT_TIMEOUT_IN_MILLISECONDS: u32 = +pub const DEFAULT_REQUEST_TIMEOUT_IN_MILLISECONDS: u32 = glide_core::client::DEFAULT_RESPONSE_TIMEOUT.as_millis() as u32; +#[napi] +pub const DEFAULT_CONNECTION_TIMEOUT_IN_MILLISECONDS: u32 = + glide_core::client::DEFAULT_CONNECTION_TIMEOUT.as_millis() as u32; + #[napi] pub const DEFAULT_INFLIGHT_REQUESTS_LIMIT: u32 = glide_core::client::DEFAULT_MAX_INFLIGHT_REQUESTS; diff --git a/node/src/BaseClient.ts b/node/src/BaseClient.ts index 71b1ffd89c..e011daf8a6 100644 --- 
a/node/src/BaseClient.ts +++ b/node/src/BaseClient.ts @@ -3,8 +3,9 @@ */ import { ClusterScanCursor, + DEFAULT_CONNECTION_TIMEOUT_IN_MILLISECONDS, DEFAULT_INFLIGHT_REQUESTS_LIMIT, - DEFAULT_TIMEOUT_IN_MILLISECONDS, + DEFAULT_REQUEST_TIMEOUT_IN_MILLISECONDS, Script, StartSocketConnection, getStatistics, @@ -606,7 +607,7 @@ export interface BaseClientConfiguration { * The duration in milliseconds that the client should wait for a request to complete. * This duration encompasses sending the request, awaiting for a response from the server, and any required reconnections or retries. * If the specified timeout is exceeded for a pending request, it will result in a timeout error. - * If not set, a default value will be used. + * If not explicitly set, a default value of 250 milliseconds will be used. * Value must be an integer. */ requestTimeout?: number; @@ -650,6 +651,33 @@ export interface BaseClientConfiguration { clientAz?: string; } +/** + * Represents advanced configuration settings for a client, including connection-related options. + * + * @remarks + * The `AdvancedBaseClientConfiguration` interface defines advanced configuration settings for managing the client's connection behavior. + * + * ### Connection Timeout + * + * - **Connection Timeout**: The `connectionTimeout` property specifies the duration (in milliseconds) the client should wait for a connection to be established. + * + * @example + * ```typescript + * const config: AdvancedBaseClientConfiguration = { + * connectionTimeout: 5000, // 5 seconds + * }; + * ``` + */ +export interface AdvancedBaseClientConfiguration { + /** + * The duration in milliseconds to wait for a TCP/TLS connection to complete. + * This applies both during initial client creation and any reconnections that may occur during request processing. + * **Note**: A high connection timeout may lead to prolonged blocking of the entire command pipeline. + * If not explicitly set, a default value of 250 milliseconds will be used. 
+ */ + connectionTimeout?: number; +} + /** * Enum of Valkey data types * `STRING` @@ -951,7 +979,7 @@ export class BaseClient { Logger.log("info", "Client lifetime", `construct client`); this.config = options; this.requestTimeout = - options?.requestTimeout ?? DEFAULT_TIMEOUT_IN_MILLISECONDS; + options?.requestTimeout ?? DEFAULT_REQUEST_TIMEOUT_IN_MILLISECONDS; this.socket = socket; this.socket .on("data", (data) => this.handleReadData(data)) @@ -7657,6 +7685,18 @@ export class BaseClient { }; } + /** + * @internal + */ + protected configureAdvancedConfigurationBase( + options: AdvancedBaseClientConfiguration, + request: connection_request.IConnectionRequest, + ) { + request.connectionTimeout = + options.connectionTimeout ?? + DEFAULT_CONNECTION_TIMEOUT_IN_MILLISECONDS; + } + /** * @internal */ diff --git a/node/src/GlideClient.ts b/node/src/GlideClient.ts index acec0c377f..fc9301bd75 100644 --- a/node/src/GlideClient.ts +++ b/node/src/GlideClient.ts @@ -4,6 +4,7 @@ import * as net from "net"; import { + AdvancedBaseClientConfiguration, BaseClient, BaseClientConfiguration, convertGlideRecordToRecord, @@ -171,8 +172,26 @@ export type GlideClientConfiguration = BaseClientConfiguration & { * Will be applied via SUBSCRIBE/PSUBSCRIBE commands during connection establishment. */ pubsubSubscriptions?: GlideClientConfiguration.PubSubSubscriptions; + /** + * Advanced configuration settings for the client. + */ + advancedConfiguration?: AdvancedGlideClientConfiguration; }; +/** + * Represents advanced configuration settings for creating a {@link GlideClient | GlideClient} used in {@link GlideClientConfiguration | GlideClientConfiguration}. + * + * + * @example + * ```typescript + * const config: AdvancedGlideClientConfiguration = { + * connectionTimeout: 500, // Set the connection timeout to 500ms + * }; + * ``` + */ +export type AdvancedGlideClientConfiguration = + AdvancedBaseClientConfiguration & {}; + /** * Client used for connection to standalone servers. 
* @@ -189,6 +208,14 @@ export class GlideClient extends BaseClient { configuration.databaseId = options.databaseId; configuration.connectionRetryStrategy = options.connectionBackoff; this.configurePubsub(options, configuration); + + if (options.advancedConfiguration) { + this.configureAdvancedConfigurationBase( + options.advancedConfiguration, + configuration, + ); + } + return configuration; } /** diff --git a/node/src/GlideClusterClient.ts b/node/src/GlideClusterClient.ts index 4e9aee579d..c12264f078 100644 --- a/node/src/GlideClusterClient.ts +++ b/node/src/GlideClusterClient.ts @@ -5,6 +5,7 @@ import { ClusterScanCursor, Script } from "glide-rs"; import * as net from "net"; import { + AdvancedBaseClientConfiguration, BaseClient, BaseClientConfiguration, Decoder, @@ -190,8 +191,26 @@ export type GlideClusterClientConfiguration = BaseClientConfiguration & { * Will be applied via SUBSCRIBE/PSUBSCRIBE/SSUBSCRIBE commands during connection establishment. */ pubsubSubscriptions?: GlideClusterClientConfiguration.PubSubSubscriptions; + /** + * Advanced configuration settings for the client. + */ + advancedConfiguration?: AdvancedGlideClusterClientConfiguration; }; +/** + * Represents advanced configuration settings for creating a {@link GlideClusterClient | GlideClusterClient} used in {@link GlideClusterClientConfiguration | GlideClusterClientConfiguration}. + * + * + * @example + * ```typescript + * const config: AdvancedGlideClusterClientConfiguration = { + * connectionTimeout: 500, // Set the connection timeout to 500ms + * }; + * ``` + */ +export type AdvancedGlideClusterClientConfiguration = + AdvancedBaseClientConfiguration & {}; + /** * If the command's routing is to one node we will get T as a response type, * otherwise, we will get a dictionary of address: nodeResponse, address is of type string and nodeResponse is of type T. 
@@ -504,6 +523,14 @@ export class GlideClusterClient extends BaseClient { } this.configurePubsub(options, configuration); + + if (options.advancedConfiguration) { + this.configureAdvancedConfigurationBase( + options.advancedConfiguration, + configuration, + ); + } + return configuration; } /** diff --git a/node/tests/GlideClient.test.ts b/node/tests/GlideClient.test.ts index 0c77bde519..08f39e19d2 100644 --- a/node/tests/GlideClient.test.ts +++ b/node/tests/GlideClient.test.ts @@ -979,6 +979,76 @@ describe("GlideClient", () => { }, ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "should handle connection timeout when client is blocked by long-running command (protocol: %p)", + async (protocol) => { + // Create a client configuration with a generous request timeout + const config = getClientConfigurationOption( + cluster.getAddresses(), + protocol, + { requestTimeout: 20000 }, // Long timeout to allow debugging operations (sleep for 7 seconds) + ); + + // Initialize the primary client + const client = await GlideClient.createClient(config); + + try { + // Run a long-running DEBUG SLEEP command using the first client (client) + const debugCommandPromise = client.customCommand( + ["DEBUG", "sleep", "7"], // Sleep for 7 seconds + ); + + // Function that tries to create a client with a short connection timeout (100ms) + const failToCreateClient = async () => { + await new Promise((resolve) => setTimeout(resolve, 1000)); // Wait for 1 second before retry + await expect( + GlideClient.createClient({ + connectionBackoff: { + exponentBase: 2, + factor: 100, + numberOfRetries: 1, + }, + advancedConfiguration: { connectionTimeout: 100 }, // 100ms connection timeout + ...config, // Include the rest of the config + }), + ).rejects.toThrowError(/timed out/i); // Ensure it throws a timeout error + }; + + // Function that verifies that a larger connection timeout allows connection + const connectWithLargeTimeout = async () => { + await new Promise((resolve) => 
setTimeout(resolve, 1000)); // Wait for 1 second before retry + const longerTimeoutClient = await GlideClient.createClient({ + connectionBackoff: { + exponentBase: 2, + factor: 100, + numberOfRetries: 1, + }, + advancedConfiguration: { connectionTimeout: 10000 }, // 10s connection timeout + ...config, // Include the rest of the config + }); + expect(await client.set("x", "y")).toEqual("OK"); + longerTimeoutClient.close(); // Close the client after successful connection + }; + + // Run both the long-running DEBUG SLEEP command and the client creation attempt in parallel + await Promise.all([ + debugCommandPromise, // Run the long-running command + failToCreateClient(), // Attempt to create the client with a short timeout + ]); + + // Run all tasks: fail short timeout, succeed with large timeout, and run the debug command + await Promise.all([ + debugCommandPromise, // Run the long-running command + connectWithLargeTimeout(), // Attempt to create the client with a short timeout + ]); + } finally { + // Clean up the test client and ensure everything is flushed and closed + client.close(); + } + }, + TIMEOUT, + ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "function kill RW func %p", async (protocol) => { diff --git a/node/tests/GlideClusterClient.test.ts b/node/tests/GlideClusterClient.test.ts index 43b039c61f..d6c6474872 100644 --- a/node/tests/GlideClusterClient.test.ts +++ b/node/tests/GlideClusterClient.test.ts @@ -1975,6 +1975,68 @@ describe("GlideClusterClient", () => { TIMEOUT, ); + it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( + "should handle connection timeout when client is blocked by long-running command (protocol: %p)", + async (protocol) => { + // Create a client configuration with a generous request timeout + const config = getClientConfigurationOption( + cluster.getAddresses(), + protocol, + { requestTimeout: 20000 }, // Long timeout to allow debugging operations (sleep for 7 seconds) + ); + + // Initialize the primary client 
+ const client = await GlideClusterClient.createClient(config); + + try { + // Run a long-running DEBUG SLEEP command using the first client (client) + const debugCommandPromise = client.customCommand( + ["DEBUG", "sleep", "7"], + { route: "allNodes" }, // Sleep for 7 seconds + ); + + // Function that tries to create a client with a short connection timeout (100ms) + const failToCreateClient = async () => { + await new Promise((resolve) => setTimeout(resolve, 1000)); // Wait for 1 second before retry + await expect( + GlideClusterClient.createClient({ + advancedConfiguration: { connectionTimeout: 100 }, // 100ms connection timeout + ...config, // Include the rest of the config + }), + ).rejects.toThrowError(/timed out/i); // Ensure it throws a timeout error + }; + + // Function that verifies that a larger connection timeout allows connection + const connectWithLargeTimeout = async () => { + await new Promise((resolve) => setTimeout(resolve, 1000)); // Wait for 1 second before retry + const longerTimeoutClient = + await GlideClusterClient.createClient({ + advancedConfiguration: { connectionTimeout: 10000 }, // 10s connection timeout + ...config, // Include the rest of the config + }); + expect(await client.set("x", "y")).toEqual("OK"); + longerTimeoutClient.close(); // Close the client after successful connection + }; + + // Run both the long-running DEBUG SLEEP command and the client creation attempt in parallel + await Promise.all([ + debugCommandPromise, // Run the long-running command + failToCreateClient(), // Attempt to create the client with a short timeout + ]); + + // Run all tasks: fail short timeout, succeed with large timeout, and run the debug command + await Promise.all([ + debugCommandPromise, // Run the long-running command + connectWithLargeTimeout(), // Attempt to create the client with a short timeout + ]); + } finally { + // Clean up the test client and ensure everything is flushed and closed + client.close(); + } + }, + TIMEOUT, + ); + it.each([ 
[ProtocolVersion.RESP2, 5], [ProtocolVersion.RESP2, 100], diff --git a/python/python/glide/__init__.py b/python/python/glide/__init__.py index 8f6ceac47b..f2ecc3da4e 100644 --- a/python/python/glide/__init__.py +++ b/python/python/glide/__init__.py @@ -111,6 +111,8 @@ TTransaction, ) from glide.config import ( + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, BackoffStrategy, GlideClientConfiguration, GlideClusterClientConfiguration, @@ -176,6 +178,8 @@ "TGlideClient", "TTransaction", # Config + "AdvancedGlideClientConfiguration", + "AdvancedGlideClusterClientConfiguration", "GlideClientConfiguration", "GlideClusterClientConfiguration", "BackoffStrategy", diff --git a/python/python/glide/config.py b/python/python/glide/config.py index b33c037cbf..fc1acda94c 100644 --- a/python/python/glide/config.py +++ b/python/python/glide/config.py @@ -129,6 +129,28 @@ class PeriodicChecksStatus(Enum): """ +class AdvancedBaseClientConfiguration: + """ + Represents the advanced configuration settings for a base Glide client. + + Args: + connection_timeout (Optional[int]): The duration in milliseconds to wait for a TCP/TLS connection to complete. + This applies both during initial client creation and any reconnections that may occur during request processing. + **Note**: A high connection timeout may lead to prolonged blocking of the entire command pipeline. + If not explicitly set, a default value of 250 milliseconds will be used. 
+ """ + + def __init__(self, connection_timeout: Optional[int] = None): + self.connection_timeout = connection_timeout + + def _create_a_protobuf_conn_request( + self, request: ConnectionRequest + ) -> ConnectionRequest: + if self.connection_timeout: + request.connection_timeout = self.connection_timeout + return request + + class BaseClientConfiguration: def __init__( self, @@ -141,6 +163,7 @@ def __init__( protocol: ProtocolVersion = ProtocolVersion.RESP3, inflight_requests_limit: Optional[int] = None, client_az: Optional[str] = None, + advanced_config: Optional[AdvancedBaseClientConfiguration] = None, ): """ Represents the configuration settings for a Glide client. @@ -163,12 +186,14 @@ def __init__( read_from (ReadFrom): If not set, `PRIMARY` will be used. request_timeout (Optional[int]): The duration in milliseconds that the client should wait for a request to complete. This duration encompasses sending the request, awaiting for a response from the server, and any required reconnections or retries. - If the specified timeout is exceeded for a pending request, it will result in a timeout error. If not set, a default value will be used. + If the specified timeout is exceeded for a pending request, it will result in a timeout error. If not explicitly set, a default value of 250 milliseconds will be used. client_name (Optional[str]): Client name to be used for the client. Will be used with CLIENT SETNAME command during connection establishment. inflight_requests_limit (Optional[int]): The maximum number of concurrent requests allowed to be in-flight (sent but not yet completed). This limit is used to control the memory usage and prevent the client from overwhelming the server or getting stuck in case of a queue backlog. If not set, a default value will be used. - + client_az (Optional[str]): Availability Zone of the client. + If ReadFrom strategy is AZAffinity, this setting ensures that readonly commands are directed to replicas within the specified AZ if exits. 
+ advanced_config (Optional[AdvancedBaseClientConfiguration]): Advanced configuration settings for the client. """ self.addresses = addresses self.use_tls = use_tls @@ -179,6 +204,7 @@ def __init__( self.protocol = protocol self.inflight_requests_limit = inflight_requests_limit self.client_az = client_az + self.advanced_config = advanced_config if read_from == ReadFrom.AZ_AFFINITY and not client_az: raise ValueError( @@ -218,6 +244,8 @@ def _create_a_protobuf_conn_request( request.inflight_requests_limit = self.inflight_requests_limit if self.client_az: request.client_az = self.client_az + if self.advanced_config: + self.advanced_config._create_a_protobuf_conn_request(request) return request @@ -230,6 +258,16 @@ def _get_pubsub_callback_and_context( return None, None +class AdvancedGlideClientConfiguration(AdvancedBaseClientConfiguration): + """ + Represents the advanced configuration settings for a Standalone Glide client. + """ + + def __init__(self, connection_timeout: Optional[int] = None): + + super().__init__(connection_timeout) + + class GlideClientConfiguration(BaseClientConfiguration): """ Represents the configuration settings for a Standalone Glide client. @@ -249,7 +287,7 @@ class GlideClientConfiguration(BaseClientConfiguration): request_timeout (Optional[int]): The duration in milliseconds that the client should wait for a request to complete. This duration encompasses sending the request, awaiting for a response from the server, and any required reconnections or retries. If the specified timeout is exceeded for a pending request, it will result in a timeout error. - If not set, a default value will be used. + If not explicitly set, a default value of 250 milliseconds will be used. reconnect_strategy (Optional[BackoffStrategy]): Strategy used to determine how and when to reconnect, in case of connection failures. If not set, a default backoff strategy will be used. 
@@ -261,7 +299,9 @@ class GlideClientConfiguration(BaseClientConfiguration): inflight_requests_limit (Optional[int]): The maximum number of concurrent requests allowed to be in-flight (sent but not yet completed). This limit is used to control the memory usage and prevent the client from overwhelming the server or getting stuck in case of a queue backlog. If not set, a default value will be used. - + client_az (Optional[str]): Availability Zone of the client. + If ReadFrom strategy is AZAffinity, this setting ensures that readonly commands are directed to replicas within the specified AZ if exits. + advanced_config (Optional[AdvancedGlideClientConfiguration]): Advanced configuration settings for the client, see `AdvancedGlideClientConfiguration`. """ class PubSubChannelModes(IntEnum): @@ -308,6 +348,7 @@ def __init__( pubsub_subscriptions: Optional[PubSubSubscriptions] = None, inflight_requests_limit: Optional[int] = None, client_az: Optional[str] = None, + advanced_config: Optional[AdvancedGlideClientConfiguration] = None, ): super().__init__( addresses=addresses, @@ -319,6 +360,7 @@ def __init__( protocol=protocol, inflight_requests_limit=inflight_requests_limit, client_az=client_az, + advanced_config=advanced_config, ) self.reconnect_strategy = reconnect_strategy self.database_id = database_id @@ -375,6 +417,15 @@ def _get_pubsub_callback_and_context( return None, None +class AdvancedGlideClusterClientConfiguration(AdvancedBaseClientConfiguration): + """ + Represents the advanced configuration settings for a Glide Cluster client. + """ + + def __init__(self, connection_timeout: Optional[int] = None): + super().__init__(connection_timeout) + + class GlideClusterClientConfiguration(BaseClientConfiguration): """ Represents the configuration settings for a Cluster Glide client. @@ -392,7 +443,7 @@ class GlideClusterClientConfiguration(BaseClientConfiguration): read_from (ReadFrom): If not set, `PRIMARY` will be used. 
request_timeout (Optional[int]): The duration in milliseconds that the client should wait for a request to complete. This duration encompasses sending the request, awaiting for a response from the server, and any required reconnections or retries. - If the specified timeout is exceeded for a pending request, it will result in a timeout error. If not set, a default value will be used. + If the specified timeout is exceeded for a pending request, it will result in a timeout error. If not explicitly set, a default value of 250 milliseconds will be used. client_name (Optional[str]): Client name to be used for the client. Will be used with CLIENT SETNAME command during connection establishment. protocol (ProtocolVersion): The version of the RESP protocol to communicate with the server. periodic_checks (Union[PeriodicChecksStatus, PeriodicChecksManualInterval]): Configure the periodic topology checks. @@ -404,7 +455,9 @@ class GlideClusterClientConfiguration(BaseClientConfiguration): inflight_requests_limit (Optional[int]): The maximum number of concurrent requests allowed to be in-flight (sent but not yet completed). This limit is used to control the memory usage and prevent the client from overwhelming the server or getting stuck in case of a queue backlog. If not set, a default value will be used. - + client_az (Optional[str]): Availability Zone of the client. + If ReadFrom strategy is AZAffinity, this setting ensures that readonly commands are directed to replicas within the specified AZ if exits. + advanced_config (Optional[AdvancedGlideClusterClientConfiguration]) : Advanced configuration settings for the client, see `AdvancedGlideClusterClientConfiguration`. 
Notes: @@ -459,6 +512,7 @@ def __init__( pubsub_subscriptions: Optional[PubSubSubscriptions] = None, inflight_requests_limit: Optional[int] = None, client_az: Optional[str] = None, + advanced_config: Optional[AdvancedGlideClusterClientConfiguration] = None, ): super().__init__( addresses=addresses, @@ -470,6 +524,7 @@ def __init__( protocol=protocol, inflight_requests_limit=inflight_requests_limit, client_az=client_az, + advanced_config=advanced_config, ) self.periodic_checks = periodic_checks self.pubsub_subscriptions = pubsub_subscriptions diff --git a/python/python/tests/conftest.py b/python/python/tests/conftest.py index 0ab5c9d6e9..b2a97b4d0a 100644 --- a/python/python/tests/conftest.py +++ b/python/python/tests/conftest.py @@ -5,6 +5,9 @@ import pytest from glide.config import ( + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, + BackoffStrategy, GlideClientConfiguration, GlideClusterClientConfiguration, NodeAddress, @@ -242,7 +245,8 @@ async def create_client( addresses: Optional[List[NodeAddress]] = None, client_name: Optional[str] = None, protocol: ProtocolVersion = ProtocolVersion.RESP3, - timeout: Optional[int] = 1000, + request_timeout: Optional[int] = 1000, + connection_timeout: Optional[int] = 1000, cluster_mode_pubsub: Optional[ GlideClusterClientConfiguration.PubSubSubscriptions ] = None, @@ -252,6 +256,7 @@ async def create_client( inflight_requests_limit: Optional[int] = None, read_from: ReadFrom = ReadFrom.PRIMARY, client_az: Optional[str] = None, + reconnect_strategy: Optional[BackoffStrategy] = None, valkey_cluster: Optional[ValkeyCluster] = None, ) -> Union[GlideClient, GlideClusterClient]: # Create async socket client @@ -268,11 +273,12 @@ async def create_client( credentials=credentials, client_name=client_name, protocol=protocol, - request_timeout=timeout, + request_timeout=request_timeout, pubsub_subscriptions=cluster_mode_pubsub, inflight_requests_limit=inflight_requests_limit, read_from=read_from, 
client_az=client_az, + advanced_config=AdvancedGlideClusterClientConfiguration(connection_timeout), ) return await GlideClusterClient.create(cluster_config) else: @@ -286,11 +292,13 @@ async def create_client( database_id=database_id, client_name=client_name, protocol=protocol, - request_timeout=timeout, + request_timeout=request_timeout, pubsub_subscriptions=standalone_mode_pubsub, inflight_requests_limit=inflight_requests_limit, read_from=read_from, client_az=client_az, + advanced_config=AdvancedGlideClientConfiguration(connection_timeout), + reconnect_strategy=reconnect_strategy, ) return await GlideClient.create(config) @@ -343,7 +351,7 @@ async def test_teardown(request, cluster_mode: bool, protocol: ProtocolVersion): try: # Try connecting without credentials client = await create_client( - request, cluster_mode, protocol=protocol, timeout=2000 + request, cluster_mode, protocol=protocol, request_timeout=2000 ) await client.custom_command(["FLUSHALL"]) await client.close() @@ -356,7 +364,7 @@ async def test_teardown(request, cluster_mode: bool, protocol: ProtocolVersion): request, cluster_mode, protocol=protocol, - timeout=2000, + request_timeout=2000, credentials=credentials, ) try: diff --git a/python/python/tests/test_async_client.py b/python/python/tests/test_async_client.py index d8d65d1ca7..3b1a458548 100644 --- a/python/python/tests/test_async_client.py +++ b/python/python/tests/test_async_client.py @@ -72,6 +72,7 @@ ) from glide.async_commands.transaction import ClusterTransaction, Transaction from glide.config import ( + BackoffStrategy, GlideClientConfiguration, GlideClusterClientConfiguration, ProtocolVersion, @@ -128,7 +129,7 @@ async def test_register_client_name_and_version(self, glide_client: TGlideClient @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_send_and_receive_large_values(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, 
protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**25 # 33mb key = "0" * length @@ -302,6 +303,90 @@ async def test_statistics(self, glide_client: TGlideClient): assert "total_clients" in stats assert len(stats) == 2 + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_connection_timeout( + self, + request, + cluster_mode: bool, + protocol: ProtocolVersion, + ): + + client = await create_client( + request, + cluster_mode, + protocol=protocol, + request_timeout=2000, + connection_timeout=2000, + ) + assert isinstance(client, (GlideClient, GlideClusterClient)) + + assert await client.set("key", "value") == "OK" + + await client.close() + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_connection_timeout_when_client_is_blocked( + self, + request, + cluster_mode: bool, + protocol: ProtocolVersion, + ): + client = await create_client( + request, + cluster_mode, + protocol=protocol, + request_timeout=20000, # 20 seconds timeout + ) + + async def run_debug_sleep(): + """ + Run a long-running DEBUG SLEEP command. 
+ """ + command = ["DEBUG", "sleep", "7"] + if isinstance(client, GlideClusterClient): + await client.custom_command(command, AllNodes()) + else: + await client.custom_command(command) + + async def fail_to_connect_to_client(): + # try to connect with a small timeout connection + await asyncio.sleep(1) + with pytest.raises(ClosingError) as e: + await create_client( + request, + cluster_mode, + protocol=protocol, + connection_timeout=100, # 100 ms + reconnect_strategy=BackoffStrategy( + 1, 100, 2 + ), # needs to be configured so that we wont be connected within 7 seconds bc of default retries + ) + assert "timed out" in str(e) + + async def connect_to_client(): + # Create a second client with a connection timeout of 7 seconds + await asyncio.sleep(1) + timeout_client = await create_client( + request, + cluster_mode, + protocol=protocol, + connection_timeout=10000, # 10-second connection timeout + reconnect_strategy=BackoffStrategy(1, 100, 2), + ) + + # Ensure the second client can connect and perform a simple operation + assert await timeout_client.set("key", "value") == "OK" + await timeout_client.close() + + # Run tests + await asyncio.gather(run_debug_sleep(), fail_to_connect_to_client()) + await asyncio.gather(run_debug_sleep(), connect_to_client()) + + # Clean up the main client + await client.close() + @pytest.mark.asyncio class TestCommands: @@ -5424,7 +5509,10 @@ async def test_xread_edge_cases_and_failures( ) test_client = await create_client( - request=request, protocol=protocol, cluster_mode=cluster_mode, timeout=900 + request=request, + protocol=protocol, + cluster_mode=cluster_mode, + request_timeout=900, ) # ensure command doesn't time out even if timeout > request timeout assert ( @@ -5817,7 +5905,10 @@ async def test_xreadgroup_edge_cases_and_failures( ) test_client = await create_client( - request=request, protocol=protocol, cluster_mode=cluster_mode, timeout=900 + request=request, + protocol=protocol, + cluster_mode=cluster_mode, + 
request_timeout=900, ) timeout_key = f"{{testKey}}:{get_random_string(10)}" timeout_group_name = get_random_string(10) @@ -8337,11 +8428,11 @@ async def test_function_stats_running_script( # create a second client to run fcall test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) test_client2 = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) async def endless_fcall_route_call(): @@ -8466,7 +8557,7 @@ async def test_function_kill_no_write( # create a second client to run fcall test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=15000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=15000 ) async def endless_fcall_route_call(): @@ -8521,7 +8612,7 @@ async def test_function_kill_write_is_unkillable( # create a second client to run fcall - and give it a long timeout test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=15000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=15000 ) # call fcall to run the function loaded function @@ -10369,7 +10460,7 @@ async def test_script_binary(self, glide_client: TGlideClient): @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_script_large_keys_no_args(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**13 # 8kb key = "0" * length @@ -10381,7 +10472,7 @@ async def test_script_large_keys_no_args(self, request, cluster_mode, protocol): @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, 
ProtocolVersion.RESP3]) async def test_script_large_args_no_keys(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**12 # 4kb arg1 = "0" * length @@ -10397,7 +10488,7 @@ async def test_script_large_args_no_keys(self, request, cluster_mode, protocol): @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_script_large_keys_and_args(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**12 # 4kb key = "0" * length @@ -10481,7 +10572,7 @@ async def test_script_kill_route( # Create a second client to run the script test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) await script_kill_tests(glide_client, test_client, route) @@ -10497,7 +10588,7 @@ async def test_script_kill_no_route( ): # Create a second client to run the script test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) await script_kill_tests(glide_client, test_client) @@ -10509,12 +10600,12 @@ async def test_script_kill_unkillable( ): # Create a second client to run the script test_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=30000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=30000 ) # Create a second client to kill the script test_client2 = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=15000 + request, 
cluster_mode=cluster_mode, protocol=protocol, request_timeout=15000 ) # Add test for script_kill with writing script diff --git a/python/python/tests/test_config.py b/python/python/tests/test_config.py index 3b22adb09c..2476d8ec0f 100644 --- a/python/python/tests/test_config.py +++ b/python/python/tests/test_config.py @@ -1,16 +1,22 @@ # Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from glide.config import ( + AdvancedGlideClientConfiguration, + AdvancedGlideClusterClientConfiguration, BaseClientConfiguration, + GlideClientConfiguration, GlideClusterClientConfiguration, NodeAddress, PeriodicChecksManualInterval, PeriodicChecksStatus, + ProtocolVersion, ReadFrom, ) +from glide.glide_client import GlideClient, GlideClusterClient from glide.protobuf.connection_request_pb2 import ConnectionRequest from glide.protobuf.connection_request_pb2 import ReadFrom as ProtobufReadFrom from glide.protobuf.connection_request_pb2 import TlsMode +from tests.conftest import create_client def test_default_client_config(): @@ -67,3 +73,24 @@ def test_convert_config_with_azaffinity_to_protobuf(): assert request.tls_mode is TlsMode.SecureTls assert request.read_from == ProtobufReadFrom.AZAffinity assert request.client_az == az + + +def test_connection_timeout_in_protobuf_request(): + connection_timeout = 5000 # in milliseconds + config = GlideClientConfiguration( + [NodeAddress("127.0.0.1")], + advanced_config=AdvancedGlideClientConfiguration(connection_timeout), + ) + request = config._create_a_protobuf_conn_request() + + assert isinstance(request, ConnectionRequest) + assert request.connection_timeout == connection_timeout + + config = GlideClusterClientConfiguration( + [NodeAddress("127.0.0.1")], + advanced_config=AdvancedGlideClusterClientConfiguration(connection_timeout), + ) + request = config._create_a_protobuf_conn_request(cluster_mode=True) + + assert isinstance(request, ConnectionRequest) + assert request.connection_timeout == connection_timeout 
diff --git a/python/python/tests/test_pubsub.py b/python/python/tests/test_pubsub.py index 6069104ed7..60baf383b2 100644 --- a/python/python/tests/test_pubsub.py +++ b/python/python/tests/test_pubsub.py @@ -66,7 +66,7 @@ async def create_two_clients_with_pubsub( cluster_mode_pubsub=cluster_mode_pubsub1, standalone_mode_pubsub=standalone_mode_pubsub1, protocol=protocol, - timeout=timeout, + request_timeout=timeout, ) try: client2 = await create_client( @@ -75,7 +75,7 @@ async def create_two_clients_with_pubsub( cluster_mode_pubsub=cluster_mode_pubsub2, standalone_mode_pubsub=standalone_mode_pubsub2, protocol=protocol, - timeout=timeout, + request_timeout=timeout, ) except Exception as e: await client1.close() diff --git a/python/python/tests/test_read_from_strategy.py b/python/python/tests/test_read_from_strategy.py index 03f3f8e9ae..cddb1e6f10 100644 --- a/python/python/tests/test_read_from_strategy.py +++ b/python/python/tests/test_read_from_strategy.py @@ -46,7 +46,7 @@ async def test_routing_with_az_affinity_strategy_to_1_replica( cluster_mode, # addresses=multiple_replicas_cluster.nodes_addr, protocol=protocol, - timeout=2000, + request_timeout=2000, ) # Reset the availability zone for all nodes @@ -67,7 +67,7 @@ async def test_routing_with_az_affinity_strategy_to_1_replica( cluster_mode, protocol=protocol, read_from=ReadFrom.AZ_AFFINITY, - timeout=2000, + request_timeout=2000, client_az=az, ) @@ -113,7 +113,7 @@ async def test_routing_by_slot_to_replica_with_az_affinity_strategy_to_all_repli cluster_mode, # addresses=multiple_replicas_cluster.nodes_addr, protocol=protocol, - timeout=2000, + request_timeout=2000, ) assert await client_for_config_set.config_resetstat() == OK await client_for_config_set.custom_command( @@ -125,7 +125,7 @@ async def test_routing_by_slot_to_replica_with_az_affinity_strategy_to_all_repli cluster_mode, protocol=protocol, read_from=ReadFrom.AZ_AFFINITY, - timeout=2000, + request_timeout=2000, client_az=az, ) azs = await 
client_for_testing_az.custom_command( @@ -181,7 +181,7 @@ async def test_az_affinity_non_existing_az( # addresses=multiple_replicas_cluster.nodes_addr, protocol=protocol, read_from=ReadFrom.AZ_AFFINITY, - timeout=2000, + request_timeout=2000, client_az="non-existing-az", ) assert await client_for_testing_az.config_resetstat() == OK @@ -217,5 +217,5 @@ async def test_az_affinity_requires_client_az( cluster_mode=cluster_mode, protocol=protocol, read_from=ReadFrom.AZ_AFFINITY, - timeout=2000, + request_timeout=2000, ) diff --git a/python/python/tests/test_transaction.py b/python/python/tests/test_transaction.py index c623f4e8c9..e284bb1100 100644 --- a/python/python/tests/test_transaction.py +++ b/python/python/tests/test_transaction.py @@ -972,7 +972,7 @@ async def test_can_return_null_on_watch_transaction_failures( @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_transaction_large_values(self, request, cluster_mode, protocol): glide_client = await create_client( - request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + request, cluster_mode=cluster_mode, protocol=protocol, request_timeout=5000 ) length = 2**25 # 33mb key = "0" * length diff --git a/utils/cluster_manager.py b/utils/cluster_manager.py index dc196bcd4f..a167da3464 100644 --- a/utils/cluster_manager.py +++ b/utils/cluster_manager.py @@ -341,10 +341,22 @@ def get_server_command() -> str: except Exception as e: logging.error(f"Error checking {server}: {e}") raise Exception( - "Neither valkey-server nor redis-server found in the system." 
+ "Neither valkey-server nor redis-server found in the system.") + + def get_server_version(server_name): + result = subprocess.run( + [server_name, "--version"], capture_output=True, text=True + ) + version_output = result.stdout + version_match = re.search( + r"(?:Redis|Valkey) server v=(\d+\.\d+\.\d+)", version_output, re.IGNORECASE ) + if version_match: + return tuple(map(int, version_match.group(1).split("."))) + raise Exception("Unable to determine server version.") server_name = get_server_command() + server_version = get_server_version(server_name) logfile = f"{node_folder}/redis.log" # Define command arguments cmd_args = [ @@ -360,6 +372,8 @@ def get_server_command() -> str: "--logfile", logfile, ] + if server_version >= (7, 0, 0): + cmd_args.extend(["--enable-debug-command", "yes"]) if load_module: if len(load_module) == 0: raise ValueError( From 151d9b13b3d25d55d9d44ae47c2dfee3255316ab Mon Sep 17 00:00:00 2001 From: Avi Fenesh <55848801+avifenesh@users.noreply.github.com> Date: Sun, 29 Dec 2024 12:34:52 +0200 Subject: [PATCH 17/32] CD fixes nedded for Release 1.2.1 (#2887) Refactor CI workflows to streamline engine installation and improve setup for self-hosted runners Signed-off-by: avifenesh --- .github/workflows/java-cd.yml | 4 ++++ .github/workflows/npm-cd.yml | 20 +++++++++++--------- .github/workflows/pypi-cd.yml | 29 +++++------------------------ 3 files changed, 20 insertions(+), 33 deletions(-) diff --git a/.github/workflows/java-cd.yml b/.github/workflows/java-cd.yml index 5e2fd5886e..ed618f1708 100644 --- a/.github/workflows/java-cd.yml +++ b/.github/workflows/java-cd.yml @@ -225,6 +225,10 @@ jobs: host: ${{ fromJson(needs.load-platform-matrix.outputs.PLATFORM_MATRIX) }} runs-on: ${{ matrix.host.RUNNER }} steps: + - name: Setup self-hosted runner access + if: ${{matrix.host.TARGET == 'aarch64-unknown-linux-gnu' }} + run: sudo chown -R $USER:$USER /home/ubuntu/action-runner-ilia/_work/valkey-glide + - name: Checkout uses: actions/checkout@v4 
diff --git a/.github/workflows/npm-cd.yml b/.github/workflows/npm-cd.yml index 24497e83cc..3e5f673a4c 100644 --- a/.github/workflows/npm-cd.yml +++ b/.github/workflows/npm-cd.yml @@ -308,23 +308,18 @@ jobs: if: ${{ matrix.build.TARGET == 'aarch64-unknown-linux-gnu' }} run: sudo chown -R $USER:$USER /home/ubuntu/actions-runner/_work/valkey-glide - - name: install Redis and git for alpine + - name: install redis and git for alpine if: ${{ contains(matrix.build.TARGET, 'musl') }} run: | apk update - apk add redis git + apk add git redis node -v - - name: install Redis and Python for ubuntu + - name: install Python for ubuntu if: ${{ contains(matrix.build.TARGET, 'linux-gnu') }} run: | sudo apt-get update - sudo apt-get install redis-server python3 - - - name: install Redis, Python for macos - if: ${{ contains(matrix.build.RUNNER, 'mac') }} - run: | - brew install redis python3 + sudo apt-get install python3 - name: Checkout if: ${{ matrix.build.TARGET != 'aarch64-unknown-linux-musl'}} @@ -339,6 +334,13 @@ jobs: npm-auth-token: ${{ secrets.NPM_AUTH_TOKEN }} arch: ${{ matrix.build.ARCH }} + - name: Install engine + if: ${{ !contains(matrix.build.TARGET, 'musl') }} + uses: ./.github/workflows/install-engine + with: + engine-version: "8.0" + target: ${{ matrix.build.target }} + - name: Setup node if: ${{ !contains(matrix.build.TARGET, 'musl') }} uses: actions/setup-node@v4 diff --git a/.github/workflows/pypi-cd.yml b/.github/workflows/pypi-cd.yml index 4ea517a818..28d8de8579 100644 --- a/.github/workflows/pypi-cd.yml +++ b/.github/workflows/pypi-cd.yml @@ -232,31 +232,12 @@ jobs: with: python-version: 3.12 - - name: Install engine Ubuntu ARM - if: ${{ matrix.build.TARGET == 'aarch64-unknown-linux-gnu' }} - shell: bash - # in self hosted runner we first want to check that engine is not already installed - run: | - if [[ $(`which redis-server`) == '' ]] - then - sudo apt-get update - sudo apt-get install -y redis-server - else - echo "Redis is already installed" - fi - - - 
name: Install engine Ubuntu x86 - if: ${{ matrix.build.TARGET == 'x86_64-unknown-linux-gnu' }} - shell: bash - run: | - sudo apt-get update - sudo apt-get install -y redis-server + - name: Install engine + uses: ./.github/workflows/install-engine + with: + engine-version: "8.0" + target: ${{ matrix.build.target }} - - name: Install engine MacOS - if: ${{ matrix.build.OS == 'macos' }} - shell: bash - run: | - brew install redis - name: Check if RC and set a distribution tag for the package shell: bash From 110a1dffe435f91459e3769d26429bd04e701102 Mon Sep 17 00:00:00 2001 From: ikolomi <152477505+ikolomi@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:31:09 +0200 Subject: [PATCH 18/32] Update merging guidance in PR template Signed-off-by: ikolomi <152477505+ikolomi@users.noreply.github.com> --- .github/pull_request_template.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index ff120235e3..85aa5ac214 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -19,4 +19,4 @@ Before submitting the PR make sure the following are checked: - [ ] Tests are added or updated. - [ ] CHANGELOG.md and documentation files are updated. - [ ] Destination branch is correct - main or release -- [ ] Commits will be squashed upon merging. +- [ ] Create merge commit if merging release branach into main, squash otherwise. 
From 0277cd6bb853ec05bf3e2fde3e0f991fc88c3399 Mon Sep 17 00:00:00 2001 From: ikolomi <152477505+ikolomi@users.noreply.github.com> Date: Mon, 30 Dec 2024 16:32:51 +0200 Subject: [PATCH 19/32] Update pull_request_template.md Signed-off-by: ikolomi <152477505+ikolomi@users.noreply.github.com> --- .github/pull_request_template.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 85aa5ac214..df7ed24749 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -19,4 +19,4 @@ Before submitting the PR make sure the following are checked: - [ ] Tests are added or updated. - [ ] CHANGELOG.md and documentation files are updated. - [ ] Destination branch is correct - main or release -- [ ] Create merge commit if merging release branach into main, squash otherwise. +- [ ] Create merge commit if merging release branch into main, squash otherwise. From e507c71f8f96afb032e8776740982d65dd96d96a Mon Sep 17 00:00:00 2001 From: ikolomi Date: Mon, 30 Dec 2024 17:09:18 +0200 Subject: [PATCH 20/32] Merge fixups Signed-off-by: ikolomi --- python/python/tests/test_api_export.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/python/tests/test_api_export.py b/python/python/tests/test_api_export.py index 996a8821a3..e169950da0 100644 --- a/python/python/tests/test_api_export.py +++ b/python/python/tests/test_api_export.py @@ -55,6 +55,7 @@ def _get_export_rename_map(): "FtSearchKeywords", # ClassDef "FtAggregateKeywords", # ClassDef "FtProfileKeywords", # ClassDef + "AdvancedBaseClientConfiguration", # ClassDef ] From 78b849b85f456c4798e40ab3d2b648b0f554eb1e Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Mon, 30 Dec 2024 07:53:55 -0800 Subject: [PATCH 21/32] Go: `ZREM`. 
(#2880) Signed-off-by: Yury-Fridlyand --- go/api/base_client.go | 8 +++++++ go/api/sorted_set_commands.go | 20 ++++++++++++++++ go/integTest/shared_commands_test.go | 36 ++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) diff --git a/go/api/base_client.go b/go/api/base_client.go index 3de2a5170f..d4f97f05d0 100644 --- a/go/api/base_client.go +++ b/go/api/base_client.go @@ -1405,3 +1405,11 @@ func (client *baseClient) ZPopMaxWithCount(key string, count int64) (map[Result[ } return handleStringDoubleMapResponse(result) } + +func (client *baseClient) ZRem(key string, members []string) (Result[int64], error) { + result, err := client.executeCommand(C.ZRem, append([]string{key}, members...)) + if err != nil { + return CreateNilInt64Result(), err + } + return handleLongResponse(result) +} diff --git a/go/api/sorted_set_commands.go b/go/api/sorted_set_commands.go index 0296dae8d5..835f7a5228 100644 --- a/go/api/sorted_set_commands.go +++ b/go/api/sorted_set_commands.go @@ -192,4 +192,24 @@ type SortedSetCommands interface { // // [valkey.io]: https://valkey.io/commands/zpopmin/ ZPopMaxWithCount(key string, count int64) (map[Result[string]]Result[float64], error) + + // Removes the specified members from the sorted set stored at `key`. + // Specified members that are not a member of this set are ignored. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the sorted set. + // members - The members to remove. + // + // Return value: + // The number of members that were removed from the sorted set, not including non-existing members. + // If `key` does not exist, it is treated as an empty sorted set, and this command returns 0. 
+ // + // Example: + // res, err := client.ZRem("mySortedSet", []string{""member1", "member2", "missing"}) + // fmt.Println(res.Value()) // Output: 2 + // + // [valkey.io]: https://valkey.io/commands/zrem/ + ZRem(key string, members []string) (Result[int64], error) } diff --git a/go/integTest/shared_commands_test.go b/go/integTest/shared_commands_test.go index b508b69004..51881f710d 100644 --- a/go/integTest/shared_commands_test.go +++ b/go/integTest/shared_commands_test.go @@ -4098,3 +4098,39 @@ func (suite *GlideTestSuite) TestZPopMax() { assert.IsType(suite.T(), &api.RequestError{}, err) }) } + +func (suite *GlideTestSuite) TestZRem() { + suite.runWithDefaultClients(func(client api.BaseClient) { + key := uuid.New().String() + memberScoreMap := map[string]float64{ + "one": 1.0, + "two": 2.0, + "three": 3.0, + } + res, err := client.ZAdd(key, memberScoreMap) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(3), res.Value()) + + // no members to remove + _, err = client.ZRem(key, []string{}) + assert.NotNil(suite.T(), err) + assert.IsType(suite.T(), &api.RequestError{}, err) + + res, err = client.ZRem(key, []string{"one"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(1), res.Value()) + + // TODO: run ZCard there + res, err = client.ZRem(key, []string{"one", "two", "three"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(2), res.Value()) + + // non sorted set key + _, err = client.Set(key, "test") + assert.Nil(suite.T(), err) + + _, err = client.ZRem(key, []string{"value"}) + assert.NotNil(suite.T(), err) + assert.IsType(suite.T(), &api.RequestError{}, err) + }) +} From abec8855b3fd3d34db9704d9cdcd5225474e44b0 Mon Sep 17 00:00:00 2001 From: Niharika Bhavaraju <31915502+niharikabhavaraju@users.noreply.github.com> Date: Tue, 31 Dec 2024 04:52:52 +0000 Subject: [PATCH 22/32] Go: Implement Pfadd, Pfcount and Select commands (#2822) * pfadd, pfcount and select commands Signed-off-by: Niharika Bhavaraju --- go/api/base_client.go | 
19 ++++++ go/api/glide_client.go | 26 ++++++++ go/api/hyperloglog_commands.go | 57 +++++++++++++++++ go/integTest/shared_commands_test.go | 81 ++++++++++++++++++++++++ go/integTest/standalone_commands_test.go | 57 +++++++++++++++++ 5 files changed, 240 insertions(+) create mode 100644 go/api/hyperloglog_commands.go diff --git a/go/api/base_client.go b/go/api/base_client.go index d4f97f05d0..8e4cb5a020 100644 --- a/go/api/base_client.go +++ b/go/api/base_client.go @@ -31,6 +31,7 @@ type BaseClient interface { StreamCommands SortedSetCommands ConnectionManagementCommands + HyperLogLogCommands GenericBaseCommands // Close terminates the client by closing all associated resources. Close() @@ -1213,6 +1214,24 @@ func (client *baseClient) PTTL(key string) (Result[int64], error) { return handleLongResponse(result) } +func (client *baseClient) PfAdd(key string, elements []string) (Result[int64], error) { + result, err := client.executeCommand(C.PfAdd, append([]string{key}, elements...)) + if err != nil { + return CreateNilInt64Result(), err + } + + return handleLongResponse(result) +} + +func (client *baseClient) PfCount(keys []string) (Result[int64], error) { + result, err := client.executeCommand(C.PfCount, keys) + if err != nil { + return CreateNilInt64Result(), err + } + + return handleLongResponse(result) +} + func (client *baseClient) Unlink(keys []string) (Result[int64], error) { result, err := client.executeCommand(C.Unlink, keys) if err != nil { diff --git a/go/api/glide_client.go b/go/api/glide_client.go index 04e43266f2..a0b38479b0 100644 --- a/go/api/glide_client.go +++ b/go/api/glide_client.go @@ -106,3 +106,29 @@ func (client *GlideClient) ConfigGet(args []string) (map[Result[string]]Result[s } return handleStringToStringMapResponse(res) } + +// Select changes the currently selected database. +// +// Parameters: +// +// index - The index of the database to select. +// +// Return value: +// +// A simple OK response. 
+// +// Example: +// +// result, err := client.Select(2) +// result.Value() : "OK" +// result.IsNil() : false +// +// [valkey.io]: https://valkey.io/commands/select/ +func (client *GlideClient) Select(index int64) (Result[string], error) { + result, err := client.executeCommand(C.Select, []string{utils.IntToString(index)}) + if err != nil { + return CreateNilStringResult(), err + } + + return handleStringResponse(result) +} diff --git a/go/api/hyperloglog_commands.go b/go/api/hyperloglog_commands.go new file mode 100644 index 0000000000..12fcd8d566 --- /dev/null +++ b/go/api/hyperloglog_commands.go @@ -0,0 +1,57 @@ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + +package api + +// Supports commands and transactions for the "HyperLogLog" group of commands for standalone and cluster clients. +// +// See [valkey.io] for details. +// +// [valkey.io]: https://valkey.io/commands/#hyperloglog +type HyperLogLogCommands interface { + // PfAdd adds all elements to the HyperLogLog data structure stored at the specified key. + // Creates a new structure if the key does not exist. + // When no elements are provided, and key exists and is a HyperLogLog, then no operation is performed. + // If key does not exist, then the HyperLogLog structure is created. + // + // Parameters: + // key - The key of the HyperLogLog data structure to add elements into. + // elements - An array of members to add to the HyperLogLog stored at key. + // + // Return value: + // If the HyperLogLog is newly created, or if the HyperLogLog approximated cardinality is + // altered, then returns 1. Otherwise, returns 0. 
+ // + // Example: + // result, err := client.PfAdd("key",[]string{"value1", "value2", "value3"}) + // result.Value(): 1 + // result.IsNil(): false + // + // [valkey.io]: https://valkey.io/commands/pfadd/ + PfAdd(key string, elements []string) (Result[int64], error) + + // Estimates the cardinality of the data stored in a HyperLogLog structure for a single key or + // calculates the combined cardinality of multiple keys by merging their HyperLogLogs temporarily. + // + // Note: + // In cluster mode, if keys in `keys` map to different hash slots, the command + // will be split across these slots and executed separately for each. This means the command + // is atomic only at the slot level. If one or more slot-specific requests fail, the entire + // call will return the first encountered error, even though some requests may have succeeded + // while others did not. If this behavior impacts your application logic, consider splitting + // the request into sub-requests per slot to ensure atomicity. + // + // Parameters: + // keys - The keys of the HyperLogLog data structures to be analyzed. + // + // Return value: + // The approximated cardinality of given HyperLogLog data structures. + // The cardinality of a key that does not exist is 0.
+ // + // Example: + // result, err := client.PfCount([]string{"key1","key2"}) + // result.Value(): 5 + // result.IsNil(): false + // + // [valkey.io]: https://valkey.io/commands/pfcount/ + PfCount(keys []string) (Result[int64], error) +} diff --git a/go/integTest/shared_commands_test.go b/go/integTest/shared_commands_test.go index 51881f710d..30a96f0d04 100644 --- a/go/integTest/shared_commands_test.go +++ b/go/integTest/shared_commands_test.go @@ -3623,6 +3623,87 @@ func (suite *GlideTestSuite) TestPTTL_WithExpiredKey() { }) } +func (suite *GlideTestSuite) TestPfAdd_SuccessfulAddition() { + suite.runWithDefaultClients(func(client api.BaseClient) { + key := uuid.New().String() + res, err := client.PfAdd(key, []string{"a", "b", "c", "d", "e"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(1), res.Value()) + }) +} + +func (suite *GlideTestSuite) TestPfAdd_DuplicateElements() { + suite.runWithDefaultClients(func(client api.BaseClient) { + key := uuid.New().String() + + // case : Add elements and add same elements again + res, err := client.PfAdd(key, []string{"a", "b", "c", "d", "e"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(1), res.Value()) + + res2, err := client.PfAdd(key, []string{"a", "b", "c", "d", "e"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(0), res2.Value()) + + // case : (mixed elements) add new elements with 1 duplicate elements + res1, err := client.PfAdd(key, []string{"f", "g", "h"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(1), res1.Value()) + + res2, err = client.PfAdd(key, []string{"i", "j", "g"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(1), res2.Value()) + + // case : add empty array(no elements to the HyperLogLog) + res, err = client.PfAdd(key, []string{}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(0), res.Value()) + }) +} + +func (suite *GlideTestSuite) TestPfCount_SingleKey() { + suite.runWithDefaultClients(func(client api.BaseClient) { 
+ key := uuid.New().String() + res, err := client.PfAdd(key, []string{"i", "j", "g"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(1), res.Value()) + + resCount, err := client.PfCount([]string{key}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(3), resCount.Value()) + }) +} + +func (suite *GlideTestSuite) TestPfCount_MultipleKeys() { + suite.runWithDefaultClients(func(client api.BaseClient) { + key1 := uuid.New().String() + "{group}" + key2 := uuid.New().String() + "{group}" + + res, err := client.PfAdd(key1, []string{"a", "b", "c"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(1), res.Value()) + + res, err = client.PfAdd(key2, []string{"c", "d", "e"}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(1), res.Value()) + + resCount, err := client.PfCount([]string{key1, key2}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(5), resCount.Value()) + }) +} + +func (suite *GlideTestSuite) TestPfCount_NoExistingKeys() { + suite.runWithDefaultClients(func(client api.BaseClient) { + key1 := uuid.New().String() + "{group}" + key2 := uuid.New().String() + "{group}" + + resCount, err := client.PfCount([]string{key1, key2}) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), int64(0), resCount.Value()) + }) +} + func (suite *GlideTestSuite) TestBLMove() { if suite.serverVersion < "6.2.0" { suite.T().Skip("This feature is added in version 6.2.0") diff --git a/go/integTest/standalone_commands_test.go b/go/integTest/standalone_commands_test.go index 318c2d18ed..26cd0d5077 100644 --- a/go/integTest/standalone_commands_test.go +++ b/go/integTest/standalone_commands_test.go @@ -225,3 +225,60 @@ func (suite *GlideTestSuite) TestConfigSetAndGet_invalidArgs() { assert.Equal(suite.T(), map[api.Result[string]]api.Result[string]{}, result2) assert.Nil(suite.T(), err) } + +func (suite *GlideTestSuite) TestSelect_WithValidIndex() { + client := suite.defaultClient() + index := int64(1) + result, err := 
client.Select(index) + + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), "OK", result.Value()) + + key := uuid.New().String() + value := uuid.New().String() + suite.verifyOK(client.Set(key, value)) + + res, err := client.Get(key) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), value, res.Value()) +} + +func (suite *GlideTestSuite) TestSelect_InvalidIndex_OutOfBounds() { + client := suite.defaultClient() + + result, err := client.Select(-1) + assert.NotNil(suite.T(), err) + assert.Equal(suite.T(), "", result.Value()) + + result, err = client.Select(1000) + assert.NotNil(suite.T(), err) + assert.Equal(suite.T(), "", result.Value()) +} + +func (suite *GlideTestSuite) TestSelect_SwitchBetweenDatabases() { + client := suite.defaultClient() + + key1 := uuid.New().String() + value1 := uuid.New().String() + suite.verifyOK(client.Select(0)) + suite.verifyOK(client.Set(key1, value1)) + + key2 := uuid.New().String() + value2 := uuid.New().String() + suite.verifyOK(client.Select(1)) + suite.verifyOK(client.Set(key2, value2)) + + result, err := client.Get(key1) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), "", result.Value()) + + suite.verifyOK(client.Select(0)) + result, err = client.Get(key2) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), "", result.Value()) + + suite.verifyOK(client.Select(1)) + result, err = client.Get(key2) + assert.Nil(suite.T(), err) + assert.Equal(suite.T(), value2, result.Value()) +} From a26f914f7b2c92a7da549857945fc55bdb62b41e Mon Sep 17 00:00:00 2001 From: James Xin Date: Tue, 31 Dec 2024 11:39:03 -0800 Subject: [PATCH 23/32] Java: Add binary version of ZRANK WITHSCORE (#2896) * Java: Add binary version of ZRANK WITHSCORE --------- Signed-off-by: James Xin --- CHANGELOG.md | 1 + .../src/main/java/glide/api/BaseClient.java | 9 +++++++ .../api/commands/SortedSetBaseCommands.java | 22 ++++++++++++++++ .../test/java/glide/api/GlideClientTest.java | 25 +++++++++++++++++++ .../test/java/glide/SharedCommandTests.java | 1 + 5 
files changed, 58 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 142854a347..938f0a96fa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ * Go: Add `SScan` and `SMove` ([#2789](https://github.com/valkey-io/valkey-glide/issues/2789)) * Go: Add `ZADD` ([#2813](https://github.com/valkey-io/valkey-glide/issues/2813)) * Go: Add `ZPopMin` and `ZPopMax` ([#2850](https://github.com/valkey-io/valkey-glide/pull/2850)) +* Java: Add binary version of `ZRANK WITHSCORE` ([#2896](https://github.com/valkey-io/valkey-glide/pull/2896)) #### Breaking Changes diff --git a/java/client/src/main/java/glide/api/BaseClient.java b/java/client/src/main/java/glide/api/BaseClient.java index fd8015cc2e..6039f84e8a 100644 --- a/java/client/src/main/java/glide/api/BaseClient.java +++ b/java/client/src/main/java/glide/api/BaseClient.java @@ -2207,6 +2207,15 @@ public CompletableFuture zrankWithScore(@NonNull String key, @NonNull ZRank, new String[] {key, member, WITH_SCORE_VALKEY_API}, this::handleArrayOrNullResponse); } + @Override + public CompletableFuture zrankWithScore( + @NonNull GlideString key, @NonNull GlideString member) { + return commandManager.submitNewCommand( + ZRank, + new GlideString[] {key, member, gs(WITH_SCORE_VALKEY_API)}, + this::handleArrayOrNullResponse); + } + @Override public CompletableFuture zrevrank(@NonNull String key, @NonNull String member) { return commandManager.submitNewCommand( diff --git a/java/client/src/main/java/glide/api/commands/SortedSetBaseCommands.java b/java/client/src/main/java/glide/api/commands/SortedSetBaseCommands.java index 814a41cc99..c3e6fbb7ec 100644 --- a/java/client/src/main/java/glide/api/commands/SortedSetBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/SortedSetBaseCommands.java @@ -1163,6 +1163,28 @@ CompletableFuture zrangestore( */ CompletableFuture zrankWithScore(String key, String member); + /** + * Returns the rank of member in the sorted set stored at key with its + * score, where 
scores are ordered from the lowest to highest, starting from 0.
+ * + * @see valkey.io for more details. + * @param key The key of the sorted set. + * @param member The member whose rank is to be retrieved. + * @return An array containing the rank (as Long) and score (as Double) + * of member in the sorted set.
+ * If key doesn't exist, or if member is not present in the set, + * null will be returned. + * @example + *
{@code
+     * Object[] result1 = client.zrankWithScore(gs("mySortedSet"), gs("member2")).get();
+     * assert ((Long) result1[0]) == 1L && ((Double) result1[1]) == 6.0; // Indicates that "member2" with score 6.0 has the second-lowest score in the sorted set "mySortedSet".
+     *
+     * Object[] result2 = client.zrankWithScore(gs("mySortedSet"), gs("nonExistingMember")).get();
+     * assert result2 == null; // Indicates that "nonExistingMember" is not present in the sorted set "mySortedSet".
+     * }
+ */ + CompletableFuture zrankWithScore(GlideString key, GlideString member); + /** * Returns the rank of member in the sorted set stored at key, where * scores are ordered from the highest to lowest, starting from 0.
diff --git a/java/client/src/test/java/glide/api/GlideClientTest.java b/java/client/src/test/java/glide/api/GlideClientTest.java index 92decc27d5..4b55459328 100644 --- a/java/client/src/test/java/glide/api/GlideClientTest.java +++ b/java/client/src/test/java/glide/api/GlideClientTest.java @@ -5652,6 +5652,31 @@ public void zrankWithScore_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void zrankWithScore_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString member = gs("testMember"); + GlideString[] arguments = new GlideString[] {key, member, gs(WITH_SCORE_VALKEY_API)}; + Object[] value = new Object[] {1, 6.0}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZRank), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.zrankWithScore(key, member); + Object[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void zrevrank_returns_success() { diff --git a/java/integTest/src/test/java/glide/SharedCommandTests.java b/java/integTest/src/test/java/glide/SharedCommandTests.java index 59c6c88feb..9272692a07 100644 --- a/java/integTest/src/test/java/glide/SharedCommandTests.java +++ b/java/integTest/src/test/java/glide/SharedCommandTests.java @@ -3924,6 +3924,7 @@ public void zrank(BaseClient client) { if (SERVER_VERSION.isGreaterThanOrEqualTo("7.2.0")) { assertArrayEquals(new Object[] {0L, 1.5}, client.zrankWithScore(key, "one").get()); + assertArrayEquals(new Object[] {0L, 1.5}, client.zrankWithScore(gs(key), gs("one")).get()); assertNull(client.zrankWithScore(key, "nonExistingMember").get()); assertNull(client.zrankWithScore("nonExistingKey", "nonExistingMember").get()); } From 492367da01a8539adab602afca5467ba89eeaab3 Mon Sep 17 
00:00:00 2001 From: Chloe Yip <168601573+cyip10@users.noreply.github.com> Date: Tue, 31 Dec 2024 17:25:54 -0800 Subject: [PATCH 24/32] Go: add ZCARD (#2838) * zcard implemented --------- Signed-off-by: Chloe Yip Signed-off-by: James Xin Co-authored-by: James Xin --- CHANGELOG.md | 1 + go/api/base_client.go | 9 +++++++++ go/api/sorted_set_commands.go | 20 ++++++++++++++++++++ go/integTest/shared_commands_test.go | 27 +++++++++++++++++++++++++++ 4 files changed, 57 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 938f0a96fa..1b85345f69 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,7 @@ * Go: Add `ZADD` ([#2813](https://github.com/valkey-io/valkey-glide/issues/2813)) * Go: Add `ZPopMin` and `ZPopMax` ([#2850](https://github.com/valkey-io/valkey-glide/pull/2850)) * Java: Add binary version of `ZRANK WITHSCORE` ([#2896](https://github.com/valkey-io/valkey-glide/pull/2896)) +* Go: Add `ZCARD` ([#2838](https://github.com/valkey-io/valkey-glide/pull/2838)) #### Breaking Changes diff --git a/go/api/base_client.go b/go/api/base_client.go index 8e4cb5a020..99cab3608d 100644 --- a/go/api/base_client.go +++ b/go/api/base_client.go @@ -1432,3 +1432,12 @@ func (client *baseClient) ZRem(key string, members []string) (Result[int64], err } return handleLongResponse(result) } + +func (client *baseClient) ZCard(key string) (Result[int64], error) { + result, err := client.executeCommand(C.ZCard, []string{key}) + if err != nil { + return CreateNilInt64Result(), err + } + + return handleLongResponse(result) +} diff --git a/go/api/sorted_set_commands.go b/go/api/sorted_set_commands.go index 835f7a5228..4159acabe1 100644 --- a/go/api/sorted_set_commands.go +++ b/go/api/sorted_set_commands.go @@ -212,4 +212,24 @@ type SortedSetCommands interface { // // [valkey.io]: https://valkey.io/commands/zrem/ ZRem(key string, members []string) (Result[int64], error) + + // Returns the cardinality (number of elements) of the sorted set stored at `key`. 
+ // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the set. + // + // Return value: + // The number of elements in the sorted set. + // + // If `key` does not exist, it is treated as an empty sorted set, and this command returns 0. + // If `key` holds a value that is not a sorted set, an error is returned. + // + // Example: + // result1, err := client.ZCard("mySet") + // result1.Value() :1 // There is 1 item in the set + // + // [valkey.io]: https://valkey.io/commands/zcard/ + ZCard(key string) (Result[int64], error) } diff --git a/go/integTest/shared_commands_test.go b/go/integTest/shared_commands_test.go index 30a96f0d04..dd0fc29022 100644 --- a/go/integTest/shared_commands_test.go +++ b/go/integTest/shared_commands_test.go @@ -3544,6 +3544,33 @@ func (suite *GlideTestSuite) TestPExpireTime() { }) } +func (suite *GlideTestSuite) Test_ZCard() { + suite.runWithDefaultClients(func(client api.BaseClient) { + key := "{key}" + uuid.NewString() + membersScores := map[string]float64{ + "one": 1.0, + "two": 2.0, + "three": 3.0, + } + t := suite.T() + res1, err := client.ZAdd(key, membersScores) + assert.Nil(t, err) + assert.Equal(t, int64(3), res1.Value()) + + res2, err := client.ZCard(key) + assert.Nil(t, err) + assert.Equal(t, int64(3), res2.Value()) + + res3, err := client.ZRem(key, []string{"one"}) + assert.Nil(t, err) + assert.Equal(t, int64(1), res3.Value()) + + res4, err := client.ZCard(key) + assert.Nil(t, err) + assert.Equal(t, int64(2), res4.Value()) + }) +} + func (suite *GlideTestSuite) TestPExpireTime_KeyDoesNotExist() { suite.SkipIfServerVersionLowerThanBy("7.0.0") suite.runWithDefaultClients(func(client api.BaseClient) { From 9f67e18af179608c306203524fd4ac12a8776aca Mon Sep 17 00:00:00 2001 From: Lior Sventitzky Date: Wed, 1 Jan 2025 14:10:19 +0200 Subject: [PATCH 25/32] CI: Add workflow that runs prettier on yaml workflow files (#2884) * added yaml linter --------- Signed-off-by: lior sventitzky --- 
.../ISSUE_TEMPLATE/flaky-ci-test-issue.yml | 259 +++++++++--------- .github/ISSUE_TEMPLATE/inquiry.yml | 112 ++++---- .github/ISSUE_TEMPLATE/task.yml | 44 +-- .github/workflows/lint-yaml.yml | 28 ++ .github/workflows/pypi-cd.yml | 3 +- 5 files changed, 236 insertions(+), 210 deletions(-) create mode 100644 .github/workflows/lint-yaml.yml diff --git a/.github/ISSUE_TEMPLATE/flaky-ci-test-issue.yml b/.github/ISSUE_TEMPLATE/flaky-ci-test-issue.yml index 3f25dc4fba..5698f64c0d 100644 --- a/.github/ISSUE_TEMPLATE/flaky-ci-test-issue.yml +++ b/.github/ISSUE_TEMPLATE/flaky-ci-test-issue.yml @@ -5,133 +5,132 @@ labels: ["bug", "flaky-test"] assignees: [] body: -- type: markdown - attributes: - value: | - ## Description of the Flaky Test - -- type: input - id: test-name - attributes: - label: Test Name - description: Name of the test that is flaky - placeholder: e.g., test_example - -- type: input - id: test-location - attributes: - label: Test Location - description: File and line number or test suite - placeholder: e.g., test_suite.py line 42 - -- type: input - id: failure-permlink - attributes: - label: Failure Permlink - description: Permlink to the failure line in the test run - placeholder: e.g., https://ci.example.com/build/123 - -- type: input - id: frequency - attributes: - label: Frequency - description: How often does the test fail? - placeholder: e.g., 1 in 10 runs - -- type: markdown - attributes: - value: | - ## Steps to Reproduce - -- type: textarea - id: steps-to-reproduce - attributes: - label: Steps to Reproduce - description: List the steps required to reproduce the flaky test - placeholder: - 1. Step 1 - 2. Step 2 - 3. Step 3 - -- type: markdown - attributes: - value: | - ## Additional Context - -- type: input - id: system-information - attributes: - label: System Information - description: Operating system, CI environment, etc. 
- placeholder: e.g., Ubuntu 20.04, GitHub Actions - -- type: input - id: language-and-version - attributes: - label: Language and Version - description: Programming language and its version - placeholder: e.g., Python 3.8 - -- type: input - id: engine-version - attributes: - label: Engine Version - description: Engine version used - placeholder: e.g., v6.2 - -- type: textarea - id: logs - attributes: - label: Logs - description: Include any relevant logs or error messages - placeholder: Paste logs here... - -- type: textarea - id: screenshots - attributes: - label: Screenshots - description: If applicable, add screenshots to help explain the issue - placeholder: Paste screenshots here... - -- type: input - id: glide-version - attributes: - label: Glide Version - description: Glide version used - placeholder: e.g., 1.2.3 - -- type: markdown - attributes: - value: | - ## Expected Behavior - -- type: textarea - id: expected-behavior - attributes: - label: Expected Behavior - description: Describe what you expected to happen - placeholder: Describe the expected behavior... - -- type: markdown - attributes: - value: | - ## Actual Behavior - -- type: textarea - id: actual-behavior - attributes: - label: Actual Behavior - description: Describe what actually happened - placeholder: Describe the actual behavior... - -- type: markdown - attributes: - value: | - ## Possible Fixes - -- type: textarea - id: possible-fixes - attributes: - label: Possible Fixes - description: If you have any insight into what might be causing the flakiness, mention it here - placeholder: Describe possible fixes... 
+ - type: markdown + attributes: + value: | + ## Description of the Flaky Test + + - type: input + id: test-name + attributes: + label: Test Name + description: Name of the test that is flaky + placeholder: e.g., test_example + + - type: input + id: test-location + attributes: + label: Test Location + description: File and line number or test suite + placeholder: e.g., test_suite.py line 42 + + - type: input + id: failure-permlink + attributes: + label: Failure Permlink + description: Permlink to the failure line in the test run + placeholder: e.g., https://ci.example.com/build/123 + + - type: input + id: frequency + attributes: + label: Frequency + description: How often does the test fail? + placeholder: e.g., 1 in 10 runs + + - type: markdown + attributes: + value: | + ## Steps to Reproduce + + - type: textarea + id: steps-to-reproduce + attributes: + label: Steps to Reproduce + description: List the steps required to reproduce the flaky test + placeholder: 1. Step 1 + 2. Step 2 + 3. Step 3 + + - type: markdown + attributes: + value: | + ## Additional Context + + - type: input + id: system-information + attributes: + label: System Information + description: Operating system, CI environment, etc. + placeholder: e.g., Ubuntu 20.04, GitHub Actions + + - type: input + id: language-and-version + attributes: + label: Language and Version + description: Programming language and its version + placeholder: e.g., Python 3.8 + + - type: input + id: engine-version + attributes: + label: Engine Version + description: Engine version used + placeholder: e.g., v6.2 + + - type: textarea + id: logs + attributes: + label: Logs + description: Include any relevant logs or error messages + placeholder: Paste logs here... + + - type: textarea + id: screenshots + attributes: + label: Screenshots + description: If applicable, add screenshots to help explain the issue + placeholder: Paste screenshots here... 
+ + - type: input + id: glide-version + attributes: + label: Glide Version + description: Glide version used + placeholder: e.g., 1.2.3 + + - type: markdown + attributes: + value: | + ## Expected Behavior + + - type: textarea + id: expected-behavior + attributes: + label: Expected Behavior + description: Describe what you expected to happen + placeholder: Describe the expected behavior... + + - type: markdown + attributes: + value: | + ## Actual Behavior + + - type: textarea + id: actual-behavior + attributes: + label: Actual Behavior + description: Describe what actually happened + placeholder: Describe the actual behavior... + + - type: markdown + attributes: + value: | + ## Possible Fixes + + - type: textarea + id: possible-fixes + attributes: + label: Possible Fixes + description: If you have any insight into what might be causing the flakiness, mention it here + placeholder: Describe possible fixes... diff --git a/.github/ISSUE_TEMPLATE/inquiry.yml b/.github/ISSUE_TEMPLATE/inquiry.yml index ac1fd9e84b..6e50ba4f4e 100644 --- a/.github/ISSUE_TEMPLATE/inquiry.yml +++ b/.github/ISSUE_TEMPLATE/inquiry.yml @@ -5,69 +5,69 @@ labels: ["Inquiry"] assignees: [] body: -- type: markdown - attributes: - value: | - ## Question + - type: markdown + attributes: + value: | + ## Question -- type: textarea - id: question-description - attributes: - label: Inquiry - description: Describe your inquiry in detail - placeholder: Describe your inquiry... + - type: textarea + id: question-description + attributes: + label: Inquiry + description: Describe your inquiry in detail + placeholder: Describe your inquiry... 
-- type: markdown - attributes: - value: | - ## Language and Version + - type: markdown + attributes: + value: | + ## Language and Version -- type: input - id: language - attributes: - label: Language - description: Optional - Specify the programming language - placeholder: e.g., Python, Java + - type: input + id: language + attributes: + label: Language + description: Optional - Specify the programming language + placeholder: e.g., Python, Java -- type: input - id: language-version - attributes: - label: Language Version - description: Optional - Specify the version of the language - placeholder: e.g., 3.8, 11 + - type: input + id: language-version + attributes: + label: Language Version + description: Optional - Specify the version of the language + placeholder: e.g., 3.8, 11 -- type: markdown - attributes: - value: | - ## Engine Version + - type: markdown + attributes: + value: | + ## Engine Version -- type: input - id: engine-version - attributes: - label: Engine Version - description: Optional - Specify the engine version - placeholder: e.g., ValKey 8.0.1, Redis-OSS 6.2.14 + - type: input + id: engine-version + attributes: + label: Engine Version + description: Optional - Specify the engine version + placeholder: e.g., ValKey 8.0.1, Redis-OSS 6.2.14 -- type: markdown - attributes: - value: | - ## Operating System + - type: markdown + attributes: + value: | + ## Operating System -- type: input - id: os - attributes: - label: Operating System - description: Optional - Specify the operating system - placeholder: e.g., MacOs 14, Ubuntu 20.04 + - type: input + id: os + attributes: + label: Operating System + description: Optional - Specify the operating system + placeholder: e.g., MacOs 14, Ubuntu 20.04 -- type: markdown - attributes: - value: | - ## Additional Technical Information + - type: markdown + attributes: + value: | + ## Additional Technical Information -- type: textarea - id: additional-info - attributes: - label: Additional Technical Information - 
description: Optional - Provide any additional technical information - placeholder: Additional context or details... + - type: textarea + id: additional-info + attributes: + label: Additional Technical Information + description: Optional - Provide any additional technical information + placeholder: Additional context or details... diff --git a/.github/ISSUE_TEMPLATE/task.yml b/.github/ISSUE_TEMPLATE/task.yml index 9567bd71c2..2b3d18f8f1 100644 --- a/.github/ISSUE_TEMPLATE/task.yml +++ b/.github/ISSUE_TEMPLATE/task.yml @@ -5,28 +5,28 @@ labels: ["task"] assignees: [] body: - - type: markdown - attributes: - value: | - ## Task Description + - type: markdown + attributes: + value: | + ## Task Description - - type: textarea - attributes: - label: Description - description: Describe the task in detail - placeholder: Describe the task... + - type: textarea + attributes: + label: Description + description: Describe the task in detail + placeholder: Describe the task... - - type: checkboxes - attributes: - label: Checklist - description: Add items to be completed - options: - - label: Task item 1 - - label: Task item 2 - - label: Task item 3 + - type: checkboxes + attributes: + label: Checklist + description: Add items to be completed + options: + - label: Task item 1 + - label: Task item 2 + - label: Task item 3 - - type: textarea - attributes: - label: Additional Notes - description: Add any additional notes or comments - placeholder: Any additional notes... + - type: textarea + attributes: + label: Additional Notes + description: Add any additional notes or comments + placeholder: Any additional notes... 
diff --git a/.github/workflows/lint-yaml.yml b/.github/workflows/lint-yaml.yml new file mode 100644 index 0000000000..9c077487fb --- /dev/null +++ b/.github/workflows/lint-yaml.yml @@ -0,0 +1,28 @@ +name: lint-yaml + +on: + push: + branches: + - main + - release-* + paths: + - ".github/**/*.yml" + - ".github/**/*.yaml" + pull_request: + paths: + - ".github/**/*.yml" + - ".github/**/*.yaml" + workflow_dispatch: + +jobs: + lint: + runs-on: ubuntu-latest + timeout-minutes: 10 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Run Prettier on YAML files + run: | + npx prettier --check .github/ diff --git a/.github/workflows/pypi-cd.yml b/.github/workflows/pypi-cd.yml index 28d8de8579..1be3527e4a 100644 --- a/.github/workflows/pypi-cd.yml +++ b/.github/workflows/pypi-cd.yml @@ -223,7 +223,7 @@ jobs: - name: Setup self-hosted runner access if: ${{ matrix.build.TARGET == 'aarch64-unknown-linux-gnu' }} run: sudo chown -R $USER:$USER /home/ubuntu/actions-runner/_work/valkey-glide - + - name: checkout uses: actions/checkout@v4 @@ -238,7 +238,6 @@ jobs: engine-version: "8.0" target: ${{ matrix.build.target }} - - name: Check if RC and set a distribution tag for the package shell: bash run: | From 91eb2cf0494d0109179dac22936277621de1a292 Mon Sep 17 00:00:00 2001 From: BoazBD <50696333+BoazBD@users.noreply.github.com> Date: Thu, 2 Jan 2025 13:19:31 +0200 Subject: [PATCH 26/32] Update ORT to skip approved packages or those under testing (#2890) * Update ORT to skip approved packages or those under testing Signed-off-by: BoazBD * bump version Signed-off-by: BoazBD * add installation for dev_requirements in ci Signed-off-by: BoazBD * Update workflows and documentation to use dev_requirements.txt instead of requirements.txt, and improve clarity in documentation. 
Signed-off-by: BoazBD * Update python/pyproject.toml Co-authored-by: Bar Shaul <88437685+barshaul@users.noreply.github.com> Signed-off-by: BoazBD <50696333+BoazBD@users.noreply.github.com> * move licences to line 15 Signed-off-by: BoazBD --------- Signed-off-by: BoazBD Signed-off-by: BoazBD <50696333+BoazBD@users.noreply.github.com> Co-authored-by: Bar Shaul <88437685+barshaul@users.noreply.github.com> --- .../workflows/build-python-wrapper/action.yml | 2 +- .github/workflows/ort.yml | 3 ++- .github/workflows/python.yml | 4 ++-- python/.ort.yml | 3 +++ python/DEVELOPER.md | 4 ++-- python/dev_requirements.txt | 10 ++++++++++ python/pyproject.toml | 2 ++ python/requirements.txt | 17 +++++------------ utils/get_licenses_from_ort.py | 10 +++++++++- 9 files changed, 36 insertions(+), 19 deletions(-) create mode 100644 python/dev_requirements.txt diff --git a/.github/workflows/build-python-wrapper/action.yml b/.github/workflows/build-python-wrapper/action.yml index 25c7e20b7d..4b960a458a 100644 --- a/.github/workflows/build-python-wrapper/action.yml +++ b/.github/workflows/build-python-wrapper/action.yml @@ -65,5 +65,5 @@ runs: source "$HOME/.cargo/env" python3 -m venv .env source .env/bin/activate - python3 -m pip install --no-cache-dir -r requirements.txt + python3 -m pip install --no-cache-dir -r dev_requirements.txt maturin develop diff --git a/.github/workflows/ort.yml b/.github/workflows/ort.yml index 2134f1f7a4..0f1b394880 100644 --- a/.github/workflows/ort.yml +++ b/.github/workflows/ort.yml @@ -74,7 +74,7 @@ jobs: with: repository: "oss-review-toolkit/ort" path: "./ort" - ref: "26.0.0" + ref: "44.0.0" submodules: recursive - name: Install Rust toolchain @@ -93,6 +93,7 @@ jobs: cat << EOF > ~/.ort/config/config.yml ort: analyzer: + skip_excluded: true allowDynamicVersions: true enabledPackageManagers: [Cargo, NPM, PIP, GradleInspector] EOF diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 11df78697a..699033cf1a 100644 --- 
a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -115,7 +115,7 @@ jobs: working-directory: ./python run: | source .env/bin/activate - pip install -r requirements.txt + pip install -r dev_requirements.txt cd python/tests/ pytest --asyncio-mode=auto --html=pytest_report.html --self-contained-html @@ -178,7 +178,7 @@ jobs: working-directory: ./python run: | source .env/bin/activate - pip install -r requirements.txt + pip install -r dev_requirements.txt cd python/tests/ pytest --asyncio-mode=auto -k test_pubsub --html=pytest_report.html --self-contained-html diff --git a/python/.ort.yml b/python/.ort.yml index 0f33f38ece..f9b92c4ce1 100644 --- a/python/.ort.yml +++ b/python/.ort.yml @@ -7,6 +7,9 @@ excludes: reason: "DEV_DEPENDENCY_OF" comment: "Packages for development only." paths: + - pattern: "dev_requirements.txt" + reason: "TEST_TOOL_OF" + comment: "Packages for testing only." - pattern: ".*" reason: "BUILD_TOOL_OF" comment: "invisible" diff --git a/python/DEVELOPER.md b/python/DEVELOPER.md index ae945b5835..02b4ee3001 100644 --- a/python/DEVELOPER.md +++ b/python/DEVELOPER.md @@ -108,7 +108,7 @@ protoc -Iprotobuf=${GLIDE_ROOT}/glide-core/src/protobuf/ \ cd python python3 -m venv .env source .env/bin/activate -pip install -r requirements.txt +pip install -r dev_requirements.txt ``` ## Build the package (in release mode): @@ -210,7 +210,7 @@ Run from the main `/python` folder ```bash cd $HOME/src/valkey-glide/python source .env/bin/activate - pip install -r requirements.txt + pip install -r dev_requirements.txt isort . --profile black --skip-glob python/glide/protobuf --skip-glob .env black . --exclude python/glide/protobuf --exclude .env flake8 . 
--count --select=E9,F63,F7,F82 --show-source --statistics \ diff --git a/python/dev_requirements.txt b/python/dev_requirements.txt new file mode 100644 index 0000000000..e912acca8c --- /dev/null +++ b/python/dev_requirements.txt @@ -0,0 +1,10 @@ +maturin==0.14.17 # higher version break the needs structure changes, the name of the project is not the same as the package name, and the naming both glide create a circular dependency - TODO: fix this +pytest +pytest-asyncio +pytest-html +black >= 24.3.0 +flake8 == 5.0 +isort == 5.10 +mypy == 1.13.0 +mypy-protobuf == 3.5 +packaging >= 22.0 diff --git a/python/pyproject.toml b/python/pyproject.toml index 013a4b0e57..ca71479e62 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -6,6 +6,8 @@ build-backend = "maturin" name = "valkey-glide" requires-python = ">=3.9" dependencies = [ + # Note: If you add a dependency here, make sure to also add it to requirements.txt + # Once issue https://github.com/aboutcode-org/python-inspector/issues/197 is resolved, the requirements.txt file can be removed. "async-timeout>=4.0.2; python_version < '3.11'", "typing-extensions>=4.8.0; python_version < '3.11'", "protobuf>=3.20", diff --git a/python/requirements.txt b/python/requirements.txt index b5880e6287..c69ec6dc52 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,12 +1,5 @@ -async-timeout==4.0.2;python_version<"3.11" -maturin==0.14.17 # higher version break the needs structure changes, the name of the project is not the same as the package name, and the naming both glide create a circular dependency - TODO: fix this -pytest -pytest-asyncio -typing_extensions==4.8.0;python_version<"3.11" -pytest-html -black >= 24.3.0 -flake8 == 5.0 -isort == 5.10 -mypy == 1.13.0 -mypy-protobuf == 3.5 -packaging >= 22.0 +# Note: The main location for tracking dependencies is pyproject.toml. This file is used only for the ORT process. When adding a dependency, make sure to add it both to this file and to pyproject.toml. 
+# Once issue https://github.com/aboutcode-org/python-inspector/issues/197 is resolved, this file can be removed. +async-timeout>=4.0.2 +typing-extensions>=4.8.0 +protobuf>=3.20 diff --git a/utils/get_licenses_from_ort.py b/utils/get_licenses_from_ort.py index 9c7d7b62ba..6b4b6cb60e 100644 --- a/utils/get_licenses_from_ort.py +++ b/utils/get_licenses_from_ort.py @@ -13,10 +13,13 @@ APPROVED_LICENSES = [ "Unicode-DFS-2016", "(Apache-2.0 OR MIT) AND Unicode-DFS-2016", + "Unicode-3.0", + "(Apache-2.0 OR MIT) AND Unicode-3.0", "0BSD OR Apache-2.0 OR MIT", "Apache-2.0", "Apache-2.0 AND (Apache-2.0 OR BSD-2-Clause)", "Apache-2.0 AND (Apache-2.0 OR BSD-3-Clause)", + "Apache-2.0 AND MIT", "Apache-2.0 OR Apache-2.0 WITH LLVM-exception OR MIT", "Apache-2.0 OR BSD-2-Clause OR MIT", "Apache-2.0 OR BSL-1.0", @@ -36,6 +39,11 @@ "PSF-2.0", ] +# Packages with non-pre-approved licenses that received manual approval. +APPROVED_PACKAGES = [ + "PyPI::pathspec:0.12.1", + "PyPI::certifi:2023.11.17" +] SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__)) @@ -105,7 +113,7 @@ def __str__(self): package_license = PackageLicense( package["id"], ort_result.name, license ) - if license not in APPROVED_LICENSES: + if license not in APPROVED_LICENSES and package["id"] not in APPROVED_PACKAGES: unknown_licenses.append(package_license) else: final_packages.append(package_license) From c0a903c04e239afd39c99ec04bf2dbc325434813 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Thu, 2 Jan 2025 10:15:01 -0800 Subject: [PATCH 27/32] fix gradle (#2879) * fix gradle Signed-off-by: Yury-Fridlyand --- java/benchmarks/build.gradle | 2 +- java/client/build.gradle | 39 ++++++------------------------------ java/integTest/build.gradle | 2 +- 3 files changed, 8 insertions(+), 35 deletions(-) diff --git a/java/benchmarks/build.gradle b/java/benchmarks/build.gradle index e789bece2b..b4777ee410 100644 --- a/java/benchmarks/build.gradle +++ b/java/benchmarks/build.gradle @@ -28,7 +28,7 @@ dependencies { 
implementation group: 'com.google.code.gson', name: 'gson', version: '2.10.1' } -run.dependsOn ':client:buildRustRelease' +run.dependsOn ':client:buildRust' application { // Define the main class for the application. diff --git a/java/client/build.gradle b/java/client/build.gradle index 0075b01f87..7ae0d7c429 100644 --- a/java/client/build.gradle +++ b/java/client/build.gradle @@ -89,26 +89,14 @@ tasks.register('cleanRust') { } } -tasks.register('buildRustRelease', Exec) { - commandLine 'cargo', 'build', '--release' - workingDir project.rootDir - environment CARGO_TERM_COLOR: 'always' -} - -tasks.register('buildRustReleaseStrip', Exec) { - commandLine 'cargo', 'build', '--release', '--strip' - workingDir project.rootDir - environment CARGO_TERM_COLOR: 'always' -} - tasks.register('buildRust', Exec) { - commandLine 'cargo', 'build' + commandLine 'cargo', 'build', '--release' workingDir project.rootDir environment CARGO_TERM_COLOR: 'always' } tasks.register('buildRustFfi', Exec) { - commandLine 'cargo', 'build' + commandLine 'cargo', 'build', '--release' workingDir project.rootDir environment CARGO_TERM_COLOR: 'always', CARGO_BUILD_RUSTFLAGS: '--cfg ffi_test' } @@ -118,16 +106,6 @@ tasks.register('buildWithRust') { finalizedBy 'build' } -tasks.register('buildWithRustRelease') { - dependsOn 'buildRustRelease' - finalizedBy 'build' -} - -tasks.register('buildWithRustReleaseStrip') { - dependsOn 'buildRustReleaseStrip' - finalizedBy 'build' -} - tasks.register('buildWithProto') { dependsOn 'protobuf' finalizedBy 'build' @@ -143,11 +121,6 @@ tasks.register('buildAll') { finalizedBy 'build' } -tasks.register('buildAllRelease') { - dependsOn 'protobuf', 'buildRustRelease', 'testFfi' - finalizedBy 'build' -} - compileJava.dependsOn('protobuf') clean.dependsOn('cleanProtobuf', 'cleanRust') @@ -162,10 +135,10 @@ def defaultReleaseVersion = "255.255.255"; delombok.dependsOn('compileJava') jar.dependsOn('copyNativeLib') javadoc.dependsOn('copyNativeLib') 
-copyNativeLib.dependsOn('buildRustRelease') +copyNativeLib.dependsOn('buildRust') compileTestJava.dependsOn('copyNativeLib') -test.dependsOn('buildRustRelease') -testFfi.dependsOn('buildRustRelease') +test.dependsOn('buildRust') +testFfi.dependsOn('buildRust') test { exclude "glide/ffi/FfiTest.class" @@ -243,7 +216,7 @@ tasks.withType(Test) { showStandardStreams true } // This is needed for the FFI tests - jvmArgs "-Djava.library.path=${projectDir}/../target/debug" + jvmArgs "-Djava.library.path=${projectDir}/../target/release" } jar { diff --git a/java/integTest/build.gradle b/java/integTest/build.gradle index 663c19eb52..8ebd7f272e 100644 --- a/java/integTest/build.gradle +++ b/java/integTest/build.gradle @@ -129,7 +129,7 @@ clearDirs.finalizedBy 'startStandalone' clearDirs.finalizedBy 'startCluster' clearDirs.finalizedBy 'startClusterForAz' test.finalizedBy 'stopAllAfterTests' -test.dependsOn ':client:buildRustRelease' +test.dependsOn ':client:buildRust' tasks.withType(Test) { doFirst { From a4d7fe18ed0df3c80456b6f91e5d784ce689bd38 Mon Sep 17 00:00:00 2001 From: Shachar Langbeheim Date: Thu, 2 Jan 2025 20:16:06 +0200 Subject: [PATCH 28/32] Remove `git submodule update` from DEVELOPER.md (#2894) Signed-off-by: Shachar Langbeheim --- csharp/DEVELOPER.md | 14 ++++---------- go/DEVELOPER.md | 14 +++++--------- 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/csharp/DEVELOPER.md b/csharp/DEVELOPER.md index f42e0d2022..43bb647215 100644 --- a/csharp/DEVELOPER.md +++ b/csharp/DEVELOPER.md @@ -85,19 +85,13 @@ Before starting this step, make sure you've installed all software requirments. cd valkey-glide ``` -2. Initialize git submodule - -```bash -git submodule update --init --recursive -``` - -3. Build the C# wrapper +2. Build the C# wrapper ```bash dotnet build ``` -4. Run tests +3. Run tests Run test suite from `csharp` directory: @@ -105,7 +99,7 @@ Run test suite from `csharp` directory: dotnet test ``` -5. Run benchmark +4. Run benchmark 1. 
Ensure that you have installed `redis-server` and `redis-cli` on your host. You can find the Redis installation guide at the following link: [Redis Installation Guide](https://redis.io/docs/install/install-redis/install-redis-on-linux/). @@ -125,7 +119,7 @@ dotnet test Run benchmarking script with `-h` flag to get list and help about all command line parameters. -6. Lint the code +5. Lint the code Before making a contribution ensure that all new user API and non-obvious places in code is well documented and run a code linter. diff --git a/go/DEVELOPER.md b/go/DEVELOPER.md index 5619b7f7b2..12562c9e0f 100644 --- a/go/DEVELOPER.md +++ b/go/DEVELOPER.md @@ -105,32 +105,28 @@ Before starting this step, make sure you've installed all software requirements. git clone --branch ${VERSION} https://github.com/valkey-io/valkey-glide.git cd valkey-glide ``` -2. Initialize git submodules: - ```bash - git submodule update --init --recursive - ``` -3. Install build dependencies: +2. Install build dependencies: ```bash cd go make install-build-tools ``` -4. If on CentOS or Ubuntu, add the glide-rs library to LD_LIBRARY_PATH: +3. If on CentOS or Ubuntu, add the glide-rs library to LD_LIBRARY_PATH: ```bash # Replace "" with the path to the valkey-glide root, eg "$HOME/Projects/valkey-glide" GLIDE_ROOT_FOLDER_PATH= export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GLIDE_ROOT_FOLDER_PATH/go/target/release/deps/ ``` -5. Build the Go wrapper: +4. Build the Go wrapper: ```bash make build ``` -6. Run tests: +5. Run tests: 1. Ensure that you have installed valkey-server and valkey-cli on your host. You can find the Valkey installation guide at the following link: [Valkey Installation Guide](https://github.com/valkey-io/valkey). 2. Execute the following command from the go folder: ```bash go test -race ./... ``` -7. Install Go development tools with: +6. 
Install Go development tools with: ```bash # For go1.22: make install-dev-tools From 4514943cd145a496bc1252133a0a87ffcb599b52 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Thu, 2 Jan 2025 10:28:20 -0800 Subject: [PATCH 29/32] Go: Fix command groups and links (#2844) * Fix command groups and links Signed-off-by: Yury-Fridlyand --- go/api/connection_management_commands.go | 35 +++ go/api/generic_commands.go | 6 +- go/api/hash_commands.go | 295 ++++++++++++++++++ go/api/list_commands.go | 4 +- go/api/set_commands.go | 4 +- go/api/{commands.go => string_commands.go} | 331 +-------------------- 6 files changed, 338 insertions(+), 337 deletions(-) create mode 100644 go/api/connection_management_commands.go create mode 100644 go/api/hash_commands.go rename go/api/{commands.go => string_commands.go} (57%) diff --git a/go/api/connection_management_commands.go b/go/api/connection_management_commands.go new file mode 100644 index 0000000000..16c08f0a78 --- /dev/null +++ b/go/api/connection_management_commands.go @@ -0,0 +1,35 @@ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + +package api + +// Supports commands and transactions for the "Connection Management" group of commands for standalone client. +// +// See [valkey.io] for details. +// +// [valkey.io]: https://valkey.io/commands/#connection +type ConnectionManagementCommands interface { + // Pings the server. + // + // Return value: + // Returns "PONG". + // + // For example: + // result, err := client.Ping() + // + // [valkey.io]: https://valkey.io/commands/ping/ + Ping() (string, error) + + // Pings the server with a custom message. + // + // Parameters: + // message - A message to include in the `PING` command. + // + // Return value: + // Returns the copy of message. 
+ // + // For example: + // result, err := client.PingWithMessage("Hello") + // + // [valkey.io]: https://valkey.io/commands/ping/ + PingWithMessage(message string) (string, error) +} diff --git a/go/api/generic_commands.go b/go/api/generic_commands.go index 04fd69d520..c583dfe31b 100644 --- a/go/api/generic_commands.go +++ b/go/api/generic_commands.go @@ -2,13 +2,11 @@ package api -// Supports commands and transactions for the "List Commands" group for standalone and cluster clients. +// Supports commands and transactions for the "Generic" group of commands for standalone and cluster clients. // // See [valkey.io] for details. // -// GenericBaseCommands defines an interface for the "Generic Commands". -// -// [valkey.io]: https://valkey.io/commands/?group=Generic +// [valkey.io]: https://valkey.io/commands/#generic type GenericBaseCommands interface { // Del removes the specified keys from the database. A key is ignored if it does not exist. // diff --git a/go/api/hash_commands.go b/go/api/hash_commands.go new file mode 100644 index 0000000000..b1ef215339 --- /dev/null +++ b/go/api/hash_commands.go @@ -0,0 +1,295 @@ +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + +package api + +// Supports commands and transactions for the "Hash" group of commands for standalone and cluster clients. +// +// See [valkey.io] for details. +// +// [valkey.io]: https://valkey.io/commands/#hash +type HashCommands interface { + // HGet returns the value associated with field in the hash stored at key. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // field - The field in the hash stored at key to retrieve from the database. + // + // Return value: + // The Result[string] associated with field, or [api.NilResult[string]](api.CreateNilStringResult()) when field is not + // present in the hash or key does not exist. 
+ // + // For example: + // Assume we have the following hash: + // my_hash := map[string]string{"field1": "value", "field2": "another_value"} + // payload, err := client.HGet("my_hash", "field1") + // // payload.Value(): "value" + // // payload.IsNil(): false + // payload, err = client.HGet("my_hash", "nonexistent_field") + // // payload equals api.CreateNilStringResult() + // + // [valkey.io]: https://valkey.io/commands/hget/ + HGet(key string, field string) (Result[string], error) + + // HGetAll returns all fields and values of the hash stored at key. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // + // Return value: + // A map of all fields and their values as Result[string] in the hash, or an empty map when key does not exist. + // + // For example: + // fieldValueMap, err := client.HGetAll("my_hash") + // // field1 equals api.CreateStringResult("field1") + // // value1 equals api.CreateStringResult("value1") + // // field2 equals api.CreateStringResult("field2") + // // value2 equals api.CreateStringResult("value2") + // // fieldValueMap equals map[api.Result[string]]api.Result[string]{field1: value1, field2: value2} + // + // [valkey.io]: https://valkey.io/commands/hgetall/ + HGetAll(key string) (map[Result[string]]Result[string], error) + + // HMGet returns the values associated with the specified fields in the hash stored at key. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // fields - The fields in the hash stored at key to retrieve from the database. + // + // Return value: + // An array of Result[string]s associated with the given fields, in the same order as they are requested. + // For every field that does not exist in the hash, a [api.NilResult[string]](api.CreateNilStringResult()) is + // returned. + // If key does not exist, returns an empty string array. 
+ // + // For example: + // values, err := client.HMGet("my_hash", []string{"field1", "field2"}) + // // value1 equals api.CreateStringResult("value1") + // // value2 equals api.CreateStringResult("value2") + // // values equals []api.Result[string]{value1, value2} + // + // [valkey.io]: https://valkey.io/commands/hmget/ + HMGet(key string, fields []string) ([]Result[string], error) + + // HSet sets the specified fields to their respective values in the hash stored at key. + // This command overwrites the values of specified fields that exist in the hash. + // If key doesn't exist, a new key holding a hash is created. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // values - A map of field-value pairs to set in the hash. + // + // Return value: + // The Result[int64] containing number of fields that were added or updated. + // + // For example: + // num, err := client.HSet("my_hash", map[string]string{"field": "value", "field2": "value2"}) + // // num.Value(): 2 + // // num.IsNil(): false + // + // [valkey.io]: https://valkey.io/commands/hset/ + HSet(key string, values map[string]string) (Result[int64], error) + + // HSetNX sets field in the hash stored at key to value, only if field does not yet exist. + // If key does not exist, a new key holding a hash is created. + // If field already exists, this operation has no effect. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // field - The field to set. + // value - The value to set. + // + // Return value: + // A Result[bool] containing true if field is a new field in the hash and value was set. + // false if field already exists in the hash and no operation was performed. 
+ // + // For example: + // payload1, err := client.HSetNX("myHash", "field", "value") + // // payload1.Value(): true + // // payload1.IsNil(): false + // payload2, err := client.HSetNX("myHash", "field", "newValue") + // // payload2.Value(): false + // // payload2.IsNil(): false + // + // [valkey.io]: https://valkey.io/commands/hsetnx/ + HSetNX(key string, field string, value string) (Result[bool], error) + + // HDel removes the specified fields from the hash stored at key. + // Specified fields that do not exist within this hash are ignored. + // If key does not exist, it is treated as an empty hash and this command returns 0. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // fields - The fields to remove from the hash stored at key. + // + // Return value: + // The Result[int64] containing number of fields that were removed from the hash, not including specified but non-existing + // fields. + // + // For example: + // num, err := client.HDel("my_hash", []string{"field_1", "field_2"}) + // // num.Value(): 2 + // // num.IsNil(): false + // + // [valkey.io]: https://valkey.io/commands/hdel/ + HDel(key string, fields []string) (Result[int64], error) + + // HLen returns the number of fields contained in the hash stored at key. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // + // Return value: + // The Result[int64] containing number of fields in the hash, or 0 when key does not exist. + // If key holds a value that is not a hash, an error is returned. + // + // For example: + // num1, err := client.HLen("myHash") + // // num.Value(): 3 + // // num.IsNil(): false + // num2, err := client.HLen("nonExistingKey") + // // num.Value(): 0 + // // num.IsNil(): false + // + // [valkey.io]: https://valkey.io/commands/hlen/ + HLen(key string) (Result[int64], error) + + // HVals returns all values in the hash stored at key. + // + // See [valkey.io] for details. 
+ // + // Parameters: + // key - The key of the hash. + // + // Return value: + // A slice of Result[string]s containing all the values in the hash, or an empty slice when key does not exist. + // + // For example: + // values, err := client.HVals("myHash") + // // value1 equals api.CreateStringResult("value1") + // // value2 equals api.CreateStringResult("value2") + // // value3 equals api.CreateStringResult("value3") + // // values equals []api.Result[string]{value1, value2, value3} + // + // [valkey.io]: https://valkey.io/commands/hvals/ + HVals(key string) ([]Result[string], error) + + // HExists returns if field is an existing field in the hash stored at key. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // field - The field to check in the hash stored at key. + // + // Return value: + // A Result[bool] containing true if the hash contains the specified field. + // false if the hash does not contain the field, or if the key does not exist. + // + // For example: + // exists, err := client.HExists("my_hash", "field1") + // // exists.Value(): true + // // exists.IsNil(): false + // exists, err = client.HExists("my_hash", "non_existent_field") + // // exists.Value(): false + // // exists.IsNil(): false + // + // [valkey.io]: https://valkey.io/commands/hexists/ + HExists(key string, field string) (Result[bool], error) + + // HKeys returns all field names in the hash stored at key. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // + // Return value: + // A slice of Result[string]s containing all the field names in the hash, or an empty slice when key does not exist. 
+ // + // For example: + // names, err := client.HKeys("my_hash") + // // field1 equals api.CreateStringResult("field_1") + // // field2 equals api.CreateStringResult("field_2") + // // names equals []api.Result[string]{field1, field2} + // + // [valkey.io]: https://valkey.io/commands/hkeys/ + HKeys(key string) ([]Result[string], error) + + // HStrLen returns the string length of the value associated with field in the hash stored at key. + // If the key or the field do not exist, 0 is returned. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // field - The field to get the string length of its value. + // + // Return value: + // The Result[int64] containing length of the string value associated with field, or 0 when field or key do not exist. + // + // For example: + // strlen, err := client.HStrLen("my_hash", "my_field") + // // strlen.Value(): 10 + // // strlen.IsNil(): false + // + // [valkey.io]: https://valkey.io/commands/hstrlen/ + HStrLen(key string, field string) (Result[int64], error) + + // Increments the number stored at `field` in the hash stored at `key` by increment. + // By using a negative increment value, the value stored at `field` in the hash stored at `key` is decremented. + // If `field` or `key` does not exist, it is set to 0 before performing the operation. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // field - The field in the hash stored at `key` to increment its value. + // increment - The amount to increment. + // + // Return value: + // The Result[int64] value of `field` in the hash stored at `key` after the increment. 
+ // + // Example: + // _, err := client.HSet("key", map[string]string{"field": "10"}) + // hincrByResult, err := client.HIncrBy("key", "field", 1) + // // hincrByResult.Value(): 11 + // + // [valkey.io]: https://valkey.io/commands/hincrby/ + HIncrBy(key string, field string, increment int64) (Result[int64], error) + + // Increments the string representing a floating point number stored at `field` in the hash stored at `key` by increment. + // By using a negative increment value, the value stored at `field` in the hash stored at `key` is decremented. + // If `field` or `key` does not exist, it is set to 0 before performing the operation. + // + // See [valkey.io] for details. + // + // Parameters: + // key - The key of the hash. + // field - The field in the hash stored at `key` to increment its value. + // increment - The amount to increment. + // + // Return value: + // The Result[float64] value of `field` in the hash stored at `key` after the increment. + // + // Example: + // _, err := client.HSet("key", map[string]string{"field": "10"}) + // hincrByFloatResult, err := client.HIncrByFloat("key", "field", 1.5) + // // hincrByFloatResult.Value(): 11.5 + // + // [valkey.io]: https://valkey.io/commands/hincrbyfloat/ + HIncrByFloat(key string, field string, increment float64) (Result[float64], error) +} diff --git a/go/api/list_commands.go b/go/api/list_commands.go index 0d9f07e55c..bfeafdc0c7 100644 --- a/go/api/list_commands.go +++ b/go/api/list_commands.go @@ -2,11 +2,11 @@ package api -// Supports commands and transactions for the "List Commands" group for standalone and cluster clients. +// Supports commands and transactions for the "List" group of commands for standalone and cluster clients. // // See [valkey.io] for details. // -// [valkey.io]: https://valkey.io/commands/?group=list +// [valkey.io]: https://valkey.io/commands/#list type ListCommands interface { // Inserts all the specified values at the head of the list stored at key. 
elements are inserted one after the other to the // head of the list, from the leftmost element to the rightmost element. If key does not exist, it is created as an empty diff --git a/go/api/set_commands.go b/go/api/set_commands.go index a87500a8c0..14a088db6a 100644 --- a/go/api/set_commands.go +++ b/go/api/set_commands.go @@ -2,11 +2,11 @@ package api -// SetCommands supports commands and transactions for the "Set Commands" group for standalone and cluster clients. +// Supports commands and transactions for the "Set" group of commands for standalone and cluster clients. // // See [valkey.io] for details. // -// [valkey.io]: https://valkey.io/commands/?group=set +// [valkey.io]: https://valkey.io/commands/#set type SetCommands interface { // SAdd adds specified members to the set stored at key. // diff --git a/go/api/commands.go b/go/api/string_commands.go similarity index 57% rename from go/api/commands.go rename to go/api/string_commands.go index 8f62892024..2141d3c211 100644 --- a/go/api/commands.go +++ b/go/api/string_commands.go @@ -2,11 +2,11 @@ package api -// StringCommands defines an interface for the "String Commands" group of commands for standalone and cluster clients. +// Supports commands and transactions for the "String" group of commands for standalone and cluster clients. // // See [valkey.io] for details. // -// [valkey.io]: https://valkey.io/commands/?group=string +// [valkey.io]: https://valkey.io/commands/#string type StringCommands interface { // Set the given key with the given value. The return value is a response from Valkey containing the string "OK". // @@ -456,330 +456,3 @@ type StringCommands interface { //[valkey.io]: https://valkey.io/commands/getdel/ GetDel(key string) (Result[string], error) } - -// HashCommands supports commands and transactions for the "Hash Commands" group for standalone and cluster -// clients. -// -// See [valkey.io] for details. 
-// -// [valkey.io]: https://valkey.io/commands/?group=hash -type HashCommands interface { - // HGet returns the value associated with field in the hash stored at key. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // field - The field in the hash stored at key to retrieve from the database. - // - // Return value: - // The Result[string] associated with field, or [api.NilResult[string]](api.CreateNilStringResult()) when field is not - // present in the hash or key does not exist. - // - // For example: - // Assume we have the following hash: - // my_hash := map[string]string{"field1": "value", "field2": "another_value"} - // payload, err := client.HGet("my_hash", "field1") - // // payload.Value(): "value" - // // payload.IsNil(): false - // payload, err = client.HGet("my_hash", "nonexistent_field") - // // payload equals api.CreateNilStringResult() - // - // [valkey.io]: https://valkey.io/commands/hget/ - HGet(key string, field string) (Result[string], error) - - // HGetAll returns all fields and values of the hash stored at key. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // - // Return value: - // A map of all fields and their values as Result[string] in the hash, or an empty map when key does not exist. - // - // For example: - // fieldValueMap, err := client.HGetAll("my_hash") - // // field1 equals api.CreateStringResult("field1") - // // value1 equals api.CreateStringResult("value1") - // // field2 equals api.CreateStringResult("field2") - // // value2 equals api.CreateStringResult("value2") - // // fieldValueMap equals map[api.Result[string]]api.Result[string]{field1: value1, field2: value2} - // - // [valkey.io]: https://valkey.io/commands/hgetall/ - HGetAll(key string) (map[Result[string]]Result[string], error) - - // HMGet returns the values associated with the specified fields in the hash stored at key. - // - // See [valkey.io] for details. 
- // - // Parameters: - // key - The key of the hash. - // fields - The fields in the hash stored at key to retrieve from the database. - // - // Return value: - // An array of Result[string]s associated with the given fields, in the same order as they are requested. - // For every field that does not exist in the hash, a [api.NilResult[string]](api.CreateNilStringResult()) is - // returned. - // If key does not exist, returns an empty string array. - // - // For example: - // values, err := client.HMGet("my_hash", []string{"field1", "field2"}) - // // value1 equals api.CreateStringResult("value1") - // // value2 equals api.CreateStringResult("value2") - // // values equals []api.Result[string]{value1, value2} - // - // [valkey.io]: https://valkey.io/commands/hmget/ - HMGet(key string, fields []string) ([]Result[string], error) - - // HSet sets the specified fields to their respective values in the hash stored at key. - // This command overwrites the values of specified fields that exist in the hash. - // If key doesn't exist, a new key holding a hash is created. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // values - A map of field-value pairs to set in the hash. - // - // Return value: - // The Result[int64] containing number of fields that were added or updated. - // - // For example: - // num, err := client.HSet("my_hash", map[string]string{"field": "value", "field2": "value2"}) - // // num.Value(): 2 - // // num.IsNil(): false - // - // [valkey.io]: https://valkey.io/commands/hset/ - HSet(key string, values map[string]string) (Result[int64], error) - - // HSetNX sets field in the hash stored at key to value, only if field does not yet exist. - // If key does not exist, a new key holding a hash is created. - // If field already exists, this operation has no effect. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // field - The field to set. 
- // value - The value to set. - // - // Return value: - // A Result[bool] containing true if field is a new field in the hash and value was set. - // false if field already exists in the hash and no operation was performed. - // - // For example: - // payload1, err := client.HSetNX("myHash", "field", "value") - // // payload1.Value(): true - // // payload1.IsNil(): false - // payload2, err := client.HSetNX("myHash", "field", "newValue") - // // payload2.Value(): false - // // payload2.IsNil(): false - // - // [valkey.io]: https://valkey.io/commands/hsetnx/ - HSetNX(key string, field string, value string) (Result[bool], error) - - // HDel removes the specified fields from the hash stored at key. - // Specified fields that do not exist within this hash are ignored. - // If key does not exist, it is treated as an empty hash and this command returns 0. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // fields - The fields to remove from the hash stored at key. - // - // Return value: - // The Result[int64] containing number of fields that were removed from the hash, not including specified but non-existing - // fields. - // - // For example: - // num, err := client.HDel("my_hash", []string{"field_1", "field_2"}) - // // num.Value(): 2 - // // num.IsNil(): false - // - // [valkey.io]: https://valkey.io/commands/hdel/ - HDel(key string, fields []string) (Result[int64], error) - - // HLen returns the number of fields contained in the hash stored at key. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // - // Return value: - // The Result[int64] containing number of fields in the hash, or 0 when key does not exist. - // If key holds a value that is not a hash, an error is returned. 
- // - // For example: - // num1, err := client.HLen("myHash") - // // num.Value(): 3 - // // num.IsNil(): false - // num2, err := client.HLen("nonExistingKey") - // // num.Value(): 0 - // // num.IsNil(): false - // - // [valkey.io]: https://valkey.io/commands/hlen/ - HLen(key string) (Result[int64], error) - - // HVals returns all values in the hash stored at key. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // - // Return value: - // A slice of Result[string]s containing all the values in the hash, or an empty slice when key does not exist. - // - // For example: - // values, err := client.HVals("myHash") - // // value1 equals api.CreateStringResult("value1") - // // value2 equals api.CreateStringResult("value2") - // // value3 equals api.CreateStringResult("value3") - // // values equals []api.Result[string]{value1, value2, value3} - // - // [valkey.io]: https://valkey.io/commands/hvals/ - HVals(key string) ([]Result[string], error) - - // HExists returns if field is an existing field in the hash stored at key. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // field - The field to check in the hash stored at key. - // - // Return value: - // A Result[bool] containing true if the hash contains the specified field. - // false if the hash does not contain the field, or if the key does not exist. - // - // For example: - // exists, err := client.HExists("my_hash", "field1") - // // exists.Value(): true - // // exists.IsNil(): false - // exists, err = client.HExists("my_hash", "non_existent_field") - // // exists.Value(): false - // // exists.IsNil(): false - // - // [valkey.io]: https://valkey.io/commands/hexists/ - HExists(key string, field string) (Result[bool], error) - - // HKeys returns all field names in the hash stored at key. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. 
- // - // Return value: - // A slice of Result[string]s containing all the field names in the hash, or an empty slice when key does not exist. - // - // For example: - // names, err := client.HKeys("my_hash") - // // field1 equals api.CreateStringResult("field_1") - // // field2 equals api.CreateStringResult("field_2") - // // names equals []api.Result[string]{field1, field2} - // - // [valkey.io]: https://valkey.io/commands/hkeys/ - HKeys(key string) ([]Result[string], error) - - // HStrLen returns the string length of the value associated with field in the hash stored at key. - // If the key or the field do not exist, 0 is returned. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // field - The field to get the string length of its value. - // - // Return value: - // The Result[int64] containing length of the string value associated with field, or 0 when field or key do not exist. - // - // For example: - // strlen, err := client.HStrLen("my_hash", "my_field") - // // strlen.Value(): 10 - // // strlen.IsNil(): false - // - // [valkey.io]: https://valkey.io/commands/hstrlen/ - HStrLen(key string, field string) (Result[int64], error) - - // Increments the number stored at `field` in the hash stored at `key` by increment. - // By using a negative increment value, the value stored at `field` in the hash stored at `key` is decremented. - // If `field` or `key` does not exist, it is set to 0 before performing the operation. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // field - The field in the hash stored at `key` to increment its value. - // increment - The amount to increment. - // - // Return value: - // The Result[int64] value of `field` in the hash stored at `key` after the increment. 
- // - // Example: - // _, err := client.HSet("key", map[string]string{"field": "10"}) - // hincrByResult, err := client.HIncrBy("key", "field", 1) - // // hincrByResult.Value(): 11 - // - // [valkey.io]: https://valkey.io/commands/hincrby/ - HIncrBy(key string, field string, increment int64) (Result[int64], error) - - // Increments the string representing a floating point number stored at `field` in the hash stored at `key` by increment. - // By using a negative increment value, the value stored at `field` in the hash stored at `key` is decremented. - // If `field` or `key` does not exist, it is set to 0 before performing the operation. - // - // See [valkey.io] for details. - // - // Parameters: - // key - The key of the hash. - // field - The field in the hash stored at `key` to increment its value. - // increment - The amount to increment. - // - // Return value: - // The Result[float64] value of `field` in the hash stored at `key` after the increment. - // - // Example: - // _, err := client.HSet("key", map[string]string{"field": "10"}) - // hincrByFloatResult, err := client.HIncrByFloat("key", "field", 1.5) - // // hincrByFloatResult.Value(): 11.5 - // - // [valkey.io]: https://valkey.io/commands/hincrbyfloat/ - HIncrByFloat(key string, field string, increment float64) (Result[float64], error) -} - -// ConnectionManagementCommands defines an interface for connection management-related commands. -// -// See [valkey.io] for details. -type ConnectionManagementCommands interface { - // Pings the server. - // - // If no argument is provided, returns "PONG". If a message is provided, returns the message. - // - // Return value: - // If no argument is provided, returns "PONG". - // If an argument is provided, returns the argument. - // - // For example: - // result, err := client.Ping("Hello") - // - // [valkey.io]: https://valkey.io/commands/ping/ - Ping() (string, error) - - // Pings the server with a custom message. 
- // - // If a message is provided, returns the message. - // If no argument is provided, returns "PONG". - // - // Return value: - // If no argument is provided, returns "PONG". - // If an argument is provided, returns the argument. - // - // For example: - // result, err := client.PingWithMessage("Hello") - // - // [valkey.io]: https://valkey.io/commands/ping/ - PingWithMessage(message string) (string, error) -} From 077f77fbbd625ded99aabee2457fa844e60130e5 Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Thu, 2 Jan 2025 10:34:45 -0800 Subject: [PATCH 30/32] Cleanup unused CI file (#2864) Cleanup Signed-off-by: Yury-Fridlyand --- python/.github/workflows/CI.yml | 66 --------------------------------- 1 file changed, 66 deletions(-) delete mode 100644 python/.github/workflows/CI.yml diff --git a/python/.github/workflows/CI.yml b/python/.github/workflows/CI.yml deleted file mode 100644 index 81adab9479..0000000000 --- a/python/.github/workflows/CI.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: CI - -on: - push: - pull_request: - -jobs: - linux: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: messense/maturin-action@v1 - with: - manylinux: auto - command: build - args: --release --sdist -o dist - - name: Upload wheels - uses: actions/upload-artifact@v3 - with: - name: wheels - path: dist - - windows: - runs-on: windows-latest - steps: - - uses: actions/checkout@v4 - - uses: messense/maturin-action@v1 - with: - command: build - args: --release -o dist - - name: Upload wheels - uses: actions/upload-artifact@v3 - with: - name: wheels - path: dist - - macos: - runs-on: macos-latest - steps: - - uses: actions/checkout@v4 - - uses: messense/maturin-action@v1 - with: - command: build - args: --release -o dist --universal2 - - name: Upload wheels - uses: actions/upload-artifact@v3 - with: - name: wheels - path: dist - - release: - name: Release - runs-on: ubuntu-latest - if: "startsWith(github.ref, 'refs/tags/')" - needs: [macos, windows, linux] - 
steps: - - uses: actions/download-artifact@v3 - with: - name: wheels - - name: Publish to PyPI - uses: messense/maturin-action@v1 - env: - MATURIN_PYPI_TOKEN: ${{ secrets.PYPI_API_TOKEN }} - with: - command: upload - args: --skip-existing * From c5b78373fbbd13c62bf2ce42adc4f90cc2d692aa Mon Sep 17 00:00:00 2001 From: Yury-Fridlyand Date: Thu, 2 Jan 2025 11:42:28 -0800 Subject: [PATCH 31/32] Go: test fixes + reporting (#2867) * test fixes + reporting Signed-off-by: Yury-Fridlyand --- .github/workflows/go.yml | 25 ++++++++++++++--------- go/DEVELOPER.md | 2 ++ go/Makefile | 29 +++++++++++++++------------ go/api/options/zadd_options.go | 3 +-- go/integTest/glide_test_suite_test.go | 4 ++-- go/integTest/shared_commands_test.go | 22 ++++++++++---------- 6 files changed, 47 insertions(+), 38 deletions(-) diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 044c0ac369..96052130cd 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -102,8 +102,8 @@ jobs: - name: Install & build & test working-directory: go run: | - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GITHUB_WORKSPACE/go/target/release/deps/ - make install-tools-go${{ matrix.go }} build unit-test integ-test + make install-tools-go${{ matrix.go }} build + make -k unit-test integ-test - uses: ./.github/workflows/test-benchmark with: @@ -118,6 +118,7 @@ jobs: path: | utils/clusters/** benchmarks/results/** + go/reports/** lint: timeout-minutes: 10 @@ -205,8 +206,8 @@ jobs: - name: Install & build & test working-directory: go run: | - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GITHUB_WORKSPACE/go/target/release/deps/ - make install-tools-go${{ matrix.go }} build unit-test integ-test + make install-tools-go${{ matrix.go }} build + make -k unit-test integ-test - name: Upload test reports if: always() @@ -217,6 +218,7 @@ jobs: path: | utils/clusters/** benchmarks/results/** + go/reports/** test-modules: if: (github.repository_owner == 'valkey-io' && github.event_name == 'workflow_dispatch') || 
github.event.pull_request.head.repo.owner.login == 'valkey-io' @@ -239,10 +241,13 @@ jobs: - name: Build and test working-directory: ./go run: | - make install-tools-go1.20.0 - LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$GITHUB_WORKSPACE/go/target/release/deps/ - make build - make modules-test cluster-endpoints=${{ secrets.MEMDB_MODULES_ENDPOINT }} tls=true + make install-tools-go1.20.0 build modules-test cluster-endpoints=${{ secrets.MEMDB_MODULES_ENDPOINT }} tls=true - # TODO: - # Upload test reports + - name: Upload test reports + if: always() + continue-on-error: true + uses: actions/upload-artifact@v4 + with: + name: test-reports-modules + path: | + go/reports/** diff --git a/go/DEVELOPER.md b/go/DEVELOPER.md index 12562c9e0f..8dcaf2cb7b 100644 --- a/go/DEVELOPER.md +++ b/go/DEVELOPER.md @@ -167,6 +167,8 @@ By default, those test suite start standalone and cluster servers without TLS an make integ-test standalone-endpoints=localhost:6379 cluster-endpoints=localhost:7000 tls=true ``` +Test reports generated in `reports` folder. + ### Generate protobuf files During the initial build, Go protobuf files were created in `go/protobuf`. If modifications are made to the protobuf definition files (.proto files located in `glide-core/src/protobuf`), it becomes necessary to regenerate the Go protobuf files. To do so, run: diff --git a/go/Makefile b/go/Makefile index c4d4b5aeb4..62eabbaa8b 100644 --- a/go/Makefile +++ b/go/Makefile @@ -1,3 +1,5 @@ +SHELL:=/bin/bash + install-build-tools: go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.33.0 cargo install cbindgen @@ -40,6 +42,7 @@ clean: rm -f benchmarks/benchmarks rm -rf protobuf rm -rf target + rm -rf reports build-glide-client: cargo build --release @@ -76,8 +79,11 @@ format: # unit tests - skip complete IT suite (including MT) unit-test: + mkdir -p reports + set -o pipefail; \ LD_LIBRARY_PATH=$(shell find . 
-name libglide_rs.so|grep -w release|tail -1|xargs dirname|xargs readlink -f):${LD_LIBRARY_PATH} \ - go test -v -race ./... -skip TestGlideTestSuite $(if $(test-filter), -run $(test-filter)) + go test -v -race ./... -skip TestGlideTestSuite $(if $(test-filter), -run $(test-filter)) \ + | tee >(go tool test2json -t -p github.com/valkey-io/valkey-glide/go/glide/utils | go-test-report -o reports/unit-tests.html -t unit-test > /dev/null) # integration tests - run subtask with skipping modules tests integ-test: export TEST_FILTER = -skip TestGlideTestSuite/TestModule $(if $(test-filter), -run $(test-filter)) @@ -88,17 +94,14 @@ modules-test: export TEST_FILTER = $(if $(test-filter), -run $(test-filter), -ru modules-test: __it __it: + mkdir -p reports + set -o pipefail; \ LD_LIBRARY_PATH=$(shell find . -name libglide_rs.so|grep -w release|tail -1|xargs dirname|xargs readlink -f):${LD_LIBRARY_PATH} \ go test -v -race ./integTest/... \ - $(TEST_FILTER) \ - $(if $(filter true, $(tls)), --tls,) \ - $(if $(standalone-endpoints), --standalone-endpoints=$(standalone-endpoints)) \ - $(if $(cluster-endpoints), --cluster-endpoints=$(cluster-endpoints)) - -# Note: this task is no longer run by CI because: -# - build failures that occur while running the task can be hidden by the task; CI still reports success in these scenarios. -# - there is not a good way to both generate a test report and log the test outcomes to GH actions. -# TODO: fix this and include -run/-skip flags -test-and-report: - mkdir -p reports - go test -v -race ./... 
-json | go-test-report -o reports/test-report.html + $(TEST_FILTER) \ + $(if $(filter true, $(tls)), --tls,) \ + $(if $(standalone-endpoints), --standalone-endpoints=$(standalone-endpoints)) \ + $(if $(cluster-endpoints), --cluster-endpoints=$(cluster-endpoints)) \ + | tee >(go tool test2json -t -p github.com/valkey-io/valkey-glide/go/glide/integTest | go-test-report -o reports/integ-tests.html -t integ-test > /dev/null) +# code above ^ is similar to `go test .... -json | go-test-report ....`, but it also prints plain text output to stdout +# `go test` prints plain text, tee duplicates it to stdout and to `test2json` which is coupled with `go-test-report` to generate the report diff --git a/go/api/options/zadd_options.go b/go/api/options/zadd_options.go index 7926b346cc..f10c010e4e 100644 --- a/go/api/options/zadd_options.go +++ b/go/api/options/zadd_options.go @@ -22,8 +22,7 @@ func NewZAddOptionsBuilder() *ZAddOptions { return &ZAddOptions{} } -// `conditionalChange“ defines conditions for updating or adding elements with {@link SortedSetBaseCommands#zadd} -// command. +// `conditionalChange` defines conditions for updating or adding elements with `ZADD` command. 
func (options *ZAddOptions) SetConditionalChange(c ConditionalChange) *ZAddOptions { options.conditionalChange = c return options diff --git a/go/integTest/glide_test_suite_test.go b/go/integTest/glide_test_suite_test.go index eb80993d9d..2ba275799a 100644 --- a/go/integTest/glide_test_suite_test.go +++ b/go/integTest/glide_test_suite_test.go @@ -264,8 +264,8 @@ func (suite *GlideTestSuite) clusterClient(config *api.GlideClusterClientConfigu } func (suite *GlideTestSuite) runWithClients(clients []api.BaseClient, test func(client api.BaseClient)) { - for i, client := range clients { - suite.T().Run(fmt.Sprintf("Testing [%v]", i), func(t *testing.T) { + for _, client := range clients { + suite.T().Run(fmt.Sprintf("%T", client)[5:], func(t *testing.T) { test(client) }) } diff --git a/go/integTest/shared_commands_test.go b/go/integTest/shared_commands_test.go index dd0fc29022..eefd38e9b0 100644 --- a/go/integTest/shared_commands_test.go +++ b/go/integTest/shared_commands_test.go @@ -55,7 +55,7 @@ func (suite *GlideTestSuite) TestSetWithOptions_ReturnOldValue() { func (suite *GlideTestSuite) TestSetWithOptions_OnlyIfExists_overwrite() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := "TestSetWithOptions_OnlyIfExists_overwrite" + key := uuid.New().String() suite.verifyOK(client.Set(key, initialValue)) opts := api.NewSetOptionsBuilder().SetConditionalSet(api.OnlyIfExists) @@ -70,7 +70,7 @@ func (suite *GlideTestSuite) TestSetWithOptions_OnlyIfExists_overwrite() { func (suite *GlideTestSuite) TestSetWithOptions_OnlyIfExists_missingKey() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := "TestSetWithOptions_OnlyIfExists_missingKey" + key := uuid.New().String() opts := api.NewSetOptionsBuilder().SetConditionalSet(api.OnlyIfExists) result, err := client.SetWithOptions(key, anotherValue, opts) @@ -81,7 +81,7 @@ func (suite *GlideTestSuite) TestSetWithOptions_OnlyIfExists_missingKey() { func (suite *GlideTestSuite) 
TestSetWithOptions_OnlyIfDoesNotExist_missingKey() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := "TestSetWithOptions_OnlyIfDoesNotExist_missingKey" + key := uuid.New().String() opts := api.NewSetOptionsBuilder().SetConditionalSet(api.OnlyIfDoesNotExist) suite.verifyOK(client.SetWithOptions(key, anotherValue, opts)) @@ -94,7 +94,7 @@ func (suite *GlideTestSuite) TestSetWithOptions_OnlyIfDoesNotExist_missingKey() func (suite *GlideTestSuite) TestSetWithOptions_OnlyIfDoesNotExist_existingKey() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := "TestSetWithOptions_OnlyIfDoesNotExist_existingKey" + key := uuid.New().String() opts := api.NewSetOptionsBuilder().SetConditionalSet(api.OnlyIfDoesNotExist) suite.verifyOK(client.Set(key, initialValue)) @@ -112,7 +112,7 @@ func (suite *GlideTestSuite) TestSetWithOptions_OnlyIfDoesNotExist_existingKey() func (suite *GlideTestSuite) TestSetWithOptions_KeepExistingExpiry() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := "TestSetWithOptions_KeepExistingExpiry" + key := uuid.New().String() opts := api.NewSetOptionsBuilder().SetExpiry(api.NewExpiryBuilder().SetType(api.Milliseconds).SetCount(uint64(2000))) suite.verifyOK(client.SetWithOptions(key, initialValue, opts)) @@ -139,7 +139,7 @@ func (suite *GlideTestSuite) TestSetWithOptions_KeepExistingExpiry() { func (suite *GlideTestSuite) TestSetWithOptions_UpdateExistingExpiry() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := "TestSetWithOptions_UpdateExistingExpiry" + key := uuid.New().String() opts := api.NewSetOptionsBuilder().SetExpiry(api.NewExpiryBuilder().SetType(api.Milliseconds).SetCount(uint64(100500))) suite.verifyOK(client.SetWithOptions(key, initialValue, opts)) @@ -166,14 +166,14 @@ func (suite *GlideTestSuite) TestSetWithOptions_UpdateExistingExpiry() { func (suite *GlideTestSuite) TestGetEx_existingAndNonExistingKeys() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := 
"TestGetEx_ExisitingKey" + key := uuid.New().String() suite.verifyOK(client.Set(key, initialValue)) result, err := client.GetEx(key) assert.Nil(suite.T(), err) assert.Equal(suite.T(), initialValue, result.Value()) - key = "TestGetEx_NonExisitingKey" + key = uuid.New().String() result, err = client.Get(key) assert.Nil(suite.T(), err) assert.Equal(suite.T(), "", result.Value()) @@ -182,7 +182,7 @@ func (suite *GlideTestSuite) TestGetEx_existingAndNonExistingKeys() { func (suite *GlideTestSuite) TestGetExWithOptions_PersistKey() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := "TestGetExWithOptions_PersistKey" + key := uuid.New().String() suite.verifyOK(client.Set(key, initialValue)) opts := api.NewGetExOptionsBuilder().SetExpiry(api.NewExpiryBuilder().SetType(api.Milliseconds).SetCount(uint64(2000))) @@ -205,7 +205,7 @@ func (suite *GlideTestSuite) TestGetExWithOptions_PersistKey() { func (suite *GlideTestSuite) TestGetExWithOptions_UpdateExpiry() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := "TestGetExWithOptions_UpdateExpiry" + key := uuid.New().String() suite.verifyOK(client.Set(key, initialValue)) opts := api.NewGetExOptionsBuilder().SetExpiry(api.NewExpiryBuilder().SetType(api.Milliseconds).SetCount(uint64(2000))) @@ -227,7 +227,7 @@ func (suite *GlideTestSuite) TestGetExWithOptions_UpdateExpiry() { func (suite *GlideTestSuite) TestSetWithOptions_ReturnOldValue_nonExistentKey() { suite.runWithDefaultClients(func(client api.BaseClient) { - key := "TestSetWithOptions_ReturnOldValue_nonExistentKey" + key := uuid.New().String() opts := api.NewSetOptionsBuilder().SetReturnOldValue(true) result, err := client.SetWithOptions(key, anotherValue, opts) From a36f98edcff6a9462db20f80bdc954e5be654b8c Mon Sep 17 00:00:00 2001 From: Yi-Pin Chen Date: Thu, 2 Jan 2025 15:14:37 -0800 Subject: [PATCH 32/32] [Merge to main] Support transactions for JSON commands (#2862) Java, Node, Python: Add transaction commands for JSON module --------- 
Signed-off-by: Yi-Pin Chen --- CHANGELOG.md | 2 +- .../redis-rs/redis/src/cluster_routing.rs | 3 +- .../api/commands/servermodules/MultiJson.java | 1205 +++++++++++++++++ .../glide/api/models/BaseTransaction.java | 31 +- .../glide/api/models/ClusterTransaction.java | 2 + .../java/glide/api/models/Transaction.java | 2 + .../main/java/glide/utils/ArgsBuilder.java | 30 + .../test/java/glide/modules/JsonTests.java | 201 +++ node/src/server-modules/GlideJson.ts | 791 ++++++++++- node/tests/ServerModules.test.ts | 747 +++++----- node/tests/TestUtilities.ts | 183 +++ python/python/glide/__init__.py | 3 +- .../server_modules/json_transaction.py | 789 +++++++++++ .../tests/tests_server_modules/test_json.py | 134 +- 14 files changed, 3714 insertions(+), 409 deletions(-) create mode 100644 java/client/src/main/java/glide/api/commands/servermodules/MultiJson.java create mode 100644 python/python/glide/async_commands/server_modules/json_transaction.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b85345f69..3c7585a2f8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,5 @@ #### Changes - +* Java, Node, Python: Add transaction commands for JSON module ([#2862](https://github.com/valkey-io/valkey-glide/pull/2862)) * Go: Add HINCRBY command ([#2847](https://github.com/valkey-io/valkey-glide/pull/2847)) * Go: Add HINCRBYFLOAT command ([#2846](https://github.com/valkey-io/valkey-glide/pull/2846)) * Go: Add SUNIONSTORE command ([#2805](https://github.com/valkey-io/valkey-glide/pull/2805)) diff --git a/glide-core/redis-rs/redis/src/cluster_routing.rs b/glide-core/redis-rs/redis/src/cluster_routing.rs index 011f5e08e6..fe03d1e41a 100644 --- a/glide-core/redis-rs/redis/src/cluster_routing.rs +++ b/glide-core/redis-rs/redis/src/cluster_routing.rs @@ -672,7 +672,8 @@ fn base_routing(cmd: &[u8]) -> RouteBy { | b"OBJECT ENCODING" | b"OBJECT FREQ" | b"OBJECT IDLETIME" - | b"OBJECT REFCOUNT" => RouteBy::SecondArg, + | b"OBJECT REFCOUNT" + | b"JSON.DEBUG" => RouteBy::SecondArg, b"LMPOP" 
| b"SINTERCARD" | b"ZDIFF" | b"ZINTER" | b"ZINTERCARD" | b"ZMPOP" | b"ZUNION" => { RouteBy::SecondArgAfterKeyCount diff --git a/java/client/src/main/java/glide/api/commands/servermodules/MultiJson.java b/java/client/src/main/java/glide/api/commands/servermodules/MultiJson.java new file mode 100644 index 0000000000..32f19b45c1 --- /dev/null +++ b/java/client/src/main/java/glide/api/commands/servermodules/MultiJson.java @@ -0,0 +1,1205 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.commands.servermodules; + +import static glide.utils.ArgsBuilder.checkTypeOrThrow; +import static glide.utils.ArgsBuilder.newArgsBuilder; + +import glide.api.models.BaseTransaction; +import glide.api.models.Transaction; +import glide.api.models.commands.ConditionalChange; +import glide.api.models.commands.json.JsonArrindexOptions; +import glide.api.models.commands.json.JsonGetOptions; +import lombok.NonNull; + +/** + * Transaction implementation for JSON module. Transactions allow the execution of a group of + * commands in a single step. See {@link Transaction}. + * + * @example + *
{@code
+ * Transaction transaction = new Transaction();
+ * MultiJson.set(transaction, "doc", ".", "{\"a\": 1.0, \"b\": 2}");
+ * MultiJson.get(transaction, "doc");
+ * Object[] result = client.exec(transaction).get();
+ * assert result[0].equals("OK"); // result of MultiJson.set()
+ * assert result[1].equals("{\"a\": 1.0, \"b\": 2}"); // result of MultiJson.get()
+ * }
+ */ +public class MultiJson { + + private static final String JSON_PREFIX = "JSON."; + private static final String JSON_SET = JSON_PREFIX + "SET"; + private static final String JSON_GET = JSON_PREFIX + "GET"; + private static final String JSON_MGET = JSON_PREFIX + "MGET"; + private static final String JSON_NUMINCRBY = JSON_PREFIX + "NUMINCRBY"; + private static final String JSON_NUMMULTBY = JSON_PREFIX + "NUMMULTBY"; + private static final String JSON_ARRAPPEND = JSON_PREFIX + "ARRAPPEND"; + private static final String JSON_ARRINSERT = JSON_PREFIX + "ARRINSERT"; + private static final String JSON_ARRINDEX = JSON_PREFIX + "ARRINDEX"; + private static final String JSON_ARRLEN = JSON_PREFIX + "ARRLEN"; + private static final String[] JSON_DEBUG_MEMORY = new String[] {JSON_PREFIX + "DEBUG", "MEMORY"}; + private static final String[] JSON_DEBUG_FIELDS = new String[] {JSON_PREFIX + "DEBUG", "FIELDS"}; + private static final String JSON_ARRPOP = JSON_PREFIX + "ARRPOP"; + private static final String JSON_ARRTRIM = JSON_PREFIX + "ARRTRIM"; + private static final String JSON_OBJLEN = JSON_PREFIX + "OBJLEN"; + private static final String JSON_OBJKEYS = JSON_PREFIX + "OBJKEYS"; + private static final String JSON_DEL = JSON_PREFIX + "DEL"; + private static final String JSON_FORGET = JSON_PREFIX + "FORGET"; + private static final String JSON_TOGGLE = JSON_PREFIX + "TOGGLE"; + private static final String JSON_STRAPPEND = JSON_PREFIX + "STRAPPEND"; + private static final String JSON_STRLEN = JSON_PREFIX + "STRLEN"; + private static final String JSON_CLEAR = JSON_PREFIX + "CLEAR"; + private static final String JSON_RESP = JSON_PREFIX + "RESP"; + private static final String JSON_TYPE = JSON_PREFIX + "TYPE"; + + private MultiJson() {} + + /** + * Sets the JSON value at the specified path stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. 
+ * @param path Represents the path within the JSON document where the value will be set. The key + * will be modified only if value is added as the last child in the specified + * path, or if the specified path acts as the parent of a new child + * being added. + * @param value The value to set at the specific path, in JSON formatted string. + * @return Command Response - A simple "OK" response if the value is successfully + * set. + */ + public static > BaseTransaction set( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + @NonNull ArgType value) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + checkTypeOrThrow(value); + return transaction.customCommand( + newArgsBuilder().add(JSON_SET).add(key).add(path).add(value).toArray()); + } + + /** + * Sets the JSON value at the specified path stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path Represents the path within the JSON document where the value will be set. The key + * will be modified only if value is added as the last child in the specified + * path, or if the specified path acts as the parent of a new child + * being added. + * @param value The value to set at the specific path, in JSON formatted string. + * @param setCondition Set the value only if the given condition is met (within the key or path). + * @return Command Response - A simple "OK" response if the value is successfully + * set. If value isn't set because of setCondition, returns null. 
+ */ + public static > BaseTransaction set( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + @NonNull ArgType value, + @NonNull ConditionalChange setCondition) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + checkTypeOrThrow(value); + return transaction.customCommand( + newArgsBuilder() + .add(JSON_SET) + .add(key) + .add(path) + .add(value) + .add(setCondition.getValkeyApi()) + .toArray()); + } + + /** + * Retrieves the JSON value at the specified path stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - Returns a string representation of the JSON document. If key + * doesn't exist, returns null. + */ + public static > BaseTransaction get( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_GET).add(key).toArray()); + } + + /** + * Retrieves the JSON value at the specified paths stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param paths List of paths within the JSON document. + * @return Command Response - + *
    + *
  • If one path is given: + *
      + *
    • For JSONPath (path starts with $): Returns a stringified JSON list + * replies for every possible path, or a string representation of an empty array, + * if path doesn't exist. If key doesn't exist, returns null + * . + *
    • For legacy path (path doesn't start with $): Returns a string + * representation of the value in paths. If paths + * doesn't exist, an error is raised. If key doesn't exist, returns + * null. + *
    + *
  • If multiple paths are given: Returns a stringified JSON, in which each path is a key, + * and it's corresponding value, is the value as if the path was executed in the command + * as a single path. + *
+ * In case of multiple paths, and paths are a mix of both JSONPath and legacy + * path, the command behaves as if all are JSONPath paths. + */ + public static > BaseTransaction get( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType[] paths) { + checkTypeOrThrow(key); + checkTypeOrThrow(paths); + return transaction.customCommand(newArgsBuilder().add(JSON_GET).add(key).add(paths).toArray()); + } + + /** + * Retrieves the JSON value at the specified path stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param options Options for formatting the byte representation of the JSON data. See + * JsonGetOptions. + * @return Command Response - Returns a string representation of the JSON document. If key + * doesn't exist, returns null. + */ + public static > BaseTransaction get( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull JsonGetOptions options) { + checkTypeOrThrow(key); + return transaction.customCommand( + newArgsBuilder().add(JSON_GET).add(key).add(options.toArgs()).toArray()); + } + + /** + * Retrieves the JSON value at the specified path stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param paths List of paths within the JSON document. + * @param options Options for formatting the byte representation of the JSON data. See + * JsonGetOptions. + * @return Command Response - + *
    + *
  • If one path is given: + *
      + *
    • For JSONPath (path starts with $): Returns a stringified JSON list + * replies for every possible path, or a string representation of an empty array, + * if path doesn't exist. If key doesn't exist, returns null + * . + *
    • For legacy path (path doesn't start with $): Returns a string + * representation of the value in paths. If paths + * doesn't exist, an error is raised. If key doesn't exist, returns + * null. + *
    + *
  • If multiple paths are given: Returns a stringified JSON, in which each path is a key, + * and its corresponding value is the value as if the path was executed in the command + * as a single path. + *
+ * In case of multiple paths, and paths are a mix of both JSONPath and legacy + * path, the command behaves as if all are JSONPath paths. + */ + public static > BaseTransaction get( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType[] paths, + @NonNull JsonGetOptions options) { + checkTypeOrThrow(key); + checkTypeOrThrow(paths); + return transaction.customCommand( + newArgsBuilder().add(JSON_GET).add(key).add(options.toArgs()).add(paths).toArray()); + } + + /** + * Retrieves the JSON values at the specified path stored at multiple keys + * . + * + * @apiNote When using ClusterTransaction, all keys in the transaction must be mapped to the same + * slot. + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param keys The keys of the JSON documents. + * @param path The path within the JSON documents. + * @return Command Response -An array with requested values for each key. + *
    + *
  • For JSONPath (path starts with $): Returns a stringified JSON list + * replies for every possible path, or a string representation of an empty array, if + * path doesn't exist. + *
  • For legacy path (path doesn't start with $): Returns a string + * representation of the value in path. If path doesn't exist, + * the corresponding array element will be null. + *
+ * If a key doesn't exist, the corresponding array element will be null + * . + */ + public static > BaseTransaction mget( + @NonNull BaseTransaction transaction, @NonNull ArgType[] keys, @NonNull ArgType path) { + checkTypeOrThrow(keys); + checkTypeOrThrow(path); + return transaction.customCommand(newArgsBuilder().add(JSON_MGET).add(keys).add(path).toArray()); + } + + /** + * Appends one or more values to the JSON array at the specified path + * within the JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path Represents the path within the JSON document where the values + * will be appended. + * @param values The JSON values to be appended to the array.
+ * JSON string values must be wrapped with quotes. For example, to append "foo", + * pass "\"foo\"". + * @return Command Response - + *
    + *
  • For JSONPath (path starts with $):
    + * Returns a list of integers for every possible path, indicating the new length of the + * array after appending values, or null for JSON values + * matching the path that are not an array. If path does not exist, an + * empty array will be returned. + *
  • For legacy path (path doesn't start with $):
    + * Returns the new length of the array after appending values to the array + * at path. If multiple paths are matched, returns the last updated array. + * If the JSON value at path is not an array or if path + * doesn't exist, an error is raised. If key doesn't exist, an error is + * raised. + */ + public static > BaseTransaction arrappend( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + @NonNull ArgType[] values) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + checkTypeOrThrow(values); + return transaction.customCommand( + newArgsBuilder().add(JSON_ARRAPPEND).add(key).add(path).add(values).toArray()); + } + + /** + * Inserts one or more values into the array at the specified path within the JSON + * document stored at key, before the given index. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @param index The array index before which values are inserted. + * @param values The JSON values to be inserted into the array.
    + * JSON string values must be wrapped with quotes. For example, to insert "foo", + * pass "\"foo\"". + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns an Object[] with a list of integers for every possible path, + * indicating the new length of the array, or null for JSON values matching + * the path that are not an array. If path does not exist, an empty array + * will be returned. + *
    • For legacy path (path doesn't start with $):
      + * Returns an integer representing the new length of the array. If multiple paths are + * matched, returns the length of the first modified array. If path doesn't + * exist or the value at path is not an array, an error is raised. + *
    + * If the index is out of bounds or key doesn't exist, an error is raised. + */ + public static > BaseTransaction arrinsert( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + int index, + @NonNull ArgType[] values) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + checkTypeOrThrow(values); + return transaction.customCommand( + newArgsBuilder() + .add(JSON_ARRINSERT) + .add(key) + .add(path) + .add(Integer.toString(index)) + .add(values) + .toArray()); + } + + /** + * Searches for the first occurrence of a scalar JSON value in the arrays at the + * path. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @param scalar The scalar value to search for. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $): Returns an array with a + * list of integers for every possible path, indicating the index of the matching + * element. The value is -1 if not found. If a value is not an array, its + * corresponding return value is null. + *
    • For legacy path (path doesn't start with $): Returns an integer + * representing the index of matching element, or -1 if not found. If the + * value at the path is not an array, an error is raised. + *
    + */ + public static > BaseTransaction arrindex( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + @NonNull ArgType scalar) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + checkTypeOrThrow(scalar); + return transaction.customCommand( + newArgsBuilder().add(JSON_ARRINDEX).add(key).add(path).add(scalar).toArray()); + } + + /** + * Searches for the first occurrence of a scalar JSON value in the arrays at the + * path. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @param scalar The scalar value to search for. + * @param options The additional options for the command. See JsonArrindexOptions. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $): Returns an array with a + * list of integers for every possible path, indicating the index of the matching + * element. The value is -1 if not found. If a value is not an array, its + * corresponding return value is null. + *
    • For legacy path (path doesn't start with $): Returns an integer + * representing the index of matching element, or -1 if not found. If the + * value at the path is not an array, an error is raised. + *
    + */ + public static > BaseTransaction arrindex( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + @NonNull ArgType scalar, + @NonNull JsonArrindexOptions options) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + checkTypeOrThrow(scalar); + return transaction.customCommand( + newArgsBuilder() + .add(JSON_ARRINDEX) + .add(key) + .add(path) + .add(scalar) + .add(options.toArgs()) + .toArray()); + } + + /** + * Retrieves the length of the array at the specified path within the JSON document + * stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns an Object[] with a list of integers for every possible path, + * indicating the length of the array, or null for JSON values matching the + * path that are not an array. If path does not exist, an empty array will + * be returned. + *
    • For legacy path (path doesn't start with $):
      + * Returns an integer representing the length of the array. If multiple paths are + * matched, returns the length of the first matching array. If path doesn't + * exist or the value at path is not an array, an error is raised. + *
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction arrlen( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_ARRLEN).add(key).add(path).toArray()); + } + + /** + * Retrieves the length of the array at the root of the JSON document stored at key. + *
    + * Equivalent to {@link #arrlen(BaseTransaction, ArgType, ArgType)} with path set to + * + * ".". + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - The array length stored at the root of the document. If document + * root is not an array, an error is raised.
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction arrlen( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_ARRLEN).add(key).toArray()); + } + + /** + * Reports memory usage in bytes of a JSON object at the specified path within the + * JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns an Object[] with a list of numbers for every possible path, + * indicating the memory usage. If path does not exist, an empty array will + * be returned. + *
    • For legacy path (path doesn't start with $):
      + * Returns an integer representing the memory usage. If multiple paths are matched, + * returns the data of the first matching object. If path doesn't exist, an + * error is raised. + *
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction debugMemory( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_DEBUG_MEMORY).add(key).add(path).toArray()); + } + + /** + * Reports memory usage in bytes of a JSON object at the specified path within the + * JSON document stored at key.
    + * Equivalent to {@link #debugMemory(BaseTransaction, ArgType, ArgType)} with path + * set to "..". + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - The total memory usage in bytes of the entire JSON document.
    + * If key doesn't exist, returns null. + * @example + *
    {@code
    +     * Json.set(client, "doc", "$", "[1, 2.3, \"foo\", true, null, {}, [], {\"a\":1, \"b\":2}, [1, 2, 3]]").get();
    +     * var res = Json.debugMemory(client, "doc").get();
    +     * assert res == 258L;
    +     * }
    + */ + public static > BaseTransaction debugMemory( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_DEBUG_MEMORY).add(key).toArray()); + } + + /** + * Reports the number of fields at the specified path within the JSON document stored + * at key.
    + * Each non-container JSON value counts as one field. Objects and arrays recursively count one + * field for each of their containing JSON values. Each container value, except the root + * container, counts as one additional field. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns an Object[] with a list of numbers for every possible path, + * indicating the number of fields. If path does not exist, an empty array + * will be returned. + *
    • For legacy path (path doesn't start with $):
      + * Returns an integer representing the number of fields. If multiple paths are matched, + * returns the data of the first matching object. If path doesn't exist, an + * error is raised. + *
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction debugFields( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_DEBUG_FIELDS).add(key).add(path).toArray()); + } + + /** + * Reports the number of fields at the specified path within the JSON document stored + * at key.
    + * Each non-container JSON value counts as one field. Objects and arrays recursively count one + * field for each of their containing JSON values. Each container value, except the root + * container, counts as one additional field.
    + * Equivalent to {@link #debugFields(BaseTransaction, ArgType, ArgType)} with path + * set to "..". + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - The total number of fields in the entire JSON document.
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction debugFields( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_DEBUG_FIELDS).add(key).toArray()); + } + + /** + * Pops the last element from the array stored in the root of the JSON document stored at + * key. Equivalent to {@link #arrpop(BaseTransaction, ArgType, ArgType)} with + * path set to ".". + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - Returns a string representing the popped JSON value, or null + * if the array at document root is empty.
    + * If the JSON value at document root is not an array or if key doesn't exist, an + * error is raised. + */ + public static > BaseTransaction arrpop( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_ARRPOP).add(key).toArray()); + } + + /** + * Pops the last element from the array located at path in the JSON document stored + * at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns an array with a strings for every possible path, representing the popped JSON + * values, or null for JSON values matching the path that are not an array + * or an empty array. + *
    • For legacy path (path doesn't start with $):
      + * Returns a string representing the popped JSON value, or null if the + * array at path is empty. If multiple paths are matched, the value from + * the first matching array that is not empty is returned. If path doesn't + * exist or the value at path is not an array, an error is raised. + *
    + * If key doesn't exist, an error is raised. + */ + public static > BaseTransaction arrpop( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_ARRPOP).add(key).add(path).toArray()); + } + + /** + * Pops an element from the array located at path in the JSON document stored at + * key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @param index The index of the element to pop. Out of boundary indexes are rounded to their + * respective array boundaries. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns an array with a strings for every possible path, representing the popped JSON + * values, or null for JSON values matching the path that are not an array + * or an empty array. + *
    • For legacy path (path doesn't start with $):
      + * Returns a string representing the popped JSON value, or null if the + * array at path is empty. If multiple paths are matched, the value from + * the first matching array that is not empty is returned. If path doesn't + * exist or the value at path is not an array, an error is raised. + *
    + * If key doesn't exist, an error is raised. + */ + public static > BaseTransaction arrpop( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + long index) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_ARRPOP).add(key).add(path).add(Long.toString(index)).toArray()); + } + + /** + * Trims an array at the specified path within the JSON document stored at key + * so that it becomes a subarray [start, end], both inclusive. + *
    + * If start < 0, it is treated as 0.
    + * If end >= size (size of the array), it is treated as size -1.
    + * If start >= size or start > end, the array is emptied + * and 0 is returned.
    + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @param start The index of the first element to keep, inclusive. + * @param end The index of the last element to keep, inclusive. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns an Object[] with a list of integers for every possible path, + * indicating the new length of the array, or null for JSON values matching + * the path that are not an array. If the array is empty, its corresponding return value + * is 0. If path doesn't exist, an empty array will be returned. If an index + * argument is out of bounds, an error is raised.
    • For legacy path (path doesn't start with $):
      + * Returns an integer representing the new length of the array. If the array is empty, + * its corresponding return value is 0. If multiple paths match, the length of the first + * trimmed array match is returned. If path doesn't exist, or the value at + * path is not an array, an error is raised. If an index argument is out of + * bounds, an error is raised. + *
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction arrtrim( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + int start, + int end) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder() + .add(JSON_ARRTRIM) + .add(key) + .add(path) + .add(Integer.toString(start)) + .add(Integer.toString(end)) + .toArray()); + } + + /** + * Increments or decrements the JSON value(s) at the specified path by number + * within the JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @param number The number to increment or decrement by. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns a string representation of an array of strings, indicating the new values + * after incrementing for each matched path.
      + * If a value is not a number, its corresponding return value will be null. + *
      + * If path doesn't exist, a byte string representation of an empty array + * will be returned. + *
    • For legacy path (path doesn't start with $):
      + * Returns a string representation of the resulting value after the increment or + * decrement.
      + * If multiple paths match, the result of the last updated value is returned.
      + * If the value at the path is not a number or path doesn't + * exist, an error is raised. + *
    + * If key does not exist, an error is raised.
    + * If the result is out of the range of 64-bit IEEE double, an error is raised. + */ + public static > BaseTransaction numincrby( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + Number number) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_NUMINCRBY).add(key).add(path).add(number.toString()).toArray()); + } + + /** + * Multiplies the JSON value(s) at the specified path by number within + * the JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @param number The number to multiply by. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns a string representation of an array of strings, indicating the new values + * after multiplication for each matched path.
      + * If a value is not a number, its corresponding return value will be null. + *
      + * If path doesn't exist, a byte string representation of an empty array + * will be returned. + *
    • For legacy path (path doesn't start with $):
      + * Returns a string representation of the resulting value after multiplication.
      + * If multiple paths match, the result of the last updated value is returned.
      + * If the value at the path is not a number or path doesn't + * exist, an error is raised. + *
    + * If key does not exist, an error is raised.
    + * If the result is out of the range of 64-bit IEEE double, an error is raised. + */ + public static > BaseTransaction nummultby( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType path, + Number number) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_NUMMULTBY).add(key).add(path).add(number.toString()).toArray()); + } + + /** + * Retrieves the number of key-value pairs in the object values at the specified path + * within the JSON document stored at key.
    + * Equivalent to {@link #objlen(BaseTransaction, ArgType, ArgType)} with path set to + * + * ".". + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - The object length stored at the root of the document. If document + * root is not an object, an error is raised.
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction objlen( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_OBJLEN).add(key).toArray()); + } + + /** + * Retrieves the number of key-value pairs in the object values at the specified path + * within the JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns an Object[] with a list of long integers for every possible + * path, indicating the number of key-value pairs for each matching object, or + * null + * for JSON values matching the path that are not an object. If path + * does not exist, an empty array will be returned. + *
    • For legacy path (path doesn't start with $):
      + * Returns the number of key-value pairs for the object value matching the path. If + * multiple paths are matched, returns the length of the first matching object. If + * path doesn't exist or the value at path is not an array, an + * error is raised. + *
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction objlen( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_OBJLEN).add(key).add(path).toArray()); + } + + /** + * Retrieves the key names in the object values at the specified path within the JSON + * document stored at key.
    + * Equivalent to {@link #objkeys(BaseTransaction, ArgType, ArgType)} with path set to + * + * ".". + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - The list of key names in the object stored at the root of the document. If document + * root is not an object, an error is raised.
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction objkeys( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_OBJKEYS).add(key).toArray()); + } + + /** + * Retrieves the key names in the object values at the specified path within the JSON + * document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns an Object[][] with each nested array containing key names for + * each matching object for every possible path, indicating the list of object keys for + * each matching object, or null for JSON values matching the path that are + * not an object. If path does not exist, an empty sub-array will be + * returned. + *
    • For legacy path (path doesn't start with $):
      + * Returns an array of object keys for the object value matching the path. If multiple + * paths are matched, returns the length of the first matching object. If path + * doesn't exist or the value at path is not an array, an error is + * raised. + *
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction objkeys( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_OBJKEYS).add(key).add(path).toArray()); + } + + /** + * Deletes the JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - The number of elements deleted. 0 if the key does not exist. + */ + public static > BaseTransaction del( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_DEL).add(key).toArray()); + } + + /** + * Deletes the JSON value at the specified path within the JSON document stored at + * key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path Represents the path within the JSON document where the value will be deleted. + * @return Command Response - The number of elements deleted. 0 if the key does not exist, or if + * the JSON path is invalid or does not exist. + */ + public static > BaseTransaction del( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand(newArgsBuilder().add(JSON_DEL).add(key).add(path).toArray()); + } + + /** + * Deletes the JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - The number of elements deleted. 0 if the key does not exist. 
+ */ + public static > BaseTransaction forget( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_FORGET).add(key).toArray()); + } + + /** + * Deletes the JSON value at the specified path within the JSON document stored at + * key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path Represents the path within the JSON document where the value will be deleted. + * @return Command Response - The number of elements deleted. 0 if the key does not exist, or if + * the JSON path is invalid or does not exist. + */ + public static > BaseTransaction forget( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_FORGET).add(key).add(path).toArray()); + } + + /** + * Toggles a Boolean value stored at the root within the JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - Returns the toggled boolean value at the root of the document, or + * null for JSON values matching the root that are not boolean. If key + * doesn't exist, returns null. + */ + public static > BaseTransaction toggle( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_TOGGLE).add(key).toArray()); + } + + /** + * Toggles a Boolean value stored at the specified path within the JSON document + * stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns a Boolean[] with the toggled boolean value for every possible + * path, or null for JSON values matching the path that are not boolean. + *
    • For legacy path (path doesn't start with $):
      + * Returns the value of the toggled boolean in path. If path + * doesn't exist or the value at path isn't a boolean, an error is raised. + *
    + * If key doesn't exist, returns null. + */ + public static > BaseTransaction toggle( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_TOGGLE).add(key).add(path).toArray()); + } + + /** + * Appends the specified value to the string stored at the specified path + * within the JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param value The value to append to the string. Must be wrapped with single quotes. For + * example, to append "foo", pass '"foo"'. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns a list of integer replies for every possible path, indicating the length of + * the resulting string after appending value, or null for + * JSON values matching the path that are not string.
      + * If key doesn't exist, an error is raised. + *
    • For legacy path (path doesn't start with $):
      + * Returns the length of the resulting string after appending value to the + * string at path.
      + * If multiple paths match, the length of the last updated string is returned.
      + * If the JSON value at path is not a string of if path + * doesn't exist, an error is raised.
      + * If key doesn't exist, an error is raised. + *
    + */ + public static > BaseTransaction strappend( + @NonNull BaseTransaction transaction, + @NonNull ArgType key, + @NonNull ArgType value, + @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(value); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_STRAPPEND).add(key).add(path).add(value).toArray()); + } + + /** + * Appends the specified value to the string stored at the root within the JSON + * document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param value The value to append to the string. Must be wrapped with single quotes. For + * example, to append "foo", pass '"foo"'. + * @return Command Response - Returns the length of the resulting string after appending + * value to the string at the root.
    + * If the JSON value at root is not a string, an error is raised.
    + * If key doesn't exist, an error is raised. + */ + public static > BaseTransaction strappend( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType value) { + checkTypeOrThrow(key); + checkTypeOrThrow(value); + return transaction.customCommand( + newArgsBuilder().add(JSON_STRAPPEND).add(key).add(value).toArray()); + } + + /** + * Returns the length of the JSON string value stored at the specified path within + * the JSON document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $):
      + * Returns a list of integer replies for every possible path, indicating the length of + * the JSON string value, or null for JSON values matching the path that + * are not string. + *
    • For legacy path (path doesn't start with $):
      + * Returns the length of the JSON value at path or null if + * key doesn't exist.
      + * If multiple paths match, the length of the first matched string is returned.
      + * If the JSON value at path is not a string of if path + * doesn't exist, an error is raised. If key doesn't exist, null + * is returned. + *
    + */ + public static > BaseTransaction strlen( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand( + newArgsBuilder().add(JSON_STRLEN).add(key).add(path).toArray()); + } + + /** + * Returns the length of the JSON string value stored at the root within the JSON document stored + * at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - Returns the length of the JSON value at the root.
    + * If the JSON value is not a string, an error is raised.
    + * If key doesn't exist, null is returned. + */ + public static > BaseTransaction strlen( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_STRLEN).add(key).toArray()); + } + + /** + * Clears an array and an object at the root of the JSON document stored at key.
    + * Equivalent to {@link #clear(BaseTransaction, ArgType, ArgType)} with path set to + * + * ".". + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - 1 if the document wasn't empty or 0 if it + * was.
    + * If key doesn't exist, an error is raised. + */ + public static > BaseTransaction clear( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_CLEAR).add(key).toArray()); + } + + /** + * Clears arrays and objects at the specified path within the JSON document stored at + * key.
    + * Numeric values are set to 0, boolean values are set to false, and + * string values are converted to empty strings. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - The number of containers cleared.
    + * If path doesn't exist, or the value at path is already cleared + * (e.g., an empty array, object, or string), 0 is returned. If key doesn't + * exist, an error is raised. + */ + public static > BaseTransaction clear( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand(newArgsBuilder().add(JSON_CLEAR).add(key).add(path).toArray()); + } + + /** + * Retrieves the JSON document stored at key. The returning result is in the Valkey + * or Redis OSS Serialization Protocol (RESP). + * + *
      + *
    • JSON null is mapped to the RESP Null Bulk String. + *
    • JSON Booleans are mapped to RESP Simple string. + *
    • JSON integers are mapped to RESP Integers. + *
    • JSON doubles are mapped to RESP Bulk Strings. + *
    • JSON strings are mapped to RESP Bulk Strings. + *
    • JSON arrays are represented as RESP arrays, where the first element is the simple string + * [, followed by the array's elements. + *
    • JSON objects are represented as RESP object, where the first element is the simple string + * {, followed by key-value pairs, each of which is a RESP bulk string. + *
    + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - Returns the JSON document in its RESP form. If key + * doesn't exist, null is returned. + */ + public static > BaseTransaction resp( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_RESP).add(key).toArray()); + } + + /** + * Retrieve the JSON value at the specified path within the JSON document stored at + * key. The returning result is in the Valkey or Redis OSS Serialization Protocol + * (RESP). + * + *
      + *
    • JSON null is mapped to the RESP Null Bulk String. + *
    • JSON Booleans are mapped to RESP Simple string. + *
    • JSON integers are mapped to RESP Integers. + *
    • JSON doubles are mapped to RESP Bulk Strings. + *
    • JSON strings are mapped to RESP Bulk Strings. + *
    • JSON arrays are represented as RESP arrays, where the first element is the simple string + * [, followed by the array's elements. + *
    • JSON objects are represented as RESP object, where the first element is the simple string + * {, followed by key-value pairs, each of which is a RESP bulk string. + *
    + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path The path within the JSON document. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $): Returns a list of + * replies for every possible path, indicating the RESP form of the JSON value. If + * path doesn't exist, returns an empty list. + *
    • For legacy path (path doesn't starts with $): Returns a + * single reply for the JSON value at the specified path, in its RESP form. If multiple + * paths match, the value of the first JSON value match is returned. If path + * doesn't exist, an error is raised. + *
    + * If key doesn't exist, null is returned. + */ + public static > BaseTransaction resp( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand(newArgsBuilder().add(JSON_RESP).add(key).add(path).toArray()); + } + + /** + * Retrieves the type of the JSON value at the root of the JSON document stored at key + * . + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @return Command Response - Returns the type of the JSON value at root. If key + * doesn't exist, + * null is returned. + */ + public static > BaseTransaction type( + @NonNull BaseTransaction transaction, @NonNull ArgType key) { + checkTypeOrThrow(key); + return transaction.customCommand(newArgsBuilder().add(JSON_TYPE).add(key).toArray()); + } + + /** + * Retrieves the type of the JSON value at the specified path within the JSON + * document stored at key. + * + * @param transaction The Valkey GLIDE client to execute the command in transaction. + * @param key The key of the JSON document. + * @param path Represents the path within the JSON document where the type will be retrieved. + * @return Command Response - + *
      + *
    • For JSONPath (path starts with $): Returns a list of string + * replies for every possible path, indicating the type of the JSON value. If `path` + * doesn't exist, an empty array will be returned. + *
    • For legacy path (path doesn't starts with $): Returns the + * type of the JSON value at `path`. If multiple paths match, the type of the first JSON + * value match is returned. If `path` doesn't exist, null will be returned. + *
    + * If key doesn't exist, null is returned. + */ + public static > BaseTransaction type( + @NonNull BaseTransaction transaction, @NonNull ArgType key, @NonNull ArgType path) { + checkTypeOrThrow(key); + checkTypeOrThrow(path); + return transaction.customCommand(newArgsBuilder().add(JSON_TYPE).add(key).add(path).toArray()); + } +} diff --git a/java/client/src/main/java/glide/api/models/BaseTransaction.java b/java/client/src/main/java/glide/api/models/BaseTransaction.java index 3914b05049..bfdd81efc0 100644 --- a/java/client/src/main/java/glide/api/models/BaseTransaction.java +++ b/java/client/src/main/java/glide/api/models/BaseTransaction.java @@ -215,6 +215,8 @@ import static glide.api.models.commands.stream.StreamReadOptions.READ_COUNT_VALKEY_API; import static glide.api.models.commands.stream.XInfoStreamOptions.COUNT; import static glide.api.models.commands.stream.XInfoStreamOptions.FULL; +import static glide.utils.ArgsBuilder.checkTypeOrThrow; +import static glide.utils.ArgsBuilder.newArgsBuilder; import static glide.utils.ArrayTransformUtils.flattenAllKeysFollowedByAllValues; import static glide.utils.ArrayTransformUtils.flattenMapToGlideStringArray; import static glide.utils.ArrayTransformUtils.flattenMapToGlideStringArrayValueFirst; @@ -7267,35 +7269,6 @@ protected ArgsArray emptyArgs() { return commandArgs.build(); } - protected ArgsBuilder newArgsBuilder() { - return new ArgsBuilder(); - } - - protected void checkTypeOrThrow(ArgType arg) { - if ((arg instanceof String) || (arg instanceof GlideString)) { - return; - } - throw new IllegalArgumentException("Expected String or GlideString"); - } - - protected void checkTypeOrThrow(ArgType[] args) { - if (args.length == 0) { - // nothing to check here - return; - } - checkTypeOrThrow(args[0]); - } - - protected void checkTypeOrThrow(Map argsMap) { - if (argsMap.isEmpty()) { - // nothing to check here - return; - } - - var arg = argsMap.keySet().iterator().next(); - checkTypeOrThrow(arg); - } - /** Helper 
function for creating generic type ("ArgType") array */ @SafeVarargs protected final ArgType[] createArray(ArgType... args) { diff --git a/java/client/src/main/java/glide/api/models/ClusterTransaction.java b/java/client/src/main/java/glide/api/models/ClusterTransaction.java index 6252d69d36..667c8e2785 100644 --- a/java/client/src/main/java/glide/api/models/ClusterTransaction.java +++ b/java/client/src/main/java/glide/api/models/ClusterTransaction.java @@ -4,6 +4,8 @@ import static command_request.CommandRequestOuterClass.RequestType.PubSubShardChannels; import static command_request.CommandRequestOuterClass.RequestType.PubSubShardNumSub; import static command_request.CommandRequestOuterClass.RequestType.SPublish; +import static glide.utils.ArgsBuilder.checkTypeOrThrow; +import static glide.utils.ArgsBuilder.newArgsBuilder; import glide.api.GlideClusterClient; import lombok.NonNull; diff --git a/java/client/src/main/java/glide/api/models/Transaction.java b/java/client/src/main/java/glide/api/models/Transaction.java index ed69907b2b..ac7bf6e09f 100644 --- a/java/client/src/main/java/glide/api/models/Transaction.java +++ b/java/client/src/main/java/glide/api/models/Transaction.java @@ -7,6 +7,8 @@ import static command_request.CommandRequestOuterClass.RequestType.Select; import static glide.api.commands.GenericBaseCommands.REPLACE_VALKEY_API; import static glide.api.commands.GenericCommands.DB_VALKEY_API; +import static glide.utils.ArgsBuilder.checkTypeOrThrow; +import static glide.utils.ArgsBuilder.newArgsBuilder; import glide.api.GlideClient; import glide.api.models.commands.scan.ScanOptions; diff --git a/java/client/src/main/java/glide/utils/ArgsBuilder.java b/java/client/src/main/java/glide/utils/ArgsBuilder.java index 066d75a707..c6873f70fb 100644 --- a/java/client/src/main/java/glide/utils/ArgsBuilder.java +++ b/java/client/src/main/java/glide/utils/ArgsBuilder.java @@ -3,6 +3,7 @@ import glide.api.models.GlideString; import java.util.ArrayList; +import 
java.util.Map; /** * Helper class for collecting arbitrary type of arguments and stores them as an array of @@ -63,4 +64,33 @@ public ArgsBuilder add(int[] args) { public GlideString[] toArray() { return argumentsList.toArray(new GlideString[0]); } + + public static void checkTypeOrThrow(ArgType arg) { + if ((arg instanceof String) || (arg instanceof GlideString)) { + return; + } + throw new IllegalArgumentException("Expected String or GlideString"); + } + + public static void checkTypeOrThrow(ArgType[] args) { + if (args.length == 0) { + // nothing to check here + return; + } + checkTypeOrThrow(args[0]); + } + + public static void checkTypeOrThrow(Map argsMap) { + if (argsMap.isEmpty()) { + // nothing to check here + return; + } + + var arg = argsMap.keySet().iterator().next(); + checkTypeOrThrow(arg); + } + + public static ArgsBuilder newArgsBuilder() { + return new ArgsBuilder(); + } } diff --git a/java/integTest/src/test/java/glide/modules/JsonTests.java b/java/integTest/src/test/java/glide/modules/JsonTests.java index 747a6078b6..21d051f12f 100644 --- a/java/integTest/src/test/java/glide/modules/JsonTests.java +++ b/java/integTest/src/test/java/glide/modules/JsonTests.java @@ -1,6 +1,7 @@ /** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.modules; +import static glide.TestUtilities.assertDeepEquals; import static glide.TestUtilities.commonClusterClientConfig; import static glide.api.BaseClient.OK; import static glide.api.models.GlideString.gs; @@ -16,12 +17,15 @@ import com.google.gson.JsonParser; import glide.api.GlideClusterClient; import glide.api.commands.servermodules.Json; +import glide.api.commands.servermodules.MultiJson; +import glide.api.models.ClusterTransaction; import glide.api.models.GlideString; import glide.api.models.commands.ConditionalChange; import glide.api.models.commands.FlushMode; import glide.api.models.commands.InfoOptions.Section; import glide.api.models.commands.json.JsonArrindexOptions; 
import glide.api.models.commands.json.JsonGetOptions; +import java.util.ArrayList; import java.util.Map; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -1225,4 +1229,201 @@ public void json_type() { // Check for all types in the JSON document using legacy path assertEquals("string", Json.type(client, key, "[*]").get()); } + + @SneakyThrows + @Test + public void transaction_tests() { + + ClusterTransaction transaction = new ClusterTransaction(); + ArrayList expectedResult = new ArrayList<>(); + + String key1 = "{key}-1" + UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String key3 = "{key}-3" + UUID.randomUUID(); + String key4 = "{key}-4" + UUID.randomUUID(); + String key5 = "{key}-5" + UUID.randomUUID(); + String key6 = "{key}-6" + UUID.randomUUID(); + + MultiJson.set(transaction, key1, "$", "{\"a\": \"one\", \"b\": [\"one\", \"two\"]}"); + expectedResult.add(OK); + + MultiJson.set( + transaction, + key1, + "$", + "{\"a\": \"one\", \"b\": [\"one\", \"two\"]}", + ConditionalChange.ONLY_IF_DOES_NOT_EXIST); + expectedResult.add(null); + + MultiJson.get(transaction, key1); + expectedResult.add("{\"a\":\"one\",\"b\":[\"one\",\"two\"]}"); + + MultiJson.get(transaction, key1, new String[] {"$.a", "$.b"}); + expectedResult.add("{\"$.a\":[\"one\"],\"$.b\":[[\"one\",\"two\"]]}"); + + MultiJson.get(transaction, key1, JsonGetOptions.builder().space(" ").build()); + expectedResult.add("{\"a\": \"one\",\"b\": [\"one\",\"two\"]}"); + + MultiJson.get( + transaction, + key1, + new String[] {"$.a", "$.b"}, + JsonGetOptions.builder().space(" ").build()); + expectedResult.add("{\"$.a\": [\"one\"],\"$.b\": [[\"one\",\"two\"]]}"); + + MultiJson.arrappend( + transaction, key1, "$.b", new String[] {"\"3\"", "\"4\"", "\"5\"", "\"6\""}); + expectedResult.add(new Object[] {6L}); + + MultiJson.arrindex(transaction, key1, "$..b", "\"one\""); + expectedResult.add(new Object[] {0L}); + + MultiJson.arrindex(transaction, key1, "$..b", "\"one\"", new 
JsonArrindexOptions(0L)); + expectedResult.add(new Object[] {0L}); + + MultiJson.arrinsert(transaction, key1, "$..b", 4, new String[] {"\"7\""}); + expectedResult.add(new Object[] {7L}); + + MultiJson.arrlen(transaction, key1, "$..b"); + expectedResult.add(new Object[] {7L}); + + MultiJson.arrpop(transaction, key1, "$..b", 6L); + expectedResult.add(new Object[] {"\"6\""}); + + MultiJson.arrpop(transaction, key1, "$..b"); + expectedResult.add(new Object[] {"\"5\""}); + + MultiJson.arrtrim(transaction, key1, "$..b", 2, 3); + expectedResult.add(new Object[] {2L}); + + MultiJson.objlen(transaction, key1); + expectedResult.add(2L); + + MultiJson.objlen(transaction, key1, "$..b"); + expectedResult.add(new Object[] {null}); + + MultiJson.objkeys(transaction, key1, ".."); + expectedResult.add(new Object[] {"a", "b"}); + + MultiJson.objkeys(transaction, key1); + expectedResult.add(new Object[] {"a", "b"}); + + MultiJson.del(transaction, key1); + expectedResult.add(1L); + + MultiJson.set( + transaction, + key1, + "$", + "{\"c\": [1, 2], \"d\": true, \"e\": [\"hello\", \"clouds\"], \"f\": {\"a\": \"hello\"}}"); + expectedResult.add(OK); + + MultiJson.del(transaction, key1, "$"); + expectedResult.add(1L); + + MultiJson.set( + transaction, + key1, + "$", + "{\"c\": [1, 2], \"d\": true, \"e\": [\"hello\", \"clouds\"], \"f\": {\"a\": \"hello\"}}"); + expectedResult.add(OK); + + MultiJson.numincrby(transaction, key1, "$.c[*]", 10.0); + expectedResult.add("[11,12]"); + + MultiJson.nummultby(transaction, key1, "$.c[*]", 10.0); + expectedResult.add("[110,120]"); + + MultiJson.strappend(transaction, key1, "\"bar\"", "$..a"); + expectedResult.add(new Object[] {8L}); + + MultiJson.strlen(transaction, key1, "$..a"); + expectedResult.add(new Object[] {8L}); + + MultiJson.type(transaction, key1, "$..a"); + expectedResult.add(new Object[] {"string"}); + + MultiJson.toggle(transaction, key1, "..d"); + expectedResult.add(false); + + MultiJson.resp(transaction, key1, "$..a"); + 
expectedResult.add(new Object[] {"hellobar"}); + + MultiJson.del(transaction, key1, "$..a"); + expectedResult.add(1L); + + // then delete the entire key + MultiJson.del(transaction, key1, "$"); + expectedResult.add(1L); + + // 2nd key + MultiJson.set(transaction, key2, "$", "[1, 2, true, null, \"tree\", \"tree2\" ]"); + expectedResult.add(OK); + + MultiJson.arrlen(transaction, key2); + expectedResult.add(6L); + + MultiJson.arrpop(transaction, key2); + expectedResult.add("\"tree2\""); + + MultiJson.debugFields(transaction, key2); + expectedResult.add(5L); + + MultiJson.debugFields(transaction, key2, "$"); + expectedResult.add(new Object[] {5L}); + + // 3rd key + MultiJson.set(transaction, key3, "$", "\"abc\""); + expectedResult.add(OK); + + MultiJson.strappend(transaction, key3, "\"bar\""); + expectedResult.add(6L); + + MultiJson.strlen(transaction, key3); + expectedResult.add(6L); + + MultiJson.type(transaction, key3); + expectedResult.add("string"); + + MultiJson.resp(transaction, key3); + expectedResult.add("abcbar"); + + // 4th key + MultiJson.set(transaction, key4, "$", "true"); + expectedResult.add(OK); + + MultiJson.toggle(transaction, key4); + expectedResult.add(false); + + MultiJson.debugMemory(transaction, key4); + expectedResult.add(24L); + + MultiJson.debugMemory(transaction, key4, "$"); + expectedResult.add(new Object[] {16L}); + + MultiJson.clear(transaction, key2, "$.a"); + expectedResult.add(0L); + + MultiJson.clear(transaction, key2); + expectedResult.add(1L); + + MultiJson.forget(transaction, key3); + expectedResult.add(1L); + + MultiJson.forget(transaction, key4, "$"); + expectedResult.add(1L); + + // mget, key5 and key6 + MultiJson.set(transaction, key5, "$", "{\"a\": 1, \"b\": [\"one\", \"two\"]}"); + expectedResult.add(OK); + + MultiJson.set(transaction, key6, "$", "{\"a\": 1, \"c\": false}"); + expectedResult.add(OK); + + MultiJson.mget(transaction, new String[] {key5, key6}, "$.c"); + expectedResult.add(new String[] {"[]", "[false]"}); + + 
Object[] results = client.exec(transaction).get(); + assertDeepEquals(expectedResult.toArray(), results); + } } diff --git a/node/src/server-modules/GlideJson.ts b/node/src/server-modules/GlideJson.ts index 23d667292e..4b9d1a2ded 100644 --- a/node/src/server-modules/GlideJson.ts +++ b/node/src/server-modules/GlideJson.ts @@ -2,6 +2,7 @@ * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +import { ClusterTransaction, Transaction } from "src/Transaction"; import { BaseClient, DecoderOption, GlideString } from "../BaseClient"; import { ConditionalChange } from "../Commands"; import { GlideClient } from "../GlideClient"; @@ -263,7 +264,7 @@ export class GlideJson { * await GlideJson.set(client, "doc", "$", '[[], ["a"], ["a", "b"]]'); * const result = await GlideJson.arrinsert(client, "doc", "$[*]", 0, ['"c"', '{"key": "value"}', "true", "null", '["bar"]']); * console.log(result); // Output: [5, 6, 7] - * const doc = await json.get(client, "doc"); + * const doc = await GlideJson.get(client, "doc"); * console.log(doc); // Output: '[["c",{"key":"value"},true,null,["bar"]],["c",{"key":"value"},true,null,["bar"],"a"],["c",{"key":"value"},true,null,["bar"],"a","b"]]' * ``` * @example @@ -271,7 +272,7 @@ export class GlideJson { * await GlideJson.set(client, "doc", "$", '[[], ["a"], ["a", "b"]]'); * const result = await GlideJson.arrinsert(client, "doc", ".", 0, ['"c"']) * console.log(result); // Output: 4 - * const doc = await json.get(client, "doc"); + * const doc = await GlideJson.get(client, "doc"); * console.log(doc); // Output: '[\"c\",[],[\"a\"],[\"a\",\"b\"]]' * ``` */ @@ -721,13 +722,13 @@ export class GlideJson { /** * Retrieve the JSON value at the specified `path` within the JSON document stored at `key`. * The returning result is in the Valkey or Redis OSS Serialization Protocol (RESP). - * JSON null is mapped to the RESP Null Bulk String. - * JSON Booleans are mapped to RESP Simple string. 
- * JSON integers are mapped to RESP Integers. - * JSON doubles are mapped to RESP Bulk Strings. - * JSON strings are mapped to RESP Bulk Strings. - * JSON arrays are represented as RESP arrays, where the first element is the simple string [, followed by the array's elements. - * JSON objects are represented as RESP object, where the first element is the simple string {, followed by key-value pairs, each of which is a RESP bulk string. + * - JSON null is mapped to the RESP Null Bulk String. + * - JSON Booleans are mapped to RESP Simple string. + * - JSON integers are mapped to RESP Integers. + * - JSON doubles are mapped to RESP Bulk Strings. + * - JSON strings are mapped to RESP Bulk Strings. + * - JSON arrays are represented as RESP arrays, where the first element is the simple string [, followed by the array's elements. + * - JSON objects are represented as RESP object, where the first element is the simple string {, followed by key-value pairs, each of which is a RESP bulk string. * * @param client - The client to execute the command. * @param key - The key of the JSON document. @@ -974,7 +975,7 @@ export class GlideJson { * ```typescript * console.log(await GlideJson.set(client, "doc", "$", '[1, 2.3, "foo", true, null, {}, [], {a:1, b:2}, [1, 2, 3]]')); * // Output: 'OK' - Indicates successful setting of the value at path '$' in the key stored at `doc`. - * console.log(await GlideJson.debugMemory(client, "doc", {path: "$[*]"}); + * console.log(await GlideJson.debugFields(client, "doc", {path: "$[*]"}); * // Output: [1, 1, 1, 1, 1, 0, 0, 2, 3] * ``` */ @@ -1157,3 +1158,773 @@ export class GlideJson { return _executeCommand(client, args, options); } } + +/** + * Transaction implementation for JSON module. Transactions allow the execution of a group of + * commands in a single step. See {@link Transaction} and {@link ClusterTransaction}. 
+ * + * @example + * ```typescript + * const transaction = new Transaction(); + * GlideMultiJson.set(transaction, "doc", ".", '{"a": 1.0, "b": 2}'); + * GlideMultiJson.get(transaction, "doc"); + * const result = await client.exec(transaction); + * + * console.log(result[0]); // Output: 'OK' - result of GlideMultiJson.set() + * console.log(result[1]); // Output: '{"a": 1.0, "b": 2}' - result of GlideMultiJson.get() + * ``` + */ +export class GlideMultiJson { + /** + * Sets the JSON value at the specified `path` stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param path - Represents the path within the JSON document where the value will be set. + * The key will be modified only if `value` is added as the last child in the specified `path`, or if the specified `path` acts as the parent of a new child being added. + * @param value - The value to set at the specific path, in JSON formatted bytes or str. + * @param options - (Optional) Additional parameters: + * - (Optional) `conditionalChange` - Set the value only if the given condition is met (within the key or path). + * Equivalent to [`XX` | `NX`] in the module API. + * + * Command Response - If the value is successfully set, returns `"OK"`. + * If `value` isn't set because of `conditionalChange`, returns `null`. + */ + static set( + transaction: Transaction | ClusterTransaction, + key: GlideString, + path: GlideString, + value: GlideString, + options?: { conditionalChange: ConditionalChange }, + ): Transaction | ClusterTransaction { + const args: GlideString[] = ["JSON.SET", key, path, value]; + + if (options?.conditionalChange !== undefined) { + args.push(options.conditionalChange); + } + + return transaction.customCommand(args); + } + + /** + * Retrieves the JSON value at the specified `paths` stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. 
+ * @param options - (Optional) Additional parameters: + * - (Optional) Options for formatting the byte representation of the JSON data. See {@link JsonGetOptions}. + * + * Command Response - + * - If one path is given: + * - For JSONPath (path starts with `$`): + * - Returns a stringified JSON list of bytes replies for every possible path, + * or a byte string representation of an empty array, if path doesn't exist. + * If `key` doesn't exist, returns `null`. + * - For legacy path (path doesn't start with `$`): + * Returns a byte string representation of the value in `path`. + * If `path` doesn't exist, an error is raised. + * If `key` doesn't exist, returns `null`. + * - If multiple paths are given: + * Returns a stringified JSON object in bytes, in which each path is a key, and it's corresponding value, is the value as if the path was executed in the command as a single path. + * In case of multiple paths, and `paths` are a mix of both JSONPath and legacy path, the command behaves as if all are JSONPath paths. + */ + static get( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: JsonGetOptions, + ): Transaction | ClusterTransaction { + const args = ["JSON.GET", key]; + + if (options) { + const optionArgs = _jsonGetOptionsToArgs(options); + args.push(...optionArgs); + } + + return transaction.customCommand(args); + } + + /** + * Retrieves the JSON values at the specified `path` stored at multiple `keys`. + * + * @remarks When in cluster mode, all keys in the transaction must be mapped to the same slot. + * + * @param client - The client to execute the command. + * @param keys - The keys of the JSON documents. + * @param path - The path within the JSON documents. + * + * Command Response - + * - For JSONPath (path starts with `$`): + * Returns a stringified JSON list replies for every possible path, or a string representation + * of an empty array, if path doesn't exist. 
+ * - For legacy path (path doesn't start with `$`): + * Returns a string representation of the value in `path`. If `path` doesn't exist, + * the corresponding array element will be `null`. + * - If a `key` doesn't exist, the corresponding array element will be `null`. + */ + static mget( + transaction: Transaction | ClusterTransaction, + keys: GlideString[], + path: GlideString, + ): Transaction | ClusterTransaction { + const args = ["JSON.MGET", ...keys, path]; + return transaction.customCommand(args); + } + + /** + * Inserts one or more values into the array at the specified `path` within the JSON + * document stored at `key`, before the given `index`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param path - The path within the JSON document. + * @param index - The array index before which values are inserted. + * @param values - The JSON values to be inserted into the array. + * JSON string values must be wrapped with quotes. For example, to insert `"foo"`, pass `"\"foo\""`. + * + * Command Response - + * - For JSONPath (path starts with `$`): + * Returns an array with a list of integers for every possible path, + * indicating the new length of the array, or `null` for JSON values matching + * the path that are not an array. If `path` does not exist, an empty array + * will be returned. + * - For legacy path (path doesn't start with `$`): + * Returns an integer representing the new length of the array. If multiple paths are + * matched, returns the length of the first modified array. If `path` doesn't + * exist or the value at `path` is not an array, an error is raised. + * - If the index is out of bounds or `key` doesn't exist, an error is raised. 
+ */ + static arrinsert( + transaction: Transaction | ClusterTransaction, + key: GlideString, + path: GlideString, + index: number, + values: GlideString[], + ): Transaction | ClusterTransaction { + const args = ["JSON.ARRINSERT", key, path, index.toString(), ...values]; + + return transaction.customCommand(args); + } + + /** + * Pops an element from the array located at `path` in the JSON document stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param options - (Optional) See {@link JsonArrPopOptions}. + * + * Command Response - + * - For JSONPath (path starts with `$`): + * Returns an array with a strings for every possible path, representing the popped JSON + * values, or `null` for JSON values matching the path that are not an array + * or an empty array. + * - For legacy path (path doesn't start with `$`): + * Returns a string representing the popped JSON value, or `null` if the + * array at `path` is empty. If multiple paths are matched, the value from + * the first matching array that is not empty is returned. If `path` doesn't + * exist or the value at `path` is not an array, an error is raised. + * - If the index is out of bounds or `key` doesn't exist, an error is raised. + */ + static arrpop( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: JsonArrPopOptions, + ): Transaction | ClusterTransaction { + const args = ["JSON.ARRPOP", key]; + if (options?.path) args.push(options?.path); + if (options && "index" in options && options.index) + args.push(options?.index.toString()); + + return transaction.customCommand(args); + } + + /** + * Retrieves the length of the array at the specified `path` within the JSON document stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. 
+ * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document. Defaults to the root (`"."`) if not specified. + * + * Command Response - + * - For JSONPath (path starts with `$`): + * Returns an array with a list of integers for every possible path, + * indicating the length of the array, or `null` for JSON values matching + * the path that are not an array. If `path` does not exist, an empty array + * will be returned. + * - For legacy path (path doesn't start with `$`): + * Returns an integer representing the length of the array. If multiple paths are + * matched, returns the length of the first matching array. If `path` doesn't + * exist or the value at `path` is not an array, an error is raised. + * - If the index is out of bounds or `key` doesn't exist, an error is raised. + */ + static arrlen( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.ARRLEN", key]; + if (options?.path) args.push(options?.path); + + return transaction.customCommand(args); + } + + /** + * Trims an array at the specified `path` within the JSON document stored at `key` so that it becomes a subarray [start, end], both inclusive. + * If `start` < 0, it is treated as 0. + * If `end` >= size (size of the array), it is treated as size-1. + * If `start` >= size or `start` > `end`, the array is emptied and 0 is returned. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param path - The path within the JSON document. + * @param start - The start index, inclusive. + * @param end - The end index, inclusive. + * + * Command Response - + * - For JSONPath (`path` starts with `$`): + * - Returns a list of integer replies for every possible path, indicating the new length of the array, + * or `null` for JSON values matching the path that are not an array. 
+ * - If the array is empty, its corresponding return value is 0. + * - If `path` doesn't exist, an empty array will be returned. + * - If an index argument is out of bounds, an error is raised. + * - For legacy path (`path` doesn't start with `$`): + * - Returns an integer representing the new length of the array. + * - If the array is empty, its corresponding return value is 0. + * - If multiple paths match, the length of the first trimmed array match is returned. + * - If `path` doesn't exist, or the value at `path` is not an array, an error is raised. + * - If an index argument is out of bounds, an error is raised. + */ + static arrtrim( + transaction: Transaction | ClusterTransaction, + key: GlideString, + path: GlideString, + start: number, + end: number, + ): Transaction | ClusterTransaction { + const args: GlideString[] = [ + "JSON.ARRTRIM", + key, + path, + start.toString(), + end.toString(), + ]; + return transaction.customCommand(args); + } + + /** + * Searches for the first occurrence of a `scalar` JSON value in the arrays at the `path`. + * Out of range errors are treated by rounding the index to the array's `start` and `end. + * If `start` > `end`, return `-1` (not found). + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param path - The path within the JSON document. + * @param scalar - The scalar value to search for. + * @param options - (Optional) Additional parameters: + * - (Optional) `start`: The start index, inclusive. Default to 0 if not provided. + * - (Optional) `end`: The end index, exclusive. Default to 0 if not provided. + * 0 or -1 means the last element is included. + * Command Response - + * - For JSONPath (path starts with `$`): + * Returns an array with a list of integers for every possible path, + * indicating the index of the matching element. The value is `-1` if not found. + * If a value is not an array, its corresponding return value is `null`. 
+ * - For legacy path (path doesn't start with `$`): + * Returns an integer representing the index of matching element, or `-1` if + * not found. If the value at the `path` is not an array, an error is raised. + */ + static arrindex( + transaction: Transaction | ClusterTransaction, + key: GlideString, + path: GlideString, + scalar: GlideString | number | boolean | null, + options?: { start: number; end?: number }, + ): Transaction | ClusterTransaction { + const args = ["JSON.ARRINDEX", key, path]; + + if (typeof scalar === `number`) { + args.push(scalar.toString()); + } else if (typeof scalar === `boolean`) { + args.push(scalar ? `true` : `false`); + } else if (scalar !== null) { + args.push(scalar); + } else { + args.push(`null`); + } + + if (options?.start !== undefined) args.push(options?.start.toString()); + if (options?.end !== undefined) args.push(options?.end.toString()); + + return transaction.customCommand(args); + } + + /** + * Toggles a Boolean value stored at the specified `path` within the JSON document stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document. Defaults to the root (`"."`) if not specified. + * + * Command Response - For JSONPath (`path` starts with `$`), returns a list of boolean replies for every possible path, with the toggled boolean value, + * or `null` for JSON values matching the path that are not boolean. + * - For legacy path (`path` doesn't starts with `$`), returns the value of the toggled boolean in `path`. + * - Note that when sending legacy path syntax, If `path` doesn't exist or the value at `path` isn't a boolean, an error is raised. 
+ */ + static toggle( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.TOGGLE", key]; + + if (options) { + args.push(options.path); + } + + return transaction.customCommand(args); + } + + /** + * Deletes the JSON value at the specified `path` within the JSON document stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: If `null`, deletes the entire JSON document at `key`. + * + * Command Response - The number of elements removed. If `key` or `path` doesn't exist, returns 0. + */ + static del( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.DEL", key]; + + if (options) { + args.push(options.path); + } + + return transaction.customCommand(args); + } + + /** + * Deletes the JSON value at the specified `path` within the JSON document stored at `key`. This command is + * an alias of {@link del}. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: If `null`, deletes the entire JSON document at `key`. + * + * Command Response - The number of elements removed. If `key` or `path` doesn't exist, returns 0. + */ + static forget( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.FORGET", key]; + + if (options) { + args.push(options.path); + } + + return transaction.customCommand(args); + } + + /** + * Reports the type of values at the given path. + * + * @param transaction - A transaction to add commands to. 
+ * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: Defaults to root (`"."`) if not provided. + * + * Command Response - + * - For JSONPath (path starts with `$`): + * - Returns an array of strings that represents the type of value at each path. + * The type is one of "null", "boolean", "string", "number", "integer", "object" and "array". + * - If a path does not exist, its corresponding return value is `null`. + * - Empty array if the document key does not exist. + * - For legacy path (path doesn't start with `$`): + * - String that represents the type of the value. + * - `null` if the document key does not exist. + * - `null` if the JSON path is invalid or does not exist. + */ + static type( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.TYPE", key]; + + if (options) { + args.push(options.path); + } + + return transaction.customCommand(args); + } + + /** + * Clears arrays or objects at the specified JSON path in the document stored at `key`. + * Numeric values are set to `0`, boolean values are set to `false`, and string values are converted to empty strings. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The JSON path to the arrays or objects to be cleared. Defaults to root if not provided. + * + * Command Response - The number of containers cleared, numeric values zeroed, and booleans toggled to `false`, + * and string values converted to empty strings. + * If `path` doesn't exist, or the value at `path` is already empty (e.g., an empty array, object, or string), `0` is returned. + * If `key doesn't exist, an error is raised. 
+ */ + static clear( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.CLEAR", key]; + + if (options) { + args.push(options.path); + } + + return transaction.customCommand(args); + } + + /** + * Retrieve the JSON value at the specified `path` within the JSON document stored at `key`. + * The returning result is in the Valkey or Redis OSS Serialization Protocol (RESP). + * - JSON null is mapped to the RESP Null Bulk String. + * - JSON Booleans are mapped to RESP Simple string. + * - JSON integers are mapped to RESP Integers. + * - JSON doubles are mapped to RESP Bulk Strings. + * - JSON strings are mapped to RESP Bulk Strings. + * - JSON arrays are represented as RESP arrays, where the first element is the simple string [, followed by the array's elements. + * - JSON objects are represented as RESP object, where the first element is the simple string {, followed by key-value pairs, each of which is a RESP bulk string. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document, defaults to root (`"."`) if not provided. + * + * Command Response - + * - For JSONPath (path starts with `$`): + * - Returns an array of replies for every possible path, indicating the RESP form of the JSON value. + * If `path` doesn't exist, returns an empty array. + * - For legacy path (path doesn't start with `$`): + * - Returns a single reply for the JSON value at the specified `path`, in its RESP form. + * If multiple paths match, the value of the first JSON value match is returned. If `path` doesn't exist, an error is raised. + * - If `key` doesn't exist, `null` is returned. 
+ */ + static resp( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.RESP", key]; + + if (options) { + args.push(options.path); + } + + return transaction.customCommand(args); + } + + /** + * Returns the length of the JSON string value stored at the specified `path` within + * the JSON document stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document, Defaults to root (`"."`) if not provided. + * + * Command Response - + * - For JSONPath (path starts with `$`): + * - Returns a list of integer replies for every possible path, indicating the length of + * the JSON string value, or null for JSON values matching the path that + * are not string. + * - For legacy path (path doesn't start with `$`): + * - Returns the length of the JSON value at `path` or `null` if `key` doesn't exist. + * - If multiple paths match, the length of the first matched string is returned. + * - If the JSON value at`path` is not a string or if `path` doesn't exist, an error is raised. + * - If `key` doesn't exist, `null` is returned. + */ + static strlen( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.STRLEN", key]; + + if (options) { + args.push(options.path); + } + + return transaction.customCommand(args); + } + + /** + * Appends the specified `value` to the string stored at the specified `path` within the JSON document stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param value - The value to append to the string. Must be wrapped with single quotes. For example, to append "foo", pass '"foo"'. 
+     * @param options - (Optional) Additional parameters:
+     * - (Optional) `path`: The path within the JSON document, defaults to root (`"."`) if not provided.
+     *
+     * Command Response -
+     *     - For JSONPath (path starts with `$`):
+     *       - Returns a list of integer replies for every possible path, indicating the length of the resulting string after appending `value`,
+     *         or `null` for JSON values matching the path that are not string.
+     *       - If `key` doesn't exist, an error is raised.
+     *     - For legacy path (path doesn't start with `$`):
+     *       - Returns the length of the resulting string after appending `value` to the string at `path`.
+     *       - If multiple paths match, the length of the last updated string is returned.
+     *       - If the JSON value at `path` is not a string or if `path` doesn't exist, an error is raised.
+     *       - If `key` doesn't exist, an error is raised.
+     */
+    static strappend(
+        transaction: Transaction | ClusterTransaction,
+        key: GlideString,
+        value: GlideString,
+        options?: { path: GlideString },
+    ): Transaction | ClusterTransaction {
+        const args = ["JSON.STRAPPEND", key];
+
+        if (options) {
+            args.push(options.path);
+        }
+
+        args.push(value);
+
+        return transaction.customCommand(args);
+    }
+
+    /**
+     * Appends one or more `values` to the JSON array at the specified `path` within the JSON
+     * document stored at `key`.
+     *
+     * @param transaction - A transaction to add commands to.
+     * @param key - The key of the JSON document.
+     * @param path - The path within the JSON document.
+     * @param values - The JSON values to be appended to the array.
+     * JSON string values must be wrapped with quotes. For example, to append `"foo"`, pass `"\"foo\""`.
+     *
+     * Command Response -
+     * - For JSONPath (path starts with `$`):
+     * Returns an array with a list of integers for every possible path,
+     * indicating the new length of the array, or `null` for JSON values matching
+     * the path that are not an array. If `path` does not exist, an empty array
+     * will be returned.
+     * - For legacy path (path doesn't start with `$`):
+     * Returns an integer representing the new length of the array. If multiple paths are
+     * matched, returns the length of the first modified array. If `path` doesn't
+     * exist or the value at `path` is not an array, an error is raised.
+     * - If the index is out of bounds or `key` doesn't exist, an error is raised.
+     */
+    static arrappend(
+        transaction: Transaction | ClusterTransaction,
+        key: GlideString,
+        path: GlideString,
+        values: GlideString[],
+    ): Transaction | ClusterTransaction {
+        const args = ["JSON.ARRAPPEND", key, path, ...values];
+        return transaction.customCommand(args);
+    }
+
+    /**
+     * Reports memory usage in bytes of a JSON object at the specified `path` within the JSON document stored at `key`.
+     *
+     * @param transaction - A transaction to add commands to.
+     * @param key - The key of the JSON document.
+     *
+     * @param options - (Optional) Additional parameters:
+     * - (Optional) `path`: The path within the JSON document, returns total memory usage if no path is given.
+     *
+     * Command Response -
+     * - For JSONPath (path starts with `$`):
+     * - Returns an array of numbers for every possible path, indicating the memory usage.
+     * If `path` does not exist, an empty array will be returned.
+     * - For legacy path (path doesn't start with `$`):
+     * - Returns an integer representing the memory usage. If multiple paths are matched,
+     * returns the data of the first matching object. If `path` doesn't exist, an error is raised.
+     * - If `key` doesn't exist, returns `null`.
+     */
+    static debugMemory(
+        transaction: Transaction | ClusterTransaction,
+        key: GlideString,
+        options?: { path: GlideString },
+    ): Transaction | ClusterTransaction {
+        const args = ["JSON.DEBUG", "MEMORY", key];
+
+        if (options) {
+            args.push(options.path);
+        }
+
+        return transaction.customCommand(args);
+    }
+
+    /**
+     * Reports the number of fields at the specified `path` within the JSON document stored at `key`.
+     *
+     * @param transaction - A transaction to add commands to.
+     * @param key - The key of the JSON document.
+     *
+     * @param options - (Optional) Additional parameters:
+     * - (Optional) `path`: The path within the JSON document, returns total number of fields if no path is given.
+     *
+     * Command Response -
+     * - For JSONPath (path starts with `$`):
+     * - Returns an array of numbers for every possible path, indicating the number of fields.
+     * If `path` does not exist, an empty array will be returned.
+     * - For legacy path (path doesn't start with `$`):
+     * - Returns an integer representing the number of fields. If multiple paths are matched,
+     * returns the data of the first matching object. If `path` doesn't exist, an error is raised.
+     * - If `key` doesn't exist, returns `null`.
+     */
+    static debugFields(
+        transaction: Transaction | ClusterTransaction,
+        key: GlideString,
+        options?: { path: GlideString },
+    ): Transaction | ClusterTransaction {
+        const args = ["JSON.DEBUG", "FIELDS", key];
+
+        if (options) {
+            args.push(options.path);
+        }
+
+        return transaction.customCommand(args);
+    }
+
+    /**
+     * Increments or decrements the JSON value(s) at the specified `path` by `number` within the JSON document stored at `key`.
+     *
+     * @param transaction - A transaction to add commands to.
+     * @param key - The key of the JSON document.
+     * @param path - The path within the JSON document.
+ * @param num - The number to increment or decrement by. + * + * Command Response - + * - For JSONPath (path starts with `$`): + * - Returns a string representation of an array of strings, indicating the new values after incrementing for each matched `path`. + * If a value is not a number, its corresponding return value will be `null`. + * If `path` doesn't exist, a byte string representation of an empty array will be returned. + * - For legacy path (path doesn't start with `$`): + * - Returns a string representation of the resulting value after the increment or decrement. + * If multiple paths match, the result of the last updated value is returned. + * If the value at the `path` is not a number or `path` doesn't exist, an error is raised. + * - If `key` does not exist, an error is raised. + * - If the result is out of the range of 64-bit IEEE double, an error is raised. + */ + static numincrby( + transaction: Transaction | ClusterTransaction, + key: GlideString, + path: GlideString, + num: number, + ): Transaction | ClusterTransaction { + const args = ["JSON.NUMINCRBY", key, path, num.toString()]; + return transaction.customCommand(args); + } + + /** + * Multiplies the JSON value(s) at the specified `path` by `number` within the JSON document stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param path - The path within the JSON document. + * @param num - The number to multiply by. + * + * Command Response - + * - For JSONPath (path starts with `$`): + * - Returns a GlideString representation of an array of strings, indicating the new values after multiplication for each matched `path`. + * If a value is not a number, its corresponding return value will be `null`. + * If `path` doesn't exist, a byte string representation of an empty array will be returned. 
+ * - For legacy path (path doesn't start with `$`): + * - Returns a GlideString representation of the resulting value after multiplication. + * If multiple paths match, the result of the last updated value is returned. + * If the value at the `path` is not a number or `path` doesn't exist, an error is raised. + * - If `key` does not exist, an error is raised. + * - If the result is out of the range of 64-bit IEEE double, an error is raised. + */ + static nummultby( + transaction: Transaction | ClusterTransaction, + key: GlideString, + path: GlideString, + num: number, + ): Transaction | ClusterTransaction { + const args = ["JSON.NUMMULTBY", key, path, num.toString()]; + return transaction.customCommand(args); + } + + /** + * Retrieves the number of key-value pairs in the object stored at the specified `path` within the JSON document stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document, Defaults to root (`"."`) if not provided. + * + * Command Response - ReturnTypeJson: + * - For JSONPath (`path` starts with `$`): + * - Returns a list of integer replies for every possible path, indicating the length of the object, + * or `null` for JSON values matching the path that are not an object. + * - If `path` doesn't exist, an empty array will be returned. + * - For legacy path (`path` doesn't starts with `$`): + * - Returns the length of the object at `path`. + * - If multiple paths match, the length of the first object match is returned. + * - If the JSON value at `path` is not an object or if `path` doesn't exist, an error is raised. + * - If `key` doesn't exist, `null` is returned. 
+ */ + static objlen( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.OBJLEN", key]; + + if (options) { + args.push(options.path); + } + + return transaction.customCommand(args); + } + + /** + * Retrieves key names in the object values at the specified `path` within the JSON document stored at `key`. + * + * @param transaction - A transaction to add commands to. + * @param key - The key of the JSON document. + * @param options - (Optional) Additional parameters: + * - (Optional) `path`: The path within the JSON document where the key names will be retrieved. Defaults to root (`"."`) if not provided. + * + * Command Response - ReturnTypeJson: + * - For JSONPath (`path` starts with `$`): + * - Returns a list of arrays containing key names for each matching object. + * - If a value matching the path is not an object, an empty array is returned. + * - If `path` doesn't exist, an empty array is returned. + * - For legacy path (`path` starts with `.`): + * - Returns a list of key names for the object value matching the path. + * - If multiple objects match the path, the key names of the first object is returned. + * - If a value matching the path is not an object, an error is raised. + * - If `path` doesn't exist, `null` is returned. + * - If `key` doesn't exist, `null` is returned. 
+ */ + static objkeys( + transaction: Transaction | ClusterTransaction, + key: GlideString, + options?: { path: GlideString }, + ): Transaction | ClusterTransaction { + const args = ["JSON.OBJKEYS", key]; + + if (options) { + args.push(options.path); + } + + return transaction.customCommand(args); + } +} diff --git a/node/tests/ServerModules.test.ts b/node/tests/ServerModules.test.ts index df16ce89e7..96ac19cea3 100644 --- a/node/tests/ServerModules.test.ts +++ b/node/tests/ServerModules.test.ts @@ -11,6 +11,7 @@ import { } from "@jest/globals"; import { v4 as uuidv4 } from "uuid"; import { + ClusterTransaction, ConditionalChange, convertGlideRecordToRecord, Decoder, @@ -36,6 +37,9 @@ import { getClientConfigurationOption, getServerVersion, parseEndpoints, + transactionMultiJson, + transactionMultiJsonForArrCommands, + validateTransactionResponse, } from "./TestUtilities"; const TIMEOUT = 50000; @@ -1034,158 +1038,148 @@ describe("Server Module Tests", () => { ).toEqual("integer"); }); - it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "json.clear tests", - async () => { - client = await GlideClusterClient.createClient( - getClientConfigurationOption( - cluster.getAddresses(), - protocol, - ), - ); - const key = uuidv4(); - const jsonValue = { - obj: { a: 1, b: 2 }, - arr: [1, 2, 3], - str: "foo", - bool: true, - int: 42, - float: 3.14, - nullVal: null, - }; + it("json.clear tests", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = { + obj: { a: 1, b: 2 }, + arr: [1, 2, 3], + str: "foo", + bool: true, + int: 42, + float: 3.14, + nullVal: null, + }; - expect( - await GlideJson.set( - client, - key, - "$", - JSON.stringify(jsonValue), - ), - ).toBe("OK"); + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), + ), + ).toBe("OK"); - expect( - await GlideJson.clear(client, key, { path: "$.*" 
}), - ).toBe(6); + expect( + await GlideJson.clear(client, key, { path: "$.*" }), + ).toBe(6); - const result = await GlideJson.get(client, key, { - path: ["$"], - }); + const result = await GlideJson.get(client, key, { + path: ["$"], + }); - expect(JSON.parse(result as string)).toEqual([ - { - obj: {}, - arr: [], - str: "", - bool: false, - int: 0, - float: 0.0, - nullVal: null, - }, - ]); + expect(JSON.parse(result as string)).toEqual([ + { + obj: {}, + arr: [], + str: "", + bool: false, + int: 0, + float: 0.0, + nullVal: null, + }, + ]); - expect( - await GlideJson.clear(client, key, { path: "$.*" }), - ).toBe(0); + expect( + await GlideJson.clear(client, key, { path: "$.*" }), + ).toBe(0); - expect( - await GlideJson.set( - client, - key, - "$", - JSON.stringify(jsonValue), - ), - ).toBe("OK"); + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), + ), + ).toBe("OK"); - expect( - await GlideJson.clear(client, key, { path: "*" }), - ).toBe(6); + expect(await GlideJson.clear(client, key, { path: "*" })).toBe( + 6, + ); - const jsonValue2 = { - a: 1, - b: { a: [5, 6, 7], b: { a: true } }, - c: { a: "value", b: { a: 3.5 } }, - d: { a: { foo: "foo" } }, - nullVal: null, - }; - expect( - await GlideJson.set( - client, - key, - "$", - JSON.stringify(jsonValue2), - ), - ).toBe("OK"); + const jsonValue2 = { + a: 1, + b: { a: [5, 6, 7], b: { a: true } }, + c: { a: "value", b: { a: 3.5 } }, + d: { a: { foo: "foo" } }, + nullVal: null, + }; + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue2), + ), + ).toBe("OK"); - expect( - await GlideJson.clear(client, key, { - path: "b.a[1:3]", - }), - ).toBe(2); + expect( + await GlideJson.clear(client, key, { + path: "b.a[1:3]", + }), + ).toBe(2); - expect( - await GlideJson.clear(client, key, { - path: "b.a[1:3]", - }), - ).toBe(0); + expect( + await GlideJson.clear(client, key, { + path: "b.a[1:3]", + }), + ).toBe(0); - expect( - JSON.parse( - (await 
GlideJson.get(client, key, { - path: ["$..a"], - })) as string, - ), - ).toEqual([ - 1, - [5, 0, 0], - true, - "value", - 3.5, - { foo: "foo" }, - ]); - - expect( - await GlideJson.clear(client, key, { path: "..a" }), - ).toBe(6); - - expect( - JSON.parse( - (await GlideJson.get(client, key, { - path: ["$..a"], - })) as string, - ), - ).toEqual([0, [], false, "", 0.0, {}]); + expect( + JSON.parse( + (await GlideJson.get(client, key, { + path: ["$..a"], + })) as string, + ), + ).toEqual([1, [5, 0, 0], true, "value", 3.5, { foo: "foo" }]); - expect( - await GlideJson.clear(client, key, { path: "$..a" }), - ).toBe(0); + expect( + await GlideJson.clear(client, key, { path: "..a" }), + ).toBe(6); - // Path doesn't exist - expect( - await GlideJson.clear(client, key, { path: "$.path" }), - ).toBe(0); + expect( + JSON.parse( + (await GlideJson.get(client, key, { + path: ["$..a"], + })) as string, + ), + ).toEqual([0, [], false, "", 0.0, {}]); - expect( - await GlideJson.clear(client, key, { path: "path" }), - ).toBe(0); + expect( + await GlideJson.clear(client, key, { path: "$..a" }), + ).toBe(0); - // Key doesn't exist - await expect( - GlideJson.clear(client, "non_existing_key"), - ).rejects.toThrow(RequestError); + // Path doesn't exist + expect( + await GlideJson.clear(client, key, { path: "$.path" }), + ).toBe(0); - await expect( - GlideJson.clear(client, "non_existing_key", { - path: "$", - }), - ).rejects.toThrow(RequestError); + expect( + await GlideJson.clear(client, key, { path: "path" }), + ).toBe(0); - await expect( - GlideJson.clear(client, "non_existing_key", { - path: ".", - }), - ).rejects.toThrow(RequestError); - }, - ); + // Key doesn't exist + await expect( + GlideJson.clear(client, "non_existing_key"), + ).rejects.toThrow(RequestError); + + await expect( + GlideJson.clear(client, "non_existing_key", { + path: "$", + }), + ).rejects.toThrow(RequestError); + + await expect( + GlideJson.clear(client, "non_existing_key", { + path: ".", + }), + 
).rejects.toThrow(RequestError); + }); it("json.resp tests", async () => { client = await GlideClusterClient.createClient( @@ -2068,269 +2062,290 @@ describe("Server Module Tests", () => { ).toBe("0"); // 0 * 10.2 = 0 }); - it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "json.debug tests", - async (protocol) => { - client = await GlideClusterClient.createClient( - getClientConfigurationOption( - cluster.getAddresses(), - protocol, - ), - ); - const key = uuidv4(); - const jsonValue = - '{ "key1": 1, "key2": 3.5, "key3": {"nested_key": {"key1": [4, 5]}}, "key4":' + - ' [1, 2, 3], "key5": 0, "key6": "hello", "key7": null, "key8":' + - ' {"nested_key": {"key1": 3.5953862697246314e307}}, "key9":' + - ' 3.5953862697246314e307, "key10": true }'; - // setup - expect( - await GlideJson.set(client, key, "$", jsonValue), - ).toBe("OK"); - - expect( - await GlideJson.debugFields(client, key, { - path: "$.key1", - }), - ).toEqual([1]); + it("json.debug tests", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = + '{ "key1": 1, "key2": 3.5, "key3": {"nested_key": {"key1": [4, 5]}}, "key4":' + + ' [1, 2, 3], "key5": 0, "key6": "hello", "key7": null, "key8":' + + ' {"nested_key": {"key1": 3.5953862697246314e307}}, "key9":' + + ' 3.5953862697246314e307, "key10": true }'; + // setup + expect(await GlideJson.set(client, key, "$", jsonValue)).toBe( + "OK", + ); - expect( - await GlideJson.debugFields(client, key, { - path: "$.key3.nested_key.key1", - }), - ).toEqual([2]); + expect( + await GlideJson.debugFields(client, key, { + path: "$.key1", + }), + ).toEqual([1]); - expect( - await GlideJson.debugMemory(client, key, { - path: "$.key4[2]", - }), - ).toEqual([16]); + expect( + await GlideJson.debugFields(client, key, { + path: "$.key3.nested_key.key1", + }), + ).toEqual([2]); - expect( - await GlideJson.debugMemory(client, key, { - path: 
".key6", - }), - ).toEqual(16); + expect( + await GlideJson.debugMemory(client, key, { + path: "$.key4[2]", + }), + ).toEqual([16]); - expect(await GlideJson.debugMemory(client, key)).toEqual( - 504, - ); + expect( + await GlideJson.debugMemory(client, key, { + path: ".key6", + }), + ).toEqual(16); - expect(await GlideJson.debugFields(client, key)).toEqual( - 19, - ); + expect(await GlideJson.debugMemory(client, key)).toEqual(504); - // testing binary input - expect( - await GlideJson.debugMemory(client, Buffer.from(key)), - ).toEqual(504); + expect(await GlideJson.debugFields(client, key)).toEqual(19); - expect( - await GlideJson.debugFields(client, Buffer.from(key)), - ).toEqual(19); - }, - ); + // testing binary input + expect( + await GlideJson.debugMemory(client, Buffer.from(key)), + ).toEqual(504); - it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "json.objlen tests", - async (protocol) => { - client = await GlideClusterClient.createClient( - getClientConfigurationOption( - cluster.getAddresses(), - protocol, - ), - ); - const key = uuidv4(); - const jsonValue = { - a: 1.0, - b: { a: { x: 1, y: 2 }, b: 2.5, c: true }, - }; - - // setup - expect( - await GlideJson.set( - client, - key, - "$", - JSON.stringify(jsonValue), - ), - ).toBe("OK"); + expect( + await GlideJson.debugFields(client, Buffer.from(key)), + ).toEqual(19); + }); + + it("json.objlen tests", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = { + a: 1.0, + b: { a: { x: 1, y: 2 }, b: 2.5, c: true }, + }; - expect( - await GlideJson.objlen(client, key, { path: "$" }), - ).toEqual([2]); + // setup + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), + ), + ).toBe("OK"); - expect( - await GlideJson.objlen(client, key, { path: "." 
}), - ).toEqual(2); + expect( + await GlideJson.objlen(client, key, { path: "$" }), + ).toEqual([2]); - expect( - await GlideJson.objlen(client, key, { path: "$.." }), - ).toEqual([2, 3, 2]); + expect( + await GlideJson.objlen(client, key, { path: "." }), + ).toEqual(2); - expect( - await GlideJson.objlen(client, key, { path: ".." }), - ).toEqual(2); + expect( + await GlideJson.objlen(client, key, { path: "$.." }), + ).toEqual([2, 3, 2]); - expect( - await GlideJson.objlen(client, key, { path: "$..b" }), - ).toEqual([3, null]); + expect( + await GlideJson.objlen(client, key, { path: ".." }), + ).toEqual(2); - expect( - await GlideJson.objlen(client, key, { path: "..b" }), - ).toEqual(3); + expect( + await GlideJson.objlen(client, key, { path: "$..b" }), + ).toEqual([3, null]); - expect( - await GlideJson.objlen(client, Buffer.from(key), { - path: Buffer.from("..a"), - }), - ).toEqual(2); + expect( + await GlideJson.objlen(client, key, { path: "..b" }), + ).toEqual(3); - expect(await GlideJson.objlen(client, key)).toEqual(2); + expect( + await GlideJson.objlen(client, Buffer.from(key), { + path: Buffer.from("..a"), + }), + ).toEqual(2); - // path doesn't exist - expect( - await GlideJson.objlen(client, key, { - path: "$.non_existing_path", - }), - ).toEqual([]); + expect(await GlideJson.objlen(client, key)).toEqual(2); - await expect( - GlideJson.objlen(client, key, { - path: "non_existing_path", - }), - ).rejects.toThrow(RequestError); + // path doesn't exist + expect( + await GlideJson.objlen(client, key, { + path: "$.non_existing_path", + }), + ).toEqual([]); - // Value at path isnt an object - expect( - await GlideJson.objlen(client, key, { - path: "$.non_existing_path", - }), - ).toEqual([]); + await expect( + GlideJson.objlen(client, key, { + path: "non_existing_path", + }), + ).rejects.toThrow(RequestError); - await expect( - GlideJson.objlen(client, key, { path: ".a" }), - ).rejects.toThrow(RequestError); + // Value at path isnt an object + expect( + await 
GlideJson.objlen(client, key, { + path: "$.non_existing_path", + }), + ).toEqual([]); - // Non-existing key - expect( - await GlideJson.objlen(client, "non_existing_key", { - path: "$", - }), - ).toBeNull(); + await expect( + GlideJson.objlen(client, key, { path: ".a" }), + ).rejects.toThrow(RequestError); - expect( - await GlideJson.objlen(client, "non_existing_key", { - path: ".", - }), - ).toBeNull(); + // Non-existing key + expect( + await GlideJson.objlen(client, "non_existing_key", { + path: "$", + }), + ).toBeNull(); - expect( - await GlideJson.set( - client, - key, - "$", - '{"a": 1, "b": 2, "c":3, "d":4}', - ), - ).toBe("OK"); - expect(await GlideJson.objlen(client, key)).toEqual(4); - }, - ); + expect( + await GlideJson.objlen(client, "non_existing_key", { + path: ".", + }), + ).toBeNull(); - it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( - "json.objkeys tests", - async (protocol) => { - client = await GlideClusterClient.createClient( - getClientConfigurationOption( - cluster.getAddresses(), - protocol, - ), - ); - const key = uuidv4(); - const jsonValue = { - a: 1.0, - b: { a: { x: 1, y: 2 }, b: 2.5, c: true }, - }; - - // setup - expect( - await GlideJson.set( - client, - key, - "$", - JSON.stringify(jsonValue), - ), - ).toBe("OK"); + expect( + await GlideJson.set( + client, + key, + "$", + '{"a": 1, "b": 2, "c":3, "d":4}', + ), + ).toBe("OK"); + expect(await GlideJson.objlen(client, key)).toEqual(4); + }); + + it("json.objkeys tests", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const key = uuidv4(); + const jsonValue = { + a: 1.0, + b: { a: { x: 1, y: 2 }, b: 2.5, c: true }, + }; - expect( - await GlideJson.objkeys(client, key, { path: "$" }), - ).toEqual([["a", "b"]]); + // setup + expect( + await GlideJson.set( + client, + key, + "$", + JSON.stringify(jsonValue), + ), + ).toBe("OK"); - expect( - await GlideJson.objkeys(client, key, { - 
path: ".", - decoder: Decoder.Bytes, - }), - ).toEqual([Buffer.from("a"), Buffer.from("b")]); + expect( + await GlideJson.objkeys(client, key, { path: "$" }), + ).toEqual([["a", "b"]]); - expect( - await GlideJson.objkeys(client, Buffer.from(key), { - path: Buffer.from("$.."), - }), - ).toEqual([ - ["a", "b"], - ["a", "b", "c"], - ["x", "y"], - ]); - - expect( - await GlideJson.objkeys(client, key, { path: ".." }), - ).toEqual(["a", "b"]); - - expect( - await GlideJson.objkeys(client, key, { path: "$..b" }), - ).toEqual([["a", "b", "c"], []]); - - expect( - await GlideJson.objkeys(client, key, { path: "..b" }), - ).toEqual(["a", "b", "c"]); - - // path doesn't exist - expect( - await GlideJson.objkeys(client, key, { - path: "$.non_existing_path", - }), - ).toEqual([]); + expect( + await GlideJson.objkeys(client, key, { + path: ".", + decoder: Decoder.Bytes, + }), + ).toEqual([Buffer.from("a"), Buffer.from("b")]); - expect( - await GlideJson.objkeys(client, key, { - path: "non_existing_path", - }), - ).toBeNull(); + expect( + await GlideJson.objkeys(client, Buffer.from(key), { + path: Buffer.from("$.."), + }), + ).toEqual([ + ["a", "b"], + ["a", "b", "c"], + ["x", "y"], + ]); - // Value at path isnt an object - expect( - await GlideJson.objkeys(client, key, { path: "$.a" }), - ).toEqual([[]]); + expect( + await GlideJson.objkeys(client, key, { path: ".." 
}), + ).toEqual(["a", "b"]); - await expect( - GlideJson.objkeys(client, key, { path: ".a" }), - ).rejects.toThrow(RequestError); + expect( + await GlideJson.objkeys(client, key, { path: "$..b" }), + ).toEqual([["a", "b", "c"], []]); - // Non-existing key - expect( - await GlideJson.objkeys(client, "non_existing_key", { - path: "$", - }), - ).toBeNull(); + expect( + await GlideJson.objkeys(client, key, { path: "..b" }), + ).toEqual(["a", "b", "c"]); - expect( - await GlideJson.objkeys(client, "non_existing_key", { - path: ".", - }), - ).toBeNull(); - }, - ); + // path doesn't exist + expect( + await GlideJson.objkeys(client, key, { + path: "$.non_existing_path", + }), + ).toEqual([]); + + expect( + await GlideJson.objkeys(client, key, { + path: "non_existing_path", + }), + ).toBeNull(); + + // Value at path isnt an object + expect( + await GlideJson.objkeys(client, key, { path: "$.a" }), + ).toEqual([[]]); + + await expect( + GlideJson.objkeys(client, key, { path: ".a" }), + ).rejects.toThrow(RequestError); + + // Non-existing key + expect( + await GlideJson.objkeys(client, "non_existing_key", { + path: "$", + }), + ).toBeNull(); + + expect( + await GlideJson.objkeys(client, "non_existing_key", { + path: ".", + }), + ).toBeNull(); + }); + + it("can send GlideMultiJson transactions for ARR commands", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const clusterTransaction = new ClusterTransaction(); + const expectedRes = + await transactionMultiJsonForArrCommands( + clusterTransaction, + ); + const result = await client.exec(clusterTransaction); + + validateTransactionResponse(result, expectedRes); + client.close(); + }); + + it("can send GlideMultiJson transactions general commands", async () => { + client = await GlideClusterClient.createClient( + getClientConfigurationOption( + cluster.getAddresses(), + protocol, + ), + ); + const clusterTransaction = new 
ClusterTransaction(); + const expectedRes = + await transactionMultiJson(clusterTransaction); + const result = await client.exec(clusterTransaction); + + validateTransactionResponse(result, expectedRes); + client.close(); + }); }, ); diff --git a/node/tests/TestUtilities.ts b/node/tests/TestUtilities.ts index a58abacb6c..234e82f259 100644 --- a/node/tests/TestUtilities.ts +++ b/node/tests/TestUtilities.ts @@ -23,6 +23,7 @@ import { GeospatialData, GlideClient, GlideClusterClient, + GlideMultiJson, GlideReturnType, GlideString, InfBoundary, @@ -1883,6 +1884,188 @@ export async function transactionTest( return responseData; } +/** + * Populates a transaction with JSON commands to test. + * @param baseTransaction - A transaction. + * @returns Array of tuples, where first element is a test name/description, second - expected return value. + */ +export async function transactionMultiJsonForArrCommands( + baseTransaction: ClusterTransaction, +): Promise<[string, GlideReturnType][]> { + const responseData: [string, GlideReturnType][] = []; + const key = "{key}:1" + uuidv4(); + const jsonValue = { a: 1.0, b: 2 }; + + // JSON.SET + GlideMultiJson.set(baseTransaction, key, "$", JSON.stringify(jsonValue)); + responseData.push(['set(key, "{ a: 1.0, b: 2 }")', "OK"]); + + // JSON.CLEAR + GlideMultiJson.clear(baseTransaction, key, { path: "$" }); + responseData.push(['clear(key, "bar")', 1]); + + GlideMultiJson.set(baseTransaction, key, "$", JSON.stringify(jsonValue)); + responseData.push(['set(key, "$", "{ "a": 1, b: ["one", "two"] }")', "OK"]); + + // JSON.GET + GlideMultiJson.get(baseTransaction, key, { path: "." 
}); + responseData.push(['get(key, {path: "."})', JSON.stringify(jsonValue)]); + + const jsonValue2 = { a: 1.0, b: [1, 2] }; + GlideMultiJson.set(baseTransaction, key, "$", JSON.stringify(jsonValue2)); + responseData.push(['set(key, "$", "{ "a": 1, b: ["1", "2"] }")', "OK"]); + + // JSON.ARRAPPEND + GlideMultiJson.arrappend(baseTransaction, key, "$.b", ["3", "4"]); + responseData.push(['arrappend(key, "$.b", [\'"3"\', \'"4"\'])', [4]]); + + // JSON.GET to check JSON.ARRAPPEND was successful. + const jsonValueAfterAppend = { a: 1.0, b: [1, 2, 3, 4] }; + GlideMultiJson.get(baseTransaction, key, { path: "." }); + responseData.push([ + 'get(key, {path: "."})', + JSON.stringify(jsonValueAfterAppend), + ]); + + // JSON.ARRINDEX + GlideMultiJson.arrindex(baseTransaction, key, "$.b", "2"); + responseData.push(['arrindex(key, "$.b", "1")', [1]]); + + // JSON.ARRINSERT + GlideMultiJson.arrinsert(baseTransaction, key, "$.b", 2, ["5"]); + responseData.push(['arrinsert(key, "$.b", 4, [\'"5"\'])', [5]]); + + // JSON.GET to check JSON.ARRINSERT was successful. + const jsonValueAfterArrInsert = { a: 1.0, b: [1, 2, 5, 3, 4] }; + GlideMultiJson.get(baseTransaction, key, { path: "." }); + responseData.push([ + 'get(key, {path: "."})', + JSON.stringify(jsonValueAfterArrInsert), + ]); + + // JSON.ARRLEN + GlideMultiJson.arrlen(baseTransaction, key, { path: "$.b" }); + responseData.push(['arrlen(key, "$.b")', [5]]); + + // JSON.ARRPOP + GlideMultiJson.arrpop(baseTransaction, key, { + path: "$.b", + index: 2, + }); + responseData.push(['arrpop(key, {path: "$.b", index: 4})', ["5"]]); + + // JSON.GET to check JSON.ARRPOP was successful. + const jsonValueAfterArrpop = { a: 1.0, b: [1, 2, 3, 4] }; + GlideMultiJson.get(baseTransaction, key, { path: "." 
}); + responseData.push([ + 'get(key, {path: "."})', + JSON.stringify(jsonValueAfterArrpop), + ]); + + // JSON.ARRTRIM + GlideMultiJson.arrtrim(baseTransaction, key, "$.b", 1, 2); + responseData.push(['arrtrim(key, "$.b", 2, 3)', [2]]); + + // JSON.GET to check JSON.ARRTRIM was successful. + const jsonValueAfterArrTrim = { a: 1.0, b: [2, 3] }; + GlideMultiJson.get(baseTransaction, key, { path: "." }); + responseData.push([ + 'get(key, {path: "."})', + JSON.stringify(jsonValueAfterArrTrim), + ]); + return responseData; +} + +export async function transactionMultiJson( + baseTransaction: ClusterTransaction, +): Promise<[string, GlideReturnType][]> { + const responseData: [string, GlideReturnType][] = []; + const key = "{key}:1" + uuidv4(); + const jsonValue = { a: [1, 2], b: [3, 4], c: "c", d: true }; + + // JSON.SET to create a key for testing commands. + GlideMultiJson.set(baseTransaction, key, "$", JSON.stringify(jsonValue)); + responseData.push(['set(key, "$")', "OK"]); + + // JSON.DEBUG MEMORY + GlideMultiJson.debugMemory(baseTransaction, key, { path: "$.a" }); + responseData.push(['debugMemory(key, "{ path: "$.a" }")', [48]]); + + // JSON.DEBUG FIELDS + GlideMultiJson.debugFields(baseTransaction, key, { path: "$.a" }); + responseData.push(['debugFields(key, "{ path: "$.a" }")', [2]]); + + // JSON.OBJLEN + GlideMultiJson.objlen(baseTransaction, key, { path: "." }); + responseData.push(["objlen(key)", 4]); + + // JSON.OBJKEY + GlideMultiJson.objkeys(baseTransaction, key, { path: "." 
}); + responseData.push(['objkeys(key, "$.")', ["a", "b", "c", "d"]]); + + // JSON.NUMINCRBY + GlideMultiJson.numincrby(baseTransaction, key, "$.a[*]", 10.0); + responseData.push(['numincrby(key, "$.a[*]", 10.0)', "[11,12]"]); + + // JSON.NUMMULTBY + GlideMultiJson.nummultby(baseTransaction, key, "$.a[*]", 10.0); + responseData.push(['nummultby(key, "$.a[*]", 10.0)', "[110,120]"]); + + // // JSON.STRAPPEND + GlideMultiJson.strappend(baseTransaction, key, '"-test"', { path: "$.c" }); + responseData.push(['strappend(key, \'"-test"\', "$.c")', [6]]); + + // // JSON.STRLEN + GlideMultiJson.strlen(baseTransaction, key, { path: "$.c" }); + responseData.push(['strlen(key, "$.c")', [6]]); + + // JSON.TYPE + GlideMultiJson.type(baseTransaction, key, { path: "$.a" }); + responseData.push(['type(key, "$.a")', ["array"]]); + + // JSON.MGET + const key2 = "{key}:2" + uuidv4(); + const key3 = "{key}:3" + uuidv4(); + const jsonValue2 = { b: [3, 4], c: "c", d: true }; + GlideMultiJson.set(baseTransaction, key2, "$", JSON.stringify(jsonValue2)); + responseData.push(['set(key2, "$")', "OK"]); + + GlideMultiJson.mget(baseTransaction, [key, key2, key3], "$.a"); + responseData.push([ + 'json.mget([key, key2, key3], "$.a")', + ["[[110,120]]", "[]", null], + ]); + + // JSON.TOGGLE + GlideMultiJson.toggle(baseTransaction, key, { path: "$.d" }); + responseData.push(['toggle(key2, "$.d")', [false]]); + + // JSON.RESP + GlideMultiJson.resp(baseTransaction, key, { path: "$" }); + responseData.push([ + 'resp(key, "$")', + [ + [ + "{", + ["a", ["[", 110, 120]], + ["b", ["[", 3, 4]], + ["c", "c-test"], + ["d", "false"], + ], + ], + ]); + + // JSON.DEL + GlideMultiJson.del(baseTransaction, key, { path: "$.d" }); + responseData.push(['del(key, { path: "$.d" })', 1]); + + // JSON.FORGET + GlideMultiJson.forget(baseTransaction, key, { path: "$.c" }); + responseData.push(['forget(key, {path: "$.c" })', 1]); + + return responseData; +} + /** * This function gets server version using info command in 
glide client. * diff --git a/python/python/glide/__init__.py b/python/python/glide/__init__.py index f2ecc3da4e..4a7ca8328e 100644 --- a/python/python/glide/__init__.py +++ b/python/python/glide/__init__.py @@ -32,7 +32,7 @@ InsertPosition, UpdateOptions, ) -from glide.async_commands.server_modules import ft, glide_json +from glide.async_commands.server_modules import ft, glide_json, json_transaction from glide.async_commands.server_modules.ft_options.ft_aggregate_options import ( FtAggregateApply, FtAggregateClause, @@ -271,6 +271,7 @@ "PubSubMsg", # Json "glide_json", + "json_transaction", "JsonGetOptions", "JsonArrIndexOptions", "JsonArrPopOptions", diff --git a/python/python/glide/async_commands/server_modules/json_transaction.py b/python/python/glide/async_commands/server_modules/json_transaction.py new file mode 100644 index 0000000000..ad0cc91158 --- /dev/null +++ b/python/python/glide/async_commands/server_modules/json_transaction.py @@ -0,0 +1,789 @@ +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 +"""Glide module for `JSON` commands in transaction. + + Examples: + >>> import json + >>> from glide import json_transaction + >>> transaction = ClusterTransaction() + >>> value = {'a': 1.0, 'b': 2} + >>> json_str = json.dumps(value) # Convert Python dictionary to JSON string using json.dumps() + >>> json_transaction.set(transaction, "doc", "$", json_str) + >>> json_transaction.get(transaction, "doc", "$") # Returns the value at path '$' in the JSON document stored at `doc` as JSON string. + >>> result = await glide_client.exec(transaction) + >>> print result[0] # set result + 'OK' # Indicates successful setting of the value at path '$' in the key stored at `doc`. 
+ >>> print result[1] # get result + b"[{\"a\":1.0,\"b\":2}]" + >>> print json.loads(str(result[1])) + [{"a": 1.0, "b": 2}] # JSON object retrieved from the key `doc` using json.loads() + """ + +from typing import List, Optional, Union, cast + +from glide.async_commands.core import ConditionalChange +from glide.async_commands.server_modules.glide_json import ( + JsonArrIndexOptions, + JsonArrPopOptions, + JsonGetOptions, +) +from glide.async_commands.transaction import TTransaction +from glide.constants import TEncodable +from glide.protobuf.command_request_pb2 import RequestType + + +def set( + transaction: TTransaction, + key: TEncodable, + path: TEncodable, + value: TEncodable, + set_condition: Optional[ConditionalChange] = None, +) -> TTransaction: + """ + Sets the JSON value at the specified `path` stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (TEncodable): Represents the path within the JSON document where the value will be set. + The key will be modified only if `value` is added as the last child in the specified `path`, or if the specified `path` acts as the parent of a new child being added. + value (TEncodable): The value to set at the specific path, in JSON formatted bytes or str. + set_condition (Optional[ConditionalChange]): Set the value only if the given condition is met (within the key or path). + Equivalent to [`XX` | `NX`] in the RESP API. Defaults to None. + + Command response: + Optional[TOK]: If the value is successfully set, returns OK. + If `value` isn't set because of `set_condition`, returns None. 
+ """ + args = ["JSON.SET", key, path, value] + if set_condition: + args.append(set_condition.value) + + return transaction.custom_command(args) + + +def get( + transaction: TTransaction, + key: TEncodable, + paths: Optional[Union[TEncodable, List[TEncodable]]] = None, + options: Optional[JsonGetOptions] = None, +) -> TTransaction: + """ + Retrieves the JSON value at the specified `paths` stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + paths (Optional[Union[TEncodable, List[TEncodable]]]): The path or list of paths within the JSON document. Default to None. + options (Optional[JsonGetOptions]): Options for formatting the byte representation of the JSON data. See `JsonGetOptions`. + + Command response: + TJsonResponse[Optional[bytes]]: + If one path is given: + For JSONPath (path starts with `$`): + Returns a stringified JSON list of bytes replies for every possible path, + or a byte string representation of an empty array, if path doesn't exists. + If `key` doesn't exist, returns None. + For legacy path (path doesn't start with `$`): + Returns a byte string representation of the value in `path`. + If `path` doesn't exist, an error is raised. + If `key` doesn't exist, returns None. + If multiple paths are given: + Returns a stringified JSON object in bytes, in which each path is a key, and it's corresponding value, is the value as if the path was executed in the command as a single path. + In case of multiple paths, and `paths` are a mix of both JSONPath and legacy path, the command behaves as if all are JSONPath paths. + For more information about the returned type, see `TJsonResponse`. 
+ """ + args = ["JSON.GET", key] + if options: + args.extend(options.get_options()) + if paths: + if isinstance(paths, (str, bytes)): + paths = [paths] + args.extend(paths) + + return transaction.custom_command(args) + + +def mget( + transaction: TTransaction, + keys: List[TEncodable], + path: TEncodable, +) -> TTransaction: + """ + Retrieves the JSON values at the specified `path` stored at multiple `keys`. + + Note: + When in cluster mode, all keys in the transaction must be mapped to the same slot. + + Args: + transaction (TTransaction): The transaction to execute the command. + keys (List[TEncodable]): A list of keys for the JSON documents. + path (TEncodable): The path within the JSON documents. + + Command response: + List[Optional[bytes]]: + For JSONPath (`path` starts with `$`): + Returns a list of byte representations of the values found at the given path for each key. + If `path` does not exist within the key, the entry will be an empty array. + For legacy path (`path` doesn't starts with `$`): + Returns a list of byte representations of the values found at the given path for each key. + If `path` does not exist within the key, the entry will be None. + If a key doesn't exist, the corresponding list element will be None. + """ + args = ["JSON.MGET"] + keys + [path] + return transaction.custom_command(args) + + +def arrappend( + transaction: TTransaction, + key: TEncodable, + path: TEncodable, + values: List[TEncodable], +) -> TTransaction: + """ + Appends one or more `values` to the JSON array at the specified `path` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (TEncodable): Represents the path within the JSON document where the `values` will be appended. + values (TEncodable): The values to append to the JSON array at the specified path. + JSON string values must be wrapped with quotes. 
For example, to append `"foo"`, pass `"\"foo\""`. + + Command response: + TJsonResponse[int]: + For JSONPath (`path` starts with `$`): + Returns a list of integer replies for every possible path, indicating the new length of the array after appending `values`, + or None for JSON values matching the path that are not an array. + If `path` doesn't exist, an empty array will be returned. + For legacy path (`path` doesn't start with `$`): + Returns the length of the array after appending `values` to the array at `path`. + If multiple paths match, the length of the first updated array is returned. + If the JSON value at `path` is not a array or if `path` doesn't exist, an error is raised. + If `key` doesn't exist, an error is raised. + For more information about the returned type, see `TJsonResponse`. + """ + args = ["JSON.ARRAPPEND", key, path] + values + return transaction.custom_command(args) + + +def arrindex( + transaction: TTransaction, + key: TEncodable, + path: TEncodable, + value: TEncodable, + options: Optional[JsonArrIndexOptions] = None, +) -> TTransaction: + """ + Searches for the first occurrence of a scalar JSON value (i.e., a value that is neither an object nor an array) within arrays at the specified `path` in the JSON document stored at `key`. + + If specified, `options.start` and `options.end` define an inclusive-to-exclusive search range within the array. + (Where `options.start` is inclusive and `options.end` is exclusive). + + Out-of-range indices adjust to the nearest valid position, and negative values count from the end (e.g., `-1` is the last element, `-2` the second last). + + Setting `options.end` to `0` behaves like `-1`, extending the range to the array's end (inclusive). + + If `options.start` exceeds `options.end`, `-1` is returned, indicating that the value was not found. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. 
+ path (TEncodable): The path within the JSON document. + value (TEncodable): The value to search for within the arrays. + options (Optional[JsonArrIndexOptions]): Options specifying an inclusive `start` index and an optional exclusive `end` index for a range-limited search. + Defaults to the full array if not provided. See `JsonArrIndexOptions`. + + Command response: + Optional[Union[int, List[int]]]: + For JSONPath (`path` starts with `$`): + Returns an array of integers for every possible path, indicating of the first occurrence of `value` within the array, + or None for JSON values matching the path that are not an array. + A returned value of `-1` indicates that the value was not found in that particular array. + If `path` does not exist, an empty array will be returned. + For legacy path (`path` doesn't start with `$`): + Returns an integer representing the index of the first occurrence of `value` within the array at the specified path. + A returned value of `-1` indicates that the value was not found in that particular array. + If multiple paths match, the index of the value from the first matching array is returned. + If the JSON value at the `path` is not an array or if `path` does not exist, an error is raised. + If `key` does not exist, an error is raised. + """ + args = ["JSON.ARRINDEX", key, path, value] + + if options: + args.extend(options.to_args()) + + return transaction.custom_command(args) + + +def arrinsert( + transaction: TTransaction, + key: TEncodable, + path: TEncodable, + index: int, + values: List[TEncodable], +) -> TTransaction: + """ + Inserts one or more values into the array at the specified `path` within the JSON document stored at `key`, before the given `index`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (TEncodable): The path within the JSON document. + index (int): The array index before which values are inserted. 
+ values (List[TEncodable]): The JSON values to be inserted into the array, in JSON formatted bytes or str. + Json string values must be wrapped with single quotes. For example, to append "foo", pass '"foo"'. + + Command response: + TJsonResponse[int]: + For JSONPath (`path` starts with '$'): + Returns a list of integer replies for every possible path, indicating the new length of the array, + or None for JSON values matching the path that are not an array. + If `path` does not exist, an empty array will be returned. + For legacy path (`path` doesn't start with '$'): + Returns an integer representing the new length of the array. + If multiple paths are matched, returns the length of the first modified array. + If `path` doesn't exist or the value at `path` is not an array, an error is raised. + If the index is out of bounds, an error is raised. + If `key` doesn't exist, an error is raised. + """ + args = ["JSON.ARRINSERT", key, path, str(index)] + values + return transaction.custom_command(args) + + +def arrlen( + transaction: TTransaction, + key: TEncodable, + path: Optional[TEncodable] = None, +) -> TTransaction: + """ + Retrieves the length of the array at the specified `path` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (Optional[TEncodable]): The path within the JSON document. Defaults to None. + + Command response: + Optional[TJsonResponse[int]]: + For JSONPath (`path` starts with `$`): + Returns a list of integer replies for every possible path, indicating the length of the array, + or None for JSON values matching the path that are not an array. + If `path` doesn't exist, an empty array will be returned. + For legacy path (`path` doesn't starts with `$`): + Returns the length of the array at `path`. + If multiple paths match, the length of the first array match is returned. 
+ If the JSON value at `path` is not a array or if `path` doesn't exist, an error is raised. + If `key` doesn't exist, None is returned. + """ + args = ["JSON.ARRLEN", key] + if path: + args.append(path) + return transaction.custom_command(args) + + +def arrpop( + transaction: TTransaction, + key: TEncodable, + options: Optional[JsonArrPopOptions] = None, +) -> TTransaction: + """ + Pops an element from the array located at the specified path within the JSON document stored at `key`. + If `options.index` is provided, it pops the element at that index instead of the last element. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + options (Optional[JsonArrPopOptions]): Options including the path and optional index. See `JsonArrPopOptions`. Default to None. + If not specified, attempts to pop the last element from the root value if it's an array. + If the root value is not an array, an error will be raised. + + Command response: + Optional[TJsonResponse[bytes]]: + For JSONPath (`options.path` starts with `$`): + Returns a list of bytes string replies for every possible path, representing the popped JSON values, + or None for JSON values matching the path that are not an array or are an empty array. + If `options.path` doesn't exist, an empty list will be returned. + For legacy path (`options.path` doesn't starts with `$`): + Returns a bytes string representing the popped JSON value, or None if the array at `options.path` is empty. + If multiple paths match, the value from the first matching array that is not empty is returned. + If the JSON value at `options.path` is not a array or if `options.path` doesn't exist, an error is raised. + If `key` doesn't exist, an error is raised. 
+ """ + args = ["JSON.ARRPOP", key] + if options: + args.extend(options.to_args()) + + return transaction.custom_command(args) + + +def arrtrim( + transaction: TTransaction, + key: TEncodable, + path: TEncodable, + start: int, + end: int, +) -> TTransaction: + """ + Trims an array at the specified `path` within the JSON document stored at `key` so that it becomes a subarray [start, end], both inclusive. + If `start` < 0, it is treated as 0. + If `end` >= size (size of the array), it is treated as size-1. + If `start` >= size or `start` > `end`, the array is emptied and 0 is returned. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (TEncodable): The path within the JSON document. + start (int): The start index, inclusive. + end (int): The end index, inclusive. + + Command response: + TJsonResponse[int]: + For JSONPath (`path` starts with '$'): + Returns a list of integer replies for every possible path, indicating the new length of the array, or None for JSON values matching the path that are not an array. + If a value is an empty array, its corresponding return value is 0. + If `path` doesn't exist, an empty array will be returned. + For legacy path (`path` doesn't starts with `$`): + Returns an integer representing the new length of the array. + If the array is empty, returns 0. + If multiple paths match, the length of the first trimmed array match is returned. + If `path` doesn't exist, or the value at `path` is not an array, an error is raised. + If `key` doesn't exist, an error is raised. + """ + + return transaction.custom_command(["JSON.ARRTRIM", key, path, str(start), str(end)]) + + +def clear( + transaction: TTransaction, + key: TEncodable, + path: Optional[str] = None, +) -> TTransaction: + """ + Clears arrays or objects at the specified JSON path in the document stored at `key`. 
+ Numeric values are set to `0`, boolean values are set to `False`, and string values are converted to empty strings.
+
+ Args:
+ transaction (TTransaction): The transaction to execute the command.
+ key (TEncodable): The key of the JSON document.
+ path (Optional[str]): The path within the JSON document. Default to None.
+
+ Command response:
+ int: The number of containers cleared, numeric values zeroed, and booleans toggled to `false`,
+ and string values converted to empty strings.
+ If `path` doesn't exist, or the value at `path` is already empty (e.g., an empty array, object, or string), 0 is returned.
+ If `key` doesn't exist, an error is raised.
+ """
+ args = ["JSON.CLEAR", key]
+ if path:
+ args.append(path)
+
+ return transaction.custom_command(args)
+
+
+def debug_fields(
+ transaction: TTransaction,
+ key: TEncodable,
+ path: Optional[TEncodable] = None,
+) -> TTransaction:
+ """
+ Returns the number of fields of the JSON value at the specified `path` within the JSON document stored at `key`.
+ - **Primitive Values**: Each non-container JSON value (e.g., strings, numbers, booleans, and null) counts as one field.
+ - **Arrays and Objects**: Each item in an array and each key-value pair in an object is counted as one field. (Each top-level value counts as one field, regardless of its type.)
+ - Their nested values are counted recursively and added to the total.
+ - **Example**: For the JSON `{"a": 1, "b": [2, 3, {"c": 4}]}`, the count would be:
+ - Top-level: 2 fields (`"a"` and `"b"`)
+ - Nested: 3 fields in the array (`2`, `3`, and `{"c": 4}`) plus 1 for the object (`"c"`)
+ - Total: 2 (top-level) + 3 (from array) + 1 (from nested object) = 6 fields.
+
+ Args:
+ transaction (TTransaction): The transaction to execute the command.
+ key (TEncodable): The key of the JSON document.
+ path (Optional[TEncodable]): The path within the JSON document. Defaults to root if not provided.
+ + Command response: + Optional[TJsonUniversalResponse[int]]: + For JSONPath (`path` starts with `$`): + Returns an array of integers, each indicating the number of fields for each matched `path`. + If `path` doesn't exist, an empty array will be returned. + For legacy path (`path` doesn't start with `$`): + Returns an integer indicating the number of fields for each matched `path`. + If multiple paths match, number of fields of the first JSON value match is returned. + If `path` doesn't exist, an error is raised. + If `path` is not provided, it reports the total number of fields in the entire JSON document. + If `key` doesn't exist, None is returned. + """ + args = ["JSON.DEBUG", "FIELDS", key] + if path: + args.append(path) + + return transaction.custom_command(args) + + +def debug_memory( + transaction: TTransaction, + key: TEncodable, + path: Optional[TEncodable] = None, +) -> TTransaction: + """ + Reports memory usage in bytes of a JSON value at the specified `path` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (Optional[TEncodable]): The path within the JSON document. Defaults to None. + + Command response: + Optional[TJsonUniversalResponse[int]]: + For JSONPath (`path` starts with `$`): + Returns an array of integers, indicating the memory usage in bytes of a JSON value for each matched `path`. + If `path` doesn't exist, an empty array will be returned. + For legacy path (`path` doesn't start with `$`): + Returns an integer, indicating the memory usage in bytes for the JSON value in `path`. + If multiple paths match, the memory usage of the first JSON value match is returned. + If `path` doesn't exist, an error is raised. + If `path` is not provided, it reports the total memory usage in bytes in the entire JSON document. + If `key` doesn't exist, None is returned. 
+ """ + args = ["JSON.DEBUG", "MEMORY", key] + if path: + args.append(path) + + return transaction.custom_command(args) + + +def delete( + transaction: TTransaction, + key: TEncodable, + path: Optional[TEncodable] = None, +) -> TTransaction: + """ + Deletes the JSON value at the specified `path` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (Optional[TEncodable]): The path within the JSON document. + If None, deletes the entire JSON document at `key`. Defaults to None. + + Command response: + int: The number of elements removed. + If `key` or `path` doesn't exist, returns 0. + """ + + return transaction.custom_command(["JSON.DEL", key] + ([path] if path else [])) + + +def forget( + transaction: TTransaction, + key: TEncodable, + path: Optional[TEncodable] = None, +) -> TTransaction: + """ + Deletes the JSON value at the specified `path` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (Optional[TEncodable]): The path within the JSON document. + If None, deletes the entire JSON document at `key`. Defaults to None. + + Command response: + int: The number of elements removed. + If `key` or `path` doesn't exist, returns 0. + """ + + return transaction.custom_command(["JSON.FORGET", key] + ([path] if path else [])) + + +def numincrby( + transaction: TTransaction, + key: TEncodable, + path: TEncodable, + number: Union[int, float], +) -> TTransaction: + """ + Increments or decrements the JSON value(s) at the specified `path` by `number` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (TEncodable): The path within the JSON document. 
+ number (Union[int, float]): The number to increment or decrement by. + + Command response: + bytes: + For JSONPath (`path` starts with `$`): + Returns a bytes string representation of an array of bulk strings, indicating the new values after incrementing for each matched `path`. + If a value is not a number, its corresponding return value will be `null`. + If `path` doesn't exist, a byte string representation of an empty array will be returned. + For legacy path (`path` doesn't start with `$`): + Returns a bytes string representation of the resulting value after the increment or decrement. + If multiple paths match, the result of the last updated value is returned. + If the value at the `path` is not a number or `path` doesn't exist, an error is raised. + If `key` does not exist, an error is raised. + If the result is out of the range of 64-bit IEEE double, an error is raised. + """ + args = ["JSON.NUMINCRBY", key, path, str(number)] + + return transaction.custom_command(args) + + +def nummultby( + transaction: TTransaction, + key: TEncodable, + path: TEncodable, + number: Union[int, float], +) -> TTransaction: + """ + Multiplies the JSON value(s) at the specified `path` by `number` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (TEncodable): The path within the JSON document. + number (Union[int, float]): The number to multiply by. + + Command response: + bytes: + For JSONPath (`path` starts with `$`): + Returns a bytes string representation of an array of bulk strings, indicating the new values after multiplication for each matched `path`. + If a value is not a number, its corresponding return value will be `null`. + If `path` doesn't exist, a byte string representation of an empty array will be returned. 
+ For legacy path (`path` doesn't start with `$`): + Returns a bytes string representation of the resulting value after multiplication. + If multiple paths match, the result of the last updated value is returned. + If the value at the `path` is not a number or `path` doesn't exist, an error is raised. + If `key` does not exist, an error is raised. + If the result is out of the range of 64-bit IEEE double, an error is raised. + """ + args = ["JSON.NUMMULTBY", key, path, str(number)] + + return transaction.custom_command(args) + + +def objlen( + transaction: TTransaction, + key: TEncodable, + path: Optional[TEncodable] = None, +) -> TTransaction: + """ + Retrieves the number of key-value pairs in the object stored at the specified `path` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (Optional[TEncodable]): The path within the JSON document. Defaults to None. + + Command response: + Optional[TJsonResponse[int]]: + For JSONPath (`path` starts with `$`): + Returns a list of integer replies for every possible path, indicating the length of the object, + or None for JSON values matching the path that are not an object. + If `path` doesn't exist, an empty array will be returned. + For legacy path (`path` doesn't starts with `$`): + Returns the length of the object at `path`. + If multiple paths match, the length of the first object match is returned. + If the JSON value at `path` is not an object or if `path` doesn't exist, an error is raised. + If `key` doesn't exist, None is returned. + """ + args = ["JSON.OBJLEN", key] + if path: + args.append(path) + + return transaction.custom_command(args) + + +def objkeys( + transaction: TTransaction, + key: TEncodable, + path: Optional[TEncodable] = None, +) -> TTransaction: + """ + Retrieves key names in the object values at the specified `path` within the JSON document stored at `key`. 
+ + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (Optional[TEncodable]): Represents the path within the JSON document where the key names will be retrieved. + Defaults to None. + + Command response: + Optional[TJsonUniversalResponse[List[bytes]]]: + For JSONPath (`path` starts with `$`): + Returns a list of arrays containing key names for each matching object. + If a value matching the path is not an object, an empty array is returned. + If `path` doesn't exist, an empty array is returned. + For legacy path (`path` starts with `.`): + Returns a list of key names for the object value matching the path. + If multiple objects match the path, the key names of the first object are returned. + If a value matching the path is not an object, an error is raised. + If `path` doesn't exist, None is returned. + If `key` doesn't exist, None is returned. + """ + args = ["JSON.OBJKEYS", key] + if path: + args.append(path) + + return transaction.custom_command(args) + + +def resp( + transaction: TTransaction, + key: TEncodable, + path: Optional[TEncodable] = None, +) -> TTransaction: + """ + Retrieve the JSON value at the specified `path` within the JSON document stored at `key`. + The returning result is in the Valkey or Redis OSS Serialization Protocol (RESP).\n + JSON null is mapped to the RESP Null Bulk String.\n + JSON Booleans are mapped to RESP Simple string.\n + JSON integers are mapped to RESP Integers.\n + JSON doubles are mapped to RESP Bulk Strings.\n + JSON strings are mapped to RESP Bulk Strings.\n + JSON arrays are represented as RESP arrays, where the first element is the simple string [, followed by the array's elements.\n + JSON objects are represented as RESP object, where the first element is the simple string {, followed by key-value pairs, each of which is a RESP bulk string.\n + + Args: + transaction (TTransaction): The transaction to execute the command. 
+ key (TEncodable): The key of the JSON document.
+ path (Optional[TEncodable]): The path within the JSON document. Default to None.
+
+ Command response:
+ TJsonUniversalResponse[Optional[Union[bytes, int, List[Optional[Union[bytes, int]]]]]]
+ For JSONPath ('path' starts with '$'):
+ Returns a list of replies for every possible path, indicating the RESP form of the JSON value.
+ If `path` doesn't exist, returns an empty list.
+ For legacy path (`path` doesn't start with `$`):
+ Returns a single reply for the JSON value at the specified path, in its RESP form.
+ This can be a bytes object, an integer, None, or a list representing complex structures.
+ If multiple paths match, the value of the first JSON value match is returned.
+ If `path` doesn't exist, an error is raised.
+ If `key` doesn't exist, None is returned.
+ """
+ args = ["JSON.RESP", key]
+ if path:
+ args.append(path)
+
+ return transaction.custom_command(args)
+
+
+def strappend(
+ transaction: TTransaction,
+ key: TEncodable,
+ value: TEncodable,
+ path: Optional[TEncodable] = None,
+) -> TTransaction:
+ """
+ Appends the specified `value` to the string stored at the specified `path` within the JSON document stored at `key`.
+
+ Args:
+ transaction (TTransaction): The transaction to execute the command.
+ key (TEncodable): The key of the JSON document.
+ value (TEncodable): The value to append to the string. Must be wrapped with single quotes. For example, to append "foo", pass '"foo"'.
+ path (Optional[TEncodable]): The path within the JSON document. Default to None.
+
+ Command response:
+ TJsonResponse[int]:
+ For JSONPath (`path` starts with `$`):
+ Returns a list of integer replies for every possible path, indicating the length of the resulting string after appending `value`,
+ or None for JSON values matching the path that are not string.
+ If `key` doesn't exist, an error is raised.
+ For legacy path (`path` doesn't start with `$`):
+ Returns the length of the resulting string after appending `value` to the string at `path`.
+ If multiple paths match, the length of the last updated string is returned.
+ If the JSON value at `path` is not a string or if `path` doesn't exist, an error is raised.
+ If `key` doesn't exist, an error is raised.
+ For more information about the returned type, see `TJsonResponse`.
+ """
+ return transaction.custom_command(
+ ["JSON.STRAPPEND", key] + ([path, value] if path else [value])
+ )
+
+
+def strlen(
+ transaction: TTransaction,
+ key: TEncodable,
+ path: Optional[TEncodable] = None,
+) -> TTransaction:
+ """
+ Returns the length of the JSON string value stored at the specified `path` within the JSON document stored at `key`.
+
+ Args:
+ transaction (TTransaction): The transaction to execute the command.
+ key (TEncodable): The key of the JSON document.
+ path (Optional[TEncodable]): The path within the JSON document. Default to None.
+
+ Command response:
+ TJsonResponse[Optional[int]]:
+ For JSONPath (`path` starts with `$`):
+ Returns a list of integer replies for every possible path, indicating the length of the JSON string value,
+ or None for JSON values matching the path that are not string.
+ For legacy path (`path` doesn't start with `$`):
+ Returns the length of the JSON value at `path` or None if `key` doesn't exist.
+ If multiple paths match, the length of the first matched string is returned.
+ If the JSON value at `path` is not a string or if `path` doesn't exist, an error is raised.
+ If `key` doesn't exist, None is returned.
+ For more information about the returned type, see `TJsonResponse`.
+ """ + return transaction.custom_command( + ["JSON.STRLEN", key, path] if path else ["JSON.STRLEN", key] + ) + + +def toggle( + transaction: TTransaction, + key: TEncodable, + path: TEncodable, +) -> TTransaction: + """ + Toggles a Boolean value stored at the specified `path` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (TEncodable): The path within the JSON document. Default to None. + + Command response: + TJsonResponse[bool]: + For JSONPath (`path` starts with `$`): + Returns a list of boolean replies for every possible path, with the toggled boolean value, + or None for JSON values matching the path that are not boolean. + If `key` doesn't exist, an error is raised. + For legacy path (`path` doesn't start with `$`): + Returns the value of the toggled boolean in `path`. + If the JSON value at `path` is not a boolean of if `path` doesn't exist, an error is raised. + If `key` doesn't exist, an error is raised. + For more information about the returned type, see `TJsonResponse`. + """ + return transaction.custom_command(["JSON.TOGGLE", key, path]) + + +def type( + transaction: TTransaction, + key: TEncodable, + path: Optional[TEncodable] = None, +) -> TTransaction: + """ + Retrieves the type of the JSON value at the specified `path` within the JSON document stored at `key`. + + Args: + transaction (TTransaction): The transaction to execute the command. + key (TEncodable): The key of the JSON document. + path (Optional[TEncodable]): The path within the JSON document. Default to None. + + Command response: + Optional[TJsonUniversalResponse[bytes]]: + For JSONPath ('path' starts with '$'): + Returns a list of byte string replies for every possible path, indicating the type of the JSON value. + If `path` doesn't exist, an empty array will be returned. 
+ For legacy path (`path` doesn't starts with `$`): + Returns the type of the JSON value at `path`. + If multiple paths match, the type of the first JSON value match is returned. + If `path` doesn't exist, None will be returned. + If `key` doesn't exist, None is returned. + """ + args = ["JSON.TYPE", key] + if path: + args.append(path) + + return transaction.custom_command(args) diff --git a/python/python/tests/tests_server_modules/test_json.py b/python/python/tests/tests_server_modules/test_json.py index 85657914de..0182943d82 100644 --- a/python/python/tests/tests_server_modules/test_json.py +++ b/python/python/tests/tests_server_modules/test_json.py @@ -4,19 +4,26 @@ import json as OuterJson import random import typing +from typing import List import pytest from glide.async_commands.core import ConditionalChange, InfoSection from glide.async_commands.server_modules import glide_json as json +from glide.async_commands.server_modules import json_transaction from glide.async_commands.server_modules.glide_json import ( JsonArrIndexOptions, JsonArrPopOptions, JsonGetOptions, ) +from glide.async_commands.transaction import ( + BaseTransaction, + ClusterTransaction, + Transaction, +) from glide.config import ProtocolVersion from glide.constants import OK from glide.exceptions import RequestError -from glide.glide_client import TGlideClient +from glide.glide_client import GlideClusterClient, TGlideClient from tests.test_async_client import get_random_string, parse_info_response @@ -2097,3 +2104,128 @@ async def test_json_arrpop(self, glide_client: TGlideClient): assert await json.arrpop(glide_client, key2, JsonArrPopOptions("[*]")) == b'"a"' assert await json.get(glide_client, key2, ".") == b'[[],[],["a"],["a","b"]]' + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_json_transaction_array(self, glide_client: GlideClusterClient): + transaction = ClusterTransaction() 
+ + key = get_random_string(5) + json_value1 = {"a": 1.0, "b": 2} + json_value2 = {"a": 1.0, "b": [1, 2]} + + # Test 'set', 'get', and 'clear' commands + json_transaction.set(transaction, key, "$", OuterJson.dumps(json_value1)) + json_transaction.clear(transaction, key, "$") + json_transaction.set(transaction, key, "$", OuterJson.dumps(json_value1)) + json_transaction.get(transaction, key, ".") + + # Test array related commands + json_transaction.set(transaction, key, "$", OuterJson.dumps(json_value2)) + json_transaction.arrappend(transaction, key, "$.b", ["3", "4"]) + json_transaction.arrindex(transaction, key, "$.b", "2") + json_transaction.arrinsert(transaction, key, "$.b", 2, ["5"]) + json_transaction.arrlen(transaction, key, "$.b") + json_transaction.arrpop( + transaction, key, JsonArrPopOptions(path="$.b", index=2) + ) + json_transaction.arrtrim(transaction, key, "$.b", 1, 2) + json_transaction.get(transaction, key, ".") + + result = await glide_client.exec(transaction) + assert isinstance(result, list) + + assert result[0] == "OK" # set + assert result[1] == 1 # clear + assert result[2] == "OK" # set + assert isinstance(result[3], bytes) + assert OuterJson.loads(result[3]) == json_value1 # get + + assert result[4] == "OK" # set + assert result[5] == [4] # arrappend + assert result[6] == [1] # arrindex + assert result[7] == [5] # arrinsert + assert result[8] == [5] # arrlen + assert result[9] == [b"5"] # arrpop + assert result[10] == [2] # arrtrim + assert isinstance(result[11], bytes) + assert OuterJson.loads(result[11]) == {"a": 1.0, "b": [2, 3]} # get + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_json_transaction(self, glide_client: GlideClusterClient): + transaction = ClusterTransaction() + + key = f"{{key}}-1{get_random_string(5)}" + key2 = f"{{key}}-2{get_random_string(5)}" + key3 = f"{{key}}-3{get_random_string(5)}" + json_value = {"a": [1, 
2], "b": [3, 4], "c": "c", "d": True} + + json_transaction.set(transaction, key, "$", OuterJson.dumps(json_value)) + + # Test debug commands + json_transaction.debug_memory(transaction, key, "$.a") + json_transaction.debug_fields(transaction, key, "$.a") + + # Test obj commands + json_transaction.objlen(transaction, key, ".") + json_transaction.objkeys(transaction, key, ".") + + # Test num commands + json_transaction.numincrby(transaction, key, "$.a[*]", 10.0) + json_transaction.nummultby(transaction, key, "$.a[*]", 10.0) + + # Test str commands + json_transaction.strappend(transaction, key, '"-test"', "$.c") + json_transaction.strlen(transaction, key, "$.c") + + # Test type command + json_transaction.type(transaction, key, "$.a") + + # Test mget command + json_value2 = {"b": [3, 4], "c": "c", "d": True} + json_transaction.set(transaction, key2, "$", OuterJson.dumps(json_value2)) + json_transaction.mget(transaction, [key, key2, key3], "$.a") + + # Test toggle command + json_transaction.toggle(transaction, key, "$.d") + + # Test resp command + json_transaction.resp(transaction, key, "$") + + # Test del command + json_transaction.delete(transaction, key, "$.d") + + # Test forget command + json_transaction.forget(transaction, key, "$.c") + + result = await glide_client.exec(transaction) + assert isinstance(result, list) + + assert result[0] == "OK" # set + assert result[1] == [48] # debug_memory + assert result[2] == [2] # debug_field + + assert result[3] == 4 # objlen + assert result[4] == [b"a", b"b", b"c", b"d"] # objkeys + assert result[5] == b"[11,12]" # numincrby + assert result[6] == b"[110,120]" # nummultby + assert result[7] == [6] # strappend + assert result[8] == [6] # strlen + assert result[9] == [b"array"] # type + assert result[10] == "OK" # set + assert result[11] == [b"[[110,120]]", b"[]", None] # mget + assert result[12] == [False] # toggle + + assert result[13] == [ + [ + b"{", + [b"a", [b"[", 110, 120]], + [b"b", [b"[", 3, 4]], + [b"c", b"c-test"], 
+ [b"d", b"false"], + ] + ] # resp + + assert result[14] == 1 # del + assert result[15] == 1 # forget