diff --git a/.github/workflows/ort.yml b/.github/workflows/ort.yml index 3270b2052f..014db6da9b 100644 --- a/.github/workflows/ort.yml +++ b/.github/workflows/ort.yml @@ -28,6 +28,7 @@ jobs: PYTHON_ATTRIBUTIONS: "python/THIRD_PARTY_LICENSES_PYTHON" NODE_ATTRIBUTIONS: "node/THIRD_PARTY_LICENSES_NODE" RUST_ATTRIBUTIONS: "glide-core/THIRD_PARTY_LICENSES_RUST" + JAVA_ATTRIBUTIONS: "java/THIRD_PARTY_LICENSES_JAVA" steps: - name: Set the release version shell: bash @@ -158,6 +159,19 @@ jobs: with: folder_path: "${{ github.workspace }}/glide-core" + ### Java ### + + - name: Set up JDK 11 + uses: actions/setup-java@v4 + with: + distribution: "temurin" + java-version: 11 + + - name: Run ORT tools for Java + uses: ./.github/workflows/run-ort-tools + with: + folder_path: "${{ github.workspace }}/java" + ### Process results ### - name: Check for diff @@ -165,7 +179,8 @@ jobs: cp python/ort_results/NOTICE_DEFAULT $PYTHON_ATTRIBUTIONS cp node/ort_results/NOTICE_DEFAULT $NODE_ATTRIBUTIONS cp glide-core/ort_results/NOTICE_DEFAULT $RUST_ATTRIBUTIONS - GIT_DIFF=`git diff $PYTHON_ATTRIBUTIONS $NODE_ATTRIBUTIONS $RUST_ATTRIBUTIONS` + cp java/ort_results/NOTICE_DEFAULT $JAVA_ATTRIBUTIONS + GIT_DIFF=`git diff $PYTHON_ATTRIBUTIONS $NODE_ATTRIBUTIONS $RUST_ATTRIBUTIONS $JAVA_ATTRIBUTIONS` if [ -n "$GIT_DIFF" ]; then echo "FOUND_DIFF=true" >> $GITHUB_ENV else @@ -191,7 +206,7 @@ jobs: git config --global user.email "glide-for-redis@amazon.com" git config --global user.name "ort-bot" git checkout -b ${BRANCH_NAME} - git add $PYTHON_ATTRIBUTIONS $NODE_ATTRIBUTIONS $RUST_ATTRIBUTIONS + git add $PYTHON_ATTRIBUTIONS $NODE_ATTRIBUTIONS $RUST_ATTRIBUTIONS $JAVA_ATTRIBUTIONS git commit -m "Updated attribution files" git push --set-upstream origin ${BRANCH_NAME} -f title="Updated attribution files for ${BRANCH_NAME}" diff --git a/CHANGELOG.md b/CHANGELOG.md index 995cc0f59f..f1f8511d97 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ * Python: Added OBJECT FREQ command 
([#1472](https://github.com/aws/glide-for-redis/pull/1472)) * Python: Added OBJECT IDLETIME command ([#1474](https://github.com/aws/glide-for-redis/pull/1474)) * Python: Added GEOSEARCH command ([#1482](https://github.com/aws/glide-for-redis/pull/1482)) +* Python: Added GEOSEARCHSTORE command ([#1581](https://github.com/aws/glide-for-redis/pull/1581)) * Node: Added RENAMENX command ([#1483](https://github.com/aws/glide-for-redis/pull/1483)) * Python: Added OBJECT REFCOUNT command ([#1485](https://github.com/aws/glide-for-redis/pull/1485)) * Python: Added RENAMENX command ([#1492](https://github.com/aws/glide-for-redis/pull/1492)) @@ -13,27 +14,56 @@ * Python: Added XLEN command ([#1503](https://github.com/aws/glide-for-redis/pull/1503)) * Python: Added LASTSAVE command ([#1509](https://github.com/aws/glide-for-redis/pull/1509)) * Python: Added GETDEL command ([#1514](https://github.com/aws/glide-for-redis/pull/1514)) +* Python: Added GETRANGE command ([#1585](https://github.com/aws/glide-for-redis/pull/1585)) * Python: Added ZINTER, ZUNION commands ([#1478](https://github.com/aws/glide-for-redis/pull/1478)) * Python: Added SINTERCARD command ([#1511](https://github.com/aws/glide-for-redis/pull/1511)) * Python: Added SORT command ([#1439](https://github.com/aws/glide-for-redis/pull/1439)) * Node: Added OBJECT ENCODING command ([#1518](https://github.com/aws/glide-for-redis/pull/1518), [#1559](https://github.com/aws/glide-for-redis/pull/1559)) * Python: Added LMOVE and BLMOVE commands ([#1536](https://github.com/aws/glide-for-redis/pull/1536)) * Node: Added SUNIONSTORE command ([#1549](https://github.com/aws/glide-for-redis/pull/1549)) +* Python: Added SUNION command ([#1583](https://github.com/aws/glide-for-redis/pull/1583)) * Node: Added PFCOUNT command ([#1545](https://github.com/aws/glide-for-redis/pull/1545)) * Node: Added OBJECT FREQ command ([#1542](https://github.com/aws/glide-for-redis/pull/1542), [#1559](https://github.com/aws/glide-for-redis/pull/1559)) * 
Node: Added LINSERT command ([#1544](https://github.com/aws/glide-for-redis/pull/1544)) * Node: Added XLEN command ([#1555](https://github.com/aws/glide-for-redis/pull/1555)) * Node: Added ZINTERCARD command ([#1553](https://github.com/aws/glide-for-redis/pull/1553)) +* Python: Added ZINCBY command ([#1586](https://github.com/aws/glide-for-redis/pull/1586)) * Python: Added LMPOP and BLMPOP commands ([#1547](https://github.com/aws/glide-for-redis/pull/1547)) +* Python: Added HSTRLEN command ([#1564](https://github.com/aws/glide-for-redis/pull/1564)) * Python: Added MSETNX command ([#1565](https://github.com/aws/glide-for-redis/pull/1565)) * Python: Added MOVE command ([#1566](https://github.com/aws/glide-for-redis/pull/1566)) +* Python: Added EXPIRETIME, PEXPIRETIME commands ([#1587](https://github.com/aws/glide-for-redis/pull/1587)) +* Python: Added LSET command ([#1584](https://github.com/aws/glide-for-redis/pull/1584)) * Node: Added OBJECT IDLETIME command ([#1567](https://github.com/aws/glide-for-redis/pull/1567)) * Node: Added OBJECT REFCOUNT command ([#1568](https://github.com/aws/glide-for-redis/pull/1568)) * Python: Added SETBIT command ([#1571](https://github.com/aws/glide-for-redis/pull/1571)) +* Python: Added SRandMember command ([#1578](https://github.com/aws/glide-for-redis/pull/1578)) +* Python: Added GETBIT command ([#1575](https://github.com/aws/glide-for-redis/pull/1575)) +* Python: Added BITCOUNT command ([#1592](https://github.com/aws/glide-for-redis/pull/1592)) +* Python: Added FLUSHALL command ([#1579](https://github.com/aws/glide-for-redis/pull/1579)) +* Python: Added TOUCH command ([#1582](https://github.com/aws/glide-for-redis/pull/1582)) +* Python: Added BITOP command ([#1596](https://github.com/aws/glide-for-redis/pull/1596)) +* Python: Added BITPOS command ([#1604](https://github.com/aws/glide-for-redis/pull/1604)) +* Python: Added GETEX command ([#1612](https://github.com/aws/glide-for-redis/pull/1612)) +* Python: Added BITFIELD and 
BITFIELD_RO commands ([#1615](https://github.com/aws/glide-for-redis/pull/1615)) +* Python: Added ZREVRANK command ([#1614](https://github.com/aws/glide-for-redis/pull/1614)) +* Python: Added XDEL command ([#1619](https://github.com/aws/glide-for-redis/pull/1619)) +* Python: Added XRANGE command ([#1624](https://github.com/aws/glide-for-redis/pull/1624)) +* Python: Added COPY command ([#1626](https://github.com/aws/glide-for-redis/pull/1626)) +* Python: Added XREVRANGE command ([#1625](https://github.com/aws/glide-for-redis/pull/1625)) +* Python: Added XREAD command ([#1644](https://github.com/aws/glide-for-redis/pull/1644)) +* Python: Added XGROUP CREATE and XGROUP DESTROY commands ([#1646](https://github.com/aws/glide-for-redis/pull/1646)) +* Python: Added XGROUP CREATECONSUMER and XGROUP DELCONSUMER commands ([#1658](https://github.com/aws/glide-for-redis/pull/1658)) +* Python: Added LOLWUT command ([#1657](https://github.com/aws/glide-for-redis/pull/1657)) +* Python: Added XREADGROUP command ([#1679](https://github.com/aws/glide-for-redis/pull/1679)) +* Python: Added XACK command ([#1681](https://github.com/aws/glide-for-redis/pull/1681)) +* Python: Added FLUSHDB command ([#1680](https://github.com/aws/glide-for-redis/pull/1680)) * Python: Added FUNCTION LOAD command ([#1699](https://github.com/aws/glide-for-redis/pull/1699)) ### Breaking Changes * Node: Update XREAD to return a Map of Map ([#1494](https://github.com/aws/glide-for-redis/pull/1494)) +* Node: Rename RedisClient to GlideClient and RedisClusterClient to GlideClusterClient ([#1670](https://github.com/aws/glide-for-redis/pull/1670)) +* Python: Rename RedisClient to GlideClient, RedisClusterClient to GlideClusterClient and BaseRedisClient to BaseClient([#1669](https://github.com/aws/glide-for-redis/pull/1669)) ## 0.4.1 (2024-02-06) diff --git a/benchmarks/csharp/Program.cs b/benchmarks/csharp/Program.cs index 84a785aac5..3a3e8a6f08 100644 --- a/benchmarks/csharp/Program.cs +++ 
b/benchmarks/csharp/Program.cs @@ -1,4 +1,4 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 using System.Collections.Concurrent; using System.Diagnostics; diff --git a/benchmarks/node/node_benchmark.ts b/benchmarks/node/node_benchmark.ts index ce044cb770..1f5b70703f 100644 --- a/benchmarks/node/node_benchmark.ts +++ b/benchmarks/node/node_benchmark.ts @@ -1,9 +1,9 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { writeFileSync } from "fs"; -import { Logger, RedisClient, RedisClusterClient } from "glide-for-redis"; +import { GlideClient, GlideClusterClient, Logger } from "glide-for-redis"; import { Cluster, Redis } from "ioredis"; import { parse } from "path"; import percentile from "percentile"; @@ -216,8 +216,8 @@ async function main( if (clientsToRun == "all" || clientsToRun == "glide") { const clientClass = clusterModeEnabled - ? RedisClusterClient - : RedisClient; + ? GlideClusterClient + : GlideClient; const clients = await createClients(clientCount, () => clientClass.createClient({ addresses: [{ host, port }], @@ -232,7 +232,7 @@ async function main( dataSize, data, (client) => { - (client as RedisClient).close(); + (client as GlideClient).close(); }, clusterModeEnabled, ); @@ -240,11 +240,11 @@ async function main( } if (clientsToRun == "all") { - const nodeRedisClients = await createClients(clientCount, async () => { + const nodeGlideClients = await createClients(clientCount, async () => { const node = { url: getAddress(host, useTLS, port), }; - const nodeRedisClient = clusterModeEnabled + const nodeGlideClient = clusterModeEnabled ? 
createCluster({ rootNodes: [{ socket: { host, port, tls: useTLS } }], defaults: { @@ -255,11 +255,11 @@ async function main( useReplicas: true, }) : createClient(node); - await nodeRedisClient.connect(); - return nodeRedisClient; + await nodeGlideClient.connect(); + return nodeGlideClient; }); await runClients( - nodeRedisClients, + nodeGlideClients, "node_redis", totalCommands, numOfConcurrentTasks, diff --git a/benchmarks/python/python_benchmark.py b/benchmarks/python/python_benchmark.py index 29262764eb..1da52f9941 100644 --- a/benchmarks/python/python_benchmark.py +++ b/benchmarks/python/python_benchmark.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import argparse import asyncio @@ -17,11 +17,11 @@ import redis.asyncio as redispy # type: ignore from glide import ( BaseClientConfiguration, + GlideClient, + GlideClusterClient, Logger, LogLevel, NodeAddress, - RedisClient, - RedisClusterClient, ) @@ -288,7 +288,7 @@ async def main( if clients_to_run == "all" or clients_to_run == "glide": # Glide Socket - client_class = RedisClusterClient if is_cluster else RedisClient + client_class = GlideClusterClient if is_cluster else GlideClient config = BaseClientConfiguration( [NodeAddress(host=host, port=port)], use_tls=use_tls ) diff --git a/benchmarks/rust/src/main.rs b/benchmarks/rust/src/main.rs index 8503375195..edace91a30 100644 --- a/benchmarks/rust/src/main.rs +++ b/benchmarks/rust/src/main.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ #[cfg(not(target_env = "msvc"))] @@ -236,7 +236,7 @@ async fn get_connection(args: &Args) -> Client { ..Default::default() }; - glide_core::client::Client::new(connection_request) + glide_core::client::Client::new(connection_request, None) .await .unwrap() } diff --git 
a/benchmarks/utilities/csv_exporter.py b/benchmarks/utilities/csv_exporter.py index 080aa22e4f..2841e867f6 100755 --- a/benchmarks/utilities/csv_exporter.py +++ b/benchmarks/utilities/csv_exporter.py @@ -1,6 +1,6 @@ #!/bin/python3 -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import csv import json diff --git a/benchmarks/utilities/fill_db.ts b/benchmarks/utilities/fill_db.ts index 45c1412e02..01bd29884f 100644 --- a/benchmarks/utilities/fill_db.ts +++ b/benchmarks/utilities/fill_db.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { diff --git a/benchmarks/utilities/flush_db.ts b/benchmarks/utilities/flush_db.ts index b5a59cc0f2..00d2af086f 100644 --- a/benchmarks/utilities/flush_db.ts +++ b/benchmarks/utilities/flush_db.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { RedisClientType, RedisClusterType } from "redis"; diff --git a/benchmarks/utilities/utils.ts b/benchmarks/utilities/utils.ts index 140dd4fadd..3e1c2e8014 100644 --- a/benchmarks/utilities/utils.ts +++ b/benchmarks/utilities/utils.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import commandLineArgs from "command-line-args"; diff --git a/csharp/.editorconfig b/csharp/.editorconfig index 4a0f9f3bb6..d05fdf9728 100644 --- a/csharp/.editorconfig +++ b/csharp/.editorconfig @@ -7,7 +7,7 @@ indent_size = 2 [*.cs] # License header -file_header_template = Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +file_header_template = Copyright Valkey GLIDE 
Project Contributors - SPDX Identifier: Apache-2.0 # Organize usings dotnet_separate_import_directive_groups = true diff --git a/csharp/lib/AsyncClient.cs b/csharp/lib/AsyncClient.cs index db28bd7194..3e6aab1ba8 100644 --- a/csharp/lib/AsyncClient.cs +++ b/csharp/lib/AsyncClient.cs @@ -1,4 +1,4 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 using System.Buffers; using System.Runtime.InteropServices; diff --git a/csharp/lib/Logger.cs b/csharp/lib/Logger.cs index fc30584323..814737e649 100644 --- a/csharp/lib/Logger.cs +++ b/csharp/lib/Logger.cs @@ -1,4 +1,4 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 using System.Runtime.InteropServices; diff --git a/csharp/lib/Message.cs b/csharp/lib/Message.cs index fd6d9090f7..9e3cdd4d2e 100644 --- a/csharp/lib/Message.cs +++ b/csharp/lib/Message.cs @@ -1,4 +1,4 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 using System.Diagnostics; using System.Runtime.CompilerServices; diff --git a/csharp/lib/MessageContainer.cs b/csharp/lib/MessageContainer.cs index 18073a62d2..d2baf6e2cb 100644 --- a/csharp/lib/MessageContainer.cs +++ b/csharp/lib/MessageContainer.cs @@ -1,4 +1,4 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 using System.Collections.Concurrent; diff --git a/csharp/lib/Properties/AssemblyInfo.cs b/csharp/lib/Properties/AssemblyInfo.cs index e7e05eb672..9ddad510f9 100644 --- a/csharp/lib/Properties/AssemblyInfo.cs +++ b/csharp/lib/Properties/AssemblyInfo.cs @@ -1,4 +1,4 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright 
Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 using System.Runtime.CompilerServices; diff --git a/csharp/lib/src/lib.rs b/csharp/lib/src/lib.rs index fce015a376..73a4be8681 100644 --- a/csharp/lib/src/lib.rs +++ b/csharp/lib/src/lib.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use glide_core::client; use glide_core::client::Client as GlideClient; @@ -59,7 +59,7 @@ fn create_client_internal( .thread_name("GLIDE for Redis C# thread") .build()?; let _runtime_handle = runtime.enter(); - let client = runtime.block_on(GlideClient::new(request)).unwrap(); // TODO - handle errors. + let client = runtime.block_on(GlideClient::new(request, None)).unwrap(); // TODO - handle errors. Ok(Client { client, success_callback, diff --git a/csharp/tests/Integration/GetAndSet.cs b/csharp/tests/Integration/GetAndSet.cs index 78c6a74180..792741cf44 100644 --- a/csharp/tests/Integration/GetAndSet.cs +++ b/csharp/tests/Integration/GetAndSet.cs @@ -1,4 +1,4 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 using System.Runtime.InteropServices; diff --git a/csharp/tests/Integration/IntegrationTestBase.cs b/csharp/tests/Integration/IntegrationTestBase.cs index 2f507e0473..8e909af17e 100644 --- a/csharp/tests/Integration/IntegrationTestBase.cs +++ b/csharp/tests/Integration/IntegrationTestBase.cs @@ -1,4 +1,4 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 using System.Diagnostics; diff --git a/csharp/tests/Usings.cs b/csharp/tests/Usings.cs index 7bb49c0d1f..a14d42be1a 100644 --- a/csharp/tests/Usings.cs +++ b/csharp/tests/Usings.cs @@ -1,3 +1,3 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX 
Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 global using Xunit; diff --git a/examples/node/index.ts b/examples/node/index.ts index ec4d5c9d51..ec7533f547 100644 --- a/examples/node/index.ts +++ b/examples/node/index.ts @@ -1,8 +1,8 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ -import { Logger, RedisClient, RedisClusterClient } from "@aws/glide-for-redis"; +import { GlideClient, GlideClusterClient, Logger } from "@aws/glide-for-redis"; async function sendPingToNode() { // When in Redis is in standalone mode, add address of the primary node, and any replicas you'd like to be able to read from. @@ -12,8 +12,8 @@ async function sendPingToNode() { port: 6379, }, ]; - // Check `RedisClientConfiguration/ClusterClientConfiguration` for additional options. - const client = await RedisClient.createClient({ + // Check `GlideClientConfiguration/ClusterClientConfiguration` for additional options. + const client = await GlideClient.createClient({ addresses: addresses, // if the server uses TLS, you'll need to enable it. Otherwise the connection attempt will time out silently. // useTLS: true, @@ -26,7 +26,7 @@ async function sendPingToNode() { client.close(); } -async function send_set_and_get(client: RedisClient | RedisClusterClient) { +async function send_set_and_get(client: GlideClient | GlideClusterClient) { const set_response = await client.set("foo", "bar"); console.log(`Set response is = ${set_response}`); const get_response = await client.get("foo"); @@ -41,8 +41,8 @@ async function sendPingToRandomNodeInCluster() { port: 6380, }, ]; - // Check `RedisClientConfiguration/ClusterClientConfiguration` for additional options. - const client = await RedisClusterClient.createClient({ + // Check `GlideClientConfiguration/ClusterClientConfiguration` for additional options. 
+ const client = await GlideClusterClient.createClient({ addresses: addresses, // if the cluster nodes use TLS, you'll need to enable it. Otherwise the connection attempt will time out silently. // useTLS: true, diff --git a/examples/python/client_example.py b/examples/python/client_example.py index 7620348a5b..3d5941f4ff 100755 --- a/examples/python/client_example.py +++ b/examples/python/client_example.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import asyncio from typing import Optional, Union @@ -6,11 +6,11 @@ from glide import ( AllNodes, BaseClientConfiguration, + GlideClient, + GlideClusterClient, Logger, LogLevel, NodeAddress, - RedisClient, - RedisClusterClient, ) @@ -28,7 +28,7 @@ def set_file_logger(level: LogLevel = LogLevel.WARN, file: Optional[str] = None) Logger.set_logger_config(level, file) -async def send_set_and_get(client: Union[RedisClient, RedisClusterClient]): +async def send_set_and_get(client: Union[GlideClient, GlideClusterClient]): set_response = await client.set("foo", "bar") print(f"Set response is = {set_response}") get_response = await client.get("foo") @@ -39,14 +39,14 @@ async def test_standalone_client(host: str = "localhost", port: int = 6379): # When in Redis is in standalone mode, add address of the primary node, # and any replicas you'd like to be able to read from. addresses = [NodeAddress(host, port)] - # Check `RedisClientConfiguration/ClusterClientConfiguration` for additional options. + # Check `GlideClientConfiguration/ClusterClientConfiguration` for additional options. config = BaseClientConfiguration( addresses=addresses, - client_name="test_standalone_client" + client_name="test_standalone_client", # if the server use TLS, you'll need to enable it. Otherwise the connection attempt will time out silently. 
# use_tls=True ) - client = await RedisClient.create(config) + client = await GlideClient.create(config) # Send SET and GET await send_set_and_get(client) @@ -58,14 +58,14 @@ async def test_standalone_client(host: str = "localhost", port: int = 6379): async def test_cluster_client(host: str = "localhost", port: int = 6379): # When in Redis is cluster mode, add address of any nodes, and the client will find all nodes in the cluster. addresses = [NodeAddress(host, port)] - # Check `RedisClientConfiguration/ClusterClientConfiguration` for additional options. + # Check `GlideClientConfiguration/ClusterClientConfiguration` for additional options. config = BaseClientConfiguration( addresses=addresses, - client_name="test_cluster_client" + client_name="test_cluster_client", # if the cluster nodes use TLS, you'll need to enable it. Otherwise the connection attempt will time out silently. # use_tls=True ) - client = await RedisClusterClient.create(config) + client = await GlideClusterClient.create(config) # Send SET and GET await send_set_and_get(client) diff --git a/glide-core/Cargo.toml b/glide-core/Cargo.toml index dc623c6714..1d934fabc3 100644 --- a/glide-core/Cargo.toml +++ b/glide-core/Cargo.toml @@ -20,7 +20,7 @@ tokio-retry = "0.3.0" protobuf = { version= "3", features = ["bytes", "with-bytes"], optional = true } integer-encoding = { version = "4.0.0", optional = true } thiserror = "1" -rand = { version = "0.8.5", optional = true } +rand = { version = "0.8.5" } futures-intrusive = "0.5.0" directories = { version = "4.0", optional = true } once_cell = "1.18.0" @@ -28,7 +28,7 @@ arcstr = "1.1.5" sha1_smol = "1.0.0" [features] -socket-layer = ["directories", "integer-encoding", "num_cpus", "protobuf", "tokio-util", "bytes", "rand"] +socket-layer = ["directories", "integer-encoding", "num_cpus", "protobuf", "tokio-util", "bytes"] [dev-dependencies] rsevents = "0.3.1" diff --git a/glide-core/THIRD_PARTY_LICENSES_RUST b/glide-core/THIRD_PARTY_LICENSES_RUST index 
e22b97f8f7..ddacaea254 100644 --- a/glide-core/THIRD_PARTY_LICENSES_RUST +++ b/glide-core/THIRD_PARTY_LICENSES_RUST @@ -2993,7 +2993,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: bitflags:2.5.0 +Package: bitflags:2.6.0 The following copyrights and licenses were found in the source code of this package: @@ -11851,7 +11851,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: lazy_static:1.4.0 +Package: lazy_static:1.5.0 The following copyrights and licenses were found in the source code of this package: @@ -13000,7 +13000,7 @@ The following copyrights and licenses were found in the source code of this pack ---- -Package: memchr:2.7.2 +Package: memchr:2.7.4 The following copyrights and licenses were found in the source code of this package: @@ -13052,7 +13052,7 @@ For more information, please refer to ---- -Package: miniz_oxide:0.7.3 +Package: miniz_oxide:0.7.4 The following copyrights and licenses were found in the source code of this package: @@ -17727,7 +17727,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: proc-macro2:1.0.85 +Package: proc-macro2:1.0.86 The following copyrights and licenses were found in the source code of this package: @@ -18953,7 +18953,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---- -Package: redox_syscall:0.5.1 +Package: redox_syscall:0.5.2 The following copyrights and licenses were found in the source code of this package: @@ -22176,7 +22176,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: subtle:2.5.0 +Package: subtle:2.6.1 The following copyrights and licenses were found in the source code of this package: @@ -22436,7 +22436,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: syn:2.0.66 +Package: syn:2.0.68 The following copyrights and licenses were found in the source code of this package: @@ -24039,7 +24039,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
---- -Package: tinyvec:1.6.0 +Package: tinyvec:1.6.1 The following copyrights and licenses were found in the source code of this package: diff --git a/glide-core/benches/connections_benchmark.rs b/glide-core/benches/connections_benchmark.rs index fc98933de8..f52a91d3ca 100644 --- a/glide-core/benches/connections_benchmark.rs +++ b/glide-core/benches/connections_benchmark.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use criterion::{criterion_group, criterion_main, Criterion}; use futures::future::join_all; @@ -83,7 +83,7 @@ fn get_connection_info(address: ConnectionAddr) -> redis::ConnectionInfo { fn multiplexer_benchmark(c: &mut Criterion, address: ConnectionAddr, group: &str) { benchmark(c, address, "multiplexer", group, |address, runtime| { let client = redis::Client::open(get_connection_info(address)).unwrap(); - runtime.block_on(async { client.get_multiplexed_tokio_connection().await.unwrap() }) + runtime.block_on(async { client.get_multiplexed_tokio_connection(None).await.unwrap() }) }); } @@ -120,7 +120,7 @@ fn cluster_connection_benchmark( builder = builder.read_from_replicas(); } let client = builder.build().unwrap(); - client.get_async_connection().await + client.get_async_connection(None).await }) .unwrap() }); diff --git a/glide-core/benches/memory_benchmark.rs b/glide-core/benches/memory_benchmark.rs index c6e307bae2..1948d9a2cd 100644 --- a/glide-core/benches/memory_benchmark.rs +++ b/glide-core/benches/memory_benchmark.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use glide_core::{ client::Client, @@ -26,7 +26,7 @@ where { let runtime = Builder::new_current_thread().enable_all().build().unwrap(); runtime.block_on(async { - let client = 
Client::new(create_connection_request().into()) + let client = Client::new(create_connection_request().into(), None) .await .unwrap(); f(client).await; diff --git a/glide-core/benches/rotating_buffer_benchmark.rs b/glide-core/benches/rotating_buffer_benchmark.rs index 30f52f702f..581a278453 100644 --- a/glide-core/benches/rotating_buffer_benchmark.rs +++ b/glide-core/benches/rotating_buffer_benchmark.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use std::io::Write; diff --git a/glide-core/build.rs b/glide-core/build.rs index 9d41cd2491..a20b5dadea 100644 --- a/glide-core/build.rs +++ b/glide-core/build.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ #[cfg(feature = "socket-layer")] diff --git a/glide-core/src/client/mod.rs b/glide-core/src/client/mod.rs index 3197c82a23..9961e3cf1c 100644 --- a/glide-core/src/client/mod.rs +++ b/glide-core/src/client/mod.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ mod types; @@ -9,7 +9,7 @@ use logger_core::log_info; use redis::aio::ConnectionLike; use redis::cluster_async::ClusterConnection; use redis::cluster_routing::{Routable, RoutingInfo, SingleNodeRoutingInfo}; -use redis::{Cmd, ErrorKind, Value}; +use redis::{Cmd, ErrorKind, PushInfo, Value}; use redis::{RedisError, RedisResult}; pub use standalone_client::StandaloneClient; use std::io; @@ -21,6 +21,7 @@ use self::value_conversion::{convert_to_expected_type, expected_type_for_cmd, ge mod reconnecting_connection; mod standalone_client; mod value_conversion; +use tokio::sync::mpsc; pub const HEARTBEAT_SLEEP_DURATION: Duration = Duration::from_secs(1); @@ -44,6 +45,7 
@@ pub(super) fn get_redis_connection_info( let protocol = connection_request.protocol.unwrap_or_default(); let db = connection_request.database_id; let client_name = connection_request.client_name.clone(); + let pubsub_subscriptions = connection_request.pubsub_subscriptions.clone(); match &connection_request.authentication_info { Some(info) => redis::RedisConnectionInfo { db, @@ -51,11 +53,13 @@ pub(super) fn get_redis_connection_info( password: info.password.clone(), protocol, client_name, + pubsub_subscriptions, }, None => redis::RedisConnectionInfo { db, protocol, client_name, + pubsub_subscriptions, ..Default::default() }, } @@ -373,6 +377,7 @@ fn to_duration(time_in_millis: Option, default: Duration) -> Duration { async fn create_cluster_client( request: ConnectionRequest, + push_sender: Option>, ) -> RedisResult { // TODO - implement timeout for each connection attempt let tls_mode = request.tls_mode.unwrap_or_default(); @@ -410,8 +415,11 @@ async fn create_cluster_client( }; builder = builder.tls(tls); } + if let Some(pubsub_subscriptions) = redis_connection_info.pubsub_subscriptions { + builder = builder.pubsub_subscriptions(pubsub_subscriptions); + } let client = builder.build()?; - client.get_async_connection().await + client.get_async_connection(push_sender).await } #[derive(thiserror::Error)] @@ -520,13 +528,22 @@ fn sanitized_request_string(request: &ConnectionRequest) -> String { String::new() }; + let pubsub_subscriptions = request + .pubsub_subscriptions + .as_ref() + .map(|pubsub_subscriptions| format!("\nPubsub subscriptions: {pubsub_subscriptions:?}")) + .unwrap_or_default(); + format!( - "\nAddresses: {addresses}{tls_mode}{cluster_mode}{request_timeout}{rfr_strategy}{connection_retry_strategy}{database_id}{protocol}{client_name}{periodic_checks}", + "\nAddresses: {addresses}{tls_mode}{cluster_mode}{request_timeout}{rfr_strategy}{connection_retry_strategy}{database_id}{protocol}{client_name}{periodic_checks}{pubsub_subscriptions}", ) } impl 
Client { - pub async fn new(request: ConnectionRequest) -> Result { + pub async fn new( + request: ConnectionRequest, + push_sender: Option>, + ) -> Result { const DEFAULT_CLIENT_CREATION_TIMEOUT: Duration = Duration::from_secs(10); log_info( @@ -536,13 +553,13 @@ impl Client { let request_timeout = to_duration(request.request_timeout, DEFAULT_RESPONSE_TIMEOUT); tokio::time::timeout(DEFAULT_CLIENT_CREATION_TIMEOUT, async move { let internal_client = if request.cluster_mode_enabled { - let client = create_cluster_client(request) + let client = create_cluster_client(request, push_sender) .await .map_err(ConnectionError::Cluster)?; ClientWrapper::Cluster { client } } else { ClientWrapper::Standalone( - StandaloneClient::create_client(request) + StandaloneClient::create_client(request, push_sender) .await .map_err(ConnectionError::Standalone)?, ) diff --git a/glide-core/src/client/reconnecting_connection.rs b/glide-core/src/client/reconnecting_connection.rs index ac33f6c005..c76da9cf42 100644 --- a/glide-core/src/client/reconnecting_connection.rs +++ b/glide-core/src/client/reconnecting_connection.rs @@ -1,17 +1,18 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use super::{NodeAddress, TlsMode}; use crate::retry_strategies::RetryStrategy; use futures_intrusive::sync::ManualResetEvent; use logger_core::{log_debug, log_trace, log_warn}; use redis::aio::MultiplexedConnection; -use redis::{RedisConnectionInfo, RedisError, RedisResult}; +use redis::{PushInfo, RedisConnectionInfo, RedisError, RedisResult}; use std::fmt; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; use std::sync::Mutex; use std::time::Duration; +use tokio::sync::mpsc; use tokio::task; use tokio_retry::Retry; @@ -45,6 +46,7 @@ struct InnerReconnectingConnection { #[derive(Clone)] pub(super) struct ReconnectingConnection { inner: Arc, + push_sender: Option>, } impl 
fmt::Debug for ReconnectingConnection { @@ -53,10 +55,13 @@ impl fmt::Debug for ReconnectingConnection { } } -async fn get_multiplexed_connection(client: &redis::Client) -> RedisResult { +async fn get_multiplexed_connection( + client: &redis::Client, + push_sender: Option>, +) -> RedisResult { run_with_timeout( Some(DEFAULT_CONNECTION_ATTEMPT_TIMEOUT), - client.get_multiplexed_async_connection(), + client.get_multiplexed_async_connection(push_sender), ) .await } @@ -64,9 +69,10 @@ async fn get_multiplexed_connection(client: &redis::Client) -> RedisResult>, ) -> Result { let client = &connection_backend.connection_info; - let action = || get_multiplexed_connection(client); + let action = || get_multiplexed_connection(client, push_sender.clone()); match Retry::spawn(retry_strategy.get_iterator(), action).await { Ok(connection) => { @@ -85,6 +91,7 @@ async fn create_connection( state: Mutex::new(ConnectionState::Connected(connection)), backend: connection_backend, }), + push_sender, }) } Err(err) => { @@ -103,6 +110,7 @@ async fn create_connection( state: Mutex::new(ConnectionState::InitializedDisconnected), backend: connection_backend, }), + push_sender, }; connection.reconnect(); Err((connection, err)) @@ -141,6 +149,7 @@ impl ReconnectingConnection { connection_retry_strategy: RetryStrategy, redis_connection_info: RedisConnectionInfo, tls_mode: TlsMode, + push_sender: Option>, ) -> Result { log_debug( "connection creation", @@ -153,7 +162,7 @@ impl ReconnectingConnection { connection_available_signal: ManualResetEvent::new(true), client_dropped_flagged: AtomicBool::new(false), }; - create_connection(backend, connection_retry_strategy).await + create_connection(backend, connection_retry_strategy, push_sender).await } fn node_address(&self) -> String { @@ -211,6 +220,7 @@ impl ReconnectingConnection { log_debug("reconnect", "starting"); let connection_clone = self.clone(); + let push_sender = self.push_sender.clone(); // The reconnect task is spawned instead of 
awaited here, so that the reconnect attempt will continue in the // background, regardless of whether the calling task is dropped or not. task::spawn(async move { @@ -224,7 +234,7 @@ impl ReconnectingConnection { // Client was dropped, reconnection attempts can stop return; } - match get_multiplexed_connection(client).await { + match get_multiplexed_connection(client, push_sender.clone()).await { Ok(mut connection) => { if connection .send_packed_command(&redis::cmd("PING")) diff --git a/glide-core/src/client/standalone_client.rs b/glide-core/src/client/standalone_client.rs index 736155bbf0..f59e18eac8 100644 --- a/glide-core/src/client/standalone_client.rs +++ b/glide-core/src/client/standalone_client.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use super::get_redis_connection_info; use super::reconnecting_connection::ReconnectingConnection; @@ -9,10 +9,12 @@ use futures::{future, stream, StreamExt}; #[cfg(standalone_heartbeat)] use logger_core::log_debug; use logger_core::log_warn; +use rand::Rng; use redis::cluster_routing::{self, is_readonly_cmd, ResponsePolicy, Routable, RoutingInfo}; -use redis::{RedisError, RedisResult, Value}; +use redis::{PushInfo, RedisError, RedisResult, Value}; use std::sync::atomic::AtomicUsize; use std::sync::Arc; +use tokio::sync::mpsc; #[cfg(standalone_heartbeat)] use tokio::task; @@ -96,22 +98,33 @@ impl std::fmt::Debug for StandaloneClientConnectionError { impl StandaloneClient { pub async fn create_client( connection_request: ConnectionRequest, + push_sender: Option>, ) -> Result { if connection_request.addresses.is_empty() { return Err(StandaloneClientConnectionError::NoAddressesProvided); } - let redis_connection_info = get_redis_connection_info(&connection_request); + let mut redis_connection_info = get_redis_connection_info(&connection_request); + let pubsub_connection_info = 
redis_connection_info.clone(); + redis_connection_info.pubsub_subscriptions = None; let retry_strategy = RetryStrategy::new(connection_request.connection_retry_strategy); let tls_mode = connection_request.tls_mode; let node_count = connection_request.addresses.len(); + // randomize pubsub nodes, maybe a better option is to always use the primary + let pubsub_node_index = rand::thread_rng().gen_range(0..node_count); + let pubsub_addr = &connection_request.addresses[pubsub_node_index]; let mut stream = stream::iter(connection_request.addresses.iter()) .map(|address| async { get_connection_and_replication_info( address, &retry_strategy, - &redis_connection_info, + if address.to_string() != pubsub_addr.to_string() { &redis_connection_info } else { &pubsub_connection_info }, tls_mode.unwrap_or(TlsMode::NoTls), + &push_sender, ) .await .map_err(|err| (format!("{}:{}", address.host, address.port), err)) @@ -392,12 +405,14 @@ async fn get_connection_and_replication_info( retry_strategy: &RetryStrategy, connection_info: &redis::RedisConnectionInfo, tls_mode: TlsMode, + push_sender: &Option>, ) -> Result<(ReconnectingConnection, Value), (ReconnectingConnection, RedisError)> { let result = ReconnectingConnection::new( address, retry_strategy.clone(), connection_info.clone(), tls_mode, + push_sender.clone(), ) .await; let reconnecting_connection = match result { diff --git a/glide-core/src/client/types.rs b/glide-core/src/client/types.rs index f942f64174..c26cdfb93f 100644 --- a/glide-core/src/client/types.rs +++ b/glide-core/src/client/types.rs @@ -1,7 +1,9 @@ /* - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +use logger_core::log_warn; +use std::collections::HashSet; use std::time::Duration; #[cfg(feature = "socket-layer")] @@ -20,6 +22,7 @@ pub struct ConnectionRequest { pub request_timeout: Option, pub connection_retry_strategy: Option, pub
periodic_checks: Option, + pub pubsub_subscriptions: Option, } pub struct AuthenticationInfo { @@ -150,6 +153,39 @@ impl From for ConnectionRequest { PeriodicCheck::Disabled } }); + let mut pubsub_subscriptions: Option = None; + if let Some(protobuf_pubsub) = value.pubsub_subscriptions.0 { + let mut redis_pubsub = redis::PubSubSubscriptionInfo::new(); + for (pubsub_type, channels_patterns) in + protobuf_pubsub.channels_or_patterns_by_type.iter() + { + let kind = match *pubsub_type { + 0 => redis::PubSubSubscriptionKind::Exact, + 1 => redis::PubSubSubscriptionKind::Pattern, + 2 => redis::PubSubSubscriptionKind::Sharded, + 3_u32..=u32::MAX => { + log_warn( + "client creation", + format!( + "Omitting pubsub subscription on an unknown type: {:?}", + *pubsub_type + ), + ); + continue; + } + }; + + for channel_pattern in channels_patterns.channels_or_patterns.iter() { + redis_pubsub + .entry(kind) + .and_modify(|channels_patterns| { + channels_patterns.insert(channel_pattern.to_vec()); + }) + .or_insert(HashSet::from([channel_pattern.to_vec()])); + } + } + pubsub_subscriptions = Some(redis_pubsub); + } ConnectionRequest { read_from, @@ -163,6 +199,7 @@ impl From for ConnectionRequest { request_timeout, connection_retry_strategy, periodic_checks, + pubsub_subscriptions, } } } diff --git a/glide-core/src/client/value_conversion.rs b/glide-core/src/client/value_conversion.rs index f5b8fc04d2..dff852e839 100644 --- a/glide-core/src/client/value_conversion.rs +++ b/glide-core/src/client/value_conversion.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use redis::{ cluster_routing::Routable, from_owned_redis_value, Cmd, ErrorKind, RedisResult, Value, @@ -33,6 +33,7 @@ pub(crate) enum ExpectedReturnType<'a> { KeyWithMemberAndScore, FunctionStatsReturnType, GeoSearchReturnType, + SimpleString, } pub(crate) fn convert_to_expected_type( @@ -141,6 
+142,9 @@ pub(crate) fn convert_to_expected_type( ExpectedReturnType::BulkString => Ok(Value::BulkString( from_owned_redis_value::(value)?.into(), )), + ExpectedReturnType::SimpleString => Ok(Value::SimpleString( + from_owned_redis_value::(value)?, + )), ExpectedReturnType::JsonToggleReturnType => match value { Value::Array(array) => { let converted_array: RedisResult> = array @@ -791,6 +795,7 @@ fn convert_to_array_of_pairs( value_expected_return_type: Option, ) -> RedisResult { match response { + Value::Nil => Ok(response), Value::Array(ref array) if array.is_empty() || matches!(array[0], Value::Array(_)) => { // The server response is an empty array or a RESP3 array of pairs. In RESP3, the values in the pairs are // already of the correct type, so we do not need to convert them and `response` is in the correct format. @@ -852,20 +857,35 @@ pub(crate) fn expected_type_for_cmd(cmd: &Cmd) -> Option { key_type: &Some(ExpectedReturnType::BulkString), value_type: &Some(ExpectedReturnType::ArrayOfPairs), }), - b"XREAD" => Some(ExpectedReturnType::Map { + b"XREAD" | b"XREADGROUP" => Some(ExpectedReturnType::Map { key_type: &Some(ExpectedReturnType::BulkString), value_type: &Some(ExpectedReturnType::Map { key_type: &Some(ExpectedReturnType::BulkString), value_type: &Some(ExpectedReturnType::ArrayOfPairs), }), }), + b"LCS" => cmd.position(b"IDX").map(|_| ExpectedReturnType::Map { + key_type: &Some(ExpectedReturnType::SimpleString), + value_type: &None, + }), b"INCRBYFLOAT" | b"HINCRBYFLOAT" | b"ZINCRBY" => Some(ExpectedReturnType::Double), - b"HEXISTS" | b"HSETNX" | b"EXPIRE" | b"EXPIREAT" | b"PEXPIRE" | b"PEXPIREAT" - | b"SISMEMBER" | b"PERSIST" | b"SMOVE" | b"RENAMENX" | b"MOVE" | b"COPY" | b"MSETNX" => { - Some(ExpectedReturnType::Boolean) - } + b"HEXISTS" + | b"HSETNX" + | b"EXPIRE" + | b"EXPIREAT" + | b"PEXPIRE" + | b"PEXPIREAT" + | b"SISMEMBER" + | b"PERSIST" + | b"SMOVE" + | b"RENAMENX" + | b"MOVE" + | b"COPY" + | b"MSETNX" + | b"XGROUP DESTROY" + | b"XGROUP 
CREATECONSUMER" => Some(ExpectedReturnType::Boolean), b"SMISMEMBER" => Some(ExpectedReturnType::ArrayOfBools), - b"SMEMBERS" | b"SINTER" | b"SDIFF" => Some(ExpectedReturnType::Set), + b"SMEMBERS" | b"SINTER" | b"SDIFF" | b"SUNION" => Some(ExpectedReturnType::Set), b"ZSCORE" | b"GEODIST" => Some(ExpectedReturnType::DoubleOrNull), b"ZMSCORE" => Some(ExpectedReturnType::ArrayOfDoubleOrNull), b"ZPOPMIN" | b"ZPOPMAX" => Some(ExpectedReturnType::MapOfStringToDouble), @@ -1194,6 +1214,28 @@ mod tests { )); } + #[test] + fn convert_xreadgroup() { + assert!(matches!( + expected_type_for_cmd( + redis::cmd("XREADGROUP") + .arg("GROUP") + .arg("group") + .arg("consumer") + .arg("streams") + .arg("key") + .arg("id") + ), + Some(ExpectedReturnType::Map { + key_type: &Some(ExpectedReturnType::BulkString), + value_type: &Some(ExpectedReturnType::Map { + key_type: &Some(ExpectedReturnType::BulkString), + value_type: &Some(ExpectedReturnType::ArrayOfPairs), + }), + }) + )); + } + #[test] fn test_convert_empty_array_to_map_is_nil() { let mut cmd = redis::cmd("XREAD"); @@ -2341,4 +2383,14 @@ mod tests { assert!(expected_type_for_cmd(redis::cmd("GEOSEARCH").arg("key")).is_none()); } + #[test] + fn convert_lcs_idx() { + assert!(matches!( + expected_type_for_cmd(redis::cmd("LCS").arg("key1").arg("key2").arg("IDX")), + Some(ExpectedReturnType::Map { + key_type: &Some(ExpectedReturnType::SimpleString), + value_type: &None, + }) + )); + } } diff --git a/glide-core/src/errors.rs b/glide-core/src/errors.rs index 1c05aad84b..b5f9b1af9e 100644 --- a/glide-core/src/errors.rs +++ b/glide-core/src/errors.rs @@ -1,5 +1,5 @@ /* - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use redis::RedisError; diff --git a/glide-core/src/lib.rs b/glide-core/src/lib.rs index f904928be1..5bbc431e82 100644 --- a/glide-core/src/lib.rs +++ b/glide-core/src/lib.rs @@ -1,5 +1,5 @@ /* - * Copyright 
GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ #[cfg(feature = "socket-layer")] diff --git a/glide-core/src/protobuf/connection_request.proto b/glide-core/src/protobuf/connection_request.proto index ecdeeae1b2..a186a1f41f 100644 --- a/glide-core/src/protobuf/connection_request.proto +++ b/glide-core/src/protobuf/connection_request.proto @@ -36,6 +36,22 @@ message PeriodicChecksManualInterval { message PeriodicChecksDisabled { } +enum PubSubChannelType { + Exact = 0; + Pattern = 1; + Sharded = 2; +} + +message PubSubChannelsOrPatterns +{ + repeated bytes channels_or_patterns = 1; +} + +message PubSubSubscriptions +{ + map channels_or_patterns_by_type = 1; +} + // IMPORTANT - if you add fields here, you probably need to add them also in client/mod.rs:`sanitized_request_string`. message ConnectionRequest { repeated NodeAddress addresses = 1; @@ -52,6 +68,7 @@ message ConnectionRequest { PeriodicChecksManualInterval periodic_checks_manual_interval = 11; PeriodicChecksDisabled periodic_checks_disabled = 12; } + PubSubSubscriptions pubsub_subscriptions = 13; } message ConnectionRetryStrategy { diff --git a/glide-core/src/protobuf/redis_request.proto b/glide-core/src/protobuf/redis_request.proto index 8056bf308e..e3d2b01b2c 100644 --- a/glide-core/src/protobuf/redis_request.proto +++ b/glide-core/src/protobuf/redis_request.proto @@ -202,6 +202,8 @@ enum RequestType { Sort = 160; FunctionKill = 161; FunctionStats = 162; + FCallReadOnly = 163; + FlushDB = 164; LSet = 165; XDel = 166; XRange = 167; @@ -219,6 +221,22 @@ enum RequestType { LPos = 180; LCS = 181; GeoSearch = 182; + Watch = 183; + UnWatch = 184; + GeoSearchStore = 185; + SUnion = 186; + Publish = 187; + SPublish = 188; + XGroupCreateConsumer = 189; + XGroupDelConsumer = 190; + RandomKey = 191; + GetEx = 192; + Dump = 193; + Restore = 194; + SortReadOnly = 195; + FunctionDump = 196; + FunctionRestore = 197; + 
XPending = 198; } message Command { diff --git a/glide-core/src/protobuf/response.proto b/glide-core/src/protobuf/response.proto index 33591112ba..871d38e476 100644 --- a/glide-core/src/protobuf/response.proto +++ b/glide-core/src/protobuf/response.proto @@ -21,6 +21,7 @@ message Response { RequestError request_error = 4; string closing_error = 5; } + bool is_push = 6; } enum ConstantResponse { diff --git a/glide-core/src/request_type.rs b/glide-core/src/request_type.rs index 8e417b91f4..805db2dc91 100644 --- a/glide-core/src/request_type.rs +++ b/glide-core/src/request_type.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use redis::{cmd, Cmd}; @@ -172,6 +172,8 @@ pub enum RequestType { Sort = 160, FunctionKill = 161, FunctionStats = 162, + FCallReadOnly = 163, + FlushDB = 164, LSet = 165, XDel = 166, XRange = 167, @@ -189,6 +191,22 @@ pub enum RequestType { LPos = 180, LCS = 181, GeoSearch = 182, + Watch = 183, + UnWatch = 184, + GeoSearchStore = 185, + SUnion = 186, + Publish = 187, + SPublish = 188, + XGroupCreateConsumer = 189, + XGroupDelConsumer = 190, + RandomKey = 191, + GetEx = 192, + Dump = 193, + Restore = 194, + SortReadOnly = 195, + FunctionDump = 196, + FunctionRestore = 197, + XPending = 198, } fn get_two_word_command(first: &str, second: &str) -> Cmd { @@ -363,6 +381,8 @@ impl From<::protobuf::EnumOrUnknown> for RequestType { ProtobufRequestType::XLen => RequestType::XLen, ProtobufRequestType::FunctionKill => RequestType::FunctionKill, ProtobufRequestType::FunctionStats => RequestType::FunctionStats, + ProtobufRequestType::FCallReadOnly => RequestType::FCallReadOnly, + ProtobufRequestType::FlushDB => RequestType::FlushDB, ProtobufRequestType::LSet => RequestType::LSet, ProtobufRequestType::XDel => RequestType::XDel, ProtobufRequestType::XRange => RequestType::XRange, @@ -381,6 +401,22 @@ impl 
From<::protobuf::EnumOrUnknown> for RequestType { ProtobufRequestType::LPos => RequestType::LPos, ProtobufRequestType::LCS => RequestType::LCS, ProtobufRequestType::GeoSearch => RequestType::GeoSearch, + ProtobufRequestType::SUnion => RequestType::SUnion, + ProtobufRequestType::Watch => RequestType::Watch, + ProtobufRequestType::UnWatch => RequestType::UnWatch, + ProtobufRequestType::GeoSearchStore => RequestType::GeoSearchStore, + ProtobufRequestType::Publish => RequestType::Publish, + ProtobufRequestType::SPublish => RequestType::SPublish, + ProtobufRequestType::XGroupCreateConsumer => RequestType::XGroupCreateConsumer, + ProtobufRequestType::XGroupDelConsumer => RequestType::XGroupDelConsumer, + ProtobufRequestType::RandomKey => RequestType::RandomKey, + ProtobufRequestType::GetEx => RequestType::GetEx, + ProtobufRequestType::Dump => RequestType::Dump, + ProtobufRequestType::Restore => RequestType::Restore, + ProtobufRequestType::SortReadOnly => RequestType::SortReadOnly, + ProtobufRequestType::FunctionDump => RequestType::FunctionDump, + ProtobufRequestType::FunctionRestore => RequestType::FunctionRestore, + ProtobufRequestType::XPending => RequestType::XPending, } } } @@ -551,6 +587,8 @@ impl RequestType { RequestType::XLen => Some(cmd("XLEN")), RequestType::FunctionKill => Some(get_two_word_command("FUNCTION", "KILL")), RequestType::FunctionStats => Some(get_two_word_command("FUNCTION", "STATS")), + RequestType::FCallReadOnly => Some(cmd("FCALL_RO")), + RequestType::FlushDB => Some(cmd("FLUSHDB")), RequestType::LSet => Some(cmd("LSET")), RequestType::XDel => Some(cmd("XDEL")), RequestType::XRange => Some(cmd("XRANGE")), @@ -569,6 +607,24 @@ impl RequestType { RequestType::LPos => Some(cmd("LPOS")), RequestType::LCS => Some(cmd("LCS")), RequestType::GeoSearch => Some(cmd("GEOSEARCH")), + RequestType::SUnion => Some(cmd("SUNION")), + RequestType::Watch => Some(cmd("WATCH")), + RequestType::UnWatch => Some(cmd("UNWATCH")), + RequestType::GeoSearchStore => 
Some(cmd("GEOSEARCHSTORE")), + RequestType::Publish => Some(cmd("PUBLISH")), + RequestType::SPublish => Some(cmd("SPUBLISH")), + RequestType::XGroupCreateConsumer => { + Some(get_two_word_command("XGROUP", "CREATECONSUMER")) + } + RequestType::XGroupDelConsumer => Some(get_two_word_command("XGROUP", "DELCONSUMER")), + RequestType::RandomKey => Some(cmd("RANDOMKEY")), + RequestType::GetEx => Some(cmd("GETEX")), + RequestType::Dump => Some(cmd("DUMP")), + RequestType::Restore => Some(cmd("RESTORE")), + RequestType::SortReadOnly => Some(cmd("SORT_RO")), + RequestType::FunctionDump => Some(get_two_word_command("FUNCTION", "DUMP")), + RequestType::FunctionRestore => Some(get_two_word_command("FUNCTION", "RESTORE")), + RequestType::XPending => Some(cmd("XPENDING")), } } } diff --git a/glide-core/src/retry_strategies.rs b/glide-core/src/retry_strategies.rs index 4dd5d7edb7..dbe5683347 100644 --- a/glide-core/src/retry_strategies.rs +++ b/glide-core/src/retry_strategies.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use crate::client::ConnectionRetryStrategy; use std::time::Duration; diff --git a/glide-core/src/rotating_buffer.rs b/glide-core/src/rotating_buffer.rs index 5178b587e7..cbd32313ed 100644 --- a/glide-core/src/rotating_buffer.rs +++ b/glide-core/src/rotating_buffer.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ #[allow(unused_imports)] use bytes::{Bytes, BytesMut}; diff --git a/glide-core/src/scripts_container.rs b/glide-core/src/scripts_container.rs index 251a69e5c3..129e6592c4 100644 --- a/glide-core/src/scripts_container.rs +++ b/glide-core/src/scripts_container.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey 
GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use arcstr::ArcStr; use logger_core::log_info; diff --git a/glide-core/src/socket_listener.rs b/glide-core/src/socket_listener.rs index 82b8df6a69..a2a333f103 100644 --- a/glide-core/src/socket_listener.rs +++ b/glide-core/src/socket_listener.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use super::rotating_buffer::RotatingBuffer; use crate::client::Client; @@ -21,7 +21,7 @@ use redis::cluster_routing::{ }; use redis::cluster_routing::{ResponsePolicy, Routable}; use redis::RedisError; -use redis::{Cmd, Value}; +use redis::{Cmd, PushInfo, Value}; use std::cell::Cell; use std::rc::Rc; use std::{env, str}; @@ -30,6 +30,7 @@ use thiserror::Error; use tokio::io::ErrorKind::AddrInUse; use tokio::net::{UnixListener, UnixStream}; use tokio::runtime::Builder; +use tokio::sync::mpsc; use tokio::sync::mpsc::{channel, Sender}; use tokio::sync::Mutex; use tokio::task; @@ -184,6 +185,7 @@ async fn write_result( ) -> Result<(), io::Error> { let mut response = Response::new(); response.callback_idx = callback_index; + response.is_push = false; response.value = match resp_result { Ok(Value::Okay) => Some(response::response::Value::ConstantResponse( response::ConstantResponse::OK.into(), @@ -473,8 +475,9 @@ pub fn close_socket(socket_path: &String) { async fn create_client( writer: &Rc, request: ConnectionRequest, + push_tx: Option>, ) -> Result { - let client = match Client::new(request.into()).await { + let client = match Client::new(request.into(), push_tx).await { Ok(client) => client, Err(err) => return Err(ClientCreationError::ConnectionError(err)), }; @@ -485,13 +488,14 @@ async fn create_client( async fn wait_for_connection_configuration_and_create_client( client_listener: &mut UnixStreamListener, writer: &Rc, + push_tx: Option>, ) -> Result { // Wait for the server's address 
match client_listener.next_values::().await { Closed(reason) => Err(ClientCreationError::SocketListenerClosed(reason)), ReceivedValues(mut received_requests) => { if let Some(request) = received_requests.pop() { - create_client(writer, request).await + create_client(writer, request, push_tx).await } else { Err(ClientCreationError::UnhandledError( "No received requests".to_string(), @@ -518,6 +522,35 @@ async fn read_values_loop( } } +async fn push_manager_loop(mut push_rx: mpsc::UnboundedReceiver, writer: Rc) { + loop { + let result = push_rx.recv().await; + match result { + None => { + log_error("push manager loop", "got None from push manager"); + return; + } + Some(push_msg) => { + log_debug("push manager loop", format!("got PushInfo: {:?}", push_msg)); + let mut response = Response::new(); + response.callback_idx = 0; // callback_idx is not used with push notifications + response.is_push = true; + response.value = { + let push_val = Value::Push { + kind: (push_msg.kind), + data: (push_msg.data), + }; + let pointer = Box::leak(Box::new(push_val)); + let raw_pointer = pointer as *mut redis::Value; + Some(response::response::Value::RespPointer(raw_pointer as u64)) + }; + + _ = write_to_writer(response, &writer).await; + } + } + } +} + async fn listen_on_client_stream(socket: UnixStream) { let socket = Rc::new(socket); // Spawn a new task to listen on this client's stream @@ -525,14 +558,18 @@ async fn listen_on_client_stream(socket: UnixStream) { let mut client_listener = UnixStreamListener::new(socket.clone()); let accumulated_outputs = Cell::new(Vec::new()); let (sender, mut receiver) = channel(1); + let (push_tx, push_rx) = tokio::sync::mpsc::unbounded_channel(); let writer = Rc::new(Writer { socket, lock: write_lock, accumulated_outputs, closing_sender: sender, }); - let client_creation = - wait_for_connection_configuration_and_create_client(&mut client_listener, &writer); + let client_creation = wait_for_connection_configuration_and_create_client( + &mut 
client_listener, + &writer, + Some(push_tx), + ); let client = match client_creation.await { Ok(conn) => conn, Err(ClientCreationError::SocketListenerClosed(ClosingReason::ReadSocketClosed)) => { @@ -583,6 +620,9 @@ async fn listen_on_client_stream(socket: UnixStream) { } else { log_trace("client closing", "writer closed"); } + }, + _ = push_manager_loop(push_rx, writer.clone()) => { + log_trace("client closing", "push manager closed"); } } log_trace("client closing", "closing connection"); diff --git a/glide-core/tests/test_client.rs b/glide-core/tests/test_client.rs index 682b5de9b9..2dfe9fc248 100644 --- a/glide-core/tests/test_client.rs +++ b/glide-core/tests/test_client.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ mod utilities; @@ -35,6 +35,7 @@ pub(crate) mod shared_client_tests { Client::new( create_connection_request(&[connection_addr.clone()], &configuration) .into(), + None, ) .await .ok() diff --git a/glide-core/tests/test_cluster_client.rs b/glide-core/tests/test_cluster_client.rs index de3e22e15a..1c60dc8c79 100644 --- a/glide-core/tests/test_cluster_client.rs +++ b/glide-core/tests/test_cluster_client.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ mod utilities; diff --git a/glide-core/tests/test_socket_listener.rs b/glide-core/tests/test_socket_listener.rs index d2735aaab0..bfa27ebc9c 100644 --- a/glide-core/tests/test_socket_listener.rs +++ b/glide-core/tests/test_socket_listener.rs @@ -1,5 +1,5 @@ /* - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ #![cfg(feature = "socket-layer")] diff --git a/glide-core/tests/test_standalone_client.rs 
b/glide-core/tests/test_standalone_client.rs index aa7f3b6609..75e3262f80 100644 --- a/glide-core/tests/test_standalone_client.rs +++ b/glide-core/tests/test_standalone_client.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ mod utilities; @@ -199,7 +199,7 @@ mod standalone_client_tests { connection_request.read_from = config.read_from.into(); block_on_all(async { - let mut client = StandaloneClient::create_client(connection_request.into()) + let mut client = StandaloneClient::create_client(connection_request.into(), None) .await .unwrap(); for mock in mocks.drain(1..config.number_of_replicas_dropped_after_connection + 1) { @@ -305,7 +305,7 @@ mod standalone_client_tests { let connection_request = create_connection_request(addresses.as_slice(), &Default::default()); block_on_all(async { - let client_res = StandaloneClient::create_client(connection_request.into()) + let client_res = StandaloneClient::create_client(connection_request.into(), None) .await .map_err(ConnectionError::Standalone); assert!(client_res.is_err()); @@ -344,7 +344,7 @@ mod standalone_client_tests { create_connection_request(addresses.as_slice(), &Default::default()); block_on_all(async { - let mut client = StandaloneClient::create_client(connection_request.into()) + let mut client = StandaloneClient::create_client(connection_request.into(), None) .await .unwrap(); diff --git a/glide-core/tests/utilities/cluster.rs b/glide-core/tests/utilities/cluster.rs index 6ff69a932e..9e7c356f4e 100644 --- a/glide-core/tests/utilities/cluster.rs +++ b/glide-core/tests/utilities/cluster.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use super::{create_connection_request, ClusterMode, TestConfiguration}; use futures::future::{join_all, 
BoxFuture}; @@ -249,7 +249,7 @@ pub async fn create_cluster_client( configuration.request_timeout = configuration.request_timeout.or(Some(10000)); let connection_request = create_connection_request(&addresses, &configuration); - Client::new(connection_request.into()).await.unwrap() + Client::new(connection_request.into(), None).await.unwrap() } pub async fn setup_test_basics_internal(configuration: TestConfiguration) -> ClusterTestBasics { diff --git a/glide-core/tests/utilities/mocks.rs b/glide-core/tests/utilities/mocks.rs index f465000988..160e8a3189 100644 --- a/glide-core/tests/utilities/mocks.rs +++ b/glide-core/tests/utilities/mocks.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use futures_intrusive::sync::ManualResetEvent; use redis::{Cmd, ConnectionAddr, Value}; diff --git a/glide-core/tests/utilities/mod.rs b/glide-core/tests/utilities/mod.rs index 04bd727a1d..05c6f1f05a 100644 --- a/glide-core/tests/utilities/mod.rs +++ b/glide-core/tests/utilities/mod.rs @@ -1,5 +1,5 @@ /* - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ #![allow(dead_code)] @@ -12,7 +12,7 @@ use once_cell::sync::Lazy; use rand::{distributions::Alphanumeric, Rng}; use redis::{ cluster_routing::{MultipleNodeRoutingInfo, RoutingInfo}, - ConnectionAddr, RedisConnectionInfo, RedisResult, Value, + ConnectionAddr, PushInfo, RedisConnectionInfo, RedisResult, Value, }; use socket2::{Domain, Socket, Type}; use std::{ @@ -20,6 +20,7 @@ use std::{ sync::Mutex, time::Duration, }; use tempfile::TempDir; +use tokio::sync::mpsc; pub mod cluster; pub mod mocks; @@ -456,7 +457,7 @@ pub async fn wait_for_server_to_become_ready(server_address: &ConnectionAddr) { }) .unwrap(); loop { - match client.get_multiplexed_async_connection().await { + match 
client.get_multiplexed_async_connection(None).await { Err(err) => { if err.is_connection_refusal() { tokio::time::sleep(millisecond).await; @@ -546,6 +547,7 @@ pub async fn send_set_and_get(mut client: Client, key: String) { pub struct TestBasics { pub server: Option, pub client: StandaloneClient, + pub push_receiver: mpsc::UnboundedReceiver, } fn convert_to_protobuf_protocol( @@ -592,7 +594,8 @@ pub async fn setup_acl(addr: &ConnectionAddr, connection_info: &RedisConnectionI }) .unwrap(); let mut connection = - repeat_try_create(|| async { client.get_multiplexed_async_connection().await.ok() }).await; + repeat_try_create(|| async { client.get_multiplexed_async_connection(None).await.ok() }) + .await; let password = connection_info.password.clone().unwrap(); let username = connection_info @@ -689,11 +692,16 @@ pub(crate) async fn setup_test_basics_internal(configuration: &TestConfiguration let mut connection_request = create_connection_request(&[connection_addr], configuration); connection_request.cluster_mode_enabled = false; connection_request.protocol = configuration.protocol.into(); - let client = StandaloneClient::create_client(connection_request.into()) + let (push_sender, push_receiver) = tokio::sync::mpsc::unbounded_channel(); + let client = StandaloneClient::create_client(connection_request.into(), Some(push_sender)) .await .unwrap(); - TestBasics { server, client } + TestBasics { + server, + client, + push_receiver, + } } pub async fn setup_test_basics(use_tls: bool) -> TestBasics { diff --git a/go/api/config.go b/go/api/config.go index 9d2417b429..7b2955d828 100644 --- a/go/api/config.go +++ b/go/api/config.go @@ -1,4 +1,4 @@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 package api diff --git a/go/api/config_test.go b/go/api/config_test.go index 53a18e5308..e30a1b096b 100644 --- a/go/api/config_test.go +++ b/go/api/config_test.go @@ -1,4 +1,4 
@@ -// Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +// Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 package api diff --git a/go/cbindgen.toml b/go/cbindgen.toml index 8bd3eb749f..9378736cd1 100644 --- a/go/cbindgen.toml +++ b/go/cbindgen.toml @@ -1,7 +1,7 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 language = "C" -header = "/* Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */" +header = "/* Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */" [parse] parse_deps = true diff --git a/go/src/lib.rs b/go/src/lib.rs index 72ffeca427..bd76ebe347 100644 --- a/go/src/lib.rs +++ b/go/src/lib.rs @@ -1,5 +1,5 @@ /* - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ // TODO: Investigate using uniffi bindings for Go instead of cbindgen @@ -81,7 +81,7 @@ fn create_client_internal( errors::error_message(&redis_error) })?; let client = runtime - .block_on(GlideClient::new(ConnectionRequest::from(request))) + .block_on(GlideClient::new(ConnectionRequest::from(request), None)) .map_err(|err| err.to_string())?; Ok(ClientAdapter { client, diff --git a/java/THIRD_PARTY_LICENSES_JAVA b/java/THIRD_PARTY_LICENSES_JAVA new file mode 100644 index 0000000000..e69de29bb2 diff --git a/java/benchmarks/src/main/java/glide/benchmarks/BenchmarkingApp.java b/java/benchmarks/src/main/java/glide/benchmarks/BenchmarkingApp.java index 594c82c030..31ab7bbd13 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/BenchmarkingApp.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/BenchmarkingApp.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX 
Identifier: Apache-2.0 */ package glide.benchmarks; import static glide.benchmarks.utils.Benchmarking.testClientSetGet; diff --git a/java/benchmarks/src/main/java/glide/benchmarks/clients/AsyncClient.java b/java/benchmarks/src/main/java/glide/benchmarks/clients/AsyncClient.java index ce450bd118..8a6c8a9025 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/clients/AsyncClient.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/clients/AsyncClient.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.clients; import java.util.concurrent.ExecutionException; diff --git a/java/benchmarks/src/main/java/glide/benchmarks/clients/Client.java b/java/benchmarks/src/main/java/glide/benchmarks/clients/Client.java index 790229d9ec..d61f239642 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/clients/Client.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/clients/Client.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.clients; import glide.benchmarks.utils.ConnectionSettings; diff --git a/java/benchmarks/src/main/java/glide/benchmarks/clients/SyncClient.java b/java/benchmarks/src/main/java/glide/benchmarks/clients/SyncClient.java index 4a47e6ed3d..f8034435ae 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/clients/SyncClient.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/clients/SyncClient.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.clients; /** A Redis client with sync capabilities */ diff --git 
a/java/benchmarks/src/main/java/glide/benchmarks/clients/glide/GlideAsyncClient.java b/java/benchmarks/src/main/java/glide/benchmarks/clients/glide/GlideAsyncClient.java index ee2bdeb83a..3cb1361ee1 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/clients/glide/GlideAsyncClient.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/clients/glide/GlideAsyncClient.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.clients.glide; import static java.util.concurrent.TimeUnit.SECONDS; diff --git a/java/benchmarks/src/main/java/glide/benchmarks/clients/jedis/JedisClient.java b/java/benchmarks/src/main/java/glide/benchmarks/clients/jedis/JedisClient.java index 0553cbba37..9745bdec38 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/clients/jedis/JedisClient.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/clients/jedis/JedisClient.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.clients.jedis; import glide.benchmarks.clients.SyncClient; diff --git a/java/benchmarks/src/main/java/glide/benchmarks/clients/lettuce/LettuceAsyncClient.java b/java/benchmarks/src/main/java/glide/benchmarks/clients/lettuce/LettuceAsyncClient.java index e628ed8f8c..d141582939 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/clients/lettuce/LettuceAsyncClient.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/clients/lettuce/LettuceAsyncClient.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.clients.lettuce; import glide.benchmarks.clients.AsyncClient; diff --git 
a/java/benchmarks/src/main/java/glide/benchmarks/utils/Benchmarking.java b/java/benchmarks/src/main/java/glide/benchmarks/utils/Benchmarking.java index 82bd607a70..0d38204be1 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/utils/Benchmarking.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/utils/Benchmarking.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.utils; import glide.benchmarks.BenchmarkingApp; diff --git a/java/benchmarks/src/main/java/glide/benchmarks/utils/ChosenAction.java b/java/benchmarks/src/main/java/glide/benchmarks/utils/ChosenAction.java index 90d62ba392..58c88ca08d 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/utils/ChosenAction.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/utils/ChosenAction.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.utils; public enum ChosenAction { diff --git a/java/benchmarks/src/main/java/glide/benchmarks/utils/ConnectionSettings.java b/java/benchmarks/src/main/java/glide/benchmarks/utils/ConnectionSettings.java index f15338bd01..e8eae01a1b 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/utils/ConnectionSettings.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/utils/ConnectionSettings.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.utils; /** Redis-client settings */ diff --git a/java/benchmarks/src/main/java/glide/benchmarks/utils/JsonWriter.java b/java/benchmarks/src/main/java/glide/benchmarks/utils/JsonWriter.java index c41ca18906..fb8004d69c 100644 --- 
a/java/benchmarks/src/main/java/glide/benchmarks/utils/JsonWriter.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/utils/JsonWriter.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.utils; import com.google.gson.Gson; diff --git a/java/benchmarks/src/main/java/glide/benchmarks/utils/LatencyResults.java b/java/benchmarks/src/main/java/glide/benchmarks/utils/LatencyResults.java index f7214f9865..297a1e42d0 100644 --- a/java/benchmarks/src/main/java/glide/benchmarks/utils/LatencyResults.java +++ b/java/benchmarks/src/main/java/glide/benchmarks/utils/LatencyResults.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.benchmarks.utils; import java.util.Arrays; diff --git a/java/build.gradle b/java/build.gradle index 1e8824a15f..d36a4bf750 100644 --- a/java/build.gradle +++ b/java/build.gradle @@ -79,7 +79,7 @@ spotless { include '**/*.java' exclude '**/build/**', '**/build-*/**', '**/protobuf/**' } - licenseHeader('/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */') + licenseHeader('/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */') importOrder() removeUnusedImports() trimTrailingWhitespace() diff --git a/java/client/src/main/java/glide/api/BaseClient.java b/java/client/src/main/java/glide/api/BaseClient.java index f7c47e4827..c777e40c30 100644 --- a/java/client/src/main/java/glide/api/BaseClient.java +++ b/java/client/src/main/java/glide/api/BaseClient.java @@ -1,10 +1,14 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api; +import static 
glide.api.models.GlideString.gs; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; +import static glide.api.models.commands.SortOptions.STORE_COMMAND_STRING; import static glide.api.models.commands.bitmap.BitFieldOptions.BitFieldReadOnlySubCommands; import static glide.api.models.commands.bitmap.BitFieldOptions.BitFieldSubCommands; import static glide.api.models.commands.bitmap.BitFieldOptions.createBitFieldArgs; import static glide.ffi.resolvers.SocketListenerResolver.getSocket; +import static glide.utils.ArrayTransformUtils.cast3DArray; import static glide.utils.ArrayTransformUtils.castArray; import static glide.utils.ArrayTransformUtils.castArrayofArrays; import static glide.utils.ArrayTransformUtils.castMapOf2DArray; @@ -30,11 +34,13 @@ import static redis_request.RedisRequestOuterClass.RequestType.Decr; import static redis_request.RedisRequestOuterClass.RequestType.DecrBy; import static redis_request.RedisRequestOuterClass.RequestType.Del; +import static redis_request.RedisRequestOuterClass.RequestType.Dump; import static redis_request.RedisRequestOuterClass.RequestType.Exists; import static redis_request.RedisRequestOuterClass.RequestType.Expire; import static redis_request.RedisRequestOuterClass.RequestType.ExpireAt; import static redis_request.RedisRequestOuterClass.RequestType.ExpireTime; import static redis_request.RedisRequestOuterClass.RequestType.FCall; +import static redis_request.RedisRequestOuterClass.RequestType.FCallReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.GeoAdd; import static redis_request.RedisRequestOuterClass.RequestType.GeoDist; import static redis_request.RedisRequestOuterClass.RequestType.GeoHash; @@ -42,6 +48,7 @@ import static redis_request.RedisRequestOuterClass.RequestType.Get; import static redis_request.RedisRequestOuterClass.RequestType.GetBit; import static redis_request.RedisRequestOuterClass.RequestType.GetDel; +import static 
redis_request.RedisRequestOuterClass.RequestType.GetEx; import static redis_request.RedisRequestOuterClass.RequestType.GetRange; import static redis_request.RedisRequestOuterClass.RequestType.HDel; import static redis_request.RedisRequestOuterClass.RequestType.HExists; @@ -94,6 +101,7 @@ import static redis_request.RedisRequestOuterClass.RequestType.RPushX; import static redis_request.RedisRequestOuterClass.RequestType.Rename; import static redis_request.RedisRequestOuterClass.RequestType.RenameNX; +import static redis_request.RedisRequestOuterClass.RequestType.Restore; import static redis_request.RedisRequestOuterClass.RequestType.SAdd; import static redis_request.RedisRequestOuterClass.RequestType.SCard; import static redis_request.RedisRequestOuterClass.RequestType.SDiff; @@ -108,20 +116,31 @@ import static redis_request.RedisRequestOuterClass.RequestType.SPop; import static redis_request.RedisRequestOuterClass.RequestType.SRandMember; import static redis_request.RedisRequestOuterClass.RequestType.SRem; +import static redis_request.RedisRequestOuterClass.RequestType.SUnion; import static redis_request.RedisRequestOuterClass.RequestType.SUnionStore; import static redis_request.RedisRequestOuterClass.RequestType.Set; import static redis_request.RedisRequestOuterClass.RequestType.SetBit; import static redis_request.RedisRequestOuterClass.RequestType.SetRange; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.Strlen; import static redis_request.RedisRequestOuterClass.RequestType.TTL; import static redis_request.RedisRequestOuterClass.RequestType.Touch; import static redis_request.RedisRequestOuterClass.RequestType.Type; import static redis_request.RedisRequestOuterClass.RequestType.Unlink; +import static redis_request.RedisRequestOuterClass.RequestType.Watch; +import static 
redis_request.RedisRequestOuterClass.RequestType.XAck; import static redis_request.RedisRequestOuterClass.RequestType.XAdd; import static redis_request.RedisRequestOuterClass.RequestType.XDel; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupCreate; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupCreateConsumer; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupDelConsumer; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupDestroy; import static redis_request.RedisRequestOuterClass.RequestType.XLen; +import static redis_request.RedisRequestOuterClass.RequestType.XPending; import static redis_request.RedisRequestOuterClass.RequestType.XRange; import static redis_request.RedisRequestOuterClass.RequestType.XRead; +import static redis_request.RedisRequestOuterClass.RequestType.XReadGroup; import static redis_request.RedisRequestOuterClass.RequestType.XRevRange; import static redis_request.RedisRequestOuterClass.RequestType.XTrim; import static redis_request.RedisRequestOuterClass.RequestType.ZAdd; @@ -162,8 +181,11 @@ import glide.api.commands.SortedSetBaseCommands; import glide.api.commands.StreamBaseCommands; import glide.api.commands.StringBaseCommands; +import glide.api.commands.TransactionsBaseCommands; +import glide.api.models.GlideString; import glide.api.models.Script; import glide.api.models.commands.ExpireOptions; +import glide.api.models.commands.GetExOptions; import glide.api.models.commands.LInsertOptions.InsertPosition; import glide.api.models.commands.LPosOptions; import glide.api.models.commands.ListDirection; @@ -172,6 +194,7 @@ import glide.api.models.commands.RangeOptions.RangeQuery; import glide.api.models.commands.RangeOptions.ScoreRange; import glide.api.models.commands.RangeOptions.ScoredRangeQuery; +import glide.api.models.commands.RestoreOptions; import glide.api.models.commands.ScoreFilter; import glide.api.models.commands.ScriptOptions; import 
glide.api.models.commands.SetOptions; @@ -185,7 +208,10 @@ import glide.api.models.commands.geospatial.GeoUnit; import glide.api.models.commands.geospatial.GeospatialData; import glide.api.models.commands.stream.StreamAddOptions; +import glide.api.models.commands.stream.StreamGroupOptions; +import glide.api.models.commands.stream.StreamPendingOptions; import glide.api.models.commands.stream.StreamRange; +import glide.api.models.commands.stream.StreamReadGroupOptions; import glide.api.models.commands.stream.StreamReadOptions; import glide.api.models.commands.stream.StreamTrimOptions; import glide.api.models.configuration.BaseClientConfiguration; @@ -199,8 +225,8 @@ import glide.managers.BaseCommandResponseResolver; import glide.managers.CommandManager; import glide.managers.ConnectionManager; -import java.util.Arrays; import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -228,7 +254,8 @@ public abstract class BaseClient StreamBaseCommands, HyperLogLogBaseCommands, GeospatialIndicesBaseCommands, - ScriptingAndFunctionsBaseCommands { + ScriptingAndFunctionsBaseCommands, + TransactionsBaseCommands { /** Redis simple string response with "OK" */ public static final String OK = ConstantResponse.OK.toString(); @@ -303,12 +330,18 @@ protected static CommandManager buildCommandManager(ChannelHandler channelHandle /** * Extracts the value from a GLIDE core response message and either throws an - * exception or returns the value as an object of type T. If isNullable, - * than also returns null. + * exception or returns the value as an object of type T. * * @param response Redis protobuf message. * @param classType Parameter T class type. - * @param isNullable Accepts null values in the protobuf message. + * @param flags A set of parameters which describes how to handle the response. Could be empty or + * any combination of + *
    + *
  • {@link ResponseFlags#ENCODING_UTF8} to return the data as a String; if + * unset, a byte[] is returned. + *
  • {@link ResponseFlags#IS_NULLABLE} to accept null values. + *
+ * * @return Response as an object of type T or null. * @param The return value type. * @throws RedisException On a type mismatch. @@ -326,6 +359,9 @@ protected T handleRedisResponse( if (isNullable && (value == null)) { return null; } + + value = convertByteArrayToGlideString(value); + if (classType.isInstance(value)) { return (T) value; } @@ -352,7 +388,19 @@ protected String handleStringOrNullResponse(Response response) throws RedisExcep } protected byte[] handleBytesOrNullResponse(Response response) throws RedisException { - return handleRedisResponse(byte[].class, EnumSet.of(ResponseFlags.IS_NULLABLE), response); + var result = + handleRedisResponse(GlideString.class, EnumSet.of(ResponseFlags.IS_NULLABLE), response); + if (result == null) return null; + + return result.getBytes(); + } + + protected GlideString handleGlideStringOrNullResponse(Response response) throws RedisException { + return handleRedisResponse(GlideString.class, EnumSet.of(ResponseFlags.IS_NULLABLE), response); + } + + protected GlideString handleGlideStringResponse(Response response) throws RedisException { + return handleRedisResponse(GlideString.class, EnumSet.noneOf(ResponseFlags.class), response); } protected Boolean handleBooleanResponse(Response response) throws RedisException { @@ -386,6 +434,10 @@ protected Object[] handleArrayOrNullResponse(Response response) throws RedisExce response); } + protected Object[] handleArrayOrNullResponseBinary(Response response) throws RedisException { + return handleRedisResponse(Object[].class, EnumSet.of(ResponseFlags.IS_NULLABLE), response); + } + /** * @param response A Protobuf response * @return A map of String to V. @@ -396,6 +448,19 @@ protected Map handleMapResponse(Response response) throws RedisEx return handleRedisResponse(Map.class, EnumSet.of(ResponseFlags.ENCODING_UTF8), response); } + /** + * Get a map and convert {@link Map} keys from byte[] to {@link String}. 
+ * + * @param response A Protobuf response + * @return A map of GlideString to V. + * @param Value type. + */ + @SuppressWarnings("unchecked") // raw Map cast to Map + protected Map handleBinaryStringMapResponse(Response response) + throws RedisException { + return handleRedisResponse(Map.class, EnumSet.noneOf(ResponseFlags.class), response); + } + /** * @param response A Protobuf response * @return A map of String to V or null @@ -429,6 +494,11 @@ protected Set handleSetResponse(Response response) throws RedisException return handleRedisResponse(Set.class, EnumSet.of(ResponseFlags.ENCODING_UTF8), response); } + @SuppressWarnings("unchecked") + protected Set handleSetBinaryResponse(Response response) throws RedisException { + return handleRedisResponse(Set.class, EnumSet.noneOf(ResponseFlags.class), response); + } + /** Process a FUNCTION LIST standalone response. */ @SuppressWarnings("unchecked") protected Map[] handleFunctionListResponse(Object[] response) { @@ -452,6 +522,21 @@ protected Map> handleFunctionStatsResponse( return response; } + /** Process a LCS key1 key2 IDX response */ + protected Map handleLcsIdxResponse(Map response) + throws RedisException { + Long[][][] convertedMatchesObject = + cast3DArray((Object[]) (response.get(LCS_MATCHES_RESULT_KEY)), Long.class); + + if (convertedMatchesObject == null) { + throw new NullPointerException( + "LCS result does not contain the key \"" + LCS_MATCHES_RESULT_KEY + "\""); + } + + response.put("matches", convertedMatchesObject); + return response; + } + @Override public CompletableFuture del(@NonNull String[] keys) { return commandManager.submitNewCommand(Del, keys, this::handleLongResponse); @@ -464,9 +549,9 @@ public CompletableFuture get(@NonNull String key) { } @Override - public CompletableFuture get(@NonNull byte[] key) { + public CompletableFuture get(@NonNull GlideString key) { return commandManager.submitNewCommand( - Get, Arrays.asList(key), this::handleBytesOrNullResponse); + Get, new GlideString[] 
{key}, this::handleGlideStringOrNullResponse); } @Override @@ -476,9 +561,27 @@ public CompletableFuture getdel(@NonNull String key) { } @Override - public CompletableFuture set(@NonNull byte[] key, @NonNull byte[] value) { + public CompletableFuture getex(@NonNull String key) { + return commandManager.submitNewCommand( + GetEx, new String[] {key}, this::handleStringOrNullResponse); + } + + @Override + public CompletableFuture getex(@NonNull String key, @NonNull GetExOptions options) { + String[] arguments = ArrayUtils.addFirst(options.toArgs(), key); + return commandManager.submitNewCommand(GetEx, arguments, this::handleStringOrNullResponse); + } + + @Override + public CompletableFuture getdel(@NonNull GlideString key) { + return commandManager.submitNewCommand( + GetDel, new GlideString[] {key}, this::handleGlideStringOrNullResponse); + } + + @Override + public CompletableFuture set(@NonNull GlideString key, @NonNull GlideString value) { return commandManager.submitNewCommand( - Set, Arrays.asList(key, value), this::handleStringResponse); + Set, new GlideString[] {key, value}, this::handleStringResponse); } @Override @@ -494,18 +597,40 @@ public CompletableFuture set( return commandManager.submitNewCommand(Set, arguments, this::handleStringOrNullResponse); } + @Override + public CompletableFuture set( + @NonNull GlideString key, @NonNull GlideString value, @NonNull SetOptions options) { + GlideString[] arguments = + ArrayUtils.addAll(new GlideString[] {key, value}, options.toGlideStringArgs()); + return commandManager.submitNewCommand(Set, arguments, this::handleStringOrNullResponse); + } + @Override public CompletableFuture append(@NonNull String key, @NonNull String value) { return commandManager.submitNewCommand( Append, new String[] {key, value}, this::handleLongResponse); } + @Override + public CompletableFuture append(@NonNull GlideString key, @NonNull GlideString value) { + return commandManager.submitNewCommand( + Append, new GlideString[] {key, value}, 
this::handleLongResponse); + } + @Override public CompletableFuture mget(@NonNull String[] keys) { return commandManager.submitNewCommand( MGet, keys, response -> castArray(handleArrayOrNullResponse(response), String.class)); } + @Override + public CompletableFuture mget(@NonNull GlideString[] keys) { + return commandManager.submitNewCommand( + MGet, + keys, + response -> castArray(handleArrayOrNullResponseBinary(response), GlideString.class)); + } + @Override public CompletableFuture mset(@NonNull Map keyValueMap) { String[] args = convertMapToKeyValueStringArray(keyValueMap); @@ -518,36 +643,73 @@ public CompletableFuture objectEncoding(@NonNull String key) { ObjectEncoding, new String[] {key}, this::handleStringOrNullResponse); } + @Override + public CompletableFuture objectEncoding(@NonNull GlideString key) { + return commandManager.submitNewCommand( + ObjectEncoding, new GlideString[] {key}, this::handleStringOrNullResponse); + } + @Override public CompletableFuture objectFreq(@NonNull String key) { return commandManager.submitNewCommand( ObjectFreq, new String[] {key}, this::handleLongOrNullResponse); } + @Override + public CompletableFuture objectFreq(@NonNull GlideString key) { + return commandManager.submitNewCommand( + ObjectFreq, new GlideString[] {key}, this::handleLongOrNullResponse); + } + @Override public CompletableFuture objectIdletime(@NonNull String key) { return commandManager.submitNewCommand( ObjectIdleTime, new String[] {key}, this::handleLongOrNullResponse); } + @Override + public CompletableFuture objectIdletime(@NonNull GlideString key) { + return commandManager.submitNewCommand( + ObjectIdleTime, new GlideString[] {key}, this::handleLongOrNullResponse); + } + @Override public CompletableFuture objectRefcount(@NonNull String key) { return commandManager.submitNewCommand( ObjectRefCount, new String[] {key}, this::handleLongOrNullResponse); } + @Override + public CompletableFuture objectRefcount(@NonNull GlideString key) { + return 
commandManager.submitNewCommand( + ObjectRefCount, new GlideString[] {key}, this::handleLongOrNullResponse); + } + @Override public CompletableFuture rename(@NonNull String key, @NonNull String newKey) { return commandManager.submitNewCommand( Rename, new String[] {key, newKey}, this::handleStringResponse); } + @Override + public CompletableFuture rename(@NonNull GlideString key, @NonNull GlideString newKey) { + return commandManager.submitNewCommand( + Rename, new GlideString[] {key, newKey}, this::handleStringResponse); + } + @Override public CompletableFuture renamenx(@NonNull String key, @NonNull String newKey) { return commandManager.submitNewCommand( RenameNX, new String[] {key, newKey}, this::handleBooleanResponse); } + @Override + public CompletableFuture renamenx( + @NonNull GlideString key, @NonNull GlideString newKey) { + return commandManager.submitNewCommand( + RenameNX, new GlideString[] {key, newKey}, this::handleBooleanResponse); + } + @Override public CompletableFuture incr(@NonNull String key) { return commandManager.submitNewCommand(Incr, new String[] {key}, this::handleLongResponse); @@ -559,12 +721,28 @@ public CompletableFuture incrBy(@NonNull String key, long amount) { IncrBy, new String[] {key, Long.toString(amount)}, this::handleLongResponse); } + @Override + public CompletableFuture incrBy(@NonNull GlideString key, long amount) { + return commandManager.submitNewCommand( + IncrBy, + new GlideString[] {key, gs(Long.toString(amount).getBytes())}, + this::handleLongResponse); + } + @Override public CompletableFuture incrByFloat(@NonNull String key, double amount) { return commandManager.submitNewCommand( IncrByFloat, new String[] {key, Double.toString(amount)}, this::handleDoubleResponse); } + @Override + public CompletableFuture incrByFloat(@NonNull GlideString key, double amount) { + return commandManager.submitNewCommand( + IncrByFloat, + new GlideString[] {key, gs(Double.toString(amount).getBytes())}, + this::handleDoubleResponse); + } + 
@Override public CompletableFuture decr(@NonNull String key) { return commandManager.submitNewCommand(Decr, new String[] {key}, this::handleLongResponse); @@ -581,6 +759,12 @@ public CompletableFuture strlen(@NonNull String key) { return commandManager.submitNewCommand(Strlen, new String[] {key}, this::handleLongResponse); } + @Override + public CompletableFuture strlen(@NonNull GlideString key) { + return commandManager.submitNewCommand( + Strlen, new GlideString[] {key}, this::handleLongResponse); + } + @Override public CompletableFuture setrange(@NonNull String key, int offset, @NonNull String value) { String[] arguments = new String[] {key, Integer.toString(offset), value}; @@ -613,6 +797,13 @@ public CompletableFuture hsetnx( HSetNX, new String[] {key, field, value}, this::handleBooleanResponse); } + @Override + public CompletableFuture hsetnx( + @NonNull GlideString key, @NonNull GlideString field, @NonNull GlideString value) { + return commandManager.submitNewCommand( + HSetNX, new GlideString[] {key, field, value}, this::handleBooleanResponse); + } + @Override public CompletableFuture hdel(@NonNull String key, @NonNull String[] fields) { String[] args = ArrayUtils.addFirst(fields, key); @@ -645,17 +836,38 @@ public CompletableFuture hexists(@NonNull String key, @NonNull String f HExists, new String[] {key, field}, this::handleBooleanResponse); } + @Override + public CompletableFuture hexists(@NonNull GlideString key, @NonNull GlideString field) { + return commandManager.submitNewCommand( + HExists, new GlideString[] {key, field}, this::handleBooleanResponse); + } + @Override public CompletableFuture> hgetall(@NonNull String key) { return commandManager.submitNewCommand(HGetAll, new String[] {key}, this::handleMapResponse); } + @Override + public CompletableFuture> hgetall(@NonNull GlideString key) { + return commandManager.submitNewCommand( + HGetAll, new GlideString[] {key}, this::handleBinaryStringMapResponse); + } + @Override public CompletableFuture 
hincrBy(@NonNull String key, @NonNull String field, long amount) { return commandManager.submitNewCommand( HIncrBy, new String[] {key, field, Long.toString(amount)}, this::handleLongResponse); } + @Override + public CompletableFuture hincrBy( + @NonNull GlideString key, @NonNull GlideString field, long amount) { + return commandManager.submitNewCommand( + HIncrBy, + new GlideString[] {key, field, gs(Long.toString(amount).getBytes())}, + this::handleLongResponse); + } + @Override public CompletableFuture hincrByFloat( @NonNull String key, @NonNull String field, double amount) { @@ -665,6 +877,15 @@ public CompletableFuture hincrByFloat( this::handleDoubleResponse); } + @Override + public CompletableFuture hincrByFloat( + @NonNull GlideString key, @NonNull GlideString field, double amount) { + return commandManager.submitNewCommand( + HIncrByFloat, + new GlideString[] {key, field, gs(Double.toString(amount).getBytes())}, + this::handleDoubleResponse); + } + @Override public CompletableFuture hkeys(@NonNull String key) { return commandManager.submitNewCommand( @@ -679,6 +900,12 @@ public CompletableFuture hstrlen(@NonNull String key, @NonNull String fiel HStrlen, new String[] {key, field}, this::handleLongResponse); } + @Override + public CompletableFuture hstrlen(@NonNull GlideString key, @NonNull GlideString field) { + return commandManager.submitNewCommand( + HStrlen, new GlideString[] {key, field}, this::handleLongResponse); + } + @Override public CompletableFuture hrandfield(@NonNull String key) { return commandManager.submitNewCommand( @@ -708,6 +935,12 @@ public CompletableFuture lpush(@NonNull String key, @NonNull String[] elem return commandManager.submitNewCommand(LPush, arguments, this::handleLongResponse); } + @Override + public CompletableFuture lpush(@NonNull GlideString key, @NonNull GlideString[] elements) { + GlideString[] arguments = ArrayUtils.addFirst(elements, key); + return commandManager.submitNewCommand(LPush, arguments, 
this::handleLongResponse); + } + @Override public CompletableFuture lpop(@NonNull String key) { return commandManager.submitNewCommand( @@ -777,23 +1010,51 @@ public CompletableFuture ltrim(@NonNull String key, long start, long end this::handleStringResponse); } + @Override + public CompletableFuture ltrim(@NonNull GlideString key, long start, long end) { + return commandManager.submitNewCommand( + LTrim, + new GlideString[] {key, gs(Long.toString(start)), gs(Long.toString(end))}, + this::handleStringResponse); + } + @Override public CompletableFuture llen(@NonNull String key) { return commandManager.submitNewCommand(LLen, new String[] {key}, this::handleLongResponse); } + @Override + public CompletableFuture llen(@NonNull GlideString key) { + return commandManager.submitNewCommand(LLen, new GlideString[] {key}, this::handleLongResponse); + } + @Override public CompletableFuture lrem(@NonNull String key, long count, @NonNull String element) { return commandManager.submitNewCommand( LRem, new String[] {key, Long.toString(count), element}, this::handleLongResponse); } + @Override + public CompletableFuture lrem( + @NonNull GlideString key, long count, @NonNull GlideString element) { + return commandManager.submitNewCommand( + LRem, + new GlideString[] {key, gs(Long.toString(count).getBytes()), element}, + this::handleLongResponse); + } + @Override public CompletableFuture rpush(@NonNull String key, @NonNull String[] elements) { String[] arguments = ArrayUtils.addFirst(elements, key); return commandManager.submitNewCommand(RPush, arguments, this::handleLongResponse); } + @Override + public CompletableFuture rpush(@NonNull GlideString key, @NonNull GlideString[] elements) { + GlideString[] arguments = ArrayUtils.addFirst(elements, key); + return commandManager.submitNewCommand(RPush, arguments, this::handleLongResponse); + } + @Override public CompletableFuture rpop(@NonNull String key) { return commandManager.submitNewCommand( @@ -814,28 +1075,59 @@ public 
CompletableFuture sadd(@NonNull String key, @NonNull String[] membe return commandManager.submitNewCommand(SAdd, arguments, this::handleLongResponse); } + @Override + public CompletableFuture sadd(@NonNull GlideString key, @NonNull GlideString[] members) { + GlideString[] arguments = ArrayUtils.addFirst(members, key); + return commandManager.submitNewCommand(SAdd, arguments, this::handleLongResponse); + } + @Override public CompletableFuture sismember(@NonNull String key, @NonNull String member) { return commandManager.submitNewCommand( SIsMember, new String[] {key, member}, this::handleBooleanResponse); } + @Override + public CompletableFuture sismember( + @NonNull GlideString key, @NonNull GlideString member) { + return commandManager.submitNewCommand( + SIsMember, new GlideString[] {key, member}, this::handleBooleanResponse); + } + @Override public CompletableFuture srem(@NonNull String key, @NonNull String[] members) { String[] arguments = ArrayUtils.addFirst(members, key); return commandManager.submitNewCommand(SRem, arguments, this::handleLongResponse); } + @Override + public CompletableFuture srem(@NonNull GlideString key, @NonNull GlideString[] members) { + GlideString[] arguments = ArrayUtils.addFirst(members, key); + return commandManager.submitNewCommand(SRem, arguments, this::handleLongResponse); + } + @Override public CompletableFuture> smembers(@NonNull String key) { return commandManager.submitNewCommand(SMembers, new String[] {key}, this::handleSetResponse); } + @Override + public CompletableFuture> smembers(@NonNull GlideString key) { + return commandManager.submitNewCommand( + SMembers, new GlideString[] {key}, this::handleSetBinaryResponse); + } + @Override public CompletableFuture scard(@NonNull String key) { return commandManager.submitNewCommand(SCard, new String[] {key}, this::handleLongResponse); } + @Override + public CompletableFuture scard(@NonNull GlideString key) { + return commandManager.submitNewCommand( + SCard, new GlideString[] 
{key}, this::handleLongResponse); + } + @Override public CompletableFuture> sdiff(@NonNull String[] keys) { return commandManager.submitNewCommand(SDiff, keys, this::handleSetResponse); @@ -861,17 +1153,36 @@ public CompletableFuture smove( SMove, new String[] {source, destination, member}, this::handleBooleanResponse); } + @Override + public CompletableFuture smove( + @NonNull GlideString source, @NonNull GlideString destination, @NonNull GlideString member) { + return commandManager.submitNewCommand( + SMove, new GlideString[] {source, destination, member}, this::handleBooleanResponse); + } + @Override public CompletableFuture sinterstore(@NonNull String destination, @NonNull String[] keys) { String[] arguments = ArrayUtils.addFirst(keys, destination); return commandManager.submitNewCommand(SInterStore, arguments, this::handleLongResponse); } + @Override + public CompletableFuture sinterstore( + @NonNull GlideString destination, @NonNull GlideString[] keys) { + GlideString[] arguments = ArrayUtils.addFirst(keys, destination); + return commandManager.submitNewCommand(SInterStore, arguments, this::handleLongResponse); + } + @Override public CompletableFuture> sinter(@NonNull String[] keys) { return commandManager.submitNewCommand(SInter, keys, this::handleSetResponse); } + @Override + public CompletableFuture> sinter(@NonNull GlideString[] keys) { + return commandManager.submitNewCommand(SInter, keys, this::handleSetBinaryResponse); + } + @Override public CompletableFuture sunionstore(@NonNull String destination, @NonNull String[] keys) { String[] arguments = ArrayUtils.addFirst(keys, destination); @@ -883,17 +1194,33 @@ public CompletableFuture exists(@NonNull String[] keys) { return commandManager.submitNewCommand(Exists, keys, this::handleLongResponse); } + @Override + public CompletableFuture exists(@NonNull GlideString[] keys) { + return commandManager.submitNewCommand(Exists, keys, this::handleLongResponse); + } + @Override public CompletableFuture 
unlink(@NonNull String[] keys) { return commandManager.submitNewCommand(Unlink, keys, this::handleLongResponse); } + @Override + public CompletableFuture unlink(@NonNull GlideString[] keys) { + return commandManager.submitNewCommand(Unlink, keys, this::handleLongResponse); + } + @Override public CompletableFuture expire(@NonNull String key, long seconds) { return commandManager.submitNewCommand( Expire, new String[] {key, Long.toString(seconds)}, this::handleBooleanResponse); } + @Override + public CompletableFuture expire(@NonNull GlideString key, long seconds) { + return commandManager.submitNewCommand( + Expire, new GlideString[] {key, gs(Long.toString(seconds))}, this::handleBooleanResponse); + } + @Override public CompletableFuture expire( @NonNull String key, long seconds, @NonNull ExpireOptions expireOptions) { @@ -902,12 +1229,29 @@ public CompletableFuture expire( return commandManager.submitNewCommand(Expire, arguments, this::handleBooleanResponse); } + @Override + public CompletableFuture expire( + @NonNull GlideString key, long seconds, @NonNull ExpireOptions expireOptions) { + GlideString[] arguments = + ArrayUtils.addAll( + new GlideString[] {key, gs(Long.toString(seconds))}, expireOptions.toGlideStringArgs()); + return commandManager.submitNewCommand(Expire, arguments, this::handleBooleanResponse); + } + @Override public CompletableFuture expireAt(@NonNull String key, long unixSeconds) { return commandManager.submitNewCommand( ExpireAt, new String[] {key, Long.toString(unixSeconds)}, this::handleBooleanResponse); } + @Override + public CompletableFuture expireAt(@NonNull GlideString key, long unixSeconds) { + return commandManager.submitNewCommand( + ExpireAt, + new GlideString[] {key, gs(Long.toString(unixSeconds))}, + this::handleBooleanResponse); + } + @Override public CompletableFuture expireAt( @NonNull String key, long unixSeconds, @NonNull ExpireOptions expireOptions) { @@ -916,12 +1260,30 @@ public CompletableFuture expireAt( return 
commandManager.submitNewCommand(ExpireAt, arguments, this::handleBooleanResponse); } + @Override + public CompletableFuture expireAt( + @NonNull GlideString key, long unixSeconds, @NonNull ExpireOptions expireOptions) { + GlideString[] arguments = + ArrayUtils.addAll( + new GlideString[] {key, gs(Long.toString(unixSeconds))}, + expireOptions.toGlideStringArgs()); + return commandManager.submitNewCommand(ExpireAt, arguments, this::handleBooleanResponse); + } + @Override public CompletableFuture pexpire(@NonNull String key, long milliseconds) { return commandManager.submitNewCommand( PExpire, new String[] {key, Long.toString(milliseconds)}, this::handleBooleanResponse); } + @Override + public CompletableFuture pexpire(@NonNull GlideString key, long milliseconds) { + return commandManager.submitNewCommand( + PExpire, + new GlideString[] {key, gs(Long.toString(milliseconds))}, + this::handleBooleanResponse); + } + @Override public CompletableFuture pexpire( @NonNull String key, long milliseconds, @NonNull ExpireOptions expireOptions) { @@ -930,6 +1292,16 @@ public CompletableFuture pexpire( return commandManager.submitNewCommand(PExpire, arguments, this::handleBooleanResponse); } + @Override + public CompletableFuture pexpire( + @NonNull GlideString key, long milliseconds, @NonNull ExpireOptions expireOptions) { + GlideString[] arguments = + ArrayUtils.addAll( + new GlideString[] {key, gs(Long.toString(milliseconds))}, + expireOptions.toGlideStringArgs()); + return commandManager.submitNewCommand(PExpire, arguments, this::handleBooleanResponse); + } + @Override public CompletableFuture pexpireAt(@NonNull String key, long unixMilliseconds) { return commandManager.submitNewCommand( @@ -938,6 +1310,14 @@ public CompletableFuture pexpireAt(@NonNull String key, long unixMillis this::handleBooleanResponse); } + @Override + public CompletableFuture pexpireAt(@NonNull GlideString key, long unixMilliseconds) { + return commandManager.submitNewCommand( + PExpireAt, + new 
GlideString[] {key, gs(Long.toString(unixMilliseconds))}, + this::handleBooleanResponse); + } + @Override public CompletableFuture pexpireAt( @NonNull String key, long unixMilliseconds, @NonNull ExpireOptions expireOptions) { @@ -947,23 +1327,50 @@ public CompletableFuture pexpireAt( return commandManager.submitNewCommand(PExpireAt, arguments, this::handleBooleanResponse); } + @Override + public CompletableFuture pexpireAt( + @NonNull GlideString key, long unixMilliseconds, @NonNull ExpireOptions expireOptions) { + GlideString[] arguments = + ArrayUtils.addAll( + new GlideString[] {key, gs(Long.toString(unixMilliseconds))}, + expireOptions.toGlideStringArgs()); + return commandManager.submitNewCommand(PExpireAt, arguments, this::handleBooleanResponse); + } + @Override public CompletableFuture ttl(@NonNull String key) { return commandManager.submitNewCommand(TTL, new String[] {key}, this::handleLongResponse); } + @Override + public CompletableFuture ttl(@NonNull GlideString key) { + return commandManager.submitNewCommand(TTL, new GlideString[] {key}, this::handleLongResponse); + } + @Override public CompletableFuture expiretime(@NonNull String key) { return commandManager.submitNewCommand( ExpireTime, new String[] {key}, this::handleLongResponse); } + @Override + public CompletableFuture expiretime(@NonNull GlideString key) { + return commandManager.submitNewCommand( + ExpireTime, new GlideString[] {key}, this::handleLongResponse); + } + @Override public CompletableFuture pexpiretime(@NonNull String key) { return commandManager.submitNewCommand( PExpireTime, new String[] {key}, this::handleLongResponse); } + @Override + public CompletableFuture pexpiretime(@NonNull GlideString key) { + return commandManager.submitNewCommand( + PExpireTime, new GlideString[] {key}, this::handleLongResponse); + } + @Override public CompletableFuture invokeScript(@NonNull Script script) { return commandManager.submitScript( @@ -1040,11 +1447,23 @@ public CompletableFuture zrem(@NonNull 
String key, @NonNull String[] membe return commandManager.submitNewCommand(ZRem, arguments, this::handleLongResponse); } + @Override + public CompletableFuture zrem(@NonNull GlideString key, @NonNull GlideString[] members) { + GlideString[] arguments = ArrayUtils.addFirst(members, key); + return commandManager.submitNewCommand(ZRem, arguments, this::handleLongResponse); + } + @Override public CompletableFuture zcard(@NonNull String key) { return commandManager.submitNewCommand(ZCard, new String[] {key}, this::handleLongResponse); } + @Override + public CompletableFuture zcard(@NonNull GlideString key) { + return commandManager.submitNewCommand( + ZCard, new GlideString[] {key}, this::handleLongResponse); + } + @Override public CompletableFuture> zpopmin(@NonNull String key, long count) { return commandManager.submitNewCommand( @@ -1085,12 +1504,24 @@ public CompletableFuture zscore(@NonNull String key, @NonNull String mem ZScore, new String[] {key, member}, this::handleDoubleOrNullResponse); } + @Override + public CompletableFuture zscore(@NonNull GlideString key, @NonNull GlideString member) { + return commandManager.submitNewCommand( + ZScore, new GlideString[] {key, member}, this::handleDoubleOrNullResponse); + } + @Override public CompletableFuture zrank(@NonNull String key, @NonNull String member) { return commandManager.submitNewCommand( ZRank, new String[] {key, member}, this::handleLongOrNullResponse); } + @Override + public CompletableFuture zrank(@NonNull GlideString key, @NonNull GlideString member) { + return commandManager.submitNewCommand( + ZRank, new GlideString[] {key, member}, this::handleLongOrNullResponse); + } + @Override public CompletableFuture zrankWithScore(@NonNull String key, @NonNull String member) { return commandManager.submitNewCommand( @@ -1122,10 +1553,20 @@ public CompletableFuture zmscore(@NonNull String key, @NonNull String[ } @Override - public CompletableFuture zdiff(@NonNull String[] keys) { - String[] arguments = 
ArrayUtils.addFirst(keys, Long.toString(keys.length)); + public CompletableFuture zmscore( + @NonNull GlideString key, @NonNull GlideString[] members) { + GlideString[] arguments = ArrayUtils.addFirst(members, key); return commandManager.submitNewCommand( - ZDiff, arguments, response -> castArray(handleArrayResponse(response), String.class)); + ZMScore, + arguments, + response -> castArray(handleArrayOrNullResponse(response), Double.class)); + } + + @Override + public CompletableFuture zdiff(@NonNull String[] keys) { + String[] arguments = ArrayUtils.addFirst(keys, Long.toString(keys.length)); + return commandManager.submitNewCommand( + ZDiff, arguments, response -> castArray(handleArrayResponse(response), String.class)); } @Override @@ -1142,6 +1583,15 @@ public CompletableFuture zdiffstore(@NonNull String destination, @NonNull return commandManager.submitNewCommand(ZDiffStore, arguments, this::handleLongResponse); } + @Override + public CompletableFuture zdiffstore( + @NonNull GlideString destination, @NonNull GlideString[] keys) { + GlideString[] arguments = + ArrayUtils.addAll( + new GlideString[] {destination, gs(Long.toString(keys.length).getBytes())}, keys); + return commandManager.submitNewCommand(ZDiffStore, arguments, this::handleLongResponse); + } + @Override public CompletableFuture zcount( @NonNull String key, @NonNull ScoreRange minScore, @NonNull ScoreRange maxScore) { @@ -1157,6 +1607,16 @@ public CompletableFuture zremrangebyrank(@NonNull String key, long start, this::handleLongResponse); } + @Override + public CompletableFuture zremrangebyrank(@NonNull GlideString key, long start, long end) { + return commandManager.submitNewCommand( + ZRemRangeByRank, + new GlideString[] { + key, gs(Long.toString(start).getBytes()), gs(Long.toString(end).getBytes()) + }, + this::handleLongResponse); + } + @Override public CompletableFuture zremrangebylex( @NonNull String key, @NonNull LexRange minLex, @NonNull LexRange maxLex) { @@ -1313,12 +1773,27 @@ public 
CompletableFuture zincrby( return commandManager.submitNewCommand(ZIncrBy, arguments, this::handleDoubleResponse); } + @Override + public CompletableFuture zincrby( + @NonNull GlideString key, double increment, @NonNull GlideString member) { + GlideString[] arguments = + new GlideString[] {key, gs(Double.toString(increment).getBytes()), member}; + return commandManager.submitNewCommand(ZIncrBy, arguments, this::handleDoubleResponse); + } + @Override public CompletableFuture zintercard(@NonNull String[] keys) { String[] arguments = ArrayUtils.addFirst(keys, Integer.toString(keys.length)); return commandManager.submitNewCommand(ZInterCard, arguments, this::handleLongResponse); } + @Override + public CompletableFuture zintercard(@NonNull GlideString[] keys) { + GlideString[] arguments = + ArrayUtils.addFirst(keys, gs(Integer.toString(keys.length).getBytes())); + return commandManager.submitNewCommand(ZInterCard, arguments, this::handleLongResponse); + } + @Override public CompletableFuture zintercard(@NonNull String[] keys, long limit) { String[] arguments = @@ -1329,6 +1804,16 @@ public CompletableFuture zintercard(@NonNull String[] keys, long limit) { return commandManager.submitNewCommand(ZInterCard, arguments, this::handleLongResponse); } + @Override + public CompletableFuture zintercard(@NonNull GlideString[] keys, long limit) { + GlideString[] arguments = + concatenateArrays( + new GlideString[] {gs(Integer.toString(keys.length).getBytes())}, + keys, + new GlideString[] {gs(LIMIT_REDIS_API), gs(Long.toString(limit).getBytes())}); + return commandManager.submitNewCommand(ZInterCard, arguments, this::handleLongResponse); + } + @Override public CompletableFuture xadd(@NonNull String key, @NonNull Map values) { return xadd(key, values, StreamAddOptions.builder().build()); @@ -1367,12 +1852,23 @@ public CompletableFuture xlen(@NonNull String key) { return commandManager.submitNewCommand(XLen, new String[] {key}, this::handleLongResponse); } + @Override + public 
CompletableFuture xlen(@NonNull GlideString key) { + return commandManager.submitNewCommand(XLen, new GlideString[] {key}, this::handleLongResponse); + } + @Override public CompletableFuture xdel(@NonNull String key, @NonNull String[] ids) { String[] arguments = ArrayUtils.addFirst(ids, key); return commandManager.submitNewCommand(XDel, arguments, this::handleLongResponse); } + @Override + public CompletableFuture xdel(@NonNull GlideString key, @NonNull GlideString[] ids) { + GlideString[] arguments = ArrayUtils.addFirst(ids, key); + return commandManager.submitNewCommand(XDel, arguments, this::handleLongResponse); + } + @Override public CompletableFuture> xrange( @NonNull String key, @NonNull StreamRange start, @NonNull StreamRange end) { @@ -1409,22 +1905,135 @@ public CompletableFuture> xrevrange( response -> castMapOf2DArray(handleMapResponse(response), String.class)); } + @Override + public CompletableFuture xgroupCreate( + @NonNull String key, @NonNull String groupname, @NonNull String id) { + return commandManager.submitNewCommand( + XGroupCreate, new String[] {key, groupname, id}, this::handleStringResponse); + } + + @Override + public CompletableFuture xgroupCreate( + @NonNull String key, + @NonNull String groupname, + @NonNull String id, + @NonNull StreamGroupOptions options) { + String[] arguments = concatenateArrays(new String[] {key, groupname, id}, options.toArgs()); + return commandManager.submitNewCommand(XGroupCreate, arguments, this::handleStringResponse); + } + + @Override + public CompletableFuture xgroupDestroy(@NonNull String key, @NonNull String groupname) { + return commandManager.submitNewCommand( + XGroupDestroy, new String[] {key, groupname}, this::handleBooleanResponse); + } + + @Override + public CompletableFuture xgroupCreateConsumer( + @NonNull String key, @NonNull String group, @NonNull String consumer) { + return commandManager.submitNewCommand( + XGroupCreateConsumer, new String[] {key, group, consumer}, 
this::handleBooleanResponse); + } + + @Override + public CompletableFuture xgroupDelConsumer( + @NonNull String key, @NonNull String group, @NonNull String consumer) { + return commandManager.submitNewCommand( + XGroupDelConsumer, new String[] {key, group, consumer}, this::handleLongResponse); + } + + @Override + public CompletableFuture>> xreadgroup( + @NonNull Map keysAndIds, @NonNull String group, @NonNull String consumer) { + return xreadgroup(keysAndIds, group, consumer, StreamReadGroupOptions.builder().build()); + } + + @Override + public CompletableFuture>> xreadgroup( + @NonNull Map keysAndIds, + @NonNull String group, + @NonNull String consumer, + @NonNull StreamReadGroupOptions options) { + String[] arguments = options.toArgs(group, consumer, keysAndIds); + return commandManager.submitNewCommand(XReadGroup, arguments, this::handleXReadResponse); + } + + @Override + public CompletableFuture xack( + @NonNull String key, @NonNull String group, @NonNull String[] ids) { + String[] args = concatenateArrays(new String[] {key, group}, ids); + return commandManager.submitNewCommand(XAck, args, this::handleLongResponse); + } + + @Override + public CompletableFuture xack( + @NonNull GlideString key, @NonNull GlideString group, @NonNull GlideString[] ids) { + GlideString[] args = concatenateArrays(new GlideString[] {key, group}, ids); + return commandManager.submitNewCommand(XAck, args, this::handleLongResponse); + } + + @Override + public CompletableFuture xpending(@NonNull String key, @NonNull String group) { + return commandManager.submitNewCommand( + XPending, new String[] {key, group}, this::handleArrayOrNullResponse); + } + + @Override + public CompletableFuture xpending( + @NonNull String key, + @NonNull String group, + @NonNull StreamRange start, + @NonNull StreamRange end, + long count) { + return xpending(key, group, start, end, count, StreamPendingOptions.builder().build()); + } + + @Override + public CompletableFuture xpending( + @NonNull String key, + 
@NonNull String group, + @NonNull StreamRange start, + @NonNull StreamRange end, + long count, + @NonNull StreamPendingOptions options) { + String[] args = concatenateArrays(new String[] {key, group}, options.toArgs(start, end, count)); + return commandManager.submitNewCommand( + XPending, args, response -> castArray(handleArrayResponse(response), Object[].class)); + } + @Override public CompletableFuture pttl(@NonNull String key) { return commandManager.submitNewCommand(PTTL, new String[] {key}, this::handleLongResponse); } + @Override + public CompletableFuture pttl(@NonNull GlideString key) { + return commandManager.submitNewCommand(PTTL, new GlideString[] {key}, this::handleLongResponse); + } + @Override public CompletableFuture persist(@NonNull String key) { return commandManager.submitNewCommand( Persist, new String[] {key}, this::handleBooleanResponse); } + @Override + public CompletableFuture persist(@NonNull GlideString key) { + return commandManager.submitNewCommand( + Persist, new GlideString[] {key}, this::handleBooleanResponse); + } + @Override public CompletableFuture type(@NonNull String key) { return commandManager.submitNewCommand(Type, new String[] {key}, this::handleStringResponse); } + @Override + public CompletableFuture type(@NonNull GlideString key) { + return commandManager.submitNewCommand( + Type, new GlideString[] {key}, this::handleStringResponse); + } + @Override public CompletableFuture linsert( @NonNull String key, @@ -1455,12 +2064,24 @@ public CompletableFuture rpushx(@NonNull String key, @NonNull String[] ele return commandManager.submitNewCommand(RPushX, arguments, this::handleLongResponse); } + @Override + public CompletableFuture rpushx(@NonNull GlideString key, @NonNull GlideString[] elements) { + GlideString[] arguments = ArrayUtils.addFirst(elements, key); + return commandManager.submitNewCommand(RPushX, arguments, this::handleLongResponse); + } + @Override public CompletableFuture lpushx(@NonNull String key, @NonNull 
String[] elements) { String[] arguments = ArrayUtils.addFirst(elements, key); return commandManager.submitNewCommand(LPushX, arguments, this::handleLongResponse); } + @Override + public CompletableFuture lpushx(@NonNull GlideString key, @NonNull GlideString[] elements) { + GlideString[] arguments = ArrayUtils.addFirst(elements, key); + return commandManager.submitNewCommand(LPushX, arguments, this::handleLongResponse); + } + @Override public CompletableFuture zrange( @NonNull String key, @NonNull RangeQuery rangeQuery, boolean reverse) { @@ -1581,6 +2202,16 @@ public CompletableFuture geopos(@NonNull String key, @NonNull String response -> castArrayofArrays(handleArrayResponse(response), Double.class)); } + @Override + public CompletableFuture geopos( + @NonNull GlideString key, @NonNull GlideString[] members) { + GlideString[] arguments = concatenateArrays(new GlideString[] {key}, members); + return commandManager.submitNewCommand( + GeoPos, + arguments, + response -> castArrayofArrays(handleArrayResponse(response), Double.class)); + } + @Override public CompletableFuture geodist( @NonNull String key, @@ -1591,6 +2222,16 @@ public CompletableFuture geodist( return commandManager.submitNewCommand(GeoDist, arguments, this::handleDoubleOrNullResponse); } + @Override + public CompletableFuture geodist( + @NonNull GlideString key, + @NonNull GlideString member1, + @NonNull GlideString member2, + @NonNull GeoUnit geoUnit) { + GlideString[] arguments = new GlideString[] {key, member1, member2, gs(geoUnit.getRedisApi())}; + return commandManager.submitNewCommand(GeoDist, arguments, this::handleDoubleOrNullResponse); + } + @Override public CompletableFuture geodist( @NonNull String key, @NonNull String member1, @NonNull String member2) { @@ -1598,6 +2239,13 @@ public CompletableFuture geodist( return commandManager.submitNewCommand(GeoDist, arguments, this::handleDoubleOrNullResponse); } + @Override + public CompletableFuture geodist( + @NonNull GlideString key, @NonNull 
GlideString member1, @NonNull GlideString member2) { + GlideString[] arguments = new GlideString[] {key, member1, member2}; + return commandManager.submitNewCommand(GeoDist, arguments, this::handleDoubleOrNullResponse); + } + @Override public CompletableFuture geohash(@NonNull String key, @NonNull String[] members) { String[] arguments = concatenateArrays(new String[] {key}, members); @@ -1610,6 +2258,12 @@ public CompletableFuture bitcount(@NonNull String key) { return commandManager.submitNewCommand(BitCount, new String[] {key}, this::handleLongResponse); } + @Override + public CompletableFuture bitcount(@NonNull GlideString key) { + return commandManager.submitNewCommand( + BitCount, new GlideString[] {key}, this::handleLongResponse); + } + @Override public CompletableFuture bitcount(@NonNull String key, long start, long end) { return commandManager.submitNewCommand( @@ -1618,6 +2272,16 @@ public CompletableFuture bitcount(@NonNull String key, long start, long en this::handleLongResponse); } + @Override + public CompletableFuture bitcount(@NonNull GlideString key, long start, long end) { + return commandManager.submitNewCommand( + BitCount, + new GlideString[] { + key, gs(Long.toString(start).getBytes()), gs(Long.toString(end).getBytes()) + }, + this::handleLongResponse); + } + @Override public CompletableFuture bitcount( @NonNull String key, long start, long end, @NonNull BitmapIndexType options) { @@ -1626,30 +2290,71 @@ public CompletableFuture bitcount( return commandManager.submitNewCommand(BitCount, arguments, this::handleLongResponse); } + @Override + public CompletableFuture bitcount( + @NonNull GlideString key, long start, long end, @NonNull BitmapIndexType options) { + GlideString[] arguments = + new GlideString[] { + key, + gs(Long.toString(start).getBytes()), + gs(Long.toString(end).getBytes()), + gs(options.toString().getBytes()) + }; + return commandManager.submitNewCommand(BitCount, arguments, this::handleLongResponse); + } + @Override public 
CompletableFuture setbit(@NonNull String key, long offset, long value) { String[] arguments = new String[] {key, Long.toString(offset), Long.toString(value)}; return commandManager.submitNewCommand(SetBit, arguments, this::handleLongResponse); } + @Override + public CompletableFuture setbit(@NonNull GlideString key, long offset, long value) { + GlideString[] arguments = + new GlideString[] {key, gs(Long.toString(offset)), gs(Long.toString(value))}; + return commandManager.submitNewCommand(SetBit, arguments, this::handleLongResponse); + } + @Override public CompletableFuture getbit(@NonNull String key, long offset) { String[] arguments = new String[] {key, Long.toString(offset)}; return commandManager.submitNewCommand(GetBit, arguments, this::handleLongResponse); } + @Override + public CompletableFuture getbit(@NonNull GlideString key, long offset) { + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(offset))}; + return commandManager.submitNewCommand(GetBit, arguments, this::handleLongResponse); + } + @Override public CompletableFuture bitpos(@NonNull String key, long bit) { String[] arguments = new String[] {key, Long.toString(bit)}; return commandManager.submitNewCommand(BitPos, arguments, this::handleLongResponse); } + @Override + public CompletableFuture bitpos(@NonNull GlideString key, long bit) { + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(bit).getBytes())}; + return commandManager.submitNewCommand(BitPos, arguments, this::handleLongResponse); + } + @Override public CompletableFuture bitpos(@NonNull String key, long bit, long start) { String[] arguments = new String[] {key, Long.toString(bit), Long.toString(start)}; return commandManager.submitNewCommand(BitPos, arguments, this::handleLongResponse); } + @Override + public CompletableFuture bitpos(@NonNull GlideString key, long bit, long start) { + GlideString[] arguments = + new GlideString[] { + key, gs(Long.toString(bit).getBytes()), 
gs(Long.toString(start).getBytes()) + }; + return commandManager.submitNewCommand(BitPos, arguments, this::handleLongResponse); + } + @Override public CompletableFuture bitpos(@NonNull String key, long bit, long start, long end) { String[] arguments = @@ -1657,6 +2362,18 @@ public CompletableFuture bitpos(@NonNull String key, long bit, long start, return commandManager.submitNewCommand(BitPos, arguments, this::handleLongResponse); } + @Override + public CompletableFuture bitpos(@NonNull GlideString key, long bit, long start, long end) { + GlideString[] arguments = + new GlideString[] { + key, + gs(Long.toString(bit).getBytes()), + gs(Long.toString(start).getBytes()), + gs(Long.toString(end).getBytes()) + }; + return commandManager.submitNewCommand(BitPos, arguments, this::handleLongResponse); + } + @Override public CompletableFuture bitpos( @NonNull String key, long bit, long start, long end, @NonNull BitmapIndexType options) { @@ -1667,6 +2384,20 @@ public CompletableFuture bitpos( return commandManager.submitNewCommand(BitPos, arguments, this::handleLongResponse); } + @Override + public CompletableFuture bitpos( + @NonNull GlideString key, long bit, long start, long end, @NonNull BitmapIndexType options) { + GlideString[] arguments = + new GlideString[] { + key, + gs(Long.toString(bit).getBytes()), + gs(Long.toString(start).getBytes()), + gs(Long.toString(end).getBytes()), + gs(options.toString().getBytes()) + }; + return commandManager.submitNewCommand(BitPos, arguments, this::handleLongResponse); + } + @Override public CompletableFuture bitop( @NonNull BitwiseOperation bitwiseOperation, @@ -1677,6 +2408,17 @@ public CompletableFuture bitop( return commandManager.submitNewCommand(BitOp, arguments, this::handleLongResponse); } + @Override + public CompletableFuture bitop( + @NonNull BitwiseOperation bitwiseOperation, + @NonNull GlideString destination, + @NonNull GlideString[] keys) { + GlideString[] arguments = + concatenateArrays( + new GlideString[] 
{gs(bitwiseOperation.toString().getBytes()), destination}, keys); + return commandManager.submitNewCommand(BitOp, arguments, this::handleLongResponse); + } + @Override public CompletableFuture> lmpop( @NonNull String[] keys, @NonNull ListDirection direction, long count) { @@ -1737,6 +2479,13 @@ public CompletableFuture lset(@NonNull String key, long index, @NonNull return commandManager.submitNewCommand(LSet, arguments, this::handleStringResponse); } + @Override + public CompletableFuture lset( + @NonNull GlideString key, long index, @NonNull GlideString element) { + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(index)), element}; + return commandManager.submitNewCommand(LSet, arguments, this::handleStringResponse); + } + @Override public CompletableFuture lmove( @NonNull String source, @@ -1812,6 +2561,12 @@ public CompletableFuture sintercard(@NonNull String[] keys) { return commandManager.submitNewCommand(SInterCard, arguments, this::handleLongResponse); } + @Override + public CompletableFuture sintercard(@NonNull GlideString[] keys) { + GlideString[] arguments = ArrayUtils.addFirst(keys, gs(Long.toString(keys.length))); + return commandManager.submitNewCommand(SInterCard, arguments, this::handleLongResponse); + } + @Override public CompletableFuture sintercard(@NonNull String[] keys, long limit) { String[] arguments = @@ -1822,6 +2577,16 @@ public CompletableFuture sintercard(@NonNull String[] keys, long limit) { return commandManager.submitNewCommand(SInterCard, arguments, this::handleLongResponse); } + @Override + public CompletableFuture sintercard(@NonNull GlideString[] keys, long limit) { + GlideString[] arguments = + concatenateArrays( + new GlideString[] {gs(Long.toString(keys.length))}, + keys, + new GlideString[] {gs(SET_LIMIT_REDIS_API), gs(Long.toString(limit))}); + return commandManager.submitNewCommand(SInterCard, arguments, this::handleLongResponse); + } + @Override public CompletableFuture fcall( @NonNull String function, 
@NonNull String[] keys, @NonNull String[] arguments) { @@ -1830,6 +2595,14 @@ public CompletableFuture fcall( return commandManager.submitNewCommand(FCall, args, this::handleObjectOrNullResponse); } + @Override + public CompletableFuture fcallReadOnly( + @NonNull String function, @NonNull String[] keys, @NonNull String[] arguments) { + String[] args = + concatenateArrays(new String[] {function, Long.toString(keys.length)}, keys, arguments); + return commandManager.submitNewCommand(FCallReadOnly, args, this::handleObjectOrNullResponse); + } + @Override public CompletableFuture copy( @NonNull String source, @NonNull String destination, boolean replace) { @@ -1840,12 +2613,29 @@ public CompletableFuture copy( return commandManager.submitNewCommand(Copy, arguments, this::handleBooleanResponse); } + @Override + public CompletableFuture copy( + @NonNull GlideString source, @NonNull GlideString destination, boolean replace) { + GlideString[] arguments = new GlideString[] {source, destination}; + if (replace) { + arguments = ArrayUtils.add(arguments, gs(REPLACE_REDIS_API)); + } + return commandManager.submitNewCommand(Copy, arguments, this::handleBooleanResponse); + } + @Override public CompletableFuture copy(@NonNull String source, @NonNull String destination) { String[] arguments = new String[] {source, destination}; return commandManager.submitNewCommand(Copy, arguments, this::handleBooleanResponse); } + @Override + public CompletableFuture copy( + @NonNull GlideString source, @NonNull GlideString destination) { + GlideString[] arguments = new GlideString[] {source, destination}; + return commandManager.submitNewCommand(Copy, arguments, this::handleBooleanResponse); + } + @Override public CompletableFuture msetnx(@NonNull Map keyValueMap) { String[] args = convertMapToKeyValueStringArray(keyValueMap); @@ -1863,4 +2653,135 @@ public CompletableFuture lcsLen(@NonNull String key1, @NonNull String key2 String[] arguments = new String[] {key1, key2, LEN_REDIS_API}; return 
commandManager.submitNewCommand(LCS, arguments, this::handleLongResponse); } + + @Override + public CompletableFuture> lcsIdx(@NonNull String key1, @NonNull String key2) { + String[] arguments = new String[] {key1, key2, IDX_COMMAND_STRING}; + return commandManager.submitNewCommand( + LCS, arguments, response -> handleLcsIdxResponse(handleMapResponse(response))); + } + + @Override + public CompletableFuture> lcsIdx( + @NonNull String key1, @NonNull String key2, long minMatchLen) { + String[] arguments = + new String[] { + key1, key2, IDX_COMMAND_STRING, MINMATCHLEN_COMMAND_STRING, String.valueOf(minMatchLen) + }; + return commandManager.submitNewCommand( + LCS, arguments, response -> handleLcsIdxResponse(handleMapResponse(response))); + } + + @Override + public CompletableFuture> lcsIdxWithMatchLen( + @NonNull String key1, @NonNull String key2) { + String[] arguments = new String[] {key1, key2, IDX_COMMAND_STRING, WITHMATCHLEN_COMMAND_STRING}; + return commandManager.submitNewCommand(LCS, arguments, this::handleMapResponse); + } + + @Override + public CompletableFuture> lcsIdxWithMatchLen( + @NonNull String key1, @NonNull String key2, long minMatchLen) { + String[] arguments = + concatenateArrays( + new String[] { + key1, + key2, + IDX_COMMAND_STRING, + MINMATCHLEN_COMMAND_STRING, + String.valueOf(minMatchLen), + WITHMATCHLEN_COMMAND_STRING + }); + return commandManager.submitNewCommand(LCS, arguments, this::handleMapResponse); + } + + @Override + public CompletableFuture watch(@NonNull String[] keys) { + return commandManager.submitNewCommand(Watch, keys, this::handleStringResponse); + } + + @Override + public CompletableFuture watch(@NonNull GlideString[] keys) { + return commandManager.submitNewCommand(Watch, keys, this::handleStringResponse); + } + + @Override + public CompletableFuture> sunion(@NonNull String[] keys) { + return commandManager.submitNewCommand(SUnion, keys, this::handleSetResponse); + } + + // Hack: convert all `byte[]` -> `GlideString`. 
Better doing it here in the Java realm + // rather than doing it in the Rust code using JNI calls (performance) + private Object convertByteArrayToGlideString(Object o) { + if (o == null) return o; + + if (o instanceof byte[]) { + o = GlideString.of((byte[]) o); + } else if (o.getClass().isArray()) { + var array = (Object[]) o; + for (var i = 0; i < array.length; i++) { + array[i] = convertByteArrayToGlideString(array[i]); + } + } else if (o instanceof Set) { + var set = (Set) o; + o = set.stream().map(this::convertByteArrayToGlideString).collect(Collectors.toSet()); + } else if (o instanceof Map) { + var map = (Map) o; + o = + map.entrySet().stream() + .collect( + HashMap::new, + (m, e) -> + m.put( + convertByteArrayToGlideString(e.getKey()), + convertByteArrayToGlideString(e.getValue())), + HashMap::putAll); + } + return o; + } + + @Override + public CompletableFuture dump(@NonNull GlideString key) { + GlideString[] arguments = new GlideString[] {key}; + return commandManager.submitNewCommand(Dump, arguments, this::handleBytesOrNullResponse); + } + + @Override + public CompletableFuture restore( + @NonNull GlideString key, long ttl, @NonNull byte[] value) { + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(ttl).getBytes()), gs(value)}; + return commandManager.submitNewCommand(Restore, arguments, this::handleStringResponse); + } + + @Override + public CompletableFuture restore( + @NonNull GlideString key, + long ttl, + @NonNull byte[] value, + @NonNull RestoreOptions restoreOptions) { + GlideString[] arguments = restoreOptions.toArgs(key, ttl, value); + return commandManager.submitNewCommand(Restore, arguments, this::handleStringResponse); + } + + @Override + public CompletableFuture sort(@NonNull String key) { + return commandManager.submitNewCommand( + Sort, + new String[] {key}, + response -> castArray(handleArrayResponse(response), String.class)); + } + + @Override + public CompletableFuture sortReadOnly(@NonNull String key) { + return 
commandManager.submitNewCommand( + SortReadOnly, + new String[] {key}, + response -> castArray(handleArrayResponse(response), String.class)); + } + + @Override + public CompletableFuture sortStore(@NonNull String key, @NonNull String destination) { + return commandManager.submitNewCommand( + Sort, new String[] {key, STORE_COMMAND_STRING, destination}, this::handleLongResponse); + } } diff --git a/java/client/src/main/java/glide/api/RedisClient.java b/java/client/src/main/java/glide/api/RedisClient.java index f4a6057844..0f8607afb7 100644 --- a/java/client/src/main/java/glide/api/RedisClient.java +++ b/java/client/src/main/java/glide/api/RedisClient.java @@ -1,6 +1,9 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api; +import static glide.api.models.GlideString.gs; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; +import static glide.api.models.commands.SortOptions.STORE_COMMAND_STRING; import static glide.api.models.commands.function.FunctionListOptions.LIBRARY_NAME_REDIS_API; import static glide.api.models.commands.function.FunctionListOptions.WITH_CODE_REDIS_API; import static glide.api.models.commands.function.FunctionLoadOptions.REPLACE; @@ -18,27 +21,38 @@ import static redis_request.RedisRequestOuterClass.RequestType.DBSize; import static redis_request.RedisRequestOuterClass.RequestType.Echo; import static redis_request.RedisRequestOuterClass.RequestType.FlushAll; +import static redis_request.RedisRequestOuterClass.RequestType.FlushDB; import static redis_request.RedisRequestOuterClass.RequestType.FunctionDelete; +import static redis_request.RedisRequestOuterClass.RequestType.FunctionDump; import static redis_request.RedisRequestOuterClass.RequestType.FunctionFlush; import static redis_request.RedisRequestOuterClass.RequestType.FunctionKill; import static 
redis_request.RedisRequestOuterClass.RequestType.FunctionList; import static redis_request.RedisRequestOuterClass.RequestType.FunctionLoad; +import static redis_request.RedisRequestOuterClass.RequestType.FunctionRestore; import static redis_request.RedisRequestOuterClass.RequestType.FunctionStats; import static redis_request.RedisRequestOuterClass.RequestType.Info; import static redis_request.RedisRequestOuterClass.RequestType.LastSave; import static redis_request.RedisRequestOuterClass.RequestType.Lolwut; import static redis_request.RedisRequestOuterClass.RequestType.Move; import static redis_request.RedisRequestOuterClass.RequestType.Ping; +import static redis_request.RedisRequestOuterClass.RequestType.RandomKey; import static redis_request.RedisRequestOuterClass.RequestType.Select; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.Time; +import static redis_request.RedisRequestOuterClass.RequestType.UnWatch; import glide.api.commands.ConnectionManagementCommands; import glide.api.commands.GenericCommands; import glide.api.commands.ScriptingAndFunctionsCommands; import glide.api.commands.ServerManagementCommands; +import glide.api.commands.TransactionsCommands; +import glide.api.models.GlideString; import glide.api.models.Transaction; import glide.api.models.commands.FlushMode; import glide.api.models.commands.InfoOptions; +import glide.api.models.commands.SortOptions; +import glide.api.models.commands.function.FunctionRestorePolicy; import glide.api.models.configuration.RedisClientConfiguration; import glide.managers.CommandManager; import glide.managers.ConnectionManager; @@ -56,7 +70,8 @@ public class RedisClient extends BaseClient implements GenericCommands, ServerManagementCommands, ConnectionManagementCommands, - ScriptingAndFunctionsCommands { + ScriptingAndFunctionsCommands, + TransactionsCommands 
{ protected RedisClient(ConnectionManager connectionManager, CommandManager commandManager) { super(connectionManager, commandManager); @@ -150,6 +165,12 @@ public CompletableFuture echo(@NonNull String message) { Echo, new String[] {message}, this::handleStringResponse); } + @Override + public CompletableFuture echo(@NonNull GlideString message) { + return commandManager.submitNewCommand( + Echo, new GlideString[] {message}, this::handleGlideStringResponse); + } + @Override public CompletableFuture time() { return commandManager.submitNewCommand( @@ -172,6 +193,17 @@ public CompletableFuture flushall(@NonNull FlushMode mode) { FlushAll, new String[] {mode.toString()}, this::handleStringResponse); } + @Override + public CompletableFuture flushdb() { + return commandManager.submitNewCommand(FlushDB, new String[0], this::handleStringResponse); + } + + @Override + public CompletableFuture flushdb(@NonNull FlushMode mode) { + return commandManager.submitNewCommand( + FlushDB, new String[] {mode.toString()}, this::handleStringResponse); + } + @Override public CompletableFuture lolwut() { return commandManager.submitNewCommand(Lolwut, new String[0], this::handleStringResponse); @@ -219,6 +251,12 @@ public CompletableFuture move(@NonNull String key, long dbIndex) { Move, new String[] {key, Long.toString(dbIndex)}, this::handleBooleanResponse); } + @Override + public CompletableFuture move(@NonNull GlideString key, long dbIndex) { + return commandManager.submitNewCommand( + Move, new GlideString[] {key, gs(Long.toString(dbIndex))}, this::handleBooleanResponse); + } + @Override public CompletableFuture[]> functionList(boolean withCode) { return commandManager.submitNewCommand( @@ -256,11 +294,37 @@ public CompletableFuture functionDelete(@NonNull String libName) { FunctionDelete, new String[] {libName}, this::handleStringResponse); } + @Override + public CompletableFuture functionDump() { + return commandManager.submitNewCommand( + FunctionDump, new GlideString[0], 
this::handleBytesOrNullResponse); + } + + @Override + public CompletableFuture functionRestore(byte @NonNull [] payload) { + return commandManager.submitNewCommand( + FunctionRestore, new GlideString[] {gs(payload)}, this::handleStringResponse); + } + + @Override + public CompletableFuture functionRestore( + byte @NonNull [] payload, @NonNull FunctionRestorePolicy policy) { + return commandManager.submitNewCommand( + FunctionRestore, + new GlideString[] {gs(payload), gs(policy.toString())}, + this::handleStringResponse); + } + @Override public CompletableFuture fcall(@NonNull String function) { return fcall(function, new String[0], new String[0]); } + @Override + public CompletableFuture fcallReadOnly(@NonNull String function) { + return fcallReadOnly(function, new String[0], new String[0]); + } + @Override public CompletableFuture copy( @NonNull String source, @NonNull String destination, long destinationDB) { @@ -269,6 +333,14 @@ public CompletableFuture copy( return commandManager.submitNewCommand(Copy, arguments, this::handleBooleanResponse); } + @Override + public CompletableFuture copy( + @NonNull GlideString source, @NonNull GlideString destination, long destinationDB) { + GlideString[] arguments = + new GlideString[] {source, destination, gs(DB_REDIS_API), gs(Long.toString(destinationDB))}; + return commandManager.submitNewCommand(Copy, arguments, this::handleBooleanResponse); + } + @Override public CompletableFuture copy( @NonNull String source, @NonNull String destination, long destinationDB, boolean replace) { @@ -280,6 +352,20 @@ public CompletableFuture copy( return commandManager.submitNewCommand(Copy, arguments, this::handleBooleanResponse); } + @Override + public CompletableFuture copy( + @NonNull GlideString source, + @NonNull GlideString destination, + long destinationDB, + boolean replace) { + GlideString[] arguments = + new GlideString[] {source, destination, gs(DB_REDIS_API), gs(Long.toString(destinationDB))}; + if (replace) { + arguments = 
ArrayUtils.add(arguments, gs(REPLACE_REDIS_API)); + } + return commandManager.submitNewCommand(Copy, arguments, this::handleBooleanResponse); + } + @Override public CompletableFuture functionKill() { return commandManager.submitNewCommand(FunctionKill, new String[0], this::handleStringResponse); @@ -292,4 +378,41 @@ public CompletableFuture>> functionStats() { new String[0], response -> handleFunctionStatsResponse(handleMapResponse(response))); } + + @Override + public CompletableFuture unwatch() { + return commandManager.submitNewCommand(UnWatch, new String[0], this::handleStringResponse); + } + + @Override + public CompletableFuture randomKey() { + return commandManager.submitNewCommand( + RandomKey, new String[0], this::handleStringOrNullResponse); + } + + @Override + public CompletableFuture sort(@NonNull String key, @NonNull SortOptions sortOptions) { + String[] arguments = ArrayUtils.addFirst(sortOptions.toArgs(), key); + return commandManager.submitNewCommand( + Sort, arguments, response -> castArray(handleArrayResponse(response), String.class)); + } + + @Override + public CompletableFuture sortReadOnly( + @NonNull String key, @NonNull SortOptions sortOptions) { + String[] arguments = ArrayUtils.addFirst(sortOptions.toArgs(), key); + return commandManager.submitNewCommand( + SortReadOnly, + arguments, + response -> castArray(handleArrayResponse(response), String.class)); + } + + @Override + public CompletableFuture sortStore( + @NonNull String key, @NonNull String destination, @NonNull SortOptions sortOptions) { + String[] storeArguments = new String[] {STORE_COMMAND_STRING, destination}; + String[] arguments = + concatenateArrays(new String[] {key}, sortOptions.toArgs(), storeArguments); + return commandManager.submitNewCommand(Sort, arguments, this::handleLongResponse); + } } diff --git a/java/client/src/main/java/glide/api/RedisClusterClient.java b/java/client/src/main/java/glide/api/RedisClusterClient.java index 5b7e7f5a16..26fc3e86e7 100644 --- 
a/java/client/src/main/java/glide/api/RedisClusterClient.java +++ b/java/client/src/main/java/glide/api/RedisClusterClient.java @@ -1,7 +1,9 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api; import static glide.api.commands.ServerManagementCommands.VERSION_REDIS_API; +import static glide.api.models.GlideString.gs; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; import static glide.api.models.commands.function.FunctionListOptions.LIBRARY_NAME_REDIS_API; import static glide.api.models.commands.function.FunctionListOptions.WITH_CODE_REDIS_API; import static glide.api.models.commands.function.FunctionLoadOptions.REPLACE; @@ -19,27 +21,39 @@ import static redis_request.RedisRequestOuterClass.RequestType.DBSize; import static redis_request.RedisRequestOuterClass.RequestType.Echo; import static redis_request.RedisRequestOuterClass.RequestType.FCall; +import static redis_request.RedisRequestOuterClass.RequestType.FCallReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.FlushAll; +import static redis_request.RedisRequestOuterClass.RequestType.FlushDB; import static redis_request.RedisRequestOuterClass.RequestType.FunctionDelete; +import static redis_request.RedisRequestOuterClass.RequestType.FunctionDump; import static redis_request.RedisRequestOuterClass.RequestType.FunctionFlush; import static redis_request.RedisRequestOuterClass.RequestType.FunctionKill; import static redis_request.RedisRequestOuterClass.RequestType.FunctionList; import static redis_request.RedisRequestOuterClass.RequestType.FunctionLoad; +import static redis_request.RedisRequestOuterClass.RequestType.FunctionRestore; import static redis_request.RedisRequestOuterClass.RequestType.FunctionStats; import static redis_request.RedisRequestOuterClass.RequestType.Info; import static 
redis_request.RedisRequestOuterClass.RequestType.LastSave; import static redis_request.RedisRequestOuterClass.RequestType.Lolwut; import static redis_request.RedisRequestOuterClass.RequestType.Ping; +import static redis_request.RedisRequestOuterClass.RequestType.RandomKey; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.Time; +import static redis_request.RedisRequestOuterClass.RequestType.UnWatch; import glide.api.commands.ConnectionManagementClusterCommands; import glide.api.commands.GenericClusterCommands; import glide.api.commands.ScriptingAndFunctionsClusterCommands; import glide.api.commands.ServerManagementClusterCommands; +import glide.api.commands.TransactionsClusterCommands; import glide.api.models.ClusterTransaction; import glide.api.models.ClusterValue; +import glide.api.models.GlideString; import glide.api.models.commands.FlushMode; import glide.api.models.commands.InfoOptions; +import glide.api.models.commands.SortClusterOptions; +import glide.api.models.commands.function.FunctionRestorePolicy; import glide.api.models.configuration.RedisClusterClientConfiguration; import glide.api.models.configuration.RequestRoutingConfiguration.Route; import glide.api.models.configuration.RequestRoutingConfiguration.SingleNodeRoute; @@ -51,6 +65,7 @@ import java.util.Optional; import java.util.concurrent.CompletableFuture; import lombok.NonNull; +import org.apache.commons.lang3.ArrayUtils; import response.ResponseOuterClass.Response; /** @@ -61,7 +76,8 @@ public class RedisClusterClient extends BaseClient implements ConnectionManagementClusterCommands, GenericClusterCommands, ServerManagementClusterCommands, - ScriptingAndFunctionsClusterCommands { + ScriptingAndFunctionsClusterCommands, + TransactionsClusterCommands { protected RedisClusterClient(ConnectionManager connectionManager, CommandManager 
commandManager) { super(connectionManager, commandManager); @@ -269,6 +285,12 @@ public CompletableFuture echo(@NonNull String message) { Echo, new String[] {message}, this::handleStringResponse); } + @Override + public CompletableFuture echo(@NonNull GlideString message) { + return commandManager.submitNewCommand( + Echo, new GlideString[] {message}, this::handleGlideStringResponse); + } + @Override public CompletableFuture> echo( @NonNull String message, @NonNull Route route) { @@ -282,6 +304,19 @@ public CompletableFuture> echo( : ClusterValue.ofMultiValue(handleMapResponse(response))); } + @Override + public CompletableFuture> echo( + @NonNull GlideString message, @NonNull Route route) { + return commandManager.submitNewCommand( + Echo, + new GlideString[] {message}, + route, + response -> + route instanceof SingleNodeRoute + ? ClusterValue.ofSingleValue(handleGlideStringResponse(response)) + : ClusterValue.ofMultiValueBinary(handleBinaryStringMapResponse(response))); + } + @Override public CompletableFuture time() { return commandManager.submitNewCommand( @@ -330,18 +365,40 @@ public CompletableFuture flushall(@NonNull FlushMode mode) { } @Override - public CompletableFuture flushall(@NonNull SingleNodeRoute route) { + public CompletableFuture flushall(@NonNull Route route) { return commandManager.submitNewCommand( FlushAll, new String[0], route, this::handleStringResponse); } @Override - public CompletableFuture flushall( - @NonNull FlushMode mode, @NonNull SingleNodeRoute route) { + public CompletableFuture flushall(@NonNull FlushMode mode, @NonNull Route route) { return commandManager.submitNewCommand( FlushAll, new String[] {mode.toString()}, route, this::handleStringResponse); } + @Override + public CompletableFuture flushdb() { + return commandManager.submitNewCommand(FlushDB, new String[0], this::handleStringResponse); + } + + @Override + public CompletableFuture flushdb(@NonNull FlushMode mode) { + return commandManager.submitNewCommand( + FlushDB, new 
String[] {mode.toString()}, this::handleStringResponse); + } + + @Override + public CompletableFuture flushdb(@NonNull Route route) { + return commandManager.submitNewCommand( + FlushDB, new String[0], route, this::handleStringResponse); + } + + @Override + public CompletableFuture flushdb(@NonNull FlushMode mode, @NonNull Route route) { + return commandManager.submitNewCommand( + FlushDB, new String[] {mode.toString()}, route, this::handleStringResponse); + } + @Override public CompletableFuture lolwut() { return commandManager.submitNewCommand(Lolwut, new String[0], this::handleStringResponse); @@ -547,6 +604,55 @@ public CompletableFuture functionDelete(@NonNull String libName, @NonNul FunctionDelete, new String[] {libName}, route, this::handleStringResponse); } + @Override + public CompletableFuture functionDump() { + return commandManager.submitNewCommand( + FunctionDump, new GlideString[] {}, this::handleBytesOrNullResponse); + } + + @Override + public CompletableFuture> functionDump(@NonNull Route route) { + return commandManager.submitNewCommand( + FunctionDump, + new GlideString[] {}, + route, + response -> + route instanceof SingleNodeRoute + ? 
ClusterValue.ofSingleValue(handleBytesOrNullResponse(response)) + : ClusterValue.ofMultiValueBinary(handleBinaryStringMapResponse(response))); + } + + @Override + public CompletableFuture functionRestore(byte @NonNull [] payload) { + return commandManager.submitNewCommand( + FunctionRestore, new GlideString[] {gs(payload)}, this::handleStringResponse); + } + + @Override + public CompletableFuture functionRestore( + byte @NonNull [] payload, @NonNull FunctionRestorePolicy policy) { + return commandManager.submitNewCommand( + FunctionRestore, + new GlideString[] {gs(payload), gs(policy.toString())}, + this::handleStringResponse); + } + + @Override + public CompletableFuture functionRestore(byte @NonNull [] payload, @NonNull Route route) { + return commandManager.submitNewCommand( + FunctionRestore, new GlideString[] {gs(payload)}, route, this::handleStringResponse); + } + + @Override + public CompletableFuture functionRestore( + byte @NonNull [] payload, @NonNull FunctionRestorePolicy policy, @NonNull Route route) { + return commandManager.submitNewCommand( + FunctionRestore, + new GlideString[] {gs(payload), gs(policy.toString())}, + route, + this::handleStringResponse); + } + @Override public CompletableFuture fcall(@NonNull String function) { return fcall(function, new String[0]); @@ -578,6 +684,38 @@ public CompletableFuture> fcall( : ClusterValue.ofMultiValue(handleMapResponse(response))); } + @Override + public CompletableFuture fcallReadOnly(@NonNull String function) { + return fcallReadOnly(function, new String[0]); + } + + @Override + public CompletableFuture> fcallReadOnly( + @NonNull String function, @NonNull Route route) { + return fcallReadOnly(function, new String[0], route); + } + + @Override + public CompletableFuture fcallReadOnly( + @NonNull String function, @NonNull String[] arguments) { + String[] args = concatenateArrays(new String[] {function, "0"}, arguments); // 0 - key count + return commandManager.submitNewCommand(FCallReadOnly, args, 
this::handleObjectOrNullResponse); + } + + @Override + public CompletableFuture> fcallReadOnly( + @NonNull String function, @NonNull String[] arguments, @NonNull Route route) { + String[] args = concatenateArrays(new String[] {function, "0"}, arguments); // 0 - key count + return commandManager.submitNewCommand( + FCallReadOnly, + args, + route, + response -> + route instanceof SingleNodeRoute + ? ClusterValue.ofSingleValue(handleObjectOrNullResponse(response)) + : ClusterValue.ofMultiValue(handleMapResponse(response))); + } + @Override public CompletableFuture functionKill() { return commandManager.submitNewCommand(FunctionKill, new String[0], this::handleStringResponse); @@ -618,4 +756,56 @@ public CompletableFuture>>> functio route, response -> handleFunctionStatsResponse(response, route instanceof SingleNodeRoute)); } + + @Override + public CompletableFuture unwatch(@NonNull Route route) { + return commandManager.submitNewCommand( + UnWatch, new String[0], route, this::handleStringResponse); + } + + @Override + public CompletableFuture unwatch() { + return commandManager.submitNewCommand(UnWatch, new String[0], this::handleStringResponse); + } + + @Override + public CompletableFuture randomKey(@NonNull Route route) { + return commandManager.submitNewCommand( + RandomKey, new String[0], route, this::handleStringOrNullResponse); + } + + @Override + public CompletableFuture randomKey() { + return commandManager.submitNewCommand( + RandomKey, new String[0], this::handleStringOrNullResponse); + } + + @Override + public CompletableFuture sort( + @NonNull String key, @NonNull SortClusterOptions sortClusterOptions) { + String[] arguments = ArrayUtils.addFirst(sortClusterOptions.toArgs(), key); + return commandManager.submitNewCommand( + Sort, arguments, response -> castArray(handleArrayResponse(response), String.class)); + } + + @Override + public CompletableFuture sortReadOnly( + @NonNull String key, @NonNull SortClusterOptions sortClusterOptions) { + String[] 
arguments = ArrayUtils.addFirst(sortClusterOptions.toArgs(), key); + return commandManager.submitNewCommand( + SortReadOnly, + arguments, + response -> castArray(handleArrayResponse(response), String.class)); + } + + @Override + public CompletableFuture sortStore( + @NonNull String key, + @NonNull String destination, + @NonNull SortClusterOptions sortClusterOptions) { + String[] storeArguments = new String[] {STORE_COMMAND_STRING, destination}; + String[] arguments = + concatenateArrays(new String[] {key}, sortClusterOptions.toArgs(), storeArguments); + return commandManager.submitNewCommand(Sort, arguments, this::handleLongResponse); + } } diff --git a/java/client/src/main/java/glide/api/ResponseFlags.java b/java/client/src/main/java/glide/api/ResponseFlags.java index 690a9ca00a..84a5c666df 100644 --- a/java/client/src/main/java/glide/api/ResponseFlags.java +++ b/java/client/src/main/java/glide/api/ResponseFlags.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api; public enum ResponseFlags { diff --git a/java/client/src/main/java/glide/api/commands/BitmapBaseCommands.java b/java/client/src/main/java/glide/api/commands/BitmapBaseCommands.java index 508cff39ca..f892424449 100644 --- a/java/client/src/main/java/glide/api/commands/BitmapBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/BitmapBaseCommands.java @@ -1,9 +1,10 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; import static glide.api.models.commands.bitmap.BitFieldOptions.BitFieldReadOnlySubCommands; import static glide.api.models.commands.bitmap.BitFieldOptions.BitFieldSubCommands; +import glide.api.models.GlideString; import glide.api.models.commands.bitmap.BitFieldOptions.BitFieldGet; 
import glide.api.models.commands.bitmap.BitFieldOptions.BitFieldIncrby; import glide.api.models.commands.bitmap.BitFieldOptions.BitFieldOverflow; @@ -37,6 +38,21 @@ public interface BitmapBaseCommands { */ CompletableFuture bitcount(String key); + /** + * Counts the number of set bits (population counting) in a string stored at key. + * + * @see valkey.io for details. + * @param key The key for the string to count the set bits of. + * @return The number of set bits in the string. Returns zero if the key is missing as it is + * treated as an empty string. + * @example + *
{@code
+     * Long payload = client.bitcount(gs("myKey1")).get();
+     * assert payload == 2L; // The string stored at "myKey1" contains 2 set bits.
+     * }
+ */ + CompletableFuture bitcount(GlideString key); + /** * Counts the number of set bits (population counting) in a string stored at key. The * offsets start and end are zero-based indexes, with 0 @@ -59,6 +75,28 @@ public interface BitmapBaseCommands { */ CompletableFuture bitcount(String key, long start, long end); + /** + * Counts the number of set bits (population counting) in a string stored at key. The + * offsets start and end are zero-based indexes, with 0 + * being the first element of the list, 1 being the next element and so on. These + * offsets can also be negative numbers indicating offsets starting at the end of the list, with + * -1 being the last element of the list, -2 being the penultimate, and + * so on. + * + * @see valkey.io for details. + * @param key The key for the string to count the set bits of. + * @param start The starting offset byte index. + * @param end The ending offset byte index. + * @return The number of set bits in the string byte interval specified by start and + * end. Returns zero if the key is missing as it is treated as an empty string. + * @example + *
{@code
+     * Long payload = client.bitcount(gs("myKey1"), 1, 3).get();
+     * assert payload == 2L; // The second to fourth bytes of the string stored at "myKey1" contains 2 set bits.
+     * }
+ */ + CompletableFuture bitcount(GlideString key, long start, long end); + /** * Counts the number of set bits (population counting) in a string stored at key. The * offsets start and end are zero-based indexes, with 0 @@ -85,6 +123,32 @@ public interface BitmapBaseCommands { */ CompletableFuture bitcount(String key, long start, long end, BitmapIndexType options); + /** + * Counts the number of set bits (population counting) in a string stored at key. The + * offsets start and end are zero-based indexes, with 0 + * being the first element of the list, 1 being the next element and so on. These + * offsets can also be negative numbers indicating offsets starting at the end of the list, with + * -1 being the last element of the list, -2 being the penultimate, and + * so on. + * + * @since Redis 7.0 and above + * @see valkey.io for details. + * @param key The key for the string to count the set bits of. + * @param start The starting offset. + * @param end The ending offset. + * @param options The index offset type. Could be either {@link BitmapIndexType#BIT} or {@link + * BitmapIndexType#BYTE}. + * @return The number of set bits in the string interval specified by start, + * end, and options. Returns zero if the key is missing as it is treated + * as an empty string. + * @example + *
{@code
+     * Long payload = client.bitcount(gs("myKey1"), 1, 1, BIT).get();
+     * assert payload == 1L; // Indicates that the second bit of the string stored at "myKey1" is set.
+     * }
+ */ + CompletableFuture bitcount(GlideString key, long start, long end, BitmapIndexType options); + /** * Sets or clears the bit at offset in the string value stored at key. * The offset is a zero-based index, with 0 being the first element of @@ -107,6 +171,28 @@ public interface BitmapBaseCommands { */ CompletableFuture setbit(String key, long offset, long value); + /** + * Sets or clears the bit at offset in the string value stored at key. + * The offset is a zero-based index, with 0 being the first element of + * the list, 1 being the next element, and so on. The offset must be + * less than 2^32 and greater than or equal to 0. If a key is + * non-existent then the bit at offset is set to value and the preceding + * bits are set to 0. + * + * @see valkey.io for details. + * @param key The key of the string. + * @param offset The index of the bit to be set. + * @param value The bit value to set at offset. The value must be 0 or + * 1. + * @return The bit value that was previously stored at offset. + * @example + *
{@code
+     * Long payload = client.setbit(gs("myKey1"), 1, 1).get();
+     * assert payload == 0L; // The second bit value was 0 before setting to 1.
+     * }
+ */ + CompletableFuture setbit(GlideString key, long offset, long value); + /** * Returns the bit value at offset in the string value stored at key. * offset should be greater than or equal to zero. @@ -125,6 +211,24 @@ public interface BitmapBaseCommands { */ CompletableFuture getbit(String key, long offset); + /** + * Returns the bit value at offset in the string value stored at key. + * offset should be greater than or equal to zero. + * + * @see valkey.io for details. + * @param key The key of the string. + * @param offset The index of the bit to return. + * @return The bit at offset of the string. Returns zero if the key is empty or if the positive + * offset exceeds the length of the string. + * @example + *
{@code
+     * client.set(gs("sampleKey"), gs("A")); // "A" has binary value 01000001
+     * Long payload = client.getbit(gs("sampleKey"), 1).get();
+     * assert payload == 1L; // The second bit for string stored at "sampleKey" is set to 1.
+     * }
+ */ + CompletableFuture getbit(GlideString key, long offset); + /** * Returns the position of the first bit matching the given bit value. * @@ -144,6 +248,25 @@ public interface BitmapBaseCommands { */ CompletableFuture bitpos(String key, long bit); + /** + * Returns the position of the first bit matching the given bit value. + * + * @see valkey.io for details. + * @param key The key of the string. + * @param bit The bit value to match. The value must be 0 or 1. + * @return The position of the first occurrence matching bit in the binary value of + * the string held at key. If bit is not found, a -1 is + * returned. + * @example + *
{@code
+     * Long payload = client.bitpos(gs("myKey1"), 0).get();
+     * // Indicates that the first occurrence of a 0 bit value is the fourth bit of the binary value
+     * // of the string stored at "myKey1".
+     * assert payload == 3L;
+     * }
+ */ + CompletableFuture bitpos(GlideString key, long bit); + /** * Returns the position of the first bit matching the given bit value. The offset * start is a zero-based index, with 0 being the first byte of the list, @@ -168,6 +291,30 @@ public interface BitmapBaseCommands { */ CompletableFuture bitpos(String key, long bit, long start); + /** + * Returns the position of the first bit matching the given bit value. The offset + * start is a zero-based index, with 0 being the first byte of the list, + * 1 being the next byte and so on. These offsets can also be negative numbers + * indicating offsets starting at the end of the list, with -1 being the last byte of + * the list, -2 being the penultimate, and so on. + * + * @see valkey.io for details. + * @param key The key of the string. + * @param bit The bit value to match. The value must be 0 or 1. + * @param start The starting offset. + * @return The position of the first occurrence beginning at the start offset of the + * bit in the binary value of the string held at key. If bit + * is not found, a -1 is returned. + * @example + *
{@code
+     * Long payload = client.bitpos(gs("myKey1"), 1, 4).get();
+     * // Indicates that the first occurrence of a 1 bit value starting from the fifth byte is the 34th
+     * // bit of the binary value of the string stored at "myKey1".
+     * assert payload == 33L;
+     * }
+ */ + CompletableFuture bitpos(GlideString key, long bit, long start); + /** * Returns the position of the first bit matching the given bit value. The offsets * start and end are zero-based indexes, with 0 being the @@ -193,6 +340,31 @@ public interface BitmapBaseCommands { */ CompletableFuture bitpos(String key, long bit, long start, long end); + /** + * Returns the position of the first bit matching the given bit value. The offsets + * start and end are zero-based indexes, with 0 being the + * first byte of the list, 1 being the next byte and so on. These offsets can also be + * negative numbers indicating offsets starting at the end of the list, with -1 being + * the last byte of the list, -2 being the penultimate, and so on. + * + * @see valkey.io for details. + * @param key The key of the string. + * @param bit The bit value to match. The value must be 0 or 1. + * @param start The starting offset. + * @param end The ending offset. + * @return The position of the first occurrence from the start to the end + * offsets of the bit in the binary value of the string held at key + * . If bit is not found, a -1 is returned. + * @example + *
{@code
+     * Long payload = client.bitpos(gs("myKey1"), 1, 4, 6).get();
+     * // Indicates that the first occurrence of a 1 bit value starting from the fifth to seventh
+     * // bytes is the 34th bit of the binary value of the string stored at "myKey1".
+     * assert payload == 33L;
+     * }
+ */ + CompletableFuture bitpos(GlideString key, long bit, long start, long end); + /** * Returns the position of the first bit matching the given bit value. The offset * offsetType specifies whether the offset is a BIT or BYTE. If BIT is specified, @@ -225,6 +397,38 @@ public interface BitmapBaseCommands { CompletableFuture bitpos( String key, long bit, long start, long end, BitmapIndexType offsetType); + /** + * Returns the position of the first bit matching the given bit value. The offset + * offsetType specifies whether the offset is a BIT or BYTE. If BIT is specified, + * start==0 and end==2 means to look at the first three bits. If BYTE is + * specified, start==0 and end==2 means to look at the first three bytes + * The offsets are zero-based indexes, with 0 being the first element of the list, + * 1 being the next, and so on. These offsets can also be negative numbers indicating + * offsets starting at the end of the list, with -1 being the last element of the + * list, -2 being the penultimate, and so on. + * + * @since Redis 7.0 and above. + * @see valkey.io for details. + * @param key The key of the string. + * @param bit The bit value to match. The value must be 0 or 1. + * @param start The starting offset. + * @param end The ending offset. + * @param offsetType The index offset type. Could be either {@link BitmapIndexType#BIT} or {@link + * BitmapIndexType#BYTE}. + * @return The position of the first occurrence from the start to the end + * offsets of the bit in the binary value of the string held at key + * . If bit is not found, a -1 is returned. + * @example + *
{@code
+     * Long payload = client.bitpos(gs("myKey1"), 1, 4, 6, BIT).get();
+     * // Indicates that the first occurrence of a 1 bit value starting from the fifth to seventh
+     * // bits is the sixth bit of the binary value of the string stored at "myKey1".
+     * assert payload == 5L;
+     * }
+ */ + CompletableFuture bitpos( + GlideString key, long bit, long start, long end, BitmapIndexType offsetType); + /** * Perform a bitwise operation between multiple keys (containing string values) and store the * result in the destination. @@ -248,6 +452,29 @@ CompletableFuture bitpos( CompletableFuture bitop( BitwiseOperation bitwiseOperation, String destination, String[] keys); + /** + * Perform a bitwise operation between multiple keys (containing string values) and store the + * result in the destination. + * + * @apiNote When in cluster mode, destination and all keys must map to + * the same hash slot. + * @see valkey.io for details. + * @param bitwiseOperation The bitwise operation to perform. + * @param destination The key that will store the resulting string. + * @param keys The list of keys to perform the bitwise operation on. + * @return The size of the string stored in destination. + * @example + *
{@code
+     * client.set(gs("key1"), gs("A")); // "A" has binary value 01000001
+     * client.set(gs("key2"), gs("B")); // "B" has binary value 01000010
+     * Long payload = client.bitop(BitwiseOperation.AND, gs("destination"), new GlideString[] {gs("key1"), gs("key2")}).get();
+     * assert gs("@").equals(client.get(gs("destination")).get()); // "@" has binary value 01000000
+     * assert payload == 1L; // The size of the resulting string is 1.
+     * }
+ */ + CompletableFuture bitop( + BitwiseOperation bitwiseOperation, GlideString destination, GlideString[] keys); + /** * Reads or modifies the array of bits representing the string that is held at key * based on the specified subCommands. diff --git a/java/client/src/main/java/glide/api/commands/ConnectionManagementClusterCommands.java b/java/client/src/main/java/glide/api/commands/ConnectionManagementClusterCommands.java index 1ce9e8a511..6da4f51d91 100644 --- a/java/client/src/main/java/glide/api/commands/ConnectionManagementClusterCommands.java +++ b/java/client/src/main/java/glide/api/commands/ConnectionManagementClusterCommands.java @@ -1,7 +1,8 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; import glide.api.models.ClusterValue; +import glide.api.models.GlideString; import glide.api.models.configuration.RequestRoutingConfiguration.Route; import java.util.concurrent.CompletableFuture; @@ -157,6 +158,21 @@ public interface ConnectionManagementClusterCommands { */ CompletableFuture echo(String message); + /** + * Echoes the provided message back.
+ * The command will be routed to a random node. + * + * @see redis.io for details. + * @param message The message to be echoed back. + * @return The provided message. + * @example + *
{@code
+     * GlideString payload = client.echo(gs("GLIDE")).get();
+     * assert payload.equals(gs("GLIDE"));
+     * }
+ */ + CompletableFuture echo(GlideString message); + /** * Echoes the provided message back. * @@ -179,4 +195,27 @@ public interface ConnectionManagementClusterCommands { * } */ CompletableFuture> echo(String message, Route route); + + /** + * Echoes the provided message back. + * + * @see redis.io for details. + * @param message The message to be echoed back. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. + * @return The provided message. + * @example + *
{@code
+     * // Command sent to a single random node via RANDOM route, expecting a SingleValue result.
+     * GlideString message = client.echo(gs("GLIDE"), RANDOM).get().getSingleValue();
+     * assert message.equals(gs("GLIDE"));
+     *
+     * // Command sent to all nodes via ALL_NODES route, expecting a MultiValue result.
+     * Map msgForAllNodes = client.echo(gs("GLIDE"), ALL_NODES).get().getMultiValue();
+     * for(var msgPerNode : msgForAllNodes.entrySet()) {
+     *     assert msgPerNode.getValue().equals(gs("GLIDE"));
+     * }
+     * }
+ */ + CompletableFuture> echo(GlideString message, Route route); } diff --git a/java/client/src/main/java/glide/api/commands/ConnectionManagementCommands.java b/java/client/src/main/java/glide/api/commands/ConnectionManagementCommands.java index 10d5620eb9..c737cf24ef 100644 --- a/java/client/src/main/java/glide/api/commands/ConnectionManagementCommands.java +++ b/java/client/src/main/java/glide/api/commands/ConnectionManagementCommands.java @@ -1,6 +1,7 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; import java.util.concurrent.CompletableFuture; /** @@ -77,4 +78,18 @@ public interface ConnectionManagementCommands { * } */ CompletableFuture echo(String message); + + /** + * Echoes the provided message back. + * + * @see + */ + CompletableFuture echo(GlideString message); } diff --git a/java/client/src/main/java/glide/api/commands/GenericBaseCommands.java b/java/client/src/main/java/glide/api/commands/GenericBaseCommands.java index 18d07d8f0f..e440b969d7 100644 --- a/java/client/src/main/java/glide/api/commands/GenericBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/GenericBaseCommands.java @@ -1,9 +1,12 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; import glide.api.models.Script; import glide.api.models.commands.ExpireOptions; +import glide.api.models.commands.RestoreOptions; import glide.api.models.commands.ScriptOptions; +import glide.api.models.configuration.ReadFrom; import java.util.concurrent.CompletableFuture; /** @@ -50,6 +53,23 @@ public interface GenericBaseCommands { */ CompletableFuture exists(String[] keys); + /** + * Returns the number of keys in keys that 
exist in the database. + * + * @apiNote When in cluster mode, the command may route to multiple nodes when keys + * map to different hash slots. + * @see redis.io for details. + * @param keys The keys list to check. + * @return The number of keys that exist. If the same existing key is mentioned in keys + * multiple times, it will be counted multiple times. + * @example + *
{@code
+     * Long result = client.exists(new GlideString[] {gs("my_key"), gs("invalid_key")}).get();
+     * assert result == 1L;
+     * }
+ */ + CompletableFuture exists(GlideString[] keys); + /** * Unlink (delete) multiple keys from the database. A key is ignored if it does not * exist. This command, similar to DEL, removes @@ -63,12 +83,31 @@ public interface GenericBaseCommands { * @return The number of keys that were unlinked. * @example *
{@code
-     * Long result = client.unlink("my_key").get();
+     * Long result = client.unlink(new String[] {"my_key"}).get();
      * assert result == 1L;
      * }
*/ CompletableFuture unlink(String[] keys); + /** + * Unlink (delete) multiple keys from the database. A key is ignored if it does not + * exist. This command, similar to DEL, removes + * specified keys and ignores non-existent ones. However, this command does not block the server, + * while DEL does. + * + * @apiNote When in cluster mode, the command may route to multiple nodes when keys + * map to different hash slots. + * @see redis.io for details. + * @param keys The list of keys to unlink. + * @return The number of keys that were unlinked. + * @example + *
{@code
+     * Long result = client.unlink(new GlideString[] {gs("my_key")}).get();
+     * assert result == 1L;
+     * }
+ */ + CompletableFuture unlink(GlideString[] keys); + /** * Sets a timeout on key in seconds. After the timeout has expired, the key * will automatically be deleted.
@@ -92,6 +131,29 @@ public interface GenericBaseCommands { */ CompletableFuture expire(String key, long seconds); + /** + * Sets a timeout on key in seconds. After the timeout has expired, the key + * will automatically be deleted.
+ * If key already has an existing expire + * set, the time to live is updated to the new value.
+ * If seconds is a non-positive number, the key will be deleted rather + * than expired.
+ * The timeout will only be cleared by commands that delete or overwrite the contents of key + * . + * + * @see redis.io for details. + * @param key The key to set timeout on it. + * @param seconds The timeout in seconds. + * @return true if the timeout was set. false if the timeout was not + * set. e.g. key doesn't exist. + * @example + *
{@code
+     * Boolean isSet = client.expire(gs("my_key"), 60).get();
+     * assert isSet; //Indicates that a timeout of 60 seconds has been set for gs("my_key").
+     * }
+ */ + CompletableFuture expire(GlideString key, long seconds); + /** * Sets a timeout on key in seconds. After the timeout has expired, the key * will automatically be deleted.
@@ -112,11 +174,36 @@ public interface GenericBaseCommands { * @example *
{@code
      * Boolean isSet = client.expire("my_key", 60, ExpireOptions.HAS_NO_EXPIRY).get();
-     * assert isSet; //Indicates that a timeout of 60 seconds has been set for "my_key."
+     * assert isSet; //Indicates that a timeout of 60 seconds has been set for "my_key".
      * }
*/ CompletableFuture expire(String key, long seconds, ExpireOptions expireOptions); + /** + * Sets a timeout on key in seconds. After the timeout has expired, the key + * will automatically be deleted.
+ * If key already has an existing expire + * set, the time to live is updated to the new value.
+ * If seconds is a non-positive number, the key will be deleted rather + * than expired.
+ * The timeout will only be cleared by commands that delete or overwrite the contents of key + * . + * + * @see redis.io for details. + * @param key The key to set timeout on it. + * @param seconds The timeout in seconds. + * @param expireOptions The expire options. + * @return true if the timeout was set. false if the timeout was not + * set. e.g. key doesn't exist, or operation skipped due to the provided + * arguments. + * @example + *
{@code
+     * Boolean isSet = client.expire(gs("my_key"), 60, ExpireOptions.HAS_NO_EXPIRY).get();
+     * assert isSet; //Indicates that a timeout of 60 seconds has been set for gs("my_key").
+     * }
+ */ + CompletableFuture expire(GlideString key, long seconds, ExpireOptions expireOptions); + /** * Sets a timeout on key. It takes an absolute Unix timestamp (seconds since January * 1, 1970) instead of specifying the number of seconds.
@@ -140,6 +227,29 @@ public interface GenericBaseCommands { */ CompletableFuture expireAt(String key, long unixSeconds); + /** + * Sets a timeout on key. It takes an absolute Unix timestamp (seconds since January + * 1, 1970) instead of specifying the number of seconds.
+ * A timestamp in the past will delete the key immediately. After the timeout has + * expired, the key will automatically be deleted.
+ * If key already has an existing expire set, the time to live is + * updated to the new value.
+ * The timeout will only be cleared by commands that delete or overwrite the contents of key + * . + * + * @see redis.io for details. + * @param key The key to set timeout on it. + * @param unixSeconds The timeout in an absolute Unix timestamp. + * @return true if the timeout was set. false if the timeout was not + * set. e.g. key doesn't exist. + * @example + *
{@code
+     * Boolean isSet = client.expireAt(gs("my_key"), Instant.now().getEpochSecond() + 10).get();
+     * assert isSet;
+     * }
+ */ + CompletableFuture expireAt(GlideString key, long unixSeconds); + /** * Sets a timeout on key. It takes an absolute Unix timestamp (seconds since January * 1, 1970) instead of specifying the number of seconds.
@@ -165,6 +275,32 @@ public interface GenericBaseCommands { */ CompletableFuture expireAt(String key, long unixSeconds, ExpireOptions expireOptions); + /** + * Sets a timeout on key. It takes an absolute Unix timestamp (seconds since January + * 1, 1970) instead of specifying the number of seconds.
+ * A timestamp in the past will delete the key immediately. After the timeout has + * expired, the key will automatically be deleted.
+ * If key already has an existing expire set, the time to live is + * updated to the new value.
+ * The timeout will only be cleared by commands that delete or overwrite the contents of key + * . + * + * @see redis.io for details. + * @param key The key to set timeout on it. + * @param unixSeconds The timeout in an absolute Unix timestamp. + * @param expireOptions The expire options. + * @return true if the timeout was set. false if the timeout was not + * set. e.g. key doesn't exist, or operation skipped due to the provided + * arguments. + * @example + *
{@code
+     * Boolean isSet = client.expireAt(gs("my_key"), Instant.now().getEpochSecond() + 10, ExpireOptions.HAS_NO_EXPIRY).get();
+     * assert isSet;
+     * }
+ */ + CompletableFuture expireAt( + GlideString key, long unixSeconds, ExpireOptions expireOptions); + /** * Sets a timeout on key in milliseconds. After the timeout has expired, the * key will automatically be deleted.
@@ -188,6 +324,29 @@ public interface GenericBaseCommands { */ CompletableFuture pexpire(String key, long milliseconds); + /** + * Sets a timeout on key in milliseconds. After the timeout has expired, the + * key will automatically be deleted.
+ * If key already has an existing + * expire set, the time to live is updated to the new value.
+ * If milliseconds is a non-positive number, the key will be deleted + * rather than expired.
+ * The timeout will only be cleared by commands that delete or overwrite the contents of key + * . + * + * @see redis.io for details. + * @param key The key to set timeout on it. + * @param milliseconds The timeout in milliseconds. + * @return true if the timeout was set. false if the timeout was not + * set. e.g. key doesn't exist. + * @example + *
{@code
+     * Boolean isSet = client.pexpire(gs("my_key"), 60000).get();
+     * assert isSet;
+     * }
+ */ + CompletableFuture pexpire(GlideString key, long milliseconds); + /** * Sets a timeout on key in milliseconds. After the timeout has expired, the * key will automatically be deleted.
@@ -213,6 +372,32 @@ public interface GenericBaseCommands { */ CompletableFuture pexpire(String key, long milliseconds, ExpireOptions expireOptions); + /** + * Sets a timeout on key in milliseconds. After the timeout has expired, the + * key will automatically be deleted.
+ * If key already has an existing expire set, the time to live is updated to the new + * value.
+ * If milliseconds is a non-positive number, the key will be deleted + * rather than expired.
+ * The timeout will only be cleared by commands that delete or overwrite the contents of key + * . + * + * @see redis.io for details. + * @param key The key to set timeout on it. + * @param milliseconds The timeout in milliseconds. + * @param expireOptions The expire options. + * @return true if the timeout was set. false if the timeout was not + * set. e.g. key doesn't exist, or operation skipped due to the provided + * arguments. + * @example + *
{@code
+     * Boolean isSet = client.pexpire(gs("my_key"), 60000, ExpireOptions.HAS_NO_EXPIRY).get();
+     * assert isSet;
+     * }
+ */ + CompletableFuture pexpire( + GlideString key, long milliseconds, ExpireOptions expireOptions); + /** * Sets a timeout on key. It takes an absolute Unix timestamp (milliseconds since * January 1, 1970) instead of specifying the number of milliseconds.
@@ -236,6 +421,29 @@ public interface GenericBaseCommands { */ CompletableFuture pexpireAt(String key, long unixMilliseconds); + /** + * Sets a timeout on key. It takes an absolute Unix timestamp (milliseconds since + * January 1, 1970) instead of specifying the number of milliseconds.
+ * A timestamp in the past will delete the key immediately. After the timeout has + * expired, the key will automatically be deleted.
+ * If key already has an existing expire set, the time to live is + * updated to the new value.
+ * The timeout will only be cleared by commands that delete or overwrite the contents of key + * . + * + * @see redis.io for details. + * @param key The key to set timeout on it. + * @param unixMilliseconds The timeout in an absolute Unix timestamp. + * @return true if the timeout was set. false if the timeout was not + * set. e.g. key doesn't exist. + * @example + *
{@code
+     * Boolean isSet = client.pexpireAt(gs("my_key"), Instant.now().toEpochMilli() + 10).get();
+     * assert isSet;
+     * }
+ */ + CompletableFuture pexpireAt(GlideString key, long unixMilliseconds); + /** * Sets a timeout on key. It takes an absolute Unix timestamp (milliseconds since * January 1, 1970) instead of specifying the number of milliseconds.
@@ -262,6 +470,32 @@ public interface GenericBaseCommands { CompletableFuture pexpireAt( String key, long unixMilliseconds, ExpireOptions expireOptions); + /** + * Sets a timeout on key. It takes an absolute Unix timestamp (milliseconds since + * January 1, 1970) instead of specifying the number of milliseconds.
+ * A timestamp in the past will delete the key immediately. After the timeout has + * expired, the key will automatically be deleted.
+ * If key already has an existing expire set, the time to live is + * updated to the new value.
+ * The timeout will only be cleared by commands that delete or overwrite the contents of key + * . + * + * @see redis.io for details. + * @param key The key to set timeout on it. + * @param unixMilliseconds The timeout in an absolute Unix timestamp. + * @param expireOptions The expire option. + * @return true if the timeout was set. false if the timeout was not + * set. e.g. key doesn't exist, or operation skipped due to the provided + * arguments. + * @example + *
{@code
+     * Boolean isSet = client.pexpireAt(gs("my_key"), Instant.now().toEpochMilli() + 10, ExpireOptions.HAS_NO_EXPIRY).get();
+     * assert isSet;
+     * }
+ */ + CompletableFuture pexpireAt( + GlideString key, long unixMilliseconds, ExpireOptions expireOptions); + /** * Returns the remaining time to live of key that has a timeout, in seconds. * @@ -280,6 +514,24 @@ CompletableFuture pexpireAt( */ CompletableFuture ttl(String key); + /** + * Returns the remaining time to live of key that has a timeout, in seconds. + * + * @see redis.io for details. + * @param key The key to return its timeout. + * @return TTL in seconds, -2 if key does not exist, or -1 + * if key exists but has no associated expiration. + * @example + *
{@code
+     * Long timeRemaining = client.ttl(gs("my_key")).get();
+     * assert timeRemaining == 3600L; //Indicates that gs("my_key") has a remaining time to live of 3600 seconds.
+     *
+     * Long timeRemaining = client.ttl(gs("nonexistent_key")).get();
+     * assert timeRemaining == -2L; //Returns -2 for a non-existing key.
+     * }
+ */ + CompletableFuture ttl(GlideString key); + /** * Returns the absolute Unix timestamp (since January 1, 1970) at which the given key * will expire, in seconds.
@@ -298,6 +550,24 @@ CompletableFuture pexpireAt( */ CompletableFuture expiretime(String key); + /** + * Returns the absolute Unix timestamp (since January 1, 1970) at which the given key + * will expire, in seconds.
+ * To get the expiration with millisecond precision, use {@link #pexpiretime(String)}. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param key The key to determine the expiration value of. + * @return The expiration Unix timestamp in seconds. -2 if key does not + * exist, or -1 if key exists but has no associated expiration. + * @example + *
{@code
+     * Long expiration = client.expiretime(gs("my_key")).get();
+     * System.out.printf("The key expires at %d epoch time", expiration);
+     * }
+ */ + CompletableFuture expiretime(GlideString key); + /** * Returns the absolute Unix timestamp (since January 1, 1970) at which the given key * will expire, in milliseconds. @@ -315,6 +585,23 @@ CompletableFuture pexpireAt( */ CompletableFuture pexpiretime(String key); + /** + * Returns the absolute Unix timestamp (since January 1, 1970) at which the given key + * will expire, in milliseconds. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param key The key to determine the expiration value of. + * @return The expiration Unix timestamp in milliseconds. -2 if key does + * not exist, or -1 if key exists but has no associated expiration. + * @example + *
{@code
+     * Long expiration = client.pexpiretime(gs("my_key")).get();
+     * System.out.printf("The key expires at %d epoch time (ms)", expiration);
+     * }
+ */ + CompletableFuture pexpiretime(GlideString key); + // TODO move invokeScript to ScriptingAndFunctionsBaseCommands // TODO add note to invokeScript about routing on cluster client /** @@ -382,6 +669,24 @@ CompletableFuture pexpireAt( */ CompletableFuture pttl(String key); + /** + * Returns the remaining time to live of key that has a timeout, in milliseconds. + * + * @see redis.io for details. + * @param key The key to return its timeout. + * @return TTL in milliseconds. -2 if key does not exist, -1 + * if key exists but has no associated expire. + * @example + *
{@code
+     * Long timeRemainingMS = client.pttl(gs("my_key")).get();
+     * assert timeRemainingMS == 5000L; // Indicates that gs("my_key") has a remaining time to live of 5000 milliseconds.
+     *
+     * Long timeRemainingMS = client.pttl(gs("nonexistent_key")).get();
+     * assert timeRemainingMS == -2L; // Returns -2 for a non-existing key.
+     * }
+ */ + CompletableFuture pttl(GlideString key); + /** * Removes the existing timeout on key, turning the key from volatile (a * key with an expire set) to persistent (a key that will never expire @@ -399,6 +704,23 @@ CompletableFuture pexpireAt( */ CompletableFuture persist(String key); + /** + * Removes the existing timeout on key, turning the key from volatile (a + * key with an expire set) to persistent (a key that will never expire + * as no timeout is associated). + * + * @see redis.io for details. + * @param key The key to remove the existing timeout on. + * @return false if key does not exist or does not have an associated + * timeout, true if the timeout has been removed. + * @example + *
{@code
+     * Boolean timeoutRemoved = client.persist(gs("my_key")).get();
+     * assert timeoutRemoved; // Indicates that the timeout associated with the key "my_key" was successfully removed.
+     * }
+ */ + CompletableFuture persist(GlideString key); + /** * Returns the string representation of the type of the value stored at key. * @@ -417,6 +739,24 @@ CompletableFuture pexpireAt( */ CompletableFuture type(String key); + /** + * Returns the string representation of the type of the value stored at key. + * + * @see redis.io for details. + * @param key The key to check its data type. + * @return If the key exists, the type of the stored value is returned. Otherwise, a + * "none" string is returned. + * @example + *
{@code
+     * String type = client.type(gs("StringKey")).get();
+     * assert type.equals("string");
+     *
+     * type = client.type(gs("ListKey")).get();
+     * assert type.equals("list");
+     * }
+ */ + CompletableFuture type(GlideString key); + /** * Returns the internal encoding for the Redis object stored at key. * @@ -435,6 +775,24 @@ CompletableFuture pexpireAt( */ CompletableFuture objectEncoding(String key); + /** + * Returns the internal encoding for the Redis object stored at key. + * + * @see redis.io for details. + * @param key The key of the object to get the internal encoding of. + * @return If key exists, returns the internal encoding of the object stored at + * key as a String. Otherwise, returns null. + * @example + *
{@code
+     * String encoding = client.objectEncoding(gs("my_hash")).get();
+     * assert encoding.equals("listpack");
+     *
+     * encoding = client.objectEncoding(gs("non_existing_key")).get();
+     * assert encoding == null;
+     * }
+ */ + CompletableFuture objectEncoding(GlideString key); + /** * Returns the logarithmic access frequency counter of a Redis object stored at key. * @@ -455,6 +813,26 @@ CompletableFuture pexpireAt( */ CompletableFuture objectFreq(String key); + /** + * Returns the logarithmic access frequency counter of a Redis object stored at key. + * + * @see redis.io for details. + * @param key The key of the object to get the logarithmic access frequency counter + * of. + * @return If key exists, returns the logarithmic access frequency counter of the + * object stored at key as a Long. Otherwise, returns null + * . + * @example + *
{@code
+     * Long frequency = client.objectFreq(gs("my_hash")).get();
+     * assert frequency == 2L;
+     *
+     * frequency = client.objectFreq(gs("non_existing_key")).get();
+     * assert frequency == null;
+     * }
+ */ + CompletableFuture objectFreq(GlideString key); + /** * Returns the time in seconds since the last access to the value stored at key. * @@ -473,6 +851,24 @@ CompletableFuture pexpireAt( */ CompletableFuture objectIdletime(String key); + /** + * Returns the time in seconds since the last access to the value stored at key. + * + * @see redis.io for details. + * @param key The key of the object to get the idle time of. + * @return If key exists, returns the idle time in seconds. Otherwise, returns + * null. + * @example + *
{@code
+     * Long idletime = client.objectIdletime(gs("my_hash")).get();
+     * assert idletime == 2L;
+     *
+     * idletime = client.objectIdletime(gs("non_existing_key")).get();
+     * assert idletime == null;
+     * }
+ */ + CompletableFuture objectIdletime(GlideString key); + /** * Returns the reference count of the object stored at key. * @@ -491,6 +887,24 @@ CompletableFuture pexpireAt( */ CompletableFuture objectRefcount(String key); + /** + * Returns the reference count of the object stored at key. + * + * @see redis.io for details. + * @param key The key of the object to get the reference count of. + * @return If key exists, returns the reference count of the object stored at + * key as a Long. Otherwise, returns null. + * @example + *
{@code
+     * Long refcount = client.objectRefcount(gs("my_hash")).get();
+     * assert refcount == 2L;
+     *
+     * refcount = client.objectRefcount(gs("non_existing_key")).get();
+     * assert refcount == null;
+     * }
+ */ + CompletableFuture objectRefcount(GlideString key); + /** * Renames key to newKey.
* If newKey already exists it is overwritten. @@ -511,6 +925,26 @@ CompletableFuture pexpireAt( */ CompletableFuture rename(String key, String newKey); + /** + * Renames key to newKey.
+ * If newKey already exists it is overwritten. + * + * @apiNote When in cluster mode, both key and newKey must map to the + * same hash slot. + * @see redis.io for details. + * @param key The key to rename. + * @param newKey The new name of the key. + * @return If the key was successfully renamed, return "OK". If + * key does not exist, an error is thrown. + * @example + *
{@code
+     * String value = client.set(gs("key"), gs("value")).get();
+     * value = client.rename(gs("key"), gs("newKeyName")).get();
+     * assert value.equals("OK");
+     * }
+ */ + CompletableFuture rename(GlideString key, GlideString newKey); + /** * Renames key to newKey if newKey does not yet exist. * @@ -529,6 +963,24 @@ CompletableFuture pexpireAt( */ CompletableFuture renamenx(String key, String newKey); + /** + * Renames key to newKey if newKey does not yet exist. + * + * @apiNote When in cluster mode, both key and newKey must map to the + * same hash slot. + * @see redis.io for details. + * @param key The key to rename. + * @param newKey The new key name. + * @return true if key was renamed to newKey, false + * if newKey already exists. + * @example + *
{@code
+     * Boolean renamed = client.renamenx(gs("old_key"), gs("new_key")).get();
+     * assert renamed;
+     * }
+ */ + CompletableFuture renamenx(GlideString key, GlideString newKey); + /** * Updates the last access time of specified keys. * @@ -567,6 +1019,28 @@ CompletableFuture pexpireAt( */ CompletableFuture copy(String source, String destination); + /** + * Copies the value stored at the source to the destination key if the + * destination key does not yet exist. + * + * @apiNote When in cluster mode, both source and destination must map + * to the same hash slot. + * @since Redis 6.2.0 and above. + * @see redis.io for details. + * @param source The key to the source value. + * @param destination The key where the value should be copied to. + * @return true if source was copied, false if source + * was not copied. + * @example + *
{@code
+     * client.set(gs("test1"), gs("one")).get();
+     * client.set(gs("test2"), gs("two")).get();
+     * assert !client.copy(gs("test1"), gs("test2")).get();
+     * assert client.copy(gs("test1"), gs("test2")).get();
+     * }
+ */ + CompletableFuture copy(GlideString source, GlideString destination); + /** * Copies the value stored at the source to the destination key. When * replace is true, removes the destination key first if it already @@ -590,4 +1064,147 @@ CompletableFuture pexpireAt( * } */ CompletableFuture copy(String source, String destination, boolean replace); + + /** + * Copies the value stored at the source to the destination key. When + * replace is true, removes the destination key first if it already + * exists, otherwise performs no action. + * + * @apiNote When in cluster mode, both source and destination must map + * to the same hash slot. + * @since Redis 6.2.0 and above. + * @see redis.io for details. + * @param source The key to the source value. + * @param destination The key where the value should be copied to. + * @param replace If the destination key should be removed before copying the value to it. + * @return true if source was copied, false if source + * was not copied. + * @example + *
{@code
+     * client.set(gs("test1"), gs("one")).get();
+     * client.set(gs("test2"), gs("two")).get();
+     * assert !client.copy(gs("test1"), gs("test2"), false).get();
+     * assert client.copy(gs("test1"), gs("test2"), true).get();
+     * }
+ */ + CompletableFuture copy(GlideString source, GlideString destination, boolean replace); + + /** + * Serialize the value stored at key in a Valkey-specific format and return it to the + * user. + * + * @see valkey.io for details. + * @param key The key of the set. + * @return The serialized value of a set.
+ * If key does not exist, null will be returned. + * @example + *
{@code
+     * byte[] result = client.dump(gs("myKey")).get();
+     *
+     * byte[] response = client.dump(gs("nonExistingKey")).get();
+     * assert response == null;
+     * }
+ */ + CompletableFuture dump(GlideString key); + + /** + * Create a key associated with a value that is obtained by + * deserializing the provided serialized value (obtained via {@link #dump}). + * + * @see valkey.io for details. + * @param key The key of the set. + * @param ttl The expiry time (in milliseconds). If 0, the key will + * persist. + * @param value The serialized value. + * @return Return OK if successfully create a key with a value + * . + * @example + *
{@code
+     * String result = client.restore(gs("newKey"), 0, value).get();
+     * assert result.equals("OK");
+     * }
+ */ + CompletableFuture restore(GlideString key, long ttl, byte[] value); + + /** + * Create a key associated with a value that is obtained by + * deserializing the provided serialized value (obtained via {@link #dump}). + * + * @see valkey.io for details. + * @param key The key of the set. + * @param ttl The expiry time (in milliseconds). If 0, the key will + * persist. + * @param value The serialized value. + * @param restoreOptions The restore options. See {@link RestoreOptions}. + * @return Return OK if successfully create a key with a value + * . + * @example + *
{@code
+     * RestoreOptions options = RestoreOptions.builder().replace().absttl().idletime(10).frequency(10).build();
+     * // Set restore options with replace and absolute TTL modifiers, object idletime and frequency to 10.
+     * String result = client.restore(gs("newKey"), 0, value, options).get();
+     * assert result.equals("OK");
+     * }
+ */ + CompletableFuture restore( + GlideString key, long ttl, byte[] value, RestoreOptions restoreOptions); + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + *
+ * The sort command can be used to sort elements based on different criteria and + * apply transformations on sorted elements.
+ * To store the result into a new key, see {@link #sortStore(String, String)}.
+ * + * @param key The key of the list, set, or sorted set to be sorted. + * @return An Array of sorted elements. + * @example + *
{@code
+     * client.lpush("mylist", new String[] {"3", "1", "2"}).get();
+     * assertArrayEquals(new String[] {"1", "2", "3"}, client.sort("mylist").get()); // List is sorted in ascending order
+     * }
+ */ + CompletableFuture sort(String key); + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + *
+ * The sortReadOnly command can be used to sort elements based on different criteria + * and apply transformations on sorted elements.
+ * This command is routed depending on the client's {@link ReadFrom} strategy. + * + * @since Redis 7.0 and above. + * @param key The key of the list, set, or sorted set to be sorted. + * @return An Array of sorted elements. + * @example + *
{@code
+     * client.lpush("mylist", new String[] {"3", "1", "2"}).get();
+     * assertArrayEquals(new String[] {"1", "2", "3"}, client.sortReadOnly("mylist").get()); // List is sorted in ascending order
+     * }
+ */ + CompletableFuture sortReadOnly(String key); + + /** + * Sorts the elements in the list, set, or sorted set at key and stores the result in + * destination. The sort command can be used to sort elements based on + * different criteria, apply transformations on sorted elements, and store the result in a new + * key.
+ * To get the sort result without storing it into a key, see {@link #sort(String)} or {@link + * #sortReadOnly(String)}. + * + * @apiNote When in cluster mode, key and destination must map to the + * same hash slot. + * @param key The key of the list, set, or sorted set to be sorted. + * @param destination The key where the sorted result will be stored. + * @return The number of elements in the sorted key stored at destination. + * @example + *
{@code
+     * client.lpush("mylist", new String[] {"3", "1", "2"}).get();
+     * assert client.sortStore("mylist", "destination").get() == 3;
+     * assertArrayEquals(
+     *    new String[] {"1", "2", "3"},
+     *    client.lrange("destination", 0, -1).get()); // Sorted list is stored in `destination`
+     * }
+ */ + CompletableFuture sortStore(String key, String destination); } diff --git a/java/client/src/main/java/glide/api/commands/GenericClusterCommands.java b/java/client/src/main/java/glide/api/commands/GenericClusterCommands.java index 3f138f42f0..a0eb8a43b3 100644 --- a/java/client/src/main/java/glide/api/commands/GenericClusterCommands.java +++ b/java/client/src/main/java/glide/api/commands/GenericClusterCommands.java @@ -1,9 +1,11 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; import glide.api.models.ClusterTransaction; import glide.api.models.ClusterValue; import glide.api.models.Transaction; +import glide.api.models.commands.SortClusterOptions; +import glide.api.models.configuration.ReadFrom; import glide.api.models.configuration.RequestRoutingConfiguration.Route; import glide.api.models.configuration.RequestRoutingConfiguration.SingleNodeRoute; import java.util.concurrent.CompletableFuture; @@ -112,4 +114,110 @@ public interface GenericClusterCommands { * } */ CompletableFuture exec(ClusterTransaction transaction, SingleNodeRoute route); + + /** + * Returns a random key. + * + * @see redis.io for details. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route, and will return the first successful + * result. + * @return A random key from the database. + * @example + *
{@code
+     * String value = client.set("key", "value").get();
+     * String value_1 = client.set("key1", "value_1").get();
+     * String key = client.randomKey(RANDOM).get();
+     * System.out.println("The random key is: " + key);
+     * // The value of key is either "key" or "key1"
+     * }
+ */ + CompletableFuture randomKey(Route route); + + /** + * Returns a random key.
+ * The command will be routed to all primary nodes, and will return the first successful result. + * + * @see redis.io for details. + * @return A random key from the database. + * @example + *
{@code
+     * String value = client.set("key", "value").get();
+     * String value_1 = client.set("key1", "value_1").get();
+     * String key = client.randomKey().get();
+     * System.out.println("The random key is: " + key);
+     * // The value of key is either "key" or "key1"
+     * }
+ */ + CompletableFuture randomKey(); + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + *
+ * The sort command can be used to sort elements based on different criteria and + * apply transformations on sorted elements.
+ * To store the result into a new key, see {@link #sortStore(String, String, SortClusterOptions)}. + * + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortClusterOptions The {@link SortClusterOptions}. + * @return An Array of sorted elements. + * @example + *
{@code
+     * client.lpush("mylist", new String[] {"3", "1", "2", "a"}).get();
+     * String[] payload = client.sort("mylist", SortClusterOptions.builder().alpha()
+     *          .orderBy(DESC).limit(new SortBaseOptions.Limit(0L, 3L)).build()).get();
+     * assertArrayEquals(new String[] {"a", "3", "2"}, payload); // List is sorted in descending order lexicographically starting
+     * }
+ */ + CompletableFuture sort(String key, SortClusterOptions sortClusterOptions); + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + *
+ * The sortReadOnly command can be used to sort elements based on different criteria + * and apply transformations on sorted elements.
+ * This command is routed depending on the client's {@link ReadFrom} strategy. + * + * @since Redis 7.0 and above. + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortClusterOptions The {@link SortClusterOptions}. + * @return An Array of sorted elements. + * @example + *
{@code
+     * client.lpush("mylist", new String[] {"3", "1", "2", "a"}).get();
+     * String[] payload = client.sortReadOnly("mylist", SortClusterOptions.builder().alpha()
+     *          .orderBy(DESC).limit(new SortBaseOptions.Limit(0L, 3L)).build()).get();
+     * assertArrayEquals(new String[] {"a", "3", "2"}, payload); // List is sorted in descending order lexicographically starting
+     * }
+ */ + CompletableFuture sortReadOnly(String key, SortClusterOptions sortClusterOptions); + + /** + * Sorts the elements in the list, set, or sorted set at key and stores the result in + * destination. The sort command can be used to sort elements based on + * different criteria, apply transformations on sorted elements, and store the result in a new + * key.
+ * To get the sort result without storing it into a key, see {@link #sort(String, + * SortClusterOptions)} or {@link #sortReadOnly(String, SortClusterOptions)}. + * + * @apiNote When in cluster mode, key and destination must map to the + * same hash slot. + * @param key The key of the list, set, or sorted set to be sorted. + * @param destination The key where the sorted result will be stored. + * @param sortClusterOptions The {@link SortClusterOptions}. + * @return The number of elements in the sorted key stored at destination. + * @example + *
{@code
+     * client.lpush("mylist", new String[] {"3", "1", "2", "a"}).get();
+     * Long payload = client.sortStore("mylist", "destination",
+     *          SortClusterOptions.builder().alpha().orderBy(DESC)
+     *              .limit(new SortBaseOptions.Limit(0L, 3L)).build()).get();
+     * assertEquals(3, payload);
+     * assertArrayEquals(
+     *      new String[] {"a", "3", "2"},
+     *      client.lrange("destination", 0, -1).get()); // Sorted list is stored in "destination"
+     * }
+ */ + CompletableFuture sortStore( + String key, String destination, SortClusterOptions sortClusterOptions); } diff --git a/java/client/src/main/java/glide/api/commands/GenericCommands.java b/java/client/src/main/java/glide/api/commands/GenericCommands.java index 27447f0c6c..680a9a1972 100644 --- a/java/client/src/main/java/glide/api/commands/GenericCommands.java +++ b/java/client/src/main/java/glide/api/commands/GenericCommands.java @@ -1,7 +1,10 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; import glide.api.models.Transaction; +import glide.api.models.commands.SortOptions; +import glide.api.models.configuration.ReadFrom; import java.util.concurrent.CompletableFuture; /** @@ -75,6 +78,24 @@ public interface GenericCommands { */ CompletableFuture move(String key, long dbIndex); + /** + * Move key from the currently selected database to the database specified by + * dbIndex. + * + * @see redis.io for more details. + * @param key The key to move. + * @param dbIndex The index of the database to move key to. + * @return true if key was moved, or false if the key + * already exists in the destination database or does not exist in the source + * database. + * @example + *
{@code
+     * Boolean moved = client.move(gs("some_key"), 1L).get();
+     * assert moved;
+     * }
+ */ + CompletableFuture move(GlideString key, long dbIndex); + /** * Copies the value stored at the source to the destination key on * destinationDB. When replace is true, removes the destination @@ -97,6 +118,28 @@ public interface GenericCommands { CompletableFuture copy( String source, String destination, long destinationDB, boolean replace); + /** + * Copies the value stored at the source to the destination key on + * destinationDB. When replace is true, removes the destination + * key first if it already exists, otherwise performs no action. + * + * @since Redis 6.2.0 and above. + * @see redis.io for details. + * @param source The key to the source value. + * @param destination The key where the value should be copied to. + * @param destinationDB The alternative logical database index for the destination key. + * @param replace If the destination key should be removed before copying the value to it. + * @return true if source was copied, false if source + * was not copied. + * @example + *
{@code
+     * client.set(gs("test1"), gs("one")).get();
+     * assert client.copy(gs("test1"), gs("test2"), 1, false).get();
+     * }
+ */ + CompletableFuture copy( + GlideString source, GlideString destination, long destinationDB, boolean replace); + /** * Copies the value stored at the source to the destination key on * destinationDB. When replace is true, removes the destination @@ -116,4 +159,110 @@ CompletableFuture copy( * } */ CompletableFuture copy(String source, String destination, long destinationDB); + + /** + * Copies the value stored at the source to the destination key on + * destinationDB. When replace is true, removes the destination + * key first if it already exists, otherwise performs no action. + * + * @since Redis 6.2.0 and above. + * @see redis.io for details. + * @param source The key to the source value. + * @param destination The key where the value should be copied to. + * @param destinationDB The alternative logical database index for the destination key. + * @return true if source was copied, false if source + * was not copied. + * @example + *
{@code
+     * client.set(gs("test1"), gs("one")).get();
+     * assert client.copy(gs("test1"), gs("test2"), 1).get();
+     * }
+ */ + CompletableFuture copy(GlideString source, GlideString destination, long destinationDB); + + /** + * Returns a random key from currently selected database. + * + * @see redis.io for details. + * @return A random key from the database. + * @example + *
{@code
+     * String value = client.set("key", "value").get();
+     * String value_1 = client.set("key1", "value_1").get();
+     * String key = client.randomKey().get();
+     * System.out.println("The random key is: " + key);
+     * // The value of key is either "key" or "key1"
+     * }
+ */ + CompletableFuture randomKey(); + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + * The sort command can be used to sort elements based on different criteria and + * apply transformations on sorted elements.
+ * To store the result into a new key, see {@link #sortStore(String, String, SortOptions)}. + * + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortOptions The {@link SortOptions}. + * @return An Array of sorted elements. + * @example + *
{@code
+     * client.hset("user:1", Map.of("name", "Alice", "age", "30")).get();
+     * client.hset("user:2", Map.of("name", "Bob", "age", "25")).get();
+     * client.lpush("user_ids", new String[] {"2", "1"}).get();
+     * String [] payload = client.sort("user_ids", SortOptions.builder().byPattern("user:*->age")
+     *                  .getPattern("user:*->name").build()).get();
+     * assertArrayEquals(new String[] {"Bob", "Alice"}, payload); // Returns a list of the names sorted by age
+     * }
+ */ + CompletableFuture sort(String key, SortOptions sortOptions); + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + * The sortReadOnly command can be used to sort elements based on different criteria + * and apply transformations on sorted elements.
+ * This command is routed depending on the client's {@link ReadFrom} strategy. + * + * @since Redis 7.0 and above. + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortOptions The {@link SortOptions}. + * @return An Array of sorted elements. + * @example + *
{@code
+     * client.hset("user:1", Map.of("name", "Alice", "age", "30")).get();
+     * client.hset("user:2", Map.of("name", "Bob", "age", "25")).get();
+     * client.lpush("user_ids", new String[] {"2", "1"}).get();
+     * String [] payload = client.sortReadOnly("user_ids", SortOptions.builder().byPattern("user:*->age")
+     *                  .getPattern("user:*->name").build()).get();
+     * assertArrayEquals(new String[] {"Bob", "Alice"}, payload); // Returns a list of the names sorted by age
+     * }
+ */ + CompletableFuture sortReadOnly(String key, SortOptions sortOptions); + + /** + * Sorts the elements in the list, set, or sorted set at key and stores the result in + * destination. The sort command can be used to sort elements based on + * different criteria, apply transformations on sorted elements, and store the result in a new + * key.
+ * To get the sort result without storing it into a key, see {@link #sort(String, SortOptions)}. + * + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortOptions The {@link SortOptions}. + * @param destination The key where the sorted result will be stored. + * @return The number of elements in the sorted key stored at destination. + * @example + *
{@code
+     * client.hset("user:1", Map.of("name", "Alice", "age", "30")).get();
+     * client.hset("user:2", Map.of("name", "Bob", "age", "25")).get();
+     * client.lpush("user_ids", new String[] {"2", "1"}).get();
+     * Long payload = client.sortStore("user_ids", "destination",
+     *          SortOptions.builder().byPattern("user:*->age").getPattern("user:*->name").build())
+     *          .get();
+     * assertEquals(2, payload);
+     * assertArrayEquals(
+     *      new String[] {"Bob", "Alice"},
+     *      client.lrange("destination", 0, -1).get()); // The list of the names sorted by age is stored in `destination`
+     * }
+ */ + CompletableFuture sortStore(String key, String destination, SortOptions sortOptions); } diff --git a/java/client/src/main/java/glide/api/commands/GeospatialIndicesBaseCommands.java b/java/client/src/main/java/glide/api/commands/GeospatialIndicesBaseCommands.java index 6517c99704..7a9fb87778 100644 --- a/java/client/src/main/java/glide/api/commands/GeospatialIndicesBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/GeospatialIndicesBaseCommands.java @@ -1,6 +1,7 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; import glide.api.models.commands.geospatial.GeoAddOptions; import glide.api.models.commands.geospatial.GeoUnit; import glide.api.models.commands.geospatial.GeospatialData; @@ -80,6 +81,27 @@ CompletableFuture geoadd( */ CompletableFuture geopos(String key, String[] members); + /** + * Returns the positions (longitude,latitude) of all the specified members of the + * geospatial index represented by the sorted set at key. + * + * @see valkey.io for more details. + * @param key The key of the sorted set. + * @param members The members for which to get the positions. + * @return A 2D array which represent positions (longitude and latitude) + * corresponding to the given members. If a member does not exist, its position will be + *
null
. + * @example + *
{@code
+     * // When added via GEOADD, the geospatial coordinates are converted into a 52 bit geohash, so the coordinates
+     * // returned might not be exactly the same as the input values
+     * client.geoadd(gs("mySortedSet"), Map.of(gs("Palermo"), new GeospatialData(13.361389, 38.115556), gs("Catania"), new GeospatialData(15.087269, 37.502669))).get();
+     * Double[][] result = client.geopos(gs("mySortedSet"), new GlideString[]{gs("Palermo"), gs("Catania"), gs("NonExisting")}).get();
+     * System.out.println(Arrays.deepToString(result));
+     * }
+ */ + CompletableFuture geopos(GlideString key, GlideString[] members); + /** * Returns the distance between member1 and member2 saved in the * geospatial index stored at key. @@ -99,6 +121,26 @@ CompletableFuture geoadd( */ CompletableFuture geodist(String key, String member1, String member2, GeoUnit geoUnit); + /** + * Returns the distance between member1 and member2 saved in the + * geospatial index stored at key. + * + * @see valkey.io for more details. + * @param key The key of the sorted set. + * @param member1 The name of the first member. + * @param member2 The name of the second member. + * @param geoUnit The unit of distance measurement - see {@link GeoUnit}. + * @return The distance between member1 and member2. If one or both + * members do not exist, or if the key does not exist, returns null. + * @example + *
{@code
+     * Double result = client.geodist(gs("mySortedSet"), gs("Palermo"), gs("Catania"), GeoUnit.KILOMETERS).get();
+     * System.out.println(result);
+     * }
+ */ + CompletableFuture geodist( + GlideString key, GlideString member1, GlideString member2, GeoUnit geoUnit); + /** * Returns the distance between member1 and member2 saved in the * geospatial index stored at key. @@ -118,6 +160,25 @@ CompletableFuture geoadd( */ CompletableFuture geodist(String key, String member1, String member2); + /** + * Returns the distance between member1 and member2 saved in the + * geospatial index stored at key. + * + * @see valkey.io for more details. + * @param key The key of the sorted set. + * @param member1 The name of the first member. + * @param member2 The name of the second member. + * @return The distance between member1 and member2. If one or both + * members do not exist, or if the key does not exist, returns null. The default + * unit is {@see GeoUnit#METERS}. + * @example + *
{@code
+     * Double result = client.geodist(gs("mySortedSet"), gs("Palermo"), gs("Catania")).get();
+     * System.out.println(result);
+     * }
+ */ + CompletableFuture geodist(GlideString key, GlideString member1, GlideString member2); + /** * Returns the GeoHash strings representing the positions of all the specified * members in the sorted set stored at key. diff --git a/java/client/src/main/java/glide/api/commands/HashBaseCommands.java b/java/client/src/main/java/glide/api/commands/HashBaseCommands.java index 51fe72dc03..c5b3939aa7 100644 --- a/java/client/src/main/java/glide/api/commands/HashBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/HashBaseCommands.java @@ -1,6 +1,7 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -72,6 +73,29 @@ public interface HashBaseCommands { */ CompletableFuture hsetnx(String key, String field, String value); + /** + * Sets field in the hash stored at key to value, only if + * field does not yet exist.
+ * If key does not exist, a new key holding a hash is created.
+ * If field already exists, this operation has no effect. + * + * @see redis.io for details. + * @param key The key of the hash. + * @param field The field to set the value for. + * @param value The value to set. + * @return true if the field was set, false if the field already existed + * and was not set. + * @example + *
{@code
+     * Boolean payload1 = client.hsetnx(gs("myHash"), gs("field"), gs("value")).get();
+     * assert payload1; // Indicates that the field "field" was set successfully in the hash "myHash".
+     *
+     * Boolean payload2 = client.hsetnx(gs("myHash"), gs("field"), gs("newValue")).get();
+     * assert !payload2; // Indicates that the field "field" already existed in the hash "myHash" and was not set again.
+     * }
+ */ + CompletableFuture hsetnx(GlideString key, GlideString field, GlideString value); + /** * Removes the specified fields from the hash stored at key. Specified fields that do * not exist within this hash are ignored. @@ -161,6 +185,25 @@ public interface HashBaseCommands { */ CompletableFuture hexists(String key, String field); + /** + * Returns if field is an existing field in the hash stored at key. + * + * @see redis.io for details. + * @param key The key of the hash. + * @param field The field to check in the hash stored at key. + * @return True if the hash contains the specified field. If the hash does not + * contain the field, or if the key does not exist, it returns False. + * @example + *
{@code
+     * Boolean exists = client.hexists(gs("my_hash"), gs("field1")).get();
+     * assert exists;
+     *
+     * Boolean exists = client.hexists(gs("my_hash"), gs("non_existent_field")).get();
+     * assert !exists;
+     * }
+ */ + CompletableFuture hexists(GlideString key, GlideString field); + /** * Returns all fields and values of the hash stored at key. * @@ -177,6 +220,22 @@ public interface HashBaseCommands { */ CompletableFuture> hgetall(String key); + /** + * Returns all fields and values of the hash stored at key. + * + * @see redis.io for details. + * @param key The key of the hash. + * @return A Map of fields and their values stored in the hash. Every field name in + * the map is associated with its corresponding value.
+ * If key does not exist, it returns an empty map. + * @example + *
{@code
+     * Map fieldValueMap = client.hgetall(gs("my_hash")).get();
+     * assert fieldValueMap.equals(Map.of(gs("field1"), gs("value1"), gs("field2"), gs("value2")));
+     * }
+ */ + CompletableFuture> hgetall(GlideString key); + /** * Increments the number stored at field in the hash stored at key by * increment. By using a negative increment value, the value stored at field in the @@ -199,6 +258,28 @@ public interface HashBaseCommands { */ CompletableFuture hincrBy(String key, String field, long amount); + /** + * Increments the number stored at field in the hash stored at key by + * increment. By using a negative increment value, the value stored at field in the + * hash stored at key is decremented. If field or key does + * not exist, it is set to 0 before performing the operation. + * + * @see redis.io for details. + * @param key The key of the hash. + * @param field The field in the hash stored at key to increment or decrement its + * value. + * @param amount The amount by which to increment or decrement the field's value. Use a negative + * value to decrement. + * @return The value of field in the hash stored at key after the + * increment or decrement. + * @example + *
{@code
+     * Long num = client.hincrBy(gs("my_hash"), gs("field1"), 5).get();
+     * assert num == 5L;
+     * }
+ */ + CompletableFuture hincrBy(GlideString key, GlideString field, long amount); + /** * Increments the string representing a floating point number stored at field in the * hash stored at key by increment. By using a negative increment value, the value @@ -222,6 +303,29 @@ public interface HashBaseCommands { */ CompletableFuture hincrByFloat(String key, String field, double amount); + /** + * Increments the string representing a floating point number stored at field in the + * hash stored at key by increment. By using a negative increment value, the value + * stored at field in the hash stored at key is decremented. If + * field or key does not exist, it is set to 0 before performing the + * operation. + * + * @see redis.io for details. + * @param key The key of the hash. + * @param field The field in the hash stored at key to increment or decrement its + * value. + * @param amount The amount by which to increment or decrement the field's value. Use a negative + * value to decrement. + * @return The value of field in the hash stored at key after the + * increment or decrement. + * @example + *
{@code
+     * Double num = client.hincrByFloat(gs("my_hash"), gs("field1"), 2.5).get();
+     * assert num == 2.5;
+     * }
+ */ + CompletableFuture hincrByFloat(GlideString key, GlideString field, double amount); + /** * Returns all field names in the hash stored at key. * @@ -254,6 +358,23 @@ public interface HashBaseCommands { */ CompletableFuture hstrlen(String key, String field); + /** + * Returns the string length of the value associated with field in the hash stored at + * key. + * + * @see valkey.io for details. + * @param key The key of the hash. + * @param field The field in the hash. + * @return The string length or 0 if field or key does not + * exist. + * @example + *
{@code
+     * Long strlen = client.hstrlen(gs("my_hash"), gs("my_field")).get();
+     * assert strlen >= 0L;
+     * }
+ */ + CompletableFuture hstrlen(GlideString key, GlideString field); + /** * Returns a random field name from the hash value stored at key. * diff --git a/java/client/src/main/java/glide/api/commands/HyperLogLogBaseCommands.java b/java/client/src/main/java/glide/api/commands/HyperLogLogBaseCommands.java index 022bc9ac45..45e1044f8f 100644 --- a/java/client/src/main/java/glide/api/commands/HyperLogLogBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/HyperLogLogBaseCommands.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; import java.util.concurrent.CompletableFuture; diff --git a/java/client/src/main/java/glide/api/commands/ListBaseCommands.java b/java/client/src/main/java/glide/api/commands/ListBaseCommands.java index 427a10e336..c3c7f24f9f 100644 --- a/java/client/src/main/java/glide/api/commands/ListBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/ListBaseCommands.java @@ -1,6 +1,7 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; import glide.api.models.commands.LInsertOptions.InsertPosition; import glide.api.models.commands.LPosOptions; import glide.api.models.commands.ListDirection; @@ -39,6 +40,27 @@ public interface ListBaseCommands { */ CompletableFuture lpush(String key, String[] elements); + /** + * Inserts all the specified values at the head of the list stored at key. + * elements are inserted one after the other to the head of the list, from the leftmost + * element to the rightmost element. If key does not exist, it is created as an empty + * list before performing the push operation. + * + * @see redis.io for details. + * @param key The key of the list. 
+ * @param elements The elements to insert at the head of the list stored at key. + * @return The length of the list after the push operation. + * @example + *
{@code
+     * Long pushCount1 = client.lpush(gs("my_list"), new GlideString[] {gs("value1"), gs("value2")}).get();
+     * assert pushCount1 == 2L;
+     *
+     * Long pushCount2 = client.lpush(gs("nonexistent_list"), new GlideString[] {gs("new_value")}).get();
+     * assert pushCount2 == 1L;
+     * }
+ */ + CompletableFuture lpush(GlideString key, GlideString[] elements); + /** * Removes and returns the first elements of the list stored at key. The command pops * a single element from the beginning of the list. @@ -255,6 +277,32 @@ CompletableFuture lposCount( */ CompletableFuture ltrim(String key, long start, long end); + /** + * Trims an existing list so that it will contain only the specified range of elements specified. + *
+ * The offsets start and end are zero-based indexes, with 0 being the + * first element of the list, 1 being the next element and so on.
+ * These offsets can also be negative numbers indicating offsets starting at the end of the list, + * with -1 being the last element of the list, -2 being the penultimate, and so on. + * + * @see redis.io for details. + * @param key The key of the list. + * @param start The starting point of the range. + * @param end The end of the range. + * @return Always OK.
+ * If start exceeds the end of the list, or if start is greater than + * end, the result will be an empty list (which causes key to be removed).
+ * If end exceeds the actual end of the list, it will be treated like the last + * element of the list.
+ * If key does not exist, OK will be returned without changes to the database. + * @example + *
{@code
+     * String payload = client.ltrim(gs("my_list"), 0, 1).get();
+     * assert payload.equals("OK");
+     * }
+ */ + CompletableFuture ltrim(GlideString key, long start, long end); + /** * Returns the length of the list stored at key. * @@ -271,6 +319,22 @@ CompletableFuture lposCount( */ CompletableFuture llen(String key); + /** + * Returns the length of the list stored at key. + * + * @see redis.io for details. + * @param key The key of the list. + * @return The length of the list at key.
+ * If key does not exist, it is interpreted as an empty list and 0 + * is returned. + * @example + *
{@code
+     * Long lenList = client.llen(gs("my_list")).get();
+     * assert lenList == 3L //Indicates that there are 3 elements in the list.;
+     * }
+ */ + CompletableFuture llen(GlideString key); + /** * Removes the first count occurrences of elements equal to element from * the list stored at key.
@@ -295,6 +359,30 @@ CompletableFuture lposCount( */ CompletableFuture lrem(String key, long count, String element); + /** + * Removes the first count occurrences of elements equal to element from + * the list stored at key.
+ * If count is positive: Removes elements equal to element moving from + * head to tail.
+ * If count is negative: Removes elements equal to element moving from + * tail to head.
+ * If count is 0 or count is greater than the occurrences of elements + * equal to element, it removes all elements equal to element. + * + * @see redis.io for details. + * @param key The key of the list. + * @param count The count of the occurrences of elements equal to element to remove. + * @param element The element to remove from the list. + * @return The number of the removed elements.
+ * If key does not exist, 0 is returned. + * @example + *
{@code
+     * Long num = client.lrem(gs("my_list"), 2, gs("value")).get();
+     * assert num == 2L;
+     * }
+ */ + CompletableFuture lrem(GlideString key, long count, GlideString element); + /** * Inserts all the specified values at the tail of the list stored at key.
* elements are inserted one after the other to the tail of the list, from the @@ -316,6 +404,27 @@ CompletableFuture lposCount( */ CompletableFuture rpush(String key, String[] elements); + /** + * Inserts all the specified values at the tail of the list stored at key.
+ * elements are inserted one after the other to the tail of the list, from the + * leftmost element to the rightmost element. If key does not exist, it is created as + * an empty list before performing the push operation. + * + * @see redis.io for details. + * @param key The key of the list. + * @param elements The elements to insert at the tail of the list stored at key. + * @return The length of the list after the push operation. + * @example + *
{@code
+     * Long pushCount1 = client.rpush(gs("my_list"), new GlideString[] {gs("value1"), gs("value2")}).get();
+     * assert pushCount1 == 2L;
+     *
+     * Long pushCount2 = client.rpush(gs("nonexistent_list"), new GlideString[] {gs("new_value")}).get();
+     * assert pushCount2 == 1L;
+     * }
+ */ + CompletableFuture rpush(GlideString key, GlideString[] elements); + /** * Removes and returns the last elements of the list stored at key.
* The command pops a single element from the end of the list. @@ -454,6 +563,23 @@ CompletableFuture linsert( */ CompletableFuture rpushx(String key, String[] elements); + /** + * Inserts all the specified values at the tail of the list stored at key, only if + * key exists and holds a list. If key is not a list, this performs no + * operation. + * + * @see redis.io for details. + * @param key The key of the list. + * @param elements The elements to insert at the tail of the list stored at key. + * @return The length of the list after the push operation. + * @example + *
{@code
+     * Long listLength = client.rpushx(gs("my_list"), new GlideString[] {gs("value1"), gs("value2")}).get();
+     * assert listLength >= 2L;
+     * }
+ */ + CompletableFuture rpushx(GlideString key, GlideString[] elements); + /** * Inserts all the specified values at the head of the list stored at key, only if * key exists and holds a list. If key is not a list, this performs no @@ -471,6 +597,23 @@ CompletableFuture linsert( */ CompletableFuture lpushx(String key, String[] elements); + /** + * Inserts all the specified values at the head of the list stored at key, only if + * key exists and holds a list. If key is not a list, this performs no + * operation. + * + * @see redis.io for details. + * @param key The key of the list. + * @param elements The elements to insert at the head of the list stored at key. + * @return The length of the list after the push operation. + * @example + *
{@code
+     * Long listLength = client.lpushx(gs("my_list"), new GlideString[] {gs("value1"), gs("value2")}).get();
+     * assert listLength >= 2L;
+     * }
+ */ + CompletableFuture lpushx(GlideString key, GlideString[] elements); + /** * Pops one or more elements from the first non-empty list from the provided keys * . @@ -600,6 +743,25 @@ CompletableFuture> blmpop( */ CompletableFuture lset(String key, long index, String element); + /** + * Sets the list element at index to element.
+ * The index is zero-based, so 0 means the first element, 1 the second + * element and so on. Negative indices can be used to designate elements starting at the tail of + * the list. Here, -1 means the last element, -2 means the penultimate + * and so forth. + * + * @see valkey.io for details. + * @param key The key of the list. + * @param index The index of the element in the list to be set. + * @return OK. + * @example + *
{@code
+     * String response = client.lset(gs("testKey"), 1, gs("two")).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture lset(GlideString key, long index, GlideString element); + /** * Atomically pops and removes the left/right-most element to the list stored at source * depending on wherefrom, and pushes the element at the first/last element diff --git a/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsBaseCommands.java b/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsBaseCommands.java index ac4773d74f..d1063ed3e8 100644 --- a/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsBaseCommands.java @@ -1,6 +1,7 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.configuration.ReadFrom; import java.util.concurrent.CompletableFuture; /** @@ -13,12 +14,14 @@ public interface ScriptingAndFunctionsBaseCommands { /** - * Invokes a previously loaded function. + * Invokes a previously loaded function.
+ * This command is routed to primary nodes only.
+ * To route to a replica please refer to {@link #fcallReadOnly}. * * @apiNote When in cluster mode *
    *
  • all keys must map to the same hash slot. - *
  • if no keys are given, command will be routed to a random node. + *
  • if no keys are given, command will be routed to a random primary node. *
* * @since Redis 7.0 and above. @@ -27,7 +30,7 @@ public interface ScriptingAndFunctionsBaseCommands { * @param keys An array of keys accessed by the function. To ensure the correct * execution of functions, both in standalone and clustered deployments, all names of keys * that a function accesses must be explicitly provided as keys. - * @param arguments An array of function arguments. Arguments + * @param arguments An array of function arguments. arguments * should not represent names of keys. * @return The invoked function's return value. * @example @@ -38,4 +41,32 @@ public interface ScriptingAndFunctionsBaseCommands { * } */ CompletableFuture fcall(String function, String[] keys, String[] arguments); + + /** + * Invokes a previously loaded read-only function.
+ * This command is routed depending on the client's {@link ReadFrom} strategy. + * + * @apiNote When in cluster mode + *
    + *
  • all keys must map to the same hash slot. + *
  • if no keys are given, command will be routed to a random node. + *
+ * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param function The function name. + * @param keys An array of keys accessed by the function. To ensure the correct + * execution of functions, both in standalone and clustered deployments, all names of keys + * that a function accesses must be explicitly provided as keys. + * @param arguments An array of function arguments. arguments + * should not represent names of keys. + * @return The invoked function's return value. + * @example + *
{@code
+     * String[] args = new String[] { "Answer", "to", "the", "Ultimate", "Question", "of", "Life,", "the", "Universe,", "and", "Everything"};
+     * Object response = client.fcallReadOnly("Deep_Thought", new String[0], args).get();
+     * assert response == 42L;
+     * }
+ */ + CompletableFuture fcallReadOnly(String function, String[] keys, String[] arguments); } diff --git a/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsClusterCommands.java b/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsClusterCommands.java index 84432a9954..313d0479dc 100644 --- a/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsClusterCommands.java +++ b/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsClusterCommands.java @@ -1,8 +1,10 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; import glide.api.models.ClusterValue; import glide.api.models.commands.FlushMode; +import glide.api.models.commands.function.FunctionRestorePolicy; +import glide.api.models.configuration.ReadFrom; import glide.api.models.configuration.RequestRoutingConfiguration.Route; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -257,6 +259,8 @@ CompletableFuture[]>> functionList( * @since Redis 7.0 and above. * @see redis.io for details. * @param libName The library name to delete. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. * @return OK. * @example *
{@code
@@ -267,10 +271,115 @@ CompletableFuture[]>> functionList(
     CompletableFuture functionDelete(String libName, Route route);
 
     /**
-     * Invokes a previously loaded function.
+ * Returns the serialized payload of all loaded libraries.
* The command will be routed to a random node. * * @since Redis 7.0 and above. + * @see redis.io for details. + * @return The serialized payload of all loaded libraries. + * @example + *
{@code
+     * byte[] data = client.functionDump().get();
+     * // data can be used to restore loaded functions on any Redis instance
+     * }
+ */ + CompletableFuture functionDump(); + + /** + * Returns the serialized payload of all loaded libraries. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. + * @return The serialized payload of all loaded libraries. + * @example + *
{@code
+     * byte[] data = client.functionDump(RANDOM).get().getSingleValue();
+     * // data can be used to restore loaded functions on any Redis instance
+     * }
+ */ + CompletableFuture> functionDump(Route route); + + /** + * Restores libraries from the serialized payload returned by {@link #functionDump()}.
+ * The command will be routed to all primary nodes. + * + * @since Redis 7.0 and above. + * @see redis.io for + * details. + * @param payload The serialized data from {@link #functionDump()}. + * @return OK. + * @example + *
{@code
+     * String response = client.functionRestore(data).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture functionRestore(byte[] payload); + + /** + * Restores libraries from the serialized payload returned by {@link #functionDump()}.
+ * The command will be routed to all primary nodes. + * + * @since Redis 7.0 and above. + * @see redis.io for + * details. + * @param payload The serialized data from {@link #functionDump()}. + * @param policy A policy for handling existing libraries. + * @return OK. + * @example + *
{@code
+     * String response = client.functionRestore(data, FLUSH).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture functionRestore(byte[] payload, FunctionRestorePolicy policy); + + /** + * Restores libraries from the serialized payload returned by {@link #functionDump(Route)}. + * + * @since Redis 7.0 and above. + * @see redis.io for + * details. + * @param payload The serialized data from {@link #functionDump()}. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. + * @return OK. + * @example + *
{@code
+     * String response = client.functionRestore(data, ALL_PRIMARIES).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture functionRestore(byte[] payload, Route route); + + /** + * Restores libraries from the serialized payload returned by {@link #functionDump(Route)}. + * + * @since Redis 7.0 and above. + * @see redis.io for + * details. + * @param payload The serialized data from {@link #functionDump()}. + * @param policy A policy for handling existing libraries. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. + * @return OK. + * @example + *
{@code
+     * String response = client.functionRestore(data, FLUSH, ALL_PRIMARIES).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture functionRestore( + byte[] payload, FunctionRestorePolicy policy, Route route); + + /** + * Invokes a previously loaded function.
+ * The command will be routed to a random primary node.<br>
+ * To route to a replica please refer to {@link #fcallReadOnly(String)}. + * + * @since Redis 7.0 and above. * @see redis.io for details. * @param function The function name. * @return The invoked function's return value. @@ -303,12 +412,13 @@ CompletableFuture[]>> functionList( /** * Invokes a previously loaded function.
- * The command will be routed to a random node. + * The command will be routed to a random primary node.
+ * To route to a replica please refer to {@link #fcallReadOnly(String, String[])}. * * @since Redis 7.0 and above. * @see redis.io for details. * @param function The function name. - * @param arguments An array of function arguments. Arguments + * @param arguments An array of function arguments. arguments * should not represent names of keys. * @return The invoked function's return value. * @example @@ -326,7 +436,7 @@ CompletableFuture[]>> functionList( * @since Redis 7.0 and above. * @see redis.io for details. * @param function The function name. - * @param arguments An array of function arguments. Arguments + * @param arguments An array of function arguments. arguments * should not represent names of keys. * @param route Specifies the routing configuration for the command. The client will route the * command to the nodes defined by route. @@ -340,6 +450,81 @@ CompletableFuture[]>> functionList( */ CompletableFuture> fcall(String function, String[] arguments, Route route); + /** + * Invokes a previously loaded read-only function.
+ * The command is routed to a random node depending on the client's {@link ReadFrom} strategy. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param function The function name. + * @return The invoked function's return value. + * @example + *
{@code
+     * Object response = client.fcallReadOnly("Deep_Thought").get();
+     * assert response == 42L;
+     * }
+ */ + CompletableFuture fcallReadOnly(String function); + + /** + * Invokes a previously loaded read-only function. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param function The function name. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. + * @return The invoked function's return value wrapped by a {@link ClusterValue}. + * @example + *
{@code
+     * ClusterValue response = client.fcallReadOnly("Deep_Thought", ALL_NODES).get();
+     * for (Object nodeResponse : response.getMultiValue().values()) {
+     *   assert nodeResponse == 42L;
+     * }
+     * }
+     */
+    CompletableFuture> fcallReadOnly(String function, Route route);
+
+    /**
+     * Invokes a previously loaded function.
+ * The command is routed to a random node depending on the client's {@link ReadFrom} strategy. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param function The function name. + * @param arguments An array of function arguments. arguments + * should not represent names of keys. + * @return The invoked function's return value. + * @example + *
{@code
+     * String[] args = new String[] { "Answer", "to", "the", "Ultimate", "Question", "of", "Life,", "the", "Universe,", "and", "Everything" };
+     * Object response = client.fcallReadOnly("Deep_Thought", args).get();
+     * assert response == 42L;
+     * }
+ */ + CompletableFuture fcallReadOnly(String function, String[] arguments); + + /** + * Invokes a previously loaded read-only function. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param function The function name. + * @param arguments An array of function arguments. arguments + * should not represent names of keys. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. + * @return The invoked function's return value wrapped by a {@link ClusterValue}. + * @example + *
{@code
+     * String[] args = new String[] { "Answer", "to", "the", "Ultimate", "Question", "of", "Life,", "the", "Universe,", "and", "Everything" };
+     * ClusterValue response = client.fcallReadOnly("Deep_Thought", args, RANDOM).get();
+     * assert response.getSingleValue() == 42L;
+     * }
+     */
+    CompletableFuture> fcallReadOnly(
+            String function, String[] arguments, Route route);
+
     /**
      * Kills a function that is currently executing.
* FUNCTION KILL terminates read-only functions only.
diff --git a/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsCommands.java b/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsCommands.java index daa69819dd..189a88e874 100644 --- a/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsCommands.java +++ b/java/client/src/main/java/glide/api/commands/ScriptingAndFunctionsCommands.java @@ -1,7 +1,9 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; import glide.api.models.commands.FlushMode; +import glide.api.models.commands.function.FunctionRestorePolicy; +import glide.api.models.configuration.ReadFrom; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -129,7 +131,56 @@ public interface ScriptingAndFunctionsCommands { CompletableFuture functionDelete(String libName); /** - * Invokes a previously loaded function. + * Returns the serialized payload of all loaded libraries. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @return The serialized payload of all loaded libraries. + * @example + *
{@code
+     * byte[] data = client.functionDump().get();
+     * // now data could be saved to restore loaded functions on any Redis instance
+     * }
+ */ + CompletableFuture functionDump(); + + /** + * Restores libraries from the serialized payload returned by {@link #functionDump()}. + * + * @since Redis 7.0 and above. + * @see redis.io for + * details. + * @param payload The serialized data from {@link #functionDump()}. + * @return OK. + * @example + *
{@code
+     * String response = client.functionRestore(data).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture functionRestore(byte[] payload); + + /** + * Restores libraries from the serialized payload returned by {@link #functionDump()}.. + * + * @since Redis 7.0 and above. + * @see redis.io for + * details. + * @param payload The serialized data from {@link #functionDump()}. + * @param policy A policy for handling existing libraries. + * @return OK. + * @example + *
{@code
+     * String response = client.functionRestore(data, FLUSH).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture functionRestore(byte[] payload, FunctionRestorePolicy policy); + + /** + * Invokes a previously loaded function.
+ * This command is routed to primary nodes only.
+ * To route to a replica please refer to {@link #fcallReadOnly}. * * @since Redis 7.0 and above. * @see redis.io for details. @@ -143,6 +194,22 @@ public interface ScriptingAndFunctionsCommands { */ CompletableFuture fcall(String function); + /** + * Invokes a previously loaded read-only function.
+ * This command is routed depending on the client's {@link ReadFrom} strategy. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param function The function name. + * @return The invoked function's return value. + * @example + *
{@code
+     * Object response = client.fcallReadOnly("Deep_Thought").get();
+     * assert response == 42L;
+     * }
+ */ + CompletableFuture fcallReadOnly(String function); + /** * Kills a function that is currently executing.
* FUNCTION KILL terminates read-only functions only. diff --git a/java/client/src/main/java/glide/api/commands/ServerManagementClusterCommands.java b/java/client/src/main/java/glide/api/commands/ServerManagementClusterCommands.java index edfd1cd5d7..0f9b4e3dbe 100644 --- a/java/client/src/main/java/glide/api/commands/ServerManagementClusterCommands.java +++ b/java/client/src/main/java/glide/api/commands/ServerManagementClusterCommands.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; import glide.api.models.ClusterValue; @@ -6,7 +6,6 @@ import glide.api.models.commands.InfoOptions; import glide.api.models.commands.InfoOptions.Section; import glide.api.models.configuration.RequestRoutingConfiguration.Route; -import glide.api.models.configuration.RequestRoutingConfiguration.SingleNodeRoute; import java.util.Map; import java.util.concurrent.CompletableFuture; @@ -362,7 +361,7 @@ public interface ServerManagementClusterCommands { * assert response.equals("OK"); * } */ - CompletableFuture flushall(SingleNodeRoute route); + CompletableFuture flushall(Route route); /** * Deletes all the keys of all the existing databases. This command never fails. @@ -380,7 +379,71 @@ public interface ServerManagementClusterCommands { * assert response.equals("OK"); * } */ - CompletableFuture flushall(FlushMode mode, SingleNodeRoute route); + CompletableFuture flushall(FlushMode mode, Route route); + + /** + * Deletes all the keys of the currently selected database. This command never fails.
+ * The command will be routed to all primary nodes. + * + * @see valkey.io for details. + * @return OK. + * @example + *
{@code
+     * String response = client.flushdb().get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture flushdb(); + + /** + * Deletes all the keys of the currently selected database. This command never fails.
+ * The command will be routed to all primary nodes. + * + * @see valkey.io for details. + * @param mode The flushing mode, could be either {@link FlushMode#SYNC} or {@link + * FlushMode#ASYNC}. + * @return OK. + * @example + *
{@code
+     * String response = client.flushdb(ASYNC).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture flushdb(FlushMode mode); + + /** + * Deletes all the keys of the currently selected database. This command never fails. + * + * @see valkey.io for details. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. + * @return OK. + * @example + *
{@code
+     * Route route = new SlotKeyRoute("key", PRIMARY);
+     * String response = client.flushdb(route).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture flushdb(Route route); + + /** + * Deletes all the keys of the currently selected database. This command never fails. + * + * @see valkey.io for details. + * @param mode The flushing mode, could be either {@link FlushMode#SYNC} or {@link + * FlushMode#ASYNC}. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. + * @return OK. + * @example + *
{@code
+     * Route route = new SlotKeyRoute("key", PRIMARY);
+     * String response = client.flushdb(SYNC, route).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture flushdb(FlushMode mode, Route route); /** * Displays a piece of generative computer art and the Redis version.
diff --git a/java/client/src/main/java/glide/api/commands/ServerManagementCommands.java b/java/client/src/main/java/glide/api/commands/ServerManagementCommands.java index d59568eb73..330908fec3 100644 --- a/java/client/src/main/java/glide/api/commands/ServerManagementCommands.java +++ b/java/client/src/main/java/glide/api/commands/ServerManagementCommands.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; import glide.api.models.commands.FlushMode; @@ -180,6 +180,34 @@ public interface ServerManagementCommands { */ CompletableFuture flushall(FlushMode mode); + /** + * Deletes all the keys of the currently selected database. This command never fails. + * + * @see valkey.io for details. + * @return OK. + * @example + *
{@code
+     * String response = client.flushdb().get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture flushdb(); + + /** + * Deletes all the keys of the currently selected database. This command never fails. + * + * @see valkey.io for details. + * @param mode The flushing mode, could be either {@link FlushMode#SYNC} or {@link + * FlushMode#ASYNC}. + * @return OK. + * @example + *
{@code
+     * String response = client.flushdb(ASYNC).get();
+     * assert response.equals("OK");
+     * }
+ */ + CompletableFuture flushdb(FlushMode mode); + /** * Displays a piece of generative computer art and the Redis version. * diff --git a/java/client/src/main/java/glide/api/commands/SetBaseCommands.java b/java/client/src/main/java/glide/api/commands/SetBaseCommands.java index cb17baf6ad..72bea1012f 100644 --- a/java/client/src/main/java/glide/api/commands/SetBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/SetBaseCommands.java @@ -1,6 +1,7 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; import java.util.Set; import java.util.concurrent.CompletableFuture; @@ -32,6 +33,24 @@ public interface SetBaseCommands { */ CompletableFuture sadd(String key, String[] members); + /** + * Adds specified members to the set stored at key. Specified members that are + * already a member of this set are ignored. + * + * @see redis.io for details. + * @param key The key where members will be added to its set. + * @param members A list of members to add to the set stored at key. + * @return The number of members that were added to the set, excluding members already present. + * @remarks If key does not exist, a new set is created before adding members + * . + * @example + *
{@code
+     * Long result = client.sadd(gs("my_set"), new GlideString[]{gs("member1"), gs("member2")}).get();
+     * assert result == 2L;
+     * }
+ */ + CompletableFuture sadd(GlideString key, GlideString[] members); + /** * Removes specified members from the set stored at key. Specified members that are * not a member of this set are ignored. @@ -50,6 +69,24 @@ public interface SetBaseCommands { */ CompletableFuture srem(String key, String[] members); + /** + * Removes specified members from the set stored at key. Specified members that are + * not a member of this set are ignored. + * + * @see redis.io for details. + * @param key The key from which members will be removed. + * @param members A list of members to remove from the set stored at key. + * @return The number of members that were removed from the set, excluding non-existing members. + * @remarks If key does not exist, it is treated as an empty set and this command + * returns 0. + * @example + *
{@code
+     * Long result = client.srem(gs("my_set"), new GlideString[]{gs("member1"), gs("member2")}).get();
+     * assert result == 2L;
+     * }
+ */ + CompletableFuture srem(GlideString key, GlideString[] members); + /** * Retrieves all the members of the set value stored at key. * @@ -65,6 +102,21 @@ public interface SetBaseCommands { */ CompletableFuture> smembers(String key); + /** + * Retrieves all the members of the set value stored at key. + * + * @see redis.io for details. + * @param key The key from which to retrieve the set members. + * @return A Set of all members of the set. + * @remarks If key does not exist an empty set will be returned. + * @example + *
{@code
+     * Set result = client.smembers(gs("my_set")).get();
+     * assert result.equals(Set.of(gs("member1"), gs("member2"), gs("member3")));
+     * }
+ */ + CompletableFuture> smembers(GlideString key); + /** * Retrieves the set cardinality (number of elements) of the set stored at key. * @@ -79,6 +131,20 @@ public interface SetBaseCommands { */ CompletableFuture scard(String key); + /** + * Retrieves the set cardinality (number of elements) of the set stored at key. + * + * @see redis.io for details. + * @param key The key from which to retrieve the number of set members. + * @return The cardinality (number of elements) of the set, or 0 if the key does not exist. + * @example + *
{@code
+     * Long result = client.scard(gs("my_set")).get();
+     * assert result == 3L;
+     * }
+ */ + CompletableFuture scard(GlideString key); + /** * Checks whether each member is contained in the members of the set stored at key. * @@ -116,6 +182,27 @@ public interface SetBaseCommands { */ CompletableFuture smove(String source, String destination, String member); + /** + * Moves member from the set at source to the set at destination + * , removing it from the source set. Creates a new destination set if needed. The + * operation is atomic. + * + * @apiNote When in cluster mode, both source and destination must map + * to the same hash slot. + * @see redis.io for details. + * @param source The key of the set to remove the element from. + * @param destination The key of the set to add the element to. + * @param member The set element to move. + * @return true on success, or false if the source set does + * not exist or the element is not a member of the source set. + * @example + *
{@code
+     * Boolean moved = client.smove(gs("set1"), gs("set2"), gs("element")).get();
+     * assert moved;
+     * }
+ */ + CompletableFuture smove(GlideString source, GlideString destination, GlideString member); + /** * Returns if member is a member of the set stored at key. * @@ -136,6 +223,26 @@ public interface SetBaseCommands { */ CompletableFuture sismember(String key, String member); + /** + * Returns if member is a member of the set stored at key. + * + * @see redis.io for details. + * @param key The key of the set. + * @param member The member to check for existence in the set. + * @return true if the member exists in the set, false otherwise. If + * key doesn't exist, it is treated as an empty set and the command + * returns false. + * @example + *
{@code
+     * Boolean payload1 = client.sismember(gs("mySet"), gs("member1")).get();
+     * assert payload1; // Indicates that "member1" exists in the set "mySet".
+     *
+     * Boolean payload2 = client.sismember(gs("mySet"), gs("nonExistingMember")).get();
+     * assert !payload2; // Indicates that "nonExistingMember" does not exist in the set "mySet".
+     * }
+ */ + CompletableFuture sismember(GlideString key, GlideString member); + /** * Computes the difference between the first set and all the successive sets in keys. * @@ -189,6 +296,25 @@ public interface SetBaseCommands { */ CompletableFuture> sinter(String[] keys); + /** + * Gets the intersection of all the given sets. + * + * @apiNote When in cluster mode, all keys must map to the same hash slot. + * @see redis.io for details. + * @param keys The keys of the sets. + * @return A Set of members which are present in all given sets.
+ * If one or more sets do not exist, an empty set will be returned. + * @example + *
{@code
+     * Set values = client.sinter(new GlideString[] {gs("set1"), gs("set2")}).get();
+     * assert values.contains(gs("element")); // Indicates that these sets have a common element
+     *
+     * Set values = client.sinter(new GlideString[] {gs("set1"), gs("nonExistingSet")}).get();
+     * assert values.size() == 0;
+     * }
+ */ + CompletableFuture> sinter(GlideString[] keys); + /** * Gets the cardinality of the intersection of all the given sets. * @@ -209,6 +335,26 @@ public interface SetBaseCommands { */ CompletableFuture sintercard(String[] keys); + /** + * Gets the cardinality of the intersection of all the given sets. + * + * @since Redis 7.0 and above. + * @apiNote When in cluster mode, all keys must map to the same hash slot. + * @see redis.io for details. + * @param keys The keys of the sets. + * @return The cardinality of the intersection result. If one or more sets do not exist, 0 + * is returned. + * @example + *
{@code
+     * Long response = client.sintercard(new GlideString[] {gs("set1"), gs("set2")}).get();
+     * assertEquals(2L, response);
+     *
+     * Long emptyResponse = client.sintercard(new GlideString[] {gs("set1"), gs("nonExistingSet")}).get();
+     * assertEquals(emptyResponse, 0L);
+     * }
+ */ + CompletableFuture sintercard(GlideString[] keys); + /** * Gets the cardinality of the intersection of all the given sets. * @@ -235,6 +381,32 @@ public interface SetBaseCommands { */ CompletableFuture sintercard(String[] keys, long limit); + /** + * Gets the cardinality of the intersection of all the given sets. + * + * @since Redis 7.0 and above. + * @apiNote When in cluster mode, all keys must map to the same hash slot. + * @see redis.io for details. + * @param keys The keys of the sets. + * @param limit The limit for the intersection cardinality value. + * @return The cardinality of the intersection result. If one or more sets do not exist, 0 + * is returned. If the intersection cardinality reaches limit partway + * through the computation, returns limit as the cardinality. + * @example + *
{@code
+     * Long response = client.sintercard(new GlideString[] {gs("set1"), gs("set2")}, 3).get();
+     * assertEquals(2L, response);
+     *
+     * Long emptyResponse = client.sintercard(new GlideString[] {gs("set1"), gs("nonExistingSet")}, 3).get();
+     * assertEquals(emptyResponse, 0L);
+     *
+     * // when intersection cardinality > limit, returns limit as cardinality
+     * Long response2 = client.sintercard(new GlideString[] {gs("set3"), gs("set4")}, 3).get();
+     * assertEquals(3L, response2);
+     * }
+ */ + CompletableFuture sintercard(GlideString[] keys, long limit); + /** * Stores the members of the intersection of all given sets specified by keys into a * new set at destination. @@ -253,6 +425,24 @@ public interface SetBaseCommands { */ CompletableFuture sinterstore(String destination, String[] keys); + /** + * Stores the members of the intersection of all given sets specified by keys into a + * new set at destination. + * + * @apiNote When in cluster mode, destination and all keys must map to + * the same hash slot. + * @see redis.io for details. + * @param destination The key of the destination set. + * @param keys The keys from which to retrieve the set members. + * @return The number of elements in the resulting set. + * @example + *
{@code
+     * Long length = client.sinterstore(gs("mySet"), new GlideString[] { gs("set1"), gs("set2") }).get();
+     * assert length == 5L;
+     * }
+ */ + CompletableFuture sinterstore(GlideString destination, GlideString[] keys); + /** * Stores the members of the union of all given sets specified by keys into a new set * at destination. @@ -342,4 +532,25 @@ public interface SetBaseCommands { * } */ CompletableFuture> spopCount(String key, long count); + + /** + * Gets the union of all the given sets. + * + * @apiNote When in cluster mode, all keys must map to the same hash slot. + * @see valkey.io for details. + * @param keys The keys of the sets. + * @return A set of members which are present in at least one of the given sets. If none of the + * sets exist, an empty set will be returned. + * @example + *
{@code
+     * assert client.sadd("my_set1", new String[]{"member1", "member2"}).get() == 2;
+     * assert client.sadd("my_set2", new String[]{"member2", "member3"}).get() == 2;
+     * Set result = client.sunion(new String[] {"my_set1", "my_set2"}).get();
+     * assertEquals(Set.of("member1", "member2", "member3"), result);
+     *
+     * result = client.sunion(new String[] {"my_set1", "non_existent_set"}).get();
+     * assertEquals(Set.of("member1", "member2"), result);
+     * }
+ */ + CompletableFuture> sunion(String[] keys); } diff --git a/java/client/src/main/java/glide/api/commands/SortedSetBaseCommands.java b/java/client/src/main/java/glide/api/commands/SortedSetBaseCommands.java index 12956d9425..0da26317d4 100644 --- a/java/client/src/main/java/glide/api/commands/SortedSetBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/SortedSetBaseCommands.java @@ -1,6 +1,7 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; import glide.api.models.commands.RangeOptions.InfLexBound; import glide.api.models.commands.RangeOptions.InfScoreBound; import glide.api.models.commands.RangeOptions.LexBoundary; @@ -198,6 +199,28 @@ CompletableFuture zaddIncr( */ CompletableFuture zrem(String key, String[] members); + /** + * Removes the specified members from the sorted set stored at key.
+ * Specified members that are not a member of this set are ignored. + * + * @see redis.io for more details. + * @param key The key of the sorted set. + * @param members An array of members to remove from the sorted set. + * @return The number of members that were removed from the sorted set, not including non-existing + * members.
+ * If key does not exist, it is treated as an empty sorted set, and this command + * returns 0. + * @example + *
{@code
+     * Long num1 = client.zrem(gs("mySortedSet"), new GlideString[] {gs("member1"), gs("member2")}).get();
+     * assert num1 == 2L; // Indicates that two members have been removed from the sorted set "mySortedSet".
+     *
+     * Long num2 = client.zrem(gs("nonExistingSortedSet"), new GlideString[] {gs("member1"), gs("member2")}).get();
+     * assert num2 == 0L; // Indicates that no members were removed as the sorted set "nonExistingSortedSet" does not exist.
+     * }
+ */ + CompletableFuture zrem(GlideString key, GlideString[] members); + /** * Returns the cardinality (number of elements) of the sorted set stored at key. * @@ -217,6 +240,25 @@ CompletableFuture zaddIncr( */ CompletableFuture zcard(String key); + /** + * Returns the cardinality (number of elements) of the sorted set stored at key. + * + * @see redis.io for more details. + * @param key The key of the sorted set. + * @return The number of elements in the sorted set.
+ * If key does not exist, it is treated as an empty sorted set, and this command + * return 0. + * @example + *
{@code
+     * Long num1 = client.zcard(gs("mySortedSet")).get();
+     * assert num1 == 3L; // Indicates that there are 3 elements in the sorted set "mySortedSet".
+     *
+     * Long num2 = client.zcard(gs("nonExistingSortedSet")).get();
+     * assert num2 == 0L;
+     * }
+ */ + CompletableFuture zcard(GlideString key); + /** * Removes and returns up to count members with the lowest scores from the sorted set * stored at the specified key. @@ -373,6 +415,26 @@ CompletableFuture zaddIncr( */ CompletableFuture zscore(String key, String member); + /** + * Returns the score of member in the sorted set stored at key. + * + * @see redis.io for more details. + * @param key The key of the sorted set. + * @param member The member whose score is to be retrieved. + * @return The score of the member.
+ * If member does not exist in the sorted set, null is returned.
+ * If key does not exist, null is returned. + * @example + *
{@code
+     * Double num1 = client.zscore(gs("mySortedSet"), gs("member")).get();
+     * assert num1 == 10.5; // Indicates that the score of "member" in the sorted set "mySortedSet" is 10.5.
+     *
+     * Double num2 = client.zscore(gs("mySortedSet"), gs("nonExistingMember")).get();
+     * assert num2 == null;
+     * }
+ */ + CompletableFuture zscore(GlideString key, GlideString member); + /** * Returns the specified range of elements in the sorted set stored at key.
* ZRANGE can perform different types of range queries: by index (rank), by the @@ -581,6 +643,28 @@ CompletableFuture zrangestore( */ CompletableFuture zrank(String key, String member); + /** + * Returns the rank of member in the sorted set stored at key, with + * scores ordered from low to high, starting from 0.
+ * To get the rank of member with its score, see {@link #zrankWithScore}. + * + * @see redis.io for more details. + * @param key The key of the sorted set. + * @param member The member whose rank is to be retrieved. + * @return The rank of member in the sorted set.
+ * If key doesn't exist, or if member is not present in the set, + * null will be returned. + * @example + *
{@code
+     * Long num1 = client.zrank(gs("mySortedSet"), gs("member2")).get();
+     * assert num1 == 3L; // Indicates that "member2" has rank 3 (the fourth-lowest score) in the sorted set "mySortedSet".
+     *
+     * Long num2 = client.zrank(gs("mySortedSet"), gs("nonExistingMember")).get();
+     * assert num2 == null; // Indicates that "nonExistingMember" is not present in the sorted set "mySortedSet".
+     * }
+ */ + CompletableFuture zrank(GlideString key, GlideString member); + /** * Returns the rank of member in the sorted set stored at key with its * score, where scores are ordered from the lowest to highest, starting from 0.
@@ -667,6 +751,24 @@ CompletableFuture zrangestore( */ CompletableFuture zmscore(String key, String[] members); + /** + * Returns the scores associated with the specified members in the sorted set stored + * at key. + * + * @see redis.io for more details. + * @param key The key of the sorted set. + * @param members An array of members in the sorted set. + * @return An Array of scores of the members.
+ * If a member does not exist, the corresponding value in the Array + * will be null. + * @example + *
{@code
+     * Double[] payload = client.zmscore(key1, new GlideString[] {gs("one"), gs("nonExistentMember"), gs("three")}).get();
+     * assert payload.equals(new Double[] {1.0, null, 3.0});
+     * }
+ */ + CompletableFuture zmscore(GlideString key, GlideString[] members); + /** * Returns the difference between the first sorted set and all the successive sorted sets.
* To get the elements with their scores, see {@link #zdiffWithScores}. @@ -726,6 +828,26 @@ CompletableFuture zrangestore( */ CompletableFuture zdiffstore(String destination, String[] keys); + /** + * Calculates the difference between the first sorted set and all the successive sorted sets at + * keys and stores the difference as a sorted set to destination, + * overwriting it if it already exists. Non-existent keys are treated as empty sets. + * + * @apiNote When in cluster mode, destination and all keys must map to + * the same hash slot. + * @since Redis 6.2 and above. + * @see redis.io for more details. + * @param destination The key for the resulting sorted set. + * @param keys The keys of the sorted sets to compare. + * @return The number of members in the resulting sorted set stored at destination. + * @example + *
{@code
+     * Long payload = client.zdiffstore(gs("mySortedSet"), new GlideString[] {gs("key1"), gs("key2")}).get();
+     * assert payload > 0; // At least one member differed in "key1" compared to "key2", and this difference was stored in "mySortedSet".
+     * }
+ */ + CompletableFuture zdiffstore(GlideString destination, GlideString[] keys); + /** * Returns the number of members in the sorted set stored at key with scores between * minScore and maxScore. @@ -780,6 +902,33 @@ CompletableFuture zrangestore( */ CompletableFuture zremrangebyrank(String key, long start, long end); + /** + * Removes all elements in the sorted set stored at key with rank between start + * and end. Both start and end are zero-based + * indexes with 0 being the element with the lowest score. These indexes can be + * negative numbers, where they indicate offsets starting at the element with the highest score. + * + * @see redis.io for more details. + * @param key The key of the sorted set. + * @param start The starting point of the range. + * @param end The end of the range. + * @return The number of elements removed.
+ * If start exceeds the end of the sorted set, or if start is + * greater than end, 0 returned.
+ * If end exceeds the actual end of the sorted set, the range will stop at the + * actual end of the sorted set.
+ * If key does not exist 0 will be returned. + * @example + *
{@code
+     * Long payload1 = client.zremrangebyrank(gs("mySortedSet"), 0, 4).get();
+     * assert payload1 == 5L; // Indicates that 5 elements, with ranks ranging from 0 to 4 (inclusive), have been removed from "mySortedSet".
+     *
+     * Long payload2 = client.zremrangebyrank(gs("mySortedSet"), 0, 4).get();
+     * assert payload2 == 0L; // Indicates that nothing was removed.
+     * }
+ */ + CompletableFuture zremrangebyrank(GlideString key, long start, long end); + /** * Removes all elements in the sorted set stored at key with a lexicographical order * between minLex and maxLex. @@ -1335,6 +1484,26 @@ CompletableFuture> zinterWithScores( */ CompletableFuture zincrby(String key, double increment, String member); + /** + * Increments the score of member in the sorted set stored at key by + * increment.
+ * If member does not exist in the sorted set, it is added with increment + * as its score. If key does not exist, a new sorted set with the specified + * member as its sole member is created. + * + * @see redis.io for more details. + * @param key The key of the sorted set. + * @param increment The score increment. + * @param member A member of the sorted set. + * @return The new score of member. + * @example + *
{@code
+     * Double score = client.zincrby(gs("mySortedSet"), -3.14, gs("value")).get();
+     * assert score > 0; // member "value" existed in the set before score was altered
+     * }
+ */ + CompletableFuture zincrby(GlideString key, double increment, GlideString member); + /** * Returns the cardinality of the intersection of the sorted sets specified by keys. * @@ -1351,6 +1520,22 @@ CompletableFuture> zinterWithScores( */ CompletableFuture zintercard(String[] keys); + /** + * Returns the cardinality of the intersection of the sorted sets specified by keys. + * + * @apiNote When in cluster mode, all keys must map to the same hash slot. + * @since Redis 7.0 and above. + * @see redis.io for more details. + * @param keys The keys of the sorted sets to intersect. + * @return The cardinality of the intersection of the given sorted sets. + * @example + *
{@code
+     * Long length = client.zintercard(new GlideString[] {gs("mySortedSet1"), gs("mySortedSet2")}).get();
+     * assert length == 3L;
+     * }
+ */ + CompletableFuture zintercard(GlideString[] keys); + /** * Returns the cardinality of the intersection of the sorted sets specified by keys. * If the intersection cardinality reaches limit partway through the computation, the @@ -1371,4 +1556,25 @@ CompletableFuture> zinterWithScores( * } */ CompletableFuture zintercard(String[] keys, long limit); + + /** + * Returns the cardinality of the intersection of the sorted sets specified by keys. + * If the intersection cardinality reaches limit partway through the computation, the + * algorithm will exit early and yield limit as the cardinality. + * + * @apiNote When in cluster mode, all keys must map to the same hash slot. + * @since Redis 7.0 and above. + * @see redis.io for more details. + * @param keys The keys of the sorted sets to intersect. + * @param limit Specifies a maximum number for the intersection cardinality. If limit is set to + * 0 the range will be unlimited. + * @return The cardinality of the intersection of the given sorted sets, or the limit + * if reached. + * @example + *
{@code
+     * Long length = client.zintercard(new GlideString[] {gs("mySortedSet1"), gs("mySortedSet2")}, 5).get();
+     * assert length == 3L;
+     * }
+ */ + CompletableFuture zintercard(GlideString[] keys, long limit); } diff --git a/java/client/src/main/java/glide/api/commands/StreamBaseCommands.java b/java/client/src/main/java/glide/api/commands/StreamBaseCommands.java index dbe98bc6b3..7c87d1cede 100644 --- a/java/client/src/main/java/glide/api/commands/StreamBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/StreamBaseCommands.java @@ -1,11 +1,15 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; import glide.api.models.commands.stream.StreamAddOptions; import glide.api.models.commands.stream.StreamAddOptions.StreamAddOptionsBuilder; +import glide.api.models.commands.stream.StreamGroupOptions; +import glide.api.models.commands.stream.StreamPendingOptions; import glide.api.models.commands.stream.StreamRange; import glide.api.models.commands.stream.StreamRange.IdBound; import glide.api.models.commands.stream.StreamRange.InfRangeBound; +import glide.api.models.commands.stream.StreamReadGroupOptions; import glide.api.models.commands.stream.StreamReadOptions; import glide.api.models.commands.stream.StreamTrimOptions; import java.util.Map; @@ -23,7 +27,7 @@ public interface StreamBaseCommands { * Adds an entry to the specified stream stored at key.
* If the key doesn't exist, the stream is created. * - * @see redis.io for details. + * @see valkey.io for details. * @param key The key of the stream. * @param values Field-value pairs to be added to the entry. * @return The id of the added entry. @@ -39,7 +43,7 @@ public interface StreamBaseCommands { * Adds an entry to the specified stream stored at key.
* If the key doesn't exist, the stream is created. * - * @see redis.io for details. + * @see valkey.io for details. * @param key The key of the stream. * @param values Field-value pairs to be added to the entry. * @param options Stream add options {@link StreamAddOptions}. @@ -63,11 +67,11 @@ public interface StreamBaseCommands { * * @apiNote When in cluster mode, all keys in keysAndIds must map to the same hash * slot. - * @see redis.io for details. + * @see valkey.io for details. * @param keysAndIds A Map of keys and entry ids to read from. The * Map is composed of a stream's key and the id of the entry after which the stream * will be read. - * @return A {@literal Map>} with stream + * @return A {@literal Map>} with stream * keys, to Map of stream-ids, to an array of pairings with format [[field, entry], [field, entry], ...]. * @example *
{@code
@@ -89,12 +93,12 @@ public interface StreamBaseCommands {
      *
      * @apiNote When in cluster mode, all keys in keysAndIds must map to the same hash
      *     slot.
-     * @see redis.io for details.
+     * @see valkey.io for details.
      * @param keysAndIds A Map of keys and entry ids to read from. The 
      *     Map is composed of a stream's key and the id of the entry after which the stream
      *     will be read.
      * @param options Options detailing how to read the stream {@link StreamReadOptions}.
-     * @return A {@literal Map>} with stream
+     * @return A {@literal Map>} with stream
      *     keys, to Map of stream-ids, to an array of pairings with format [[field, entry], [field, entry], ...].
      * @example
      *     
{@code
@@ -117,7 +121,7 @@ CompletableFuture>> xread(
     /**
      * Trims the stream by evicting older entries.
      *
-     * @see redis.io for details.
+     * @see valkey.io for details.
      * @param key The key of the stream.
      * @param options Stream trim options {@link StreamTrimOptions}.
      * @return The number of entries deleted from the stream.
@@ -149,6 +153,21 @@ CompletableFuture>> xread(
      */
     CompletableFuture xlen(String key);
 
+    /**
+     * Returns the number of entries in the stream stored at key.
+     *
+     * @see valkey.io for details.
+     * @param key The key of the stream.
+     * @return The number of entries in the stream. If key does not exist, return 0
+     *     .
+     * @example
+     *     
{@code
+     * Long num = client.xlen(gs("key")).get();
+     * assert num == 2L; // Stream has 2 entries
+     * }
+ */ + CompletableFuture xlen(GlideString key); + /** * Removes the specified entries by id from a stream, and returns the number of entries deleted. * @@ -166,9 +185,27 @@ CompletableFuture>> xread( */ CompletableFuture xdel(String key, String[] ids); + /** + * Removes the specified entries by id from a stream, and returns the number of entries deleted. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param ids An array of entry ids. + * @return The number of entries removed from the stream. This number may be less than the number + * of entries in ids, if the specified ids don't exist in the + * stream. + * @example + *
{@code
+     * Long num = client.xdel(gs("key"), new GlideString[] {gs("1538561698944-0"), gs("1538561698944-1")}).get();
+     * assert num == 2L; // Stream marked 2 entries as deleted
+     * }
+ */ + CompletableFuture xdel(GlideString key, GlideString[] ids); + /** * Returns stream entries matching a given range of IDs. * + * @see valkey.io for details. * @param key The key of the stream. * @param start Starting stream ID bound for range. *
    @@ -205,6 +242,7 @@ CompletableFuture>> xread( /** * Returns stream entries matching a given range of IDs. * + * @see valkey.io for details. * @param key The key of the stream. * @param start Starting stream ID bound for range. *
      @@ -242,6 +280,7 @@ CompletableFuture> xrange( * Equivalent to {@link #xrange(String, StreamRange, StreamRange)} but returns the entries in * reverse order. * + * @see valkey.io for details. * @param key The key of the stream. * @param end Ending stream ID bound for range. *
        @@ -281,6 +320,7 @@ CompletableFuture> xrevrange( * Equivalent to {@link #xrange(String, StreamRange, StreamRange, long)} but returns the entries * in reverse order. * + * @see valkey.io for details. * @param key The key of the stream. * @param end Ending stream ID bound for range. *
          @@ -312,4 +352,336 @@ CompletableFuture> xrevrange( */ CompletableFuture> xrevrange( String key, StreamRange end, StreamRange start, long count); + + /** + * Creates a new consumer group uniquely identified by groupname for the stream + * stored at key. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param groupname The newly created consumer group name. + * @param id Stream entry ID that specifies the last delivered entry in the stream from the new + * group’s perspective. The special ID "$" can be used to specify the last entry + * in the stream. + * @return OK. + * @example + *
          {@code
          +     * // Create the consumer group "mygroup", using zero as the starting ID:
          +     * assert client.xgroupCreate("mystream", "mygroup", "0-0").get().equals("OK");
          +     * }
          + */ + CompletableFuture xgroupCreate(String key, String groupname, String id); + + /** + * Creates a new consumer group uniquely identified by groupname for the stream + * stored at key. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param groupname The newly created consumer group name. + * @param id Stream entry ID that specifies the last delivered entry in the stream from the new + * group’s perspective. The special ID "$" can be used to specify the last entry + * in the stream. + * @param options The group options {@link StreamGroupOptions}. + * @return OK. + * @example + *
          {@code
          +     * // Create the consumer group "mygroup", and the stream if it does not exist, after the last ID
          +     * assert client.xgroupCreate("mystream", "mygroup", "$", new StreamGroupOptions(true)).get().equals("OK");
          +     * }
          + */ + CompletableFuture xgroupCreate( + String key, String groupname, String id, StreamGroupOptions options); + + /** + * Destroys the consumer group groupname for the stream stored at key. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param groupname The newly created consumer group name. + * @return true if the consumer group is destroyed. Otherwise, false. + * @example + *
          {@code
          +     * // Destroys the consumer group "mygroup"
          +     * assert client.xgroupDestroy("mystream", "mygroup").get();
          +     * }
          + */ + CompletableFuture xgroupDestroy(String key, String groupname); + + /** + * Creates a consumer named consumer in the consumer group group for the + * stream stored at key. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param consumer The newly created consumer. + * @return true if the consumer is created. Otherwise, false. + * @example + *
          {@code
          +     * // Creates the consumer "myconsumer" in consumer group "mygroup"
          +     * assert client.xgroupCreateConsumer("mystream", "mygroup", "myconsumer").get();
          +     * }
          + */ + CompletableFuture xgroupCreateConsumer(String key, String group, String consumer); + + /** + * Deletes a consumer named consumer in the consumer group group. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param consumer The newly created consumer. + * @return The number of pending messages the consumer had before it was deleted. + * @example + *
          {@code
          +     * // Deletes the consumer "myconsumer" in consumer group "mygroup"
          +     * Long pendingMsgCount = client.xgroupDelConsumer("mystream", "mygroup", "myconsumer").get();
          +     * System.out.println("Consumer 'myconsumer' had " +
          +     *     + pendingMsgCount + " pending messages unclaimed.");
          +     * }
          + */ + CompletableFuture xgroupDelConsumer(String key, String group, String consumer); + + /** + * Reads entries from the given streams owned by a consumer group. + * + * @apiNote When in cluster mode, all keys in keysAndIds must map to the same hash + * slot. + * @see valkey.io for details. + * @param keysAndIds A Map of keys and entry ids to read from. The + * Map is composed of a stream's key and the id of the entry after which the stream + * will be read. Use the special id of {@literal ">"} to receive only new messages. + * @param group The consumer group name. + * @param consumer The newly created consumer. + * @return A {@literal Map>} with stream + * keys, to Map of stream-ids, to an array of pairings with format [[field, entry], [field, entry], ...]. + * Returns null if the consumer group does not exist. Returns a Map with a value of null if the stream is empty. + * @example + *
          {@code
          +     * // create a new stream at "mystream", with stream id "1-0"
          +     * Map xreadKeys = Map.of("myfield", "mydata");
          +     * String streamId = client.xadd("mystream", Map.of("myfield", "mydata"), StreamAddOptions.builder().id("1-0").build()).get();
          +     * assert client.xgroupCreate("mystream", "mygroup").get().equals("OK"); // create the consumer group "mygroup"
          +     * Map> streamReadResponse = client.xreadgroup(Map.of("mystream", ">"), "mygroup", "myconsumer").get();
          +     * // Returns "mystream": "1-0": {{"myfield", "mydata"}}
          +     * for (var keyEntry : streamReadResponse.entrySet()) {
          +     *     System.out.printf("Key: %s", keyEntry.getKey());
          +     *     for (var streamEntry : keyEntry.getValue().entrySet()) {
          +     *         Arrays.stream(streamEntry.getValue()).forEach(entity ->
          +     *             System.out.printf("stream id: %s; field: %s; value: %s\n", streamEntry.getKey(), entity[0], entity[1])
          +     *         );
          +     *     }
          +     * }
          +     * assert client.xdel("mystream", "1-0").get() == 1L;
          +     * client.xreadgroup(Map.of("mystream", "0"), "mygroup", "myconsumer").get();
          +     * // Returns "mystream": "1-0": null
          +     * assert streamReadResponse.get("mystream").get("1-0") == null;
          +     * 
          + */ + CompletableFuture>> xreadgroup( + Map keysAndIds, String group, String consumer); + + /** + * Reads entries from the given streams owned by a consumer group. + * + * @apiNote When in cluster mode, all keys in keysAndIds must map to the same hash + * slot. + * @see valkey.io for details. + * @param keysAndIds A Map of keys and entry ids to read from. The + * Map is composed of a stream's key and the id of the entry after which the stream + * will be read. Use the special id of {@literal ">"} to receive only new messages. + * @param group The consumer group name. + * @param consumer The newly created consumer. + * @param options Options detailing how to read the stream {@link StreamReadGroupOptions}. + * @return A {@literal Map>} with stream + * keys, to Map of stream-ids, to an array of pairings with format [[field, entry], [field, entry], ...]. + * Returns null if the consumer group does not exist. Returns a Map with a value of null if the stream is empty. + * @example + *
          {@code
          +     * // create a new stream at "mystream", with stream id "1-0"
          +     * Map xreadKeys = Map.of("myfield", "mydata");
          +     * String streamId = client.xadd("mystream", Map.of("myfield", "mydata"), StreamAddOptions.builder().id("1-0").build()).get();
          +     * assert client.xgroupCreate("mystream", "mygroup").get().equals("OK"); // create the consumer group "mygroup"
          +     * StreamReadGroupOptions options = StreamReadGroupOptions.builder().count(1).build(); // retrieves only a single message at a time
          +     * Map> streamReadResponse = client.xreadgroup(Map.of("mystream", ">"), "mygroup", "myconsumer", options).get();
          +     * // Returns "mystream": "1-0": {{"myfield", "mydata"}}
          +     * for (var keyEntry : streamReadResponse.entrySet()) {
          +     *     System.out.printf("Key: %s", keyEntry.getKey());
          +     *     for (var streamEntry : keyEntry.getValue().entrySet()) {
          +     *         Arrays.stream(streamEntry.getValue()).forEach(entity ->
          +     *             System.out.printf("stream id: %s; field: %s; value: %s\n", streamEntry.getKey(), entity[0], entity[1])
          +     *         );
          +     *     }
          +     * }
          +     * assert client.xdel("mystream", "1-0").get() == 1L;
          +     * // read the first 10 items and acknowledge (ACK) them:
          +     * options = StreamReadGroupOptions.builder().count(10L).noack().build();
          +     * streamReadResponse = client.xreadgroup(Map.of("mystream", "0"), "mygroup", "myconsumer", options).get();
          +     * // Returns "mystream": "1-0": null
          +     * assert streamReadResponse.get("mystream").get("1-0") == null;
          +     * 
          + */ + CompletableFuture>> xreadgroup( + Map keysAndIds, + String group, + String consumer, + StreamReadGroupOptions options); + + /** + * Returns the number of messages that were successfully acknowledged by the consumer group member of a stream. + * This command should be called on a pending message so that such message does not get processed again. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param ids Stream entry ID to acknowledge and purge messages. + * @return The number of messages that were successfully acknowledged. + * @example + *
          {@code
          +     * String entryId = client.xadd("mystream", Map.of("myfield", "mydata")).get();
          +     * // read messages from streamId
          +     * var readResult = client.xreadgroup(Map.of("mystream", entryId), "mygroup", "my0consumer").get();
          +     * // acknowledge messages on stream
          +     * assert 1L == client.xack("mystream", "mygroup", new String[] {entryId}).get();
          +     * 
          + */ + CompletableFuture xack(String key, String group, String[] ids); + + /** + * Returns the number of messages that were successfully acknowledged by the consumer group member of a stream. + * This command should be called on a pending message so that such message does not get processed again. + * + * @param key The key of the stream. + * @param group The consumer group name. + * @param ids Stream entry ID to acknowledge and purge messages. + * @return The number of messages that were successfully acknowledged. + * @example + *
          {@code
          +     * GlideString entryId = client.xadd(gs("mystream"), Map.of(gs("myfield"), gs("mydata"))).get();
          +     * // read messages from streamId
          +     * var readResult = client.xreadgroup(Map.of(gs("mystream"), entryId), gs("mygroup"), gs("my0consumer")).get();
          +     * // acknowledge messages on stream
          +     * assert 1L == client.xack(gs("mystream"), gs("mygroup"), new GlideString[] {entryId}).get();
          +     * 
          + */ + CompletableFuture xack(GlideString key, GlideString group, GlideString[] ids); + + /** + * Returns stream message summary information for pending messages matching a given range of IDs. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @return An array that includes the summary of pending messages, with the format + * [NumOfMessages, StartId, EndId, [Consumer, NumOfMessages]], where: + *
            + *
          • NumOfMessages: The total number of pending messages for this consumer group. + *
          • StartId: The smallest ID among the pending messages. + *
          • EndId: The greatest ID among the pending messages. + *
          • [[Consumer, NumOfMessages], ...]: A 2D-array of every consumer + * in the consumer group with at least one pending message, and the number of pending messages it has. + *
          + * @example + *
          {@code
          +     * // Retrieve a summary of all pending messages from key "my_stream"
          +     * Object[] result = client.xpending("my_stream", "my_group").get();
          +     * System.out.println("Number of pending messages: " + result[0]);
          +     * System.out.println("Start and End ID of messages: [" + result[1] + ", " + result[2] + "]");
          +     * for (Object[] consumerResult : (Object[][]) result[3]) {
          +     *     System.out.println("Number of Consumer messages: [" + consumerResult[0] + ", " + consumerResult[1] + "]");
          +     * }
          + */ + CompletableFuture xpending(String key, String group); + + /** + * Returns an extended form of stream message information for pending messages matching a given range of IDs. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param start Starting stream ID bound for range. + *
            + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream ID. + *
          • Use {@link InfRangeBound#MIN} to start with the minimum available ID. + *
          + * + * @param end Ending stream ID bound for range. + *
            + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream ID. + *
          • Use {@link InfRangeBound#MAX} to end with the maximum available ID. + *
          + * @param count Limits the number of messages returned. + * @return A 2D-array of 4-tuples containing extended message information with the format + * [[ID, Consumer, TimeElapsed, NumOfDelivered], ... ], where: + *
            + *
          • ID: The ID of the message. + *
          • Consumer: The name of the consumer that fetched the message and has still to acknowledge it. We call it the current owner of the message. + *
          • TimeElapsed: The number of milliseconds that elapsed since the last time this message was delivered to this consumer. + *
          • NumOfDelivered: The number of times this message was delivered. + *
          + * @example + *
          {@code
          +     * // Retrieve up to 10 pending messages from key "my_stream" in extended form
          +     * Object[][] result = client.xpending("my_stream", "my_group", InfRangeBound.MIN, InfRangeBound.MAX, 10L).get();
          +     * for (Object[] messageResult : result) {
          +     *     System.out.printf("Message %s from consumer %s was read %s times", messageResult[0], messageResult[1], messageResult[3]);
          +     * }
          + */ + CompletableFuture xpending( + String key, String group, StreamRange start, StreamRange end, long count); + + /** + * Returns an extended form of stream message information for pending messages matching a given range of IDs. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param start Starting stream ID bound for range. + *
            + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream ID. + *
          • Use {@link InfRangeBound#MIN} to start with the minimum available ID. + *
          + * + * @param end Ending stream ID bound for range. + *
            + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream ID. + *
          • Use {@link InfRangeBound#MAX} to end with the maximum available ID. + *
          + * @param count Limits the number of messages returned. + * @param options Stream add options {@link StreamPendingOptions}. + * @return A 2D-array of 4-tuples containing extended message information with the format + * [[ID, Consumer, TimeElapsed, NumOfDelivered], ... ], where: + *
            + *
          • ID: The ID of the message. + *
          • Consumer: The name of the consumer that fetched the message and has still to acknowledge it. We call it the current owner of the message. + *
          • TimeElapsed: The number of milliseconds that elapsed since the last time this message was delivered to this consumer. + *
          • NumOfDelivered: The number of times this message was delivered. + *
          + * @example + *
          {@code
          +     * // Retrieve up to 10 pending messages from key "my_stream" and consumer "my_consumer" in extended form
          +     * Object[][] result = client.xpending(
          +     *     "my_stream",
          +     *     "my_group",
          +     *     InfRangeBound.MIN,
          +     *     InfRangeBound.MAX,
          +     *     10L,
          +     *     StreamPendingOptions.builder().consumer("my_consumer").build()
          +     * ).get();
          +     * for (Object[] messageResult : result) {
          +     *     System.out.printf("Message %s from consumer %s was read %s times", messageResult[0], messageResult[1], messageResult[3]);
          +     * }
          + */ + CompletableFuture xpending( + String key, + String group, + StreamRange start, + StreamRange end, + long count, + StreamPendingOptions options); } diff --git a/java/client/src/main/java/glide/api/commands/StringBaseCommands.java b/java/client/src/main/java/glide/api/commands/StringBaseCommands.java index b38e21c573..d703e90fbb 100644 --- a/java/client/src/main/java/glide/api/commands/StringBaseCommands.java +++ b/java/client/src/main/java/glide/api/commands/StringBaseCommands.java @@ -1,6 +1,8 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.commands; +import glide.api.models.GlideString; +import glide.api.models.commands.GetExOptions; import glide.api.models.commands.SetOptions; import glide.api.models.commands.SetOptions.ConditionalSet; import glide.api.models.commands.SetOptions.SetOptionsBuilder; @@ -18,6 +20,18 @@ public interface StringBaseCommands { /** Redis API keyword used to indicate that the length of the lcs should be returned. */ public static final String LEN_REDIS_API = "LEN"; + /** IDX option string to include in the LCS command. */ + public static final String IDX_COMMAND_STRING = "IDX"; + + /** MINMATCHLEN option string to include in the LCS command. */ + public static final String MINMATCHLEN_COMMAND_STRING = "MINMATCHLEN"; + + /** WITHMATCHLEN option string to include in the LCS command. */ + public static final String WITHMATCHLEN_COMMAND_STRING = "WITHMATCHLEN"; + + /** Key for LCS matches result. */ + public static final String LCS_MATCHES_RESULT_KEY = "matches"; + /** * Gets the value associated with the given key, or null if no such * value exists. @@ -47,14 +61,14 @@ public interface StringBaseCommands { * key as a String. Otherwise, return null. * @example *
          {@code
          -     * byte[] value = client.get("key").get();
          -     * assert Arrays.equals(value, "value".getBytes());
          +     * GlideString value = client.get(gs("key")).get();
          +     * assert value.getString().equals("value");
                *
                * String value = client.get("non_existing_key").get();
                * assert value.equals(null);
                * }
          */ - CompletableFuture get(byte[] key); + CompletableFuture get(GlideString key); /** * Gets a string value associated with the given key and deletes the key. @@ -74,6 +88,59 @@ public interface StringBaseCommands { */ CompletableFuture getdel(String key); + /** + * Gets the value associated with the given key. + * + * @since Redis 6.2.0. + * @see redis.io for details. + * @param key The key to retrieve from the database. + * @return If key exists, return the value of the key. + * Otherwise, return null. + * @example + *
          {@code
          +     * String value = client.getex("key").get();
          +     * assert value.equals("value");
          +     * }
          + */ + CompletableFuture getex(String key); + + /** + * Gets the value associated with the given key. + * + * @since Redis 6.2.0. + * @see redis.io for details. + * @param key The key to retrieve from the database. + * @param options The {@link GetExOptions} options. + * @return If key exists, return the value of the key. + * Otherwise, return null. + * @example + *
          {@code
          +     * String response = client.set("key", "value").get();
          +     * assert response.equals(OK);
          +     * String value = client.getex("key", GetExOptions.Seconds(10L)).get();
          +     * assert value.equals("value");
          +     * }
          + */ + CompletableFuture getex(String key, GetExOptions options); + + /** + * Gets a string value associated with the given key and deletes the key. + * + * @see redis.io for details. + * @param key The key to retrieve from the database. + * @return If key exists, returns the value of key. + * Otherwise, return null. + * @example + *
          {@code
          +     * GlideString value = client.getdel(gs("key")).get();
          +     * assert value.getString().equals("value");
          +     *
          +     * String value = client.getdel("key").get();
          +     * assert value.equals(null);
          +     * }
          + */ + CompletableFuture getdel(GlideString key); + /** * Sets the given key with the given value. * @@ -98,11 +165,11 @@ public interface StringBaseCommands { * @return Response from Redis containing "OK". * @example *
          {@code
          -     * String value = client.set("key".getBytes(), "value".getBytes()).get();
          -     * assert value.equals("OK");
          +     * GlideString value = client.set(gs("key"), gs("value")).get();
          +     * assert value.getString().equals("OK");
                * }
          */ - CompletableFuture set(byte[] key, byte[] value); + CompletableFuture set(GlideString key, GlideString value); /** * Sets the given key with the given value. Return value is dependent on the passed options. @@ -125,6 +192,27 @@ public interface StringBaseCommands { */ CompletableFuture set(String key, String value, SetOptions options); + /** + * Sets the given key with the given value. Return value is dependent on the passed options. + * + * @see redis.io for details. + * @param key The key to store. + * @param value The value to store with the given key. + * @param options The Set options. + * @return Response from Redis containing a String or null response. If + * the value is successfully set, return "OK". If value isn't set because of + * {@link ConditionalSet#ONLY_IF_EXISTS} or {@link ConditionalSet#ONLY_IF_DOES_NOT_EXIST} + * conditions, return null. If {@link SetOptionsBuilder#returnOldValue(boolean)} + * is set, return the old value as a String. + * @example + *
          {@code
          +     * SetOptions options = SetOptions.builder().conditionalSet(ONLY_IF_EXISTS).expiry(Seconds(5L)).build();
          +     * String value = client.set(gs("key"), gs("value"), options).get();
          +     * assert value.equals("OK");
          +     * }
          + */ + CompletableFuture set(GlideString key, GlideString value, SetOptions options); + /** * Retrieves the values of multiple keys. * @@ -143,6 +231,24 @@ public interface StringBaseCommands { */ CompletableFuture mget(String[] keys); + /** + * Retrieves the values of multiple keys. + * + * @apiNote When in cluster mode, the command may route to multiple nodes when keys + * map to different hash slots. + * @see redis.io for details. + * @param keys A list of keys to retrieve values for. + * @return An array of values corresponding to the provided keys.
          + * If a key is not found, its corresponding value in the list will be null + * . + * @example + *
          {@code
          +     * GlideString[] values = client.mget(new GlideString[] {gs("key1"), gs("key2")}).get();
          +     * assert Arrays.equals(values, new GlideString[] {gs("value1"), gs("value2")});
          +     * }
          + */ + CompletableFuture mget(GlideString[] keys); + /** * Sets multiple keys to multiple values in a single operation. * @@ -207,6 +313,22 @@ public interface StringBaseCommands { */ CompletableFuture incrBy(String key, long amount); + /** + * Increments the number stored at key by amount. If key + * does not exist, it is set to 0 before performing the operation. + * + * @see redis.io for details. + * @param key The key to increment its value. + * @param amount The amount to increment. + * @return The value of key after the increment. + * @example + *
          {@code
          +     * Long num = client.incrBy(gs("key"), 2).get();
          +     * assert num == 7L;
          +     * }
          + */ + CompletableFuture incrBy(GlideString key, long amount); + /** * Increments the string representing a floating point number stored at key by * amount. By using a negative increment value, the result is that the value stored at @@ -225,6 +347,24 @@ public interface StringBaseCommands { */ CompletableFuture incrByFloat(String key, double amount); + /** + * Increments the string representing a floating point number stored at key by + * amount. By using a negative increment value, the result is that the value stored at + * key is decremented. If key does not exist, it is set to 0 before + * performing the operation. + * + * @see redis.io for details. + * @param key The key to increment its value. + * @param amount The amount to increment. + * @return The value of key after the increment. + * @example + *
          {@code
          +     * Double num = client.incrByFloat(gs("key"), 0.5).get();
          +     * assert num == 7.5;
          +     * }
          + */ + CompletableFuture incrByFloat(GlideString key, double amount); + /** * Decrements the number stored at key by one. If key does not exist, it * is set to 0 before performing the operation. @@ -276,6 +416,26 @@ public interface StringBaseCommands { */ CompletableFuture strlen(String key); + /** + * Returns the length of the string value stored at key. + * + * @see redis.io for details. + * @param key The key to check its length. + * @return The length of the string value stored at key.
          + * If key does not exist, it is treated as an empty string, and the command + * returns 0. + * @example + *
          {@code
          +     * client.set(gs("key"), gs("GLIDE")).get();
          +     * Long len = client.strlen(gs("key")).get();
          +     * assert len == 5L;
          +     *
          +     * len = client.strlen(gs("non_existing_key")).get();
          +     * assert len == 0L;
          +     * }
          + */ + CompletableFuture strlen(GlideString key); + /** * Overwrites part of the string stored at key, starting at the specified * offset, for the entire length of value.
          @@ -337,6 +497,23 @@ public interface StringBaseCommands { */ CompletableFuture append(String key, String value); + /** + * Appends a value to a key. If key does not exist it is + * created and set as an empty string, so APPEND will be similar to {@see #set} in + * this special case. + * + * @see redis.io for details. + * @param key The key of the string. + * @param value The value to append. + * @return The length of the string after appending the value. + * @example + *
          {@code
          +     * Long value = client.append(gs("key"), gs("value")).get();
          +     * assert value.equals(5L);
          +     * }
          + */ + CompletableFuture append(GlideString key, GlideString value); + /** * Returns the longest common subsequence between strings stored at key1 and * key2. @@ -373,9 +550,188 @@ public interface StringBaseCommands { * @example *
          {@code
                * // testKey1 = abcd, testKey2 = axcd
          -     * Long result = client.lcs("testKey1", "testKey2").get();
          +     * Long result = client.lcsLen("testKey1", "testKey2").get();
                * assert result.equals(3L);
                * }
          */ CompletableFuture lcsLen(String key1, String key2); + + /** + * Returns the indices and length of the longest common subsequence between strings stored at + * key1 and key2. + * + * @since Redis 7.0 and above. + * @apiNote When in cluster mode, key1 and key2 must map to the same + * hash slot. + * @see valkey.io for details. + * @param key1 The key that stores the first string. + * @param key2 The key that stores the second string. + * @return A Map containing the indices of the longest common subsequence between the + * 2 strings and the length of the longest common subsequence. The resulting map contains two + * keys, "matches" and "len": + *
            + *
          • "len" is mapped to the length of the longest common subsequence between the 2 strings + * stored as Long. + *
          • "matches" is mapped to a three dimensional Long array that stores pairs + * of indices that represent the location of the common subsequences in the strings held + * by key1 and key2. + *
          + * + * @example If key1 holds the string "abcd123" and key2 + * holds the string "bcdef123" then the sample result would be + *
          {@code
          +     * new Long[][][] {
          +     *      {
          +     *          {4L, 6L},
          +     *          {5L, 7L}
          +     *      },
          +     *      {
          +     *          {1L, 3L},
          +     *          {0L, 2L}
          +     *      }
          +     *  }
          +     * }
          + * The result indicates that the first substring match is "123" in key1 + * at index 4 to 6 which matches the substring in key2 + * at index 5 to 7. And the second substring match is + * "bcd" in key1 at index 1 to 3 which matches + * the substring in key2 at index 0 to 2. + */ + CompletableFuture> lcsIdx(String key1, String key2); + + /** + * Returns the indices and length of the longest common subsequence between strings stored at + * key1 and key2. + * + * @since Redis 7.0 and above. + * @apiNote When in cluster mode, key1 and key2 must map to the same + * hash slot. + * @see valkey.io for details. + * @param key1 The key that stores the first string. + * @param key2 The key that stores the second string. + * @param minMatchLen The minimum length of matches to include in the result. + * @return A Map containing the indices of the longest common subsequence between the + * 2 strings and the length of the longest common subsequence. The resulting map contains two + * keys, "matches" and "len": + *
            + *
          • "len" is mapped to the length of the longest common subsequence between the 2 strings + * stored as Long. + *
          • "matches" is mapped to a three dimensional Long array that stores pairs + * of indices that represent the location of the common subsequences in the strings held + * by key1 and key2. + *
          + * + * @example If key1 holds the string "abcd123" and key2 + * holds the string "bcdef123" then the sample result would be + *
          {@code
          +     * new Long[][][] {
          +     *      {
          +     *          {4L, 6L},
          +     *          {5L, 7L}
          +     *      },
          +     *      {
          +     *          {1L, 3L},
          +     *          {0L, 2L}
          +     *      }
          +     *  }
          +     * }
          + * The result indicates that the first substring match is "123" in key1 + * at index 4 to 6 which matches the substring in key2 + * at index 5 to 7. And the second substring match is + * "bcd" in key1 at index 1 to 3 which matches + * the substring in key2 at index 0 to 2. + */ + CompletableFuture> lcsIdx(String key1, String key2, long minMatchLen); + + /** + * Returns the indices and length of the longest common subsequence between strings stored at + * key1 and key2. + * + * @since Redis 7.0 and above. + * @apiNote When in cluster mode, key1 and key2 must map to the same + * hash slot. + * @see valkey.io for details. + * @param key1 The key that stores the first string. + * @param key2 The key that stores the second string. + * @return A Map containing the indices of the longest common subsequence between the + * 2 strings and the length of the longest common subsequence. The resulting map contains two + * keys, "matches" and "len": + *
            + *
          • "len" is mapped to the length of the longest common subsequence between the 2 strings + * stored as Long. + *
          • "matches" is mapped to a three dimensional Long array that stores pairs + * of indices that represent the location of the common subsequences in the strings held + * by key1 and key2. + *
          + * + * @example If key1 holds the string "abcd1234" and key2 + * holds the string "bcdef1234" then the sample result would be + *
          {@code
          +     * new Object[] {
          +     *      new Object[] {
          +     *          new Long[] {4L, 7L},
          +     *          new Long[] {5L, 8L},
          +     *          4L},
          +     *      new Object[] {
          +     *          new Long[] {1L, 3L},
          +     *          new Long[] {0L, 2L},
          +     *          3L}
          +     *      }
          +     * }
          + * The result indicates that the first substring match is "1234" in key1 + * at index 4 to 7 which matches the substring in key2 + * at index 5 to 8 and the last element in the array is the + * length of the substring match which is 4. And the second substring match is + * "bcd" in key1 at index 1 to 3 which + * matches the substring in key2 at index 0 to 2 and + * the last element in the array is the length of the substring match which is 3. + */ + CompletableFuture> lcsIdxWithMatchLen(String key1, String key2); + + /** + * Returns the indices and length of the longest common subsequence between strings stored at + * key1 and key2. + * + * @since Redis 7.0 and above. + * @apiNote When in cluster mode, key1 and key2 must map to the same + * hash slot. + * @see valkey.io for details. + * @param key1 The key that stores the first string. + * @param key2 The key that stores the second string. + * @param minMatchLen The minimum length of matches to include in the result. + * @return A Map containing the indices of the longest common subsequence between the + * 2 strings and the length of the longest common subsequence. The resulting map contains two + * keys, "matches" and "len": + *
            + *
          • "len" is mapped to the length of the longest common subsequence between the 2 strings + * stored as Long. + *
          • "matches" is mapped to a three dimensional Long array that stores pairs + * of indices that represent the location of the common subsequences in the strings held + * by key1 and key2. + *
          + * + * @example If key1 holds the string "abcd1234" and key2 + * holds the string "bcdef1234" then the sample result would be + *
          {@code
          +     * new Object[] {
          +     *      new Object[] {
          +     *          new Long[] {4L, 7L},
          +     *          new Long[] {5L, 8L},
          +     *          4L},
          +     *      new Object[] {
          +     *          new Long[] {1L, 3L},
          +     *          new Long[] {0L, 2L},
          +     *          3L}
          +     *      }
          +     * }
          + * The result indicates that the first substring match is "1234" in key1 + * at index 4 to 7 which matches the substring in key2 + * at index 5 to 8 and the last element in the array is the + * length of the substring match which is 4. And the second substring match is + * "bcd" in key1 at index 1 to 3 which + * matches the substring in key2 at index 0 to 2 and + * the last element in the array is the length of the substring match which is 3. + */ + CompletableFuture> lcsIdxWithMatchLen( + String key1, String key2, long minMatchLen); } diff --git a/java/client/src/main/java/glide/api/commands/TransactionsBaseCommands.java b/java/client/src/main/java/glide/api/commands/TransactionsBaseCommands.java new file mode 100644 index 0000000000..217795a3cf --- /dev/null +++ b/java/client/src/main/java/glide/api/commands/TransactionsBaseCommands.java @@ -0,0 +1,64 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.commands; + +import glide.api.models.GlideString; +import java.util.concurrent.CompletableFuture; + +/** + * Supports commands for the "Transactions Commands" group for standalone and cluster clients. + * + * @see Transactions Commands + */ +public interface TransactionsBaseCommands { + /** + * Marks the given keys to be watched for conditional execution of a transaction. Transactions + * will only execute commands if the watched keys are not modified before execution of the + * transaction. + * + * @apiNote When in cluster mode, the command may route to multiple nodes when keys + * map to different hash slots. + * @see redis.io for details. + * @param keys The keys to watch. + * @return OK. + * @example + *
          {@code
          +     * assert client.watch(new String[] {"sampleKey"}).get().equals("OK");
          +     * transaction.set("sampleKey", "foobar");
          +     * Object[] result = client.exec(transaction).get();
          +     * assert result != null; // Executes successfully and keys are unwatched.
          +     *
          +     * assert client.watch(new String[] {"sampleKey"}).get().equals("OK");
          +     * transaction.set("sampleKey", "foobar");
          +     * assert client.set("sampleKey", "hello world").get().equals("OK");
     * result = client.exec(transaction).get();
          +     * assert result == null; // null is returned when the watched key is modified before transaction execution.
          +     * }
          + */ + CompletableFuture watch(String[] keys); + + /** + * Marks the given keys to be watched for conditional execution of a transaction. Transactions + * will only execute commands if the watched keys are not modified before execution of the + * transaction. + * + * @apiNote When in cluster mode, the command may route to multiple nodes when keys + * map to different hash slots. + * @see redis.io for details. + * @param keys The keys to watch. + * @return OK. + * @example + *
          {@code
          +     * assert client.watch(new GlideString[] {gs("sampleKey")}).get().equals("OK");
          +     * transaction.set(gs("sampleKey"), gs("foobar"));
          +     * Object[] result = client.exec(transaction).get();
          +     * assert result != null; // Executes successfully and keys are unwatched.
          +     *
          +     * assert client.watch(new GlideString[] {gs("sampleKey")}).get().equals("OK");
          +     * transaction.set(gs("sampleKey"), gs("foobar"));
          +     * assert client.set(gs("sampleKey"), gs("hello world")).get().equals("OK");
     * result = client.exec(transaction).get();
          +     * assert result == null; // null is returned when the watched key is modified before transaction execution.
          +     * }
          + */ + CompletableFuture watch(GlideString[] keys); +} diff --git a/java/client/src/main/java/glide/api/commands/TransactionsClusterCommands.java b/java/client/src/main/java/glide/api/commands/TransactionsClusterCommands.java new file mode 100644 index 0000000000..cadbac86c9 --- /dev/null +++ b/java/client/src/main/java/glide/api/commands/TransactionsClusterCommands.java @@ -0,0 +1,43 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.commands; + +import glide.api.models.configuration.RequestRoutingConfiguration.Route; +import java.util.concurrent.CompletableFuture; + +/** + * Supports commands for the "Transactions Commands" group for cluster clients. + * + * @see Transactions Commands + */ +public interface TransactionsClusterCommands { + /** + * Flushes all the previously watched keys for a transaction. Executing a transaction will + * automatically flush all previously watched keys.
          + * The command will be routed to all primary nodes. + * + * @see redis.io for details. + * @return OK. + * @example + *
          {@code
          +     * assert client.watch(new String[] {"sampleKey"}).get().equals("OK");
          +     * assert client.unwatch().get().equals("OK"); // Flushes "sampleKey" from watched keys.
          +     * }
          + */ + CompletableFuture unwatch(); + + /** + * Flushes all the previously watched keys for a transaction. Executing a transaction will + * automatically flush all previously watched keys. + * + * @see redis.io for details. + * @param route Specifies the routing configuration for the command. The client will route the + * command to the nodes defined by route. + * @return OK. + * @example + *
          {@code
          +     * assert client.watch(new String[] {"sampleKey"}).get().equals("OK");
          +     * assert client.unwatch(ALL_PRIMARIES).get().equals("OK"); // Flushes "sampleKey" from watched keys for all primary nodes.
          +     * }
          + */ + CompletableFuture unwatch(Route route); +} diff --git a/java/client/src/main/java/glide/api/commands/TransactionsCommands.java b/java/client/src/main/java/glide/api/commands/TransactionsCommands.java new file mode 100644 index 0000000000..994a3561bd --- /dev/null +++ b/java/client/src/main/java/glide/api/commands/TransactionsCommands.java @@ -0,0 +1,25 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.commands; + +import java.util.concurrent.CompletableFuture; + +/** + * Supports commands for the "Transactions Commands" group for standalone clients. + * + * @see Transactions Commands + */ +public interface TransactionsCommands { + /** + * Flushes all the previously watched keys for a transaction. Executing a transaction will + * automatically flush all previously watched keys. + * + * @see redis.io for details. + * @return OK. + * @example + *
          {@code
          +     * assert client.watch(new String[] {"sampleKey"}).get().equals("OK");
          +     * assert client.unwatch().get().equals("OK"); // Flushes "sampleKey" from watched keys.
          +     * }
          + */ + CompletableFuture unwatch(); +} diff --git a/java/client/src/main/java/glide/api/models/BaseTransaction.java b/java/client/src/main/java/glide/api/models/BaseTransaction.java index d8f36bd471..a07db292e5 100644 --- a/java/client/src/main/java/glide/api/models/BaseTransaction.java +++ b/java/client/src/main/java/glide/api/models/BaseTransaction.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models; import static glide.api.commands.GenericBaseCommands.REPLACE_REDIS_API; @@ -10,8 +10,12 @@ import static glide.api.commands.SortedSetBaseCommands.LIMIT_REDIS_API; import static glide.api.commands.SortedSetBaseCommands.WITH_SCORES_REDIS_API; import static glide.api.commands.SortedSetBaseCommands.WITH_SCORE_REDIS_API; +import static glide.api.commands.StringBaseCommands.IDX_COMMAND_STRING; import static glide.api.commands.StringBaseCommands.LEN_REDIS_API; +import static glide.api.commands.StringBaseCommands.MINMATCHLEN_COMMAND_STRING; +import static glide.api.commands.StringBaseCommands.WITHMATCHLEN_COMMAND_STRING; import static glide.api.models.commands.RangeOptions.createZRangeArgs; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; import static glide.api.models.commands.bitmap.BitFieldOptions.createBitFieldArgs; import static glide.api.models.commands.function.FunctionListOptions.LIBRARY_NAME_REDIS_API; import static glide.api.models.commands.function.FunctionListOptions.WITH_CODE_REDIS_API; @@ -51,7 +55,9 @@ import static redis_request.RedisRequestOuterClass.RequestType.ExpireAt; import static redis_request.RedisRequestOuterClass.RequestType.ExpireTime; import static redis_request.RedisRequestOuterClass.RequestType.FCall; +import static redis_request.RedisRequestOuterClass.RequestType.FCallReadOnly; import static 
redis_request.RedisRequestOuterClass.RequestType.FlushAll; +import static redis_request.RedisRequestOuterClass.RequestType.FlushDB; import static redis_request.RedisRequestOuterClass.RequestType.FunctionDelete; import static redis_request.RedisRequestOuterClass.RequestType.FunctionFlush; import static redis_request.RedisRequestOuterClass.RequestType.FunctionList; @@ -64,6 +70,7 @@ import static redis_request.RedisRequestOuterClass.RequestType.Get; import static redis_request.RedisRequestOuterClass.RequestType.GetBit; import static redis_request.RedisRequestOuterClass.RequestType.GetDel; +import static redis_request.RedisRequestOuterClass.RequestType.GetEx; import static redis_request.RedisRequestOuterClass.RequestType.GetRange; import static redis_request.RedisRequestOuterClass.RequestType.HDel; import static redis_request.RedisRequestOuterClass.RequestType.HExists; @@ -118,6 +125,7 @@ import static redis_request.RedisRequestOuterClass.RequestType.RPop; import static redis_request.RedisRequestOuterClass.RequestType.RPush; import static redis_request.RedisRequestOuterClass.RequestType.RPushX; +import static redis_request.RedisRequestOuterClass.RequestType.RandomKey; import static redis_request.RedisRequestOuterClass.RequestType.Rename; import static redis_request.RedisRequestOuterClass.RequestType.RenameNX; import static redis_request.RedisRequestOuterClass.RequestType.SAdd; @@ -134,21 +142,31 @@ import static redis_request.RedisRequestOuterClass.RequestType.SPop; import static redis_request.RedisRequestOuterClass.RequestType.SRandMember; import static redis_request.RedisRequestOuterClass.RequestType.SRem; +import static redis_request.RedisRequestOuterClass.RequestType.SUnion; import static redis_request.RedisRequestOuterClass.RequestType.SUnionStore; import static redis_request.RedisRequestOuterClass.RequestType.Set; import static redis_request.RedisRequestOuterClass.RequestType.SetBit; import static redis_request.RedisRequestOuterClass.RequestType.SetRange; 
+import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.Strlen; import static redis_request.RedisRequestOuterClass.RequestType.TTL; import static redis_request.RedisRequestOuterClass.RequestType.Time; import static redis_request.RedisRequestOuterClass.RequestType.Touch; import static redis_request.RedisRequestOuterClass.RequestType.Type; import static redis_request.RedisRequestOuterClass.RequestType.Unlink; +import static redis_request.RedisRequestOuterClass.RequestType.XAck; import static redis_request.RedisRequestOuterClass.RequestType.XAdd; import static redis_request.RedisRequestOuterClass.RequestType.XDel; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupCreate; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupCreateConsumer; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupDelConsumer; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupDestroy; import static redis_request.RedisRequestOuterClass.RequestType.XLen; +import static redis_request.RedisRequestOuterClass.RequestType.XPending; import static redis_request.RedisRequestOuterClass.RequestType.XRange; import static redis_request.RedisRequestOuterClass.RequestType.XRead; +import static redis_request.RedisRequestOuterClass.RequestType.XReadGroup; import static redis_request.RedisRequestOuterClass.RequestType.XRevRange; import static redis_request.RedisRequestOuterClass.RequestType.XTrim; import static redis_request.RedisRequestOuterClass.RequestType.ZAdd; @@ -181,6 +199,7 @@ import com.google.protobuf.ByteString; import glide.api.models.commands.ExpireOptions; import glide.api.models.commands.FlushMode; +import glide.api.models.commands.GetExOptions; import glide.api.models.commands.InfoOptions; import glide.api.models.commands.InfoOptions.Section; import 
glide.api.models.commands.LInsertOptions.InsertPosition; @@ -223,7 +242,12 @@ import glide.api.models.commands.geospatial.GeospatialData; import glide.api.models.commands.stream.StreamAddOptions; import glide.api.models.commands.stream.StreamAddOptions.StreamAddOptionsBuilder; +import glide.api.models.commands.stream.StreamGroupOptions; +import glide.api.models.commands.stream.StreamPendingOptions; import glide.api.models.commands.stream.StreamRange; +import glide.api.models.commands.stream.StreamRange.IdBound; +import glide.api.models.commands.stream.StreamRange.InfRangeBound; +import glide.api.models.commands.stream.StreamReadGroupOptions; import glide.api.models.commands.stream.StreamReadOptions; import glide.api.models.commands.stream.StreamTrimOptions; import glide.api.models.configuration.ReadFrom; @@ -381,6 +405,37 @@ public T getdel(@NonNull String key) { return getThis(); } + /** + * Gets the value associated with the given key. + * + * @since Redis 6.2.0. + * @see redis.io for details. + * @param key The key to retrieve from the database. + * @return Command Response - If key exists, return the value of the + * key. Otherwise, return null. + */ + public T getex(@NonNull String key) { + ArgsArray commandArgs = buildArgs(key); + protobufTransaction.addCommands(buildCommand(GetEx, commandArgs)); + return getThis(); + } + + /** + * Gets the value associated with the given key. + * + * @since Redis 6.2.0. + * @see redis.io for details. + * @param key The key to retrieve from the database. + * @param options The {@link GetExOptions} options. + * @return Command Response - If key exists, return the value of the + * key. Otherwise, return null. + */ + public T getex(@NonNull String key, @NonNull GetExOptions options) { + ArgsArray commandArgs = buildArgs(ArrayUtils.addFirst(options.toArgs(), key)); + protobufTransaction.addCommands(buildCommand(GetEx, commandArgs)); + return getThis(); + } + /** * Sets the given key with the given value. 
* @@ -2720,7 +2775,7 @@ public T zinterWithScores( * Adds an entry to the specified stream stored at key.
          * If the key doesn't exist, the stream is created. * - * @see redis.io for details. + * @see valkey.io for details. * @param key The key of the stream. * @param values Field-value pairs to be added to the entry. * @return Command Response - The id of the added entry. @@ -2733,7 +2788,7 @@ public T xadd(@NonNull String key, @NonNull Map values) { * Adds an entry to the specified stream stored at key.
          * If the key doesn't exist, the stream is created. * - * @see redis.io for details. + * @see valkey.io for details. * @param key The key of the stream. * @param values Field-value pairs to be added to the entry. * @param options Stream add options {@link StreamAddOptions}. @@ -2754,11 +2809,11 @@ public T xadd( /** * Reads entries from the given streams. * - * @see redis.io for details. + * @see valkey.io for details. * @param keysAndIds An array of Pairs of keys and entry ids to read from. A * pair is composed of a stream's key and the id of the entry after which the stream * will be read. - * @return Command Response - A {@literal Map>} with stream + * @return Command Response - A {@literal Map>} with stream * keys, to Map of stream-ids, to an array of pairings with format [[field, entry], [field, entry], ...]. */ public T xread(@NonNull Map keysAndIds) { @@ -2768,12 +2823,12 @@ public T xread(@NonNull Map keysAndIds) { /** * Reads entries from the given streams. * - * @see redis.io for details. + * @see valkey.io for details. * @param keysAndIds An array of Pairs of keys and entry ids to read from. A * pair is composed of a stream's key and the id of the entry after which the stream * will be read. * @param options options detailing how to read the stream {@link StreamReadOptions}. - * @return Command Response - A {@literal Map>} with stream + * @return Command Response - A {@literal Map>} with stream * keys, to Map of stream-ids, to an array of pairings with format [[field, entry], [field, entry], ...]. */ public T xread(@NonNull Map keysAndIds, @NonNull StreamReadOptions options) { @@ -2784,7 +2839,7 @@ public T xread(@NonNull Map keysAndIds, @NonNull StreamReadOptio /** * Trims the stream by evicting older entries. * - * @see redis.io for details. + * @see valkey.io for details. * @param key The key of the stream. * @param options Stream trim options {@link StreamTrimOptions}. 
* @return Command Response - The number of entries deleted from the stream. @@ -2827,21 +2882,22 @@ public T xdel(@NonNull String key, @NonNull String[] ids) { /** * Returns stream entries matching a given range of IDs. * + * @see valkey.io for details. * @param key The key of the stream. * @param start Starting stream ID bound for range. *
            - *
          • Use {@link StreamRange.IdBound#of} to specify a stream ID. - *
          • Use {@link StreamRange.IdBound#ofExclusive} to specify an exclusive bounded stream + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream * ID. - *
          • Use {@link StreamRange.InfRangeBound#MIN} to start with the minimum available ID. + *
          • Use {@link InfRangeBound#MIN} to start with the minimum available ID. *
          * * @param end Ending stream ID bound for range. *
            - *
          • Use {@link StreamRange.IdBound#of} to specify a stream ID. - *
          • Use {@link StreamRange.IdBound#ofExclusive} to specify an exclusive bounded stream + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream * ID. - *
          • Use {@link StreamRange.InfRangeBound#MAX} to end with the maximum available ID. + *
          • Use {@link InfRangeBound#MAX} to end with the maximum available ID. *
          * * @return Command Response - A Map of key to stream entry data, where entry data is an array of pairings with format [[field, entry], [field, entry], ...]. @@ -2855,21 +2911,22 @@ public T xrange(@NonNull String key, @NonNull StreamRange start, @NonNull Stream /** * Returns stream entries matching a given range of IDs. * + * @see valkey.io for details. * @param key The key of the stream. * @param start Starting stream ID bound for range. *
            - *
          • Use {@link StreamRange.IdBound#of} to specify a stream ID. - *
          • Use {@link StreamRange.IdBound#ofExclusive} to specify an exclusive bounded stream + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream * ID. - *
          • Use {@link StreamRange.InfRangeBound#MIN} to start with the minimum available ID. + *
          • Use {@link InfRangeBound#MIN} to start with the minimum available ID. *
          * * @param end Ending stream ID bound for range. *
            - *
          • Use {@link StreamRange.IdBound#of} to specify a stream ID. - *
          • Use {@link StreamRange.IdBound#ofExclusive} to specify an exclusive bounded stream + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream * ID. - *
          • Use {@link StreamRange.InfRangeBound#MAX} to end with the maximum available ID. + *
          • Use {@link InfRangeBound#MAX} to end with the maximum available ID. *
          * * @param count Maximum count of stream entries to return. @@ -2888,21 +2945,22 @@ public T xrange( * Equivalent to {@link #xrange(String, StreamRange, StreamRange)} but returns the entries in * reverse order. * + * @see valkey.io for details. * @param key The key of the stream. * @param end Ending stream ID bound for range. *
            - *
          • Use {@link StreamRange.IdBound#of} to specify a stream ID. - *
          • Use {@link StreamRange.IdBound#ofExclusive} to specify an exclusive bounded stream + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream * ID. - *
          • Use {@link StreamRange.InfRangeBound#MAX} to end with the maximum available ID. + *
          • Use {@link InfRangeBound#MAX} to end with the maximum available ID. *
          * * @param start Starting stream ID bound for range. *
            - *
          • Use {@link StreamRange.IdBound#of} to specify a stream ID. - *
          • Use {@link StreamRange.IdBound#ofExclusive} to specify an exclusive bounded stream + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream * ID. - *
          • Use {@link StreamRange.InfRangeBound#MIN} to start with the minimum available ID. + *
          • Use {@link InfRangeBound#MIN} to start with the minimum available ID. *
          * * @return Command Response - A Map of key to stream entry data, where entry data is an array of pairings with format [[field, entry], [field, entry], ...]. @@ -2918,21 +2976,22 @@ public T xrevrange(@NonNull String key, @NonNull StreamRange end, @NonNull Strea * Equivalent to {@link #xrange(String, StreamRange, StreamRange, long)} but returns the entries * in reverse order. * + * @see valkey.io for details. * @param key The key of the stream. * @param start Starting stream ID bound for range. *
            - *
          • Use {@link StreamRange.IdBound#of} to specify a stream ID. - *
          • Use {@link StreamRange.IdBound#ofExclusive} to specify an exclusive bounded stream + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream * ID. - *
          • Use {@link StreamRange.InfRangeBound#MIN} to start with the minimum available ID. + *
          • Use {@link InfRangeBound#MIN} to start with the minimum available ID. *
          * * @param end Ending stream ID bound for range. *
            - *
          • Use {@link StreamRange.IdBound#of} to specify a stream ID. - *
          • Use {@link StreamRange.IdBound#ofExclusive} to specify an exclusive bounded stream + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream * ID. - *
          • Use {@link StreamRange.InfRangeBound#MAX} to end with the maximum available ID. + *
          • Use {@link InfRangeBound#MAX} to end with the maximum available ID. *
          * * @param count Maximum count of stream entries to return. @@ -2946,6 +3005,279 @@ public T xrevrange( return getThis(); } + /** + * Creates a new consumer group uniquely identified by groupname for the stream + * stored at key. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param groupname The newly created consumer group name. + * @param id Stream entry ID that specifies the last delivered entry in the stream from the new + * group’s perspective. The special ID "$" can be used to specify the last entry + * in the stream. + * @return Command Response - OK. + */ + public T xgroupCreate(@NonNull String key, @NonNull String groupname, @NonNull String id) { + protobufTransaction.addCommands(buildCommand(XGroupCreate, buildArgs(key, groupname, id))); + return getThis(); + } + + /** + * Creates a new consumer group uniquely identified by groupname for the stream + * stored at key. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param groupname The newly created consumer group name. + * @param id Stream entry ID that specifies the last delivered entry in the stream from the new + * group’s perspective. The special ID "$" can be used to specify the last entry + * in the stream. + * @param options The group options {@link StreamGroupOptions}. + * @return Command Response - OK. + */ + public T xgroupCreate( + @NonNull String key, + @NonNull String groupname, + @NonNull String id, + @NonNull StreamGroupOptions options) { + ArgsArray commandArgs = + buildArgs(concatenateArrays(new String[] {key, groupname, id}, options.toArgs())); + protobufTransaction.addCommands(buildCommand(XGroupCreate, commandArgs)); + return getThis(); + } + + /** + * Destroys the consumer group groupname for the stream stored at key. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param groupname The newly created consumer group name. 
+ * @return Command Response - true if the consumer group is destroyed. Otherwise, + * false. + */ + public T xgroupDestroy(@NonNull String key, @NonNull String groupname) { + protobufTransaction.addCommands(buildCommand(XGroupDestroy, buildArgs(key, groupname))); + return getThis(); + } + + /** + * Creates a consumer named consumer in the consumer group group for the + * stream stored at key. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param consumer The newly created consumer. + * @return Command Response - true if the consumer is created. Otherwise, false + * . + */ + public T xgroupCreateConsumer( + @NonNull String key, @NonNull String group, @NonNull String consumer) { + protobufTransaction.addCommands( + buildCommand(XGroupCreateConsumer, buildArgs(key, group, consumer))); + return getThis(); + } + + /** + * Deletes a consumer named consumer in the consumer group group. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param consumer The consumer to delete. + * @return Command Response - The number of pending messages the consumer had before + * it was deleted. + */ + public T xgroupDelConsumer(@NonNull String key, @NonNull String group, @NonNull String consumer) { + protobufTransaction.addCommands( + buildCommand(XGroupDelConsumer, buildArgs(key, group, consumer))); + return getThis(); + } + + /** + * Reads entries from the given streams owned by a consumer group. + * + * @apiNote When in cluster mode, all keys in keysAndIds must map to the same hash + * slot. + * @see valkey.io for details. + * @param keysAndIds A Map of keys and entry ids to read from. The + * Map is composed of a stream's key and the id of the entry after which the stream + * will be read. Use the special id of {@literal ">"} + * to receive only new messages. + * @param group The consumer group name.
+ * @param consumer The consumer name. + * @return Command Response - A {@literal Map>} with + * stream keys, to Map of stream-ids, to an array of pairings with format + * [[field, entry], [field, entry], ...]. + * Returns null if the consumer group does not exist. Returns a Map + * with a value of null if the stream is empty. + */ + public T xreadgroup( + @NonNull Map keysAndIds, @NonNull String group, @NonNull String consumer) { + return xreadgroup(keysAndIds, group, consumer, StreamReadGroupOptions.builder().build()); + } + + /** + * Reads entries from the given streams owned by a consumer group. + * + * @apiNote When in cluster mode, all keys in keysAndIds must map to the same hash + * slot. + * @see valkey.io for details. + * @param keysAndIds A Map of keys and entry ids to read from. The + * Map is composed of a stream's key and the id of the entry after which the stream + * will be read. Use the special id of {@literal ">"} + * to receive only new messages. + * @param group The consumer group name. + * @param consumer The consumer name. + * @param options Options detailing how to read the stream {@link StreamReadGroupOptions}. + * @return Command Response - A {@literal Map>} with + * stream keys, to Map of stream-ids, to an array of pairings with format + * [[field, entry], [field, entry], ...]. + * Returns null if the consumer group does not exist. Returns a Map + * with a value of null if the stream is empty. + */ + public T xreadgroup( + @NonNull Map keysAndIds, + @NonNull String group, + @NonNull String consumer, + @NonNull StreamReadGroupOptions options) { + protobufTransaction.addCommands( + buildCommand(XReadGroup, buildArgs(options.toArgs(group, consumer, keysAndIds)))); + return getThis(); + } + + /** + * Returns the number of messages that were successfully acknowledged by the consumer group member + * of a stream. This command should be called on a pending message so that such message does not + * get processed again.
+ * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param ids Stream entry ID to acknowledge and purge messages. + * @return Command Response - The number of messages that were successfully acknowledged. + */ + public T xack(@NonNull String key, @NonNull String group, @NonNull String[] ids) { + String[] args = concatenateArrays(new String[] {key, group}, ids); + protobufTransaction.addCommands(buildCommand(XAck, buildArgs(args))); + return getThis(); + } + + /** + * Returns stream message summary information for pending messages matching a given range of IDs. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @return Command Response - A 2D-array that includes the summary of pending + * messages, with the format + * [NumOfMessages, StartId, EndId, [[Consumer, NumOfMessages], ...], where: + *
            + *
          • NumOfMessages: The total number of pending messages for this consumer + * group. + *
          • StartId: The smallest ID among the pending messages. + *
          • EndId: The greatest ID among the pending messages. + *
          • [[Consumer, NumOfMessages], ...]: A 2D-array of every + * consumer in the consumer group with at least one pending message, and the number of + * pending messages it has. + *
          + */ + public T xpending(@NonNull String key, @NonNull String group) { + String[] args = {key, group}; + protobufTransaction.addCommands(buildCommand(XPending, buildArgs(args))); + return getThis(); + } + + /** + * Returns an extended form of stream message information for pending messages matching a given + * range of IDs. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param start Starting stream ID bound for range. + *
            + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream ID. + *
          • Use {@link InfRangeBound#MIN} to start with the minimum available ID. + *
          + * + * @param end Ending stream ID bound for range. + *
            + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream ID. + *
          • Use {@link InfRangeBound#MAX} to end with the maximum available ID. + *
          + * + * @param count Limits the number of messages returned. + * @return Command Response - A 2D-array of 4-tuples containing extended message + * information with the format [[ID, Consumer, TimeElapsed, NumOfDelivered], ... ] + * , where: + *
            + *
          • ID: The ID of the message. + *
          • Consumer: The name of the consumer that fetched the message and has + * still to acknowledge it. We call it the current owner of the message. + *
          • TimeElapsed: The number of milliseconds that elapsed since the last time + * this message was delivered to this consumer. + *
          • NumOfDelivered: The number of times this message was delivered. + *
          + */ + public T xpending( + @NonNull String key, + @NonNull String group, + @NonNull StreamRange start, + @NonNull StreamRange end, + long count) { + return xpending(key, group, start, end, count, StreamPendingOptions.builder().build()); + } + + /** + * Returns an extended form of stream message information for pending messages matching a given + * range of IDs. + * + * @see valkey.io for details. + * @param key The key of the stream. + * @param group The consumer group name. + * @param start Starting stream ID bound for range. + *
            + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream ID. + *
          • Use {@link InfRangeBound#MIN} to start with the minimum available ID. + *
          + * + * @param end Ending stream ID bound for range. + *
            + *
          • Use {@link IdBound#of} to specify a stream ID. + *
          • Use {@link IdBound#ofExclusive} to specify an exclusive bounded stream ID. + *
          • Use {@link InfRangeBound#MAX} to end with the maximum available ID. + *
          + * + * @param count Limits the number of messages returned. + * @param options Stream add options {@link StreamPendingOptions}. + * @return Command Response - A 2D-array of 4-tuples containing extended message + * information with the format [[ID, Consumer, TimeElapsed, NumOfDelivered], ... ] + * , where: + *
            + *
          • ID: The ID of the message. + *
          • Consumer: The name of the consumer that fetched the message and has + * still to acknowledge it. We call it the current owner of the message. + *
          • TimeElapsed: The number of milliseconds that elapsed since the last time + * this message was delivered to this consumer. + *
          • NumOfDelivered: The number of times this message was delivered. + *
          + */ + public T xpending( + @NonNull String key, + @NonNull String group, + @NonNull StreamRange start, + @NonNull StreamRange end, + long count, + @NonNull StreamPendingOptions options) { + String[] args = concatenateArrays(new String[] {key, group}, options.toArgs(start, end, count)); + protobufTransaction.addCommands(buildCommand(XPending, buildArgs(args))); + return getThis(); + } + /** * Returns the remaining time to live of key that has a timeout, in milliseconds. * @@ -3026,6 +3358,30 @@ public T flushall(FlushMode mode) { return getThis(); } + /** + * Deletes all the keys of the currently selected database. This command never fails. + * + * @see valkey.io for details. + * @return Command Response - OK. + */ + public T flushdb() { + protobufTransaction.addCommands(buildCommand(FlushDB)); + return getThis(); + } + + /** + * Deletes all the keys of the currently selected database. This command never fails. + * + * @see valkey.io for details. + * @param mode The flushing mode, could be either {@link FlushMode#SYNC} or {@link + * FlushMode#ASYNC}. + * @return Command Response - OK. + */ + public T flushdb(FlushMode mode) { + protobufTransaction.addCommands(buildCommand(FlushDB, buildArgs(mode.toString()))); + return getThis(); + } + /** * Displays a piece of generative computer art and the Redis version. * @@ -3125,6 +3481,17 @@ public T type(@NonNull String key) { return getThis(); } + /** + * Returns a random key from the currently selected database. * + * + * @see redis.io for details. + * @return Command Response - A random key from the database. + */ + public T randomKey() { + protobufTransaction.addCommands(buildCommand(RandomKey)); + return getThis(); + } + /** * Renames key to newKey.
          * If newKey already exists it is overwritten. @@ -3879,7 +4246,7 @@ public T functionList(@NonNull String libNamePattern, boolean withCode) { * @param keys An array of key arguments accessed by the function. To ensure the * correct execution of functions, both in standalone and clustered deployments, all names of * keys that a function accesses must be explicitly provided as keys. - * @param arguments An array of function arguments. Arguments + * @param arguments An array of function arguments. arguments * should not represent names of keys. * @return Command Response - The invoked function's return value. */ @@ -3893,12 +4260,12 @@ public T fcall(@NonNull String function, @NonNull String[] keys, @NonNull String } /** - * Invokes a previously loaded function. + * Invokes a previously loaded read-only function. * * @since Redis 7.0 and above. * @see redis.io for details. * @param function The function name. - * @param arguments An array of function arguments. Arguments + * @param arguments An array of function arguments. arguments * should not represent names of keys. * @return Command Response - The invoked function's return value. */ @@ -3906,6 +4273,43 @@ public T fcall(@NonNull String function, @NonNull String[] arguments) { return fcall(function, new String[0], arguments); } + /** + * Invokes a previously loaded read-only function. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param function The function name. + * @param keys An array of key arguments accessed by the function. To ensure the + * correct execution of functions, both in standalone and clustered deployments, all names of + * keys that a function accesses must be explicitly provided as keys. + * @param arguments An array of function arguments. arguments + * should not represent names of keys. + * @return Command Response - The invoked function's return value. 
+ */ + public T fcallReadOnly( + @NonNull String function, @NonNull String[] keys, @NonNull String[] arguments) { + ArgsArray commandArgs = + buildArgs( + concatenateArrays( + new String[] {function, Long.toString(keys.length)}, keys, arguments)); + protobufTransaction.addCommands(buildCommand(FCallReadOnly, commandArgs)); + return getThis(); + } + + /** + * Invokes a previously loaded read-only function. + * + * @since Redis 7.0 and above. + * @see redis.io for details. + * @param function The function name. + * @param arguments An array of function arguments. arguments + * should not represent names of keys. + * @return Command Response - The invoked function's return value. + */ + public T fcallReadOnly(@NonNull String function, @NonNull String[] arguments) { + return fcallReadOnly(function, new String[0], arguments); + } + /** * Returns information about the function that's currently running and information about the * available execution engines. @@ -4461,6 +4865,269 @@ public T lcsLen(@NonNull String key1, @NonNull String key2) { return getThis(); } + /** + * Gets the union of all the given sets. + * + * @see valkey.io for details. + * @param keys The keys of the sets. + * @return Command Response - A set of members which are present in at least one of the given + * sets. If none of the sets exist, an empty set will be returned. + */ + public T sunion(@NonNull String[] keys) { + protobufTransaction.addCommands(buildCommand(SUnion, buildArgs(keys))); + return getThis(); + } + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + *
          + * The sort command can be used to sort elements based on different criteria and + * apply transformations on sorted elements.
          + * To store the result into a new key, see {@link #sortStore(String, String)}.
          + * + * @param key The key of the list, set, or sorted set to be sorted. + * @return Command Response - An Array of sorted elements. + */ + public T sort(@NonNull String key) { + ArgsArray commandArgs = buildArgs(key); + protobufTransaction.addCommands(buildCommand(Sort, commandArgs)); + return getThis(); + } + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + *
          + * The sortReadOnly command can be used to sort elements based on different criteria + * and apply transformations on sorted elements. + * + * @since Redis 7.0 and above. + * @param key The key of the list, set, or sorted set to be sorted. + * @return Command Response - An Array of sorted elements. + */ + public T sortReadOnly(@NonNull String key) { + ArgsArray commandArgs = buildArgs(key); + protobufTransaction.addCommands(buildCommand(SortReadOnly, commandArgs)); + return getThis(); + } + + /** + * Sorts the elements in the list, set, or sorted set at key and stores the result in + * destination. The sort command can be used to sort elements based on + * different criteria, apply transformations on sorted elements, and store the result in a new + * key.
          + * To get the sort result without storing it into a key, see {@link #sort(String)} or {@link + * #sortReadOnly(String)}. + * + * @param key The key of the list, set, or sorted set to be sorted. + * @param destination The key where the sorted result will be stored. + * @return Command Response - The number of elements in the sorted key stored at destination + * . + */ + public T sortStore(@NonNull String key, @NonNull String destination) { + ArgsArray commandArgs = buildArgs(new String[] {key, STORE_COMMAND_STRING, destination}); + protobufTransaction.addCommands(buildCommand(Sort, commandArgs)); + return getThis(); + } + + /** + * Returns the indices and length of the longest common subsequence between strings stored at + * key1 and key2. + * + * @since Redis 7.0 and above. + * @see valkey.io for details. + * @param key1 The key that stores the first string. + * @param key2 The key that stores the second string. + * @return Command Response - A Map containing the indices of the longest common + * subsequence between the 2 strings and the length of the longest common subsequence. The + * resulting map contains two keys, "matches" and "len": + *
            + *
          • "len" is mapped to the length of the longest common subsequence between the 2 strings + * stored as Long. + *
          • "matches" is mapped to a three dimensional Long array that stores pairs + * of indices that represent the location of the common subsequences in the strings held + * by key1 and key2. + *
          + * + * @example If key1 holds the string "abcd123" and key2 + * holds the string "bcdef123" then the sample result would be + *
          {@code
          +     * new Long[][][] {
          +     *      {
          +     *          {4L, 6L},
          +     *          {5L, 7L}
          +     *      },
          +     *      {
          +     *          {1L, 3L},
          +     *          {0L, 2L}
          +     *      }
          +     *  }
          +     * }
          + * The result indicates that the first substring match is "123" in key1 + * at index 4 to 6 which matches the substring in key2 + * at index 5 to 7. And the second substring match is + * "bcd" in key1 at index 1 to 3 which matches + * the substring in key2 at index 0 to 2. + */ + public T lcsIdx(@NonNull String key1, @NonNull String key2) { + ArgsArray args = buildArgs(key1, key2, IDX_COMMAND_STRING); + protobufTransaction.addCommands(buildCommand(LCS, args)); + return getThis(); + } + + /** + * Returns the indices and length of the longest common subsequence between strings stored at + * key1 and key2. + * + * @since Redis 7.0 and above. + * @see valkey.io for details. + * @param key1 The key that stores the first string. + * @param key2 The key that stores the second string. + * @param minMatchLen The minimum length of matches to include in the result. + * @return Command Response - A Map containing the indices of the longest common + * subsequence between the 2 strings and the length of the longest common subsequence. The + * resulting map contains two keys, "matches" and "len": + *
            + *
          • "len" is mapped to the length of the longest common subsequence between the 2 strings + * stored as Long. + *
          • "matches" is mapped to a three dimensional Long array that stores pairs + * of indices that represent the location of the common subsequences in the strings held + * by key1 and key2. + *
          + * + * @example If key1 holds the string "abcd123" and key2 + * holds the string "bcdef123" then the sample result would be + *
          {@code
          +     * new Long[][][] {
          +     *      {
          +     *          {4L, 6L},
          +     *          {5L, 7L}
          +     *      },
          +     *      {
          +     *          {1L, 3L},
          +     *          {0L, 2L}
          +     *      }
          +     *  }
          +     * }
          + * The result indicates that the first substring match is "123" in key1 + * at index 4 to 6 which matches the substring in key2 + * at index 5 to 7. And the second substring match is + * "bcd" in key1 at index 1 to 3 which matches + * the substring in key2 at index 0 to 2. + */ + public T lcsIdx(@NonNull String key1, @NonNull String key2, long minMatchLen) { + ArgsArray args = + buildArgs( + key1, + key2, + IDX_COMMAND_STRING, + MINMATCHLEN_COMMAND_STRING, + String.valueOf(minMatchLen)); + protobufTransaction.addCommands(buildCommand(LCS, args)); + return getThis(); + } + + /** + * Returns the indices and length of the longest common subsequence between strings stored at + * key1 and key2. + * + * @since Redis 7.0 and above. + * @see valkey.io for details. + * @param key1 The key that stores the first string. + * @param key2 The key that stores the second string. + * @return Command Response - A Map containing the indices of the longest common + * subsequence between the 2 strings and the length of the longest common subsequence. The + * resulting map contains two keys, "matches" and "len": + *
            + *
          • "len" is mapped to the length of the longest common subsequence between the 2 strings + * stored as Long. + *
          • "matches" is mapped to a three dimensional Long array that stores pairs + * of indices that represent the location of the common subsequences in the strings held + * by key1 and key2. For example, + *
          + * + * @example If key1 holds the string "abcd1234" and key2 + * holds the string "bcdef1234" then the sample result would be + *
          {@code
          +     * new Object[] {
          +     *      new Object[] {
          +     *          new Long[] {4L, 7L},
          +     *          new Long[] {5L, 8L},
          +     *          4L},
          +     *      new Object[] {
          +     *          new Long[] {1L, 3L},
          +     *          new Long[] {0L, 2L},
          +     *          3L}
          +     *      }
          +     * }
          + * The result indicates that the first substring match is "1234" in key1 + * at index 4 to 7 which matches the substring in key2 + * at index 5 to 8 and the last element in the array is the + * length of the substring match which is 4. And the second substring match is + * "bcd" in key1 at index 1 to 3 which + * matches the substring in key2 at index 0 to 2 and + * the last element in the array is the length of the substring match which is 3. + */ + public T lcsIdxWithMatchLen(@NonNull String key1, @NonNull String key2) { + ArgsArray args = buildArgs(key1, key2, IDX_COMMAND_STRING, WITHMATCHLEN_COMMAND_STRING); + protobufTransaction.addCommands(buildCommand(LCS, args)); + return getThis(); + } + + /** + * Returns the indices and length of the longest common subsequence between strings stored at + * key1 and key2. + * + * @since Redis 7.0 and above. + * @see valkey.io for details. + * @param key1 The key that stores the first string. + * @param key2 The key that stores the second string. + * @param minMatchLen The minimum length of matches to include in the result. + * @return Command Response - A Map containing the indices of the longest common + * subsequence between the 2 strings and the length of the longest common subsequence. The + * resulting map contains two keys, "matches" and "len": + *
            + *
          • "len" is mapped to the length of the longest common subsequence between the 2 strings + * stored as Long. + *
          • "matches" is mapped to a three dimensional Long array that stores pairs + * of indices that represent the location of the common subsequences in the strings held + * by key1 and key2. + *
          + * + * @example If key1 holds the string "abcd1234" and key2 + * holds the string "bcdef1234" then the sample result would be + *
          {@code
          +     * new Object[] {
          +     *      new Object[] {
          +     *          new Long[] {4L, 7L},
          +     *          new Long[] {5L, 8L},
          +     *          4L},
          +     *      new Object[] {
          +     *          new Long[] {1L, 3L},
          +     *          new Long[] {0L, 2L},
          +     *          3L}
          +     *      }
          +     * }
          + * The result indicates that the first substring match is "1234" in key1 + * at index 4 to 7 which matches the substring in key2 + * at index 5 to 8 and the last element in the array is the + * length of the substring match which is 4. And the second substring match is + * "bcd" in key1 at index 1 to 3 which + * matches the substring in key2 at index 0 to 2 and + * the last element in the array is the length of the substring match which is 3. + */ + public T lcsIdxWithMatchLen(@NonNull String key1, @NonNull String key2, long minMatchLen) { + ArgsArray args = + buildArgs( + key1, + key2, + IDX_COMMAND_STRING, + MINMATCHLEN_COMMAND_STRING, + String.valueOf(minMatchLen), + WITHMATCHLEN_COMMAND_STRING); + protobufTransaction.addCommands(buildCommand(LCS, args)); + return getThis(); + } + /** Build protobuf {@link Command} object for given command and arguments. */ protected Command buildCommand(RequestType requestType) { return buildCommand(requestType, buildArgs()); diff --git a/java/client/src/main/java/glide/api/models/ClusterTransaction.java b/java/client/src/main/java/glide/api/models/ClusterTransaction.java index e2c4820057..1fabea46fd 100644 --- a/java/client/src/main/java/glide/api/models/ClusterTransaction.java +++ b/java/client/src/main/java/glide/api/models/ClusterTransaction.java @@ -1,7 +1,16 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; +import static glide.utils.ArrayTransformUtils.concatenateArrays; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; + +import glide.api.models.commands.SortClusterOptions; import lombok.AllArgsConstructor; +import lombok.NonNull; +import org.apache.commons.lang3.ArrayUtils; +import 
redis_request.RedisRequestOuterClass; /** * Extends BaseTransaction class for cluster mode commands. Transactions allow the execution of a @@ -27,4 +36,68 @@ public class ClusterTransaction extends BaseTransaction { protected ClusterTransaction getThis() { return this; } + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + *
          + * The sort command can be used to sort elements based on different criteria and + * apply transformations on sorted elements.
          + * To store the result into a new key, see {@link #sortStore(String, String, SortClusterOptions)}. + * + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortClusterOptions The {@link SortClusterOptions}. + * @return Command Response - An Array of sorted elements. + */ + public ClusterTransaction sort( + @NonNull String key, @NonNull SortClusterOptions sortClusterOptions) { + RedisRequestOuterClass.Command.ArgsArray commandArgs = + buildArgs(ArrayUtils.addFirst(sortClusterOptions.toArgs(), key)); + protobufTransaction.addCommands(buildCommand(Sort, commandArgs)); + return this; + } + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + *
          + * The sortReadOnly command can be used to sort elements based on different criteria + * and apply transformations on sorted elements.
          + * + * @since Redis 7.0 and above. + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortClusterOptions The {@link SortClusterOptions}. + * @return Command Response - An Array of sorted elements. + */ + public ClusterTransaction sortReadOnly( + @NonNull String key, @NonNull SortClusterOptions sortClusterOptions) { + RedisRequestOuterClass.Command.ArgsArray commandArgs = + buildArgs(ArrayUtils.addFirst(sortClusterOptions.toArgs(), key)); + protobufTransaction.addCommands(buildCommand(SortReadOnly, commandArgs)); + return this; + } + + /** + * Sorts the elements in the list, set, or sorted set at key and stores the result in + * destination. The sort command can be used to sort elements based on + * different criteria, apply transformations on sorted elements, and store the result in a new + * key.
          + * To get the sort result without storing it into a key, see {@link #sort(String, + * SortClusterOptions)} or {@link #sortReadOnly(String, SortClusterOptions)}. + * + * @param key The key of the list, set, or sorted set to be sorted. + * @param destination The key where the sorted result will be stored. + * @param sortClusterOptions The {@link SortClusterOptions}. + * @return Command Response - The number of elements in the sorted key stored at destination + * . + */ + public ClusterTransaction sortStore( + @NonNull String key, + @NonNull String destination, + @NonNull SortClusterOptions sortClusterOptions) { + String[] storeArguments = new String[] {STORE_COMMAND_STRING, destination}; + RedisRequestOuterClass.Command.ArgsArray commandArgs = + buildArgs( + concatenateArrays(new String[] {key}, sortClusterOptions.toArgs(), storeArguments)); + protobufTransaction.addCommands(buildCommand(Sort, commandArgs)); + return this; + } } diff --git a/java/client/src/main/java/glide/api/models/ClusterValue.java b/java/client/src/main/java/glide/api/models/ClusterValue.java index 360b2bcaa9..01abbf72b4 100644 --- a/java/client/src/main/java/glide/api/models/ClusterValue.java +++ b/java/client/src/main/java/glide/api/models/ClusterValue.java @@ -1,8 +1,9 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models; import glide.api.models.configuration.RequestRoutingConfiguration.Route; import java.util.Map; +import java.util.stream.Collectors; /** * Represents a returned value object from a Redis server with cluster-mode enabled. The response @@ -68,6 +69,17 @@ public static ClusterValue ofMultiValue(Map data) { return res; } + /** A constructor for the value. 
*/ + public static ClusterValue ofMultiValueBinary(Map data) { + var res = new ClusterValue(); + // the map node address can be converted to a string + Map multiValue = + data.entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey().getString(), Map.Entry::getValue)); + res.multiValue = multiValue; + return res; + } + /** * Check that multi-value is stored in this object. Should be called prior to {@link * #getMultiValue()}. diff --git a/java/client/src/main/java/glide/api/models/GlideString.java b/java/client/src/main/java/glide/api/models/GlideString.java new file mode 100644 index 0000000000..f02927ba4b --- /dev/null +++ b/java/client/src/main/java/glide/api/models/GlideString.java @@ -0,0 +1,98 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.concurrent.atomic.AtomicBoolean; +import lombok.Getter; + +// TODO docs for the god of docs +public class GlideString { + + @Getter private byte[] bytes; + private String string = null; + + /** Flag whether possibility to convert to string was checked. 
*/ + private final AtomicBoolean conversionChecked = new AtomicBoolean(false); + + private GlideString() {} + + public static GlideString of(String string) { + var res = new GlideString(); + res.string = string; + res.bytes = string.getBytes(StandardCharsets.UTF_8); + return res; + } + + public static GlideString of(byte[] bytes) { + var res = new GlideString(); + res.bytes = bytes; + return res; + } + + public static GlideString gs(String string) { + return GlideString.of(string); + } + + public static GlideString gs(byte[] bytes) { + return GlideString.of(bytes); + } + + @Override + public String toString() { + return getString(); + } + + public String getString() { + if (string != null) { + return string; + } + + assert canConvertToString() : "Value cannot be represented as a string"; + return string; + } + + public boolean canConvertToString() { + if (string != null) { + return true; + } + + // double-checked locking + if (conversionChecked.get()) { + return false; + } else { + synchronized (this) { + if (conversionChecked.get()) { + return false; + } else + try { + // TODO find a better way to check this + // Detect whether `bytes` could be represented by a `String` without data corruption + var tmpStr = new String(bytes, StandardCharsets.UTF_8); + if (Arrays.equals(bytes, tmpStr.getBytes(StandardCharsets.UTF_8))) { + string = tmpStr; + return true; + } else { + return false; + } + } finally { + conversionChecked.set(true); + } + } + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof GlideString)) return false; + GlideString that = (GlideString) o; + + return Arrays.equals(bytes, that.bytes); + } + + @Override + public int hashCode() { + return Arrays.hashCode(bytes); + } +} diff --git a/java/client/src/main/java/glide/api/models/Script.java b/java/client/src/main/java/glide/api/models/Script.java index 80688ddd2f..21be84f98f 100644 --- a/java/client/src/main/java/glide/api/models/Script.java +++ 
b/java/client/src/main/java/glide/api/models/Script.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models; import static glide.ffi.resolvers.ScriptResolver.dropScript; diff --git a/java/client/src/main/java/glide/api/models/Transaction.java b/java/client/src/main/java/glide/api/models/Transaction.java index 835fdc98e9..ff49530ab4 100644 --- a/java/client/src/main/java/glide/api/models/Transaction.java +++ b/java/client/src/main/java/glide/api/models/Transaction.java @@ -1,12 +1,18 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models; import static glide.api.commands.GenericBaseCommands.REPLACE_REDIS_API; import static glide.api.commands.GenericCommands.DB_REDIS_API; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; +import static glide.api.models.commands.SortOptions.STORE_COMMAND_STRING; +import static glide.utils.ArrayTransformUtils.concatenateArrays; import static redis_request.RedisRequestOuterClass.RequestType.Copy; import static redis_request.RedisRequestOuterClass.RequestType.Move; import static redis_request.RedisRequestOuterClass.RequestType.Select; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; +import glide.api.models.commands.SortOptions; import lombok.AllArgsConstructor; import lombok.NonNull; import org.apache.commons.lang3.ArrayUtils; @@ -111,4 +117,58 @@ public Transaction copy( protobufTransaction.addCommands(buildCommand(Copy, commandArgs)); return this; } + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. 
+ * The sort command can be used to sort elements based on different criteria and + * apply transformations on sorted elements.
          + * To store the result into a new key, see {@link #sortStore(String, String, SortOptions)}. + * + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortOptions The {@link SortOptions}. + * @return Command Response - An Array of sorted elements. + */ + public Transaction sort(@NonNull String key, @NonNull SortOptions sortOptions) { + ArgsArray commandArgs = buildArgs(ArrayUtils.addFirst(sortOptions.toArgs(), key)); + protobufTransaction.addCommands(buildCommand(Sort, commandArgs)); + return this; + } + + /** + * Sorts the elements in the list, set, or sorted set at key and returns the result. + * The sortReadOnly command can be used to sort elements based on different criteria + * and apply transformations on sorted elements.
          + * + * @since Redis 7.0 and above. + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortOptions The {@link SortOptions}. + * @return Command Response - An Array of sorted elements. + */ + public Transaction sortReadOnly(@NonNull String key, @NonNull SortOptions sortOptions) { + ArgsArray commandArgs = buildArgs(ArrayUtils.addFirst(sortOptions.toArgs(), key)); + protobufTransaction.addCommands(buildCommand(SortReadOnly, commandArgs)); + return this; + } + + /** + * Sorts the elements in the list, set, or sorted set at key and stores the result in + * destination. The sort command can be used to sort elements based on + * different criteria, apply transformations on sorted elements, and store the result in a new + * key.
          + * To get the sort result without storing it into a key, see {@link #sort(String, SortOptions)}. + * + * @param key The key of the list, set, or sorted set to be sorted. + * @param sortOptions The {@link SortOptions}. + * @param destination The key where the sorted result will be stored. + * @return Command Response - The number of elements in the sorted key stored at destination + * . + */ + public Transaction sortStore( + @NonNull String key, @NonNull String destination, @NonNull SortOptions sortOptions) { + String[] storeArguments = new String[] {STORE_COMMAND_STRING, destination}; + ArgsArray arguments = + buildArgs(concatenateArrays(new String[] {key}, sortOptions.toArgs(), storeArguments)); + protobufTransaction.addCommands(buildCommand(Sort, arguments)); + return this; + } } diff --git a/java/client/src/main/java/glide/api/models/commands/ConditionalChange.java b/java/client/src/main/java/glide/api/models/commands/ConditionalChange.java index 28a4e3467d..d24bb1a0a1 100644 --- a/java/client/src/main/java/glide/api/models/commands/ConditionalChange.java +++ b/java/client/src/main/java/glide/api/models/commands/ConditionalChange.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import glide.api.commands.GeospatialIndicesBaseCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/ExpireOptions.java b/java/client/src/main/java/glide/api/models/commands/ExpireOptions.java index 2f51745af5..c62641a38c 100644 --- a/java/client/src/main/java/glide/api/models/commands/ExpireOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/ExpireOptions.java @@ -1,7 +1,8 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package 
glide.api.models.commands; import glide.api.commands.GenericBaseCommands; +import glide.api.models.GlideString; import lombok.RequiredArgsConstructor; /** @@ -42,4 +43,13 @@ public enum ExpireOptions { public String[] toArgs() { return new String[] {this.redisApi}; } + + /** + * Converts SetOptions into a GlideString[] to add to a {@link Command} arguments. + * + * @return GlideString[] + */ + public GlideString[] toGlideStringArgs() { + return new GlideString[] {GlideString.gs(redisApi)}; + } } diff --git a/java/client/src/main/java/glide/api/models/commands/FlushMode.java b/java/client/src/main/java/glide/api/models/commands/FlushMode.java index 69fc260ecd..9ba5c2938c 100644 --- a/java/client/src/main/java/glide/api/models/commands/FlushMode.java +++ b/java/client/src/main/java/glide/api/models/commands/FlushMode.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import glide.api.RedisClient; @@ -14,16 +14,24 @@ *
        • FLUSHALL command implemented by {@link RedisClient#flushall(FlushMode)}, * {@link RedisClusterClient#flushall(FlushMode)}, and {@link * RedisClusterClient#flushall(FlushMode, SingleNodeRoute)}. + *
        • FLUSHDB command implemented by {@link RedisClient#flushdb(FlushMode)}, {@link + * RedisClusterClient#flushdb(FlushMode)}, and {@link RedisClusterClient#flushdb(FlushMode, + * SingleNodeRoute)}. *
        • FUNCTION FLUSH command implemented by {@link * RedisClient#functionFlush(FlushMode)}, {@link RedisClusterClient#functionFlush(FlushMode)}, * and {@link RedisClusterClient#functionFlush(FlushMode, Route)}. *
        * - * @see valkey.io and valkey.io + * @see flushall, flushdb, and function flush at valkey.io */ public enum FlushMode { - /** Flushes synchronously. */ + /** + * Flushes synchronously. + * + * @since Redis 6.2 and above. + */ SYNC, /** Flushes asynchronously. */ ASYNC diff --git a/java/client/src/main/java/glide/api/models/commands/GetExOptions.java b/java/client/src/main/java/glide/api/models/commands/GetExOptions.java new file mode 100644 index 0000000000..25048bc0e0 --- /dev/null +++ b/java/client/src/main/java/glide/api/models/commands/GetExOptions.java @@ -0,0 +1,111 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.commands; + +import static glide.api.models.commands.GetExOptions.ExpiryType.MILLISECONDS; +import static glide.api.models.commands.GetExOptions.ExpiryType.PERSIST; +import static glide.api.models.commands.GetExOptions.ExpiryType.SECONDS; +import static glide.api.models.commands.GetExOptions.ExpiryType.UNIX_MILLISECONDS; +import static glide.api.models.commands.GetExOptions.ExpiryType.UNIX_SECONDS; + +import glide.api.commands.StringBaseCommands; +import java.util.ArrayList; +import java.util.List; +import lombok.RequiredArgsConstructor; + +/** + * Optional arguments to {@link StringBaseCommands#getex(String, GetExOptions)} command. + * + * @see redis.io + */ +public class GetExOptions { + + /** Expiry type for the time to live */ + private final ExpiryType type; + + /** The amount of time to live before the key expires. */ + private Long count; + + private GetExOptions(ExpiryType type) { + this.type = type; + } + + private GetExOptions(ExpiryType type, Long count) { + this.type = type; + this.count = count; + } + + /** + * Set the specified expire time, in seconds. Equivalent to EX in the Redis API. + * + * @param seconds The time to expire, in seconds. + * @return The options specifying the given expiry. 
+ */ + public static GetExOptions Seconds(Long seconds) { + return new GetExOptions(SECONDS, seconds); + } + + /** + * Set the specified expire time, in milliseconds. Equivalent to PX in the Redis API. + * + * @param milliseconds The time to expire, in milliseconds. + * @return The options specifying the given expiry. + */ + public static GetExOptions Milliseconds(Long milliseconds) { + return new GetExOptions(MILLISECONDS, milliseconds); + } + + /** + * Set the specified Unix time at which the key will expire, in seconds. Equivalent to + * EXAT in the Redis API. + * + * @param unixSeconds The UNIX TIME to expire, in seconds. + * @return The options specifying the given expiry. + */ + public static GetExOptions UnixSeconds(Long unixSeconds) { + return new GetExOptions(UNIX_SECONDS, unixSeconds); + } + + /** + * Set the specified Unix time at which the key will expire, in milliseconds. Equivalent to + * PXAT in the Redis API. + * + * @param unixMilliseconds The UNIX TIME to expire, in milliseconds. + * @return The options specifying the given expiry. + */ + public static GetExOptions UnixMilliseconds(Long unixMilliseconds) { + return new GetExOptions(UNIX_MILLISECONDS, unixMilliseconds); + } + + /** Remove the time to live associated with the key. */ + public static GetExOptions Persist() { + return new GetExOptions(PERSIST); + } + + /** Types of value expiration configuration. */ + @RequiredArgsConstructor + protected enum ExpiryType { + SECONDS("EX"), + MILLISECONDS("PX"), + UNIX_SECONDS("EXAT"), + UNIX_MILLISECONDS("PXAT"), + PERSIST("PERSIST"); + + private final String redisApi; + } + + /** + * Converts GetExOptions into a String[] to pass to the GETEX command. 
+ * + * @return String[] + */ + public String[] toArgs() { + List optionArgs = new ArrayList<>(); + + optionArgs.add(type.redisApi); + if (count != null) { + optionArgs.add(String.valueOf(count)); + } + + return optionArgs.toArray(new String[0]); + } +} diff --git a/java/client/src/main/java/glide/api/models/commands/InfoOptions.java b/java/client/src/main/java/glide/api/models/commands/InfoOptions.java index 8b518ab87b..40fd734e4b 100644 --- a/java/client/src/main/java/glide/api/models/commands/InfoOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/InfoOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import glide.api.commands.ServerManagementCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/LInsertOptions.java b/java/client/src/main/java/glide/api/models/commands/LInsertOptions.java index 92ea5488ef..6158b1044a 100644 --- a/java/client/src/main/java/glide/api/models/commands/LInsertOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/LInsertOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import glide.api.commands.ListBaseCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/LPosOptions.java b/java/client/src/main/java/glide/api/models/commands/LPosOptions.java index 384c4c18c9..843c953079 100644 --- a/java/client/src/main/java/glide/api/models/commands/LPosOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/LPosOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - 
SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import glide.api.commands.ListBaseCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/ListDirection.java b/java/client/src/main/java/glide/api/models/commands/ListDirection.java index d7a1b7ba31..ebbf5961e4 100644 --- a/java/client/src/main/java/glide/api/models/commands/ListDirection.java +++ b/java/client/src/main/java/glide/api/models/commands/ListDirection.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import glide.api.commands.ListBaseCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/RangeOptions.java b/java/client/src/main/java/glide/api/models/commands/RangeOptions.java index 179145eaaf..5cd8c80be6 100644 --- a/java/client/src/main/java/glide/api/models/commands/RangeOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/RangeOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import static glide.api.commands.SortedSetBaseCommands.WITH_SCORES_REDIS_API; diff --git a/java/client/src/main/java/glide/api/models/commands/RestoreOptions.java b/java/client/src/main/java/glide/api/models/commands/RestoreOptions.java new file mode 100644 index 0000000000..c0dc48d714 --- /dev/null +++ b/java/client/src/main/java/glide/api/models/commands/RestoreOptions.java @@ -0,0 +1,91 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.commands; + +import static glide.api.models.GlideString.gs; + +import glide.api.commands.GenericBaseCommands; +import glide.api.models.GlideString; +import java.util.ArrayList; +import java.util.List; +import 
lombok.*; + +/** + * Optional arguments to {@link GenericBaseCommands#restore(GlideString, long, byte[], + * RestoreOptions)} + * + * @see valkey.io + */ +@Getter +@Builder +public final class RestoreOptions { + /** REPLACE subcommand string to replace existing key */ + public static final String REPLACE_REDIS_API = "REPLACE"; + + /** + * ABSTTL subcommand string to represent absolute timestamp (in milliseconds) for TTL + */ + public static final String ABSTTL_REDIS_API = "ABSTTL"; + + /** IDELTIME subcommand string to set Object Idletime */ + public static final String IDLETIME_REDIS_API = "IDLETIME"; + + /** FREQ subcommand string to set Object Frequency */ + public static final String FREQ_REDIS_API = "FREQ"; + + /** When `true`, it represents REPLACE keyword has been used */ + @Builder.Default private boolean hasReplace = false; + + /** When `true`, it represents ABSTTL keyword has been used */ + @Builder.Default private boolean hasAbsttl = false; + + /** It represents the idletime of object */ + @Builder.Default private Long idletime = null; + + /** It represents the frequency of object */ + @Builder.Default private Long frequency = null; + + /** + * Creates the argument to be used in {@link GenericBaseCommands#restore(GlideString, long, + * byte[], RestoreOptions)} + * + * @return a GlideString array that holds the subcommands and their arguments. 
+ */ + public GlideString[] toArgs(GlideString key, long ttl, byte[] value) { + List resultList = new ArrayList<>(); + + resultList.add(key); + resultList.add(gs(Long.toString(ttl))); + resultList.add(gs(value)); + + if (hasReplace) { + resultList.add(gs(REPLACE_REDIS_API)); + } + + if (hasAbsttl) { + resultList.add(gs(ABSTTL_REDIS_API)); + } + + if (idletime != null) { + resultList.add(gs(IDLETIME_REDIS_API)); + resultList.add(gs(Long.toString(idletime))); + } + + if (frequency != null) { + resultList.add(gs(FREQ_REDIS_API)); + resultList.add(gs(Long.toString(frequency))); + } + + return resultList.toArray(new GlideString[0]); + } + + /** Custom setter methods for replace and absttl */ + public static class RestoreOptionsBuilder { + public RestoreOptionsBuilder replace() { + return hasReplace(true); + } + + public RestoreOptionsBuilder absttl() { + return hasAbsttl(true); + } + } +} diff --git a/java/client/src/main/java/glide/api/models/commands/ScoreFilter.java b/java/client/src/main/java/glide/api/models/commands/ScoreFilter.java index 19403d477a..4fc0c92e58 100644 --- a/java/client/src/main/java/glide/api/models/commands/ScoreFilter.java +++ b/java/client/src/main/java/glide/api/models/commands/ScoreFilter.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import glide.api.commands.SortedSetBaseCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/ScriptOptions.java b/java/client/src/main/java/glide/api/models/commands/ScriptOptions.java index 6aef640569..308002723a 100644 --- a/java/client/src/main/java/glide/api/models/commands/ScriptOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/ScriptOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors 
- SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import glide.api.commands.GenericBaseCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/SetOptions.java b/java/client/src/main/java/glide/api/models/commands/SetOptions.java index 831e29c1b1..f116e4538c 100644 --- a/java/client/src/main/java/glide/api/models/commands/SetOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/SetOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import static glide.api.models.commands.SetOptions.ExpiryType.KEEP_EXISTING; @@ -8,6 +8,7 @@ import static glide.api.models.commands.SetOptions.ExpiryType.UNIX_SECONDS; import glide.api.commands.StringBaseCommands; +import glide.api.models.GlideString; import java.util.ArrayList; import java.util.List; import lombok.Builder; @@ -168,4 +169,31 @@ public String[] toArgs() { return optionArgs.toArray(new String[0]); } + + /** + * Converts SetOptions into a GlideString[] to add to a {@link Command} arguments. 
+ * + * @return GlideString[] + */ + public GlideString[] toGlideStringArgs() { + List optionArgs = new ArrayList<>(); + if (conditionalSet != null) { + optionArgs.add(GlideString.of(conditionalSet.redisApi)); + } + + if (returnOldValue) { + optionArgs.add(GlideString.of(RETURN_OLD_VALUE)); + } + + if (expiry != null) { + optionArgs.add(GlideString.of(expiry.type.redisApi)); + if (expiry.type != KEEP_EXISTING) { + assert expiry.count != null + : "Set command received expiry type " + expiry.type + ", but count was not set."; + optionArgs.add(GlideString.of(expiry.count.toString())); + } + } + + return optionArgs.toArray(new GlideString[0]); + } } diff --git a/java/client/src/main/java/glide/api/models/commands/SortBaseOptions.java b/java/client/src/main/java/glide/api/models/commands/SortBaseOptions.java new file mode 100644 index 0000000000..4f546f14f9 --- /dev/null +++ b/java/client/src/main/java/glide/api/models/commands/SortBaseOptions.java @@ -0,0 +1,109 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.commands; + +import java.util.ArrayList; +import java.util.List; +import lombok.RequiredArgsConstructor; +import lombok.experimental.SuperBuilder; + +/** + * Optional arguments to sort, sortReadOnly, and sortStore commands + * + * @see redis.io and redis.io + */ +@SuperBuilder +public abstract class SortBaseOptions { + /** + * LIMIT subcommand string to include in the SORT and SORT_RO + * commands. + */ + public static final String LIMIT_COMMAND_STRING = "LIMIT"; + + /** + * ALPHA subcommand string to include in the SORT and SORT_RO + * commands. + */ + public static final String ALPHA_COMMAND_STRING = "ALPHA"; + + /** STORE subcommand string to include in the SORT command. */ + public static final String STORE_COMMAND_STRING = "STORE"; + + /** + * Limiting the range of the query by setting offset and result count. See {@link Limit} class for + * more information. 
+ */ + private final Limit limit; + + /** Options for sorting order of elements. */ + private final OrderBy orderBy; + + /** + * When true, sorts elements lexicographically. When false (default), + * sorts elements numerically. Use this when the list, set, or sorted set contains string values + * that cannot be converted into double precision floating point numbers. + */ + private final boolean isAlpha; + + public abstract static class SortBaseOptionsBuilder< + C extends SortBaseOptions, B extends SortBaseOptionsBuilder> { + public B alpha() { + this.isAlpha = true; + return self(); + } + } + + /** + * The LIMIT argument is commonly used to specify a subset of results from the + * matching elements, similar to the LIMIT clause in SQL (e.g., `SELECT LIMIT offset, + * count`). + */ + @RequiredArgsConstructor + public static final class Limit { + /** The starting position of the range, zero based. */ + private final long offset; + + /** + * The maximum number of elements to include in the range. A negative count returns all elements + * from the offset. + */ + private final long count; + } + + /** + * Specifies the order to sort the elements. Can be ASC (ascending) or DESC + * (descending). + */ + @RequiredArgsConstructor + public enum OrderBy { + ASC, + DESC + } + + /** + * Creates the arguments to be used in SORT and SORT_RO commands. + * + * @return a String array that holds the sub commands and their arguments. 
+ */ + public String[] toArgs() { + List optionArgs = new ArrayList<>(); + + if (limit != null) { + optionArgs.addAll( + List.of( + LIMIT_COMMAND_STRING, + Long.toString(this.limit.offset), + Long.toString(this.limit.count))); + } + + if (orderBy != null) { + optionArgs.add(this.orderBy.toString()); + } + + if (isAlpha) { + optionArgs.add(ALPHA_COMMAND_STRING); + } + + return optionArgs.toArray(new String[0]); + } +} diff --git a/java/client/src/main/java/glide/api/models/commands/SortClusterOptions.java b/java/client/src/main/java/glide/api/models/commands/SortClusterOptions.java new file mode 100644 index 0000000000..8b87309135 --- /dev/null +++ b/java/client/src/main/java/glide/api/models/commands/SortClusterOptions.java @@ -0,0 +1,13 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.commands; + +import glide.api.commands.GenericBaseCommands; +import lombok.experimental.SuperBuilder; + +/** + * Optional arguments to {@link GenericBaseCommands#sort(String, SortClusterOptions)}, {@link + * GenericBaseCommands#sortReadOnly(String, SortClusterOptions)}, and {@link + * GenericBaseCommands#sortStore(String, String, SortClusterOptions)} + */ +@SuperBuilder +public class SortClusterOptions extends SortBaseOptions {} diff --git a/java/client/src/main/java/glide/api/models/commands/SortOptions.java b/java/client/src/main/java/glide/api/models/commands/SortOptions.java new file mode 100644 index 0000000000..74edad9908 --- /dev/null +++ b/java/client/src/main/java/glide/api/models/commands/SortOptions.java @@ -0,0 +1,77 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.commands; + +import glide.api.commands.GenericCommands; +import java.util.ArrayList; +import java.util.List; +import lombok.Singular; +import lombok.experimental.SuperBuilder; + +/** + * Optional arguments to {@link GenericCommands#sort(String, SortOptions)}, {@link + * 
GenericCommands#sortReadOnly(String, SortOptions)}, and {@link GenericCommands#sortStore(String, + * String, SortOptions)} + * + * @see redis.io and redis.io + */ +@SuperBuilder +public class SortOptions extends SortBaseOptions { + /** + * BY subcommand string to include in the SORT and SORT_RO + * commands. + */ + public static final String BY_COMMAND_STRING = "BY"; + + /** + * GET subcommand string to include in the SORT and SORT_RO + * commands. + */ + public static final String GET_COMMAND_STRING = "GET"; + + /** + * A pattern to sort by external keys instead of by the elements stored at the key themselves. The + * pattern should contain an asterisk (*) as a placeholder for the element values, where the value + * from the key replaces the asterisk to create the key name. For example, if key + * contains IDs of objects, byPattern can be used to sort these IDs based on an + * attribute of the objects, like their weights or timestamps. + */ + private final String byPattern; + + /** + * A pattern used to retrieve external keys' values, instead of the elements at key. + * The pattern should contain an asterisk (*) as a placeholder for the element values, where the + * value from key replaces the asterisk to create the key name. This + * allows the sorted elements to be transformed based on the related keys values. For example, if + * key contains IDs of users, getPatterns can be used to retrieve + * specific attributes of these users, such as their names or email addresses. E.g., if + * getPatterns is name_*, the command will return the values of the keys + * name_<element> for each sorted element. Multiple getPatterns + * arguments can be provided to retrieve multiple attributes. The special value # can + * be used to include the actual element from key being sorted. If not provided, only + * the sorted elements themselves are returned.
        + * + * @see valkey.io for more information. + */ + @Singular private final List getPatterns; + + /** + * Creates the arguments to be used in SORT and SORT_RO commands. + * + * @return a String array that holds the sub commands and their arguments. + */ + public String[] toArgs() { + List optionArgs = new ArrayList<>(List.of(super.toArgs())); + + if (byPattern != null) { + optionArgs.addAll(List.of(BY_COMMAND_STRING, byPattern)); + } + + if (getPatterns != null) { + getPatterns.stream() + .forEach(getPattern -> optionArgs.addAll(List.of(GET_COMMAND_STRING, getPattern))); + } + + return optionArgs.toArray(new String[0]); + } +} diff --git a/java/client/src/main/java/glide/api/models/commands/WeightAggregateOptions.java b/java/client/src/main/java/glide/api/models/commands/WeightAggregateOptions.java index 2148af618b..b29f15ea89 100644 --- a/java/client/src/main/java/glide/api/models/commands/WeightAggregateOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/WeightAggregateOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import static glide.utils.ArrayTransformUtils.concatenateArrays; diff --git a/java/client/src/main/java/glide/api/models/commands/ZAddOptions.java b/java/client/src/main/java/glide/api/models/commands/ZAddOptions.java index 09d5e0e2f4..1991f754c4 100644 --- a/java/client/src/main/java/glide/api/models/commands/ZAddOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/ZAddOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands; import glide.api.commands.SortedSetBaseCommands; diff --git 
a/java/client/src/main/java/glide/api/models/commands/bitmap/BitFieldOptions.java b/java/client/src/main/java/glide/api/models/commands/bitmap/BitFieldOptions.java index 743ef17b15..aec3b2b485 100644 --- a/java/client/src/main/java/glide/api/models/commands/bitmap/BitFieldOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/bitmap/BitFieldOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.bitmap; import static glide.utils.ArrayTransformUtils.concatenateArrays; diff --git a/java/client/src/main/java/glide/api/models/commands/bitmap/BitmapIndexType.java b/java/client/src/main/java/glide/api/models/commands/bitmap/BitmapIndexType.java index 2b685eac32..21dfc4127a 100644 --- a/java/client/src/main/java/glide/api/models/commands/bitmap/BitmapIndexType.java +++ b/java/client/src/main/java/glide/api/models/commands/bitmap/BitmapIndexType.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.bitmap; import glide.api.commands.BitmapBaseCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/bitmap/BitwiseOperation.java b/java/client/src/main/java/glide/api/models/commands/bitmap/BitwiseOperation.java index 156b6bb556..929a3a6c56 100644 --- a/java/client/src/main/java/glide/api/models/commands/bitmap/BitwiseOperation.java +++ b/java/client/src/main/java/glide/api/models/commands/bitmap/BitwiseOperation.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.bitmap; import glide.api.commands.BitmapBaseCommands; diff --git 
a/java/client/src/main/java/glide/api/models/commands/function/FunctionListOptions.java b/java/client/src/main/java/glide/api/models/commands/function/FunctionListOptions.java index 6cac32fbac..07914eaa8d 100644 --- a/java/client/src/main/java/glide/api/models/commands/function/FunctionListOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/function/FunctionListOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.function; import glide.api.commands.ScriptingAndFunctionsClusterCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/function/FunctionLoadOptions.java b/java/client/src/main/java/glide/api/models/commands/function/FunctionLoadOptions.java index cdf2648c0e..0897e7fe8d 100644 --- a/java/client/src/main/java/glide/api/models/commands/function/FunctionLoadOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/function/FunctionLoadOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.function; /** diff --git a/java/client/src/main/java/glide/api/models/commands/function/FunctionRestorePolicy.java b/java/client/src/main/java/glide/api/models/commands/function/FunctionRestorePolicy.java new file mode 100644 index 0000000000..3c4aeb23f8 --- /dev/null +++ b/java/client/src/main/java/glide/api/models/commands/function/FunctionRestorePolicy.java @@ -0,0 +1,30 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.commands.function; + +import glide.api.commands.ScriptingAndFunctionsClusterCommands; +import glide.api.commands.ScriptingAndFunctionsCommands; +import 
glide.api.models.configuration.RequestRoutingConfiguration.Route; + +/** + * Option for FUNCTION RESTORE command: {@link + * ScriptingAndFunctionsCommands#functionRestore(byte[], FunctionRestorePolicy)}, {@link + * ScriptingAndFunctionsClusterCommands#functionRestore(byte[], FunctionRestorePolicy)}, and {@link + * ScriptingAndFunctionsClusterCommands#functionRestore(byte[], FunctionRestorePolicy, Route)}. + * + * @see redis.io for details. + */ +public enum FunctionRestorePolicy { + /** + * Appends the restored libraries to the existing libraries and aborts on collision. This is the + * default policy. + */ + APPEND, + /** Deletes all existing libraries before restoring the payload. */ + FLUSH, + /** + * Appends the restored libraries to the existing libraries, replacing any existing ones in case + * of name collisions. Note that this policy doesn't prevent function name collisions, only + * libraries. + */ + REPLACE +} diff --git a/java/client/src/main/java/glide/api/models/commands/geospatial/GeoAddOptions.java b/java/client/src/main/java/glide/api/models/commands/geospatial/GeoAddOptions.java index 44ebf18147..9fc2dc46ae 100644 --- a/java/client/src/main/java/glide/api/models/commands/geospatial/GeoAddOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/geospatial/GeoAddOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.geospatial; import glide.api.commands.GeospatialIndicesBaseCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/geospatial/GeoUnit.java b/java/client/src/main/java/glide/api/models/commands/geospatial/GeoUnit.java index c5120e50b8..4bf6239e6e 100644 --- a/java/client/src/main/java/glide/api/models/commands/geospatial/GeoUnit.java +++ b/java/client/src/main/java/glide/api/models/commands/geospatial/GeoUnit.java @@ -1,4 +1,4 @@ -/** 
Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.geospatial; import glide.api.commands.GeospatialIndicesBaseCommands; diff --git a/java/client/src/main/java/glide/api/models/commands/geospatial/GeospatialData.java b/java/client/src/main/java/glide/api/models/commands/geospatial/GeospatialData.java index bb8bf39fba..636cfd3df9 100644 --- a/java/client/src/main/java/glide/api/models/commands/geospatial/GeospatialData.java +++ b/java/client/src/main/java/glide/api/models/commands/geospatial/GeospatialData.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.geospatial; import lombok.Getter; diff --git a/java/client/src/main/java/glide/api/models/commands/stream/StreamAddOptions.java b/java/client/src/main/java/glide/api/models/commands/stream/StreamAddOptions.java index 91c8bc03ab..daeecdd570 100644 --- a/java/client/src/main/java/glide/api/models/commands/stream/StreamAddOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/stream/StreamAddOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.stream; import glide.api.commands.StreamBaseCommands; @@ -10,7 +10,7 @@ /** * Optional arguments to {@link StreamBaseCommands#xadd(String, Map, StreamAddOptions)} * - * @see redis.io + * @see valkey.io */ @Builder public final class StreamAddOptions { diff --git a/java/client/src/main/java/glide/api/models/commands/stream/StreamGroupOptions.java b/java/client/src/main/java/glide/api/models/commands/stream/StreamGroupOptions.java new file mode 100644 index 0000000000..7f1c41d0bc 
--- /dev/null +++ b/java/client/src/main/java/glide/api/models/commands/stream/StreamGroupOptions.java @@ -0,0 +1,67 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.commands.stream; + +import glide.api.commands.StreamBaseCommands; +import java.util.ArrayList; +import java.util.List; +import lombok.Builder; + +/** + * Optional arguments for {@link StreamBaseCommands#xgroupCreate(String, String, String, + * StreamGroupOptions)} + * + * @see valkey.io + */ +@Builder +public final class StreamGroupOptions { + + // Redis API String argument for makeStream + public static final String MAKE_STREAM_REDIS_API = "MKSTREAM"; + + // Redis API String argument for entriesRead + public static final String ENTRIES_READ_REDIS_API = "ENTRIESREAD"; + + /** + * If true and the stream doesn't exist, creates a new stream with a length of + * 0. + */ + @Builder.Default private boolean mkStream = false; + + public static class StreamGroupOptionsBuilder { + + /** If the stream doesn't exist, this creates a new stream with a length of 0. */ + public StreamGroupOptionsBuilder makeStream() { + return mkStream(true); + } + } + + /** + * An arbitrary ID (that isn't the first ID, last ID, or the zero "0-0". Use it to + * find out how many entries are between the arbitrary ID (excluding it) and the stream's last + * entry. + * + * @since Redis 7.0.0 + */ + private String entriesRead; + + /** + * Converts options and the key-to-id input for {@link StreamBaseCommands#xgroupCreate(String, + * String, String, StreamGroupOptions)} into a String[]. 
+ * + * @return String[] + */ + public String[] toArgs() { + List optionArgs = new ArrayList<>(); + + if (this.mkStream) { + optionArgs.add(MAKE_STREAM_REDIS_API); + } + + if (this.entriesRead != null) { + optionArgs.add(ENTRIES_READ_REDIS_API); + optionArgs.add(this.entriesRead); + } + + return optionArgs.toArray(new String[0]); + } +} diff --git a/java/client/src/main/java/glide/api/models/commands/stream/StreamPendingOptions.java b/java/client/src/main/java/glide/api/models/commands/stream/StreamPendingOptions.java new file mode 100644 index 0000000000..5cdf9ea4c6 --- /dev/null +++ b/java/client/src/main/java/glide/api/models/commands/stream/StreamPendingOptions.java @@ -0,0 +1,49 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.commands.stream; + +import glide.api.commands.StreamBaseCommands; +import java.util.ArrayList; +import java.util.List; +import lombok.Builder; + +/** + * Arguments for {@link StreamBaseCommands#xpending} to specify additional filter items by idle time + * and consumer. 
+ * + * @see valkey.io + */ +@Builder +public class StreamPendingOptions { + + /** Redis api string to designate IDLE or minimum idle time */ + public static final String IDLE_TIME_REDIS_API = "IDLE"; + + /** Filters pending entries by their idle time - in Milliseconds */ + private final Long minIdleTime; // Milliseconds + + /** Filters pending entries by consumer */ + private final String consumer; + + /** + * Convert StreamPendingOptions arguments to a string array + * + * @return arguments converted to an array to be consumed by Redis + */ + public String[] toArgs(StreamRange start, StreamRange end, long count) { + List optionArgs = new ArrayList<>(); + if (minIdleTime != null) { + optionArgs.add(IDLE_TIME_REDIS_API); + optionArgs.add(Long.toString(minIdleTime)); + } + + optionArgs.add(start.getRedisApi()); + optionArgs.add(end.getRedisApi()); + optionArgs.add(Long.toString(count)); + + if (consumer != null) { + optionArgs.add(consumer); + } + + return optionArgs.toArray(new String[0]); + } +} diff --git a/java/client/src/main/java/glide/api/models/commands/stream/StreamRange.java b/java/client/src/main/java/glide/api/models/commands/stream/StreamRange.java index 6a32d35ca8..d3c39308e2 100644 --- a/java/client/src/main/java/glide/api/models/commands/stream/StreamRange.java +++ b/java/client/src/main/java/glide/api/models/commands/stream/StreamRange.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.stream; import glide.utils.ArrayTransformUtils; @@ -10,17 +10,25 @@ * glide.api.commands.StreamBaseCommands#xrevrange} to specify the starting and ending range for the * stream search by stream ID. 
* - * @see redis.io - * @see redis.io + * @see valkey.io + * @see valkey.io */ public interface StreamRange { String getRedisApi(); + /** Redis API string for MINIMUM entry ID range bounds */ String MINIMUM_RANGE_REDIS_API = "-"; + + /** Redis API string for MAXIMUM entry ID range bounds */ String MAXIMUM_RANGE_REDIS_API = "+"; + + /** Redis API string to designate COUNT */ String RANGE_COUNT_REDIS_API = "COUNT"; + /** Redis API character to designate exclusive range bounds */ + String EXCLUSIVE_RANGE_REDIS_API = "("; + /** * Enumeration representing minimum or maximum stream entry bounds for the range search, to get * the first or last stream ID. @@ -80,7 +88,7 @@ public static IdBound of(long timestamp) { * @param timestamp The stream timestamp as ID. */ public static IdBound ofExclusive(long timestamp) { - return new IdBound("(" + timestamp); + return new IdBound(EXCLUSIVE_RANGE_REDIS_API + timestamp); } /** @@ -89,7 +97,7 @@ public static IdBound ofExclusive(long timestamp) { * @param id The stream id. 
*/ public static IdBound ofExclusive(String id) { - return new IdBound("(" + id); + return new IdBound(EXCLUSIVE_RANGE_REDIS_API + id); } } diff --git a/java/client/src/main/java/glide/api/models/commands/stream/StreamReadGroupOptions.java b/java/client/src/main/java/glide/api/models/commands/stream/StreamReadGroupOptions.java new file mode 100644 index 0000000000..2c7728dabf --- /dev/null +++ b/java/client/src/main/java/glide/api/models/commands/stream/StreamReadGroupOptions.java @@ -0,0 +1,72 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models.commands.stream; + +import glide.api.commands.StreamBaseCommands; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import lombok.experimental.SuperBuilder; + +/** + * Optional arguments for {@link StreamBaseCommands#xreadgroup(Map, String, String, + * StreamReadGroupOptions)} + * + * @see redis.io + */ +@SuperBuilder +public final class StreamReadGroupOptions extends StreamReadOptions { + + public static final String READ_GROUP_REDIS_API = "GROUP"; + public static final String READ_NOACK_REDIS_API = "NOACK"; + + /** + * If set, messages are not added to the Pending Entries List (PEL). This is equivalent to + * acknowledging the message when it is read. + */ + private boolean noack; + + public abstract static class StreamReadGroupOptionsBuilder< + C extends StreamReadGroupOptions, B extends StreamReadGroupOptionsBuilder> + extends StreamReadOptions.StreamReadOptionsBuilder { + public B noack() { + this.noack = true; + return self(); + } + } + + /** + * Converts options and the key-to-id input for {@link StreamBaseCommands#xreadgroup(Map, String, + * String, StreamReadGroupOptions)} into a String[]. 
+ * + * @return String[] + */ + public String[] toArgs(String group, String consumer, Map streams) { + List optionArgs = new ArrayList<>(); + optionArgs.add(READ_GROUP_REDIS_API); + optionArgs.add(group); + optionArgs.add(consumer); + + if (this.count != null) { + optionArgs.add(READ_COUNT_REDIS_API); + optionArgs.add(count.toString()); + } + + if (this.block != null) { + optionArgs.add(READ_BLOCK_REDIS_API); + optionArgs.add(block.toString()); + } + + if (this.noack) { + optionArgs.add(READ_NOACK_REDIS_API); + } + + optionArgs.add(READ_STREAMS_REDIS_API); + Set> entrySet = streams.entrySet(); + optionArgs.addAll(entrySet.stream().map(Map.Entry::getKey).collect(Collectors.toList())); + optionArgs.addAll(entrySet.stream().map(Map.Entry::getValue).collect(Collectors.toList())); + + return optionArgs.toArray(new String[0]); + } +} diff --git a/java/client/src/main/java/glide/api/models/commands/stream/StreamReadOptions.java b/java/client/src/main/java/glide/api/models/commands/stream/StreamReadOptions.java index 7baad14121..935ca1c225 100644 --- a/java/client/src/main/java/glide/api/models/commands/stream/StreamReadOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/stream/StreamReadOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.stream; import glide.api.commands.StreamBaseCommands; @@ -7,15 +7,15 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; -import lombok.Builder; +import lombok.experimental.SuperBuilder; /** * Optional arguments for {@link StreamBaseCommands#xread(Map, StreamReadOptions)} * - * @see redis.io + * @see valkey.io */ -@Builder -public final class StreamReadOptions { +@SuperBuilder +public class StreamReadOptions { public static final String READ_COUNT_REDIS_API = "COUNT"; public static final String READ_BLOCK_REDIS_API 
= "BLOCK"; @@ -25,12 +25,12 @@ public final class StreamReadOptions { * If set, the request will be blocked for the set amount of milliseconds or until the server has * the required number of entries. Equivalent to BLOCK in the Redis API. */ - Long block; + protected Long block; /** * The maximal number of elements requested. Equivalent to COUNT in the Redis API. */ - Long count; + protected Long count; /** * Converts options and the key-to-id input for {@link StreamBaseCommands#xread(Map, diff --git a/java/client/src/main/java/glide/api/models/commands/stream/StreamTrimOptions.java b/java/client/src/main/java/glide/api/models/commands/stream/StreamTrimOptions.java index 4db11aff69..6aef46609c 100644 --- a/java/client/src/main/java/glide/api/models/commands/stream/StreamTrimOptions.java +++ b/java/client/src/main/java/glide/api/models/commands/stream/StreamTrimOptions.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.commands.stream; import glide.api.commands.StreamBaseCommands; @@ -9,7 +9,7 @@ /** * Optional arguments for {@link StreamBaseCommands#xtrim(String, StreamTrimOptions)} * - * @see redis.io + * @see valkey.io */ public abstract class StreamTrimOptions { diff --git a/java/client/src/main/java/glide/api/models/configuration/BackoffStrategy.java b/java/client/src/main/java/glide/api/models/configuration/BackoffStrategy.java index 38d18c0286..45f04e986d 100644 --- a/java/client/src/main/java/glide/api/models/configuration/BackoffStrategy.java +++ b/java/client/src/main/java/glide/api/models/configuration/BackoffStrategy.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.configuration; import lombok.Builder; diff --git 
a/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java index f22002f183..f1fe8319a4 100644 --- a/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/BaseClientConfiguration.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.configuration; import glide.connectors.resources.ThreadPoolResource; diff --git a/java/client/src/main/java/glide/api/models/configuration/NodeAddress.java b/java/client/src/main/java/glide/api/models/configuration/NodeAddress.java index c52f70911d..97b90c8d01 100644 --- a/java/client/src/main/java/glide/api/models/configuration/NodeAddress.java +++ b/java/client/src/main/java/glide/api/models/configuration/NodeAddress.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.configuration; import lombok.Builder; diff --git a/java/client/src/main/java/glide/api/models/configuration/ReadFrom.java b/java/client/src/main/java/glide/api/models/configuration/ReadFrom.java index d7510718af..2d80ae7b60 100644 --- a/java/client/src/main/java/glide/api/models/configuration/ReadFrom.java +++ b/java/client/src/main/java/glide/api/models/configuration/ReadFrom.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.configuration; /** Represents the client's read from strategy. 
*/ diff --git a/java/client/src/main/java/glide/api/models/configuration/RedisClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/RedisClientConfiguration.java index 6edab11c5d..cd25d262f9 100644 --- a/java/client/src/main/java/glide/api/models/configuration/RedisClientConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/RedisClientConfiguration.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.configuration; import lombok.Getter; diff --git a/java/client/src/main/java/glide/api/models/configuration/RedisClusterClientConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/RedisClusterClientConfiguration.java index 0335bd75c2..3b36709f11 100644 --- a/java/client/src/main/java/glide/api/models/configuration/RedisClusterClientConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/RedisClusterClientConfiguration.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.configuration; import lombok.experimental.SuperBuilder; diff --git a/java/client/src/main/java/glide/api/models/configuration/RedisCredentials.java b/java/client/src/main/java/glide/api/models/configuration/RedisCredentials.java index ac72031c4f..c6272dfcde 100644 --- a/java/client/src/main/java/glide/api/models/configuration/RedisCredentials.java +++ b/java/client/src/main/java/glide/api/models/configuration/RedisCredentials.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.configuration; import lombok.Builder; diff --git 
a/java/client/src/main/java/glide/api/models/configuration/RequestRoutingConfiguration.java b/java/client/src/main/java/glide/api/models/configuration/RequestRoutingConfiguration.java index bea04a7e75..023e179f8c 100644 --- a/java/client/src/main/java/glide/api/models/configuration/RequestRoutingConfiguration.java +++ b/java/client/src/main/java/glide/api/models/configuration/RequestRoutingConfiguration.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.configuration; import glide.api.models.exceptions.RequestException; @@ -31,7 +31,12 @@ public interface MultiNodeRoute extends Route {} @RequiredArgsConstructor @Getter public enum SimpleSingleNodeRoute implements SingleNodeRoute { - /** Route request to a random node. */ + /** + * Route request to a random node.
        + * Warning
        + * Don't use it with write commands, because they could be randomly routed to a replica (RO) + * node and fail. + */ RANDOM(2); private final int ordinal; @@ -40,7 +45,10 @@ public enum SimpleSingleNodeRoute implements SingleNodeRoute { @RequiredArgsConstructor @Getter public enum SimpleMultiNodeRoute implements MultiNodeRoute { - /** Route request to all nodes. */ + /** + * Route request to all nodes. Warning
        + * Don't use it with write commands, they could be routed to a replica (RO) node and fail. + */ ALL_NODES(0), /** Route request to all primary nodes. */ ALL_PRIMARIES(1); diff --git a/java/client/src/main/java/glide/api/models/exceptions/ClosingException.java b/java/client/src/main/java/glide/api/models/exceptions/ClosingException.java index bf2ae85728..f9758abeb8 100644 --- a/java/client/src/main/java/glide/api/models/exceptions/ClosingException.java +++ b/java/client/src/main/java/glide/api/models/exceptions/ClosingException.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.exceptions; /** Redis client error: Errors that report that the client has closed and is no longer usable. */ diff --git a/java/client/src/main/java/glide/api/models/exceptions/ConnectionException.java b/java/client/src/main/java/glide/api/models/exceptions/ConnectionException.java index c6416464ee..b2dd44b05e 100644 --- a/java/client/src/main/java/glide/api/models/exceptions/ConnectionException.java +++ b/java/client/src/main/java/glide/api/models/exceptions/ConnectionException.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.exceptions; /** diff --git a/java/client/src/main/java/glide/api/models/exceptions/ExecAbortException.java b/java/client/src/main/java/glide/api/models/exceptions/ExecAbortException.java index 06c593d93c..ed58c19205 100644 --- a/java/client/src/main/java/glide/api/models/exceptions/ExecAbortException.java +++ b/java/client/src/main/java/glide/api/models/exceptions/ExecAbortException.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX 
Identifier: Apache-2.0 */ package glide.api.models.exceptions; /** Redis client error: Errors that are thrown when a transaction is aborted. */ diff --git a/java/client/src/main/java/glide/api/models/exceptions/RedisException.java b/java/client/src/main/java/glide/api/models/exceptions/RedisException.java index bb03b7c90b..a4a712e0ac 100644 --- a/java/client/src/main/java/glide/api/models/exceptions/RedisException.java +++ b/java/client/src/main/java/glide/api/models/exceptions/RedisException.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.exceptions; /** Redis client error: Base class for errors. */ diff --git a/java/client/src/main/java/glide/api/models/exceptions/RequestException.java b/java/client/src/main/java/glide/api/models/exceptions/RequestException.java index 420da9c4a2..7b2210217c 100644 --- a/java/client/src/main/java/glide/api/models/exceptions/RequestException.java +++ b/java/client/src/main/java/glide/api/models/exceptions/RequestException.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.exceptions; /** Redis client error: Errors that were reported during a request. 
*/ diff --git a/java/client/src/main/java/glide/api/models/exceptions/TimeoutException.java b/java/client/src/main/java/glide/api/models/exceptions/TimeoutException.java index e8be0cd4ae..8cf11dc46c 100644 --- a/java/client/src/main/java/glide/api/models/exceptions/TimeoutException.java +++ b/java/client/src/main/java/glide/api/models/exceptions/TimeoutException.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models.exceptions; /** Redis client error: Errors that are thrown when a request times out. */ diff --git a/java/client/src/main/java/glide/connectors/handlers/CallbackDispatcher.java b/java/client/src/main/java/glide/connectors/handlers/CallbackDispatcher.java index 6c5e86e2d2..1ba510a285 100644 --- a/java/client/src/main/java/glide/connectors/handlers/CallbackDispatcher.java +++ b/java/client/src/main/java/glide/connectors/handlers/CallbackDispatcher.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.handlers; import glide.api.models.exceptions.ClosingException; diff --git a/java/client/src/main/java/glide/connectors/handlers/ChannelHandler.java b/java/client/src/main/java/glide/connectors/handlers/ChannelHandler.java index 4800316803..af9507b867 100644 --- a/java/client/src/main/java/glide/connectors/handlers/ChannelHandler.java +++ b/java/client/src/main/java/glide/connectors/handlers/ChannelHandler.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.handlers; import connection_request.ConnectionRequestOuterClass.ConnectionRequest; diff --git 
a/java/client/src/main/java/glide/connectors/handlers/ProtobufSocketChannelInitializer.java b/java/client/src/main/java/glide/connectors/handlers/ProtobufSocketChannelInitializer.java index a52894cf2c..8d56a479e8 100644 --- a/java/client/src/main/java/glide/connectors/handlers/ProtobufSocketChannelInitializer.java +++ b/java/client/src/main/java/glide/connectors/handlers/ProtobufSocketChannelInitializer.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.handlers; import io.netty.channel.ChannelInitializer; diff --git a/java/client/src/main/java/glide/connectors/handlers/ReadHandler.java b/java/client/src/main/java/glide/connectors/handlers/ReadHandler.java index 29b7f4c01b..95f4a2a745 100644 --- a/java/client/src/main/java/glide/connectors/handlers/ReadHandler.java +++ b/java/client/src/main/java/glide/connectors/handlers/ReadHandler.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.handlers; import io.netty.channel.ChannelHandlerContext; diff --git a/java/client/src/main/java/glide/connectors/resources/EpollResource.java b/java/client/src/main/java/glide/connectors/resources/EpollResource.java index ead5b53e50..642074d682 100644 --- a/java/client/src/main/java/glide/connectors/resources/EpollResource.java +++ b/java/client/src/main/java/glide/connectors/resources/EpollResource.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.resources; import io.netty.channel.epoll.EpollDomainSocketChannel; diff --git a/java/client/src/main/java/glide/connectors/resources/KQueuePoolResource.java 
b/java/client/src/main/java/glide/connectors/resources/KQueuePoolResource.java index 53e9623515..5cacf80a01 100644 --- a/java/client/src/main/java/glide/connectors/resources/KQueuePoolResource.java +++ b/java/client/src/main/java/glide/connectors/resources/KQueuePoolResource.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.resources; import io.netty.channel.kqueue.KQueueDomainSocketChannel; diff --git a/java/client/src/main/java/glide/connectors/resources/Platform.java b/java/client/src/main/java/glide/connectors/resources/Platform.java index 8846f0478e..9efc8cf8ad 100644 --- a/java/client/src/main/java/glide/connectors/resources/Platform.java +++ b/java/client/src/main/java/glide/connectors/resources/Platform.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.resources; import io.netty.channel.epoll.Epoll; diff --git a/java/client/src/main/java/glide/connectors/resources/ThreadPoolResource.java b/java/client/src/main/java/glide/connectors/resources/ThreadPoolResource.java index b02380158b..772ff66e23 100644 --- a/java/client/src/main/java/glide/connectors/resources/ThreadPoolResource.java +++ b/java/client/src/main/java/glide/connectors/resources/ThreadPoolResource.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.resources; import io.netty.channel.EventLoopGroup; diff --git a/java/client/src/main/java/glide/connectors/resources/ThreadPoolResourceAllocator.java b/java/client/src/main/java/glide/connectors/resources/ThreadPoolResourceAllocator.java index cf5629ccba..4054b17697 100644 
--- a/java/client/src/main/java/glide/connectors/resources/ThreadPoolResourceAllocator.java +++ b/java/client/src/main/java/glide/connectors/resources/ThreadPoolResourceAllocator.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.resources; import java.util.function.Supplier; diff --git a/java/client/src/main/java/glide/ffi/resolvers/NativeUtils.java b/java/client/src/main/java/glide/ffi/resolvers/NativeUtils.java index 1f66d25a7e..c799c67672 100644 --- a/java/client/src/main/java/glide/ffi/resolvers/NativeUtils.java +++ b/java/client/src/main/java/glide/ffi/resolvers/NativeUtils.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.ffi.resolvers; import java.io.*; diff --git a/java/client/src/main/java/glide/ffi/resolvers/RedisValueResolver.java b/java/client/src/main/java/glide/ffi/resolvers/RedisValueResolver.java index 4aaa4a3123..8b8b8cc000 100644 --- a/java/client/src/main/java/glide/ffi/resolvers/RedisValueResolver.java +++ b/java/client/src/main/java/glide/ffi/resolvers/RedisValueResolver.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.ffi.resolvers; import response.ResponseOuterClass.Response; diff --git a/java/client/src/main/java/glide/ffi/resolvers/ScriptResolver.java b/java/client/src/main/java/glide/ffi/resolvers/ScriptResolver.java index 7b6df0c4ac..31d276ebb9 100644 --- a/java/client/src/main/java/glide/ffi/resolvers/ScriptResolver.java +++ b/java/client/src/main/java/glide/ffi/resolvers/ScriptResolver.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: 
Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.ffi.resolvers; public class ScriptResolver { diff --git a/java/client/src/main/java/glide/ffi/resolvers/SocketListenerResolver.java b/java/client/src/main/java/glide/ffi/resolvers/SocketListenerResolver.java index fb897a5da3..0cb3bf613a 100644 --- a/java/client/src/main/java/glide/ffi/resolvers/SocketListenerResolver.java +++ b/java/client/src/main/java/glide/ffi/resolvers/SocketListenerResolver.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.ffi.resolvers; public class SocketListenerResolver { diff --git a/java/client/src/main/java/glide/managers/BaseCommandResponseResolver.java b/java/client/src/main/java/glide/managers/BaseCommandResponseResolver.java index f9b7ed87ab..7439a0bc7d 100644 --- a/java/client/src/main/java/glide/managers/BaseCommandResponseResolver.java +++ b/java/client/src/main/java/glide/managers/BaseCommandResponseResolver.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.managers; import static glide.api.BaseClient.OK; diff --git a/java/client/src/main/java/glide/managers/CommandManager.java b/java/client/src/main/java/glide/managers/CommandManager.java index 214a819016..1dd41e7bba 100644 --- a/java/client/src/main/java/glide/managers/CommandManager.java +++ b/java/client/src/main/java/glide/managers/CommandManager.java @@ -1,8 +1,9 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.managers; import com.google.protobuf.ByteString; import glide.api.models.ClusterTransaction; +import 
glide.api.models.GlideString; import glide.api.models.Script; import glide.api.models.Transaction; import glide.api.models.configuration.RequestRoutingConfiguration.ByAddressRoute; @@ -67,7 +68,7 @@ public CompletableFuture submitNewCommand( */ public CompletableFuture submitNewCommand( RequestType requestType, - List arguments, + GlideString[] arguments, RedisExceptionCheckedFunction responseHandler) { RedisRequest.Builder command = prepareRedisRequest(requestType, arguments); @@ -104,7 +105,7 @@ public CompletableFuture submitNewCommand( */ public CompletableFuture submitNewCommand( RequestType requestType, - List arguments, + GlideString[] arguments, Route route, RedisExceptionCheckedFunction responseHandler) { @@ -223,10 +224,10 @@ protected RedisRequest.Builder prepareRedisRequest( * adding a callback id. */ protected RedisRequest.Builder prepareRedisRequest( - RequestType requestType, List arguments, Route route) { + RequestType requestType, GlideString[] arguments, Route route) { ArgsArray.Builder commandArgs = ArgsArray.newBuilder(); for (var arg : arguments) { - commandArgs.addArgs(ByteString.copyFrom(arg)); + commandArgs.addArgs(ByteString.copyFrom(arg.getBytes())); } var builder = @@ -319,10 +320,10 @@ protected RedisRequest.Builder prepareRedisRequest(RequestType requestType, Stri * adding a callback id. 
*/ protected RedisRequest.Builder prepareRedisRequest( - RequestType requestType, List arguments) { + RequestType requestType, GlideString[] arguments) { ArgsArray.Builder commandArgs = ArgsArray.newBuilder(); for (var arg : arguments) { - commandArgs.addArgs(ByteString.copyFrom(arg)); + commandArgs.addArgs(ByteString.copyFrom(arg.getBytes())); } return RedisRequest.newBuilder() diff --git a/java/client/src/main/java/glide/managers/ConnectionManager.java b/java/client/src/main/java/glide/managers/ConnectionManager.java index d9a8f58574..79e3252884 100644 --- a/java/client/src/main/java/glide/managers/ConnectionManager.java +++ b/java/client/src/main/java/glide/managers/ConnectionManager.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.managers; import connection_request.ConnectionRequestOuterClass; diff --git a/java/client/src/main/java/glide/managers/RedisExceptionCheckedFunction.java b/java/client/src/main/java/glide/managers/RedisExceptionCheckedFunction.java index 03312ec9a5..32000aed29 100644 --- a/java/client/src/main/java/glide/managers/RedisExceptionCheckedFunction.java +++ b/java/client/src/main/java/glide/managers/RedisExceptionCheckedFunction.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.managers; import glide.api.models.exceptions.RedisException; diff --git a/java/client/src/main/java/glide/utils/ArrayTransformUtils.java b/java/client/src/main/java/glide/utils/ArrayTransformUtils.java index a251693293..c4055d0027 100644 --- a/java/client/src/main/java/glide/utils/ArrayTransformUtils.java +++ b/java/client/src/main/java/glide/utils/ArrayTransformUtils.java @@ -1,10 +1,11 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: 
Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.utils; import glide.api.commands.GeospatialIndicesBaseCommands; import glide.api.models.commands.geospatial.GeospatialData; import java.lang.reflect.Array; import java.util.Arrays; +import java.util.HashMap; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -97,6 +98,27 @@ public static U[][] castArrayofArrays(T[] outerObjectArr, Class return (U[][]) castArray(convertedArr, Array.newInstance(clazz, 0).getClass()); } + /** + * Casts an Object[][][] to T[][][] by casting each nested array and + * every array element. + * + * @param outerObjectArr 3D array of objects to cast. + * @param clazz The class of the array elements to cast to. + * @return An array of arrays of type U, containing the elements from the input array. + * @param The base type from which the elements are being cast. + * @param The subtype of T to which the elements are cast. + */ + public static U[][][] cast3DArray(T[] outerObjectArr, Class clazz) { + if (outerObjectArr == null) { + return null; + } + T[] convertedArr = (T[]) new Object[outerObjectArr.length]; + for (int i = 0; i < outerObjectArr.length; i++) { + convertedArr[i] = (T) castArrayofArrays((T[]) outerObjectArr[i], clazz); + } + return (U[][][]) castArrayofArrays(convertedArr, Array.newInstance(clazz, 0).getClass()); + } + /** * Maps a Map of Arrays with value type T[] to value of U[]. 
* @@ -128,7 +150,10 @@ public static Map castMapOf2DArray( return null; } return mapOfArrays.entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> castArrayofArrays(e.getValue(), clazz))); + .collect( + HashMap::new, + (m, e) -> m.put(e.getKey(), castArrayofArrays(e.getValue(), clazz)), + HashMap::putAll); } /** diff --git a/java/client/src/test/java/glide/ExceptionHandlingTests.java b/java/client/src/test/java/glide/ExceptionHandlingTests.java index 43bdb224d0..eb7232ae3f 100644 --- a/java/client/src/test/java/glide/ExceptionHandlingTests.java +++ b/java/client/src/test/java/glide/ExceptionHandlingTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide; import static glide.ffi.resolvers.SocketListenerResolver.getSocket; diff --git a/java/client/src/test/java/glide/api/RedisClientCreateTest.java b/java/client/src/test/java/glide/api/RedisClientCreateTest.java index 04ab6d2a8a..ab6f3e9651 100644 --- a/java/client/src/test/java/glide/api/RedisClientCreateTest.java +++ b/java/client/src/test/java/glide/api/RedisClientCreateTest.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api; import static glide.api.BaseClient.buildChannelHandler; diff --git a/java/client/src/test/java/glide/api/RedisClientTest.java b/java/client/src/test/java/glide/api/RedisClientTest.java index dbccdd0ddc..e254d364ba 100644 --- a/java/client/src/test/java/glide/api/RedisClientTest.java +++ b/java/client/src/test/java/glide/api/RedisClientTest.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api; import static 
glide.api.BaseClient.OK; @@ -11,7 +11,12 @@ import static glide.api.commands.SortedSetBaseCommands.LIMIT_REDIS_API; import static glide.api.commands.SortedSetBaseCommands.WITH_SCORES_REDIS_API; import static glide.api.commands.SortedSetBaseCommands.WITH_SCORE_REDIS_API; +import static glide.api.commands.StringBaseCommands.IDX_COMMAND_STRING; +import static glide.api.commands.StringBaseCommands.LCS_MATCHES_RESULT_KEY; import static glide.api.commands.StringBaseCommands.LEN_REDIS_API; +import static glide.api.commands.StringBaseCommands.MINMATCHLEN_COMMAND_STRING; +import static glide.api.commands.StringBaseCommands.WITHMATCHLEN_COMMAND_STRING; +import static glide.api.models.GlideString.gs; import static glide.api.models.commands.FlushMode.ASYNC; import static glide.api.models.commands.FlushMode.SYNC; import static glide.api.models.commands.LInsertOptions.InsertPosition.BEFORE; @@ -19,6 +24,11 @@ import static glide.api.models.commands.SetOptions.ConditionalSet.ONLY_IF_DOES_NOT_EXIST; import static glide.api.models.commands.SetOptions.ConditionalSet.ONLY_IF_EXISTS; import static glide.api.models.commands.SetOptions.RETURN_OLD_VALUE; +import static glide.api.models.commands.SortBaseOptions.ALPHA_COMMAND_STRING; +import static glide.api.models.commands.SortBaseOptions.LIMIT_COMMAND_STRING; +import static glide.api.models.commands.SortBaseOptions.OrderBy.DESC; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; +import static glide.api.models.commands.SortOptions.BY_COMMAND_STRING; import static glide.api.models.commands.bitmap.BitFieldOptions.BitFieldOverflow.BitOverflowControl.SAT; import static glide.api.models.commands.bitmap.BitFieldOptions.GET_COMMAND_STRING; import static glide.api.models.commands.bitmap.BitFieldOptions.INCRBY_COMMAND_STRING; @@ -28,9 +38,15 @@ import static glide.api.models.commands.function.FunctionListOptions.WITH_CODE_REDIS_API; import static glide.api.models.commands.geospatial.GeoAddOptions.CHANGED_REDIS_API; 
import static glide.api.models.commands.stream.StreamAddOptions.NO_MAKE_STREAM_REDIS_API; +import static glide.api.models.commands.stream.StreamGroupOptions.ENTRIES_READ_REDIS_API; +import static glide.api.models.commands.stream.StreamGroupOptions.MAKE_STREAM_REDIS_API; +import static glide.api.models.commands.stream.StreamPendingOptions.IDLE_TIME_REDIS_API; +import static glide.api.models.commands.stream.StreamRange.EXCLUSIVE_RANGE_REDIS_API; import static glide.api.models.commands.stream.StreamRange.MAXIMUM_RANGE_REDIS_API; import static glide.api.models.commands.stream.StreamRange.MINIMUM_RANGE_REDIS_API; import static glide.api.models.commands.stream.StreamRange.RANGE_COUNT_REDIS_API; +import static glide.api.models.commands.stream.StreamReadGroupOptions.READ_GROUP_REDIS_API; +import static glide.api.models.commands.stream.StreamReadGroupOptions.READ_NOACK_REDIS_API; import static glide.api.models.commands.stream.StreamReadOptions.READ_BLOCK_REDIS_API; import static glide.api.models.commands.stream.StreamReadOptions.READ_COUNT_REDIS_API; import static glide.api.models.commands.stream.StreamReadOptions.READ_STREAMS_REDIS_API; @@ -44,6 +60,7 @@ import static glide.utils.ArrayTransformUtils.convertMapToValueKeyStringArray; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -77,18 +94,23 @@ import static redis_request.RedisRequestOuterClass.RequestType.Decr; import static redis_request.RedisRequestOuterClass.RequestType.DecrBy; import static redis_request.RedisRequestOuterClass.RequestType.Del; +import static redis_request.RedisRequestOuterClass.RequestType.Dump; import static redis_request.RedisRequestOuterClass.RequestType.Echo; import static 
redis_request.RedisRequestOuterClass.RequestType.Exists; import static redis_request.RedisRequestOuterClass.RequestType.Expire; import static redis_request.RedisRequestOuterClass.RequestType.ExpireAt; import static redis_request.RedisRequestOuterClass.RequestType.ExpireTime; import static redis_request.RedisRequestOuterClass.RequestType.FCall; +import static redis_request.RedisRequestOuterClass.RequestType.FCallReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.FlushAll; +import static redis_request.RedisRequestOuterClass.RequestType.FlushDB; import static redis_request.RedisRequestOuterClass.RequestType.FunctionDelete; +import static redis_request.RedisRequestOuterClass.RequestType.FunctionDump; import static redis_request.RedisRequestOuterClass.RequestType.FunctionFlush; import static redis_request.RedisRequestOuterClass.RequestType.FunctionKill; import static redis_request.RedisRequestOuterClass.RequestType.FunctionList; import static redis_request.RedisRequestOuterClass.RequestType.FunctionLoad; +import static redis_request.RedisRequestOuterClass.RequestType.FunctionRestore; import static redis_request.RedisRequestOuterClass.RequestType.FunctionStats; import static redis_request.RedisRequestOuterClass.RequestType.GeoAdd; import static redis_request.RedisRequestOuterClass.RequestType.GeoDist; @@ -97,6 +119,7 @@ import static redis_request.RedisRequestOuterClass.RequestType.Get; import static redis_request.RedisRequestOuterClass.RequestType.GetBit; import static redis_request.RedisRequestOuterClass.RequestType.GetDel; +import static redis_request.RedisRequestOuterClass.RequestType.GetEx; import static redis_request.RedisRequestOuterClass.RequestType.GetRange; import static redis_request.RedisRequestOuterClass.RequestType.HDel; import static redis_request.RedisRequestOuterClass.RequestType.HExists; @@ -152,8 +175,10 @@ import static redis_request.RedisRequestOuterClass.RequestType.RPop; import static 
redis_request.RedisRequestOuterClass.RequestType.RPush; import static redis_request.RedisRequestOuterClass.RequestType.RPushX; +import static redis_request.RedisRequestOuterClass.RequestType.RandomKey; import static redis_request.RedisRequestOuterClass.RequestType.Rename; import static redis_request.RedisRequestOuterClass.RequestType.RenameNX; +import static redis_request.RedisRequestOuterClass.RequestType.Restore; import static redis_request.RedisRequestOuterClass.RequestType.SAdd; import static redis_request.RedisRequestOuterClass.RequestType.SCard; import static redis_request.RedisRequestOuterClass.RequestType.SDiff; @@ -168,21 +193,33 @@ import static redis_request.RedisRequestOuterClass.RequestType.SPop; import static redis_request.RedisRequestOuterClass.RequestType.SRandMember; import static redis_request.RedisRequestOuterClass.RequestType.SRem; +import static redis_request.RedisRequestOuterClass.RequestType.SUnion; import static redis_request.RedisRequestOuterClass.RequestType.SUnionStore; import static redis_request.RedisRequestOuterClass.RequestType.Select; import static redis_request.RedisRequestOuterClass.RequestType.SetBit; import static redis_request.RedisRequestOuterClass.RequestType.SetRange; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.Strlen; import static redis_request.RedisRequestOuterClass.RequestType.TTL; import static redis_request.RedisRequestOuterClass.RequestType.Time; import static redis_request.RedisRequestOuterClass.RequestType.Touch; import static redis_request.RedisRequestOuterClass.RequestType.Type; +import static redis_request.RedisRequestOuterClass.RequestType.UnWatch; import static redis_request.RedisRequestOuterClass.RequestType.Unlink; +import static redis_request.RedisRequestOuterClass.RequestType.Watch; +import static 
redis_request.RedisRequestOuterClass.RequestType.XAck; import static redis_request.RedisRequestOuterClass.RequestType.XAdd; import static redis_request.RedisRequestOuterClass.RequestType.XDel; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupCreate; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupCreateConsumer; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupDelConsumer; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupDestroy; import static redis_request.RedisRequestOuterClass.RequestType.XLen; +import static redis_request.RedisRequestOuterClass.RequestType.XPending; import static redis_request.RedisRequestOuterClass.RequestType.XRange; import static redis_request.RedisRequestOuterClass.RequestType.XRead; +import static redis_request.RedisRequestOuterClass.RequestType.XReadGroup; import static redis_request.RedisRequestOuterClass.RequestType.XRevRange; import static redis_request.RedisRequestOuterClass.RequestType.XTrim; import static redis_request.RedisRequestOuterClass.RequestType.ZAdd; @@ -212,11 +249,13 @@ import static redis_request.RedisRequestOuterClass.RequestType.ZUnion; import static redis_request.RedisRequestOuterClass.RequestType.ZUnionStore; +import glide.api.models.GlideString; import glide.api.models.Script; import glide.api.models.Transaction; import glide.api.models.commands.ConditionalChange; import glide.api.models.commands.ExpireOptions; import glide.api.models.commands.FlushMode; +import glide.api.models.commands.GetExOptions; import glide.api.models.commands.InfoOptions; import glide.api.models.commands.LPosOptions; import glide.api.models.commands.ListDirection; @@ -228,10 +267,13 @@ import glide.api.models.commands.RangeOptions.RangeByLex; import glide.api.models.commands.RangeOptions.RangeByScore; import glide.api.models.commands.RangeOptions.ScoreBoundary; +import glide.api.models.commands.RestoreOptions; import glide.api.models.commands.ScoreFilter; 
import glide.api.models.commands.ScriptOptions; import glide.api.models.commands.SetOptions; import glide.api.models.commands.SetOptions.Expiry; +import glide.api.models.commands.SortBaseOptions; +import glide.api.models.commands.SortOptions; import glide.api.models.commands.WeightAggregateOptions.Aggregate; import glide.api.models.commands.WeightAggregateOptions.KeyArray; import glide.api.models.commands.WeightAggregateOptions.WeightedKeys; @@ -248,13 +290,17 @@ import glide.api.models.commands.bitmap.BitmapIndexType; import glide.api.models.commands.bitmap.BitwiseOperation; import glide.api.models.commands.function.FunctionLoadOptions; +import glide.api.models.commands.function.FunctionRestorePolicy; import glide.api.models.commands.geospatial.GeoAddOptions; import glide.api.models.commands.geospatial.GeoUnit; import glide.api.models.commands.geospatial.GeospatialData; import glide.api.models.commands.stream.StreamAddOptions; +import glide.api.models.commands.stream.StreamGroupOptions; +import glide.api.models.commands.stream.StreamPendingOptions; import glide.api.models.commands.stream.StreamRange; import glide.api.models.commands.stream.StreamRange.IdBound; import glide.api.models.commands.stream.StreamRange.InfRangeBound; +import glide.api.models.commands.stream.StreamReadGroupOptions; import glide.api.models.commands.stream.StreamReadOptions; import glide.api.models.commands.stream.StreamTrimOptions; import glide.api.models.commands.stream.StreamTrimOptions.MaxLen; @@ -365,6 +411,28 @@ public void echo_returns_success() { assertEquals(message, echo); } + @SneakyThrows + @Test + public void echo_binary_returns_success() { + // setup + GlideString message = gs("GLIDE FOR REDIS"); + GlideString[] arguments = new GlideString[] {message}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(message); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Echo), eq(arguments), any())) + .thenReturn(testResponse); 
+ + // exercise + CompletableFuture response = service.echo(message); + GlideString echo = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(message, echo); + } + @SneakyThrows @Test public void ping_returns_success() { @@ -470,6 +538,28 @@ public void unlink_returns_long_success() { assertEquals(numberUnlinked, result); } + @SneakyThrows + @Test + public void unlink_binary_returns_long_success() { + // setup + GlideString[] keys = new GlideString[] {gs("testKey1"), gs("testKey2")}; + Long numberUnlinked = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(numberUnlinked); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Unlink), eq(keys), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.unlink(keys); + Long result = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(numberUnlinked, result); + } + @SneakyThrows @Test public void get_returns_success() { @@ -510,6 +600,58 @@ public void getdel() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void getex() { + // setup + String key = "testKey"; + String value = "testValue"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + when(commandManager.submitNewCommand(eq(GetEx), eq(new String[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.getex(key); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + private static List getGetExOptions() { + return List.of( + Arguments.of( + // seconds + "test_with_seconds", GetExOptions.Seconds(10L), new String[] {"EX", "10"}), + Arguments.of( + // milliseconds + "test_with_milliseconds", + GetExOptions.Milliseconds(1000L), + new String[] {"PX", "1000"}), + Arguments.of( + // unix seconds + "test_with_unix_seconds", 
GetExOptions.UnixSeconds(10L), new String[] {"EXAT", "10"}), + Arguments.of( + // unix milliseconds + "test_with_unix_milliseconds", + GetExOptions.UnixMilliseconds(1000L), + new String[] {"PXAT", "1000"}), + Arguments.of( + // persist + "test_with_persist", GetExOptions.Persist(), new String[] {"PERSIST"})); + } + + @SneakyThrows + @ParameterizedTest(name = "{0}") + @MethodSource("getGetExOptions") + public void getex_options(String testName, GetExOptions options, String[] expectedArgs) { + assertArrayEquals( + expectedArgs, options.toArgs(), "Expected " + testName + " toArgs() to pass."); + System.out.println(expectedArgs); + } + @SneakyThrows @Test public void set_returns_success() { @@ -606,6 +748,26 @@ public void exists_returns_long_success() { assertEquals(numberExisting, result); } + @SneakyThrows + @Test + public void exists_binary_returns_long_success() { + // setup + GlideString[] keys = new GlideString[] {gs("testKey1"), gs("testKey2")}; + Long numberExisting = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(numberExisting); + when(commandManager.submitNewCommand(eq(Exists), eq(keys), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.exists(keys); + Long result = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(numberExisting, result); + } + @SneakyThrows @Test public void expire_returns_success() { @@ -629,6 +791,29 @@ public void expire_returns_success() { assertEquals(true, response.get()); } + @SneakyThrows + @Test + public void expire_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long seconds = 10L; + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(seconds))}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(Boolean.TRUE); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Expire), eq(arguments), any())) + 
.thenReturn(testResponse); + + // exercise + CompletableFuture response = service.expire(key, seconds); + + // verify + assertEquals(testResponse, response); + assertEquals(true, response.get()); + } + @SneakyThrows @Test public void expire_with_expireOptions_returns_success() { @@ -652,6 +837,29 @@ public void expire_with_expireOptions_returns_success() { assertEquals(false, response.get()); } + @SneakyThrows + @Test + public void expire_with_expireOptions_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long seconds = 10L; + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(seconds)), gs("NX")}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(Boolean.FALSE); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Expire), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.expire(key, seconds, ExpireOptions.HAS_NO_EXPIRY); + + // verify + assertEquals(testResponse, response); + assertEquals(false, response.get()); + } + @SneakyThrows @Test public void expireAt_returns_success() { @@ -675,6 +883,29 @@ public void expireAt_returns_success() { assertEquals(true, response.get()); } + @SneakyThrows + @Test + public void expireAt_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long unixSeconds = 100000L; + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(unixSeconds))}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(Boolean.TRUE); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ExpireAt), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.expireAt(key, unixSeconds); + + // verify + assertEquals(testResponse, response); + assertEquals(true, response.get()); + } + @SneakyThrows @Test public void expireAt_with_expireOptions_returns_success() { @@ 
-699,6 +930,30 @@ public void expireAt_with_expireOptions_returns_success() { assertEquals(false, response.get()); } + @SneakyThrows + @Test + public void expireAt_with_expireOptions_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long unixSeconds = 100000L; + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(unixSeconds)), gs("XX")}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(Boolean.FALSE); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ExpireAt), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = + service.expireAt(key, unixSeconds, ExpireOptions.HAS_EXISTING_EXPIRY); + + // verify + assertEquals(testResponse, response); + assertEquals(false, response.get()); + } + @SneakyThrows @Test public void pexpire_returns_success() { @@ -722,6 +977,29 @@ public void pexpire_returns_success() { assertEquals(true, response.get()); } + @SneakyThrows + @Test + public void pexpire_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long milliseconds = 50000L; + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(milliseconds))}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(Boolean.TRUE); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(PExpire), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.pexpire(key, milliseconds); + + // verify + assertEquals(testResponse, response); + assertEquals(true, response.get()); + } + @SneakyThrows @Test public void pexpire_with_expireOptions_returns_success() { @@ -746,6 +1024,30 @@ public void pexpire_with_expireOptions_returns_success() { assertEquals(false, response.get()); } + @SneakyThrows + @Test + public void pexpire_with_expireOptions_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + 
long milliseconds = 50000L; + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(milliseconds)), gs("LT")}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(Boolean.FALSE); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(PExpire), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = + service.pexpire(key, milliseconds, ExpireOptions.NEW_EXPIRY_LESS_THAN_CURRENT); + + // verify + assertEquals(testResponse, response); + assertEquals(false, response.get()); + } + @SneakyThrows @Test public void pexpireAt_returns_success() { @@ -769,6 +1071,29 @@ public void pexpireAt_returns_success() { assertEquals(true, response.get()); } + @SneakyThrows + @Test + public void pexpireAt_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long unixMilliseconds = 999999L; + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(unixMilliseconds))}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(Boolean.TRUE); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(PExpireAt), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.pexpireAt(key, unixMilliseconds); + + // verify + assertEquals(testResponse, response); + assertEquals(true, response.get()); + } + @SneakyThrows @Test public void pexpireAt_with_expireOptions_returns_success() { @@ -793,6 +1118,31 @@ public void pexpireAt_with_expireOptions_returns_success() { assertEquals(false, response.get()); } + @SneakyThrows + @Test + public void pexpireAt_with_expireOptions_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long unixMilliseconds = 999999L; + GlideString[] arguments = + new GlideString[] {key, gs(Long.toString(unixMilliseconds)), gs("GT")}; + + CompletableFuture testResponse = new CompletableFuture<>(); + 
testResponse.complete(Boolean.FALSE); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(PExpireAt), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = + service.pexpireAt(key, unixMilliseconds, ExpireOptions.NEW_EXPIRY_GREATER_THAN_CURRENT); + + // verify + assertEquals(testResponse, response); + assertEquals(false, response.get()); + } + @SneakyThrows @Test public void ttl_returns_success() { @@ -816,23 +1166,65 @@ public void ttl_returns_success() { @SneakyThrows @Test - public void expiretime_returns_success() { + public void ttl_binary_returns_success() { // setup - String key = "testKey"; - long value = 999L; + GlideString key = gs("testKey"); + long ttl = 999L; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + testResponse.complete(ttl); // match on protobuf request - when(commandManager.submitNewCommand(eq(ExpireTime), eq(new String[] {key}), any())) + when(commandManager.submitNewCommand(eq(TTL), eq(new GlideString[] {key}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.expiretime(key); + CompletableFuture response = service.ttl(key); // verify assertEquals(testResponse, response); - assertEquals(value, response.get()); + assertEquals(ttl, response.get()); + } + + @SneakyThrows + @Test + public void expiretime_returns_success() { + // setup + String key = "testKey"; + long value = 999L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ExpireTime), eq(new String[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.expiretime(key); + + // verify + assertEquals(testResponse, response); + assertEquals(value, response.get()); + } + + @SneakyThrows + @Test + public void expiretime_binary_returns_success() { + // setup + GlideString key = 
gs("testKey"); + long value = 999L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ExpireTime), eq(new GlideString[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.expiretime(key); + + // verify + assertEquals(testResponse, response); + assertEquals(value, response.get()); } @SneakyThrows @@ -856,6 +1248,27 @@ public void pexpiretime_returns_success() { assertEquals(value, response.get()); } + @SneakyThrows + @Test + public void pexpiretime_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long value = 999L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(PExpireTime), eq(new GlideString[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.pexpiretime(key); + + // verify + assertEquals(testResponse, response); + assertEquals(value, response.get()); + } + @SneakyThrows @Test public void invokeScript_returns_success() { @@ -930,6 +1343,28 @@ public void pttl_returns_success() { assertEquals(pttl, response.get()); } + @SneakyThrows + @Test + public void pttl_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long pttl = 999000L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(pttl); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(PTTL), eq(new GlideString[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.pttl(key); + + // verify + assertEquals(testResponse, response); + assertEquals(pttl, response.get()); + } + @SneakyThrows @Test public void persist_returns_success() { @@ -952,6 +1387,28 @@ public void persist_returns_success() { assertEquals(isTimeoutRemoved, 
response.get()); } + @SneakyThrows + @Test + public void persist_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + Boolean isTimeoutRemoved = true; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(isTimeoutRemoved); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Persist), eq(new GlideString[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.persist(key); + + // verify + assertEquals(testResponse, response); + assertEquals(isTimeoutRemoved, response.get()); + } + @SneakyThrows @Test public void info_returns_success() { @@ -1138,6 +1595,31 @@ public void incrBy_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void incrBy_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long amount = 1L; + Long value = 10L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(IncrBy), eq(new GlideString[] {key, gs(Long.toString(amount).getBytes())}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.incrBy(key, amount); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void incrByFloat_returns_success() { @@ -1163,6 +1645,33 @@ public void incrByFloat_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void incrByFloat_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + double amount = 1.1; + Double value = 10.1; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(IncrByFloat), + eq(new GlideString[] {key, gs(Double.toString(amount).getBytes())}), + 
any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.incrByFloat(key, amount); + Double payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void decr_returns_success() { @@ -1358,6 +1867,31 @@ public void hsetnx_success() { assertTrue(payload); } + @SneakyThrows + @Test + public void hsetnx_binary_success() { + // setup + GlideString key = gs("testKey"); + GlideString field = gs("testField"); + GlideString value = gs("testValue"); + GlideString[] args = new GlideString[] {key, field, value}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(Boolean.TRUE); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(HSetNX), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.hsetnx(key, field, value); + Boolean payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertTrue(payload); + } + @SneakyThrows @Test public void hdel_success() { @@ -1479,6 +2013,31 @@ public void hexists_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void hexists_binary_success() { + // setup + GlideString key = gs("testKey"); + GlideString field = gs("testField"); + GlideString[] args = new GlideString[] {key, field}; + Boolean value = true; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(HExists), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.hexists(key, field); + Boolean payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void hgetall_success() { @@ -1531,6 +2090,34 @@ public void hincrBy_returns_success() { assertEquals(value, 
payload); } + @SneakyThrows + @Test + public void hincrBy_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString field = gs("field"); + long amount = 1L; + Long value = 10L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(HIncrBy), + eq(new GlideString[] {key, field, gs(Long.toString(amount).getBytes())}), + any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.hincrBy(key, field, amount); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void hincrByFloat_returns_success() { @@ -1557,6 +2144,34 @@ public void hincrByFloat_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void hincrByFloat_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString field = gs("field"); + double amount = 1.0; + Double value = 10.0; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(HIncrByFloat), + eq(new GlideString[] {key, field, gs(Double.toString(amount).getBytes())}), + any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.hincrByFloat(key, field, amount); + Double payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void hkeys_returns_success() { @@ -1703,6 +2318,31 @@ public void lpush_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void lpush_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString[] elements = new GlideString[] {gs("value1"), gs("value2")}; + GlideString[] args = new GlideString[] {key, 
gs("value1"), gs("value2")}; + Long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(LPush), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.lpush(key, elements); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void lpop_returns_success() { @@ -1922,14 +2562,39 @@ public void ltrim_returns_success() { @SneakyThrows @Test - public void llen_returns_success() { + public void ltrim_binary_returns_success() { // setup - String key = "testKey"; - String[] args = new String[] {key}; - long value = 2L; + GlideString key = gs("testKey"); + long start = 2L; + long end = 2L; + GlideString[] args = new GlideString[] {key, gs(Long.toString(end)), gs(Long.toString(start))}; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(LTrim), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.ltrim(key, start, end); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void llen_returns_success() { + // setup + String key = "testKey"; + String[] args = new String[] {key}; + long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); // match on protobuf request when(commandManager.submitNewCommand(eq(LLen), eq(args), any())).thenReturn(testResponse); @@ -1968,6 +2633,31 @@ public void lrem_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void 
lrem_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long count = 2L; + GlideString element = gs("value"); + GlideString[] args = new GlideString[] {key, gs(Long.toString(count).getBytes()), element}; + long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(LRem), eq(args), any())).thenReturn(testResponse); + + // exercise + CompletableFuture response = service.lrem(key, count, element); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void rpush_returns_success() { @@ -1993,6 +2683,31 @@ public void rpush_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void rpush_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString[] elements = new GlideString[] {gs("value1"), gs("value2")}; + GlideString[] args = new GlideString[] {key, gs("value1"), gs("value2")}; + Long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(RPush), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.rpush(key, elements); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void rpop_returns_success() { @@ -2067,6 +2782,31 @@ public void sadd_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void sadd_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString[] members = new GlideString[] {gs("testMember1"), gs("testMember2")}; + GlideString[] arguments = ArrayUtils.addFirst(members, key); + Long value = 2L; + + CompletableFuture 
testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SAdd), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sadd(key, members); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void sismember_returns_success() { @@ -2091,6 +2831,30 @@ public void sismember_returns_success() { assertTrue(payload); } + @SneakyThrows + @Test + public void sismember_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString member = gs("testMember"); + GlideString[] arguments = new GlideString[] {key, member}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(true); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SIsMember), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sismember(key, member); + Boolean payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertTrue(payload); + } + @SneakyThrows @Test public void srem_returns_success() { @@ -2116,6 +2880,31 @@ public void srem_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void srem_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString[] members = new GlideString[] {gs("testMember1"), gs("testMember2")}; + GlideString[] arguments = ArrayUtils.addFirst(members, key); + Long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SRem), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.srem(key, members); + Long payload = response.get(); + + // verify + 
assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void smembers_returns_success() { @@ -2162,6 +2951,29 @@ public void scard_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void scard_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + Long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SCard), eq(new GlideString[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.scard(key); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void sdiff_returns_success() { @@ -2261,6 +3073,30 @@ public void smove_returns_success() { assertTrue(response.get()); } + @SneakyThrows + @Test + public void smove_binary_returns_success() { + // setup + GlideString source = gs("src"); + GlideString destination = gs("dst"); + GlideString member = gs("elem"); + GlideString[] arguments = {source, destination, member}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(true); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SMove), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.smove(source, destination, member); + + // verify + assertEquals(testResponse, response); + assertTrue(response.get()); + } + @SneakyThrows @Test public void sinter_returns_success() { @@ -2284,6 +3120,29 @@ public void sinter_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void sinter_binary_returns_success() { + // setup + GlideString[] keys = new GlideString[] {gs("key1"), gs("key2")}; + Set value = Set.of(gs("1"), gs("2")); + + CompletableFuture> testResponse = new 
CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>submitNewCommand(eq(SInter), eq(keys), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = service.sinter(keys); + Set payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void sinterstore_returns_success() { @@ -2309,6 +3168,31 @@ public void sinterstore_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void sinterstore_binary_returns_success() { + // setup + GlideString destination = gs("key"); + GlideString[] keys = new GlideString[] {gs("set1"), gs("set2")}; + GlideString[] args = new GlideString[] {gs("key"), gs("set1"), gs("set2")}; + Long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SInterStore), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sinterstore(destination, keys); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void sunionstore_returns_success() { @@ -2725,6 +3609,31 @@ public void zrem_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void zrem_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString[] members = new GlideString[] {gs("member1"), gs("member2")}; + GlideString[] arguments = ArrayUtils.addFirst(members, key); + Long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZRem), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = 
service.zrem(key, members); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void zcard_returns_success() { @@ -2749,6 +3658,30 @@ public void zcard_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void zcard_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString[] arguments = new GlideString[] {key}; + Long value = 3L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZCard), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.zcard(key); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void zpopmin_returns_success() { @@ -2924,14 +3857,39 @@ public void zscore_returns_success() { @SneakyThrows @Test - public void zrange_by_index_returns_success() { + public void zscore_binary_returns_success() { // setup - String key = "testKey"; - RangeByIndex rangeByIndex = new RangeByIndex(0, 1); - String[] arguments = new String[] {key, rangeByIndex.getStart(), rangeByIndex.getEnd()}; - String[] value = new String[] {"one", "two"}; + GlideString key = gs("testKey"); + GlideString member = gs("testMember"); + GlideString[] arguments = new GlideString[] {key, member}; + Double value = 3.5; - CompletableFuture testResponse = new CompletableFuture<>(); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZScore), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.zscore(key, member); + Double payload = response.get(); + + // verify + assertEquals(testResponse, 
response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void zrange_by_index_returns_success() { + // setup + String key = "testKey"; + RangeByIndex rangeByIndex = new RangeByIndex(0, 1); + String[] arguments = new String[] {key, rangeByIndex.getStart(), rangeByIndex.getEnd()}; + String[] value = new String[] {"one", "two"}; + + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request @@ -3091,6 +4049,31 @@ public void zrank_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void zrank_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString member = gs("testMember"); + GlideString[] arguments = new GlideString[] {key, member}; + Long value = 3L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZRank), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.zrank(key, member); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void zrankWithScore_returns_success() { @@ -3191,6 +4174,31 @@ public void zmscore_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void zmscore_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString[] members = new GlideString[] {gs("member1"), gs("member2")}; + GlideString[] arguments = new GlideString[] {key, gs("member1"), gs("member2")}; + Double[] value = new Double[] {2.5, 8.2}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZMScore), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response 
= service.zmscore(key, members); + Double[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void zdiffstore_returns_success() { @@ -3216,6 +4224,34 @@ public void zdiffstore_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void zdiffstore_binary_returns_success() { + // setup + GlideString destKey = gs("testDestKey"); + GlideString[] keys = new GlideString[] {gs("testKey1"), gs("testKey2")}; + GlideString[] arguments = + new GlideString[] { + destKey, gs(Long.toString(keys.length).getBytes()), gs("testKey1"), gs("testKey2") + }; + Long value = 3L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZDiffStore), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.zdiffstore(destKey, keys); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void zdiff_returns_success() { @@ -3318,6 +4354,35 @@ public void zremrangebyrank_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void zremrangebyrank_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long start = 0; + long end = -1; + GlideString[] arguments = + new GlideString[] { + key, gs(Long.toString(start).getBytes()), gs(Long.toString(end).getBytes()) + }; + Long value = 5L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZRemRangeByRank), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.zremrangebyrank(key, start, end); + Long payload = response.get(); + + // verify + 
assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void zremrangebylex_returns_success() { @@ -3780,6 +4845,32 @@ public void zintercard_with_limit_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void zintercard_with_limit_binary_returns_success() { + // setup + GlideString[] keys = new GlideString[] {gs("key1"), gs("key2")}; + long limit = 3L; + GlideString[] arguments = + new GlideString[] {gs("2"), gs("key1"), gs("key2"), gs(LIMIT_REDIS_API), gs("3")}; + Long value = 3L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZInterCard), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.zintercard(keys, limit); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void zintercard_returns_success() { @@ -3804,6 +4895,30 @@ public void zintercard_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void zintercard_binary_returns_success() { + // setup + GlideString[] keys = new GlideString[] {gs("key1"), gs("key2")}; + GlideString[] arguments = new GlideString[] {gs("2"), gs("key1"), gs("key2")}; + Long value = 3L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZInterCard), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.zintercard(keys); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void zrandmember_returns_success() { @@ -3904,6 +5019,32 @@ public void zincrby_returns_success() { 
assertEquals(value, payload); } + @SneakyThrows + @Test + public void zincrby_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + double increment = 4.2; + GlideString member = gs("member"); + GlideString[] arguments = new GlideString[] {key, gs("4.2"), member}; + Double value = 3.14; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(ZIncrBy), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.zincrby(key, increment, member); + Double payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void xadd_returns_success() { @@ -3938,67 +5079,67 @@ private static List getStreamAddOptions() { // no TRIM option "test_xadd_no_trim", StreamAddOptions.builder().id("id").makeStream(Boolean.FALSE).build(), - new String[] {NO_MAKE_STREAM_REDIS_API, "id"}, - Arguments.of( - // MAXLEN with LIMIT - "test_xadd_maxlen_with_limit", - StreamAddOptions.builder() - .id("id") - .makeStream(Boolean.TRUE) - .trim(new MaxLen(5L, 10L)) - .build(), - new String[] { - TRIM_MAXLEN_REDIS_API, - TRIM_EXACT_REDIS_API, - Long.toString(5L), - TRIM_LIMIT_REDIS_API, - Long.toString(10L), - "id" - }), - Arguments.of( - // MAXLEN with non exact match - "test_xadd_maxlen_with_non_exact_match", - StreamAddOptions.builder() - .makeStream(Boolean.FALSE) - .trim(new MaxLen(false, 2L)) - .build(), - new String[] { - NO_MAKE_STREAM_REDIS_API, - TRIM_MAXLEN_REDIS_API, - TRIM_NOT_EXACT_REDIS_API, - Long.toString(2L), - "*" - }), - Arguments.of( - // MIN ID with LIMIT - "test_xadd_minid_with_limit", - StreamAddOptions.builder() - .id("id") - .makeStream(Boolean.TRUE) - .trim(new MinId("testKey", 10L)) - .build(), - new String[] { - TRIM_MINID_REDIS_API, - TRIM_EXACT_REDIS_API, - Long.toString(5L), - TRIM_LIMIT_REDIS_API, - 
Long.toString(10L), - "id" - }), - Arguments.of( - // MIN ID with non exact match - "test_xadd_minid_with_non_exact_match", - StreamAddOptions.builder() - .makeStream(Boolean.FALSE) - .trim(new MinId(false, "testKey")) - .build(), - new String[] { - NO_MAKE_STREAM_REDIS_API, - TRIM_MINID_REDIS_API, - TRIM_NOT_EXACT_REDIS_API, - Long.toString(5L), - "*" - }))); + new String[] {NO_MAKE_STREAM_REDIS_API, "id"}), + Arguments.of( + // MAXLEN with LIMIT + "test_xadd_maxlen_with_limit", + StreamAddOptions.builder() + .id("id") + .makeStream(Boolean.TRUE) + .trim(new MaxLen(5L, 10L)) + .build(), + new String[] { + TRIM_MAXLEN_REDIS_API, + TRIM_NOT_EXACT_REDIS_API, + Long.toString(5L), + TRIM_LIMIT_REDIS_API, + Long.toString(10L), + "id" + }), + Arguments.of( + // MAXLEN with non exact match + "test_xadd_maxlen_with_non_exact_match", + StreamAddOptions.builder() + .makeStream(Boolean.FALSE) + .trim(new MaxLen(false, 2L)) + .build(), + new String[] { + NO_MAKE_STREAM_REDIS_API, + TRIM_MAXLEN_REDIS_API, + TRIM_NOT_EXACT_REDIS_API, + Long.toString(2L), + "*" + }), + Arguments.of( + // MIN ID with LIMIT + "test_xadd_minid_with_limit", + StreamAddOptions.builder() + .id("id") + .makeStream(Boolean.TRUE) + .trim(new MinId("testKey", 10L)) + .build(), + new String[] { + TRIM_MINID_REDIS_API, + TRIM_NOT_EXACT_REDIS_API, + "testKey", + TRIM_LIMIT_REDIS_API, + Long.toString(10L), + "id" + }), + Arguments.of( + // MIN ID with non-exact match + "test_xadd_minid_with_non_exact_match", + StreamAddOptions.builder() + .makeStream(Boolean.FALSE) + .trim(new MinId(false, "testKey")) + .build(), + new String[] { + NO_MAKE_STREAM_REDIS_API, + TRIM_MINID_REDIS_API, + TRIM_NOT_EXACT_REDIS_API, + "testKey", + "*" + })); } @SneakyThrows @@ -4006,7 +5147,8 @@ private static List getStreamAddOptions() { @MethodSource("getStreamAddOptions") public void xadd_with_options_to_arguments( String testName, StreamAddOptions options, String[] expectedArgs) { - assertArrayEquals(expectedArgs, 
options.toArgs()); + assertArrayEquals( + expectedArgs, options.toArgs(), "Expected " + testName + " toArgs() to pass."); } @SneakyThrows @@ -4240,6 +5382,31 @@ public void xdel_returns_success() { assertEquals(completedResult, payload); } + @Test + @SneakyThrows + public void xdel_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + GlideString[] ids = {gs("one-1"), gs("two-2"), gs("three-3")}; + Long completedResult = 69L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(completedResult); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(XDel), eq(new GlideString[] {key, gs("one-1"), gs("two-2"), gs("three-3")}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.xdel(key, ids); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(completedResult, payload); + } + @Test @SneakyThrows public void xrange_returns_success() { @@ -4370,278 +5537,399 @@ public void xrevrange_withcount_returns_success() { @SneakyThrows @Test - public void type_returns_success() { + public void xgroupCreate() { // setup String key = "testKey"; - String[] arguments = new String[] {key}; - String value = "none"; + String groupName = "testGroupName"; + String id = "testId"; + String[] arguments = new String[] {key, groupName, id}; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(Type), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(XGroupCreate), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.type(key); + CompletableFuture response = service.xgroupCreate(key, groupName, id); String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(OK, 
payload); } @SneakyThrows @Test - public void rename() { + public void xgroupCreate_withOptions() { // setup - String key = "key1"; - String newKey = "key2"; - String[] arguments = new String[] {key, newKey}; + String key = "testKey"; + String groupName = "testGroupName"; + String id = "testId"; + String testEntry = "testEntry"; + StreamGroupOptions options = + StreamGroupOptions.builder().makeStream().entriesRead(testEntry).build(); + String[] arguments = + new String[] {key, groupName, id, MAKE_STREAM_REDIS_API, ENTRIES_READ_REDIS_API, testEntry}; + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(Rename), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(XGroupCreate), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.rename(key, newKey); + CompletableFuture response = service.xgroupCreate(key, groupName, id, options); + String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(OK, response.get()); + assertEquals(OK, payload); } @SneakyThrows @Test - public void renamenx_returns_success() { + public void xgroupDestroy() { // setup - String key = "key1"; - String newKey = "key2"; - String[] arguments = new String[] {key, newKey}; + String key = "testKey"; + String groupName = "testGroupName"; + String[] arguments = new String[] {key, groupName}; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(true); + testResponse.complete(Boolean.TRUE); // match on protobuf request - when(commandManager.submitNewCommand(eq(RenameNX), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(XGroupDestroy), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.renamenx(key, newKey); + CompletableFuture response = service.xgroupDestroy(key, groupName); + Boolean payload = 
response.get(); // verify assertEquals(testResponse, response); - assertTrue(response.get()); + assertEquals(Boolean.TRUE, payload); } @SneakyThrows @Test - public void time_returns_success() { + public void xgroupCreateConsumer() { // setup - CompletableFuture testResponse = new CompletableFuture<>(); - String[] payload = new String[] {"UnixTime", "ms"}; - testResponse.complete(payload); + String key = "testKey"; + String groupName = "testGroupName"; + String consumerName = "testConsumerName"; + String[] arguments = new String[] {key, groupName, consumerName}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(Boolean.TRUE); + // match on protobuf request - when(commandManager.submitNewCommand(eq(Time), eq(new String[0]), any())) + when(commandManager.submitNewCommand(eq(XGroupCreateConsumer), eq(arguments), any())) .thenReturn(testResponse); + // exercise - CompletableFuture response = service.time(); + CompletableFuture response = + service.xgroupCreateConsumer(key, groupName, consumerName); + Boolean payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(payload, response.get()); + assertEquals(Boolean.TRUE, payload); } @SneakyThrows @Test - public void lastsave_returns_success() { + public void xgroupDelConsumer() { // setup - Long value = 42L; + String key = "testKey"; + String groupName = "testGroupName"; + String consumerName = "testConsumerName"; + String[] arguments = new String[] {key, groupName, consumerName}; + Long result = 28L; + CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + testResponse.complete(result); // match on protobuf request - when(commandManager.submitNewCommand(eq(LastSave), eq(new String[0]), any())) + when(commandManager.submitNewCommand(eq(XGroupDelConsumer), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.lastsave(); + CompletableFuture response = 
service.xgroupDelConsumer(key, groupName, consumerName); + Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, response.get()); + assertEquals(result, payload); } @SneakyThrows @Test - public void flushall_returns_success() { + public void xreadgroup_multiple_keys() { // setup - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(OK); + String keyOne = "one"; + String streamIdOne = "id-one"; + String keyTwo = "two"; + String streamIdTwo = "id-two"; + String groupName = "testGroup"; + String consumerName = "consumerGroup"; + String[][] fieldValues = {{"field", "value"}}; + Map> completedResult = new LinkedHashMap<>(); + completedResult.put(keyOne, Map.of(streamIdOne, fieldValues)); + completedResult.put(keyTwo, Map.of(streamIdTwo, fieldValues)); + String[] arguments = { + READ_GROUP_REDIS_API, + groupName, + consumerName, + READ_STREAMS_REDIS_API, + keyOne, + keyTwo, + streamIdOne, + streamIdTwo + }; + + CompletableFuture>> testResponse = + new CompletableFuture<>(); + testResponse.complete(completedResult); // match on protobuf request - when(commandManager.submitNewCommand(eq(FlushAll), eq(new String[0]), any())) + when(commandManager.>>submitNewCommand( + eq(XReadGroup), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.flushall(); - String payload = response.get(); + Map keysAndIds = new LinkedHashMap<>(); + keysAndIds.put(keyOne, streamIdOne); + keysAndIds.put(keyTwo, streamIdTwo); + CompletableFuture>> response = + service.xreadgroup(keysAndIds, groupName, consumerName); + Map> payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(OK, payload); + assertEquals(completedResult, payload); } @SneakyThrows @Test - public void flushall_with_mode_returns_success() { + public void xreadgroup_with_options() { // setup - CompletableFuture testResponse = new CompletableFuture<>(); - 
testResponse.complete(OK); + String keyOne = "one"; + String streamIdOne = "id-one"; + Long block = 2L; + Long count = 10L; + String groupName = "testGroup"; + String consumerName = "consumerGroup"; + String[][] fieldValues = {{"field", "value"}}; + Map> completedResult = + Map.of(keyOne, Map.of(streamIdOne, fieldValues)); + String[] arguments = { + READ_GROUP_REDIS_API, + groupName, + consumerName, + READ_COUNT_REDIS_API, + count.toString(), + READ_BLOCK_REDIS_API, + block.toString(), + READ_NOACK_REDIS_API, + READ_STREAMS_REDIS_API, + keyOne, + streamIdOne + }; + + CompletableFuture>> testResponse = + new CompletableFuture<>(); + testResponse.complete(completedResult); // match on protobuf request - when(commandManager.submitNewCommand( - eq(FlushAll), eq(new String[] {SYNC.toString()}), any())) + when(commandManager.>>submitNewCommand( + eq(XReadGroup), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.flushall(SYNC); - String payload = response.get(); + CompletableFuture>> response = + service.xreadgroup( + Map.of(keyOne, streamIdOne), + groupName, + consumerName, + StreamReadGroupOptions.builder().block(block).count(count).noack().build()); + Map> payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(OK, payload); + assertEquals(completedResult, payload); } @SneakyThrows @Test - public void lolwut_returns_success() { + public void xack_returns_success() { // setup - String value = "pewpew"; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + String key = "testKey"; + String groupName = "testGroupName"; + String[] ids = new String[] {"testId"}; + String[] arguments = concatenateArrays(new String[] {key, groupName}, ids); + Long mockResult = 1L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(mockResult); // match on protobuf request - when(commandManager.submitNewCommand(eq(Lolwut), eq(new 
String[0]), any())) + when(commandManager.submitNewCommand(eq(XAck), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.lolwut(); + CompletableFuture response = service.xack(key, groupName, ids); + Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, response.get()); + assertEquals(mockResult, payload); } @SneakyThrows @Test - public void lolwut_with_params_returns_success() { + public void xack_binary_returns_success() { // setup - String value = "pewpew"; - String[] arguments = new String[] {"1", "2"}; - int[] params = new int[] {1, 2}; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + GlideString key = gs("testKey"); + GlideString groupName = gs("testGroupName"); + GlideString[] ids = new GlideString[] {gs("testId")}; + GlideString[] arguments = concatenateArrays(new GlideString[] {key, groupName}, ids); + Long mockResult = 1L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(mockResult); // match on protobuf request - when(commandManager.submitNewCommand(eq(Lolwut), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(XAck), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.lolwut(params); + CompletableFuture response = service.xack(key, groupName, ids); + Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, response.get()); + assertEquals(mockResult, payload); } @SneakyThrows @Test - public void lolwut_with_version_returns_success() { + public void xpending_returns_success() { // setup - String value = "pewpew"; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + String key = "testKey"; + String groupName = "testGroupName"; + String[] arguments = {key, groupName}; + Object[] summary = new Object[] {1L, "1234-0", "2345-4", new 
Object[][] {{"consumer", "4"}}}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(summary); // match on protobuf request - when(commandManager.submitNewCommand( - eq(Lolwut), eq(new String[] {VERSION_REDIS_API, "42"}), any())) + when(commandManager.submitNewCommand(eq(XPending), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.lolwut(42); + CompletableFuture response = service.xpending(key, groupName); + Object[] payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, response.get()); + assertEquals(summary, payload); } @SneakyThrows @Test - public void lolwut_with_version_and_params_returns_success() { + public void xpending_with_start_end_count_returns_success() { // setup - String value = "pewpew"; - String[] arguments = new String[] {VERSION_REDIS_API, "42", "1", "2"}; - int[] params = new int[] {1, 2}; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + String key = "testKey"; + String groupName = "testGroupName"; + String[] arguments = {key, groupName, EXCLUSIVE_RANGE_REDIS_API + "1234-0", "2345-5", "4"}; + StreamRange start = IdBound.ofExclusive("1234-0"); + StreamRange end = IdBound.of("2345-5"); + Long count = 4L; + Object[][] extendedForm = new Object[][] {{"1234-0", "consumer", 4L, 1L}}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(extendedForm); // match on protobuf request - when(commandManager.submitNewCommand(eq(Lolwut), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(XPending), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.lolwut(42, params); + CompletableFuture response = service.xpending(key, groupName, start, end, count); + Object[][] payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, response.get()); + 
assertEquals(extendedForm, payload); } @SneakyThrows @Test - public void dbsize_returns_success() { + public void xpending_with_start_end_count_options_returns_success() { // setup - Long value = 10L; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + String key = "testKey"; + String groupName = "testGroupName"; + String consumer = "testConsumer"; + String[] arguments = { + key, + groupName, + IDLE_TIME_REDIS_API, + "100", + MINIMUM_RANGE_REDIS_API, + MAXIMUM_RANGE_REDIS_API, + "4", + consumer + }; + StreamRange start = InfRangeBound.MIN; + StreamRange end = InfRangeBound.MAX; + Long count = 4L; + Object[][] extendedForm = new Object[][] {{"1234-0", consumer, 4L, 1L}}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(extendedForm); // match on protobuf request - when(commandManager.submitNewCommand(eq(DBSize), eq(new String[0]), any())) + when(commandManager.submitNewCommand(eq(XPending), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.dbsize(); + CompletableFuture response = + service.xpending( + key, + groupName, + start, + end, + count, + StreamPendingOptions.builder().minIdleTime(100L).consumer(consumer).build()); + Object[][] payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, response.get()); + assertEquals(extendedForm, payload); } @SneakyThrows @Test - public void linsert_returns_success() { + public void type_returns_success() { // setup String key = "testKey"; - var position = BEFORE; - String pivot = "pivot"; - String elem = "elem"; - String[] arguments = new String[] {key, position.toString(), pivot, elem}; - long value = 42; + String[] arguments = new String[] {key}; + String value = "none"; - CompletableFuture testResponse = new CompletableFuture<>(); + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - 
when(commandManager.submitNewCommand(eq(LInsert), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Type), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.linsert(key, position, pivot, elem); - long payload = response.get(); + CompletableFuture response = service.type(key); + String payload = response.get(); // verify assertEquals(testResponse, response); @@ -4650,23 +5938,22 @@ public void linsert_returns_success() { @SneakyThrows @Test - public void blpop_returns_success() { + public void type_binary_returns_success() { // setup - String key = "key"; - double timeout = 0.5; - String[] arguments = new String[] {key, "0.5"}; - String[] value = new String[] {"key", "value"}; + GlideString key = gs("testKey"); + GlideString[] arguments = new GlideString[] {key}; + String value = "none"; - CompletableFuture testResponse = new CompletableFuture<>(); + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(BLPop), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Type), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.blpop(new String[] {key}, timeout); - String[] payload = response.get(); + CompletableFuture response = service.type(key); + String payload = response.get(); // verify assertEquals(testResponse, response); @@ -4675,392 +5962,357 @@ public void blpop_returns_success() { @SneakyThrows @Test - public void rpushx_returns_success() { + public void randomKey() { // setup - String key = "testKey"; - String[] elements = new String[] {"value1", "value2"}; - String[] args = new String[] {key, "value1", "value2"}; - Long value = 2L; - - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + String key1 = "key1"; + CompletableFuture testResponse = new CompletableFuture<>(); + 
testResponse.complete(key1); // match on protobuf request - when(commandManager.submitNewCommand(eq(RPushX), eq(args), any())) + when(commandManager.submitNewCommand(eq(RandomKey), eq(new String[0]), any())) .thenReturn(testResponse); - - // exercise - CompletableFuture response = service.rpushx(key, elements); - Long payload = response.get(); + CompletableFuture response = service.randomKey(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); } @SneakyThrows @Test - public void lpushx_returns_success() { + public void rename() { // setup - String key = "testKey"; - String[] elements = new String[] {"value1", "value2"}; - String[] args = new String[] {key, "value1", "value2"}; - Long value = 2L; - - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + String key = "key1"; + String newKey = "key2"; + String[] arguments = new String[] {key, newKey}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(LPushX), eq(args), any())) + when(commandManager.submitNewCommand(eq(Rename), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.lpushx(key, elements); - Long payload = response.get(); + CompletableFuture response = service.rename(key, newKey); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(OK, response.get()); } @SneakyThrows @Test - public void brpop_returns_success() { + public void rename_binary() { // setup - String key = "key"; - double timeout = 0.5; - String[] arguments = new String[] {key, "0.5"}; - String[] value = new String[] {"key", "value"}; - - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + GlideString key = gs("key1"); + GlideString newKey = gs("key2"); + GlideString[] arguments = new GlideString[] {key, newKey}; + CompletableFuture 
testResponse = new CompletableFuture<>(); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(BRPop), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Rename), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.brpop(new String[] {key}, timeout); - String[] payload = response.get(); + CompletableFuture response = service.rename(key, newKey); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(OK, response.get()); } @SneakyThrows @Test - public void pfadd_returns_success() { + public void renamenx_returns_success() { // setup - String key = "testKey"; - String[] elements = new String[] {"a", "b", "c"}; - String[] arguments = new String[] {key, "a", "b", "c"}; - Long value = 1L; + String key = "key1"; + String newKey = "key2"; + String[] arguments = new String[] {key, newKey}; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(true); // match on protobuf request - when(commandManager.submitNewCommand(eq(PfAdd), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(RenameNX), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.pfadd(key, elements); - Long payload = response.get(); + CompletableFuture response = service.renamenx(key, newKey); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertTrue(response.get()); } @SneakyThrows @Test - public void pfcount_returns_success() { + public void renamenx_binary_returns_success() { // setup - String[] keys = new String[] {"a", "b", "c"}; - Long value = 1L; + GlideString key = gs("key1"); + GlideString newKey = gs("key2"); + GlideString[] arguments = new GlideString[] {key, newKey}; - CompletableFuture testResponse = new CompletableFuture<>(); - 
testResponse.complete(value); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(true); // match on protobuf request - when(commandManager.submitNewCommand(eq(PfCount), eq(keys), any())) + when(commandManager.submitNewCommand(eq(RenameNX), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.pfcount(keys); - Long payload = response.get(); + CompletableFuture response = service.renamenx(key, newKey); // verify assertEquals(testResponse, response); - assertEquals(value, payload); - assertEquals(payload, response.get()); + assertTrue(response.get()); } @SneakyThrows @Test - public void pfmerge_returns_success() { + public void time_returns_success() { // setup - String destKey = "testKey"; - String[] sourceKeys = new String[] {"a", "b", "c"}; - String[] arguments = new String[] {destKey, "a", "b", "c"}; + CompletableFuture testResponse = new CompletableFuture<>(); + String[] payload = new String[] {"UnixTime", "ms"}; + testResponse.complete(payload); + // match on protobuf request + when(commandManager.submitNewCommand(eq(Time), eq(new String[0]), any())) + .thenReturn(testResponse); + // exercise + CompletableFuture response = service.time(); - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(OK); + // verify + assertEquals(testResponse, response); + assertEquals(payload, response.get()); + } + + @SneakyThrows + @Test + public void lastsave_returns_success() { + // setup + Long value = 42L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(PfMerge), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(LastSave), eq(new String[0]), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.pfmerge(destKey, sourceKeys); + CompletableFuture response = service.lastsave(); // verify 
assertEquals(testResponse, response); - assertEquals(OK, response.get()); + assertEquals(value, response.get()); } @SneakyThrows @Test - public void objectEncoding_returns_success() { + public void flushall_returns_success() { // setup - String key = "testKey"; - String encoding = "testEncoding"; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(encoding); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(ObjectEncoding), eq(new String[] {key}), any())) + when(commandManager.submitNewCommand(eq(FlushAll), eq(new String[0]), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.objectEncoding(key); + CompletableFuture response = service.flushall(); String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(encoding, payload); + assertEquals(OK, payload); } @SneakyThrows @Test - public void objectFreq_returns_success() { + public void flushall_with_mode_returns_success() { // setup - String key = "testKey"; - Long frequency = 0L; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(frequency); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(ObjectFreq), eq(new String[] {key}), any())) + when(commandManager.submitNewCommand( + eq(FlushAll), eq(new String[] {SYNC.toString()}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.objectFreq(key); - Long payload = response.get(); + CompletableFuture response = service.flushall(SYNC); + String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(frequency, payload); + assertEquals(OK, payload); } @SneakyThrows @Test - public void objectIdletime_returns_success() { + public void flushdb_returns_success() { // setup - String key = "testKey"; - Long idletime = 
0L; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(idletime); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(ObjectIdleTime), eq(new String[] {key}), any())) + when(commandManager.submitNewCommand(eq(FlushDB), eq(new String[0]), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.objectIdletime(key); - Long payload = response.get(); + CompletableFuture response = service.flushdb(); + String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(idletime, payload); + assertEquals(OK, payload); } @SneakyThrows @Test - public void objectRefcount_returns_success() { + public void flushdb_with_mode_returns_success() { // setup - String key = "testKey"; - Long refcount = 0L; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(refcount); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(ObjectRefCount), eq(new String[] {key}), any())) + when(commandManager.submitNewCommand( + eq(FlushDB), eq(new String[] {SYNC.toString()}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.objectRefcount(key); - Long payload = response.get(); + CompletableFuture response = service.flushdb(SYNC); + String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(refcount, payload); + assertEquals(OK, payload); } @SneakyThrows @Test - public void touch_returns_success() { + public void lolwut_returns_success() { // setup - String[] keys = new String[] {"testKey1", "testKey2"}; - Long value = 2L; - CompletableFuture testResponse = new CompletableFuture<>(); + String value = "pewpew"; + CompletableFuture testResponse = new CompletableFuture<>(); 
testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(Touch), eq(keys), any())) + when(commandManager.submitNewCommand(eq(Lolwut), eq(new String[0]), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.touch(keys); - Long payload = response.get(); + CompletableFuture response = service.lolwut(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(value, response.get()); } @SneakyThrows @Test - public void geoadd_returns_success() { + public void lolwut_with_params_returns_success() { // setup - String key = "testKey"; - Map membersToGeoSpatialData = new LinkedHashMap<>(); - membersToGeoSpatialData.put("Catania", new GeospatialData(15.087269, 40)); - membersToGeoSpatialData.put("Palermo", new GeospatialData(13.361389, 38.115556)); - String[] arguments = - new String[] {key, "15.087269", "40.0", "Catania", "13.361389", "38.115556", "Palermo"}; - Long value = 1L; - - CompletableFuture testResponse = new CompletableFuture<>(); + String value = "pewpew"; + String[] arguments = new String[] {"1", "2"}; + int[] params = new int[] {1, 2}; + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(GeoAdd), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Lolwut), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.geoadd(key, membersToGeoSpatialData); - Long payload = response.get(); + CompletableFuture response = service.lolwut(params); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(value, response.get()); } @SneakyThrows @Test - public void geoadd_with_options_returns_success() { + public void lolwut_with_version_returns_success() { // setup - String key = "testKey"; - Map membersToGeoSpatialData = new LinkedHashMap<>(); - 
membersToGeoSpatialData.put("Catania", new GeospatialData(15.087269, 40)); - membersToGeoSpatialData.put("Palermo", new GeospatialData(13.361389, 38.115556)); - GeoAddOptions options = new GeoAddOptions(ConditionalChange.ONLY_IF_EXISTS, true); - String[] arguments = - new String[] { - key, - ConditionalChange.ONLY_IF_EXISTS.getRedisApi(), - CHANGED_REDIS_API, - "15.087269", - "40.0", - "Catania", - "13.361389", - "38.115556", - "Palermo" - }; - Long value = 1L; - - CompletableFuture testResponse = new CompletableFuture<>(); + String value = "pewpew"; + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(GeoAdd), eq(arguments), any())) + when(commandManager.submitNewCommand( + eq(Lolwut), eq(new String[] {VERSION_REDIS_API, "42"}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.geoadd(key, membersToGeoSpatialData, options); - Long payload = response.get(); + CompletableFuture response = service.lolwut(42); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(value, response.get()); } @SneakyThrows @Test - public void geopos_returns_success() { + public void lolwut_with_version_and_params_returns_success() { // setup - String key = "testKey"; - String[] members = {"Catania", "Palermo"}; - String[] arguments = new String[] {key, "Catania", "Palermo"}; - Double[][] value = {{15.087269, 40.0}, {13.361389, 38.115556}}; - - CompletableFuture testResponse = new CompletableFuture<>(); + String value = "pewpew"; + String[] arguments = new String[] {VERSION_REDIS_API, "42", "1", "2"}; + int[] params = new int[] {1, 2}; + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(GeoPos), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Lolwut), eq(arguments), any())) 
.thenReturn(testResponse); // exercise - CompletableFuture response = service.geopos(key, members); - Object[] payload = response.get(); + CompletableFuture response = service.lolwut(42, params); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(value, response.get()); } @SneakyThrows @Test - public void append() { + public void dbsize_returns_success() { // setup - String key = "testKey"; - String value = "testValue"; + Long value = 10L; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(1L); - when(commandManager.submitNewCommand(eq(Append), eq(new String[] {key, value}), any())) + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(DBSize), eq(new String[0]), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.append(key, value); - Long payload = response.get(); + CompletableFuture response = service.dbsize(); // verify assertEquals(testResponse, response); - assertEquals(1L, payload); + assertEquals(value, response.get()); } @SneakyThrows @Test - public void geohash_returns_success() { + public void linsert_returns_success() { // setup String key = "testKey"; - String[] members = {"Catania", "Palermo", "NonExisting"}; - String[] arguments = new String[] {key, "Catania", "Palermo", "NonExisting"}; - String[] value = {"sqc8b49rny0", "sqdtr74hyu0", null}; + var position = BEFORE; + String pivot = "pivot"; + String elem = "elem"; + String[] arguments = new String[] {key, position.toString(), pivot, elem}; + long value = 42; - CompletableFuture testResponse = new CompletableFuture<>(); + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(GeoHash), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(LInsert), eq(arguments), any())) .thenReturn(testResponse); // exercise - 
CompletableFuture response = service.geohash(key, members); - Object[] payload = response.get(); + CompletableFuture response = service.linsert(key, position, pivot, elem); + long payload = response.get(); // verify assertEquals(testResponse, response); @@ -5069,24 +6321,23 @@ public void geohash_returns_success() { @SneakyThrows @Test - public void geodist_returns_success() { + public void blpop_returns_success() { // setup - String key = "testKey"; - String member1 = "Catania"; - String member2 = "Palermo"; - String[] arguments = new String[] {key, member1, member2}; - Double value = 166274.1516; + String key = "key"; + double timeout = 0.5; + String[] arguments = new String[] {key, "0.5"}; + String[] value = new String[] {"key", "value"}; - CompletableFuture testResponse = new CompletableFuture<>(); + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(GeoDist), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(BLPop), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.geodist(key, member1, member2); - Double payload = response.get(); + CompletableFuture response = service.blpop(new String[] {key}, timeout); + String[] payload = response.get(); // verify assertEquals(testResponse, response); @@ -5095,25 +6346,23 @@ public void geodist_returns_success() { @SneakyThrows @Test - public void geodist_with_metrics_returns_success() { + public void rpushx_returns_success() { // setup String key = "testKey"; - String member1 = "Catania"; - String member2 = "Palermo"; - GeoUnit geoUnit = GeoUnit.KILOMETERS; - String[] arguments = new String[] {key, member1, member2, GeoUnit.KILOMETERS.getRedisApi()}; - Double value = 166.2742; + String[] elements = new String[] {"value1", "value2"}; + String[] args = new String[] {key, "value1", "value2"}; + Long value = 2L; - CompletableFuture testResponse = new 
CompletableFuture<>(); + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(GeoDist), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(RPushX), eq(args), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.geodist(key, member1, member2, geoUnit); - Double payload = response.get(); + CompletableFuture response = service.rpushx(key, elements); + Long payload = response.get(); // verify assertEquals(testResponse, response); @@ -5122,21 +6371,23 @@ public void geodist_with_metrics_returns_success() { @SneakyThrows @Test - public void functionLoad_returns_success() { + public void rpushx_binary_returns_success() { // setup - String code = "The best code ever"; - String[] args = new String[] {code}; - String value = "42"; - CompletableFuture testResponse = new CompletableFuture<>(); + GlideString key = gs("testKey"); + GlideString[] elements = new GlideString[] {gs("value1"), gs("value2")}; + GlideString[] args = new GlideString[] {key, gs("value1"), gs("value2")}; + Long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(FunctionLoad), eq(args), any())) + when(commandManager.submitNewCommand(eq(RPushX), eq(args), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.functionLoad(code, false); - String payload = response.get(); + CompletableFuture response = service.rpushx(key, elements); + Long payload = response.get(); // verify assertEquals(testResponse, response); @@ -5145,21 +6396,23 @@ public void functionLoad_returns_success() { @SneakyThrows @Test - public void functionLoad_with_replace_returns_success() { + public void lpushx_returns_success() { // setup - String code = "The best code ever"; - String[] args = new String[] 
{FunctionLoadOptions.REPLACE.toString(), code}; - String value = "42"; - CompletableFuture testResponse = new CompletableFuture<>(); + String key = "testKey"; + String[] elements = new String[] {"value1", "value2"}; + String[] args = new String[] {key, "value1", "value2"}; + Long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(FunctionLoad), eq(args), any())) + when(commandManager.submitNewCommand(eq(LPushX), eq(args), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.functionLoad(code, true); - String payload = response.get(); + CompletableFuture response = service.lpushx(key, elements); + Long payload = response.get(); // verify assertEquals(testResponse, response); @@ -5168,21 +6421,23 @@ public void functionLoad_with_replace_returns_success() { @SneakyThrows @Test - public void functionList_returns_success() { + public void lpushx_binary_returns_success() { // setup - String[] args = new String[0]; - @SuppressWarnings("unchecked") - Map[] value = new Map[0]; - CompletableFuture[]> testResponse = new CompletableFuture<>(); + GlideString key = gs("testKey"); + GlideString[] elements = new GlideString[] {gs("value1"), gs("value2")}; + GlideString[] args = new GlideString[] {key, gs("value1"), gs("value2")}; + Long value = 2L; + + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.[]>submitNewCommand(eq(FunctionList), eq(args), any())) + when(commandManager.submitNewCommand(eq(LPushX), eq(args), any())) .thenReturn(testResponse); // exercise - CompletableFuture[]> response = service.functionList(false); - Map[] payload = response.get(); + CompletableFuture response = service.lpushx(key, elements); + Long payload = response.get(); // verify assertEquals(testResponse, response); @@ -5191,22 +6446,23 @@ public void 
functionList_returns_success() { @SneakyThrows @Test - public void functionList_with_pattern_returns_success() { + public void brpop_returns_success() { // setup - String pattern = "*"; - String[] args = new String[] {LIBRARY_NAME_REDIS_API, pattern, WITH_CODE_REDIS_API}; - @SuppressWarnings("unchecked") - Map[] value = new Map[0]; - CompletableFuture[]> testResponse = new CompletableFuture<>(); + String key = "key"; + double timeout = 0.5; + String[] arguments = new String[] {key, "0.5"}; + String[] value = new String[] {"key", "value"}; + + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.[]>submitNewCommand(eq(FunctionList), eq(args), any())) + when(commandManager.submitNewCommand(eq(BRPop), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture[]> response = service.functionList(pattern, true); - Map[] payload = response.get(); + CompletableFuture response = service.brpop(new String[] {key}, timeout); + String[] payload = response.get(); // verify assertEquals(testResponse, response); @@ -5215,274 +6471,271 @@ public void functionList_with_pattern_returns_success() { @SneakyThrows @Test - public void functionFlush_returns_success() { + public void pfadd_returns_success() { // setup - String[] args = new String[0]; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(OK); + String key = "testKey"; + String[] elements = new String[] {"a", "b", "c"}; + String[] arguments = new String[] {key, "a", "b", "c"}; + Long value = 1L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(FunctionFlush), eq(args), any())) + when(commandManager.submitNewCommand(eq(PfAdd), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.functionFlush(); - String payload = 
response.get(); + CompletableFuture response = service.pfadd(key, elements); + Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(OK, payload); + assertEquals(value, payload); } @SneakyThrows @Test - public void functionFlush_with_mode_returns_success() { + public void pfcount_returns_success() { // setup - FlushMode mode = ASYNC; - String[] args = new String[] {mode.toString()}; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(OK); + String[] keys = new String[] {"a", "b", "c"}; + Long value = 1L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(FunctionFlush), eq(args), any())) + when(commandManager.submitNewCommand(eq(PfCount), eq(keys), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.functionFlush(mode); - String payload = response.get(); + CompletableFuture response = service.pfcount(keys); + Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(OK, payload); + assertEquals(value, payload); + assertEquals(payload, response.get()); } @SneakyThrows @Test - public void functionDelete_returns_success() { + public void pfmerge_returns_success() { // setup - String libName = "GLIDE"; - String[] args = new String[] {libName}; + String destKey = "testKey"; + String[] sourceKeys = new String[] {"a", "b", "c"}; + String[] arguments = new String[] {destKey, "a", "b", "c"}; + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(FunctionDelete), eq(args), any())) + when(commandManager.submitNewCommand(eq(PfMerge), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.functionDelete(libName); - String payload = response.get(); + CompletableFuture 
response = service.pfmerge(destKey, sourceKeys); // verify assertEquals(testResponse, response); - assertEquals(OK, payload); + assertEquals(OK, response.get()); } @SneakyThrows @Test - public void fcall_with_keys_and_args_returns_success() { + public void objectEncoding_returns_success() { // setup - String function = "func"; - String[] keys = new String[] {"key1", "key2"}; - String[] arguments = new String[] {"1", "2"}; - String[] args = new String[] {function, "2", "key1", "key2", "1", "2"}; - Object value = "42"; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + String key = "testKey"; + String encoding = "testEncoding"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(encoding); // match on protobuf request - when(commandManager.submitNewCommand(eq(FCall), eq(args), any())).thenReturn(testResponse); + when(commandManager.submitNewCommand(eq(ObjectEncoding), eq(new String[] {key}), any())) + .thenReturn(testResponse); // exercise - CompletableFuture response = service.fcall(function, keys, arguments); - Object payload = response.get(); + CompletableFuture response = service.objectEncoding(key); + String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(encoding, payload); } @SneakyThrows @Test - public void fcall_returns_success() { + public void objectEncoding_binary_returns_success() { // setup - String function = "func"; - String[] args = new String[] {function, "0"}; - Object value = "42"; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + GlideString key = gs("testKey"); + String encoding = "testEncoding"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(encoding); // match on protobuf request - when(commandManager.submitNewCommand(eq(FCall), eq(args), any())).thenReturn(testResponse); + when(commandManager.submitNewCommand( 
+ eq(ObjectEncoding), eq(new GlideString[] {key}), any())) + .thenReturn(testResponse); // exercise - CompletableFuture response = service.fcall(function); - Object payload = response.get(); + CompletableFuture response = service.objectEncoding(key); + String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(encoding, payload); } @SneakyThrows @Test - public void functionKill_returns_success() { + public void objectFreq_returns_success() { // setup - String[] args = new String[0]; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(OK); + String key = "testKey"; + Long frequency = 0L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(frequency); // match on protobuf request - when(commandManager.submitNewCommand(eq(FunctionKill), eq(args), any())) + when(commandManager.submitNewCommand(eq(ObjectFreq), eq(new String[] {key}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.functionKill(); - String payload = response.get(); + CompletableFuture response = service.objectFreq(key); + Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(OK, payload); + assertEquals(frequency, payload); } @SneakyThrows @Test - public void functionStats_returns_success() { + public void objectFreq_binary_returns_success() { // setup - String[] args = new String[0]; - Map> value = Map.of("1", Map.of("2", 2)); - CompletableFuture>> testResponse = new CompletableFuture<>(); - testResponse.complete(value); + GlideString key = gs("testKey"); + Long frequency = 0L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(frequency); // match on protobuf request - when(commandManager.>>submitNewCommand( - eq(FunctionStats), eq(args), any())) + when(commandManager.submitNewCommand(eq(ObjectFreq), eq(new GlideString[] {key}), any())) 
.thenReturn(testResponse); // exercise - CompletableFuture>> response = service.functionStats(); - Map> payload = response.get(); + CompletableFuture response = service.objectFreq(key); + Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(frequency, payload); } @SneakyThrows @Test - public void bitcount_returns_success() { + public void objectIdletime_returns_success() { // setup String key = "testKey"; - Long bitcount = 1L; + Long idletime = 0L; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(bitcount); + testResponse.complete(idletime); // match on protobuf request - when(commandManager.submitNewCommand(eq(BitCount), eq(new String[] {key}), any())) + when(commandManager.submitNewCommand(eq(ObjectIdleTime), eq(new String[] {key}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.bitcount(key); + CompletableFuture response = service.objectIdletime(key); Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(1L, payload); - assertEquals(bitcount, payload); + assertEquals(idletime, payload); } @SneakyThrows @Test - public void bitcount_indices_returns_success() { + public void objectIdletime_binary_returns_success() { // setup - String key = "testKey"; - Long bitcount = 1L; + GlideString key = gs("testKey"); + Long idletime = 0L; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(bitcount); + testResponse.complete(idletime); // match on protobuf request when(commandManager.submitNewCommand( - eq(BitCount), eq(new String[] {key, "1", "2"}), any())) + eq(ObjectIdleTime), eq(new GlideString[] {key}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.bitcount(key, 1, 2); + CompletableFuture response = service.objectIdletime(key); Long payload = response.get(); // verify assertEquals(testResponse, response); - 
assertEquals(bitcount, payload); + assertEquals(idletime, payload); } @SneakyThrows @Test - public void bitcount_indices_with_option_returns_success() { + public void objectRefcount_returns_success() { // setup String key = "testKey"; - Long bitcount = 1L; + Long refcount = 0L; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(bitcount); + testResponse.complete(refcount); // match on protobuf request - when(commandManager.submitNewCommand( - eq(BitCount), eq(new String[] {key, "1", "2", "BIT"}), any())) + when(commandManager.submitNewCommand(eq(ObjectRefCount), eq(new String[] {key}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.bitcount(key, 1, 2, BitmapIndexType.BIT); + CompletableFuture response = service.objectRefcount(key); Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(bitcount, payload); + assertEquals(refcount, payload); } @SneakyThrows @Test - public void setbit_returns_success() { + public void objectRefcount_binary_returns_success() { // setup - String key = "testKey"; - Long value = 1L; + GlideString key = gs("testKey"); + Long refcount = 0L; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + testResponse.complete(refcount); // match on protobuf request - when(commandManager.submitNewCommand(eq(SetBit), eq(new String[] {key, "8", "1"}), any())) + when(commandManager.submitNewCommand( + eq(ObjectRefCount), eq(new GlideString[] {key}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.setbit(key, 8, 1); + CompletableFuture response = service.objectRefcount(key); Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(refcount, payload); } @SneakyThrows @Test - public void blmpop_returns_success() { + public void touch_returns_success() { // setup - String key = "testKey"; - String key2 = 
"testKey2"; - String[] keys = {key, key2}; - ListDirection listDirection = ListDirection.LEFT; - double timeout = 0.1; - String[] arguments = - new String[] {Double.toString(timeout), "2", key, key2, listDirection.toString()}; - Map value = Map.of(key, new String[] {"five"}); - - CompletableFuture> testResponse = new CompletableFuture<>(); + String[] keys = new String[] {"testKey1", "testKey2"}; + Long value = 2L; + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.>submitNewCommand(eq(BLMPop), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Touch), eq(keys), any())) .thenReturn(testResponse); // exercise - CompletableFuture> response = - service.blmpop(keys, listDirection, timeout); - Map payload = response.get(); + CompletableFuture response = service.touch(keys); + Long payload = response.get(); // verify assertEquals(testResponse, response); @@ -5491,37 +6744,26 @@ public void blmpop_returns_success() { @SneakyThrows @Test - public void blmpop_with_count_returns_success() { + public void geoadd_returns_success() { // setup String key = "testKey"; - String key2 = "testKey2"; - String[] keys = {key, key2}; - ListDirection listDirection = ListDirection.LEFT; - long count = 1L; - double timeout = 0.1; + Map membersToGeoSpatialData = new LinkedHashMap<>(); + membersToGeoSpatialData.put("Catania", new GeospatialData(15.087269, 40)); + membersToGeoSpatialData.put("Palermo", new GeospatialData(13.361389, 38.115556)); String[] arguments = - new String[] { - Double.toString(timeout), - "2", - key, - key2, - listDirection.toString(), - COUNT_FOR_LIST_REDIS_API, - Long.toString(count) - }; - Map value = Map.of(key, new String[] {"five"}); + new String[] {key, "15.087269", "40.0", "Catania", "13.361389", "38.115556", "Palermo"}; + Long value = 1L; - CompletableFuture> testResponse = new CompletableFuture<>(); + CompletableFuture testResponse = new 
CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.>submitNewCommand(eq(BLMPop), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(GeoAdd), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture> response = - service.blmpop(keys, listDirection, count, timeout); - Map payload = response.get(); + CompletableFuture response = service.geoadd(key, membersToGeoSpatialData); + Long payload = response.get(); // verify assertEquals(testResponse, response); @@ -5530,181 +6772,184 @@ public void blmpop_with_count_returns_success() { @SneakyThrows @Test - public void getbit_returns_success() { + public void geoadd_with_options_returns_success() { // setup String key = "testKey"; - Long bit = 1L; + Map membersToGeoSpatialData = new LinkedHashMap<>(); + membersToGeoSpatialData.put("Catania", new GeospatialData(15.087269, 40)); + membersToGeoSpatialData.put("Palermo", new GeospatialData(13.361389, 38.115556)); + GeoAddOptions options = new GeoAddOptions(ConditionalChange.ONLY_IF_EXISTS, true); + String[] arguments = + new String[] { + key, + ConditionalChange.ONLY_IF_EXISTS.getRedisApi(), + CHANGED_REDIS_API, + "15.087269", + "40.0", + "Catania", + "13.361389", + "38.115556", + "Palermo" + }; + Long value = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(bit); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(GetBit), eq(new String[] {key, "8"}), any())) + when(commandManager.submitNewCommand(eq(GeoAdd), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.getbit(key, 8); + CompletableFuture response = service.geoadd(key, membersToGeoSpatialData, options); Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(bit, payload); + assertEquals(value, payload); } @SneakyThrows @Test - public void 
bitpos_returns_success() { + public void geopos_returns_success() { // setup String key = "testKey"; - Long bit = 0L; - Long bitPosition = 10L; - String[] arguments = new String[] {key, Long.toString(bit)}; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(bitPosition); + String[] members = {"Catania", "Palermo"}; + String[] arguments = new String[] {key, "Catania", "Palermo"}; + Double[][] value = {{15.087269, 40.0}, {13.361389, 38.115556}}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(BitPos), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(GeoPos), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.bitpos(key, bit); - Long payload = response.get(); + CompletableFuture response = service.geopos(key, members); + Object[] payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(bitPosition, payload); + assertEquals(value, payload); } @SneakyThrows @Test - public void bitpos_with_start_returns_success() { + public void geopos_binary_returns_success() { // setup - String key = "testKey"; - Long bit = 0L; - Long start = 5L; - Long bitPosition = 10L; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(bitPosition); + GlideString key = gs("testKey"); + GlideString[] members = {gs("Catania"), gs("Palermo")}; + GlideString[] arguments = new GlideString[] {key, gs("Catania"), gs("Palermo")}; + Double[][] value = {{15.087269, 40.0}, {13.361389, 38.115556}}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand( - eq(BitPos), eq(new String[] {key, Long.toString(bit), Long.toString(start)}), any())) + when(commandManager.submitNewCommand(eq(GeoPos), eq(arguments), any())) 
.thenReturn(testResponse); // exercise - CompletableFuture response = service.bitpos(key, bit, start); - Long payload = response.get(); + CompletableFuture response = service.geopos(key, members); + Object[] payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(bitPosition, payload); + assertEquals(value, payload); } @SneakyThrows @Test - public void bitpos_with_start_and_end_returns_success() { + public void append() { // setup String key = "testKey"; - Long bit = 0L; - Long start = 5L; - Long end = 10L; - Long bitPosition = 10L; - String[] arguments = - new String[] {key, Long.toString(bit), Long.toString(start), Long.toString(end)}; + String value = "testValue"; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(bitPosition); - - // match on protobuf request - when(commandManager.submitNewCommand(eq(BitPos), eq(arguments), any())) + testResponse.complete(1L); + when(commandManager.submitNewCommand(eq(Append), eq(new String[] {key, value}), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.bitpos(key, bit, start, end); + CompletableFuture response = service.append(key, value); Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(bitPosition, payload); + assertEquals(1L, payload); } @SneakyThrows @Test - public void bitpos_with_start_and_end_and_type_returns_success() { + public void geohash_returns_success() { // setup String key = "testKey"; - Long bit = 0L; - Long start = 5L; - Long end = 10L; - Long bitPosition = 10L; - String[] arguments = - new String[] { - key, - Long.toString(bit), - Long.toString(start), - Long.toString(end), - BitmapIndexType.BIT.toString() - }; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(bitPosition); + String[] members = {"Catania", "Palermo", "NonExisting"}; + String[] arguments = new String[] {key, "Catania", "Palermo", "NonExisting"}; + String[] 
value = {"sqc8b49rny0", "sqdtr74hyu0", null}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(BitPos), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(GeoHash), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.bitpos(key, bit, start, end, BitmapIndexType.BIT); - Long payload = response.get(); + CompletableFuture response = service.geohash(key, members); + Object[] payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(bitPosition, payload); + assertEquals(value, payload); } @SneakyThrows @Test - public void bitop_returns_success() { + public void geodist_returns_success() { // setup - String destination = "destination"; - String[] keys = new String[] {"key1", "key2"}; - Long result = 6L; - BitwiseOperation bitwiseAnd = BitwiseOperation.AND; - String[] arguments = concatenateArrays(new String[] {bitwiseAnd.toString(), destination}, keys); - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(result); + String key = "testKey"; + String member1 = "Catania"; + String member2 = "Palermo"; + String[] arguments = new String[] {key, member1, member2}; + Double value = 166274.1516; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(BitOp), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(GeoDist), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.bitop(bitwiseAnd, destination, keys); - Long payload = response.get(); + CompletableFuture response = service.geodist(key, member1, member2); + Double payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(result, payload); + assertEquals(value, payload); } 
@SneakyThrows @Test - public void lmpop_returns_success() { + public void geodist_binary_returns_success() { // setup - String key = "testKey"; - String key2 = "testKey2"; - String[] keys = {key, key2}; - ListDirection listDirection = ListDirection.LEFT; - String[] arguments = new String[] {"2", key, key2, listDirection.toString()}; - Map value = Map.of(key, new String[] {"five"}); + GlideString key = gs("testKey"); + GlideString member1 = gs("Catania"); + GlideString member2 = gs("Palermo"); + GlideString[] arguments = new GlideString[] {key, member1, member2}; + Double value = 166274.1516; - CompletableFuture> testResponse = new CompletableFuture<>(); + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.>submitNewCommand(eq(LMPop), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(GeoDist), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture> response = service.lmpop(keys, listDirection); - Map payload = response.get(); + CompletableFuture response = service.geodist(key, member1, member2); + Double payload = response.get(); // verify assertEquals(testResponse, response); @@ -5713,29 +6958,25 @@ public void lmpop_returns_success() { @SneakyThrows @Test - public void lmpop_with_count_returns_success() { + public void geodist_with_metrics_returns_success() { // setup String key = "testKey"; - String key2 = "testKey2"; - String[] keys = {key, key2}; - ListDirection listDirection = ListDirection.LEFT; - long count = 1L; - String[] arguments = - new String[] { - "2", key, key2, listDirection.toString(), COUNT_FOR_LIST_REDIS_API, Long.toString(count) - }; - Map value = Map.of(key, new String[] {"five"}); + String member1 = "Catania"; + String member2 = "Palermo"; + GeoUnit geoUnit = GeoUnit.KILOMETERS; + String[] arguments = new String[] {key, member1, member2, GeoUnit.KILOMETERS.getRedisApi()}; + Double value = 166.2742; - 
CompletableFuture> testResponse = new CompletableFuture<>(); + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.>submitNewCommand(eq(LMPop), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(GeoDist), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture> response = service.lmpop(keys, listDirection, count); - Map payload = response.get(); + CompletableFuture response = service.geodist(key, member1, member2, geoUnit); + Double payload = response.get(); // verify assertEquals(testResponse, response); @@ -5744,24 +6985,26 @@ public void lmpop_with_count_returns_success() { @SneakyThrows @Test - public void lmove_returns_success() { + public void geodist_with_metrics_binary_returns_success() { // setup - String key1 = "testKey"; - String key2 = "testKey2"; - ListDirection wherefrom = ListDirection.LEFT; - ListDirection whereto = ListDirection.RIGHT; - String[] arguments = new String[] {key1, key2, wherefrom.toString(), whereto.toString()}; - String value = "one"; - CompletableFuture testResponse = new CompletableFuture<>(); + GlideString key = gs("testKey"); + GlideString member1 = gs("Catania"); + GlideString member2 = gs("Palermo"); + GeoUnit geoUnit = GeoUnit.KILOMETERS; + GlideString[] arguments = + new GlideString[] {key, member1, member2, gs(GeoUnit.KILOMETERS.getRedisApi())}; + Double value = 166.2742; + + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(LMove), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(GeoDist), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.lmove(key1, key2, wherefrom, whereto); - String payload = response.get(); + CompletableFuture response = service.geodist(key, member1, member2, geoUnit); + Double payload = 
response.get(); // verify assertEquals(testResponse, response); @@ -5770,17 +7013,976 @@ public void lmove_returns_success() { @SneakyThrows @Test - public void lset_returns_success() { + public void functionLoad_returns_success() { // setup - String key = "testKey"; - long index = 0; - String element = "two"; - String[] arguments = new String[] {key, "0", element}; + String code = "The best code ever"; + String[] args = new String[] {code}; + String value = "42"; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(OK); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(LSet), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(FunctionLoad), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionLoad(code, false); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void functionLoad_with_replace_returns_success() { + // setup + String code = "The best code ever"; + String[] args = new String[] {FunctionLoadOptions.REPLACE.toString(), code}; + String value = "42"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionLoad), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionLoad(code, true); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void functionList_returns_success() { + // setup + String[] args = new String[0]; + @SuppressWarnings("unchecked") + Map[] value = new Map[0]; + CompletableFuture[]> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + 
when(commandManager.[]>submitNewCommand(eq(FunctionList), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture[]> response = service.functionList(false); + Map[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void functionList_with_pattern_returns_success() { + // setup + String pattern = "*"; + String[] args = new String[] {LIBRARY_NAME_REDIS_API, pattern, WITH_CODE_REDIS_API}; + @SuppressWarnings("unchecked") + Map[] value = new Map[0]; + CompletableFuture[]> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.[]>submitNewCommand(eq(FunctionList), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture[]> response = service.functionList(pattern, true); + Map[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void functionFlush_returns_success() { + // setup + String[] args = new String[0]; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionFlush), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionFlush(); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void functionFlush_with_mode_returns_success() { + // setup + FlushMode mode = ASYNC; + String[] args = new String[] {mode.toString()}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionFlush), eq(args), any())) + .thenReturn(testResponse); + + // exercise + 
CompletableFuture response = service.functionFlush(mode); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void functionDelete_returns_success() { + // setup + String libName = "GLIDE"; + String[] args = new String[] {libName}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionDelete), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionDelete(libName); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void fcall_with_keys_and_args_returns_success() { + // setup + String function = "func"; + String[] keys = new String[] {"key1", "key2"}; + String[] arguments = new String[] {"1", "2"}; + String[] args = new String[] {function, "2", "key1", "key2", "1", "2"}; + Object value = "42"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FCall), eq(args), any())).thenReturn(testResponse); + + // exercise + CompletableFuture response = service.fcall(function, keys, arguments); + Object payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void fcall_returns_success() { + // setup + String function = "func"; + String[] args = new String[] {function, "0"}; + Object value = "42"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FCall), eq(args), any())).thenReturn(testResponse); + + // exercise + CompletableFuture response = service.fcall(function); + Object 
payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void fcallReadOnly_with_keys_and_args_returns_success() { + // setup + String function = "func"; + String[] keys = new String[] {"key1", "key2"}; + String[] arguments = new String[] {"1", "2"}; + String[] args = new String[] {function, "2", "key1", "key2", "1", "2"}; + Object value = "42"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FCallReadOnly), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.fcallReadOnly(function, keys, arguments); + Object payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void fcallReadOnly_returns_success() { + // setup + String function = "func"; + String[] args = new String[] {function, "0"}; + Object value = "42"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FCallReadOnly), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.fcallReadOnly(function); + Object payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void functionKill_returns_success() { + // setup + String[] args = new String[0]; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionKill), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionKill(); + String payload = response.get(); + + // verify + 
assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void functionStats_returns_success() { + // setup + String[] args = new String[0]; + Map> value = Map.of("1", Map.of("2", 2)); + CompletableFuture>> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>>submitNewCommand( + eq(FunctionStats), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture>> response = service.functionStats(); + Map> payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void functionDump_returns_success() { + // setup + byte[] value = new byte[] {42}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionDump), eq(new GlideString[0]), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionDump(); + byte[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void functionRestore_returns_success() { + // setup + byte[] data = new byte[] {42}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(FunctionRestore), eq(new GlideString[] {gs(data)}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionRestore(data); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void functionRestore_with_policy_returns_success() { + // setup + byte[] data = new byte[] {42}; + GlideString[] args = {gs(data), 
gs(FunctionRestorePolicy.FLUSH.toString())}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionRestore), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionRestore(data, FunctionRestorePolicy.FLUSH); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void bitcount_returns_success() { + // setup + String key = "testKey"; + Long bitcount = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitcount); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BitCount), eq(new String[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitcount(key); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(1L, payload); + assertEquals(bitcount, payload); + } + + @SneakyThrows + @Test + public void bitcount_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + Long bitcount = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitcount); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BitCount), eq(new GlideString[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitcount(key); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(1L, payload); + assertEquals(bitcount, payload); + } + + @SneakyThrows + @Test + public void bitcount_indices_returns_success() { + // setup + String key = "testKey"; + Long bitcount = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitcount); + + // match on protobuf request + 
when(commandManager.submitNewCommand( + eq(BitCount), eq(new String[] {key, "1", "2"}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitcount(key, 1, 2); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bitcount, payload); + } + + @SneakyThrows + @Test + public void bitcount_indices_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + Long bitcount = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitcount); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(BitCount), eq(new GlideString[] {key, gs("1"), gs("2")}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitcount(key, 1, 2); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bitcount, payload); + } + + @SneakyThrows + @Test + public void bitcount_indices_with_option_returns_success() { + // setup + String key = "testKey"; + Long bitcount = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitcount); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(BitCount), eq(new String[] {key, "1", "2", "BIT"}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitcount(key, 1, 2, BitmapIndexType.BIT); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bitcount, payload); + } + + @SneakyThrows + @Test + public void bitcount_indices_with_option_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + Long bitcount = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitcount); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(BitCount), eq(new GlideString[] {key, gs("1"), gs("2"), gs("BIT")}), 
any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitcount(key, 1, 2, BitmapIndexType.BIT); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bitcount, payload); + } + + @SneakyThrows + @Test + public void setbit_returns_success() { + // setup + String key = "testKey"; + Long value = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SetBit), eq(new String[] {key, "8", "1"}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.setbit(key, 8, 1); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void setbit_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + Long value = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(SetBit), eq(new GlideString[] {key, gs("8"), gs("1")}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.setbit(key, 8, 1); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void blmpop_returns_success() { + // setup + String key = "testKey"; + String key2 = "testKey2"; + String[] keys = {key, key2}; + ListDirection listDirection = ListDirection.LEFT; + double timeout = 0.1; + String[] arguments = + new String[] {Double.toString(timeout), "2", key, key2, listDirection.toString()}; + Map value = Map.of(key, new String[] {"five"}); + + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + 
when(commandManager.>submitNewCommand(eq(BLMPop), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = + service.blmpop(keys, listDirection, timeout); + Map payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void blmpop_with_count_returns_success() { + // setup + String key = "testKey"; + String key2 = "testKey2"; + String[] keys = {key, key2}; + ListDirection listDirection = ListDirection.LEFT; + long count = 1L; + double timeout = 0.1; + String[] arguments = + new String[] { + Double.toString(timeout), + "2", + key, + key2, + listDirection.toString(), + COUNT_FOR_LIST_REDIS_API, + Long.toString(count) + }; + Map value = Map.of(key, new String[] {"five"}); + + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>submitNewCommand(eq(BLMPop), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = + service.blmpop(keys, listDirection, count, timeout); + Map payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void getbit_returns_success() { + // setup + String key = "testKey"; + Long bit = 1L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bit); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(GetBit), eq(new String[] {key, "8"}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.getbit(key, 8); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bit, payload); + } + + @SneakyThrows + @Test + public void getbit_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + Long bit = 1L; + CompletableFuture testResponse = 
new CompletableFuture<>(); + testResponse.complete(bit); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(GetBit), eq(new GlideString[] {key, gs("8")}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.getbit(key, 8); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bit, payload); + } + + @SneakyThrows + @Test + public void bitpos_returns_success() { + // setup + String key = "testKey"; + Long bit = 0L; + Long bitPosition = 10L; + String[] arguments = new String[] {key, Long.toString(bit)}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitPosition); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BitPos), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitpos(key, bit); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bitPosition, payload); + } + + @SneakyThrows + @Test + public void bitpos_with_start_returns_success() { + // setup + String key = "testKey"; + Long bit = 0L; + Long start = 5L; + Long bitPosition = 10L; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitPosition); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(BitPos), eq(new String[] {key, Long.toString(bit), Long.toString(start)}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitpos(key, bit, start); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bitPosition, payload); + } + + @SneakyThrows + @Test + public void bitpos_with_start_and_end_returns_success() { + // setup + String key = "testKey"; + Long bit = 0L; + Long start = 5L; + Long end = 10L; + Long bitPosition = 10L; + String[] arguments = + new String[] {key, 
Long.toString(bit), Long.toString(start), Long.toString(end)}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitPosition); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BitPos), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitpos(key, bit, start, end); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bitPosition, payload); + } + + @SneakyThrows + @Test + public void bitpos_with_start_and_end_and_type_returns_success() { + // setup + String key = "testKey"; + Long bit = 0L; + Long start = 5L; + Long end = 10L; + Long bitPosition = 10L; + String[] arguments = + new String[] { + key, + Long.toString(bit), + Long.toString(start), + Long.toString(end), + BitmapIndexType.BIT.toString() + }; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitPosition); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BitPos), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitpos(key, bit, start, end, BitmapIndexType.BIT); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bitPosition, payload); + } + + @SneakyThrows + @Test + public void bitpos_with_start_and_end_and_type_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + Long bit = 0L; + Long start = 5L; + Long end = 10L; + Long bitPosition = 10L; + GlideString[] arguments = + new GlideString[] { + key, + gs(Long.toString(bit)), + gs(Long.toString(start)), + gs(Long.toString(end)), + gs(BitmapIndexType.BIT.toString()) + }; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(bitPosition); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BitPos), eq(arguments), any())) + .thenReturn(testResponse); + + // 
exercise + CompletableFuture response = service.bitpos(key, bit, start, end, BitmapIndexType.BIT); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(bitPosition, payload); + } + + @SneakyThrows + @Test + public void bitop_returns_success() { + // setup + String destination = "destination"; + String[] keys = new String[] {"key1", "key2"}; + Long result = 6L; + BitwiseOperation bitwiseAnd = BitwiseOperation.AND; + String[] arguments = concatenateArrays(new String[] {bitwiseAnd.toString(), destination}, keys); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BitOp), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitop(bitwiseAnd, destination, keys); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(result, payload); + } + + @SneakyThrows + @Test + public void bitop_bianry_returns_success() { + // setup + GlideString destination = gs("destination"); + GlideString[] keys = new GlideString[] {gs("key1"), gs("key2")}; + Long result = 6L; + BitwiseOperation bitwiseAnd = BitwiseOperation.AND; + GlideString[] arguments = + concatenateArrays(new GlideString[] {gs(bitwiseAnd.toString()), destination}, keys); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BitOp), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.bitop(bitwiseAnd, destination, keys); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(result, payload); + } + + @SneakyThrows + @Test + public void lmpop_returns_success() { + // setup + String key = "testKey"; + String key2 = "testKey2"; + String[] keys = 
{key, key2}; + ListDirection listDirection = ListDirection.LEFT; + String[] arguments = new String[] {"2", key, key2, listDirection.toString()}; + Map value = Map.of(key, new String[] {"five"}); + + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>submitNewCommand(eq(LMPop), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = service.lmpop(keys, listDirection); + Map payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void lmpop_with_count_returns_success() { + // setup + String key = "testKey"; + String key2 = "testKey2"; + String[] keys = {key, key2}; + ListDirection listDirection = ListDirection.LEFT; + long count = 1L; + String[] arguments = + new String[] { + "2", key, key2, listDirection.toString(), COUNT_FOR_LIST_REDIS_API, Long.toString(count) + }; + Map value = Map.of(key, new String[] {"five"}); + + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>submitNewCommand(eq(LMPop), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = service.lmpop(keys, listDirection, count); + Map payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void lmove_returns_success() { + // setup + String key1 = "testKey"; + String key2 = "testKey2"; + ListDirection wherefrom = ListDirection.LEFT; + ListDirection whereto = ListDirection.RIGHT; + String[] arguments = new String[] {key1, key2, wherefrom.toString(), whereto.toString()}; + String value = "one"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + 
when(commandManager.submitNewCommand(eq(LMove), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.lmove(key1, key2, wherefrom, whereto); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void lset_returns_success() { + // setup + String key = "testKey"; + long index = 0; + String element = "two"; + String[] arguments = new String[] {key, "0", element}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(LSet), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.lset(key, index, element); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void lset_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long index = 0; + GlideString element = gs("two"); + GlideString[] arguments = new GlideString[] {key, gs("0"), element}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(LSet), eq(arguments), any())) .thenReturn(testResponse); // exercise @@ -5789,29 +7991,495 @@ public void lset_returns_success() { // verify assertEquals(testResponse, response); - assertEquals(OK, payload); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void blmove_returns_success() { + // setup + String key1 = "testKey"; + String key2 = "testKey2"; + ListDirection wherefrom = ListDirection.LEFT; + ListDirection whereto = ListDirection.RIGHT; + String[] arguments = new String[] {key1, key2, wherefrom.toString(), whereto.toString(), "0.1"}; + String value = "one"; + + CompletableFuture testResponse = new 
CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BLMove), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.blmove(key1, key2, wherefrom, whereto, 0.1); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void sintercard_returns_success() { + // setup + String key1 = "testKey"; + String key2 = "testKey2"; + String[] arguments = new String[] {"2", key1, key2}; + Long value = 1L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SInterCard), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sintercard(new String[] {key1, key2}); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void sintercard_binary_returns_success() { + // setup + GlideString key1 = gs("testKey"); + GlideString key2 = gs("testKey2"); + GlideString[] arguments = new GlideString[] {gs("2"), key1, key2}; + Long value = 1L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SInterCard), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sintercard(new GlideString[] {key1, key2}); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void sintercard_with_limit_returns_success() { + // setup + String key1 = "testKey"; + String key2 = "testKey2"; + long limit = 1L; + String[] arguments = new String[] {"2", 
key1, key2, SET_LIMIT_REDIS_API, "1"}; + Long value = 1L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SInterCard), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sintercard(new String[] {key1, key2}, limit); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void sintercard_with_limit_binary_returns_success() { + // setup + GlideString key1 = gs("testKey"); + GlideString key2 = gs("testKey2"); + long limit = 1L; + GlideString[] arguments = + new GlideString[] {gs("2"), key1, key2, gs(SET_LIMIT_REDIS_API), gs("1")}; + Long value = 1L; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SInterCard), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sintercard(new GlideString[] {key1, key2}, limit); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void srandmember_returns_success() { + // setup + String key = "testKey"; + String[] arguments = new String[] {key}; + String value = "one"; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SRandMember), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.srandmember(key); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void srandmember_with_count_returns_success() { + // 
setup + String key = "testKey"; + long count = 2; + String[] arguments = new String[] {key, Long.toString(count)}; + String[] value = {"one", "two"}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SRandMember), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.srandmember(key, count); + String[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertArrayEquals(value, payload); + } + + @SneakyThrows + @Test + public void spop_returns_success() { + // setup + String key = "testKey"; + String[] arguments = new String[] {key}; + String value = "value"; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SPop), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.spop(key); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void spopCount_returns_success() { + // setup + String key = "testKey"; + long count = 2; + String[] arguments = new String[] {key, Long.toString(count)}; + Set value = Set.of("one", "two"); + + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>submitNewCommand(eq(SPop), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = service.spopCount(key, count); + Set payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void bitfieldReadOnly_returns_success() { + // setup + String key = "testKey"; + Long[] result = new Long[] {7L, 
8L}; + Offset offset = new Offset(1); + OffsetMultiplier offsetMultiplier = new OffsetMultiplier(8); + BitFieldGet subcommand1 = new BitFieldGet(new UnsignedEncoding(4), offset); + BitFieldGet subcommand2 = new BitFieldGet(new SignedEncoding(5), offsetMultiplier); + String[] args = { + key, + BitFieldOptions.GET_COMMAND_STRING, + BitFieldOptions.UNSIGNED_ENCODING_PREFIX.concat("4"), + offset.getOffset(), + BitFieldOptions.GET_COMMAND_STRING, + BitFieldOptions.SIGNED_ENCODING_PREFIX.concat("5"), + offsetMultiplier.getOffset() + }; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(BitFieldReadOnly), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = + service.bitfieldReadOnly(key, new BitFieldReadOnlySubCommands[] {subcommand1, subcommand2}); + Long[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(result, payload); } @SneakyThrows @Test - public void blmove_returns_success() { + public void bitfield_returns_success() { // setup - String key1 = "testKey"; + String key = "testKey"; + Long[] result = new Long[] {7L, 8L, 9L}; + UnsignedEncoding u2 = new UnsignedEncoding(2); + SignedEncoding i8 = new SignedEncoding(8); + Offset offset = new Offset(1); + OffsetMultiplier offsetMultiplier = new OffsetMultiplier(8); + long setValue = 2; + long incrbyValue = 5; + String[] args = + new String[] { + key, + SET_COMMAND_STRING, + u2.getEncoding(), + offset.getOffset(), + Long.toString(setValue), + GET_COMMAND_STRING, + i8.getEncoding(), + offsetMultiplier.getOffset(), + OVERFLOW_COMMAND_STRING, + SAT.toString(), + INCRBY_COMMAND_STRING, + u2.getEncoding(), + offset.getOffset(), + Long.toString(incrbyValue) + }; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + 
when(commandManager.submitNewCommand(eq(BitField), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = + service.bitfield( + key, + new BitFieldSubCommands[] { + new BitFieldSet(u2, offset, setValue), + new BitFieldGet(i8, offsetMultiplier), + new BitFieldOptions.BitFieldOverflow(SAT), + new BitFieldOptions.BitFieldIncrby(u2, offset, incrbyValue), + }); + Long[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(result, payload); + } + + @SneakyThrows + @Test + public void move_returns_success() { + // setup + String key = "testKey"; + long dbIndex = 2L; + String[] arguments = new String[] {key, Long.toString(dbIndex)}; + Boolean value = true; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Move), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.move(key, dbIndex); + Boolean payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void move_binary_returns_success() { + // setup + GlideString key = gs("testKey"); + long dbIndex = 2L; + GlideString[] arguments = new GlideString[] {key, gs(Long.toString(dbIndex))}; + Boolean value = true; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Move), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.move(key, dbIndex); + Boolean payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void copy_returns_success() { + // setup + String source = "testKey1"; + String destination = "testKey2"; + String[] 
arguments = new String[] {source, destination}; + Boolean value = true; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Copy), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.copy(source, destination); + Boolean payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void copy_binary_returns_success() { + // setup + GlideString source = gs("testKey1"); + GlideString destination = gs("testKey2"); + GlideString[] arguments = new GlideString[] {source, destination}; + Boolean value = true; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Copy), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.copy(source, destination); + Boolean payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void copy_with_replace_returns_success() { + // setup + String source = "testKey1"; + String destination = "testKey2"; + String[] arguments = new String[] {source, destination, REPLACE_REDIS_API}; + Boolean value = true; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Copy), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.copy(source, destination, true); + Boolean payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void copy_with_destinationDB_returns_success() { + // 
setup + String source = "testKey1"; + String destination = "testKey2"; + long destinationDB = 1; + String[] arguments = new String[] {source, destination, DB_REDIS_API, "1", REPLACE_REDIS_API}; + Boolean value = true; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Copy), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.copy(source, destination, destinationDB, true); + Boolean payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void lcs() { + // setup + String key1 = "testKey1"; String key2 = "testKey2"; - ListDirection wherefrom = ListDirection.LEFT; - ListDirection whereto = ListDirection.RIGHT; - String[] arguments = new String[] {key1, key2, wherefrom.toString(), whereto.toString(), "0.1"}; - String value = "one"; + String[] arguments = new String[] {key1, key2}; + String value = "foo"; CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(BLMove), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(LCS), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.blmove(key1, key2, wherefrom, whereto, 0.1); + CompletableFuture response = service.lcs(key1, key2); String payload = response.get(); // verify @@ -5821,23 +8489,48 @@ public void blmove_returns_success() { @SneakyThrows @Test - public void sintercard_returns_success() { + public void lcs_with_len_option() { // setup - String key1 = "testKey"; + String key1 = "testKey1"; String key2 = "testKey2"; - String[] arguments = new String[] {"2", key1, key2}; - Long value = 1L; + String[] arguments = new String[] {key1, key2, LEN_REDIS_API}; + Long value = 3L; CompletableFuture 
testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(SInterCard), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(LCS), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.lcsLen(key1, key2); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void lcsIdx() { + // setup + String key1 = "testKey1"; + String key2 = "testKey2"; + String[] arguments = new String[] {key1, key2, IDX_COMMAND_STRING}; + Map value = Map.of("matches", new Long[][][] {{{1L, 3L}, {0L, 2L}}}, "len", 3L); + + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>submitNewCommand(eq(LCS), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.sintercard(new String[] {key1, key2}); - Long payload = response.get(); + CompletableFuture> response = service.lcsIdx(key1, key2); + Map payload = response.get(); // verify assertEquals(testResponse, response); @@ -5846,24 +8539,44 @@ public void sintercard_returns_success() { @SneakyThrows @Test - public void sintercard_with_limit_returns_success() { + public void lcsIdx_throws_NullPointerException() { // setup - String key1 = "testKey"; + Map value = Map.of("missing", new Long[][][] {{{1L, 3L}, {0L, 2L}}}, "len", 3L); + + // exception + RuntimeException runtimeException = + assertThrows(RuntimeException.class, () -> service.handleLcsIdxResponse(value)); + assertInstanceOf(NullPointerException.class, runtimeException); + assertEquals( + "LCS result does not contain the key \"" + LCS_MATCHES_RESULT_KEY + "\"", + runtimeException.getMessage()); + } + + @SneakyThrows + @Test + public void lcsIdx_with_options() { + // setup + String key1 = "testKey1"; String 
key2 = "testKey2"; - long limit = 1L; - String[] arguments = new String[] {"2", key1, key2, SET_LIMIT_REDIS_API, "1"}; - Long value = 1L; + String[] arguments = + new String[] {key1, key2, IDX_COMMAND_STRING, MINMATCHLEN_COMMAND_STRING, "2"}; + Map value = + Map.of( + "matches", + new Object[] {new Object[] {new Long[] {1L, 3L}, new Long[] {0L, 2L}, 3L}}, + "len", + 3L); - CompletableFuture testResponse = new CompletableFuture<>(); + CompletableFuture> testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(SInterCard), eq(arguments), any())) + when(commandManager.>submitNewCommand(eq(LCS), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.sintercard(new String[] {key1, key2}, limit); - Long payload = response.get(); + CompletableFuture> response = service.lcsIdx(key1, key2, 2); + Map payload = response.get(); // verify assertEquals(testResponse, response); @@ -5872,22 +8585,28 @@ public void sintercard_with_limit_returns_success() { @SneakyThrows @Test - public void srandmember_returns_success() { + public void lcsIdxWithMatchLen() { // setup - String key = "testKey"; - String[] arguments = new String[] {key}; - String value = "one"; + String key1 = "testKey1"; + String key2 = "testKey2"; + String[] arguments = new String[] {key1, key2, IDX_COMMAND_STRING, WITHMATCHLEN_COMMAND_STRING}; + Map value = + Map.of( + "matches", + new Object[] {new Object[] {new Long[] {1L, 3L}, new Long[] {0L, 2L}, 3L}}, + "len", + 3L); - CompletableFuture testResponse = new CompletableFuture<>(); + CompletableFuture> testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(SRandMember), eq(arguments), any())) + when(commandManager.>submitNewCommand(eq(LCS), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = 
service.srandmember(key); - String payload = response.get(); + CompletableFuture> response = service.lcsIdxWithMatchLen(key1, key2); + Map payload = response.get(); // verify assertEquals(testResponse, response); @@ -5896,186 +8615,148 @@ public void srandmember_returns_success() { @SneakyThrows @Test - public void srandmember_with_count_returns_success() { + public void lcsIdxWithMatchLen_with_options() { // setup - String key = "testKey"; - long count = 2; - String[] arguments = new String[] {key, Long.toString(count)}; - String[] value = {"one", "two"}; + String key1 = "testKey1"; + String key2 = "testKey2"; + String[] arguments = + new String[] { + key1, + key2, + IDX_COMMAND_STRING, + MINMATCHLEN_COMMAND_STRING, + "2", + WITHMATCHLEN_COMMAND_STRING + }; + Map value = + Map.of( + "matches", + new Object[] {new Object[] {new Long[] {1L, 3L}, new Long[] {0L, 2L}, 3L}}, + "len", + 3L); - CompletableFuture testResponse = new CompletableFuture<>(); + CompletableFuture> testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(SRandMember), eq(arguments), any())) + when(commandManager.>submitNewCommand(eq(LCS), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.srandmember(key, count); - String[] payload = response.get(); + CompletableFuture> response = service.lcsIdxWithMatchLen(key1, key2, 2); + Map payload = response.get(); // verify assertEquals(testResponse, response); - assertArrayEquals(value, payload); + assertEquals(value, payload); } @SneakyThrows @Test - public void spop_returns_success() { + public void watch_returns_success() { // setup - String key = "testKey"; - String[] arguments = new String[] {key}; - String value = "value"; - + String key1 = "testKey1"; + String key2 = "testKey2"; + String[] arguments = new String[] {key1, key2}; CompletableFuture testResponse = new CompletableFuture<>(); - 
testResponse.complete(value); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(SPop), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Watch), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.spop(key); + CompletableFuture response = service.watch(arguments); String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(OK, payload); } @SneakyThrows @Test - public void spopCount_returns_success() { + public void watch_binary_returns_success() { // setup - String key = "testKey"; - long count = 2; - String[] arguments = new String[] {key, Long.toString(count)}; - Set value = Set.of("one", "two"); - - CompletableFuture> testResponse = new CompletableFuture<>(); - testResponse.complete(value); + GlideString key1 = gs("testKey1"); + GlideString key2 = gs("testKey2"); + GlideString[] arguments = new GlideString[] {key1, key2}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); // match on protobuf request - when(commandManager.>submitNewCommand(eq(SPop), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Watch), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture> response = service.spopCount(key, count); - Set payload = response.get(); + CompletableFuture response = service.watch(arguments); + String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(OK, payload); } @SneakyThrows @Test - public void bitfieldReadOnly_returns_success() { + public void unwatch_returns_success() { // setup - String key = "testKey"; - Long[] result = new Long[] {7L, 8L}; - Offset offset = new Offset(1); - OffsetMultiplier offsetMultiplier = new OffsetMultiplier(8); - BitFieldGet subcommand1 = new BitFieldGet(new UnsignedEncoding(4), offset); - BitFieldGet 
subcommand2 = new BitFieldGet(new SignedEncoding(5), offsetMultiplier); - String[] args = { - key, - BitFieldOptions.GET_COMMAND_STRING, - BitFieldOptions.UNSIGNED_ENCODING_PREFIX.concat("4"), - offset.getOffset(), - BitFieldOptions.GET_COMMAND_STRING, - BitFieldOptions.SIGNED_ENCODING_PREFIX.concat("5"), - offsetMultiplier.getOffset() - }; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(result); + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(BitFieldReadOnly), eq(args), any())) + when(commandManager.submitNewCommand(eq(UnWatch), eq(new String[0]), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = - service.bitfieldReadOnly(key, new BitFieldReadOnlySubCommands[] {subcommand1, subcommand2}); - Long[] payload = response.get(); + CompletableFuture response = service.unwatch(); + String payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(result, payload); + assertEquals(OK, payload); } @SneakyThrows @Test - public void bitfield_returns_success() { + public void sunion_returns_success() { // setup - String key = "testKey"; - Long[] result = new Long[] {7L, 8L, 9L}; - UnsignedEncoding u2 = new UnsignedEncoding(2); - SignedEncoding i8 = new SignedEncoding(8); - Offset offset = new Offset(1); - OffsetMultiplier offsetMultiplier = new OffsetMultiplier(8); - long setValue = 2; - long incrbyValue = 5; - String[] args = - new String[] { - key, - SET_COMMAND_STRING, - u2.getEncoding(), - offset.getOffset(), - Long.toString(setValue), - GET_COMMAND_STRING, - i8.getEncoding(), - offsetMultiplier.getOffset(), - OVERFLOW_COMMAND_STRING, - SAT.toString(), - INCRBY_COMMAND_STRING, - u2.getEncoding(), - offset.getOffset(), - Long.toString(incrbyValue) - }; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(result); + String[] keys = new 
String[] {"key1", "key2"}; + Set value = Set.of("1", "2"); + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(BitField), eq(args), any())) + when(commandManager.>submitNewCommand(eq(SUnion), eq(keys), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = - service.bitfield( - key, - new BitFieldSubCommands[] { - new BitFieldSet(u2, offset, setValue), - new BitFieldGet(i8, offsetMultiplier), - new BitFieldOptions.BitFieldOverflow(SAT), - new BitFieldOptions.BitFieldIncrby(u2, offset, incrbyValue), - }); - Long[] payload = response.get(); + CompletableFuture> response = service.sunion(keys); + Set payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(result, payload); + assertEquals(value, payload); } @SneakyThrows @Test - public void move_returns_success() { + public void dump_returns_success() { // setup - String key = "testKey"; - long dbIndex = 2L; - String[] arguments = new String[] {key, Long.toString(dbIndex)}; - Boolean value = true; + GlideString key = gs("testKey"); + byte[] value = "value".getBytes(); + GlideString[] arguments = new GlideString[] {key}; - CompletableFuture testResponse = new CompletableFuture<>(); + CompletableFuture testResponse = new CompletableFuture<>(); testResponse.complete(value); // match on protobuf request - when(commandManager.submitNewCommand(eq(Move), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Dump), eq(arguments), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.move(key, dbIndex); - Boolean payload = response.get(); + CompletableFuture response = service.dump(key); + byte[] payload = response.get(); // verify assertEquals(testResponse, response); @@ -6084,127 +8765,196 @@ public void move_returns_success() { @SneakyThrows @Test - public void copy_returns_success() { + public void 
restore_returns_success() { // setup - String source = "testKey1"; - String destination = "testKey2"; - String[] arguments = new String[] {source, destination}; - Boolean value = true; + GlideString key = gs("testKey"); + long ttl = 0L; + byte[] value = "value".getBytes(); - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + GlideString[] arg = new GlideString[] {key, gs(Long.toString(ttl).getBytes()), gs(value)}; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(Copy), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Restore), eq(arg), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.copy(source, destination); - Boolean payload = response.get(); + CompletableFuture response = service.restore(key, ttl, value); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(OK, response.get()); } @SneakyThrows @Test - public void copy_with_replace_returns_success() { + public void restore_with_restoreOptions_returns_success() { // setup - String source = "testKey1"; - String destination = "testKey2"; - String[] arguments = new String[] {source, destination, REPLACE_REDIS_API}; - Boolean value = true; + GlideString key = gs("testKey"); + long ttl = 0L; + byte[] value = "value".getBytes(); + Long idletime = 10L; + Long frequency = 5L; - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + GlideString[] arg = + new GlideString[] { + key, + gs(Long.toString(ttl)), + gs(value), + gs("REPLACE"), + gs("ABSTTL"), + gs("IDLETIME"), + gs("10"), + gs("FREQ"), + gs("5") + }; + + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); // match on protobuf request - when(commandManager.submitNewCommand(eq(Copy), eq(arguments), any())) + 
when(commandManager.submitNewCommand(eq(Restore), eq(arg), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.copy(source, destination, true); - Boolean payload = response.get(); + CompletableFuture response = + service.restore( + key, + ttl, + value, + RestoreOptions.builder().replace().absttl().idletime(10L).frequency(5L).build()); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(OK, response.get()); } @SneakyThrows @Test - public void copy_with_destinationDB_returns_success() { + public void sort_with_options_returns_success() { // setup - String source = "testKey1"; - String destination = "testKey2"; - long destinationDB = 1; - String[] arguments = new String[] {source, destination, DB_REDIS_API, "1", REPLACE_REDIS_API}; - Boolean value = true; - - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + String[] result = new String[] {"1", "2", "3"}; + String key = "key"; + Long limitOffset = 0L; + Long limitCount = 2L; + String byPattern = "byPattern"; + String getPattern = "getPattern"; + String[] args = + new String[] { + key, + LIMIT_COMMAND_STRING, + limitOffset.toString(), + limitCount.toString(), + DESC.toString(), + ALPHA_COMMAND_STRING, + BY_COMMAND_STRING, + byPattern, + GET_COMMAND_STRING, + getPattern + }; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); // match on protobuf request - when(commandManager.submitNewCommand(eq(Copy), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(Sort), eq(args), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.copy(source, destination, destinationDB, true); - Boolean payload = response.get(); + CompletableFuture response = + service.sort( + key, + SortOptions.builder() + .alpha() + .limit(new SortBaseOptions.Limit(limitOffset, limitCount)) + .orderBy(DESC) + .getPattern(getPattern) + 
.byPattern(byPattern) + .build()); + String[] payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(result, payload); } @SneakyThrows @Test - public void lcs() { + public void sortReadOnly_with_options_returns_success() { // setup - String key1 = "testKey1"; - String key2 = "testKey2"; - String[] arguments = new String[] {key1, key2}; - String value = "foo"; - - CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + String[] result = new String[] {"1", "2", "3"}; + String key = "key"; + String byPattern = "byPattern"; + String getPattern = "getPattern"; + String[] args = + new String[] {key, BY_COMMAND_STRING, byPattern, GET_COMMAND_STRING, getPattern}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); // match on protobuf request - when(commandManager.submitNewCommand(eq(LCS), eq(arguments), any())) + when(commandManager.submitNewCommand(eq(SortReadOnly), eq(args), any())) .thenReturn(testResponse); // exercise - CompletableFuture response = service.lcs(key1, key2); - String payload = response.get(); + CompletableFuture response = + service.sortReadOnly( + key, SortOptions.builder().getPattern(getPattern).byPattern(byPattern).build()); + String[] payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(result, payload); } @SneakyThrows @Test - public void lcs_with_len_option() { + public void sortStore_with_options_returns_success() { // setup - String key1 = "testKey1"; - String key2 = "testKey2"; - String[] arguments = new String[] {key1, key2, LEN_REDIS_API}; - Long value = 3L; - + Long result = 5L; + String key = "key"; + String destKey = "destKey"; + Long limitOffset = 0L; + Long limitCount = 2L; + String byPattern = "byPattern"; + String getPattern = "getPattern"; + String[] args = + new String[] { + key, + LIMIT_COMMAND_STRING, + 
limitOffset.toString(), + limitCount.toString(), + DESC.toString(), + ALPHA_COMMAND_STRING, + BY_COMMAND_STRING, + byPattern, + GET_COMMAND_STRING, + getPattern, + STORE_COMMAND_STRING, + destKey + }; CompletableFuture testResponse = new CompletableFuture<>(); - testResponse.complete(value); + testResponse.complete(result); // match on protobuf request - when(commandManager.submitNewCommand(eq(LCS), eq(arguments), any())) - .thenReturn(testResponse); + when(commandManager.submitNewCommand(eq(Sort), eq(args), any())).thenReturn(testResponse); // exercise - CompletableFuture response = service.lcsLen(key1, key2); + CompletableFuture response = + service.sortStore( + key, + destKey, + SortOptions.builder() + .alpha() + .limit(new SortBaseOptions.Limit(limitOffset, limitCount)) + .orderBy(DESC) + .getPattern(getPattern) + .byPattern(byPattern) + .build()); Long payload = response.get(); // verify assertEquals(testResponse, response); - assertEquals(value, payload); + assertEquals(result, payload); } } diff --git a/java/client/src/test/java/glide/api/RedisClusterClientTest.java b/java/client/src/test/java/glide/api/RedisClusterClientTest.java index 6ed9c5111c..7a82ff5e35 100644 --- a/java/client/src/test/java/glide/api/RedisClusterClientTest.java +++ b/java/client/src/test/java/glide/api/RedisClusterClientTest.java @@ -1,10 +1,18 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api; import static glide.api.BaseClient.OK; import static glide.api.commands.ServerManagementCommands.VERSION_REDIS_API; +import static glide.api.models.GlideString.gs; import static glide.api.models.commands.FlushMode.ASYNC; import static glide.api.models.commands.FlushMode.SYNC; +import static glide.api.models.commands.SortBaseOptions.ALPHA_COMMAND_STRING; +import static glide.api.models.commands.SortBaseOptions.LIMIT_COMMAND_STRING; +import static 
glide.api.models.commands.SortBaseOptions.OrderBy.DESC; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; +import static glide.api.models.commands.SortOptions.ALPHA_COMMAND_STRING; +import static glide.api.models.commands.SortOptions.LIMIT_COMMAND_STRING; +import static glide.api.models.commands.SortOptions.STORE_COMMAND_STRING; import static glide.api.models.commands.function.FunctionListOptions.LIBRARY_NAME_REDIS_API; import static glide.api.models.commands.function.FunctionListOptions.WITH_CODE_REDIS_API; import static glide.api.models.configuration.RequestRoutingConfiguration.SimpleMultiNodeRoute.ALL_NODES; @@ -27,24 +35,36 @@ import static redis_request.RedisRequestOuterClass.RequestType.DBSize; import static redis_request.RedisRequestOuterClass.RequestType.Echo; import static redis_request.RedisRequestOuterClass.RequestType.FCall; +import static redis_request.RedisRequestOuterClass.RequestType.FCallReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.FlushAll; +import static redis_request.RedisRequestOuterClass.RequestType.FlushDB; import static redis_request.RedisRequestOuterClass.RequestType.FunctionDelete; +import static redis_request.RedisRequestOuterClass.RequestType.FunctionDump; import static redis_request.RedisRequestOuterClass.RequestType.FunctionFlush; import static redis_request.RedisRequestOuterClass.RequestType.FunctionKill; import static redis_request.RedisRequestOuterClass.RequestType.FunctionList; import static redis_request.RedisRequestOuterClass.RequestType.FunctionLoad; +import static redis_request.RedisRequestOuterClass.RequestType.FunctionRestore; import static redis_request.RedisRequestOuterClass.RequestType.FunctionStats; import static redis_request.RedisRequestOuterClass.RequestType.Info; import static redis_request.RedisRequestOuterClass.RequestType.LastSave; import static redis_request.RedisRequestOuterClass.RequestType.Lolwut; import static 
redis_request.RedisRequestOuterClass.RequestType.Ping; +import static redis_request.RedisRequestOuterClass.RequestType.RandomKey; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.Time; +import static redis_request.RedisRequestOuterClass.RequestType.UnWatch; import glide.api.models.ClusterTransaction; import glide.api.models.ClusterValue; +import glide.api.models.GlideString; import glide.api.models.commands.FlushMode; import glide.api.models.commands.InfoOptions; +import glide.api.models.commands.SortBaseOptions.Limit; +import glide.api.models.commands.SortClusterOptions; import glide.api.models.commands.function.FunctionLoadOptions; +import glide.api.models.commands.function.FunctionRestorePolicy; import glide.api.models.configuration.RequestRoutingConfiguration.Route; import glide.api.models.configuration.RequestRoutingConfiguration.SingleNodeRoute; import glide.managers.CommandManager; @@ -336,6 +356,28 @@ public void echo_returns_success() { assertEquals(message, echo); } + @SneakyThrows + @Test + public void echo_binary_returns_success() { + // setup + GlideString message = gs("GLIDE FOR REDIS"); + GlideString[] arguments = new GlideString[] {message}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(message); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Echo), eq(arguments), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.echo(message); + GlideString echo = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(message, echo); + } + @SneakyThrows @Test public void echo_with_route_returns_success() { @@ -359,6 +401,29 @@ public void echo_with_route_returns_success() { assertEquals(message, echo); } + @SneakyThrows + @Test + public void 
echo_binary_with_route_returns_success() { + // setup + GlideString message = gs("GLIDE FOR REDIS"); + GlideString[] arguments = new GlideString[] {message}; + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(ClusterValue.ofSingleValue(message)); + + // match on protobuf request + when(commandManager.>submitNewCommand( + eq(Echo), eq(arguments), eq(RANDOM), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = service.echo(message, RANDOM); + GlideString echo = response.get().getSingleValue(); + + // verify + assertEquals(testResponse, response); + assertEquals(message, echo); + } + @SneakyThrows @Test public void info_returns_string() { @@ -918,6 +983,88 @@ public void flushall_with_route_and_mode_returns_success() { assertEquals(OK, payload); } + @SneakyThrows + @Test + public void flushdb_returns_success() { + // setup + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FlushDB), eq(new String[0]), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.flushdb(); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void flushdb_with_mode_returns_success() { + // setup + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(FlushDB), eq(new String[] {SYNC.toString()}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.flushdb(SYNC); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void flushdb_with_route_returns_success() { + // setup + CompletableFuture testResponse = new 
CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FlushDB), eq(new String[0]), eq(RANDOM), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.flushdb(RANDOM); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void flushdb_with_route_and_mode_returns_success() { + // setup + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(FlushDB), eq(new String[] {SYNC.toString()}), eq(RANDOM), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.flushdb(SYNC, RANDOM); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + @SneakyThrows @Test public void lolwut_returns_success() { @@ -1452,6 +1599,46 @@ public void functionDelete_with_route_returns_success() { assertEquals(OK, payload); } + @SneakyThrows + @Test + public void unwatch_returns_success() { + // setup + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(UnWatch), eq(new String[0]), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.unwatch(); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void unwatch_with_route_returns_success() { + // setup + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(UnWatch), eq(new String[0]), eq(RANDOM), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture 
response = service.unwatch(RANDOM); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + @SneakyThrows @Test public void fcall_without_keys_and_without_args_returns_success() { @@ -1546,6 +1733,29 @@ public void fcall_without_keys_and_with_route_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void fcallReadOnly_without_keys_and_without_args_returns_success() { + // setup + String function = "func"; + String[] args = new String[] {function, "0"}; + Object value = "42"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FCallReadOnly), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.fcallReadOnly(function); + Object payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void functionKill_returns_success() { @@ -1614,6 +1824,80 @@ public void functionStats_returns_success() { assertEquals(value, payload); } + @SneakyThrows + @Test + public void fcallReadOnly_without_keys_and_without_args_but_with_route_returns_success() { + // setup + String function = "func"; + String[] args = new String[] {function, "0"}; + ClusterValue value = ClusterValue.ofSingleValue("42"); + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>submitNewCommand( + eq(FCallReadOnly), eq(args), eq(RANDOM), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = service.fcallReadOnly(function, RANDOM); + ClusterValue payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void 
fcallReadOnly_without_keys_returns_success() { + // setup + String function = "func"; + String[] arguments = new String[] {"1", "2"}; + String[] args = new String[] {function, "0", "1", "2"}; + Object value = "42"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FCallReadOnly), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.fcallReadOnly(function, arguments); + Object payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void fcallReadOnly_without_keys_and_with_route_returns_success() { + // setup + String function = "func"; + String[] arguments = new String[] {"1", "2"}; + String[] args = new String[] {function, "0", "1", "2"}; + ClusterValue value = ClusterValue.ofSingleValue("42"); + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>submitNewCommand( + eq(FCallReadOnly), eq(args), eq(RANDOM), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = + service.fcallReadOnly(function, arguments, RANDOM); + ClusterValue payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + @SneakyThrows @Test public void functionStats_with_route_returns_success() { @@ -1639,4 +1923,363 @@ public void functionStats_with_route_returns_success() { assertEquals(testResponse, response); assertEquals(value, payload); } + + @SneakyThrows + @Test + public void functionDump_returns_success() { + // setup + byte[] value = new byte[] {42}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionDump), eq(new 
GlideString[0]), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionDump(); + byte[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void functionDump_with_route_returns_success() { + // setup + ClusterValue value = ClusterValue.of(new byte[] {42}); + CompletableFuture> testResponse = new CompletableFuture<>(); + testResponse.complete(value); + + // match on protobuf request + when(commandManager.>submitNewCommand( + eq(FunctionDump), eq(new GlideString[0]), eq(RANDOM), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture> response = service.functionDump(RANDOM); + ClusterValue payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(value, payload); + } + + @SneakyThrows + @Test + public void functionRestore_returns_success() { + // setup + byte[] data = new byte[] {42}; + GlideString[] args = {gs(data)}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionRestore), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionRestore(data); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void functionRestore_with_policy_returns_success() { + // setup + byte[] data = new byte[] {42}; + GlideString[] args = {gs(data), gs(FunctionRestorePolicy.FLUSH.toString())}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionRestore), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionRestore(data, 
FunctionRestorePolicy.FLUSH); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void functionRestore_with_route_returns_success() { + // setup + byte[] data = new byte[] {42}; + GlideString[] args = {gs(data)}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionRestore), eq(args), eq(RANDOM), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.functionRestore(data, RANDOM); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void functionRestore_with_policy_and_route_returns_success() { + // setup + byte[] data = new byte[] {42}; + GlideString[] args = {gs(data), gs(FunctionRestorePolicy.FLUSH.toString())}; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(OK); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(FunctionRestore), eq(args), eq(RANDOM), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = + service.functionRestore(data, FunctionRestorePolicy.FLUSH, RANDOM); + String payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(OK, payload); + } + + @SneakyThrows + @Test + public void randomKey_with_route() { + // setup + String key1 = "key1"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(key1); + Route route = ALL_NODES; + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(RandomKey), eq(new String[0]), eq(route), any())) + .thenReturn(testResponse); + CompletableFuture response = service.randomKey(route); + + // verify + assertEquals(testResponse, response); + } + + @SneakyThrows + @Test + public 
void randomKey() { + // setup + String key1 = "key1"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(key1); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(RandomKey), eq(new String[0]), any())) + .thenReturn(testResponse); + CompletableFuture response = service.randomKey(); + + // verify + assertEquals(testResponse, response); + } + + @SneakyThrows + @Test + public void sort_returns_success() { + // setup + String[] result = new String[] {"1", "2", "3"}; + String key = "key"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Sort), eq(new String[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sort(key); + String[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(result, payload); + } + + @SneakyThrows + @Test + public void sort_with_options_returns_success() { + // setup + String[] result = new String[] {"1", "2", "3"}; + String key = "key"; + Long limitOffset = 0L; + Long limitCount = 2L; + String[] args = + new String[] { + key, + LIMIT_COMMAND_STRING, + limitOffset.toString(), + limitCount.toString(), + DESC.toString(), + ALPHA_COMMAND_STRING + }; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Sort), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = + service.sort( + key, + SortClusterOptions.builder() + .alpha() + .limit(new Limit(limitOffset, limitCount)) + .orderBy(DESC) + .build()); + String[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(result, payload); + } + + @SneakyThrows + @Test + public void sortReadOnly_returns_success() { + // setup + String[] result = 
new String[] {"1", "2", "3"}; + String key = "key"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SortReadOnly), eq(new String[] {key}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sortReadOnly(key); + String[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(result, payload); + } + + @SneakyThrows + @Test + public void sortReadOnly_with_options_returns_success() { + // setup + String[] result = new String[] {"1", "2", "3"}; + String key = "key"; + Long limitOffset = 0L; + Long limitCount = 2L; + String[] args = + new String[] { + key, + LIMIT_COMMAND_STRING, + limitOffset.toString(), + limitCount.toString(), + DESC.toString(), + ALPHA_COMMAND_STRING + }; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(SortReadOnly), eq(args), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = + service.sortReadOnly( + key, + SortClusterOptions.builder() + .alpha() + .limit(new Limit(limitOffset, limitCount)) + .orderBy(DESC) + .build()); + String[] payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(result, payload); + } + + @SneakyThrows + @Test + public void sortStore_returns_success() { + // setup + Long result = 5L; + String key = "key"; + String destKey = "destKey"; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + when(commandManager.submitNewCommand( + eq(Sort), eq(new String[] {key, STORE_COMMAND_STRING, destKey}), any())) + .thenReturn(testResponse); + + // exercise + CompletableFuture response = service.sortStore(key, destKey); + Long payload = response.get(); + + // verify + 
assertEquals(testResponse, response); + assertEquals(result, payload); + } + + @SneakyThrows + @Test + public void sortStore_with_options_returns_success() { + // setup + Long result = 5L; + String key = "key"; + String destKey = "destKey"; + Long limitOffset = 0L; + Long limitCount = 2L; + String[] args = + new String[] { + key, + LIMIT_COMMAND_STRING, + limitOffset.toString(), + limitCount.toString(), + DESC.toString(), + ALPHA_COMMAND_STRING, + STORE_COMMAND_STRING, + destKey + }; + CompletableFuture testResponse = new CompletableFuture<>(); + testResponse.complete(result); + + // match on protobuf request + when(commandManager.submitNewCommand(eq(Sort), eq(args), any())).thenReturn(testResponse); + + // exercise + CompletableFuture response = + service.sortStore( + key, + destKey, + SortClusterOptions.builder() + .alpha() + .limit(new Limit(limitOffset, limitCount)) + .orderBy(DESC) + .build()); + Long payload = response.get(); + + // verify + assertEquals(testResponse, response); + assertEquals(result, payload); + } } diff --git a/java/client/src/test/java/glide/api/models/ClusterTransactionTests.java b/java/client/src/test/java/glide/api/models/ClusterTransactionTests.java new file mode 100644 index 0000000000..c33a927792 --- /dev/null +++ b/java/client/src/test/java/glide/api/models/ClusterTransactionTests.java @@ -0,0 +1,94 @@ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ +package glide.api.models; + +import static glide.api.models.TransactionTests.buildArgs; +import static glide.api.models.commands.SortBaseOptions.ALPHA_COMMAND_STRING; +import static glide.api.models.commands.SortBaseOptions.LIMIT_COMMAND_STRING; +import static glide.api.models.commands.SortBaseOptions.OrderBy.ASC; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static 
redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; + +import glide.api.models.commands.SortBaseOptions; +import glide.api.models.commands.SortClusterOptions; +import java.util.LinkedList; +import java.util.List; +import java.util.stream.Stream; +import org.apache.commons.lang3.tuple.Pair; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import redis_request.RedisRequestOuterClass; + +public class ClusterTransactionTests { + private static Stream getTransactionBuilders() { + return Stream.of( + Arguments.of(new ClusterTransaction()), Arguments.of(new ClusterTransaction())); + } + + @ParameterizedTest + @MethodSource("getTransactionBuilders") + public void cluster_transaction_builds_protobuf_request(ClusterTransaction transaction) { + List> + results = new LinkedList<>(); + + transaction.sortReadOnly( + "key1", + SortClusterOptions.builder() + .orderBy(ASC) + .alpha() + .limit(new SortBaseOptions.Limit(0L, 1L)) + .build()); + results.add( + Pair.of( + SortReadOnly, + buildArgs( + "key1", LIMIT_COMMAND_STRING, "0", "1", ASC.toString(), ALPHA_COMMAND_STRING))); + + transaction.sort( + "key1", + SortClusterOptions.builder() + .orderBy(ASC) + .alpha() + .limit(new SortBaseOptions.Limit(0L, 1L)) + .build()); + results.add( + Pair.of( + Sort, + buildArgs( + "key1", LIMIT_COMMAND_STRING, "0", "1", ASC.toString(), ALPHA_COMMAND_STRING))); + + transaction.sortStore( + "key1", + "key2", + SortClusterOptions.builder() + .orderBy(ASC) + .alpha() + .limit(new SortBaseOptions.Limit(0L, 1L)) + .build()); + results.add( + Pair.of( + Sort, + buildArgs( + "key1", + LIMIT_COMMAND_STRING, + "0", + "1", + ASC.toString(), + ALPHA_COMMAND_STRING, + STORE_COMMAND_STRING, + "key2"))); + + var protobufTransaction = transaction.getProtobufTransaction().build(); + + for (int idx = 0; idx < protobufTransaction.getCommandsCount(); idx++) { + RedisRequestOuterClass.Command 
protobuf = protobufTransaction.getCommands(idx); + + assertEquals(results.get(idx).getLeft(), protobuf.getRequestType()); + assertEquals( + results.get(idx).getRight().getArgsCount(), protobuf.getArgsArray().getArgsCount()); + assertEquals(results.get(idx).getRight(), protobuf.getArgsArray()); + } + } +} diff --git a/java/client/src/test/java/glide/api/models/ClusterValueTests.java b/java/client/src/test/java/glide/api/models/ClusterValueTests.java index f74ab21494..d27bb1aaba 100644 --- a/java/client/src/test/java/glide/api/models/ClusterValueTests.java +++ b/java/client/src/test/java/glide/api/models/ClusterValueTests.java @@ -1,6 +1,7 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models; +import static glide.api.models.GlideString.gs; import static org.junit.jupiter.api.Assertions.assertAll; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -70,6 +71,24 @@ public void multi_value_ctor() { assertAll( () -> assertTrue(value.hasMultiData()), () -> assertFalse(value.hasSingleData()), - () -> assertNotNull(value.getMultiValue())); + () -> assertNotNull(value.getMultiValue()), + () -> assertTrue(value.getMultiValue().containsKey("config1")), + () -> assertTrue(value.getMultiValue().containsKey("config2"))); + } + + @Test + public void multi_value_binary_ctor() { + var value = + ClusterValue.ofMultiValueBinary( + Map.of(gs("config1"), gs("param1"), gs("config2"), gs("param2"))); + assertAll( + () -> assertTrue(value.hasMultiData()), + () -> assertFalse(value.hasSingleData()), + () -> assertNotNull(value.getMultiValue()), + // ofMultiValueBinary converts the key to a String, but the values are not converted + () -> assertTrue(value.getMultiValue().containsKey("config1")), + () -> assertTrue(value.getMultiValue().get("config1").equals(gs("param1"))), + () -> 
assertTrue(value.getMultiValue().containsKey("config2")), + () -> assertTrue(value.getMultiValue().get("config2").equals(gs("param2")))); } } diff --git a/java/client/src/test/java/glide/api/models/StandaloneTransactionTests.java b/java/client/src/test/java/glide/api/models/StandaloneTransactionTests.java index a3f47e2e61..50331e1d96 100644 --- a/java/client/src/test/java/glide/api/models/StandaloneTransactionTests.java +++ b/java/client/src/test/java/glide/api/models/StandaloneTransactionTests.java @@ -1,14 +1,24 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models; import static glide.api.commands.GenericBaseCommands.REPLACE_REDIS_API; import static glide.api.commands.GenericCommands.DB_REDIS_API; import static glide.api.models.TransactionTests.buildArgs; +import static glide.api.models.commands.SortBaseOptions.ALPHA_COMMAND_STRING; +import static glide.api.models.commands.SortBaseOptions.LIMIT_COMMAND_STRING; +import static glide.api.models.commands.SortBaseOptions.Limit; +import static glide.api.models.commands.SortBaseOptions.OrderBy.DESC; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; +import static glide.api.models.commands.SortOptions.BY_COMMAND_STRING; +import static glide.api.models.commands.SortOptions.GET_COMMAND_STRING; import static org.junit.jupiter.api.Assertions.assertEquals; import static redis_request.RedisRequestOuterClass.RequestType.Copy; import static redis_request.RedisRequestOuterClass.RequestType.Move; import static redis_request.RedisRequestOuterClass.RequestType.Select; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; +import glide.api.models.commands.SortOptions; import java.util.LinkedList; import java.util.List; import org.apache.commons.lang3.tuple.Pair; @@ -29,6 
+39,139 @@ public void standalone_transaction_commands() { transaction.copy("key1", "key2", 1, true); results.add(Pair.of(Copy, buildArgs("key1", "key2", DB_REDIS_API, "1", REPLACE_REDIS_API))); + transaction.sort( + "key1", + SortOptions.builder() + .byPattern("byPattern") + .getPatterns(List.of("getPattern1", "getPattern2")) + .build()); + results.add( + Pair.of( + Sort, + buildArgs( + "key1", + BY_COMMAND_STRING, + "byPattern", + GET_COMMAND_STRING, + "getPattern1", + GET_COMMAND_STRING, + "getPattern2"))); + transaction.sort( + "key1", + SortOptions.builder() + .orderBy(DESC) + .alpha() + .limit(new Limit(0L, 1L)) + .byPattern("byPattern") + .getPatterns(List.of("getPattern1", "getPattern2")) + .build()); + results.add( + Pair.of( + Sort, + buildArgs( + "key1", + LIMIT_COMMAND_STRING, + "0", + "1", + DESC.toString(), + ALPHA_COMMAND_STRING, + BY_COMMAND_STRING, + "byPattern", + GET_COMMAND_STRING, + "getPattern1", + GET_COMMAND_STRING, + "getPattern2"))); + transaction.sortReadOnly( + "key1", + SortOptions.builder() + .byPattern("byPattern") + .getPatterns(List.of("getPattern1", "getPattern2")) + .build()); + results.add( + Pair.of( + SortReadOnly, + buildArgs( + "key1", + BY_COMMAND_STRING, + "byPattern", + GET_COMMAND_STRING, + "getPattern1", + GET_COMMAND_STRING, + "getPattern2"))); + transaction.sortReadOnly( + "key1", + SortOptions.builder() + .orderBy(DESC) + .alpha() + .limit(new Limit(0L, 1L)) + .byPattern("byPattern") + .getPatterns(List.of("getPattern1", "getPattern2")) + .build()); + results.add( + Pair.of( + SortReadOnly, + buildArgs( + "key1", + LIMIT_COMMAND_STRING, + "0", + "1", + DESC.toString(), + ALPHA_COMMAND_STRING, + BY_COMMAND_STRING, + "byPattern", + GET_COMMAND_STRING, + "getPattern1", + GET_COMMAND_STRING, + "getPattern2"))); + transaction.sortStore( + "key1", + "key2", + SortOptions.builder() + .byPattern("byPattern") + .getPatterns(List.of("getPattern1", "getPattern2")) + .build()); + results.add( + Pair.of( + Sort, + buildArgs( + 
"key1", + BY_COMMAND_STRING, + "byPattern", + GET_COMMAND_STRING, + "getPattern1", + GET_COMMAND_STRING, + "getPattern2", + STORE_COMMAND_STRING, + "key2"))); + transaction.sortStore( + "key1", + "key2", + SortOptions.builder() + .orderBy(DESC) + .alpha() + .limit(new Limit(0L, 1L)) + .byPattern("byPattern") + .getPatterns(List.of("getPattern1", "getPattern2")) + .build()); + results.add( + Pair.of( + Sort, + buildArgs( + "key1", + LIMIT_COMMAND_STRING, + "0", + "1", + DESC.toString(), + ALPHA_COMMAND_STRING, + BY_COMMAND_STRING, + "byPattern", + GET_COMMAND_STRING, + "getPattern1", + GET_COMMAND_STRING, + "getPattern2", + STORE_COMMAND_STRING, + "key2"))); + var protobufTransaction = transaction.getProtobufTransaction().build(); for (int idx = 0; idx < protobufTransaction.getCommandsCount(); idx++) { diff --git a/java/client/src/test/java/glide/api/models/TransactionTests.java b/java/client/src/test/java/glide/api/models/TransactionTests.java index 1736643356..a0512764ec 100644 --- a/java/client/src/test/java/glide/api/models/TransactionTests.java +++ b/java/client/src/test/java/glide/api/models/TransactionTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.api.models; import static glide.api.commands.GenericBaseCommands.REPLACE_REDIS_API; @@ -7,6 +7,9 @@ import static glide.api.commands.SortedSetBaseCommands.LIMIT_REDIS_API; import static glide.api.commands.SortedSetBaseCommands.WITH_SCORES_REDIS_API; import static glide.api.commands.SortedSetBaseCommands.WITH_SCORE_REDIS_API; +import static glide.api.commands.StringBaseCommands.IDX_COMMAND_STRING; +import static glide.api.commands.StringBaseCommands.MINMATCHLEN_COMMAND_STRING; +import static glide.api.commands.StringBaseCommands.WITHMATCHLEN_COMMAND_STRING; import static glide.api.models.commands.ExpireOptions.HAS_EXISTING_EXPIRY; import static 
glide.api.models.commands.ExpireOptions.HAS_NO_EXPIRY; import static glide.api.models.commands.ExpireOptions.NEW_EXPIRY_LESS_THAN_CURRENT; @@ -18,15 +21,22 @@ import static glide.api.models.commands.ScoreFilter.MAX; import static glide.api.models.commands.ScoreFilter.MIN; import static glide.api.models.commands.SetOptions.RETURN_OLD_VALUE; +import static glide.api.models.commands.SortBaseOptions.STORE_COMMAND_STRING; import static glide.api.models.commands.WeightAggregateOptions.AGGREGATE_REDIS_API; import static glide.api.models.commands.WeightAggregateOptions.WEIGHTS_REDIS_API; import static glide.api.models.commands.ZAddOptions.UpdateOptions.SCORE_LESS_THAN_CURRENT; import static glide.api.models.commands.function.FunctionListOptions.LIBRARY_NAME_REDIS_API; import static glide.api.models.commands.function.FunctionListOptions.WITH_CODE_REDIS_API; import static glide.api.models.commands.geospatial.GeoAddOptions.CHANGED_REDIS_API; +import static glide.api.models.commands.stream.StreamGroupOptions.ENTRIES_READ_REDIS_API; +import static glide.api.models.commands.stream.StreamGroupOptions.MAKE_STREAM_REDIS_API; +import static glide.api.models.commands.stream.StreamPendingOptions.IDLE_TIME_REDIS_API; +import static glide.api.models.commands.stream.StreamRange.EXCLUSIVE_RANGE_REDIS_API; import static glide.api.models.commands.stream.StreamRange.MAXIMUM_RANGE_REDIS_API; import static glide.api.models.commands.stream.StreamRange.MINIMUM_RANGE_REDIS_API; import static glide.api.models.commands.stream.StreamRange.RANGE_COUNT_REDIS_API; +import static glide.api.models.commands.stream.StreamReadGroupOptions.READ_GROUP_REDIS_API; +import static glide.api.models.commands.stream.StreamReadGroupOptions.READ_NOACK_REDIS_API; import static glide.api.models.commands.stream.StreamReadOptions.READ_BLOCK_REDIS_API; import static glide.api.models.commands.stream.StreamReadOptions.READ_COUNT_REDIS_API; import static 
glide.api.models.commands.stream.StreamReadOptions.READ_STREAMS_REDIS_API; @@ -63,7 +73,9 @@ import static redis_request.RedisRequestOuterClass.RequestType.ExpireAt; import static redis_request.RedisRequestOuterClass.RequestType.ExpireTime; import static redis_request.RedisRequestOuterClass.RequestType.FCall; +import static redis_request.RedisRequestOuterClass.RequestType.FCallReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.FlushAll; +import static redis_request.RedisRequestOuterClass.RequestType.FlushDB; import static redis_request.RedisRequestOuterClass.RequestType.FunctionDelete; import static redis_request.RedisRequestOuterClass.RequestType.FunctionFlush; import static redis_request.RedisRequestOuterClass.RequestType.FunctionList; @@ -76,6 +88,7 @@ import static redis_request.RedisRequestOuterClass.RequestType.Get; import static redis_request.RedisRequestOuterClass.RequestType.GetBit; import static redis_request.RedisRequestOuterClass.RequestType.GetDel; +import static redis_request.RedisRequestOuterClass.RequestType.GetEx; import static redis_request.RedisRequestOuterClass.RequestType.GetRange; import static redis_request.RedisRequestOuterClass.RequestType.HDel; import static redis_request.RedisRequestOuterClass.RequestType.HExists; @@ -130,6 +143,7 @@ import static redis_request.RedisRequestOuterClass.RequestType.RPop; import static redis_request.RedisRequestOuterClass.RequestType.RPush; import static redis_request.RedisRequestOuterClass.RequestType.RPushX; +import static redis_request.RedisRequestOuterClass.RequestType.RandomKey; import static redis_request.RedisRequestOuterClass.RequestType.Rename; import static redis_request.RedisRequestOuterClass.RequestType.RenameNX; import static redis_request.RedisRequestOuterClass.RequestType.SAdd; @@ -146,21 +160,31 @@ import static redis_request.RedisRequestOuterClass.RequestType.SPop; import static redis_request.RedisRequestOuterClass.RequestType.SRandMember; import static 
redis_request.RedisRequestOuterClass.RequestType.SRem; +import static redis_request.RedisRequestOuterClass.RequestType.SUnion; import static redis_request.RedisRequestOuterClass.RequestType.SUnionStore; import static redis_request.RedisRequestOuterClass.RequestType.Set; import static redis_request.RedisRequestOuterClass.RequestType.SetBit; import static redis_request.RedisRequestOuterClass.RequestType.SetRange; +import static redis_request.RedisRequestOuterClass.RequestType.Sort; +import static redis_request.RedisRequestOuterClass.RequestType.SortReadOnly; import static redis_request.RedisRequestOuterClass.RequestType.Strlen; import static redis_request.RedisRequestOuterClass.RequestType.TTL; import static redis_request.RedisRequestOuterClass.RequestType.Time; import static redis_request.RedisRequestOuterClass.RequestType.Touch; import static redis_request.RedisRequestOuterClass.RequestType.Type; import static redis_request.RedisRequestOuterClass.RequestType.Unlink; +import static redis_request.RedisRequestOuterClass.RequestType.XAck; import static redis_request.RedisRequestOuterClass.RequestType.XAdd; import static redis_request.RedisRequestOuterClass.RequestType.XDel; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupCreate; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupCreateConsumer; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupDelConsumer; +import static redis_request.RedisRequestOuterClass.RequestType.XGroupDestroy; import static redis_request.RedisRequestOuterClass.RequestType.XLen; +import static redis_request.RedisRequestOuterClass.RequestType.XPending; import static redis_request.RedisRequestOuterClass.RequestType.XRange; import static redis_request.RedisRequestOuterClass.RequestType.XRead; +import static redis_request.RedisRequestOuterClass.RequestType.XReadGroup; import static redis_request.RedisRequestOuterClass.RequestType.XRevRange; import static 
redis_request.RedisRequestOuterClass.RequestType.XTrim; import static redis_request.RedisRequestOuterClass.RequestType.ZAdd; @@ -192,6 +216,7 @@ import com.google.protobuf.ByteString; import glide.api.models.commands.ConditionalChange; +import glide.api.models.commands.GetExOptions; import glide.api.models.commands.InfoOptions; import glide.api.models.commands.LPosOptions; import glide.api.models.commands.ListDirection; @@ -223,7 +248,11 @@ import glide.api.models.commands.geospatial.GeoUnit; import glide.api.models.commands.geospatial.GeospatialData; import glide.api.models.commands.stream.StreamAddOptions; +import glide.api.models.commands.stream.StreamGroupOptions; +import glide.api.models.commands.stream.StreamPendingOptions; +import glide.api.models.commands.stream.StreamRange; import glide.api.models.commands.stream.StreamRange.InfRangeBound; +import glide.api.models.commands.stream.StreamReadGroupOptions; import glide.api.models.commands.stream.StreamReadOptions; import glide.api.models.commands.stream.StreamTrimOptions.MinId; import java.util.ArrayList; @@ -253,6 +282,12 @@ public void transaction_builds_protobuf_request(BaseTransaction transaction) transaction.get("key"); results.add(Pair.of(Get, buildArgs("key"))); + transaction.getex("key"); + results.add(Pair.of(GetEx, buildArgs("key"))); + + transaction.getex("key", GetExOptions.Seconds(10L)); + results.add(Pair.of(GetEx, buildArgs("key", "EX", "10"))); + transaction.set("key", "value"); results.add(Pair.of(Set, buildArgs("key", "value"))); @@ -752,6 +787,89 @@ InfScoreBound.NEGATIVE_INFINITY, new ScoreBoundary(3, false), new Limit(1, 2)), RANGE_COUNT_REDIS_API, "99"))); + transaction.xgroupCreate("key", "group", "id"); + results.add(Pair.of(XGroupCreate, buildArgs("key", "group", "id"))); + + transaction.xgroupCreate( + "key", + "group", + "id", + StreamGroupOptions.builder().makeStream().entriesRead("entry").build()); + results.add( + Pair.of( + XGroupCreate, + buildArgs( + "key", "group", "id", 
MAKE_STREAM_REDIS_API, ENTRIES_READ_REDIS_API, "entry"))); + + transaction.xgroupDestroy("key", "group"); + results.add(Pair.of(XGroupDestroy, buildArgs("key", "group"))); + + transaction.xgroupCreateConsumer("key", "group", "consumer"); + results.add(Pair.of(XGroupCreateConsumer, buildArgs("key", "group", "consumer"))); + + transaction.xgroupDelConsumer("key", "group", "consumer"); + results.add(Pair.of(XGroupDelConsumer, buildArgs("key", "group", "consumer"))); + + transaction.xreadgroup(Map.of("key", "id"), "group", "consumer"); + results.add( + Pair.of( + XReadGroup, + buildArgs( + READ_GROUP_REDIS_API, "group", "consumer", READ_STREAMS_REDIS_API, "key", "id"))); + + transaction.xreadgroup( + Map.of("key", "id"), + "group", + "consumer", + StreamReadGroupOptions.builder().block(1L).count(2L).noack().build()); + results.add( + Pair.of( + XReadGroup, + buildArgs( + READ_GROUP_REDIS_API, + "group", + "consumer", + READ_COUNT_REDIS_API, + "2", + READ_BLOCK_REDIS_API, + "1", + READ_NOACK_REDIS_API, + READ_STREAMS_REDIS_API, + "key", + "id"))); + + transaction.xack("key", "group", new String[] {"12345-1", "98765-4"}); + results.add(Pair.of(XAck, buildArgs("key", "group", "12345-1", "98765-4"))); + + transaction.xpending("key", "group"); + results.add(Pair.of(XPending, buildArgs("key", "group"))); + + transaction.xpending("key", "group", InfRangeBound.MAX, InfRangeBound.MIN, 99L); + results.add( + Pair.of( + XPending, + buildArgs("key", "group", MAXIMUM_RANGE_REDIS_API, MINIMUM_RANGE_REDIS_API, "99"))); + + transaction.xpending( + "key", + "group", + StreamRange.IdBound.ofExclusive("11"), + StreamRange.IdBound.ofExclusive("1234-0"), + 99L, + StreamPendingOptions.builder().minIdleTime(5L).consumer("consumer").build()); + results.add( + Pair.of( + XPending, + buildArgs( + "key", + "group", + IDLE_TIME_REDIS_API, + "5", + EXCLUSIVE_RANGE_REDIS_API + "11", + EXCLUSIVE_RANGE_REDIS_API + "1234-0", + "99", + "consumer"))); + transaction.time(); results.add(Pair.of(Time, 
buildArgs())); @@ -762,6 +880,10 @@ InfScoreBound.NEGATIVE_INFINITY, new ScoreBoundary(3, false), new Limit(1, 2)), results.add(Pair.of(FlushAll, buildArgs())); results.add(Pair.of(FlushAll, buildArgs(ASYNC.toString()))); + transaction.flushdb().flushdb(ASYNC); + results.add(Pair.of(FlushDB, buildArgs())); + results.add(Pair.of(FlushDB, buildArgs(ASYNC.toString()))); + transaction.lolwut().lolwut(5).lolwut(new int[] {1, 2}).lolwut(6, new int[] {42}); results.add(Pair.of(Lolwut, buildArgs())); results.add(Pair.of(Lolwut, buildArgs(VERSION_REDIS_API, "5"))); @@ -789,6 +911,9 @@ InfScoreBound.NEGATIVE_INFINITY, new ScoreBoundary(3, false), new Limit(1, 2)), transaction.type("key"); results.add(Pair.of(Type, buildArgs("key"))); + transaction.randomKey(); + results.add(Pair.of(RandomKey, buildArgs())); + transaction.rename("key", "newKey"); results.add(Pair.of(Rename, buildArgs("key", "newKey"))); @@ -890,6 +1015,11 @@ InfScoreBound.NEGATIVE_INFINITY, new ScoreBoundary(3, false), new Limit(1, 2)), transaction.fcall("func", new String[] {"arg1", "arg2"}); results.add(Pair.of(FCall, buildArgs("func", "0", "arg1", "arg2"))); + transaction.fcallReadOnly("func", new String[] {"key1", "key2"}, new String[] {"arg1", "arg2"}); + results.add(Pair.of(FCallReadOnly, buildArgs("func", "2", "key1", "key2", "arg1", "arg2"))); + transaction.fcallReadOnly("func", new String[] {"arg1", "arg2"}); + results.add(Pair.of(FCallReadOnly, buildArgs("func", "0", "arg1", "arg2"))); + transaction.functionStats(); results.add(Pair.of(FunctionStats, buildArgs())); @@ -1004,6 +1134,40 @@ InfScoreBound.NEGATIVE_INFINITY, new ScoreBoundary(3, false), new Limit(1, 2)), transaction.lcsLen("key1", "key2"); results.add(Pair.of(LCS, buildArgs("key1", "key2", "LEN"))); + transaction.lcsIdx("key1", "key2"); + results.add(Pair.of(LCS, buildArgs("key1", "key2", IDX_COMMAND_STRING))); + + transaction.lcsIdx("key1", "key2", 10); + results.add( + Pair.of( + LCS, buildArgs("key1", "key2", IDX_COMMAND_STRING, 
MINMATCHLEN_COMMAND_STRING, "10"))); + + transaction.lcsIdxWithMatchLen("key1", "key2"); + results.add( + Pair.of(LCS, buildArgs("key1", "key2", IDX_COMMAND_STRING, WITHMATCHLEN_COMMAND_STRING))); + + transaction.lcsIdxWithMatchLen("key1", "key2", 10); + results.add( + Pair.of( + LCS, + buildArgs( + "key1", + "key2", + IDX_COMMAND_STRING, + MINMATCHLEN_COMMAND_STRING, + "10", + WITHMATCHLEN_COMMAND_STRING))); + + transaction.sunion(new String[] {"key1", "key2"}); + results.add(Pair.of(SUnion, buildArgs("key1", "key2"))); + + transaction.sort("key1"); + results.add(Pair.of(Sort, buildArgs("key1"))); + transaction.sortReadOnly("key1"); + results.add(Pair.of(SortReadOnly, buildArgs("key1"))); + transaction.sortStore("key1", "key2"); + results.add(Pair.of(Sort, buildArgs("key1", STORE_COMMAND_STRING, "key2"))); + var protobufTransaction = transaction.getProtobufTransaction().build(); for (int idx = 0; idx < protobufTransaction.getCommandsCount(); idx++) { diff --git a/java/client/src/test/java/glide/connection/ConnectionWithGlideMockTests.java b/java/client/src/test/java/glide/connection/ConnectionWithGlideMockTests.java index 08235ac1fc..27e00ef52b 100644 --- a/java/client/src/test/java/glide/connection/ConnectionWithGlideMockTests.java +++ b/java/client/src/test/java/glide/connection/ConnectionWithGlideMockTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connection; import static java.util.concurrent.TimeUnit.SECONDS; diff --git a/java/client/src/test/java/glide/connectors/resources/ThreadPoolResourceAllocatorTest.java b/java/client/src/test/java/glide/connectors/resources/ThreadPoolResourceAllocatorTest.java index 3ce2052582..15d195deef 100644 --- a/java/client/src/test/java/glide/connectors/resources/ThreadPoolResourceAllocatorTest.java +++ 
b/java/client/src/test/java/glide/connectors/resources/ThreadPoolResourceAllocatorTest.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.connectors.resources; import static org.junit.jupiter.api.Assertions.assertEquals; diff --git a/java/client/src/test/java/glide/ffi/FfiTest.java b/java/client/src/test/java/glide/ffi/FfiTest.java index 73c9082c20..b0c72f77c9 100644 --- a/java/client/src/test/java/glide/ffi/FfiTest.java +++ b/java/client/src/test/java/glide/ffi/FfiTest.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.ffi; import static org.junit.jupiter.api.Assertions.assertAll; diff --git a/java/client/src/test/java/glide/managers/CommandManagerTest.java b/java/client/src/test/java/glide/managers/CommandManagerTest.java index cb86c03623..876851cfb1 100644 --- a/java/client/src/test/java/glide/managers/CommandManagerTest.java +++ b/java/client/src/test/java/glide/managers/CommandManagerTest.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.managers; import static glide.api.models.configuration.RequestRoutingConfiguration.SimpleMultiNodeRoute.ALL_NODES; diff --git a/java/client/src/test/java/glide/managers/ConnectionManagerTest.java b/java/client/src/test/java/glide/managers/ConnectionManagerTest.java index 79389fcde1..792259799b 100644 --- a/java/client/src/test/java/glide/managers/ConnectionManagerTest.java +++ b/java/client/src/test/java/glide/managers/ConnectionManagerTest.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project 
Contributors - SPDX Identifier: Apache-2.0 */ package glide.managers; import static glide.api.models.configuration.NodeAddress.DEFAULT_HOST; diff --git a/java/client/src/test/java/glide/utils/RustCoreLibMockTestBase.java b/java/client/src/test/java/glide/utils/RustCoreLibMockTestBase.java index ecf59e4a17..21c8152e72 100644 --- a/java/client/src/test/java/glide/utils/RustCoreLibMockTestBase.java +++ b/java/client/src/test/java/glide/utils/RustCoreLibMockTestBase.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.utils; import glide.connectors.handlers.ChannelHandler; diff --git a/java/client/src/test/java/glide/utils/RustCoreMock.java b/java/client/src/test/java/glide/utils/RustCoreMock.java index b9bc53bae6..93fa8d0030 100644 --- a/java/client/src/test/java/glide/utils/RustCoreMock.java +++ b/java/client/src/test/java/glide/utils/RustCoreMock.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.utils; import connection_request.ConnectionRequestOuterClass.ConnectionRequest; diff --git a/java/examples/src/main/java/glide/examples/ExamplesApp.java b/java/examples/src/main/java/glide/examples/ExamplesApp.java index ea816f9632..98dfbe6d59 100644 --- a/java/examples/src/main/java/glide/examples/ExamplesApp.java +++ b/java/examples/src/main/java/glide/examples/ExamplesApp.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.examples; import glide.api.RedisClient; diff --git a/java/integTest/src/test/java/glide/ConnectionTests.java b/java/integTest/src/test/java/glide/ConnectionTests.java index 254ffad838..4e9f978604 100644 --- 
a/java/integTest/src/test/java/glide/ConnectionTests.java +++ b/java/integTest/src/test/java/glide/ConnectionTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide; import glide.api.RedisClient; diff --git a/java/integTest/src/test/java/glide/CustomThreadPoolResourceTest.java b/java/integTest/src/test/java/glide/CustomThreadPoolResourceTest.java index b552f141c1..1523df7c15 100644 --- a/java/integTest/src/test/java/glide/CustomThreadPoolResourceTest.java +++ b/java/integTest/src/test/java/glide/CustomThreadPoolResourceTest.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide; import static org.junit.jupiter.api.Assertions.assertEquals; diff --git a/java/integTest/src/test/java/glide/ErrorHandlingTests.java b/java/integTest/src/test/java/glide/ErrorHandlingTests.java index 2776de3565..cb9b889758 100644 --- a/java/integTest/src/test/java/glide/ErrorHandlingTests.java +++ b/java/integTest/src/test/java/glide/ErrorHandlingTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide; import static org.junit.jupiter.api.Assertions.assertAll; diff --git a/java/integTest/src/test/java/glide/SharedClientTests.java b/java/integTest/src/test/java/glide/SharedClientTests.java index c120a7d1b9..0dd919d557 100644 --- a/java/integTest/src/test/java/glide/SharedClientTests.java +++ b/java/integTest/src/test/java/glide/SharedClientTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide; 
import static glide.TestUtilities.commonClientConfig; diff --git a/java/integTest/src/test/java/glide/SharedCommandTests.java b/java/integTest/src/test/java/glide/SharedCommandTests.java index b5ee8e13be..408f07ed4c 100644 --- a/java/integTest/src/test/java/glide/SharedCommandTests.java +++ b/java/integTest/src/test/java/glide/SharedCommandTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide; import static glide.TestConfiguration.CLUSTER_PORTS; @@ -8,6 +8,7 @@ import static glide.TestUtilities.commonClientConfig; import static glide.TestUtilities.commonClusterClientConfig; import static glide.api.BaseClient.OK; +import static glide.api.models.GlideString.gs; import static glide.api.models.commands.LInsertOptions.InsertPosition.AFTER; import static glide.api.models.commands.LInsertOptions.InsertPosition.BEFORE; import static glide.api.models.commands.RangeOptions.InfScoreBound.NEGATIVE_INFINITY; @@ -30,9 +31,11 @@ import glide.api.BaseClient; import glide.api.RedisClient; import glide.api.RedisClusterClient; +import glide.api.models.GlideString; import glide.api.models.Script; import glide.api.models.commands.ConditionalChange; import glide.api.models.commands.ExpireOptions; +import glide.api.models.commands.GetExOptions; import glide.api.models.commands.LPosOptions; import glide.api.models.commands.ListDirection; import glide.api.models.commands.RangeOptions.InfLexBound; @@ -43,6 +46,7 @@ import glide.api.models.commands.RangeOptions.RangeByLex; import glide.api.models.commands.RangeOptions.RangeByScore; import glide.api.models.commands.RangeOptions.ScoreBoundary; +import glide.api.models.commands.RestoreOptions; import glide.api.models.commands.ScriptOptions; import glide.api.models.commands.SetOptions; import glide.api.models.commands.WeightAggregateOptions.Aggregate; @@ -66,8 +70,11 @@ import 
glide.api.models.commands.geospatial.GeoUnit; import glide.api.models.commands.geospatial.GeospatialData; import glide.api.models.commands.stream.StreamAddOptions; +import glide.api.models.commands.stream.StreamGroupOptions; +import glide.api.models.commands.stream.StreamPendingOptions; import glide.api.models.commands.stream.StreamRange.IdBound; import glide.api.models.commands.stream.StreamRange.InfRangeBound; +import glide.api.models.commands.stream.StreamReadGroupOptions; import glide.api.models.commands.stream.StreamReadOptions; import glide.api.models.commands.stream.StreamTrimOptions.MaxLen; import glide.api.models.commands.stream.StreamTrimOptions.MinId; @@ -89,6 +96,7 @@ import java.util.concurrent.TimeoutException; import lombok.Getter; import lombok.SneakyThrows; +import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.tuple.Pair; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -159,6 +167,26 @@ public void unlink_multiple_keys(BaseClient client) { assertEquals(3L, unlinkedKeysNum); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void unlink_binary_multiple_keys(BaseClient client) { + GlideString key1 = gs("{key}" + UUID.randomUUID()); + GlideString key2 = gs("{key}" + UUID.randomUUID()); + GlideString key3 = gs("{key}" + UUID.randomUUID()); + GlideString value = gs(UUID.randomUUID().toString()); + + String setResult = client.set(key1, value).get(); + assertEquals(OK, setResult); + setResult = client.set(key2, value).get(); + assertEquals(OK, setResult); + setResult = client.set(key3, value).get(); + assertEquals(OK, setResult); + + Long unlinkedKeysNum = client.unlink(new GlideString[] {key1, key2, key3}).get(); + assertEquals(3L, unlinkedKeysNum); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -207,6 +235,21 @@ public void append(BaseClient client) { assertTrue(executionException.getCause() 
instanceof RequestException); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void appendBinary(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + GlideString value = gs(String.valueOf(UUID.randomUUID())); + + // Append on non-existing string(similar to SET) + assertEquals(value.getString().length(), client.append(key, value).get()); + + assertEquals(value.getString().length() * 2L, client.append(key, value).get()); + GlideString value2 = gs(value.getString() + value.getString()); + assertEquals(value2, client.get(key).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -287,6 +330,52 @@ public void getdel(BaseClient client) { assertInstanceOf(RequestException.class, executionException.getCause()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void getex(BaseClient client) { + + assumeTrue(REDIS_VERSION.isGreaterThanOrEqualTo("6.2.0"), "This feature added in redis 6.2.0"); + + String key1 = "{key}" + UUID.randomUUID(); + String value1 = String.valueOf(UUID.randomUUID()); + String key2 = "{key}" + UUID.randomUUID(); + + client.set(key1, value1).get(); + String data = client.getex(key1).get(); + assertEquals(data, value1); + assertEquals(-1, client.ttl(key1).get()); + + data = client.getex(key1, GetExOptions.Seconds(10L)).get(); + Long ttlValue = client.ttl(key1).get(); + assertTrue(ttlValue >= 0L); + + // non-existent key + data = client.getex(key2).get(); + assertNull(data); + + // key isn't a string + client.sadd(key2, new String[] {"a"}).get(); + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.getex(key2).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + // with option + data = client.getex(key1, GetExOptions.Seconds(10L)).get(); + assertEquals(data, value1); + + // invalid time 
measurement + ExecutionException invalidTimeException = + assertThrows( + ExecutionException.class, () -> client.getex(key1, GetExOptions.Seconds(-10L)).get()); + assertInstanceOf(RequestException.class, invalidTimeException.getCause()); + + // setting and clearing expiration timer + assertEquals(value1, client.getex(key1, GetExOptions.Seconds(10L)).get()); + assertEquals(value1, client.getex(key1, GetExOptions.Persist()).get()); + assertEquals(-1L, client.ttl(key1).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -325,11 +414,23 @@ public void set_only_if_does_not_exists_missing_key(BaseClient client) { @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") public void set_get_binary_data(BaseClient client) { - byte[] key = "set_get_binary_data_key".getBytes(); - byte[] value = {(byte) 0x01, (byte) 0x00, (byte) 0x01, (byte) 0x00, (byte) 0x02}; - assert client.set(key, value).get().equals("OK"); - byte[] data = client.get(key).get(); - assert Arrays.equals(data, value); + GlideString key = gs("set_get_binary_data_key"); + byte[] binvalue = {(byte) 0x01, (byte) 0x00, (byte) 0x01, (byte) 0x00, (byte) 0x02}; + assertEquals(client.set(key, gs(binvalue)).get(), "OK"); + GlideString data = client.get(key).get(); + assertArrayEquals(data.getBytes(), binvalue); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void set_get_binary_data_with_options(BaseClient client) { + SetOptions options = SetOptions.builder().conditionalSet(ONLY_IF_DOES_NOT_EXIST).build(); + GlideString key = gs("set_get_binary_data_with_options"); + byte[] binvalue = {(byte) 0x01, (byte) 0x00, (byte) 0x01, (byte) 0x00, (byte) 0x02}; + assertEquals(client.set(key, gs(binvalue), options).get(), "OK"); + GlideString data = client.get(key).get(); + assertArrayEquals(data.getBytes(), binvalue); } @SneakyThrows @@ -431,6 +532,23 @@ public void 
mset_mget_existing_non_existing_key(BaseClient client) { client.mget(new String[] {key1, key2, nonExisting, key3}).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void mset_mget_binary(BaseClient client) { + // keys are from different slots + String key1 = UUID.randomUUID().toString(); + String key2 = UUID.randomUUID().toString(); + String key3 = UUID.randomUUID().toString(); + String value = UUID.randomUUID().toString(); + Map keyValueMap = Map.of(key1, value, key2, value, key3, value); + + assertEquals(OK, client.mset(keyValueMap).get()); + assertArrayEquals( + new GlideString[] {gs(value), gs(value), gs(value)}, + client.mget(new GlideString[] {gs(key1), gs(key2), gs(key3)}).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -639,6 +757,25 @@ public void hsetnx(BaseClient client) { assertTrue(executionException.getCause() instanceof RequestException); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void hsetnx_binary(BaseClient client) { + GlideString key1 = gs(UUID.randomUUID().toString()); + GlideString key2 = gs(UUID.randomUUID().toString()); + GlideString field = gs(UUID.randomUUID().toString()); + + assertTrue(client.hsetnx(key1, field, gs("value")).get()); + assertFalse(client.hsetnx(key1, field, gs("newValue")).get()); + assertEquals("value", client.hget(key1.toString(), field.toString()).get()); + + // Key exists, but it is not a hash + assertEquals(OK, client.set(key2, gs("value")).get()); + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.hsetnx(key2, field, gs("value")).get()); + assertTrue(executionException.getCause() instanceof RequestException); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -741,6 +878,22 @@ public void 
hexists_existing_field_non_existing_field_non_existing_key(BaseClien assertFalse(client.hexists("non_existing_key", field2).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void hexists_binary_existing_field_non_existing_field_non_existing_key(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + GlideString field1 = gs(UUID.randomUUID().toString()); + GlideString field2 = gs(UUID.randomUUID().toString()); + Map fieldValueMap = + Map.of(field1.toString(), "value1", field2.toString(), "value1"); + + assertEquals(2, client.hset(key.toString(), fieldValueMap).get()); + assertTrue(client.hexists(key, field1).get()); + assertFalse(client.hexists(key, gs("non_existing_field")).get()); + assertFalse(client.hexists(gs("non_existing_key"), field2).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -756,6 +909,25 @@ public void hgetall_multiple_existing_fields_existing_key_non_existing_key(BaseC assertEquals(Map.of(), client.hgetall("non_existing_key").get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void hgetall_binary_api(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + GlideString field1 = gs(UUID.randomUUID().toString()); + GlideString field2 = gs(UUID.randomUUID().toString()); + GlideString value = gs(UUID.randomUUID().toString()); + Map fieldValueMapStrings = + Map.of(field1.getString(), value.getString(), field2.getString(), value.getString()); + HashMap fieldValueMap = + new HashMap<>(Map.of(field1, value, field2, value)); + + assertEquals(2, client.hset(key.getString(), fieldValueMapStrings).get()); + Map allItems = client.hgetall(key).get(); + assertEquals(value, allItems.get(field1)); + assertEquals(value, allItems.get(field2)); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -991,6 
+1163,30 @@ public void ltrim_existing_non_existing_key_and_type_error(BaseClient client) { assertTrue(ltrimException.getCause() instanceof RequestException); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void ltrim_binary_existing_non_existing_key_and_type_error(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + GlideString[] valueArray = + new GlideString[] {gs("value4"), gs("value3"), gs("value2"), gs("value1")}; + + assertEquals(4, client.lpush(key, valueArray).get()); + assertEquals(OK, client.ltrim(key, 0, 1).get()); + assertArrayEquals( + new String[] {"value1", "value2"}, client.lrange(key.toString(), 0, -1).get()); + + // `start` is greater than `end` so the key will be removed. + assertEquals(OK, client.ltrim(key, 4, 2).get()); + assertArrayEquals(new String[] {}, client.lrange(key.toString(), 0, -1).get()); + + assertEquals(OK, client.set(key, gs("foo")).get()); + + Exception ltrimException = + assertThrows(ExecutionException.class, () -> client.ltrim(key, 0, 1).get()); + assertTrue(ltrimException.getCause() instanceof RequestException); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1169,10 +1365,38 @@ public void sadd_srem_scard_smembers_existing_set(BaseClient client) { Set expectedMembers = Set.of("member1", "member2", "member4"); assertEquals(expectedMembers, client.smembers(key).get()); + + Set expectedMembersBin = Set.of("member1", "member2", "member4"); + assertEquals(expectedMembersBin, client.smembers(key).get()); + assertEquals(1, client.srem(key, new String[] {"member1"}).get()); assertEquals(2, client.scard(key).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void sadd_srem_scard_smembers_binary_existing_set(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + assertEquals( + 4, + client + .sadd( + key, new 
GlideString[] {gs("member1"), gs("member2"), gs("member3"), gs("member4")}) + .get()); + assertEquals( + 1, client.srem(key, new GlideString[] {gs("member3"), gs("nonExistingMember")}).get()); + + Set expectedMembers = Set.of(gs("member1"), gs("member2"), gs("member4")); + assertEquals(expectedMembers, client.smembers(key).get()); + + Set expectedMembersBin = Set.of(gs("member1"), gs("member2"), gs("member4")); + assertEquals(expectedMembersBin, client.smembers(key).get()); + + assertEquals(1, client.srem(key, new GlideString[] {gs("member1")}).get()); + assertEquals(2, client.scard(key).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1255,6 +1479,60 @@ public void smove(BaseClient client) { assertInstanceOf(RequestException.class, executionException.getCause()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void smove_binary(BaseClient client) { + GlideString setKey1 = gs("{key}" + UUID.randomUUID()); + GlideString setKey2 = gs("{key}" + UUID.randomUUID()); + GlideString setKey3 = gs("{key}" + UUID.randomUUID()); + GlideString nonSetKey = gs("{key}" + UUID.randomUUID()); + + assertEquals(3, client.sadd(setKey1, new GlideString[] {gs("1"), gs("2"), gs("3")}).get()); + assertEquals(2, client.sadd(setKey2, new GlideString[] {gs("2"), gs("3")}).get()); + + // move an elem + assertTrue(client.smove(setKey1, setKey2, gs("1")).get()); + assertEquals(Set.of(gs("2"), gs("3")), client.smembers(setKey1).get()); + assertEquals(Set.of(gs("1"), gs("2"), gs("3")), client.smembers(setKey2).get()); + + // move an elem which preset at destination + assertTrue(client.smove(setKey2, setKey1, gs("2")).get()); + assertEquals(Set.of(gs("2"), gs("3")), client.smembers(setKey1).get()); + assertEquals(Set.of(gs("1"), gs("3")), client.smembers(setKey2).get()); + + // move from missing key + assertFalse(client.smove(setKey3, setKey1, gs("4")).get()); + 
assertEquals(Set.of(gs("2"), gs("3")), client.smembers(setKey1).get()); + + // move to a new set + assertTrue(client.smove(setKey1, setKey3, gs("2")).get()); + assertEquals(Set.of(gs("3")), client.smembers(setKey1).get()); + assertEquals(Set.of(gs("2")), client.smembers(setKey3).get()); + + // move missing element + assertFalse(client.smove(setKey1, setKey3, gs("42")).get()); + assertEquals(Set.of(gs("3")), client.smembers(setKey1).get()); + assertEquals(Set.of(gs("2")), client.smembers(setKey3).get()); + + // move missing element to missing key + assertFalse(client.smove(setKey1, nonSetKey, gs("42")).get()); + assertEquals(Set.of(gs("3")), client.smembers(setKey1).get()); + assertEquals("none", client.type(nonSetKey.toString()).get()); + + // Key exists, but it is not a set + assertEquals(OK, client.set(nonSetKey, gs("bar")).get()); + ExecutionException executionException = + assertThrows( + ExecutionException.class, () -> client.smove(nonSetKey, setKey1, gs("_")).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + executionException = + assertThrows( + ExecutionException.class, () -> client.smove(setKey1, nonSetKey, gs("_")).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1262,7 +1540,7 @@ public void rename(BaseClient client) { String key1 = "{key}" + UUID.randomUUID(); assertEquals(OK, client.set(key1, "foo").get()); - assertEquals(OK, client.rename(key1, key1 + "_rename").get()); + assertEquals(OK, client.rename(gs(key1), gs((key1 + "_rename"))).get()); assertEquals(1L, client.exists(new String[] {key1 + "_rename"}).get()); // key doesn't exist @@ -1291,8 +1569,8 @@ public void renamenx(BaseClient client) { // rename a string assertEquals(OK, client.set(key1, "key1").get()); - assertTrue(client.renamenx(key1, key2).get()); - assertFalse(client.renamenx(key2, key3).get()); + 
assertTrue(client.renamenx(gs(key1), gs(key2)).get()); + assertFalse(client.renamenx(gs(key2), gs(key3)).get()); assertEquals("key1", client.get(key2).get()); assertEquals(1, client.del(new String[] {key1, key2}).get()); @@ -1319,6 +1597,25 @@ public void sismember(BaseClient client) { assertTrue(executionException.getCause() instanceof RequestException); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void sismember_binary(BaseClient client) { + GlideString key1 = gs(UUID.randomUUID().toString()); + GlideString key2 = gs(UUID.randomUUID().toString()); + GlideString member = gs(UUID.randomUUID().toString()); + + assertEquals(1, client.sadd(key1.toString(), new String[] {member.toString()}).get()); + assertTrue(client.sismember(key1, member).get()); + assertFalse(client.sismember(key1, gs("nonExistingMember")).get()); + assertFalse(client.sismember(gs("nonExistingKey"), member).get()); + + assertEquals(OK, client.set(key2, gs("value")).get()); + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.sismember(key2, member).get()); + assertTrue(executionException.getCause() instanceof RequestException); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1366,6 +1663,55 @@ public void sinterstore(BaseClient client) { assertTrue(executionException.getCause() instanceof RequestException); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void sinterstore_gs(BaseClient client) { + GlideString key1 = gs("{key}-1-" + UUID.randomUUID()); + GlideString key2 = gs("{key}-2-" + UUID.randomUUID()); + GlideString key3 = gs("{key}-3-" + UUID.randomUUID()); + GlideString key4 = gs("{key}-4-" + UUID.randomUUID()); + GlideString key5 = gs("{key}-5-" + UUID.randomUUID()); + + assertEquals(3, client.sadd(key1, new GlideString[] {gs("a"), gs("b"), gs("c")}).get()); + assertEquals(3, 
client.sadd(key2, new GlideString[] {gs("c"), gs("d"), gs("e")}).get()); + assertEquals(3, client.sadd(key4, new GlideString[] {gs("e"), gs("f"), gs("g")}).get()); + + // create new + assertEquals(1, client.sinterstore(key3, new GlideString[] {key1, key2}).get()); + assertEquals(Set.of(gs("c")), client.smembers(key3).get()); + + // overwrite existing set + assertEquals(1, client.sinterstore(key2, new GlideString[] {key3, key2}).get()); + assertEquals(Set.of(gs("c")), client.smembers(key2).get()); + + // overwrite source + assertEquals(0, client.sinterstore(key1, new GlideString[] {key1, key4}).get()); + assertEquals(Set.of(), client.smembers(key1).get()); + + // overwrite source + assertEquals(1, client.sinterstore(key2, new GlideString[] {key2}).get()); + assertEquals(Set.of(gs("c")), client.smembers(key2).get()); + + // source key exists, but it is not a set + assertEquals(OK, client.set(key5, gs("value")).get()); + ExecutionException executionException = + assertThrows( + ExecutionException.class, + () -> client.sinterstore(key1, new GlideString[] {key5}).get()); + assertTrue(executionException.getCause() instanceof RequestException); + + // overwrite destination - not a set + assertEquals(0, client.sinterstore(key5, new GlideString[] {key1, key2}).get()); + assertEquals(Set.of(), client.smembers(key5).get()); + + // wrong arguments + executionException = + assertThrows( + ExecutionException.class, () -> client.sinterstore(key5, new GlideString[0]).get()); + assertTrue(executionException.getCause() instanceof RequestException); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1487,6 +1833,26 @@ public void sinter(BaseClient client) { assertInstanceOf(RequestException.class, executionException.getCause()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void sinter_gs(BaseClient client) { + GlideString key1 = gs("{sinter}-" + UUID.randomUUID()); + 
GlideString key2 = gs("{sinter}-" + UUID.randomUUID()); + GlideString key3 = gs("{sinter}-" + UUID.randomUUID()); + + assertEquals(3, client.sadd(key1, new GlideString[] {gs("a"), gs("b"), gs("c")}).get()); + assertEquals(3, client.sadd(key2, new GlideString[] {gs("c"), gs("d"), gs("e")}).get()); + assertEquals(Set.of(gs("c")), client.sinter(new GlideString[] {key1, key2}).get()); + assertEquals(0, client.sinter(new GlideString[] {key1, key3}).get().size()); + + // Key exists, but it is not a set + assertEquals(OK, client.set(key3, gs("bar")).get()); + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.sinter(new GlideString[] {key3}).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1548,6 +1914,24 @@ public void exists_multiple_keys(BaseClient client) { assertEquals(3L, existsKeysNum); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void exists_binary_multiple_keys(BaseClient client) { + GlideString key1 = gs("{key}" + UUID.randomUUID()); + GlideString key2 = gs("{key}" + UUID.randomUUID()); + GlideString value = gs(UUID.randomUUID().toString()); + + String setResult = client.set(key1, value).get(); + assertEquals(OK, setResult); + setResult = client.set(key2, value).get(); + assertEquals(OK, setResult); + + Long existsKeysNum = + client.exists(new GlideString[] {key1, key2, key1, gs(UUID.randomUUID().toString())}).get(); + assertEquals(3L, existsKeysNum); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1577,6 +1961,35 @@ public void expire_pexpire_ttl_and_expiretime_with_positive_timeout(BaseClient c assertTrue(client.ttl(key).get() <= 15L); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void 
expire_pexpire_ttl_and_expiretime_binary_with_positive_timeout(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + assertEquals(OK, client.set(key, gs("expire_timeout")).get()); + assertTrue(client.expire(key, 10L).get()); + assertTrue(client.ttl(key).get() <= 10L); + + // set command clears the timeout. + assertEquals(OK, client.set(key, gs("pexpire_timeout")).get()); + if (REDIS_VERSION.isLowerThan("7.0.0")) { + assertTrue(client.pexpire(key, 10000L).get()); + } else { + assertTrue(client.pexpire(key, 10000L, ExpireOptions.HAS_NO_EXPIRY).get()); + } + assertTrue(client.ttl(key).get() <= 10L); + + // TTL will be updated to the new value = 15 + if (REDIS_VERSION.isLowerThan("7.0.0")) { + assertTrue(client.expire(key, 15L).get()); + } else { + assertTrue(client.expire(key, 15L, ExpireOptions.HAS_EXISTING_EXPIRY).get()); + assertTrue(client.expiretime(key).get() > Instant.now().getEpochSecond()); + assertTrue(client.pexpiretime(key).get() > Instant.now().toEpochMilli()); + } + assertTrue(client.ttl(key).get() <= 15L); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1616,19 +2029,55 @@ public void expireAt_pexpireAt_and_ttl_with_positive_timeout(BaseClient client) @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") - public void expire_pexpire_ttl_and_expiretime_with_timestamp_in_the_past_or_negative_timeout( - BaseClient client) { - String key = UUID.randomUUID().toString(); - - assertEquals(OK, client.set(key, "expire_with_past_timestamp").get()); - // no timeout set yet - assertEquals(-1L, client.ttl(key).get()); - if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { - assertEquals(-1L, client.expiretime(key).get()); - assertEquals(-1L, client.pexpiretime(key).get()); - } + public void expireAt_pexpireAt_and_ttl_binary_with_positive_timeout(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + assertEquals(OK, client.set(key, 
gs("expireAt_timeout")).get()); + assertTrue(client.expireAt(key, Instant.now().getEpochSecond() + 10L).get()); + assertTrue(client.ttl(key).get() <= 10L); - assertTrue(client.expire(key, -10L).get()); + // extend TTL + if (REDIS_VERSION.isLowerThan("7.0.0")) { + assertTrue(client.expireAt(key, Instant.now().getEpochSecond() + 50L).get()); + } else { + assertTrue( + client + .expireAt( + key, + Instant.now().getEpochSecond() + 50L, + ExpireOptions.NEW_EXPIRY_GREATER_THAN_CURRENT) + .get()); + } + assertTrue(client.ttl(key).get() <= 50L); + + if (REDIS_VERSION.isLowerThan("7.0.0")) { + assertTrue(client.pexpireAt(key, Instant.now().toEpochMilli() + 50000L).get()); + } else { + // set command clears the timeout. + assertEquals(OK, client.set(key, gs("pexpireAt_timeout")).get()); + assertFalse( + client + .pexpireAt( + key, Instant.now().toEpochMilli() + 50000L, ExpireOptions.HAS_EXISTING_EXPIRY) + .get()); + } + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void expire_pexpire_ttl_and_expiretime_with_timestamp_in_the_past_or_negative_timeout( + BaseClient client) { + String key = UUID.randomUUID().toString(); + + assertEquals(OK, client.set(key, "expire_with_past_timestamp").get()); + // no timeout set yet + assertEquals(-1L, client.ttl(key).get()); + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + assertEquals(-1L, client.expiretime(key).get()); + assertEquals(-1L, client.pexpiretime(key).get()); + } + + assertTrue(client.expire(key, -10L).get()); assertEquals(-2L, client.ttl(key).get()); assertEquals(OK, client.set(key, "pexpire_with_past_timestamp").get()); @@ -1636,6 +2085,30 @@ public void expire_pexpire_ttl_and_expiretime_with_timestamp_in_the_past_or_nega assertEquals(-2L, client.ttl(key).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void + 
expire_pexpire_ttl_and_expiretime_binary_with_timestamp_in_the_past_or_negative_timeout( + BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + + assertEquals(OK, client.set(key, gs("expire_with_past_timestamp")).get()); + // no timeout set yet + assertEquals(-1L, client.ttl(key).get()); + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + assertEquals(-1L, client.expiretime(key).get()); + assertEquals(-1L, client.pexpiretime(key).get()); + } + + assertTrue(client.expire(key, -10L).get()); + assertEquals(-2L, client.ttl(key).get()); + + assertEquals(OK, client.set(key, gs("pexpire_with_past_timestamp")).get()); + assertTrue(client.pexpire(key, -10000L).get()); + assertEquals(-2L, client.ttl(key).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1654,6 +2127,24 @@ public void expireAt_pexpireAt_ttl_with_timestamp_in_the_past_or_negative_timeou assertEquals(-2L, client.ttl(key).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void expireAt_pexpireAt_ttl_binary_with_timestamp_in_the_past_or_negative_timeout( + BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + + assertEquals(OK, client.set(key, gs("expireAt_with_past_timestamp")).get()); + // set timeout in the past + assertTrue(client.expireAt(key, Instant.now().getEpochSecond() - 50L).get()); + assertEquals(-2L, client.ttl(key).get()); + + assertEquals(OK, client.set(key, gs("pexpireAt_with_past_timestamp")).get()); + // set timeout in the past + assertTrue(client.pexpireAt(key, Instant.now().toEpochMilli() - 50000L).get()); + assertEquals(-2L, client.ttl(key).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1670,6 +2161,22 @@ public void expire_pexpire_ttl_and_expiretime_with_non_existing_key(BaseClient c } } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + 
@MethodSource("getClients") + public void expire_pexpire_ttl_and_expiretime_binary_with_non_existing_key(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + + assertFalse(client.expire(key, 10L).get()); + assertFalse(client.pexpire(key, 10000L).get()); + + assertEquals(-2L, client.ttl(key).get()); + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + assertEquals(-2L, client.expiretime(key).get()); + assertEquals(-2L, client.pexpiretime(key).get()); + } + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1682,6 +2189,18 @@ public void expireAt_pexpireAt_and_ttl_with_non_existing_key(BaseClient client) assertEquals(-2L, client.ttl(key).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void expireAt_pexpireAt_and_ttl_binary_with_non_existing_key(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + + assertFalse(client.expireAt(key, Instant.now().getEpochSecond() + 10L).get()); + assertFalse(client.pexpireAt(key, Instant.now().toEpochMilli() + 10000L).get()); + + assertEquals(-2L, client.ttl(key).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1705,6 +2224,29 @@ public void expire_pexpire_and_pttl_with_positive_timeout(BaseClient client) { assertTrue(pttlResult <= 10000L); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void expire_pexpire_and_pttl_binary_with_positive_timeout(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + + assertEquals(-2L, client.pttl(key).get()); + + assertEquals(OK, client.set(key, gs("expire_timeout")).get()); + assertTrue(client.expire(key, 10L).get()); + Long pttlResult = client.pttl(key).get(); + assertTrue(0 <= pttlResult); + assertTrue(pttlResult <= 10000L); + + assertEquals(OK, client.set(key, gs("pexpire_timeout")).get()); + 
assertEquals(-1L, client.pttl(key).get()); + + assertTrue(client.pexpire(key, 10000L).get()); + pttlResult = client.pttl(key).get(); + assertTrue(0 <= pttlResult); + assertTrue(pttlResult <= 10000L); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -1713,14 +2255,13 @@ public void persist_on_existing_and_non_existing_key(BaseClient client) { assertFalse(client.persist(key).get()); - assertEquals(OK, client.set(key, "persist_value").get()); - assertFalse(client.persist(key).get()); + assertEquals(OK, client.set(gs(key), gs("persist_value")).get()); + assertFalse(client.persist(gs(key)).get()); assertTrue(client.expire(key, 10L).get()); Long persistAmount = client.ttl(key).get(); assertTrue(0L <= persistAmount && persistAmount <= 10L); - assertTrue(client.persist(key).get()); - + assertTrue(client.persist(gs(key)).get()); assertEquals(-1L, client.ttl(key).get()); } @@ -2181,10 +2722,15 @@ public void zmscore(BaseClient client) { assertArrayEquals( new Double[] {1.0, null, null, 3.0}, client - .zmscore(key1, new String[] {"one", "nonExistentMember", "nonExistentMember", "three"}) + .zmscore( + gs(key1), + new GlideString[] { + gs("one"), gs("nonExistentMember"), gs("nonExistentMember"), gs("three") + }) .get()); assertArrayEquals( - new Double[] {null}, client.zmscore("nonExistentKey", new String[] {"one"}).get()); + new Double[] {null}, + client.zmscore(gs("nonExistentKey"), new GlideString[] {gs("one")}).get()); // Key exists, but it is not a set assertEquals(OK, client.set(key2, "bar").get()); @@ -3421,125 +3967,733 @@ public void xrange_and_xrevrange(BaseClient client) { @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") - public void zrandmember(BaseClient client) { - String key1 = UUID.randomUUID().toString(); - String key2 = UUID.randomUUID().toString(); - Map membersScores = Map.of("one", 1.0, "two", 2.0); - assertEquals(2, client.zadd(key1, membersScores).get()); + public 
void xgroupCreate_xgroupDestroy(BaseClient client) { + String key = UUID.randomUUID().toString(); + String stringKey = UUID.randomUUID().toString(); + String groupName = "group" + UUID.randomUUID(); + String streamId = "0-1"; - String randMember = client.zrandmember(key1).get(); - assertTrue(membersScores.containsKey(randMember)); - assertNull(client.zrandmember("nonExistentKey").get()); + // Stream not created results in error + Exception executionException = + assertThrows( + ExecutionException.class, () -> client.xgroupCreate(key, groupName, streamId).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); - // Key exists, but it is not a set - assertEquals(OK, client.set(key2, "bar").get()); - ExecutionException executionException = - assertThrows(ExecutionException.class, () -> client.zrandmember(key2).get()); + // Stream with option to create creates stream & Group + assertEquals( + OK, + client + .xgroupCreate( + key, groupName, streamId, StreamGroupOptions.builder().makeStream().build()) + .get()); + + // ...and again results in BUSYGROUP error, because group names must be unique + executionException = + assertThrows( + ExecutionException.class, () -> client.xgroupCreate(key, groupName, streamId).get()); assertInstanceOf(RequestException.class, executionException.getCause()); - } + assertTrue(executionException.getMessage().contains("BUSYGROUP")); - @SneakyThrows - @ParameterizedTest(autoCloseArguments = false) - @MethodSource("getClients") - public void zrandmemberWithCount(BaseClient client) { - String key1 = UUID.randomUUID().toString(); - String key2 = UUID.randomUUID().toString(); - Map membersScores = Map.of("one", 1.0, "two", 2.0); - assertEquals(2, client.zadd(key1, membersScores).get()); + // Stream Group can be destroyed returns: true + assertEquals(true, client.xgroupDestroy(key, groupName).get()); - // Unique values are expected as count is positive - List randMembers = Arrays.asList(client.zrandmemberWithCount(key1, 
4).get()); - assertEquals(2, randMembers.size()); - assertEquals(2, new HashSet<>(randMembers).size()); - randMembers.forEach(member -> assertTrue(membersScores.containsKey(member))); + // ...and again results in: false + assertEquals(false, client.xgroupDestroy(key, groupName).get()); - // Duplicate values are expected as count is negative - randMembers = Arrays.asList(client.zrandmemberWithCount(key1, -4).get()); - assertEquals(4, randMembers.size()); - randMembers.forEach(member -> assertTrue(membersScores.containsKey(member))); + // ENTRIESREAD option was added in redis 7.0.0 + StreamGroupOptions entriesReadOption = StreamGroupOptions.builder().entriesRead("10").build(); + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + assertEquals(OK, client.xgroupCreate(key, groupName, streamId, entriesReadOption).get()); + } else { + executionException = + assertThrows( + ExecutionException.class, + () -> client.xgroupCreate(key, groupName, streamId, entriesReadOption).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } - assertEquals(0, client.zrandmemberWithCount(key1, 0).get().length); - assertEquals(0, client.zrandmemberWithCount("nonExistentKey", 4).get().length); + // key is a string and cannot be created as a stream + assertEquals(OK, client.set(stringKey, "not_a_stream").get()); + executionException = + assertThrows( + ExecutionException.class, + () -> + client + .xgroupCreate( + stringKey, + groupName, + streamId, + StreamGroupOptions.builder().makeStream().build()) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); - // Key exists, but it is not a set - assertEquals(OK, client.set(key2, "bar").get()); - ExecutionException executionException = - assertThrows(ExecutionException.class, () -> client.zrandmemberWithCount(key2, 5).get()); + executionException = + assertThrows( + ExecutionException.class, () -> client.xgroupDestroy(stringKey, groupName).get()); 
assertInstanceOf(RequestException.class, executionException.getCause()); } @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") - public void zrandmemberWithCountWithScores(BaseClient client) { - String key1 = UUID.randomUUID().toString(); - String key2 = UUID.randomUUID().toString(); - Map membersScores = Map.of("one", 1.0, "two", 2.0); - assertEquals(2, client.zadd(key1, membersScores).get()); - - // Unique values are expected as count is positive - Object[][] randMembersWithScores = client.zrandmemberWithCountWithScores(key1, 4).get(); - assertEquals(2, randMembersWithScores.length); - for (Object[] membersWithScore : randMembersWithScores) { - String member = (String) membersWithScore[0]; - Double score = (Double) membersWithScore[1]; - - assertEquals(score, membersScores.get(member)); - } - - // Duplicate values are expected as count is negative - randMembersWithScores = client.zrandmemberWithCountWithScores(key1, -4).get(); - assertEquals(4, randMembersWithScores.length); - for (Object[] randMembersWithScore : randMembersWithScores) { - String member = (String) randMembersWithScore[0]; - Double score = (Double) randMembersWithScore[1]; - - assertEquals(score, membersScores.get(member)); - } + public void xgroupCreateConsumer_xgroupDelConsumer_xreadgroup_xack(BaseClient client) { + String key = UUID.randomUUID().toString(); + String stringKey = UUID.randomUUID().toString(); + String groupName = "group" + UUID.randomUUID(); + String zeroStreamId = "0"; + String consumerName = "consumer" + UUID.randomUUID(); - assertEquals(0, client.zrandmemberWithCountWithScores(key1, 0).get().length); - assertEquals(0, client.zrandmemberWithCountWithScores("nonExistentKey", 4).get().length); + // create group and consumer for the group + assertEquals( + OK, + client + .xgroupCreate( + key, groupName, zeroStreamId, StreamGroupOptions.builder().makeStream().build()) + .get()); + assertTrue(client.xgroupCreateConsumer(key, groupName, 
consumerName).get()); - // Key exists, but it is not a set - assertEquals(OK, client.set(key2, "bar").get()); + // create consumer for group that does not exist results in a NOGROUP request error ExecutionException executionException = assertThrows( - ExecutionException.class, () -> client.zrandmemberWithCountWithScores(key2, 5).get()); + ExecutionException.class, + () -> client.xgroupCreateConsumer(key, "not_a_group", consumerName).get()); assertInstanceOf(RequestException.class, executionException.getCause()); - } + assertTrue(executionException.getMessage().contains("NOGROUP")); + + // create consumer for group again + assertFalse(client.xgroupCreateConsumer(key, groupName, consumerName).get()); + + // Deletes a consumer that is not created yet returns 0 + assertEquals(0L, client.xgroupDelConsumer(key, groupName, "not_a_consumer").get()); + + // Add two stream entries + String streamid_1 = client.xadd(key, Map.of("field1", "value1")).get(); + assertNotNull(streamid_1); + String streamid_2 = client.xadd(key, Map.of("field2", "value2")).get(); + assertNotNull(streamid_2); + + // read the entire stream for the consumer and mark messages as pending + var result_1 = client.xreadgroup(Map.of(key, ">"), groupName, consumerName).get(); + assertDeepEquals( + Map.of( + key, + Map.of( + streamid_1, new String[][] {{"field1", "value1"}}, + streamid_2, new String[][] {{"field2", "value2"}})), + result_1); + + // delete one of the streams + assertEquals(1L, client.xdel(key, new String[] {streamid_1}).get()); + + // now xreadgroup returns one empty stream and one non-empty stream + var result_2 = client.xreadgroup(Map.of(key, "0"), groupName, consumerName).get(); + assertEquals(2, result_2.get(key).size()); + assertNull(result_2.get(key).get(streamid_1)); + assertArrayEquals(new String[][] {{"field2", "value2"}}, result_2.get(key).get(streamid_2)); + + String streamid_3 = client.xadd(key, Map.of("field3", "value3")).get(); + assertNotNull(streamid_3); + + // xack that 
streamid_1, and streamid_2 was received + assertEquals( + 2L, + client + .xack(gs(key), gs(groupName), new GlideString[] {gs(streamid_1), gs(streamid_2)}) + .get()); - @SneakyThrows - @ParameterizedTest(autoCloseArguments = false) - @MethodSource("getClients") - public void zincrby(BaseClient client) { - String key1 = UUID.randomUUID().toString(); - String key2 = UUID.randomUUID().toString(); + // Delete the consumer group and expect 1 pending messages (one was received) + assertEquals(0L, client.xgroupDelConsumer(key, groupName, consumerName).get()); - // key does not exist - assertEquals(2.5, client.zincrby(key1, 2.5, "value1").get()); - assertEquals(2.5, client.zscore(key1, "value1").get()); + // xack streamid_1, and streamid_2 already received returns 0L + assertEquals(0L, client.xack(key, groupName, new String[] {streamid_1, streamid_2}).get()); - // key exists, but value doesn't - assertEquals(-3.3, client.zincrby(key1, -3.3, "value2").get()); - assertEquals(-3.3, client.zscore(key1, "value2").get()); + // Consume the last message with the previously deleted consumer (creates the consumer anew) + var result_3 = client.xreadgroup(Map.of(key, ">"), groupName, consumerName).get(); + assertEquals(1, result_3.get(key).size()); - // updating existing value in existing key - assertEquals(3.5, client.zincrby(key1, 1., "value1").get()); - assertEquals(3.5, client.zscore(key1, "value1").get()); + // wrong group, so xack streamid_3 returns 0 + assertEquals( + 0L, client.xack(gs(key), gs("not_a_group"), new GlideString[] {gs(streamid_3)}).get()); - // Key exists, but it is not a sorted set - assertEquals(2L, client.sadd(key2, new String[] {"one", "two"}).get()); - ExecutionException executionException = - assertThrows(ExecutionException.class, () -> client.zincrby(key2, .5, "_").get()); + // Delete the consumer group and expect the pending message + assertEquals(1L, client.xgroupDelConsumer(key, groupName, consumerName).get()); + + // key is a string and cannot be 
created as a stream + assertEquals(OK, client.set(stringKey, "not_a_stream").get()); + executionException = + assertThrows( + ExecutionException.class, + () -> client.xgroupCreateConsumer(stringKey, groupName, consumerName).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + executionException = + assertThrows( + ExecutionException.class, + () -> client.xgroupDelConsumer(stringKey, groupName, consumerName).get()); assertInstanceOf(RequestException.class, executionException.getCause()); } @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") - public void type(BaseClient client) { - String nonExistingKey = UUID.randomUUID().toString(); - String stringKey = UUID.randomUUID().toString(); + public void xreadgroup_return_failures(BaseClient client) { + String key = "{key}:1" + UUID.randomUUID(); + String nonStreamKey = "{key}:3" + UUID.randomUUID(); + String groupName = "group" + UUID.randomUUID(); + String zeroStreamId = "0"; + String consumerName = "consumer" + UUID.randomUUID(); + + // setup first entries in streams key1 and key2 + String timestamp_1_1 = + client.xadd(key, Map.of("f1", "v1"), StreamAddOptions.builder().id("1-1").build()).get(); + assertNotNull(timestamp_1_1); + + // create group and consumer for the group + assertEquals( + OK, + client + .xgroupCreate( + key, groupName, zeroStreamId, StreamGroupOptions.builder().makeStream().build()) + .get()); + assertTrue(client.xgroupCreateConsumer(key, groupName, consumerName).get()); + + // First key exists, but it is not a stream + assertEquals(OK, client.set(nonStreamKey, "bar").get()); + ExecutionException executionException = + assertThrows( + ExecutionException.class, + () -> + client + .xreadgroup( + Map.of(nonStreamKey, timestamp_1_1, key, timestamp_1_1), + groupName, + consumerName) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + // Second key exists, but it is not a stream + executionException 
= + assertThrows( + ExecutionException.class, + () -> + client + .xreadgroup( + Map.of(key, timestamp_1_1, nonStreamKey, timestamp_1_1), + groupName, + consumerName) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + try (var testClient = + client instanceof RedisClient + ? RedisClient.CreateClient(commonClientConfig().build()).get() + : RedisClusterClient.CreateClient(commonClusterClientConfig().build()).get()) { + String timeoutKey = "{key}:2" + UUID.randomUUID(); + String timeoutGroupName = "group" + UUID.randomUUID(); + String timeoutConsumerName = "consumer" + UUID.randomUUID(); + + // Create a group read with the test client + // add a single stream entry and consumer + // the first call to ">" will return an update consumer group + // the second call to ">" will block waiting for new entries + // using anything other than ">" won't block, but will return the empty consumer result + // see: https://github.com/redis/redis/issues/6587 + assertEquals( + OK, + testClient + .xgroupCreate( + timeoutKey, + timeoutGroupName, + zeroStreamId, + StreamGroupOptions.builder().makeStream().build()) + .get()); + assertTrue( + testClient.xgroupCreateConsumer(timeoutKey, timeoutGroupName, timeoutConsumerName).get()); + String streamid_1 = testClient.xadd(timeoutKey, Map.of("field1", "value1")).get(); + assertNotNull(streamid_1); + + // read the entire stream for the consumer and mark messages as pending + var result_1 = + testClient + .xreadgroup(Map.of(timeoutKey, ">"), timeoutGroupName, timeoutConsumerName) + .get(); + // returns a null result on the key + assertNull(result_1.get(key)); + + // subsequent calls to read ">" will block: + // ensure that command doesn't time out even if timeout > request timeout + long oneSecondInMS = 1000L; + assertNull( + testClient + .xreadgroup( + Map.of(timeoutKey, ">"), + timeoutGroupName, + timeoutConsumerName, + StreamReadGroupOptions.builder().block(oneSecondInMS).build()) + .get()); + + // with 0 
timeout (no timeout) should never time out, + // but we wrap the test with timeout to avoid test failing or stuck forever + assertThrows( + TimeoutException.class, // <- future timeout, not command timeout + () -> + testClient + .xreadgroup( + Map.of(timeoutKey, ">"), + timeoutGroupName, + timeoutConsumerName, + StreamReadGroupOptions.builder().block(0L).build()) + .get(3, TimeUnit.SECONDS)); + } + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void xack_return_failures(BaseClient client) { + String key = "{key}:1" + UUID.randomUUID(); + String nonStreamKey = "{key}:3" + UUID.randomUUID(); + String groupName = "group" + UUID.randomUUID(); + String zeroStreamId = "0"; + String consumerName = "consumer" + UUID.randomUUID(); + + // setup first entries in streams key1 and key2 + String timestamp_1_1 = + client.xadd(key, Map.of("f1", "v1"), StreamAddOptions.builder().id("1-1").build()).get(); + assertNotNull(timestamp_1_1); + + // create group and consumer for the group + assertEquals( + OK, + client + .xgroupCreate( + key, groupName, zeroStreamId, StreamGroupOptions.builder().makeStream().build()) + .get()); + assertTrue(client.xgroupCreateConsumer(key, groupName, consumerName).get()); + + // Empty entity id list throws a RequestException + ExecutionException executionException = + assertThrows( + ExecutionException.class, () -> client.xack(key, groupName, new String[0]).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + // Key exists, but it is not a stream + assertEquals(OK, client.set(nonStreamKey, "bar").get()); + executionException = + assertThrows( + ExecutionException.class, + () -> client.xack(nonStreamKey, groupName, new String[] {zeroStreamId}).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void xpending(BaseClient 
client) { + + String key = UUID.randomUUID().toString(); + String groupName = "group" + UUID.randomUUID(); + String zeroStreamId = "0"; + String consumer1 = "consumer-1-" + UUID.randomUUID(); + String consumer2 = "consumer-2-" + UUID.randomUUID(); + + // create group and consumer for the group + assertEquals( + OK, + client + .xgroupCreate( + key, groupName, zeroStreamId, StreamGroupOptions.builder().makeStream().build()) + .get()); + assertTrue(client.xgroupCreateConsumer(key, groupName, consumer1).get()); + assertTrue(client.xgroupCreateConsumer(key, groupName, consumer2).get()); + + // Add two stream entries for consumer 1 + String streamid_1 = client.xadd(key, Map.of("field1", "value1")).get(); + assertNotNull(streamid_1); + String streamid_2 = client.xadd(key, Map.of("field2", "value2")).get(); + assertNotNull(streamid_2); + + // read the entire stream for the consumer and mark messages as pending + var result_1 = client.xreadgroup(Map.of(key, ">"), groupName, consumer1).get(); + assertDeepEquals( + Map.of( + key, + Map.of( + streamid_1, new String[][] {{"field1", "value1"}}, + streamid_2, new String[][] {{"field2", "value2"}})), + result_1); + + // Add three stream entries for consumer 2 + String streamid_3 = client.xadd(key, Map.of("field3", "value3")).get(); + assertNotNull(streamid_3); + String streamid_4 = client.xadd(key, Map.of("field4", "value4")).get(); + assertNotNull(streamid_4); + String streamid_5 = client.xadd(key, Map.of("field5", "value5")).get(); + assertNotNull(streamid_5); + + // read the entire stream for the consumer and mark messages as pending + var result_2 = client.xreadgroup(Map.of(key, ">"), groupName, consumer2).get(); + assertDeepEquals( + Map.of( + key, + Map.of( + streamid_3, new String[][] {{"field3", "value3"}}, + streamid_4, new String[][] {{"field4", "value4"}}, + streamid_5, new String[][] {{"field5", "value5"}})), + result_2); + + Object[] pending_results = client.xpending(key, groupName).get(); + Object[] expectedResult = 
{ + Long.valueOf(5L), streamid_1, streamid_5, new Object[][] {{consumer1, "2"}, {consumer2, "3"}} + }; + assertDeepEquals(expectedResult, pending_results); + + Object[][] pending_results_extended = + client.xpending(key, groupName, InfRangeBound.MIN, InfRangeBound.MAX, 10L).get(); + + // because of idle time return, we have to remove it from the expected results + // and check it separately + assertArrayEquals( + new Object[] {streamid_1, consumer1, 1L}, + ArrayUtils.remove(pending_results_extended[0], 2)); + assertTrue((Long) pending_results_extended[0][2] > 0L); + + assertArrayEquals( + new Object[] {streamid_2, consumer1, 1L}, + ArrayUtils.remove(pending_results_extended[1], 2)); + assertTrue((Long) pending_results_extended[1][2] > 0L); + + assertArrayEquals( + new Object[] {streamid_3, consumer2, 1L}, + ArrayUtils.remove(pending_results_extended[2], 2)); + assertTrue((Long) pending_results_extended[2][2] >= 0L); + + assertArrayEquals( + new Object[] {streamid_4, consumer2, 1L}, + ArrayUtils.remove(pending_results_extended[3], 2)); + assertTrue((Long) pending_results_extended[3][2] >= 0L); + + assertArrayEquals( + new Object[] {streamid_5, consumer2, 1L}, + ArrayUtils.remove(pending_results_extended[4], 2)); + assertTrue((Long) pending_results_extended[4][2] >= 0L); + + // acknowledge streams 2-4 and remove them from the xpending results + assertEquals( + 3L, client.xack(key, groupName, new String[] {streamid_2, streamid_3, streamid_4}).get()); + + pending_results_extended = + client + .xpending(key, groupName, IdBound.ofExclusive(streamid_3), InfRangeBound.MAX, 10L) + .get(); + assertEquals(1, pending_results_extended.length); + assertEquals(streamid_5, pending_results_extended[0][0]); + assertEquals(consumer2, pending_results_extended[0][1]); + + pending_results_extended = + client + .xpending(key, groupName, InfRangeBound.MIN, IdBound.ofExclusive(streamid_5), 10L) + .get(); + assertEquals(1, pending_results_extended.length); + assertEquals(streamid_1, 
pending_results_extended[0][0]); + assertEquals(consumer1, pending_results_extended[0][1]); + + pending_results_extended = + client + .xpending( + key, + groupName, + InfRangeBound.MIN, + InfRangeBound.MAX, + 10L, + StreamPendingOptions.builder().minIdleTime(1L).consumer(consumer2).build()) + .get(); + assertEquals(1, pending_results_extended.length); + assertEquals(streamid_5, pending_results_extended[0][0]); + assertEquals(consumer2, pending_results_extended[0][1]); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void xpending_return_failures(BaseClient client) { + + String key = UUID.randomUUID().toString(); + String stringkey = UUID.randomUUID().toString(); + String groupName = "group" + UUID.randomUUID(); + String zeroStreamId = "0"; + String consumer1 = "consumer-1-" + UUID.randomUUID(); + + // create group and consumer for the group + assertEquals( + OK, + client + .xgroupCreate( + key, groupName, zeroStreamId, StreamGroupOptions.builder().makeStream().build()) + .get()); + assertTrue(client.xgroupCreateConsumer(key, groupName, consumer1).get()); + + // Add two stream entries for consumer 1 + String streamid_1 = client.xadd(key, Map.of("field1", "value1")).get(); + assertNotNull(streamid_1); + String streamid_2 = client.xadd(key, Map.of("field2", "value2")).get(); + assertNotNull(streamid_2); + + // no pending messages yet... 
+ var pending_results_summary = client.xpending(key, groupName).get(); + assertArrayEquals(new Object[] {0L, null, null, null}, pending_results_summary); + + var pending_results_extended = + client.xpending(key, groupName, InfRangeBound.MAX, InfRangeBound.MIN, 10L).get(); + assertEquals(0, pending_results_extended.length); + + // read the entire stream for the consumer and mark messages as pending + var result_1 = client.xreadgroup(Map.of(key, ">"), groupName, consumer1).get(); + assertDeepEquals( + Map.of( + key, + Map.of( + streamid_1, new String[][] {{"field1", "value1"}}, + streamid_2, new String[][] {{"field2", "value2"}})), + result_1); + + // sanity check - expect some results: + pending_results_summary = client.xpending(key, groupName).get(); + assertTrue((Long) pending_results_summary[0] > 0L); + + pending_results_extended = + client.xpending(key, groupName, InfRangeBound.MIN, InfRangeBound.MAX, 1L).get(); + assertTrue(pending_results_extended.length > 0); + + // returns empty if + before - + pending_results_extended = + client.xpending(key, groupName, InfRangeBound.MAX, InfRangeBound.MIN, 10L).get(); + assertEquals(0, pending_results_extended.length); + + // min idletime of 100 seconds shouldn't produce any results + pending_results_extended = + client + .xpending( + key, + groupName, + InfRangeBound.MIN, + InfRangeBound.MAX, + 10L, + StreamPendingOptions.builder().minIdleTime(100000L).build()) + .get(); + assertEquals(0, pending_results_extended.length); + + // invalid consumer - no results + pending_results_extended = + client + .xpending( + key, + groupName, + InfRangeBound.MIN, + InfRangeBound.MAX, + 10L, + StreamPendingOptions.builder().consumer("invalid_consumer").build()) + .get(); + assertEquals(0, pending_results_extended.length); + + // xpending when range bound is not valid ID throws a RequestError + Exception executionException = + assertThrows( + ExecutionException.class, + () -> + client + .xpending( + key, + groupName, + 
IdBound.ofExclusive("not_a_stream_id"), + InfRangeBound.MAX, + 10L) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + executionException = + assertThrows( + ExecutionException.class, + () -> + client + .xpending( + key, + groupName, + InfRangeBound.MIN, + IdBound.ofExclusive("not_a_stream_id"), + 10L) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + // invalid count should return no results + pending_results_extended = + client.xpending(key, groupName, InfRangeBound.MIN, InfRangeBound.MAX, -10L).get(); + assertEquals(0, pending_results_extended.length); + + pending_results_extended = + client.xpending(key, groupName, InfRangeBound.MIN, InfRangeBound.MAX, 0L).get(); + assertEquals(0, pending_results_extended.length); + + // invalid group throws a RequestError (NOGROUP) + executionException = + assertThrows(ExecutionException.class, () -> client.xpending(key, "not_a_group").get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertTrue(executionException.getMessage().contains("NOGROUP")); + + // non-existent key throws a RequestError (NOGROUP) + executionException = + assertThrows(ExecutionException.class, () -> client.xpending(stringkey, groupName).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertTrue(executionException.getMessage().contains("NOGROUP")); + + executionException = + assertThrows( + ExecutionException.class, + () -> + client + .xpending(stringkey, groupName, InfRangeBound.MIN, InfRangeBound.MAX, 10L) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertTrue(executionException.getMessage().contains("NOGROUP")); + + // Key exists, but it is not a stream + assertEquals(OK, client.set(stringkey, "bar").get()); + executionException = + assertThrows(ExecutionException.class, () -> client.xpending(stringkey, groupName).get()); + 
assertInstanceOf(RequestException.class, executionException.getCause()); + + executionException = + assertThrows( + ExecutionException.class, + () -> + client + .xpending(stringkey, groupName, InfRangeBound.MIN, InfRangeBound.MAX, 10L) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void zrandmember(BaseClient client) { + String key1 = UUID.randomUUID().toString(); + String key2 = UUID.randomUUID().toString(); + Map membersScores = Map.of("one", 1.0, "two", 2.0); + assertEquals(2, client.zadd(key1, membersScores).get()); + + String randMember = client.zrandmember(key1).get(); + assertTrue(membersScores.containsKey(randMember)); + assertNull(client.zrandmember("nonExistentKey").get()); + + // Key exists, but it is not a set + assertEquals(OK, client.set(key2, "bar").get()); + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.zrandmember(key2).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void zrandmemberWithCount(BaseClient client) { + String key1 = UUID.randomUUID().toString(); + String key2 = UUID.randomUUID().toString(); + Map membersScores = Map.of("one", 1.0, "two", 2.0); + assertEquals(2, client.zadd(key1, membersScores).get()); + + // Unique values are expected as count is positive + List randMembers = Arrays.asList(client.zrandmemberWithCount(key1, 4).get()); + assertEquals(2, randMembers.size()); + assertEquals(2, new HashSet<>(randMembers).size()); + randMembers.forEach(member -> assertTrue(membersScores.containsKey(member))); + + // Duplicate values are expected as count is negative + randMembers = Arrays.asList(client.zrandmemberWithCount(key1, -4).get()); + assertEquals(4, randMembers.size()); + 
randMembers.forEach(member -> assertTrue(membersScores.containsKey(member))); + + assertEquals(0, client.zrandmemberWithCount(key1, 0).get().length); + assertEquals(0, client.zrandmemberWithCount("nonExistentKey", 4).get().length); + + // Key exists, but it is not a set + assertEquals(OK, client.set(key2, "bar").get()); + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.zrandmemberWithCount(key2, 5).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void zrandmemberWithCountWithScores(BaseClient client) { + String key1 = UUID.randomUUID().toString(); + String key2 = UUID.randomUUID().toString(); + Map membersScores = Map.of("one", 1.0, "two", 2.0); + assertEquals(2, client.zadd(key1, membersScores).get()); + + // Unique values are expected as count is positive + Object[][] randMembersWithScores = client.zrandmemberWithCountWithScores(key1, 4).get(); + assertEquals(2, randMembersWithScores.length); + for (Object[] membersWithScore : randMembersWithScores) { + String member = (String) membersWithScore[0]; + Double score = (Double) membersWithScore[1]; + + assertEquals(score, membersScores.get(member)); + } + + // Duplicate values are expected as count is negative + randMembersWithScores = client.zrandmemberWithCountWithScores(key1, -4).get(); + assertEquals(4, randMembersWithScores.length); + for (Object[] randMembersWithScore : randMembersWithScores) { + String member = (String) randMembersWithScore[0]; + Double score = (Double) randMembersWithScore[1]; + + assertEquals(score, membersScores.get(member)); + } + + assertEquals(0, client.zrandmemberWithCountWithScores(key1, 0).get().length); + assertEquals(0, client.zrandmemberWithCountWithScores("nonExistentKey", 4).get().length); + + // Key exists, but it is not a set + assertEquals(OK, client.set(key2, "bar").get()); + 
ExecutionException executionException = + assertThrows( + ExecutionException.class, () -> client.zrandmemberWithCountWithScores(key2, 5).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void zincrby(BaseClient client) { + String key1 = UUID.randomUUID().toString(); + String key2 = UUID.randomUUID().toString(); + + // key does not exist + assertEquals(2.5, client.zincrby(key1, 2.5, "value1").get()); + assertEquals(2.5, client.zscore(key1, "value1").get()); + + // key exists, but value doesn't + assertEquals(-3.3, client.zincrby(key1, -3.3, "value2").get()); + assertEquals(-3.3, client.zscore(key1, "value2").get()); + + // updating existing value in existing key + assertEquals(3.5, client.zincrby(key1, 1., "value1").get()); + assertEquals(3.5, client.zscore(key1, "value1").get()); + + // Key exists, but it is not a sorted set + assertEquals(2L, client.sadd(key2, new String[] {"one", "two"}).get()); + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.zincrby(key2, .5, "_").get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void type(BaseClient client) { + String nonExistingKey = UUID.randomUUID().toString(); + String stringKey = UUID.randomUUID().toString(); String listKey = UUID.randomUUID().toString(); String hashKey = UUID.randomUUID().toString(); String setKey = UUID.randomUUID().toString(); @@ -3562,6 +4716,34 @@ public void type(BaseClient client) { assertTrue("stream".equalsIgnoreCase(client.type(streamKey).get())); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void type_binary(BaseClient client) { + GlideString nonExistingKey = gs(UUID.randomUUID().toString()); + GlideString 
stringKey = gs(UUID.randomUUID().toString()); + GlideString listKey = gs(UUID.randomUUID().toString()); + String hashKey = UUID.randomUUID().toString(); + String setKey = UUID.randomUUID().toString(); + String zsetKey = UUID.randomUUID().toString(); + String streamKey = UUID.randomUUID().toString(); + + assertEquals(OK, client.set(stringKey, gs("value")).get()); + assertEquals(1, client.lpush(listKey, new GlideString[] {gs("value")}).get()); + assertEquals(1, client.hset(hashKey, Map.of("1", "2")).get()); + assertEquals(1, client.sadd(setKey, new String[] {"value"}).get()); + assertEquals(1, client.zadd(zsetKey, Map.of("1", 2d)).get()); + assertNotNull(client.xadd(streamKey, Map.of("field", "value"))); + + assertTrue("none".equalsIgnoreCase(client.type(nonExistingKey).get())); + assertTrue("string".equalsIgnoreCase(client.type(stringKey).get())); + assertTrue("list".equalsIgnoreCase(client.type(listKey).get())); + assertTrue("hash".equalsIgnoreCase(client.type(hashKey).get())); + assertTrue("set".equalsIgnoreCase(client.type(setKey).get())); + assertTrue("zset".equalsIgnoreCase(client.type(zsetKey).get())); + assertTrue("stream".equalsIgnoreCase(client.type(streamKey).get())); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -3892,6 +5074,14 @@ public void objectEncoding_returns_null(BaseClient client) { assertNull(client.objectEncoding(nonExistingKey).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void objectEncoding_binary_returns_null(BaseClient client) { + GlideString nonExistingKey = gs(UUID.randomUUID().toString()); + assertNull(client.objectEncoding(nonExistingKey).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -3905,6 +5095,19 @@ public void objectEncoding_returns_string_raw(BaseClient client) { assertEquals("raw", client.objectEncoding(stringRawKey).get()); } + @SneakyThrows + 
@ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void objectEncoding_binary_returns_string_raw(BaseClient client) { + GlideString stringRawKey = gs(UUID.randomUUID().toString()); + assertEquals( + OK, + client + .set(stringRawKey, gs("a really loooooooooooooooooooooooooooooooooooooooong value")) + .get()); + assertEquals("raw", client.objectEncoding(stringRawKey).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -3914,6 +5117,15 @@ public void objectEncoding_returns_string_int(BaseClient client) { assertEquals("int", client.objectEncoding(stringIntKey).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void objectEncoding_binary_returns_string_int(BaseClient client) { + GlideString stringIntKey = gs(UUID.randomUUID().toString()); + assertEquals(OK, client.set(stringIntKey, gs("2")).get()); + assertEquals("int", client.objectEncoding(stringIntKey).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -3923,6 +5135,15 @@ public void objectEncoding_returns_string_embstr(BaseClient client) { assertEquals("embstr", client.objectEncoding(stringEmbstrKey).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void objectEncoding_binary_returns_string_embstr(BaseClient client) { + GlideString stringEmbstrKey = gs(UUID.randomUUID().toString()); + assertEquals(OK, client.set(stringEmbstrKey, gs("value")).get()); + assertEquals("embstr", client.objectEncoding(stringEmbstrKey).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -3936,6 +5157,19 @@ public void objectEncoding_returns_list_listpack(BaseClient client) { client.objectEncoding(listListpackKey).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + 
public void objectEncoding_binary_returns_list_listpack(BaseClient client) { + GlideString listListpackKey = gs(UUID.randomUUID().toString()); + assertEquals(1, client.lpush(listListpackKey, new GlideString[] {gs("1")}).get()); + // API documentation states that a ziplist should be returned for Redis versions <= 6.2, but + // actual behavior returns a quicklist. + assertEquals( + REDIS_VERSION.isLowerThan("7.0.0") ? "quicklist" : "listpack", + client.objectEncoding(listListpackKey).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -4026,8 +5260,16 @@ public void objectEncoding_returns_stream(BaseClient client) { @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") - public void objectFreq_returns_null(BaseClient client) { - String nonExistingKey = UUID.randomUUID().toString(); + public void objectFreq_returns_null(BaseClient client) { + String nonExistingKey = UUID.randomUUID().toString(); + assertNull(client.objectFreq(nonExistingKey).get()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void objectFreq_binary_returns_null(BaseClient client) { + GlideString nonExistingKey = gs(UUID.randomUUID().toString()); assertNull(client.objectFreq(nonExistingKey).get()); } @@ -4039,6 +5281,14 @@ public void objectIdletime_returns_null(BaseClient client) { assertNull(client.objectIdletime(nonExistingKey).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void objectIdletime_binary_returns_null(BaseClient client) { + GlideString nonExistingKey = gs(UUID.randomUUID().toString()); + assertNull(client.objectIdletime(nonExistingKey).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -4049,6 +5299,16 @@ public void objectIdletime(BaseClient client) { assertTrue(client.objectIdletime(key).get() > 0L); } + 
@SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void objectIdletime_binary_(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + assertEquals(OK, client.set(key, gs("")).get()); + Thread.sleep(2000); + assertTrue(client.objectIdletime(key).get() > 0L); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -4057,6 +5317,14 @@ public void objectRefcount_returns_null(BaseClient client) { assertNull(client.objectRefcount(nonExistingKey).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void objectRefcount_binary_returns_null(BaseClient client) { + String nonExistingKey = UUID.randomUUID().toString(); + assertNull(client.objectRefcount(nonExistingKey).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -4066,6 +5334,15 @@ public void objectRefcount(BaseClient client) { assertTrue(client.objectRefcount(key).get() >= 0L); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void objectRefcount_binary(BaseClient client) { + GlideString key = gs(UUID.randomUUID().toString()); + assertEquals(OK, client.set(key, gs("")).get()); + assertTrue(client.objectRefcount(key).get() >= 0L); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -4162,6 +5439,7 @@ public void geopos(BaseClient client) { String key1 = UUID.randomUUID().toString(); String key2 = UUID.randomUUID().toString(); String[] members = {"Palermo", "Catania"}; + GlideString[] members_gs = {gs("Palermo"), gs("Catania")}; Double[][] expected = { {13.36138933897018433, 38.11555639549629859}, {15.08726745843887329, 37.50266842333162032} }; @@ -4180,6 +5458,14 @@ public void geopos(BaseClient client) { } } + // Loop through the arrays and perform assertions + actual = 
client.geopos(gs(key1), members_gs).get(); + for (int i = 0; i < expected.length; i++) { + for (int j = 0; j < expected[i].length; j++) { + assertEquals(expected[i][j], actual[i][j], 1e-9); + } + } + // key exists but holding the wrong kind of value (non-ZSET) assertEquals(OK, client.set(key2, "geopos").get()); ExecutionException executionException = @@ -4208,11 +5494,11 @@ public void geodist(BaseClient client) { assertEquals(2, client.geoadd(key1, membersToCoordinates).get()); // assert correct result with default metric - Double actual = client.geodist(key1, member1, member2).get(); + Double actual = client.geodist(gs(key1), gs(member1), gs(member2)).get(); assertEquals(expected, actual, delta); // assert correct result with manual metric specification kilometers - Double actualKM = client.geodist(key1, member1, member2, geoUnitKM).get(); + Double actualKM = client.geodist(gs(key1), gs(member1), gs(member2), geoUnitKM).get(); assertEquals(expectedKM, actualKM, delta); // assert null result when member index is missing @@ -4320,6 +5606,8 @@ public void setbit(BaseClient client) { assertEquals(0, client.setbit(key1, 0, 1).get()); assertEquals(1, client.setbit(key1, 0, 0).get()); + assertEquals(0, client.setbit(gs(key1), 0, 1).get()); + assertEquals(1, client.setbit(gs(key1), 0, 0).get()); // Exception thrown due to the negative offset ExecutionException executionException = @@ -4346,11 +5634,13 @@ public void getbit(BaseClient client) { String key2 = UUID.randomUUID().toString(); String missingKey = UUID.randomUUID().toString(); String value = "foobar"; - assertEquals(OK, client.set(key1, value).get()); assertEquals(1, client.getbit(key1, 1).get()); assertEquals(0, client.getbit(key1, 1000).get()); assertEquals(0, client.getbit(missingKey, 1).get()); + assertEquals(1, client.getbit(gs(key1), 1).get()); + assertEquals(0, client.getbit(gs(key1), 1000).get()); + assertEquals(0, client.getbit(gs(missingKey), 1).get()); if (client instanceof RedisClient) { 
assertEquals( 1L, ((RedisClient) client).customCommand(new String[] {"SETBIT", key1, "5", "0"}).get()); @@ -4476,14 +5766,14 @@ public void bitop(BaseClient client) { // Returns null when all keys hold empty strings assertEquals(0L, client.bitop(BitwiseOperation.AND, destination, emptyKeys).get()); - assertEquals(null, client.get(destination).get()); + assertNull(client.get(destination).get()); assertEquals(0L, client.bitop(BitwiseOperation.OR, destination, emptyKeys).get()); - assertEquals(null, client.get(destination).get()); + assertNull(client.get(destination).get()); assertEquals(0L, client.bitop(BitwiseOperation.XOR, destination, emptyKeys).get()); - assertEquals(null, client.get(destination).get()); + assertNull(client.get(destination).get()); assertEquals( 0L, client.bitop(BitwiseOperation.NOT, destination, new String[] {emptyKey1}).get()); - assertEquals(null, client.get(destination).get()); + assertNull(client.get(destination).get()); // Exception thrown due to the key holding a value with the wrong type assertEquals(1, client.sadd(emptyKey1, new String[] {value1}).get()); @@ -4660,6 +5950,48 @@ public void lset(BaseClient client) { assertArrayEquals(updatedList2, expectedList2); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void lset_binary(BaseClient client) { + // setup + GlideString key = gs(UUID.randomUUID().toString()); + GlideString nonExistingKey = gs(UUID.randomUUID().toString()); + long index = 0; + long oobIndex = 10; + long negativeIndex = -1; + GlideString element = gs("zero"); + GlideString[] lpushArgs = {gs("four"), gs("three"), gs("two"), gs("one")}; + String[] expectedList = {"zero", "two", "three", "four"}; + String[] expectedList2 = {"zero", "two", "three", "zero"}; + + // key does not exist + ExecutionException noSuchKeyException = + assertThrows( + ExecutionException.class, () -> client.lset(nonExistingKey, index, element).get()); + assertInstanceOf(RequestException.class, 
noSuchKeyException.getCause()); + + // pushing elements to list + client.lpush(key, lpushArgs).get(); + + // index out of range + ExecutionException indexOutOfBoundException = + assertThrows(ExecutionException.class, () -> client.lset(key, oobIndex, element).get()); + assertInstanceOf(RequestException.class, indexOutOfBoundException.getCause()); + + // assert lset result + String response = client.lset(key, index, element).get(); + assertEquals(OK, response); + String[] updatedList = client.lrange(key.toString(), 0, -1).get(); + assertArrayEquals(updatedList, expectedList); + + // assert lset with a negative index for the last element in the list + String response2 = client.lset(key, negativeIndex, element).get(); + assertEquals(OK, response2); + String[] updatedList2 = client.lrange(key.toString(), 0, -1).get(); + assertArrayEquals(updatedList2, expectedList2); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -5190,6 +6522,9 @@ public void sintercard(BaseClient client) { // returns limit as cardinality when the limit is reached partway through the computation assertEquals(limit, client.sintercard(keys, limit).get()); + // returns actual cardinality if limit is higher + assertEquals(3, client.sintercard(keys, limit2).get()); + // non set keys are used assertEquals(OK, client.set(nonSetKey, "NotASet").get()); String[] badArr = new String[] {key1, nonSetKey}; @@ -5198,6 +6533,47 @@ public void sintercard(BaseClient client) { assertInstanceOf(RequestException.class, executionException.getCause()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void sintercard_gs(BaseClient client) { + assumeTrue(REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0"), "This feature added in redis 7.0.0"); + // setup + GlideString key1 = gs("{key}-1" + UUID.randomUUID()); + GlideString key2 = gs("{key}-2" + UUID.randomUUID()); + GlideString nonSetKey = gs("{key}-4" + UUID.randomUUID()); + 
GlideString[] saddargs = {gs("one"), gs("two"), gs("three"), gs("four")}; + GlideString[] saddargs2 = {gs("two"), gs("three"), gs("four"), gs("five")}; + long limit = 2; + long limit2 = 4; + + // keys does not exist or is empty + GlideString[] keys = {key1, key2}; + assertEquals(0, client.sintercard(keys).get()); + assertEquals(0, client.sintercard(keys, limit).get()); + + // one of the keys is empty, intersection is empty, cardinality equals to 0 + assertEquals(4, client.sadd(key1, saddargs).get()); + assertEquals(0, client.sintercard(keys).get()); + + // sets at both keys have value, get cardinality of the intersection + assertEquals(4, client.sadd(key2, saddargs2).get()); + assertEquals(3, client.sintercard(keys).get()); + + // returns limit as cardinality when the limit is reached partway through the computation + assertEquals(limit, client.sintercard(keys, limit).get()); + + // returns actual cardinality if limit is higher + assertEquals(3, client.sintercard(keys, limit2).get()); + + // non set keys are used + assertEquals(OK, client.set(nonSetKey, gs("NotASet")).get()); + GlideString[] badArr = new GlideString[] {key1, nonSetKey}; + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.sintercard(badArr).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -5229,6 +6605,37 @@ public void copy(BaseClient client) { assertEquals("two", client.get(destination).get()); } + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void copy_binary(BaseClient client) { + assumeTrue(REDIS_VERSION.isGreaterThanOrEqualTo("6.2.0"), "This feature added in redis 6.2.0"); + // setup + GlideString source = gs("{key}-1" + UUID.randomUUID()); + GlideString destination = gs("{key}-2" + UUID.randomUUID()); + + // neither key exists, returns false + 
assertFalse(client.copy(source, destination, false).get()); + assertFalse(client.copy(source, destination).get()); + + // source exists, destination does not + client.set(source, gs("one")); + assertTrue(client.copy(source, destination, false).get()); + assertEquals(gs("one"), client.get(destination).get()); + + // setting new value for source + client.set(source, gs("two")); + + // both exists, no REPLACE + assertFalse(client.copy(source, destination).get()); + assertFalse(client.copy(source, destination, false).get()); + assertEquals(gs("one"), client.get(destination).get()); + + // both exists, with REPLACE + assertTrue(client.copy(source, destination, true).get()); + assertEquals(gs("two"), client.get(destination).get()); + } + @SneakyThrows @ParameterizedTest(autoCloseArguments = false) @MethodSource("getClients") @@ -5312,4 +6719,280 @@ public void lcs_with_len_option(BaseClient client) { assertThrows(ExecutionException.class, () -> client.lcs(nonStringKey, key1).get()); assertInstanceOf(RequestException.class, executionException.getCause()); } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void sunion(BaseClient client) { + // setup + String key1 = "{key}-1" + UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String key3 = "{key}-3" + UUID.randomUUID(); + String nonSetKey = "{key}-4" + UUID.randomUUID(); + String[] memberList1 = new String[] {"a", "b", "c"}; + String[] memberList2 = new String[] {"b", "c", "d", "e"}; + Set expectedUnion = Set.of("a", "b", "c", "d", "e"); + + assertEquals(3, client.sadd(key1, memberList1).get()); + assertEquals(4, client.sadd(key2, memberList2).get()); + assertEquals(expectedUnion, client.sunion(new String[] {key1, key2}).get()); + + // Key has an empty set + assertEquals(Set.of(), client.sunion(new String[] {key3}).get()); + + // Empty key with non-empty key returns non-empty key set + assertEquals(Set.of(memberList1), client.sunion(new String[] 
{key1, key3}).get()); + + // Exceptions + // Empty keys + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.sunion(new String[] {}).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + // Non-set key + assertEquals(OK, client.set(nonSetKey, "value").get()); + assertThrows( + ExecutionException.class, () -> client.sunion(new String[] {nonSetKey, key1}).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void test_dump_restore(BaseClient client) { + String key = UUID.randomUUID().toString(); + String newKey1 = UUID.randomUUID().toString(); + String newKey2 = UUID.randomUUID().toString(); + String nonExistingKey = UUID.randomUUID().toString(); + String value = "oranges"; + + assertEquals(OK, client.set(key, value).get()); + + // Dump existing key + byte[] result = client.dump(gs(key)).get(); + assertNotNull(result); + + // Dump non-existing key + assertNull(client.dump(gs(nonExistingKey)).get()); + + // Restore to a new key + assertEquals(OK, client.restore(gs(newKey1), 0L, result).get()); + + // Restore to an existing key - Error: "Target key name already exists" + Exception executionException = + assertThrows(ExecutionException.class, () -> client.restore(gs(newKey1), 0L, result).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + // Restore with checksum error - Error: "payload version or checksum are wrong" + executionException = + assertThrows( + ExecutionException.class, + () -> client.restore(gs(newKey2), 0L, value.getBytes()).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void test_dump_restore_withOptions(BaseClient client) { + String key = 
UUID.randomUUID().toString(); + String key2 = UUID.randomUUID().toString(); + String newKey = UUID.randomUUID().toString(); + String value = "oranges"; + + assertEquals(OK, client.set(key, value).get()); + + // Dump existing key + byte[] data = client.dump(gs(key)).get(); + assertNotNull(data); + + // Restore without option + String result = client.restore(gs(newKey), 0L, data).get(); + assertEquals(OK, result); + + // Restore with REPLACE option + result = client.restore(gs(newKey), 0L, data, RestoreOptions.builder().replace().build()).get(); + assertEquals(OK, result); + + // Restore with REPLACE and existing key holding different value + assertEquals(1, client.sadd(key2, new String[] {"a"}).get()); + result = client.restore(gs(key2), 0L, data, RestoreOptions.builder().replace().build()).get(); + assertEquals(OK, result); + + // Restore with REPLACE, ABSTTL, and positive TTL + result = + client + .restore(gs(newKey), 1000L, data, RestoreOptions.builder().replace().absttl().build()) + .get(); + assertEquals(OK, result); + + // Restore with REPLACE, ABSTTL, and negative TTL + ExecutionException executionException = + assertThrows( + ExecutionException.class, + () -> + client + .restore( + gs(newKey), -10L, data, RestoreOptions.builder().replace().absttl().build()) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + // Restore with REPLACE and positive idletime + result = + client + .restore(gs(newKey), 0L, data, RestoreOptions.builder().replace().idletime(10L).build()) + .get(); + assertEquals(OK, result); + + // Restore with REPLACE and negative idletime + executionException = + assertThrows( + ExecutionException.class, + () -> + client + .restore( + gs(newKey), + 0L, + data, + RestoreOptions.builder().replace().idletime(-10L).build()) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + // Restore with REPLACE and positive frequency + result = + client + .restore( + gs(newKey), 0L, data, 
RestoreOptions.builder().replace().frequency(10L).build()) + .get(); + assertEquals(OK, result); + + // Restore with REPLACE and negative frequency + executionException = + assertThrows( + ExecutionException.class, + () -> + client + .restore( + gs(newKey), + 0L, + data, + RestoreOptions.builder().replace().frequency(-10L).build()) + .get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void sort(BaseClient client) { + String key1 = "{key}-1" + UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String key3 = "{key}-3" + UUID.randomUUID(); + String[] key1LpushArgs = {"2", "1", "4", "3"}; + String[] key1AscendingList = {"1", "2", "3", "4"}; + String[] key2LpushArgs = {"2", "1", "a", "x", "c", "4", "3"}; + + assertArrayEquals(new String[0], client.sort(key3).get()); + assertEquals(4, client.lpush(key1, key1LpushArgs).get()); + assertArrayEquals(key1AscendingList, client.sort(key1).get()); + + // SORT_R0 + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + assertArrayEquals(new String[0], client.sortReadOnly(key3).get()); + assertArrayEquals(key1AscendingList, client.sortReadOnly(key1).get()); + } + + // SORT with STORE + assertEquals(4, client.sortStore(key1, key3).get()); + assertArrayEquals(key1AscendingList, client.lrange(key3, 0, -1).get()); + + // Exceptions + // SORT with strings require ALPHA + assertEquals(7, client.lpush(key2, key2LpushArgs).get()); + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.sort(key2).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @SneakyThrows + @ParameterizedTest(autoCloseArguments = false) + @MethodSource("getClients") + public void lcsIdx(BaseClient client) { + assumeTrue(REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0"), "This feature added in redis 7.0.0"); + // setup + String key1 = 
"{key}-1" + UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String nonStringKey = "{key}-4" + UUID.randomUUID(); + + // keys does not exist or is empty + Map result = client.lcsIdx(key1, key2).get(); + assertDeepEquals(new Object[0], result.get("matches")); + assertEquals(0L, result.get("len")); + result = client.lcsIdx(key1, key2, 10L).get(); + assertDeepEquals(new Object[0], result.get("matches")); + assertEquals(0L, result.get("len")); + result = client.lcsIdxWithMatchLen(key1, key2).get(); + assertDeepEquals(new Object[0], result.get("matches")); + assertEquals(0L, result.get("len")); + + // setting string values + client.set(key1, "abcdefghijk"); + client.set(key2, "defjkjuighijk"); + + // LCS with only IDX + Object expectedMatchesObject = new Long[][][] {{{6L, 10L}, {8L, 12L}}, {{3L, 5L}, {0L, 2L}}}; + result = client.lcsIdx(key1, key2).get(); + assertDeepEquals(expectedMatchesObject, result.get("matches")); + assertEquals(8L, result.get("len")); + + // LCS with IDX and WITHMATCHLEN + expectedMatchesObject = + new Object[] { + new Object[] {new Long[] {6L, 10L}, new Long[] {8L, 12L}, 5L}, + new Object[] {new Long[] {3L, 5L}, new Long[] {0L, 2L}, 3L} + }; + result = client.lcsIdxWithMatchLen(key1, key2).get(); + assertDeepEquals(expectedMatchesObject, result.get("matches")); + assertEquals(8L, result.get("len")); + + // LCS with IDX and MINMATCHLEN + expectedMatchesObject = new Long[][][] {{{6L, 10L}, {8L, 12L}}}; + result = client.lcsIdx(key1, key2, 4).get(); + assertDeepEquals(expectedMatchesObject, result.get("matches")); + assertEquals(8L, result.get("len")); + + // LCS with IDX and a negative MINMATCHLEN + expectedMatchesObject = new Long[][][] {{{6L, 10L}, {8L, 12L}}, {{3L, 5L}, {0L, 2L}}}; + result = client.lcsIdx(key1, key2, -1L).get(); + assertDeepEquals(expectedMatchesObject, result.get("matches")); + assertEquals(8L, result.get("len")); + + // LCS with IDX, MINMATCHLEN, and WITHMATCHLEN + expectedMatchesObject = + new Object[] 
{new Object[] {new Long[] {6L, 10L}, new Long[] {8L, 12L}, 5L}}; + result = client.lcsIdxWithMatchLen(key1, key2, 4L).get(); + assertDeepEquals(expectedMatchesObject, result.get("matches")); + assertEquals(8L, result.get("len")); + + // non-string keys are used + client.sadd(nonStringKey, new String[] {"setmember"}).get(); + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.lcsIdx(nonStringKey, key1).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + executionException = + assertThrows(ExecutionException.class, () -> client.lcsIdx(nonStringKey, key1, 10L).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + executionException = + assertThrows( + ExecutionException.class, () -> client.lcsIdxWithMatchLen(nonStringKey, key1).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + + executionException = + assertThrows( + ExecutionException.class, + () -> client.lcsIdxWithMatchLen(nonStringKey, key1, 10L).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } } diff --git a/java/integTest/src/test/java/glide/TestConfiguration.java b/java/integTest/src/test/java/glide/TestConfiguration.java index 31e6489523..e95cef361a 100644 --- a/java/integTest/src/test/java/glide/TestConfiguration.java +++ b/java/integTest/src/test/java/glide/TestConfiguration.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide; import com.vdurmont.semver4j.Semver; diff --git a/java/integTest/src/test/java/glide/TestUtilities.java b/java/integTest/src/test/java/glide/TestUtilities.java index e162a0ea9e..eb0f578312 100644 --- a/java/integTest/src/test/java/glide/TestUtilities.java +++ b/java/integTest/src/test/java/glide/TestUtilities.java @@ -1,4 +1,4 @@ -/** Copyright 
GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide; import static glide.TestConfiguration.CLUSTER_PORTS; diff --git a/java/integTest/src/test/java/glide/TransactionTestUtilities.java b/java/integTest/src/test/java/glide/TransactionTestUtilities.java index 148837d52f..7d030bd283 100644 --- a/java/integTest/src/test/java/glide/TransactionTestUtilities.java +++ b/java/integTest/src/test/java/glide/TransactionTestUtilities.java @@ -1,7 +1,8 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide; import static glide.TestConfiguration.REDIS_VERSION; +import static glide.TestUtilities.generateLuaLibCode; import static glide.api.BaseClient.OK; import static glide.api.models.commands.FlushMode.ASYNC; import static glide.api.models.commands.FlushMode.SYNC; @@ -12,6 +13,7 @@ import glide.api.models.BaseTransaction; import glide.api.models.commands.ExpireOptions; +import glide.api.models.commands.GetExOptions; import glide.api.models.commands.LPosOptions; import glide.api.models.commands.ListDirection; import glide.api.models.commands.RangeOptions.InfLexBound; @@ -35,7 +37,11 @@ import glide.api.models.commands.geospatial.GeoUnit; import glide.api.models.commands.geospatial.GeospatialData; import glide.api.models.commands.stream.StreamAddOptions; +import glide.api.models.commands.stream.StreamGroupOptions; +import glide.api.models.commands.stream.StreamRange; import glide.api.models.commands.stream.StreamRange.IdBound; +import glide.api.models.commands.stream.StreamReadGroupOptions; +import glide.api.models.commands.stream.StreamReadOptions; import glide.api.models.commands.stream.StreamTrimOptions.MinId; import java.util.HashMap; import java.util.Map; @@ -101,6 +107,8 @@ private static Object[] genericCommands(BaseTransaction 
transaction) { String genericKey2 = "{GenericKey}-2-" + UUID.randomUUID(); String genericKey3 = "{GenericKey}-3-" + UUID.randomUUID(); String genericKey4 = "{GenericKey}-4-" + UUID.randomUUID(); + String[] ascendingList = new String[] {"1", "2", "3"}; + String[] descendingList = new String[] {"3", "2", "1"}; transaction .set(genericKey1, value1) @@ -122,7 +130,11 @@ private static Object[] genericCommands(BaseTransaction transaction) { .expireAt(genericKey1, 42) // expire (delete) key immediately .pexpire(genericKey1, 42) .pexpireAt(genericKey1, 42) - .ttl(genericKey2); + .ttl(genericKey2) + .lpush(genericKey3, new String[] {"3", "1", "2"}) + .sort(genericKey3) + .sortStore(genericKey3, genericKey4) + .lrange(genericKey4, 0, -1); if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { transaction @@ -132,7 +144,8 @@ private static Object[] genericCommands(BaseTransaction transaction) { .pexpire(genericKey1, 42, ExpireOptions.NEW_EXPIRY_GREATER_THAN_CURRENT) .pexpireAt(genericKey1, 42, ExpireOptions.HAS_NO_EXPIRY) .expiretime(genericKey1) - .pexpiretime(genericKey1); + .pexpiretime(genericKey1) + .sortReadOnly(genericKey3); } if (REDIS_VERSION.isGreaterThanOrEqualTo("6.2.0")) { @@ -165,6 +178,10 @@ private static Object[] genericCommands(BaseTransaction transaction) { false, // pexpire(genericKey1, 42) false, // pexpireAt(genericKey1, 42) -2L, // ttl(genericKey2) + 3L, // lpush(genericKey3, new String[] {"3", "1", "2"}) + ascendingList, // sort(genericKey3) + 3L, // sortStore(genericKey3, genericKey4) + ascendingList, // lrange(genericKey4, 0, -1) }; if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { @@ -179,6 +196,7 @@ private static Object[] genericCommands(BaseTransaction transaction) { false, // pexpireAt(genericKey1, 42, ExpireOptions.HAS_NO_EXPIRY) -2L, // expiretime(genericKey1) -2L, // pexpiretime(genericKey1) + ascendingList, // sortReadOnly(genericKey3) }); } @@ -205,9 +223,22 @@ private static Object[] stringCommands(BaseTransaction transaction) { String 
stringKey6 = "{StringKey}-6-" + UUID.randomUUID(); String stringKey7 = "{StringKey}-7-" + UUID.randomUUID(); String stringKey8 = "{StringKey}-8-" + UUID.randomUUID(); + String stringKey9 = "{StringKey}-9-" + UUID.randomUUID(); + + Map expectedLcsIdxObject = + Map.of("matches", new Long[][][] {{{1L, 3L}, {0L, 2L}}}, "len", 3L); + + Map expectedLcsIdxWithMatchLenObject = + Map.of( + "matches", + new Object[] {new Object[] {new Long[] {1L, 3L}, new Long[] {0L, 2L}, 3L}}, + "len", + 3L); transaction + .flushall() .set(stringKey1, value1) + .randomKey() .get(stringKey1) .getdel(stringKey1) .set(stringKey2, value2, SetOptions.builder().returnOldValue(true).build()) @@ -236,12 +267,25 @@ private static Object[] stringCommands(BaseTransaction transaction) { .lcs(stringKey6, stringKey7) .lcs(stringKey6, stringKey8) .lcsLen(stringKey6, stringKey7) - .lcsLen(stringKey6, stringKey8); + .lcsLen(stringKey6, stringKey8) + .lcsIdx(stringKey6, stringKey7) + .lcsIdx(stringKey6, stringKey7, 1) + .lcsIdxWithMatchLen(stringKey6, stringKey7) + .lcsIdxWithMatchLen(stringKey6, stringKey7, 1); + } + + if (REDIS_VERSION.isGreaterThanOrEqualTo("6.2.0")) { + transaction + .set(stringKey9, value1) + .getex(stringKey9) + .getex(stringKey9, GetExOptions.Seconds(20L)); } var expectedResults = new Object[] { + OK, // flushall() OK, // set(stringKey1, value1) + stringKey1, // randomKey() value1, // get(stringKey1) value1, // getdel(stringKey1) null, // set(stringKey2, value2, returnOldValue(true)) @@ -275,6 +319,21 @@ private static Object[] stringCommands(BaseTransaction transaction) { "", // lcs(stringKey6, stringKey8) 3L, // lcsLEN(stringKey6, stringKey7) 0L, // lcsLEN(stringKey6, stringKey8) + expectedLcsIdxObject, // lcsIdx(stringKey6, stringKey7) + expectedLcsIdxObject, // lcsIdx(stringKey6, stringKey7, minMatchLen(1L) + expectedLcsIdxWithMatchLenObject, // lcsIdxWithMatchLen(stringKey6, stringKey7) + expectedLcsIdxWithMatchLenObject, // lcsIdxWithMatchLen(key6, key7, minMatchLen(1L)) + }); + 
} + + if (REDIS_VERSION.isGreaterThanOrEqualTo("6.2.0")) { + expectedResults = + concatenateArrays( + expectedResults, + new Object[] { + OK, // set(stringKey9, value1) + value1, // getex(stringKey1) + value1, // getex(stringKey1,GetExOptions.Seconds(20L)) }); } @@ -467,6 +526,7 @@ private static Object[] setCommands(BaseTransaction transaction) { .smismember(setKey1, new String[] {"baz", "foo"}) .sinter(new String[] {setKey1, setKey1}) .sadd(setKey2, new String[] {"a", "b"}) + .sunion(new String[] {setKey2, setKey1}) .sunionstore(setKey3, new String[] {setKey2, setKey1}) .sdiffstore(setKey3, new String[] {setKey2, setKey1}) .sinterstore(setKey3, new String[] {setKey2, setKey1}) @@ -497,6 +557,7 @@ private static Object[] setCommands(BaseTransaction transaction) { new Boolean[] {true, false}, // smismembmer(setKey1, new String[] {"baz", "foo"}) Set.of("baz"), // sinter(new String[] { setKey1, setKey1 }) 2L, // sadd(setKey2, new String[] { "a", "b" }) + Set.of("a", "b", "baz"), // sunion(new String[] {setKey2, setKey1}) 3L, // sunionstore(setKey3, new String[] { setKey2, setKey1 }) 2L, // sdiffstore(setKey3, new String[] { setKey2, setKey1 }) 0L, // sinterstore(setKey3, new String[] { setKey2, setKey1 }) @@ -667,6 +728,8 @@ private static Object[] serverManagementCommands(BaseTransaction transaction) .lolwut(1) .flushall() .flushall(ASYNC) + .flushdb() + .flushdb(ASYNC) .dbsize(); return new Object[] { @@ -676,6 +739,8 @@ private static Object[] serverManagementCommands(BaseTransaction transaction) "Redis ver. 
" + REDIS_VERSION + '\n', // lolwut(1) OK, // flushall() OK, // flushall(ASYNC) + OK, // flushdb() + OK, // flushdb(ASYNC) 0L, // dbsize() }; } @@ -714,6 +779,9 @@ private static Object[] hyperLogLogCommands(BaseTransaction transaction) { private static Object[] streamCommands(BaseTransaction transaction) { final String streamKey1 = "{streamKey}-1-" + UUID.randomUUID(); + final String groupName1 = "{groupName}-1-" + UUID.randomUUID(); + final String groupName2 = "{groupName}-2-" + UUID.randomUUID(); + final String consumer1 = "{consumer}-1-" + UUID.randomUUID(); transaction .xadd(streamKey1, Map.of("field1", "value1"), StreamAddOptions.builder().id("0-1").build()) @@ -721,11 +789,33 @@ private static Object[] streamCommands(BaseTransaction transaction) { .xadd(streamKey1, Map.of("field3", "value3"), StreamAddOptions.builder().id("0-3").build()) .xlen(streamKey1) .xread(Map.of(streamKey1, "0-2")) + .xread(Map.of(streamKey1, "0-2"), StreamReadOptions.builder().count(1L).build()) .xrange(streamKey1, IdBound.of("0-1"), IdBound.of("0-1")) .xrange(streamKey1, IdBound.of("0-1"), IdBound.of("0-1"), 1L) .xrevrange(streamKey1, IdBound.of("0-1"), IdBound.of("0-1")) .xrevrange(streamKey1, IdBound.of("0-1"), IdBound.of("0-1"), 1L) .xtrim(streamKey1, new MinId(true, "0-2")) + .xgroupCreate(streamKey1, groupName1, "0-2") + .xgroupCreate( + streamKey1, groupName2, "0-0", StreamGroupOptions.builder().makeStream().build()) + .xgroupCreateConsumer(streamKey1, groupName1, consumer1) + .xreadgroup(Map.of(streamKey1, ">"), groupName1, consumer1) + .xreadgroup( + Map.of(streamKey1, "0-3"), + groupName1, + consumer1, + StreamReadGroupOptions.builder().count(2L).build()) + .xpending(streamKey1, groupName1) + .xack(streamKey1, groupName1, new String[] {"0-3"}) + .xpending( + streamKey1, + groupName1, + StreamRange.InfRangeBound.MIN, + StreamRange.InfRangeBound.MAX, + 1L) + .xgroupDelConsumer(streamKey1, groupName1, consumer1) + .xgroupDestroy(streamKey1, groupName1) + 
.xgroupDestroy(streamKey1, groupName2) .xdel(streamKey1, new String[] {"0-3", "0-5"}); return new Object[] { @@ -736,12 +826,38 @@ private static Object[] streamCommands(BaseTransaction transaction) { Map.of( streamKey1, Map.of("0-3", new String[][] {{"field3", "value3"}})), // xread(Map.of(key9, "0-2")); + Map.of( + streamKey1, + Map.of( + "0-3", + new String[][] {{"field3", "value3"}})), // xread(Map.of(key9, "0-2"), options); Map.of("0-1", new String[][] {{"field1", "value1"}}), // .xrange(streamKey1, "0-1", "0-1") Map.of("0-1", new String[][] {{"field1", "value1"}}), // .xrange(streamKey1, "0-1", "0-1", 1l) Map.of("0-1", new String[][] {{"field1", "value1"}}), // .xrevrange(streamKey1, "0-1", "0-1") Map.of( "0-1", new String[][] {{"field1", "value1"}}), // .xrevrange(streamKey1, "0-1", "0-1", 1l) 1L, // xtrim(streamKey1, new MinId(true, "0-2")) + OK, // xgroupCreate(streamKey1, groupName1, "0-0") + OK, // xgroupCreate(streamKey1, groupName1, "0-0", options) + true, // xgroupCreateConsumer(streamKey1, groupName1, consumer1) + Map.of( + streamKey1, + Map.of( + "0-3", + new String[][] { + {"field3", "value3"} + })), // xreadgroup(Map.of(streamKey1, ">"), groupName1, consumer1); + Map.of( + streamKey1, + Map.of()), // xreadgroup(Map.of(streamKey1, ">"), groupName1, consumer1, options); + new Object[] { + 1L, "0-3", "0-3", new Object[][] {{consumer1, "1"}} + }, // xpending(streamKey1, groupName1) + 1L, // xack(streamKey1, groupName1, new String[] {"0-3"}) + new Object[] {}, // xpending(streamKey1, groupName1, MIN, MAX, 1L) + 0L, // xgroupDelConsumer(streamKey1, groupName1, consumer1) + true, // xgroupDestroy(streamKey1, groupName1) + true, // xgroupDestroy(streamKey1, groupName2) 1L, // .xdel(streamKey1, new String[] {"0-1", "0-5"}); }; } @@ -784,19 +900,15 @@ private static Object[] scriptingAndFunctionsCommands(BaseTransaction transac final String libName = "mylib1T"; final String funcName = "myfunc1T"; - final String code = - "#!lua name=" - + libName - + "\n 
redis.register_function('" - + funcName - + "', function(keys, args) return args[1] end)"; // function returns first argument + // function $funcName returns first argument + final String code = generateLuaLibCode(libName, Map.of(funcName, "return args[1]"), true); var expectedFuncData = new HashMap() { { - put("name", "myfunc1T"); + put("name", funcName); put("description", null); - put("flags", Set.of()); + put("flags", Set.of("no-writes")); } }; @@ -804,7 +916,7 @@ private static Object[] scriptingAndFunctionsCommands(BaseTransaction transac new Map[] { Map.of( "library_name", - "mylib1T", + libName, "engine", "LUA", "functions", @@ -834,25 +946,29 @@ private static Object[] scriptingAndFunctionsCommands(BaseTransaction transac .functionLoad(code, false) .functionLoad(code, true) .functionStats() - .fcall("myfunc1T", new String[0], new String[] {"a", "b"}) - .fcall("myfunc1T", new String[] {"a", "b"}) + .fcall(funcName, new String[0], new String[] {"a", "b"}) + .fcall(funcName, new String[] {"a", "b"}) + .fcallReadOnly(funcName, new String[0], new String[] {"a", "b"}) + .fcallReadOnly(funcName, new String[] {"a", "b"}) .functionList("otherLib", false) - .functionList("mylib1T", true) - .functionDelete("mylib1T") + .functionList(libName, true) + .functionDelete(libName) .functionList(true) .functionStats(); return new Object[] { OK, // functionFlush(SYNC) new Map[0], // functionList(false) - "mylib1T", // functionLoad(code, false) - "mylib1T", // functionLoad(code, true) + libName, // functionLoad(code, false) + libName, // functionLoad(code, true) expectedFunctionStatsNonEmpty, // functionStats() - "a", // fcall("myfunc1T", new String[0], new String[]{"a", "b"}) - "a", // fcall("myfunc1T", new String[] {"a", "b"}) + "a", // fcall(funcName, new String[0], new String[]{"a", "b"}) + "a", // fcall(funcName, new String[] {"a", "b"}) + "a", // fcallReadOnly(funcName, new String[0], new String[]{"a", "b"}) + "a", // fcallReadOnly(funcName, new String[] {"a", "b"}) new 
Map[0], // functionList("otherLib", false) - expectedLibData, // functionList("mylib1T", true) - OK, // functionDelete("mylib1T") + expectedLibData, // functionList(libName, true) + OK, // functionDelete(libName) new Map[0], // functionList(true) expectedFunctionStatsEmpty, // functionStats() }; diff --git a/java/integTest/src/test/java/glide/cluster/ClusterClientTests.java b/java/integTest/src/test/java/glide/cluster/ClusterClientTests.java index c3eb503eaf..65a4f40572 100644 --- a/java/integTest/src/test/java/glide/cluster/ClusterClientTests.java +++ b/java/integTest/src/test/java/glide/cluster/ClusterClientTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.cluster; import static glide.TestConfiguration.REDIS_VERSION; diff --git a/java/integTest/src/test/java/glide/cluster/ClusterTransactionTests.java b/java/integTest/src/test/java/glide/cluster/ClusterTransactionTests.java index 958460b1fb..5aacc3c65a 100644 --- a/java/integTest/src/test/java/glide/cluster/ClusterTransactionTests.java +++ b/java/integTest/src/test/java/glide/cluster/ClusterTransactionTests.java @@ -1,10 +1,13 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.cluster; import static glide.TestConfiguration.REDIS_VERSION; import static glide.TestUtilities.assertDeepEquals; import static glide.api.BaseClient.OK; +import static glide.api.models.commands.SortBaseOptions.OrderBy.DESC; +import static glide.api.models.configuration.RequestRoutingConfiguration.SimpleMultiNodeRoute.ALL_PRIMARIES; import static glide.api.models.configuration.RequestRoutingConfiguration.SimpleSingleNodeRoute.RANDOM; +import static glide.utils.ArrayTransformUtils.concatenateArrays; import static 
org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -15,6 +18,7 @@ import glide.TransactionTestUtilities.TransactionBuilder; import glide.api.RedisClusterClient; import glide.api.models.ClusterTransaction; +import glide.api.models.commands.SortClusterOptions; import glide.api.models.configuration.NodeAddress; import glide.api.models.configuration.RedisClusterClientConfiguration; import glide.api.models.configuration.RequestRoutingConfiguration.SingleNodeRoute; @@ -63,17 +67,6 @@ public void custom_command_info() { assertTrue(((String) result[0]).contains("# Stats")); } - @Test - @SneakyThrows - public void WATCH_transaction_failure_returns_null() { - ClusterTransaction transaction = new ClusterTransaction(); - transaction.get("key"); - assertEquals( - OK, clusterClient.customCommand(new String[] {"WATCH", "key"}).get().getSingleValue()); - assertEquals(OK, clusterClient.set("key", "foo").get()); - assertNull(clusterClient.exec(transaction).get()); - } - @Test @SneakyThrows public void info_simple_route_test() { @@ -176,4 +169,122 @@ public void zrank_zrevrank_withscores() { assertArrayEquals(new Object[] {0L, 1.0}, (Object[]) result[1]); assertArrayEquals(new Object[] {2L, 1.0}, (Object[]) result[2]); } + + @Test + @SneakyThrows + public void watch() { + String key1 = "{key}-1" + UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String key3 = "{key}-3" + UUID.randomUUID(); + String key4 = "{key}-4" + UUID.randomUUID(); + String foobarString = "foobar"; + String helloString = "hello"; + String[] keys = new String[] {key1, key2, key3}; + ClusterTransaction setFoobarTransaction = new ClusterTransaction(); + ClusterTransaction setHelloTransaction = new ClusterTransaction(); + String[] expectedExecResponse = new String[] {OK, OK, OK}; + + // Returns null when a watched key is modified before it is executed in a transaction command. 
+ // Transaction commands are not performed. + assertEquals(OK, clusterClient.watch(keys).get()); + assertEquals(OK, clusterClient.set(key2, helloString).get()); + setFoobarTransaction.set(key1, foobarString).set(key2, foobarString).set(key3, foobarString); + assertNull(clusterClient.exec(setFoobarTransaction).get()); // Sanity check + assertNull(clusterClient.get(key1).get()); + assertEquals(helloString, clusterClient.get(key2).get()); + assertNull(clusterClient.get(key3).get()); + + // Transaction executes command successfully with a read command on the watch key before + // transaction is executed. + assertEquals(OK, clusterClient.watch(keys).get()); + assertEquals(helloString, clusterClient.get(key2).get()); + assertArrayEquals(expectedExecResponse, clusterClient.exec(setFoobarTransaction).get()); + assertEquals(foobarString, clusterClient.get(key1).get()); // Sanity check + assertEquals(foobarString, clusterClient.get(key2).get()); + assertEquals(foobarString, clusterClient.get(key3).get()); + + // Transaction executes command successfully with unmodified watched keys + assertEquals(OK, clusterClient.watch(keys).get()); + assertArrayEquals(expectedExecResponse, clusterClient.exec(setFoobarTransaction).get()); + assertEquals(foobarString, clusterClient.get(key1).get()); // Sanity check + assertEquals(foobarString, clusterClient.get(key2).get()); + assertEquals(foobarString, clusterClient.get(key3).get()); + + // Transaction executes command successfully with a modified watched key but is not in the + // transaction. 
+ assertEquals(OK, clusterClient.watch(new String[] {key4}).get()); + setHelloTransaction.set(key1, helloString).set(key2, helloString).set(key3, helloString); + assertArrayEquals(expectedExecResponse, clusterClient.exec(setHelloTransaction).get()); + assertEquals(helloString, clusterClient.get(key1).get()); // Sanity check + assertEquals(helloString, clusterClient.get(key2).get()); + assertEquals(helloString, clusterClient.get(key3).get()); + + // WATCH can not have an empty String array parameter + // Test fails due to https://github.com/amazon-contributing/redis-rs/issues/158 + // ExecutionException executionException = + // assertThrows(ExecutionException.class, () -> clusterClient.watch(new String[] + // {}).get()); + // assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @Test + @SneakyThrows + public void unwatch() { + String key1 = "{key}-1" + UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String foobarString = "foobar"; + String helloString = "hello"; + String[] keys = new String[] {key1, key2}; + ClusterTransaction setFoobarTransaction = new ClusterTransaction(); + String[] expectedExecResponse = new String[] {OK, OK}; + + // UNWATCH returns OK when there no watched keys + assertEquals(OK, clusterClient.unwatch().get()); + + // Transaction executes successfully after modifying a watched key then calling UNWATCH + assertEquals(OK, clusterClient.watch(keys).get()); + assertEquals(OK, clusterClient.set(key2, helloString).get()); + assertEquals(OK, clusterClient.unwatch().get()); + assertEquals(OK, clusterClient.unwatch(ALL_PRIMARIES).get()); + setFoobarTransaction.set(key1, foobarString).set(key2, foobarString); + assertArrayEquals(expectedExecResponse, clusterClient.exec(setFoobarTransaction).get()); + assertEquals(foobarString, clusterClient.get(key1).get()); + assertEquals(foobarString, clusterClient.get(key2).get()); + } + + @Test + @SneakyThrows + public void sort() { + String key1 = "{key}-1" + 
UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String[] descendingList = new String[] {"3", "2", "1"}; + ClusterTransaction transaction = new ClusterTransaction(); + transaction + .lpush(key1, new String[] {"3", "1", "2"}) + .sort(key1, SortClusterOptions.builder().orderBy(DESC).build()) + .sortStore(key1, key2, SortClusterOptions.builder().orderBy(DESC).build()) + .lrange(key2, 0, -1); + + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + transaction.sortReadOnly(key1, SortClusterOptions.builder().orderBy(DESC).build()); + } + + Object[] results = clusterClient.exec(transaction).get(); + Object[] expectedResult = + new Object[] { + 3L, // lpush(key1, new String[] {"3", "1", "2"}) + descendingList, // sort(key1, SortClusterOptions.builder().orderBy(DESC).build()) + 3L, // sortStore(key1, key2, DESC)) + descendingList, // lrange(key2, 0, -1) + }; + + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + expectedResult = + concatenateArrays( + expectedResult, new Object[] {descendingList} // sortReadOnly(key1, DESC) + ); + } + + assertDeepEquals(expectedResult, results); + } } diff --git a/java/integTest/src/test/java/glide/cluster/CommandTests.java b/java/integTest/src/test/java/glide/cluster/CommandTests.java index 952791193c..c8793d33b4 100644 --- a/java/integTest/src/test/java/glide/cluster/CommandTests.java +++ b/java/integTest/src/test/java/glide/cluster/CommandTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.cluster; import static glide.TestConfiguration.REDIS_VERSION; @@ -12,6 +12,7 @@ import static glide.TestUtilities.getValueFromInfo; import static glide.TestUtilities.parseInfoResponseToMap; import static glide.api.BaseClient.OK; +import static glide.api.models.GlideString.gs; import static glide.api.models.commands.FlushMode.ASYNC; import static 
glide.api.models.commands.FlushMode.SYNC; import static glide.api.models.commands.InfoOptions.Section.CLIENTS; @@ -24,6 +25,10 @@ import static glide.api.models.commands.InfoOptions.Section.SERVER; import static glide.api.models.commands.InfoOptions.Section.STATS; import static glide.api.models.commands.ScoreFilter.MAX; +import static glide.api.models.commands.SortBaseOptions.OrderBy.DESC; +import static glide.api.models.commands.function.FunctionRestorePolicy.APPEND; +import static glide.api.models.commands.function.FunctionRestorePolicy.FLUSH; +import static glide.api.models.commands.function.FunctionRestorePolicy.REPLACE; import static glide.api.models.configuration.RequestRoutingConfiguration.ByAddressRoute; import static glide.api.models.configuration.RequestRoutingConfiguration.SimpleMultiNodeRoute.ALL_NODES; import static glide.api.models.configuration.RequestRoutingConfiguration.SimpleMultiNodeRoute.ALL_PRIMARIES; @@ -43,12 +48,15 @@ import glide.api.RedisClusterClient; import glide.api.models.ClusterTransaction; import glide.api.models.ClusterValue; -import glide.api.models.commands.FlushMode; +import glide.api.models.GlideString; import glide.api.models.commands.InfoOptions; import glide.api.models.commands.ListDirection; import glide.api.models.commands.RangeOptions.RangeByIndex; +import glide.api.models.commands.SortBaseOptions; +import glide.api.models.commands.SortClusterOptions; import glide.api.models.commands.WeightAggregateOptions.KeyArray; import glide.api.models.commands.bitmap.BitwiseOperation; +import glide.api.models.configuration.RequestRoutingConfiguration.ByAddressRoute; import glide.api.models.configuration.RequestRoutingConfiguration.Route; import glide.api.models.configuration.RequestRoutingConfiguration.SingleNodeRoute; import glide.api.models.configuration.RequestRoutingConfiguration.SlotKeyRoute; @@ -555,6 +563,26 @@ public void echo_with_route() { multiPayload.forEach((key, value) -> assertEquals(message, value)); } + @SneakyThrows 
+ @Test + public void echo_gs() { + byte[] message = {(byte) 0x01, (byte) 0x00, (byte) 0x01, (byte) 0x00, (byte) 0x02}; + GlideString response = clusterClient.echo(gs(message)).get(); + assertEquals(gs(message), response); + } + + @SneakyThrows + @Test + public void echo_gs_with_route() { + byte[] message = {(byte) 0x01, (byte) 0x00, (byte) 0x01, (byte) 0x00, (byte) 0x02}; + GlideString singlePayload = clusterClient.echo(gs(message), RANDOM).get().getSingleValue(); + assertEquals(gs(message), singlePayload); + + Map multiPayload = + clusterClient.echo(gs(message), ALL_NODES).get().getMultiValue(); + multiPayload.forEach((key, value) -> assertEquals(gs(message), value)); + } + @Test @SneakyThrows public void time() { @@ -642,7 +670,9 @@ public void lolwut_lolwut() { @Test @SneakyThrows - public void dbsize() { + public void dbsize_and_flushdb() { + boolean is62orHigher = REDIS_VERSION.isGreaterThanOrEqualTo("6.2.0"); + assertEquals(OK, clusterClient.flushall().get()); // dbsize should be 0 after flushall() because all keys have been deleted assertEquals(0L, clusterClient.dbsize().get()); @@ -655,11 +685,41 @@ public void dbsize() { // test dbsize with routing - flush the database first to ensure the set() call is directed to a // node with 0 keys. 
- assertEquals(OK, clusterClient.flushall().get()); + assertEquals(OK, clusterClient.flushdb().get()); assertEquals(0L, clusterClient.dbsize().get()); + String key = UUID.randomUUID().toString(); + SingleNodeRoute route = new SlotKeyRoute(key, PRIMARY); + + // add a key, measure DB size, flush DB and measure again - with all arg combinations + assertEquals(OK, clusterClient.set(key, "foo").get()); + assertEquals(1L, clusterClient.dbsize(route).get()); + if (is62orHigher) { + assertEquals(OK, clusterClient.flushdb(SYNC).get()); + } else { + assertEquals(OK, clusterClient.flushdb(ASYNC).get()); + } + assertEquals(0L, clusterClient.dbsize().get()); + + assertEquals(OK, clusterClient.set(key, "foo").get()); + assertEquals(1L, clusterClient.dbsize(route).get()); + assertEquals(OK, clusterClient.flushdb(route).get()); + assertEquals(0L, clusterClient.dbsize(route).get()); + assertEquals(OK, clusterClient.set(key, "foo").get()); - assertEquals(1L, clusterClient.dbsize(new SlotKeyRoute(key, PRIMARY)).get()); + assertEquals(1L, clusterClient.dbsize(route).get()); + if (is62orHigher) { + assertEquals(OK, clusterClient.flushdb(SYNC, route).get()); + } else { + assertEquals(OK, clusterClient.flushdb(ASYNC, route).get()); + } + assertEquals(0L, clusterClient.dbsize(route).get()); + + if (!is62orHigher) { + var executionException = + assertThrows(ExecutionException.class, () -> clusterClient.flushdb(SYNC).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } } @Test @@ -685,10 +745,18 @@ public static Stream callCrossSlotCommandsWhichShouldFail() { Arguments.of("renamenx", null, clusterClient.renamenx("abc", "zxy")), Arguments.of( "sinterstore", null, clusterClient.sinterstore("abc", new String[] {"zxy", "lkn"})), + Arguments.of( + "sinterstore_gs", + null, + clusterClient.sinterstore(gs("abc"), new GlideString[] {gs("zxy"), gs("lkn")})), Arguments.of("sdiff", null, clusterClient.sdiff(new String[] {"abc", "zxy", "lkn"})), Arguments.of( 
"sdiffstore", null, clusterClient.sdiffstore("abc", new String[] {"zxy", "lkn"})), Arguments.of("sinter", null, clusterClient.sinter(new String[] {"abc", "zxy", "lkn"})), + Arguments.of( + "sinter_gs", + null, + clusterClient.sinter(new GlideString[] {gs("abc"), gs("zxy"), gs("lkn")})), Arguments.of( "sunionstore", null, clusterClient.sunionstore("abc", new String[] {"zxy", "lkn"})), Arguments.of("zdiff", null, clusterClient.zdiff(new String[] {"abc", "zxy", "lkn"})), @@ -745,18 +813,41 @@ public static Stream callCrossSlotCommandsWhichShouldFail() { "6.2.0", clusterClient.blmove("abc", "def", ListDirection.LEFT, ListDirection.LEFT, 1)), Arguments.of("sintercard", "7.0.0", clusterClient.sintercard(new String[] {"abc", "def"})), + Arguments.of( + "sintercard_gs", + "7.0.0", + clusterClient.sintercard(new GlideString[] {gs("abc"), gs("def")})), Arguments.of( "sintercard", "7.0.0", clusterClient.sintercard(new String[] {"abc", "def"}, 1)), + Arguments.of( + "sintercard_gs", + "7.0.0", + clusterClient.sintercard(new GlideString[] {gs("abc"), gs("def")}, 1)), Arguments.of( "fcall", "7.0.0", clusterClient.fcall("func", new String[] {"abc", "zxy", "lkn"}, new String[0])), + Arguments.of( + "fcallReadOnly", + "7.0.0", + clusterClient.fcallReadOnly("func", new String[] {"abc", "zxy", "lkn"}, new String[0])), Arguments.of( "xread", null, clusterClient.xread(Map.of("abc", "stream1", "zxy", "stream2"))), Arguments.of("copy", "6.2.0", clusterClient.copy("abc", "def", true)), Arguments.of("msetnx", null, clusterClient.msetnx(Map.of("abc", "def", "ghi", "jkl"))), Arguments.of("lcs", "7.0.0", clusterClient.lcs("abc", "def")), - Arguments.of("lcsLEN", "7.0.0", clusterClient.lcsLen("abc", "def"))); + Arguments.of("lcsLEN", "7.0.0", clusterClient.lcsLen("abc", "def")), + Arguments.of("lcsIdx", "7.0.0", clusterClient.lcsIdx("abc", "def")), + Arguments.of("lcsIdx", "7.0.0", clusterClient.lcsIdx("abc", "def", 10)), + Arguments.of("lcsIdxWithMatchLen", "7.0.0", 
clusterClient.lcsIdxWithMatchLen("abc", "def")), + Arguments.of( + "lcsIdxWithMatchLen", "7.0.0", clusterClient.lcsIdxWithMatchLen("abc", "def", 10)), + Arguments.of("sunion", "1.0.0", clusterClient.sunion(new String[] {"abc", "def", "ghi"})), + Arguments.of("sortStore", "1.0.0", clusterClient.sortStore("abc", "def")), + Arguments.of( + "sortStore", + "1.0.0", + clusterClient.sortStore("abc", "def", SortClusterOptions.builder().alpha().build()))); } @SneakyThrows @@ -779,7 +870,8 @@ public static Stream callCrossSlotCommandsWhichShouldPass() { Arguments.of("del", clusterClient.del(new String[] {"abc", "zxy", "lkn"})), Arguments.of("mget", clusterClient.mget(new String[] {"abc", "zxy", "lkn"})), Arguments.of("mset", clusterClient.mset(Map.of("abc", "1", "zxy", "2", "lkn", "3"))), - Arguments.of("touch", clusterClient.touch(new String[] {"abc", "zxy", "lkn"}))); + Arguments.of("touch", clusterClient.touch(new String[] {"abc", "zxy", "lkn"})), + Arguments.of("watch", clusterClient.watch(new String[] {"ghi", "zxy", "lkn"}))); } @SneakyThrows @@ -792,7 +884,14 @@ public void check_does_not_throw_cross_slot_error(String testName, CompletableFu @Test @SneakyThrows public void flushall() { - assertEquals(OK, clusterClient.flushall(FlushMode.SYNC).get()); + if (REDIS_VERSION.isGreaterThanOrEqualTo("6.2.0")) { + assertEquals(OK, clusterClient.flushall(SYNC).get()); + } else { + var executionException = + assertThrows(ExecutionException.class, () -> clusterClient.flushall(SYNC).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertEquals(OK, clusterClient.flushall(ASYNC).get()); + } // TODO replace with KEYS command when implemented Object[] keysAfter = @@ -826,7 +925,7 @@ public void function_commands_without_keys_with_route(boolean singleNodeRoute) { String libName = "mylib1c_" + singleNodeRoute; String funcName = "myfunc1c_" + singleNodeRoute; // function $funcName returns first argument - String code = generateLuaLibCode(libName, 
Map.of(funcName, "return args[1]"), false); + String code = generateLuaLibCode(libName, Map.of(funcName, "return args[1]"), true); Route route = singleNodeRoute ? new SlotKeyRoute("1", PRIMARY) : ALL_PRIMARIES; assertEquals(OK, clusterClient.functionFlush(SYNC, route).get()); @@ -840,6 +939,14 @@ public void function_commands_without_keys_with_route(boolean singleNodeRoute) { assertEquals("one", nodeResponse); } } + fcallResult = clusterClient.fcallReadOnly(funcName, new String[] {"one", "two"}, route).get(); + if (route instanceof SingleNodeRoute) { + assertEquals("one", fcallResult.getSingleValue()); + } else { + for (var nodeResponse : fcallResult.getMultiValue().values()) { + assertEquals("one", nodeResponse); + } + } var expectedDescription = new HashMap() { @@ -850,7 +957,7 @@ public void function_commands_without_keys_with_route(boolean singleNodeRoute) { var expectedFlags = new HashMap>() { { - put(funcName, Set.of()); + put(funcName, Set.of("no-writes")); } }; @@ -893,12 +1000,12 @@ public void function_commands_without_keys_with_route(boolean singleNodeRoute) { // function $newFuncName returns argument array len String newCode = generateLuaLibCode( - libName, Map.of(funcName, "return args[1]", newFuncName, "return #args"), false); + libName, Map.of(funcName, "return args[1]", newFuncName, "return #args"), true); assertEquals(libName, clusterClient.functionLoad(newCode, true, route).get()); expectedDescription.put(newFuncName, null); - expectedFlags.put(newFuncName, Set.of()); + expectedFlags.put(newFuncName, Set.of("no-writes")); response = clusterClient.functionList(false, route).get(); if (singleNodeRoute) { @@ -945,6 +1052,15 @@ public void function_commands_without_keys_with_route(boolean singleNodeRoute) { assertEquals(2L, nodeResponse); } } + fcallResult = + clusterClient.fcallReadOnly(newFuncName, new String[] {"one", "two"}, route).get(); + if (route instanceof SingleNodeRoute) { + assertEquals(2L, fcallResult.getSingleValue()); + } else { + for 
(var nodeResponse : fcallResult.getMultiValue().values()) { + assertEquals(2L, nodeResponse); + } + } assertEquals(OK, clusterClient.functionFlush(route).get()); } @@ -966,6 +1082,7 @@ public void function_commands_without_keys_and_without_route() { assertEquals(libName, clusterClient.functionLoad(code, false).get()); assertEquals("one", clusterClient.fcall(funcName, new String[] {"one", "two"}).get()); + assertEquals("one", clusterClient.fcallReadOnly(funcName, new String[] {"one", "two"}).get()); var flist = clusterClient.functionList(false).get(); var expectedDescription = @@ -1026,6 +1143,7 @@ public void function_commands_without_keys_and_without_route() { flist, libName, expectedDescription, expectedFlags, Optional.of(newCode)); assertEquals(2L, clusterClient.fcall(newFuncName, new String[] {"one", "two"}).get()); + assertEquals(2L, clusterClient.fcallReadOnly(newFuncName, new String[] {"one", "two"}).get()); assertEquals(OK, clusterClient.functionFlush(ASYNC).get()); } @@ -1041,7 +1159,7 @@ public void fcall_with_keys(String prefix) { String libName = "mylib_with_keys"; String funcName = "myfunc_with_keys"; // function $funcName returns array with first two arguments - String code = generateLuaLibCode(libName, Map.of(funcName, "return {keys[1], keys[2]}"), false); + String code = generateLuaLibCode(libName, Map.of(funcName, "return {keys[1], keys[2]}"), true); // loading function to the node where key is stored assertEquals(libName, clusterClient.functionLoad(code, false, route).get()); @@ -1050,15 +1168,23 @@ public void fcall_with_keys(String prefix) { var functionResult = clusterClient.fcall(funcName, new String[] {key + 1, key + 2}, new String[0]).get(); assertArrayEquals(new Object[] {key + 1, key + 2}, (Object[]) functionResult); + functionResult = + clusterClient.fcallReadOnly(funcName, new String[] {key + 1, key + 2}, new String[0]).get(); + assertArrayEquals(new Object[] {key + 1, key + 2}, (Object[]) functionResult); var transaction = - new 
ClusterTransaction().fcall(funcName, new String[] {key + 1, key + 2}, new String[0]); + new ClusterTransaction() + .fcall(funcName, new String[] {key + 1, key + 2}, new String[0]) + .fcallReadOnly(funcName, new String[] {key + 1, key + 2}, new String[0]); // check response from a routed transaction request assertDeepEquals( - new Object[][] {{key + 1, key + 2}}, clusterClient.exec(transaction, route).get()); + new Object[][] {{key + 1, key + 2}, {key + 1, key + 2}}, + clusterClient.exec(transaction, route).get()); // if no route given, GLIDE should detect it automatically - assertDeepEquals(new Object[][] {{key + 1, key + 2}}, clusterClient.exec(transaction).get()); + assertDeepEquals( + new Object[][] {{key + 1, key + 2}, {key + 1, key + 2}}, + clusterClient.exec(transaction).get()); assertEquals(OK, clusterClient.functionDelete(libName, route).get()); } @@ -1070,7 +1196,8 @@ public void fcall_readonly_function() { String libName = "fcall_readonly_function"; // intentionally using a REPLICA route - Route route = new SlotKeyRoute(libName, REPLICA); + Route replicaRoute = new SlotKeyRoute(libName, REPLICA); + Route primaryRoute = new SlotKeyRoute(libName, PRIMARY); String funcName = "fcall_readonly_function"; // function $funcName returns a magic number @@ -1080,18 +1207,39 @@ public void fcall_readonly_function() { // fcall on a replica node should fail, because a function isn't guaranteed to be RO var executionException = - assertThrows(ExecutionException.class, () -> clusterClient.fcall(funcName, route).get()); + assertThrows( + ExecutionException.class, () -> clusterClient.fcall(funcName, replicaRoute).get()); assertInstanceOf(RequestException.class, executionException.getCause()); assertTrue( executionException.getMessage().contains("You can't write against a read only replica.")); + // fcall_ro also fails + executionException = + assertThrows( + ExecutionException.class, + () -> clusterClient.fcallReadOnly(funcName, replicaRoute).get()); + 
assertInstanceOf(RequestException.class, executionException.getCause()); + assertTrue( + executionException.getMessage().contains("You can't write against a read only replica.")); + + // fcall_ro also fails to run it even on primary - another error + executionException = + assertThrows( + ExecutionException.class, + () -> clusterClient.fcallReadOnly(funcName, primaryRoute).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertTrue( + executionException + .getMessage() + .contains("Can not execute a script with write flag using *_ro command.")); + // create the same function, but with RO flag code = generateLuaLibCode(libName, Map.of(funcName, "return 42"), true); assertEquals(libName, clusterClient.functionLoad(code, true).get()); // fcall should succeed now - assertEquals(42L, clusterClient.fcall(funcName, route).get().getSingleValue()); + assertEquals(42L, clusterClient.fcall(funcName, replicaRoute).get().getSingleValue()); assertEquals(OK, clusterClient.functionDelete(libName).get()); } @@ -1149,6 +1297,7 @@ public void functionStats_and_functionKill_without_route() { } assertEquals(OK, clusterClient.functionKill().get()); + Thread.sleep(404); // sometimes kill doesn't happen immediately exception = assertThrows(ExecutionException.class, () -> clusterClient.functionKill().get()); @@ -1235,7 +1384,7 @@ public void functionStats_and_functionKill_with_route(boolean singleNodeRoute) { // redis kills a function with 5 sec delay assertEquals(OK, clusterClient.functionKill(route).get()); - Thread.sleep(404); + Thread.sleep(404); // sometimes kill doesn't happen immediately exception = assertThrows(ExecutionException.class, () -> clusterClient.functionKill(route).get()); @@ -1306,6 +1455,7 @@ public void functionStats_and_functionKill_with_key_based_route() { // redis kills a function with 5 sec delay assertEquals(OK, clusterClient.functionKill(route).get()); + Thread.sleep(404); // sometimes kill doesn't happen immediately exception 
= assertThrows(ExecutionException.class, () -> clusterClient.functionKill(route).get()); @@ -1491,4 +1641,195 @@ public void functionStats_with_route(boolean singleNodeRoute) { } } } + + @Test + @SneakyThrows + public void function_dump_and_restore() { + assumeTrue(REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0"), "This feature added in redis 7"); + + assertEquals(OK, clusterClient.functionFlush(SYNC).get()); + + // dumping an empty lib + byte[] emptyDump = clusterClient.functionDump().get(); + assertTrue(emptyDump.length > 0); + + String name1 = "Foster"; + String libname1 = "FosterLib"; + String name2 = "Dogster"; + String libname2 = "DogsterLib"; + + // function $name1 returns first argument + // function $name2 returns argument array len + String code = + generateLuaLibCode(libname1, Map.of(name1, "return args[1]", name2, "return #args"), true); + assertEquals(libname1, clusterClient.functionLoad(code, true).get()); + Map[] flist = clusterClient.functionList(true).get(); + + final byte[] dump = clusterClient.functionDump().get(); + + // restore without cleaning the lib and/or overwrite option causes an error + var executionException = + assertThrows(ExecutionException.class, () -> clusterClient.functionRestore(dump).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertTrue(executionException.getMessage().contains("Library " + libname1 + " already exists")); + + // APPEND policy also fails for the same reason (name collision) + executionException = + assertThrows( + ExecutionException.class, () -> clusterClient.functionRestore(dump, APPEND).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertTrue(executionException.getMessage().contains("Library " + libname1 + " already exists")); + + // REPLACE policy succeeds + assertEquals(OK, clusterClient.functionRestore(dump, REPLACE).get()); + // but nothing changed - all code overwritten + var restoredFunctionList = 
clusterClient.functionList(true).get(); + assertEquals(1, restoredFunctionList.length); + assertEquals(libname1, restoredFunctionList[0].get("library_name")); + // Note that function ordering may differ across nodes so we can't do a deep equals + assertEquals(2, ((Object[]) restoredFunctionList[0].get("functions")).length); + + // create lib with another name, but with the same function names + assertEquals(OK, clusterClient.functionFlush(SYNC).get()); + code = + generateLuaLibCode(libname2, Map.of(name1, "return args[1]", name2, "return #args"), true); + assertEquals(libname2, clusterClient.functionLoad(code, true).get()); + restoredFunctionList = clusterClient.functionList(true).get(); + assertEquals(1, restoredFunctionList.length); + assertEquals(libname2, restoredFunctionList[0].get("library_name")); + + // REPLACE policy now fails due to a name collision + executionException = + assertThrows( + ExecutionException.class, () -> clusterClient.functionRestore(dump, REPLACE).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + // redis checks names in random order and blames on first collision + assertTrue( + executionException.getMessage().contains("Function " + name1 + " already exists") + || executionException.getMessage().contains("Function " + name2 + " already exists")); + + // FLUSH policy succeeds, but deletes the second lib + assertEquals(OK, clusterClient.functionRestore(dump, FLUSH).get()); + restoredFunctionList = clusterClient.functionList(true).get(); + assertEquals(1, restoredFunctionList.length); + assertEquals(libname1, restoredFunctionList[0].get("library_name")); + // Note that function ordering may differ across nodes + assertEquals(2, ((Object[]) restoredFunctionList[0].get("functions")).length); + + // call restored functions + assertEquals( + "meow", + clusterClient.fcallReadOnly(name1, new String[0], new String[] {"meow", "woem"}).get()); + assertEquals( + 2L, clusterClient.fcallReadOnly(name2, new String[0], 
new String[] {"meow", "woem"}).get()); + } + + @Test + @SneakyThrows + public void randomKey() { + String key1 = "{key}" + UUID.randomUUID(); + String key2 = "{key}" + UUID.randomUUID(); + + assertEquals(OK, clusterClient.set(key1, "a").get()); + assertEquals(OK, clusterClient.set(key2, "b").get()); + + String randomKey = clusterClient.randomKey().get(); + assertEquals(1L, clusterClient.exists(new String[] {randomKey}).get()); + + String randomKeyPrimaries = clusterClient.randomKey(ALL_PRIMARIES).get(); + assertEquals(1L, clusterClient.exists(new String[] {randomKeyPrimaries}).get()); + + // no keys in database + assertEquals(OK, clusterClient.flushall(SYNC).get()); + + // TODO: returns a ResponseError but expecting null + // uncomment when this is completed: https://github.com/amazon-contributing/redis-rs/pull/153 + // assertNull(clusterClient.randomKey().get()); + } + + @Test + @SneakyThrows + public void sort() { + String key1 = "{key}-1" + UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String key3 = "{key}-3" + UUID.randomUUID(); + String[] key1LpushArgs = {"2", "1", "4", "3"}; + String[] key1AscendingList = {"1", "2", "3", "4"}; + String[] key1DescendingList = {"4", "3", "2", "1"}; + String[] key2LpushArgs = {"2", "1", "a", "x", "c", "4", "3"}; + String[] key2DescendingList = {"x", "c", "a", "4", "3", "2", "1"}; + String[] key2DescendingListSubset = Arrays.copyOfRange(key2DescendingList, 0, 4); + + assertArrayEquals(new String[0], clusterClient.sort(key3).get()); + assertEquals(4, clusterClient.lpush(key1, key1LpushArgs).get()); + assertArrayEquals( + new String[0], + clusterClient + .sort( + key1, SortClusterOptions.builder().limit(new SortBaseOptions.Limit(0L, 0L)).build()) + .get()); + assertArrayEquals( + key1DescendingList, + clusterClient.sort(key1, SortClusterOptions.builder().orderBy(DESC).build()).get()); + assertArrayEquals( + Arrays.copyOfRange(key1AscendingList, 0, 2), + clusterClient + .sort( + key1, 
SortClusterOptions.builder().limit(new SortBaseOptions.Limit(0L, 2L)).build()) + .get()); + assertEquals(7, clusterClient.lpush(key2, key2LpushArgs).get()); + assertArrayEquals( + key2DescendingListSubset, + clusterClient + .sort( + key2, + SortClusterOptions.builder() + .alpha() + .orderBy(DESC) + .limit(new SortBaseOptions.Limit(0L, 4L)) + .build()) + .get()); + + // SORT_R0 + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + assertArrayEquals( + key1DescendingList, + clusterClient + .sortReadOnly(key1, SortClusterOptions.builder().orderBy(DESC).build()) + .get()); + assertArrayEquals( + Arrays.copyOfRange(key1AscendingList, 0, 2), + clusterClient + .sortReadOnly( + key1, + SortClusterOptions.builder().limit(new SortBaseOptions.Limit(0L, 2L)).build()) + .get()); + assertArrayEquals( + key2DescendingListSubset, + clusterClient + .sortReadOnly( + key2, + SortClusterOptions.builder() + .alpha() + .orderBy(DESC) + .limit(new SortBaseOptions.Limit(0L, 4L)) + .build()) + .get()); + } + + // SORT with STORE + assertEquals( + 4, + clusterClient + .sortStore( + key2, + key3, + SortClusterOptions.builder() + .alpha() + .orderBy(DESC) + .limit(new SortBaseOptions.Limit(0L, 4L)) + .build()) + .get()); + assertArrayEquals(key2DescendingListSubset, clusterClient.lrange(key3, 0, -1).get()); + } } diff --git a/java/integTest/src/test/java/glide/standalone/CommandTests.java b/java/integTest/src/test/java/glide/standalone/CommandTests.java index a9d384b886..d69cb817e8 100644 --- a/java/integTest/src/test/java/glide/standalone/CommandTests.java +++ b/java/integTest/src/test/java/glide/standalone/CommandTests.java @@ -1,7 +1,8 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.standalone; import static glide.TestConfiguration.REDIS_VERSION; +import static glide.TestUtilities.assertDeepEquals; import static 
glide.TestUtilities.checkFunctionListResponse; import static glide.TestUtilities.checkFunctionStatsResponse; import static glide.TestUtilities.commonClientConfig; @@ -10,6 +11,7 @@ import static glide.TestUtilities.getValueFromInfo; import static glide.TestUtilities.parseInfoResponseToMap; import static glide.api.BaseClient.OK; +import static glide.api.models.GlideString.gs; import static glide.api.models.commands.FlushMode.ASYNC; import static glide.api.models.commands.FlushMode.SYNC; import static glide.api.models.commands.InfoOptions.Section.CLUSTER; @@ -18,9 +20,16 @@ import static glide.api.models.commands.InfoOptions.Section.MEMORY; import static glide.api.models.commands.InfoOptions.Section.SERVER; import static glide.api.models.commands.InfoOptions.Section.STATS; +import static glide.api.models.commands.SortBaseOptions.Limit; +import static glide.api.models.commands.SortBaseOptions.OrderBy.ASC; +import static glide.api.models.commands.SortBaseOptions.OrderBy.DESC; +import static glide.api.models.commands.function.FunctionRestorePolicy.APPEND; +import static glide.api.models.commands.function.FunctionRestorePolicy.FLUSH; +import static glide.api.models.commands.function.FunctionRestorePolicy.REPLACE; import static glide.cluster.CommandTests.DEFAULT_INFO_SECTIONS; import static glide.cluster.CommandTests.EVERYTHING_INFO_SECTIONS; import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -30,11 +39,14 @@ import static org.junit.jupiter.api.Assumptions.assumeTrue; import glide.api.RedisClient; +import glide.api.models.GlideString; import glide.api.models.commands.InfoOptions; +import glide.api.models.commands.SortOptions; import glide.api.models.exceptions.RequestException; import java.time.Instant; import 
java.time.temporal.ChronoUnit; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -187,6 +199,36 @@ public void move() { assertTrue(e.getCause() instanceof RequestException); } + @Test + @SneakyThrows + public void move_binary() { + GlideString key1 = GlideString.gs(UUID.randomUUID().toString()); + GlideString key2 = GlideString.gs(UUID.randomUUID().toString()); + GlideString value1 = GlideString.gs(UUID.randomUUID().toString()); + GlideString value2 = GlideString.gs(UUID.randomUUID().toString()); + GlideString nonExistingKey = GlideString.gs(UUID.randomUUID().toString()); + assertEquals(OK, regularClient.select(0).get()); + + assertEquals(false, regularClient.move(nonExistingKey, 1L).get()); + assertEquals(OK, regularClient.set(key1, value1).get()); + assertEquals(OK, regularClient.set(key2, value2).get()); + assertEquals(true, regularClient.move(key1, 1L).get()); + assertNull(regularClient.get(key1).get()); + + assertEquals(OK, regularClient.select(1).get()); + assertEquals(value1, regularClient.get(key1).get()); + + assertEquals(OK, regularClient.set(key2, value2).get()); + // Move does not occur because key2 already exists in DB 0 + assertEquals(false, regularClient.move(key2, 0).get()); + assertEquals(value2, regularClient.get(key2).get()); + + // Incorrect argument - DB index must be non-negative + ExecutionException e = + assertThrows(ExecutionException.class, () -> regularClient.move(key1, -1L).get()); + assertTrue(e.getCause() instanceof RequestException); + } + @Test @SneakyThrows public void clientId() { @@ -294,6 +336,17 @@ public void echo() { String message = "GLIDE"; String response = regularClient.echo(message).get(); assertEquals(message, response); + message = ""; + response = regularClient.echo(message).get(); + assertEquals(message, response); + } + + @SneakyThrows + @Test + public void echo_gs() { + byte[] message = {(byte) 0x01, (byte) 0x00, (byte) 0x01, (byte) 0x00, 
(byte) 0x02}; + GlideString response = regularClient.echo(gs(message)).get(); + assertEquals(gs(message), response); } @Test @@ -343,18 +396,41 @@ public void lolwut_lolwut() { @Test @SneakyThrows - public void dbsize() { + public void dbsize_and_flushdb() { assertEquals(OK, regularClient.flushall().get()); assertEquals(OK, regularClient.select(0).get()); + // fill DB and check size int numKeys = 10; for (int i = 0; i < numKeys; i++) { assertEquals(OK, regularClient.set(UUID.randomUUID().toString(), "foo").get()); } assertEquals(10L, regularClient.dbsize().get()); + // check another empty DB assertEquals(OK, regularClient.select(1).get()); assertEquals(0L, regularClient.dbsize().get()); + + // check non-empty + assertEquals(OK, regularClient.set(UUID.randomUUID().toString(), "foo").get()); + assertEquals(1L, regularClient.dbsize().get()); + + // flush and check again + if (REDIS_VERSION.isGreaterThanOrEqualTo("6.2.0")) { + assertEquals(OK, regularClient.flushdb(SYNC).get()); + } else { + var executionException = + assertThrows(ExecutionException.class, () -> regularClient.flushdb(SYNC).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertEquals(OK, regularClient.flushdb(ASYNC).get()); + } + assertEquals(0L, regularClient.dbsize().get()); + + // switch to DB 0 and flush and check + assertEquals(OK, regularClient.select(0).get()); + assertEquals(10L, regularClient.dbsize().get()); + assertEquals(OK, regularClient.flushdb().get()); + assertEquals(0L, regularClient.dbsize().get()); } @Test @@ -376,7 +452,14 @@ public void objectFreq() { @Test @SneakyThrows public void flushall() { - assertEquals(OK, regularClient.flushall(SYNC).get()); + if (REDIS_VERSION.isGreaterThanOrEqualTo("6.2.0")) { + assertEquals(OK, regularClient.flushall(SYNC).get()); + } else { + var executionException = + assertThrows(ExecutionException.class, () -> regularClient.flushall(SYNC).get()); + assertInstanceOf(RequestException.class, 
executionException.getCause()); + assertEquals(OK, regularClient.flushall(ASYNC).get()); + } // TODO replace with KEYS command when implemented Object[] keysAfter = (Object[]) regularClient.customCommand(new String[] {"keys", "*"}).get(); @@ -396,12 +479,15 @@ public void function_commands() { String libName = "mylib1c"; String funcName = "myfunc1c"; // function $funcName returns first argument - String code = generateLuaLibCode(libName, Map.of(funcName, "return args[1]"), false); + String code = generateLuaLibCode(libName, Map.of(funcName, "return args[1]"), true); assertEquals(libName, regularClient.functionLoad(code, false).get()); var functionResult = regularClient.fcall(funcName, new String[0], new String[] {"one", "two"}).get(); assertEquals("one", functionResult); + functionResult = + regularClient.fcallReadOnly(funcName, new String[0], new String[] {"one", "two"}).get(); + assertEquals("one", functionResult); var flist = regularClient.functionList(false).get(); var expectedDescription = @@ -413,7 +499,7 @@ public void function_commands() { var expectedFlags = new HashMap>() { { - put(funcName, Set.of()); + put(funcName, Set.of("no-writes")); } }; checkFunctionListResponse(flist, libName, expectedDescription, expectedFlags, Optional.empty()); @@ -436,7 +522,7 @@ public void function_commands() { // function $newFuncName returns argument array len String newCode = generateLuaLibCode( - libName, Map.of(funcName, "return args[1]", newFuncName, "return #args"), false); + libName, Map.of(funcName, "return args[1]", newFuncName, "return #args"), true); assertEquals(libName, regularClient.functionLoad(newCode, true).get()); // load new lib and delete it - first lib remains loaded @@ -453,7 +539,7 @@ public void function_commands() { flist = regularClient.functionList(libName, false).get(); expectedDescription.put(newFuncName, null); - expectedFlags.put(newFuncName, Set.of()); + expectedFlags.put(newFuncName, Set.of("no-writes")); checkFunctionListResponse(flist, 
libName, expectedDescription, expectedFlags, Optional.empty()); flist = regularClient.functionList(libName, true).get(); @@ -463,6 +549,9 @@ public void function_commands() { functionResult = regularClient.fcall(newFuncName, new String[0], new String[] {"one", "two"}).get(); assertEquals(2L, functionResult); + functionResult = + regularClient.fcallReadOnly(newFuncName, new String[0], new String[] {"one", "two"}).get(); + assertEquals(2L, functionResult); assertEquals(OK, regularClient.functionFlush(ASYNC).get()); } @@ -515,7 +604,7 @@ public void copy() { } } - // @Test + @Test @SneakyThrows public void functionStats_and_functionKill() { assumeTrue(REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0"), "This feature added in redis 7"); @@ -558,6 +647,7 @@ public void functionStats_and_functionKill() { // redis kills a function with 5 sec delay assertEquals(OK, regularClient.functionKill().get()); + Thread.sleep(404); // sometimes kill doesn't happen immediately exception = assertThrows(ExecutionException.class, () -> regularClient.functionKill().get()); @@ -684,4 +774,262 @@ public void functionStats() { response = regularClient.functionStats().get(); checkFunctionStatsResponse(response, new String[0], 0, 0); } + + @Test + @SneakyThrows + public void function_dump_and_restore() { + assumeTrue(REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0"), "This feature added in redis 7"); + + assertEquals(OK, regularClient.functionFlush(SYNC).get()); + + // dumping an empty lib + byte[] emptyDump = regularClient.functionDump().get(); + assertTrue(emptyDump.length > 0); + + String name1 = "Foster"; + String name2 = "Dogster"; + + // function $name1 returns first argument + // function $name2 returns argument array len + String code = + generateLuaLibCode(name1, Map.of(name1, "return args[1]", name2, "return #args"), false); + assertEquals(name1, regularClient.functionLoad(code, true).get()); + var flist = regularClient.functionList(true).get(); + + final byte[] dump = 
regularClient.functionDump().get(); + + // restore without cleaning the lib and/or overwrite option causes an error + var executionException = + assertThrows(ExecutionException.class, () -> regularClient.functionRestore(dump).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertTrue(executionException.getMessage().contains("Library " + name1 + " already exists")); + + // APPEND policy also fails for the same reason (name collision) + executionException = + assertThrows( + ExecutionException.class, () -> regularClient.functionRestore(dump, APPEND).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + assertTrue(executionException.getMessage().contains("Library " + name1 + " already exists")); + + // REPLACE policy succeeds + assertEquals(OK, regularClient.functionRestore(dump, REPLACE).get()); + // but nothing changed - all code overwritten + assertDeepEquals(flist, regularClient.functionList(true).get()); + + // create lib with another name, but with the same function names + assertEquals(OK, regularClient.functionFlush(SYNC).get()); + code = generateLuaLibCode(name2, Map.of(name1, "return args[1]", name2, "return #args"), false); + assertEquals(name2, regularClient.functionLoad(code, true).get()); + + // REPLACE policy now fails due to a name collision + executionException = + assertThrows( + ExecutionException.class, () -> regularClient.functionRestore(dump, REPLACE).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + // redis checks names in random order and blames on first collision + assertTrue( + executionException.getMessage().contains("Function " + name1 + " already exists") + || executionException.getMessage().contains("Function " + name2 + " already exists")); + + // FLUSH policy succeeds, but deletes the second lib + assertEquals(OK, regularClient.functionRestore(dump, FLUSH).get()); + assertDeepEquals(flist, regularClient.functionList(true).get()); + + 
// call restored functions + assertEquals( + "meow", regularClient.fcall(name1, new String[0], new String[] {"meow", "woem"}).get()); + assertEquals( + 2L, regularClient.fcall(name2, new String[0], new String[] {"meow", "woem"}).get()); + } + + @SneakyThrows + @Test + public void randomkey() { + String key1 = "{key}" + UUID.randomUUID(); + String key2 = "{key}" + UUID.randomUUID(); + + assertEquals(OK, regularClient.set(key1, "a").get()); + assertEquals(OK, regularClient.set(key2, "b").get()); + + String randomKey = regularClient.randomKey().get(); + assertEquals(1L, regularClient.exists(new String[] {randomKey}).get()); + + // no keys in database + assertEquals(OK, regularClient.flushall().get()); + assertNull(regularClient.randomKey().get()); + } + + @Test + @SneakyThrows + public void sort() { + String setKey1 = "setKey1"; + String setKey2 = "setKey2"; + String setKey3 = "setKey3"; + String setKey4 = "setKey4"; + String setKey5 = "setKey5"; + String[] setKeys = new String[] {setKey1, setKey2, setKey3, setKey4, setKey5}; + String listKey = "listKey"; + String storeKey = "storeKey"; + String nameField = "name"; + String ageField = "age"; + String[] names = new String[] {"Alice", "Bob", "Charlie", "Dave", "Eve"}; + String[] namesSortedByAge = new String[] {"Dave", "Bob", "Alice", "Charlie", "Eve"}; + String[] ages = new String[] {"30", "25", "35", "20", "40"}; + String[] userIDs = new String[] {"3", "1", "5", "4", "2"}; + String namePattern = "setKey*->name"; + String agePattern = "setKey*->age"; + String missingListKey = "100000"; + + for (int i = 0; i < setKeys.length; i++) { + assertEquals( + 2, regularClient.hset(setKeys[i], Map.of(nameField, names[i], ageField, ages[i])).get()); + } + + assertEquals(5, regularClient.rpush(listKey, userIDs).get()); + assertArrayEquals( + new String[] {"Alice", "Bob"}, + regularClient + .sort( + listKey, + SortOptions.builder().limit(new Limit(0L, 2L)).getPattern(namePattern).build()) + .get()); + assertArrayEquals( + new 
String[] {"Eve", "Dave"}, + regularClient + .sort( + listKey, + SortOptions.builder() + .limit(new Limit(0L, 2L)) + .orderBy(DESC) + .getPattern(namePattern) + .build()) + .get()); + assertArrayEquals( + new String[] {"Eve", "40", "Charlie", "35"}, + regularClient + .sort( + listKey, + SortOptions.builder() + .limit(new Limit(0L, 2L)) + .orderBy(DESC) + .byPattern(agePattern) + .getPatterns(List.of(namePattern, agePattern)) + .build()) + .get()); + + // Non-existent key in the BY pattern will result in skipping the sorting operation + assertArrayEquals( + userIDs, + regularClient.sort(listKey, SortOptions.builder().byPattern("noSort").build()).get()); + + // Non-existent key in the GET pattern results in nulls + assertArrayEquals( + new String[] {null, null, null, null, null}, + regularClient + .sort(listKey, SortOptions.builder().alpha().getPattern("missing").build()) + .get()); + + // Missing key in the set + assertEquals(6, regularClient.lpush(listKey, new String[] {missingListKey}).get()); + assertArrayEquals( + new String[] {null, "Dave", "Bob", "Alice", "Charlie", "Eve"}, + regularClient + .sort( + listKey, + SortOptions.builder().byPattern(agePattern).getPattern(namePattern).build()) + .get()); + assertEquals(missingListKey, regularClient.lpop(listKey).get()); + + // SORT_RO + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + assertArrayEquals( + new String[] {"Alice", "Bob"}, + regularClient + .sortReadOnly( + listKey, + SortOptions.builder().limit(new Limit(0L, 2L)).getPattern(namePattern).build()) + .get()); + assertArrayEquals( + new String[] {"Eve", "Dave"}, + regularClient + .sortReadOnly( + listKey, + SortOptions.builder() + .limit(new Limit(0L, 2L)) + .orderBy(DESC) + .getPattern(namePattern) + .build()) + .get()); + assertArrayEquals( + new String[] {"Eve", "40", "Charlie", "35"}, + regularClient + .sortReadOnly( + listKey, + SortOptions.builder() + .limit(new Limit(0L, 2L)) + .orderBy(DESC) + .byPattern(agePattern) + 
.getPatterns(List.of(namePattern, agePattern)) + .build()) + .get()); + + // Non-existent key in the BY pattern will result in skipping the sorting operation + assertArrayEquals( + userIDs, + regularClient + .sortReadOnly(listKey, SortOptions.builder().byPattern("noSort").build()) + .get()); + + // Non-existent key in the GET pattern results in nulls + assertArrayEquals( + new String[] {null, null, null, null, null}, + regularClient + .sortReadOnly(listKey, SortOptions.builder().alpha().getPattern("missing").build()) + .get()); + + assertArrayEquals( + namesSortedByAge, + regularClient + .sortReadOnly( + listKey, + SortOptions.builder().byPattern(agePattern).getPattern(namePattern).build()) + .get()); + + // Missing key in the set + assertEquals(6, regularClient.lpush(listKey, new String[] {missingListKey}).get()); + assertArrayEquals( + new String[] {null, "Dave", "Bob", "Alice", "Charlie", "Eve"}, + regularClient + .sortReadOnly( + listKey, + SortOptions.builder().byPattern(agePattern).getPattern(namePattern).build()) + .get()); + assertEquals(missingListKey, regularClient.lpop(listKey).get()); + } + + // SORT with STORE + assertEquals( + 5, + regularClient + .sortStore( + listKey, + storeKey, + SortOptions.builder() + .limit(new Limit(0L, -1L)) + .orderBy(ASC) + .byPattern(agePattern) + .getPattern(namePattern) + .build()) + .get()); + assertArrayEquals(namesSortedByAge, regularClient.lrange(storeKey, 0, -1).get()); + assertEquals( + 5, + regularClient + .sortStore( + listKey, + storeKey, + SortOptions.builder().byPattern(agePattern).getPattern(namePattern).build()) + .get()); + assertArrayEquals(namesSortedByAge, regularClient.lrange(storeKey, 0, -1).get()); + } } diff --git a/java/integTest/src/test/java/glide/standalone/StandaloneClientTests.java b/java/integTest/src/test/java/glide/standalone/StandaloneClientTests.java index 3f36952049..9da65bf6c3 100644 --- a/java/integTest/src/test/java/glide/standalone/StandaloneClientTests.java +++ 
b/java/integTest/src/test/java/glide/standalone/StandaloneClientTests.java @@ -1,4 +1,4 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.standalone; import static glide.TestConfiguration.REDIS_VERSION; diff --git a/java/integTest/src/test/java/glide/standalone/TransactionTests.java b/java/integTest/src/test/java/glide/standalone/TransactionTests.java index 5ca4019d6c..fa82d16f42 100644 --- a/java/integTest/src/test/java/glide/standalone/TransactionTests.java +++ b/java/integTest/src/test/java/glide/standalone/TransactionTests.java @@ -1,25 +1,33 @@ -/** Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 */ +/** Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ package glide.standalone; import static glide.TestConfiguration.REDIS_VERSION; import static glide.TestUtilities.assertDeepEquals; import static glide.TestUtilities.commonClientConfig; import static glide.api.BaseClient.OK; +import static glide.api.models.GlideString.gs; +import static glide.api.models.commands.SortBaseOptions.OrderBy.DESC; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assumptions.assumeTrue; import glide.TransactionTestUtilities.TransactionBuilder; import glide.api.RedisClient; +import glide.api.models.GlideString; import glide.api.models.Transaction; import glide.api.models.commands.InfoOptions; +import glide.api.models.commands.SortOptions; +import 
glide.api.models.exceptions.RequestException; import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.Map; import java.util.UUID; +import java.util.concurrent.ExecutionException; import lombok.SneakyThrows; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; @@ -210,16 +218,6 @@ public void zrank_zrevrank_withscores() { assertArrayEquals(new Object[] {2L, 1.0}, (Object[]) result[2]); } - @Test - @SneakyThrows - public void WATCH_transaction_failure_returns_null() { - Transaction transaction = new Transaction(); - transaction.get("key"); - assertEquals(OK, client.customCommand(new String[] {"WATCH", "key"}).get()); - assertEquals(OK, client.set("key", "foo").get()); - assertNull(client.exec(transaction).get()); - } - @Test @SneakyThrows public void copy() { @@ -256,4 +254,221 @@ public void copy() { Object[] result = client.exec(transaction).get(); assertArrayEquals(expectedResult, result); } + + @Test + @SneakyThrows + public void watch() { + String key1 = "{key}-1" + UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String key3 = "{key}-3" + UUID.randomUUID(); + String key4 = "{key}-4" + UUID.randomUUID(); + String foobarString = "foobar"; + String helloString = "hello"; + String[] keys = new String[] {key1, key2, key3}; + Transaction setFoobarTransaction = new Transaction(); + Transaction setHelloTransaction = new Transaction(); + String[] expectedExecResponse = new String[] {OK, OK, OK}; + + // Returns null when a watched key is modified before it is executed in a transaction command. + // Transaction commands are not performed. 
+ assertEquals(OK, client.watch(keys).get()); + assertEquals(OK, client.set(key2, helloString).get()); + setFoobarTransaction.set(key1, foobarString).set(key2, foobarString).set(key3, foobarString); + assertNull(client.exec(setFoobarTransaction).get()); + assertNull(client.get(key1).get()); // Sanity check + assertEquals(helloString, client.get(key2).get()); + assertNull(client.get(key3).get()); + + // Transaction executes command successfully with a read command on the watch key before + // transaction is executed. + assertEquals(OK, client.watch(keys).get()); + assertEquals(helloString, client.get(key2).get()); + assertArrayEquals(expectedExecResponse, client.exec(setFoobarTransaction).get()); + assertEquals(foobarString, client.get(key1).get()); // Sanity check + assertEquals(foobarString, client.get(key2).get()); + assertEquals(foobarString, client.get(key3).get()); + + // Transaction executes command successfully with unmodified watched keys + assertEquals(OK, client.watch(keys).get()); + assertArrayEquals(expectedExecResponse, client.exec(setFoobarTransaction).get()); + assertEquals(foobarString, client.get(key1).get()); // Sanity check + assertEquals(foobarString, client.get(key2).get()); + assertEquals(foobarString, client.get(key3).get()); + + // Transaction executes command successfully with a modified watched key but is not in the + // transaction. 
+ assertEquals(OK, client.watch(new String[] {key4}).get()); + setHelloTransaction.set(key1, helloString).set(key2, helloString).set(key3, helloString); + assertArrayEquals(expectedExecResponse, client.exec(setHelloTransaction).get()); + assertEquals(helloString, client.get(key1).get()); // Sanity check + assertEquals(helloString, client.get(key2).get()); + assertEquals(helloString, client.get(key3).get()); + + // WATCH can not have an empty String array parameter + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.watch(new String[] {}).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @Test + @SneakyThrows + public void watch_binary() { + GlideString key1 = gs("{key}-1" + UUID.randomUUID()); + GlideString key2 = gs("{key}-2" + UUID.randomUUID()); + GlideString key3 = gs("{key}-3" + UUID.randomUUID()); + GlideString key4 = gs("{key}-4" + UUID.randomUUID()); + String foobarString = "foobar"; + String helloString = "hello"; + GlideString[] keys = new GlideString[] {key1, key2, key3}; + Transaction setFoobarTransaction = new Transaction(); + Transaction setHelloTransaction = new Transaction(); + String[] expectedExecResponse = new String[] {OK, OK, OK}; + + // Returns null when a watched key is modified before it is executed in a transaction command. + // Transaction commands are not performed. + assertEquals(OK, client.watch(keys).get()); + assertEquals(OK, client.set(key2, gs(helloString)).get()); + setFoobarTransaction + .set(key1.toString(), foobarString) + .set(key2.toString(), foobarString) + .set(key3.toString(), foobarString); + assertNull(client.exec(setFoobarTransaction).get()); + assertNull(client.get(key1).get()); // Sanity check + assertEquals(gs(helloString), client.get(key2).get()); + assertNull(client.get(key3).get()); + + // Transaction executes command successfully with a read command on the watch key before + // transaction is executed. 
+ assertEquals(OK, client.watch(keys).get()); + assertEquals(gs(helloString), client.get(key2).get()); + assertArrayEquals(expectedExecResponse, client.exec(setFoobarTransaction).get()); + assertEquals(gs(foobarString), client.get(key1).get()); // Sanity check + assertEquals(gs(foobarString), client.get(key2).get()); + assertEquals(gs(foobarString), client.get(key3).get()); + + // Transaction executes command successfully with unmodified watched keys + assertEquals(OK, client.watch(keys).get()); + assertArrayEquals(expectedExecResponse, client.exec(setFoobarTransaction).get()); + assertEquals(gs(foobarString), client.get(key1).get()); // Sanity check + assertEquals(gs(foobarString), client.get(key2).get()); + assertEquals(gs(foobarString), client.get(key3).get()); + + // Transaction executes command successfully with a modified watched key but is not in the + // transaction. + assertEquals(OK, client.watch(new GlideString[] {key4}).get()); + setHelloTransaction + .set(key1.toString(), helloString) + .set(key2.toString(), helloString) + .set(key3.toString(), helloString); + assertArrayEquals(expectedExecResponse, client.exec(setHelloTransaction).get()); + assertEquals(gs(helloString), client.get(key1).get()); // Sanity check + assertEquals(gs(helloString), client.get(key2).get()); + assertEquals(gs(helloString), client.get(key3).get()); + + // WATCH can not have an empty String array parameter + ExecutionException executionException = + assertThrows(ExecutionException.class, () -> client.watch(new GlideString[] {}).get()); + assertInstanceOf(RequestException.class, executionException.getCause()); + } + + @Test + @SneakyThrows + public void unwatch() { + String key1 = "{key}-1" + UUID.randomUUID(); + String key2 = "{key}-2" + UUID.randomUUID(); + String foobarString = "foobar"; + String helloString = "hello"; + String[] keys = new String[] {key1, key2}; + Transaction setFoobarTransaction = new Transaction(); + String[] expectedExecResponse = new String[] {OK, OK}; + 
+ // UNWATCH returns OK when there no watched keys + assertEquals(OK, client.unwatch().get()); + + // Transaction executes successfully after modifying a watched key then calling UNWATCH + assertEquals(OK, client.watch(keys).get()); + assertEquals(OK, client.set(key2, helloString).get()); + assertEquals(OK, client.unwatch().get()); + setFoobarTransaction.set(key1, foobarString).set(key2, foobarString); + assertArrayEquals(expectedExecResponse, client.exec(setFoobarTransaction).get()); + assertEquals(foobarString, client.get(key1).get()); + assertEquals(foobarString, client.get(key2).get()); + } + + @Test + @SneakyThrows + public void sort_and_sortReadOnly() { + Transaction transaction1 = new Transaction(); + Transaction transaction2 = new Transaction(); + String genericKey1 = "{GenericKey}-1-" + UUID.randomUUID(); + String genericKey2 = "{GenericKey}-2-" + UUID.randomUUID(); + String[] ascendingListByAge = new String[] {"Bob", "Alice"}; + String[] descendingListByAge = new String[] {"Alice", "Bob"}; + + transaction1 + .hset("user:1", Map.of("name", "Alice", "age", "30")) + .hset("user:2", Map.of("name", "Bob", "age", "25")) + .lpush(genericKey1, new String[] {"2", "1"}) + .sort( + genericKey1, + SortOptions.builder().byPattern("user:*->age").getPattern("user:*->name").build()) + .sort( + genericKey1, + SortOptions.builder() + .orderBy(DESC) + .byPattern("user:*->age") + .getPattern("user:*->name") + .build()) + .sortStore( + genericKey1, + genericKey2, + SortOptions.builder().byPattern("user:*->age").getPattern("user:*->name").build()) + .lrange(genericKey2, 0, -1) + .sortStore( + genericKey1, + genericKey2, + SortOptions.builder() + .orderBy(DESC) + .byPattern("user:*->age") + .getPattern("user:*->name") + .build()) + .lrange(genericKey2, 0, -1); + + var expectedResults = + new Object[] { + 2L, // hset("user:1", Map.of("name", "Alice", "age", "30")) + 2L, // hset("user:2", Map.of("name", "Bob", "age", "25")) + 2L, // lpush(genericKey1, new String[] {"2", "1"}) + 
ascendingListByAge, // sort(genericKey1, SortOptions) + descendingListByAge, // sort(genericKey1, SortOptions) + 2L, // sortStore(genericKey1, genericKey2, SortOptions) + ascendingListByAge, // lrange(genericKey4, 0, -1) + 2L, // sortStore(genericKey1, genericKey2, SortOptions) + descendingListByAge, // lrange(genericKey2, 0, -1) + }; + + assertArrayEquals(expectedResults, client.exec(transaction1).get()); + + if (REDIS_VERSION.isGreaterThanOrEqualTo("7.0.0")) { + transaction2 + .sortReadOnly( + genericKey1, + SortOptions.builder().byPattern("user:*->age").getPattern("user:*->name").build()) + .sortReadOnly( + genericKey1, + SortOptions.builder() + .orderBy(DESC) + .byPattern("user:*->age") + .getPattern("user:*->name") + .build()); + + expectedResults = + new Object[] { + ascendingListByAge, // sortReadOnly(genericKey1, SortOptions) + descendingListByAge, // sortReadOnly(genericKey1, SortOptions) + }; + + assertArrayEquals(expectedResults, client.exec(transaction2).get()); + } + } } diff --git a/java/src/ffi_test.rs b/java/src/ffi_test.rs index 199a811392..5cebaf2fd3 100644 --- a/java/src/ffi_test.rs +++ b/java/src/ffi_test.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use jni::{ objects::{JClass, JLongArray}, diff --git a/java/src/lib.rs b/java/src/lib.rs index 9d42b8e298..a6154d023c 100644 --- a/java/src/lib.rs +++ b/java/src/lib.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use glide_core::start_socket_listener; @@ -45,7 +45,7 @@ fn redis_value_to_java<'local>( } } } else { - let Ok(bytearr) = env.byte_array_from_slice(data.as_ref()) else { + let Ok(bytearr) = env.byte_array_from_slice(&data) else { let _ = env.throw("Failed to allocate byte array"); return JObject::null(); }; diff --git 
a/logger_core/src/lib.rs b/logger_core/src/lib.rs index 8c0d85de0a..7835ab7a6f 100644 --- a/logger_core/src/lib.rs +++ b/logger_core/src/lib.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use once_cell::sync::OnceCell; use std::sync::RwLock; diff --git a/logger_core/tests/test_logger.rs b/logger_core/tests/test_logger.rs index 7481a19046..e38d91e92b 100644 --- a/logger_core/tests/test_logger.rs +++ b/logger_core/tests/test_logger.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use test_env_helpers::*; diff --git a/node/README.md b/node/README.md index 68c8331411..ae5fcaf292 100644 --- a/node/README.md +++ b/node/README.md @@ -45,7 +45,7 @@ To install GLIDE for Redis using `npm`, follow these steps: #### Cluster Redis: ```node -import { RedisClusterClient } from "@aws/glide-for-redis"; +import { GlideClusterClient } from "@aws/glide-for-redis"; const addresses = [ { @@ -53,7 +53,7 @@ const addresses = [ port: 6379, }, ]; -const client = await RedisClusterClient.createClient({ +const client = await GlideClusterClient.createClient({ addresses: addresses, }); await client.set("foo", "bar"); @@ -64,7 +64,7 @@ client.close(); #### Standalone Redis: ```node -import { RedisClient } from "@aws/glide-for-redis"; +import { GlideClient } from "@aws/glide-for-redis"; const addresses = [ { @@ -76,7 +76,7 @@ const addresses = [ port: 6379, }, ]; -const client = await RedisClient.createClient({ +const client = await GlideClient.createClient({ addresses: addresses, }); await client.set("foo", "bar"); diff --git a/node/THIRD_PARTY_LICENSES_NODE b/node/THIRD_PARTY_LICENSES_NODE index 8ab9c1cb87..dccd5046b1 100644 --- a/node/THIRD_PARTY_LICENSES_NODE +++ b/node/THIRD_PARTY_LICENSES_NODE @@ -3045,7 +3045,7 @@ 
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: bitflags:2.5.0 +Package: bitflags:2.6.0 The following copyrights and licenses were found in the source code of this package: @@ -12417,7 +12417,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: lazy_static:1.4.0 +Package: lazy_static:1.5.0 The following copyrights and licenses were found in the source code of this package: @@ -12875,7 +12875,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: libloading:0.8.3 +Package: libloading:0.8.4 The following copyrights and licenses were found in the source code of this package: @@ -13584,7 +13584,7 @@ The following copyrights and licenses were found in the source code of this pack ---- -Package: memchr:2.7.2 +Package: memchr:2.7.4 The following copyrights and licenses were found in the source code of this package: @@ -13636,7 +13636,7 @@ For more information, please refer to ---- -Package: miniz_oxide:0.7.3 +Package: miniz_oxide:0.7.4 The following copyrights and licenses were found in the source code of this package: @@ -13910,7 +13910,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: napi:2.16.6 +Package: napi:2.16.7 The following copyrights and licenses were found in the source code of this package: @@ -13960,7 +13960,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: napi-derive:2.16.5 +Package: napi-derive:2.16.6 The following copyrights and licenses were found in the source code of this package: @@ -13985,7 +13985,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: napi-derive-backend:1.0.67 +Package: napi-derive-backend:1.0.68 The following copyrights and licenses were found in the source code of this package: @@ -18436,7 +18436,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
---- -Package: proc-macro2:1.0.85 +Package: proc-macro2:1.0.86 The following copyrights and licenses were found in the source code of this package: @@ -19662,7 +19662,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---- -Package: redox_syscall:0.5.1 +Package: redox_syscall:0.5.2 The following copyrights and licenses were found in the source code of this package: @@ -23801,7 +23801,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: subtle:2.5.0 +Package: subtle:2.6.1 The following copyrights and licenses were found in the source code of this package: @@ -24061,7 +24061,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: syn:2.0.66 +Package: syn:2.0.68 The following copyrights and licenses were found in the source code of this package: @@ -26122,7 +26122,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: tinyvec:1.6.0 +Package: tinyvec:1.6.1 The following copyrights and licenses were found in the source code of this package: @@ -36860,7 +36860,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---- -Package: @types:node:20.14.2 +Package: @types:node:20.14.9 The following copyrights and licenses were found in the source code of this package: diff --git a/node/index.ts b/node/index.ts index 832d5e9ea0..e2bac6f555 100644 --- a/node/index.ts +++ b/node/index.ts @@ -1,12 +1,12 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ export { Script } from "glide-rs"; export * from "./src/BaseClient"; export * from "./src/Commands"; export * from "./src/Errors"; +export * from "./src/GlideClient"; +export * from "./src/GlideClusterClient"; export * from "./src/Logger"; -export * from "./src/RedisClient"; -export * from "./src/RedisClusterClient"; export * from "./src/Transaction"; diff --git a/node/npm/glide/index.ts b/node/npm/glide/index.ts index f5cbf82d0b..c0c5b827f1 100644 --- a/node/npm/glide/index.ts +++ b/node/npm/glide/index.ts @@ -1,7 +1,7 @@ #!/usr/bin/env node /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { GLIBC, MUSL, familySync } from "detect-libc"; @@ -74,9 +74,9 @@ function loadNativeBinding() { function initialize() { const nativeBinding = loadNativeBinding(); const { - RedisClient, - RedisClusterClient, - RedisClientConfiguration, + GlideClient, + GlideClusterClient, + GlideClientConfiguration, SlotIdTypes, SlotKeyTypes, RouteByAddress, @@ -117,9 +117,9 @@ function initialize() { } = nativeBinding; module.exports = { - RedisClient, - RedisClusterClient, - RedisClientConfiguration, + GlideClient, + GlideClusterClient, + GlideClientConfiguration, SlotIdTypes, SlotKeyTypes, RouteByAddress, diff --git a/node/rust-client/build.rs b/node/rust-client/build.rs index 7295877eb8..af38e8a35e 100644 --- a/node/rust-client/build.rs +++ b/node/rust-client/build.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project 
Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ extern crate napi_build; diff --git a/node/rust-client/src/lib.rs b/node/rust-client/src/lib.rs index 4f0a5c96b3..743ec570e2 100644 --- a/node/rust-client/src/lib.rs +++ b/node/rust-client/src/lib.rs @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ #[cfg(not(target_env = "msvc"))] @@ -67,7 +67,8 @@ impl AsyncClient { .build()?; let _runtime_handle = runtime.enter(); let client = to_js_result(redis::Client::open(connection_address))?; - let connection = to_js_result(runtime.block_on(client.get_multiplexed_async_connection()))?; + let connection = + to_js_result(runtime.block_on(client.get_multiplexed_async_connection(None)))?; Ok(AsyncClient { connection, runtime, @@ -166,10 +167,7 @@ fn redis_value_to_js(val: Value, js_env: Env) -> Result { .map(|val| val.into_unknown()), Value::Okay => js_env.create_string("OK").map(|val| val.into_unknown()), Value::Int(num) => js_env.create_int64(num).map(|val| val.into_unknown()), - Value::BulkString(data) => { - let str = to_js_result(std::str::from_utf8(data.as_ref()))?; - js_env.create_string(str).map(|val| val.into_unknown()) - } + Value::BulkString(data) => Ok(js_env.create_buffer_with_data(data)?.into_unknown()), Value::Array(array) => { let mut js_array_view = js_env.create_array_with_length(array.len())?; for (index, item) in array.into_iter().enumerate() { @@ -192,9 +190,12 @@ fn redis_value_to_js(val: Value, js_env: Env) -> Result { // "Normal client libraries may ignore completely the difference between this" // "type and the String type, and return a string in both cases."" // https://github.com/redis/redis-specifications/blob/master/protocol/RESP3.md - Value::VerbatimString { format: _, text } => js_env - .create_string_from_std(text) - .map(|val| 
val.into_unknown()), + Value::VerbatimString { format: _, text } => { + // VerbatimString is binary safe -> convert it into such + Ok(js_env + .create_buffer_with_data(text.as_bytes().to_vec())? + .into_unknown()) + } Value::BigNumber(num) => { let sign = num.is_negative(); let words = num.iter_u64_digits().collect(); @@ -224,7 +225,9 @@ fn redis_value_to_js(val: Value, js_env: Env) -> Result { } } -#[napi(ts_return_type = "null | string | number | {} | Boolean | BigInt | Set | any[]")] +#[napi( + ts_return_type = "null | string | Uint8Array | number | {} | Boolean | BigInt | Set | any[]" +)] pub fn value_from_split_pointer(js_env: Env, high_bits: u32, low_bits: u32) -> Result { let mut bytes = [0_u8; 8]; (&mut bytes[..4]) diff --git a/node/src/BaseClient.ts b/node/src/BaseClient.ts index 519abc4de1..9e6438cc83 100644 --- a/node/src/BaseClient.ts +++ b/node/src/BaseClient.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { @@ -59,6 +59,8 @@ import { createMSet, createObjectEncoding, createObjectFreq, + createObjectIdletime, + createObjectRefcount, createPExpire, createPExpireAt, createPTTL, @@ -77,17 +79,20 @@ import { createSMove, createSPop, createSRem, + createSUnionStore, createSet, createStrlen, createTTL, createType, createUnlink, createXAdd, + createXLen, createXRead, createXTrim, createZAdd, createZCard, createZCount, + createZInterCard, createZInterstore, createZPopMax, createZPopMin, @@ -98,11 +103,6 @@ import { createZRemRangeByRank, createZRemRangeByScore, createZScore, - createSUnionStore, - createXLen, - createZInterCard, - createObjectIdletime, - createObjectRefcount, } from "./Commands"; import { ClosingError, @@ -504,8 +504,8 @@ export class BaseClient { * ``` */ public set( - key: string, - value: string, + key: string | Uint8Array, + value: string | Uint8Array, options?: SetOptions, ): Promise<"OK" | string 
| null> { return this.createWritePromise(createSet(key, value, options)); diff --git a/node/src/Commands.ts b/node/src/Commands.ts index 60053d9e4b..9e527cfe03 100644 --- a/node/src/Commands.ts +++ b/node/src/Commands.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { createLeakedStringVec, MAX_REQUEST_ARGS_LEN } from "glide-rs"; @@ -9,7 +9,7 @@ import { redis_request } from "./ProtobufMessage"; import RequestType = redis_request.RequestType; -function isLargeCommand(args: string[]) { +function isLargeCommand(args: BulkString[]) { let lenSum = 0; for (const arg of args) { @@ -23,14 +23,20 @@ function isLargeCommand(args: string[]) { return false; } +type BulkString = string | Uint8Array; + /** * Convert a string array into Uint8Array[] */ -function toBuffersArray(args: string[]) { +function toBuffersArray(args: BulkString[]) { const argsBytes: Uint8Array[] = []; - for (const str of args) { - argsBytes.push(Buffer.from(str)); + for (const arg of args) { + if (typeof arg == "string") { + argsBytes.push(Buffer.from(arg)); + } else { + argsBytes.push(arg); + } } return argsBytes; @@ -56,7 +62,7 @@ export function parseInfoResponse(response: string): Record { function createCommand( requestType: redis_request.RequestType, - args: string[], + args: BulkString[], ): redis_request.Command { const singleCommand = redis_request.Command.create({ requestType, @@ -137,8 +143,8 @@ export type SetOptions = { * @internal */ export function createSet( - key: string, - value: string, + key: BulkString, + value: BulkString, options?: SetOptions, ): redis_request.Command { const args = [key, value]; diff --git a/node/src/Errors.ts b/node/src/Errors.ts index 262bd03617..d4a73f2958 100644 --- a/node/src/Errors.ts +++ b/node/src/Errors.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * 
Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ /// Base class for errors. diff --git a/node/src/RedisClient.ts b/node/src/GlideClient.ts similarity index 95% rename from node/src/RedisClient.ts rename to node/src/GlideClient.ts index 53ac5e9fee..822aaafdae 100644 --- a/node/src/RedisClient.ts +++ b/node/src/GlideClient.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import * as net from "net"; @@ -22,7 +22,7 @@ import { import { connection_request } from "./ProtobufMessage"; import { Transaction } from "./Transaction"; -export type RedisClientConfiguration = BaseClientConfiguration & { +export type GlideClientConfiguration = BaseClientConfiguration & { /** * index of the logical database to connect to. */ @@ -59,12 +59,12 @@ export type RedisClientConfiguration = BaseClientConfiguration & { * For full documentation, see * https://github.com/aws/babushka/wiki/NodeJS-wrapper#redis-standalone */ -export class RedisClient extends BaseClient { +export class GlideClient extends BaseClient { /** * @internal */ protected createClientRequest( - options: RedisClientConfiguration, + options: GlideClientConfiguration, ): connection_request.IConnectionRequest { const configuration = super.createClientRequest(options); configuration.databaseId = options.databaseId; @@ -73,22 +73,22 @@ export class RedisClient extends BaseClient { } public static createClient( - options: RedisClientConfiguration, - ): Promise { - return super.createClientInternal( + options: GlideClientConfiguration, + ): Promise { + return super.createClientInternal( options, - (socket: net.Socket) => new RedisClient(socket), + (socket: net.Socket) => new GlideClient(socket), ); } static async __createClient( options: BaseClientConfiguration, connectedSocket: net.Socket, - ): Promise { + ): Promise { return this.__createClientInternal( options, 
connectedSocket, - (socket, options) => new RedisClient(socket, options), + (socket, options) => new GlideClient(socket, options), ); } diff --git a/node/src/RedisClusterClient.ts b/node/src/GlideClusterClient.ts similarity index 98% rename from node/src/RedisClusterClient.ts rename to node/src/GlideClusterClient.ts index 9d9cf1a644..527f1bf672 100644 --- a/node/src/RedisClusterClient.ts +++ b/node/src/GlideClusterClient.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import * as net from "net"; @@ -204,7 +204,7 @@ function toProtobufRoute( * For full documentation, see * https://github.com/aws/babushka/wiki/NodeJS-wrapper#redis-cluster */ -export class RedisClusterClient extends BaseClient { +export class GlideClusterClient extends BaseClient { /** * @internal */ @@ -235,22 +235,22 @@ export class RedisClusterClient extends BaseClient { public static async createClient( options: ClusterClientConfiguration, - ): Promise { + ): Promise { return await super.createClientInternal( options, (socket: net.Socket, options?: ClusterClientConfiguration) => - new RedisClusterClient(socket, options), + new GlideClusterClient(socket, options), ); } static async __createClient( options: BaseClientConfiguration, connectedSocket: net.Socket, - ): Promise { + ): Promise { return super.__createClientInternal( options, connectedSocket, - (socket, options) => new RedisClusterClient(socket, options), + (socket, options) => new GlideClusterClient(socket, options), ); } diff --git a/node/src/Logger.ts b/node/src/Logger.ts index 28a2a7a334..4560e4d218 100644 --- a/node/src/Logger.ts +++ b/node/src/Logger.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { InitInternalLogger, Level, log } from "glide-rs"; diff --git 
a/node/src/Transaction.ts b/node/src/Transaction.ts index 61f7da97c3..8e14d3dd62 100644 --- a/node/src/Transaction.ts +++ b/node/src/Transaction.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { @@ -61,6 +61,8 @@ import { createMSet, createObjectEncoding, createObjectFreq, + createObjectIdletime, + createObjectRefcount, createPExpire, createPExpireAt, createPTTL, @@ -80,6 +82,7 @@ import { createSMove, createSPop, createSRem, + createSUnionStore, createSelect, createSet, createStrlen, @@ -88,11 +91,13 @@ import { createType, createUnlink, createXAdd, + createXLen, createXRead, createXTrim, createZAdd, createZCard, createZCount, + createZInterCard, createZInterstore, createZPopMax, createZPopMin, @@ -103,11 +108,6 @@ import { createZRemRangeByRank, createZRemRangeByScore, createZScore, - createSUnionStore, - createXLen, - createZInterCard, - createObjectIdletime, - createObjectRefcount, } from "./Commands"; import { redis_request } from "./ProtobufMessage"; @@ -1529,7 +1529,7 @@ export class BaseTransaction> { * Transactions allow the execution of a group of commands in a single step. * * Command Response: - * An array of command responses is returned by the RedisClient.exec command, in the order they were given. + * An array of command responses is returned by the GlideClient.exec command, in the order they were given. * Each element in the array represents a command given to the transaction. * The response for each command depends on the executed Redis command. * Specific response types are documented alongside each method. 
@@ -1540,7 +1540,7 @@ export class BaseTransaction> { * .set("key", "value") * .select(1) /// Standalone command * .get("key"); - * const result = await redisClient.exec(transaction); + * const result = await GlideClient.exec(transaction); * console.log(result); // Output: ['OK', 'OK', null] * ``` */ @@ -1564,7 +1564,7 @@ export class Transaction extends BaseTransaction { * Transactions allow the execution of a group of commands in a single step. * * Command Response: - * An array of command responses is returned by the RedisClusterClient.exec command, in the order they were given. + * An array of command responses is returned by the GlideClusterClient.exec command, in the order they were given. * Each element in the array represents a command given to the transaction. * The response for each command depends on the executed Redis command. * Specific response types are documented alongside each method. diff --git a/node/tests/AsyncClient.test.ts b/node/tests/AsyncClient.test.ts index 22f7ec5075..ec75809878 100644 --- a/node/tests/AsyncClient.test.ts +++ b/node/tests/AsyncClient.test.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { afterAll, afterEach, beforeAll, describe } from "@jest/globals"; diff --git a/node/tests/RedisClient.test.ts b/node/tests/RedisClient.test.ts index 3c238cf0c8..0ec781ab17 100644 --- a/node/tests/RedisClient.test.ts +++ b/node/tests/RedisClient.test.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { @@ -12,14 +12,16 @@ import { } from "@jest/globals"; import { BufferReader, BufferWriter } from "protobufjs"; import { v4 as uuidv4 } from "uuid"; -import { ProtocolVersion, RedisClient, Transaction } from ".."; +import { GlideClient, ProtocolVersion, Transaction 
} from ".."; import { RedisCluster } from "../../utils/TestUtils.js"; import { redis_request } from "../src/ProtobufMessage"; import { runBaseTests } from "./SharedTests"; import { + checkSimple, convertStringArrayToBuffer, flushAndCloseClient, getClientConfigurationOption, + intoString, parseCommandLineArgs, parseEndpoints, transactionTest, @@ -28,15 +30,15 @@ import { /* eslint-disable @typescript-eslint/no-var-requires */ type Context = { - client: RedisClient; + client: GlideClient; }; const TIMEOUT = 50000; -describe("RedisClient", () => { +describe("GlideClient", () => { let testsFailed = 0; let cluster: RedisCluster; - let client: RedisClient; + let client: GlideClient; beforeAll(async () => { const standaloneAddresses = parseCommandLineArgs()["standalone-endpoints"]; @@ -104,13 +106,17 @@ describe("RedisClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "info without parameters", async (protocol) => { - client = await RedisClient.createClient( + client = await GlideClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const result = await client.info(); - expect(result).toEqual(expect.stringContaining("# Server")); - expect(result).toEqual(expect.stringContaining("# Replication")); - expect(result).toEqual( + expect(intoString(result)).toEqual( + expect.stringContaining("# Server"), + ); + expect(intoString(result)).toEqual( + expect.stringContaining("# Replication"), + ); + expect(intoString(result)).toEqual( expect.not.stringContaining("# Latencystats"), ); }, @@ -119,31 +125,31 @@ describe("RedisClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "simple select test", async (protocol) => { - client = await RedisClient.createClient( + client = await GlideClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); let selectResult = await client.select(0); - expect(selectResult).toEqual("OK"); + checkSimple(selectResult).toEqual("OK"); const key = 
uuidv4(); const value = uuidv4(); const result = await client.set(key, value); - expect(result).toEqual("OK"); + checkSimple(result).toEqual("OK"); selectResult = await client.select(1); - expect(selectResult).toEqual("OK"); + checkSimple(selectResult).toEqual("OK"); expect(await client.get(key)).toEqual(null); selectResult = await client.select(0); - expect(selectResult).toEqual("OK"); - expect(await client.get(key)).toEqual(value); + checkSimple(selectResult).toEqual("OK"); + checkSimple(await client.get(key)).toEqual(value); }, ); it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `can send transactions_%p`, async (protocol) => { - client = await RedisClient.createClient( + client = await GlideClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const transaction = new Transaction(); @@ -151,17 +157,17 @@ describe("RedisClient", () => { transaction.select(0); const result = await client.exec(transaction); expectedRes.push("OK"); - expect(result).toEqual(expectedRes); + expect(intoString(result)).toEqual(intoString(expectedRes)); }, ); it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "can return null on WATCH transaction failures", async (protocol) => { - const client1 = await RedisClient.createClient( + const client1 = await GlideClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); - const client2 = await RedisClient.createClient( + const client2 = await GlideClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const transaction = new Transaction(); @@ -183,7 +189,7 @@ describe("RedisClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "object freq transaction test_%p", async (protocol) => { - const client = await RedisClient.createClient( + const client = await GlideClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); @@ -224,7 +230,7 @@ describe("RedisClient", () => { 
it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "object idletime transaction test_%p", async (protocol) => { - const client = await RedisClient.createClient( + const client = await GlideClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); @@ -269,7 +275,7 @@ describe("RedisClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "object refcount transaction test_%p", async (protocol) => { - const client = await RedisClient.createClient( + const client = await GlideClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); @@ -300,7 +306,7 @@ describe("RedisClient", () => { options.protocol = protocol; options.clientName = clientName; testsFailed += 1; - client = await RedisClient.createClient(options); + client = await GlideClient.createClient(options); return { client, context: { client } }; }, close: (context: Context, testSucceeded: boolean) => { diff --git a/node/tests/RedisClientInternals.test.ts b/node/tests/RedisClientInternals.test.ts index 70a4c6bfee..c910b3bb1d 100644 --- a/node/tests/RedisClientInternals.test.ts +++ b/node/tests/RedisClientInternals.test.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { beforeAll, describe, expect, it } from "@jest/globals"; @@ -22,11 +22,11 @@ import { BaseClientConfiguration, ClosingError, ClusterClientConfiguration, + GlideClient, + GlideClientConfiguration, + GlideClusterClient, InfoOptions, Logger, - RedisClient, - RedisClientConfiguration, - RedisClusterClient, RequestError, ReturnType, SlotKeyTypes, @@ -124,11 +124,11 @@ function sendResponse( function getConnectionAndSocket( checkRequest?: (request: connection_request.ConnectionRequest) => boolean, - connectionOptions?: ClusterClientConfiguration | RedisClientConfiguration, + connectionOptions?: ClusterClientConfiguration | 
GlideClientConfiguration, isCluster?: boolean, ): Promise<{ socket: net.Socket; - connection: RedisClient | RedisClusterClient; + connection: GlideClient | GlideClusterClient; server: net.Server; }> { return new Promise((resolve, reject) => { @@ -136,7 +136,7 @@ function getConnectionAndSocket( path.join(os.tmpdir(), `socket_listener`), ); const socketName = path.join(temporaryFolder, "read"); - let connectionPromise: Promise; // eslint-disable-line prefer-const + let connectionPromise: Promise; // eslint-disable-line prefer-const const server = net .createServer(async (socket) => { socket.once("data", (data) => { @@ -174,8 +174,8 @@ function getConnectionAndSocket( addresses: [{ host: "foo" }], }; const connection = isCluster - ? await RedisClusterClient.__createClient(options, socket) - : await RedisClient.__createClient(options, socket); + ? await GlideClusterClient.__createClient(options, socket) + : await GlideClient.__createClient(options, socket); resolve(connection); }); @@ -184,7 +184,7 @@ function getConnectionAndSocket( } function closeTestResources( - connection: RedisClient | RedisClusterClient, + connection: GlideClient | GlideClusterClient, server: net.Server, socket: net.Socket, ) { @@ -195,7 +195,7 @@ function closeTestResources( async function testWithResources( testFunction: ( - connection: RedisClient | RedisClusterClient, + connection: GlideClient | GlideClusterClient, socket: net.Socket, ) => Promise, connectionOptions?: BaseClientConfiguration, @@ -212,7 +212,7 @@ async function testWithResources( async function testWithClusterResources( testFunction: ( - connection: RedisClusterClient, + connection: GlideClusterClient, socket: net.Socket, ) => Promise, connectionOptions?: BaseClientConfiguration, @@ -224,7 +224,7 @@ async function testWithClusterResources( ); try { - if (connection instanceof RedisClusterClient) { + if (connection instanceof GlideClusterClient) { await testFunction(connection, socket); } else { throw new Error("Not cluster 
connection"); @@ -235,7 +235,7 @@ async function testWithClusterResources( } async function testSentValueMatches(config: { - sendRequest: (client: RedisClient | RedisClusterClient) => Promise; + sendRequest: (client: GlideClient | GlideClusterClient) => Promise; expectedRequestType: redis_request.RequestType | null | undefined; expectedValue: unknown; }) { diff --git a/node/tests/RedisClusterClient.test.ts b/node/tests/RedisClusterClient.test.ts index 6a189b4210..c627a8f9bd 100644 --- a/node/tests/RedisClusterClient.test.ts +++ b/node/tests/RedisClusterClient.test.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { @@ -14,9 +14,9 @@ import { v4 as uuidv4 } from "uuid"; import { ClusterTransaction, + GlideClusterClient, InfoOptions, ProtocolVersion, - RedisClusterClient, } from ".."; import { RedisCluster } from "../../utils/TestUtils.js"; import { checkIfServerVersionLessThan, runBaseTests } from "./SharedTests"; @@ -24,20 +24,22 @@ import { flushAndCloseClient, getClientConfigurationOption, getFirstResult, + intoArray, + intoString, parseCommandLineArgs, parseEndpoints, transactionTest, } from "./TestUtilities"; type Context = { - client: RedisClusterClient; + client: GlideClusterClient; }; const TIMEOUT = 50000; -describe("RedisClusterClient", () => { +describe("GlideClusterClient", () => { let testsFailed = 0; let cluster: RedisCluster; - let client: RedisClusterClient; + let client: GlideClusterClient; beforeAll(async () => { const clusterAddresses = parseCommandLineArgs()["cluster-endpoints"]; // Connect to cluster or create a new one based on the parsed addresses @@ -67,7 +69,7 @@ describe("RedisClusterClient", () => { options.protocol = protocol; options.clientName = clientName; testsFailed += 1; - client = await RedisClusterClient.createClient(options); + client = await GlideClusterClient.createClient(options); return 
{ context: { client, @@ -86,30 +88,26 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `info with server and replication_%p`, async (protocol) => { - client = await RedisClusterClient.createClient( + client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const info_server = getFirstResult( await client.info([InfoOptions.Server]), ); - expect(info_server).toEqual(expect.stringContaining("# Server")); - - const result = (await client.info([ - InfoOptions.Replication, - ])) as Record; - const clusterNodes = await client.customCommand([ - "CLUSTER", - "NODES", - ]); - expect( - (clusterNodes as string)?.split("master").length - 1, - ).toEqual(Object.keys(result).length); - Object.values(result).every((item) => { - expect(item).toEqual(expect.stringContaining("# Replication")); - expect(item).toEqual( - expect.not.stringContaining("# Errorstats"), - ); - }); + expect(intoString(info_server)).toEqual( + expect.stringContaining("# Server"), + ); + + const infoReplicationValues = Object.values( + await client.info([InfoOptions.Replication]), + ); + + const replicationInfo = intoArray(infoReplicationValues); + + for (const item of replicationInfo) { + expect(item).toContain("role:master"); + expect(item).toContain("# Replication"); + } }, TIMEOUT, ); @@ -117,16 +115,19 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `info with server and randomNode route_%p`, async (protocol) => { - client = await RedisClusterClient.createClient( + client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const result = await client.info( [InfoOptions.Server], "randomNode", ); - expect(typeof result).toEqual("string"); - expect(result).toEqual(expect.stringContaining("# Server")); - expect(result).toEqual(expect.not.stringContaining("# Errorstats")); + 
expect(intoString(result)).toEqual( + expect.stringContaining("# Server"), + ); + expect(intoString(result)).toEqual( + expect.not.stringContaining("# Errorstats"), + ); }, TIMEOUT, ); @@ -144,14 +145,16 @@ describe("RedisClusterClient", () => { ); }; - client = await RedisClusterClient.createClient( + client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const result = cleanResult( - (await client.customCommand( - ["cluster", "nodes"], - "randomNode", - )) as string, + intoString( + await client.customCommand( + ["cluster", "nodes"], + "randomNode", + ), + ), ); // check that routing without explicit port works @@ -162,10 +165,12 @@ describe("RedisClusterClient", () => { } const secondResult = cleanResult( - (await client.customCommand(["cluster", "nodes"], { - type: "routeByAddress", - host, - })) as string, + intoString( + await client.customCommand(["cluster", "nodes"], { + type: "routeByAddress", + host, + }), + ), ); expect(result).toEqual(secondResult); @@ -174,11 +179,13 @@ describe("RedisClusterClient", () => { // check that routing with explicit port works const thirdResult = cleanResult( - (await client.customCommand(["cluster", "nodes"], { - type: "routeByAddress", - host: host2, - port: Number(port), - })) as string, + intoString( + await client.customCommand(["cluster", "nodes"], { + type: "routeByAddress", + host: host2, + port: Number(port), + }), + ), ); expect(result).toEqual(thirdResult); @@ -189,7 +196,7 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `fail routing by address if no port is provided_%p`, async (protocol) => { - client = await RedisClusterClient.createClient( + client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); expect(() => @@ -205,14 +212,16 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `config get and 
config set transactions test_%p`, async (protocol) => { - client = await RedisClusterClient.createClient( + client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const transaction = new ClusterTransaction(); transaction.configSet({ timeout: "1000" }); transaction.configGet(["timeout"]); const result = await client.exec(transaction); - expect(result).toEqual(["OK", { timeout: "1000" }]); + expect(intoString(result)).toEqual( + intoString(["OK", { timeout: "1000" }]), + ); }, TIMEOUT, ); @@ -220,13 +229,13 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `can send transactions_%p`, async (protocol) => { - client = await RedisClusterClient.createClient( + client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const transaction = new ClusterTransaction(); const expectedRes = await transactionTest(transaction); const result = await client.exec(transaction); - expect(result).toEqual(expectedRes); + expect(intoString(result)).toEqual(intoString(expectedRes)); }, TIMEOUT, ); @@ -234,10 +243,10 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `can return null on WATCH transaction failures_%p`, async (protocol) => { - const client1 = await RedisClusterClient.createClient( + const client1 = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); - const client2 = await RedisClusterClient.createClient( + const client2 = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const transaction = new ClusterTransaction(); @@ -260,15 +269,15 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `echo with all nodes routing_%p`, async (protocol) => { - client = await RedisClusterClient.createClient( + client = await 
GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); const message = uuidv4(); const echoDict = await client.echo(message, "allNodes"); expect(typeof echoDict).toBe("object"); - expect(Object.values(echoDict)).toEqual( - expect.arrayContaining([message]), + expect(intoArray(echoDict)).toEqual( + expect.arrayContaining(intoArray([message])), ); }, TIMEOUT, @@ -277,7 +286,7 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `check that multi key command returns a cross slot error`, async (protocol) => { - const client = await RedisClusterClient.createClient( + const client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); @@ -318,7 +327,7 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( `check that multi key command routed to multiple nodes`, async (protocol) => { - const client = await RedisClusterClient.createClient( + const client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); @@ -335,7 +344,7 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "object freq transaction test_%p", async (protocol) => { - const client = await RedisClusterClient.createClient( + const client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); @@ -376,7 +385,7 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "object idletime transaction test_%p", async (protocol) => { - const client = await RedisClusterClient.createClient( + const client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); @@ -421,7 +430,7 @@ describe("RedisClusterClient", () => { it.each([ProtocolVersion.RESP2, ProtocolVersion.RESP3])( "object refcount 
transaction test_%p", async (protocol) => { - const client = await RedisClusterClient.createClient( + const client = await GlideClusterClient.createClient( getClientConfigurationOption(cluster.getAddresses(), protocol), ); diff --git a/node/tests/SharedTests.ts b/node/tests/SharedTests.ts index 7cca08f7f9..dbcdc7840a 100644 --- a/node/tests/SharedTests.ts +++ b/node/tests/SharedTests.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { expect, it } from "@jest/globals"; @@ -8,19 +8,22 @@ import { v4 as uuidv4 } from "uuid"; import { ClosingError, ExpireOptions, + GlideClient, + GlideClusterClient, InfoOptions, InsertPosition, ProtocolVersion, - RedisClient, - RedisClusterClient, Script, parseInfoResponse, } from "../"; import { Client, GetAndSetRandomValue, + checkSimple, compareMaps, getFirstResult, + intoArray, + intoString, } from "./TestUtilities"; async function getVersion(): Promise<[number, number, number]> { @@ -56,7 +59,7 @@ export async function checkIfServerVersionLessThan( return versionToCompare < minVersion; } -export type BaseClient = RedisClient | RedisClusterClient; +export type BaseClient = GlideClient | GlideClusterClient; export function runBaseTests(config: { init: ( @@ -100,9 +103,8 @@ export function runBaseTests(config: { } const result = await client.customCommand(["CLIENT", "INFO"]); - - expect(result).toContain("lib-name=GlideJS"); - expect(result).toContain("lib-ver=unknown"); + expect(intoString(result)).toContain("lib-name=GlideJS"); + expect(intoString(result)).toContain("lib-ver=unknown"); }, protocol); }, config.timeout, @@ -157,7 +159,9 @@ export function runBaseTests(config: { async (protocol) => { await runTest( async (client: BaseClient) => { - expect(await client.clientGetName()).toBe("TEST_CLIENT"); + expect(intoString(await client.clientGetName())).toBe( + "TEST_CLIENT", + ); }, 
protocol, "TEST_CLIENT", @@ -178,9 +182,9 @@ export function runBaseTests(config: { key, value, ]); - expect(setResult).toEqual("OK"); + checkSimple(setResult).toEqual("OK"); const result = await client.customCommand(["GET", key]); - expect(result).toEqual(value); + checkSimple(result).toEqual(value); }, protocol); }, config.timeout, @@ -201,20 +205,20 @@ export function runBaseTests(config: { key1, value1, ]); - expect(setResult1).toEqual("OK"); + checkSimple(setResult1).toEqual("OK"); const setResult2 = await client.customCommand([ "SET", key2, value2, ]); - expect(setResult2).toEqual("OK"); + checkSimple(setResult2).toEqual("OK"); const mget_result = await client.customCommand([ "MGET", key1, key2, key3, ]); - expect(mget_result).toEqual([value1, value2, null]); + checkSimple(mget_result).toEqual([value1, value2, null]); }, protocol); }, config.timeout, @@ -257,13 +261,15 @@ export function runBaseTests(config: { `test config rewrite_%p`, async (protocol) => { await runTest(async (client: BaseClient) => { - const serverInfo = await client.info([InfoOptions.Server]); + const serverInfo = intoString( + await client.info([InfoOptions.Server]), + ); const conf_file = parseInfoResponse( getFirstResult(serverInfo).toString(), )["config_file"]; if (conf_file.length > 0) { - expect(await client.configRewrite()).toEqual("OK"); + checkSimple(await client.configRewrite()).toEqual("OK"); } else { try { /// We expect Redis to return an error since the test cluster doesn't use redis.conf file @@ -286,13 +292,17 @@ export function runBaseTests(config: { /// we execute set and info so the commandstats will show `cmdstat_set::calls` greater than 1 /// after the configResetStat call we initiate an info command and the the commandstats won't contain `cmdstat_set`. 
await client.set("foo", "bar"); - const OldResult = await client.info([InfoOptions.Commandstats]); - expect(JSON.stringify(OldResult)).toContain("cmdstat_set"); - - expect(await client.configResetStat()).toEqual("OK"); - - const result = await client.info([InfoOptions.Commandstats]); - expect(JSON.stringify(result)).not.toContain("cmdstat_set"); + const oldResult = await client.info([InfoOptions.Commandstats]); + const oldResultAsString = intoString(oldResult); + console.log(oldResult); + console.log(oldResultAsString); + expect(oldResultAsString).toContain("cmdstat_set"); + checkSimple(await client.configResetStat()).toEqual("OK"); + + const result = intoArray( + await client.info([InfoOptions.Commandstats]), + ); + expect(result).not.toContain("cmdstat_set"); }, protocol); }, config.timeout, @@ -311,8 +321,8 @@ export function runBaseTests(config: { [key2]: value, [key3]: value, }; - expect(await client.mset(keyValueList)).toEqual("OK"); - expect( + checkSimple(await client.mset(keyValueList)).toEqual("OK"); + checkSimple( await client.mget([key1, key2, "nonExistingKey", key3]), ).toEqual([value, value, null, value]); }, protocol); @@ -325,13 +335,13 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "10")).toEqual("OK"); + checkSimple(await client.set(key, "10")).toEqual("OK"); expect(await client.incr(key)).toEqual(11); - expect(await client.get(key)).toEqual("11"); - expect(await client.incrBy(key, 4)).toEqual(15); - expect(await client.get(key)).toEqual("15"); - expect(await client.incrByFloat(key, 1.5)).toEqual(16.5); - expect(await client.get(key)).toEqual("16.5"); + checkSimple(await client.get(key)).toEqual("11"); + checkSimple(await client.incrBy(key, 4)).toEqual(15); + checkSimple(await client.get(key)).toEqual("15"); + checkSimple(await client.incrByFloat(key, 1.5)).toEqual(16.5); + checkSimple(await client.get(key)).toEqual("16.5"); }, 
protocol); }, config.timeout, @@ -346,11 +356,11 @@ export function runBaseTests(config: { const key3 = uuidv4(); /// key1 and key2 does not exist, so it set to 0 before performing the operation. expect(await client.incr(key1)).toEqual(1); - expect(await client.get(key1)).toEqual("1"); + checkSimple(await client.get(key1)).toEqual("1"); expect(await client.incrBy(key2, 2)).toEqual(2); - expect(await client.get(key2)).toEqual("2"); + checkSimple(await client.get(key2)).toEqual("2"); expect(await client.incrByFloat(key3, -0.5)).toEqual(-0.5); - expect(await client.get(key3)).toEqual("-0.5"); + checkSimple(await client.get(key3)).toEqual("-0.5"); }, protocol); }, config.timeout, @@ -361,7 +371,7 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); try { expect(await client.incr(key)).toThrow(); @@ -395,8 +405,8 @@ export function runBaseTests(config: { `ping test_%p`, async (protocol) => { await runTest(async (client: BaseClient) => { - expect(await client.ping()).toEqual("PONG"); - expect(await client.ping("Hello")).toEqual("Hello"); + checkSimple(await client.ping()).toEqual("PONG"); + checkSimple(await client.ping("Hello")).toEqual("Hello"); }, protocol); }, config.timeout, @@ -419,11 +429,11 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "10")).toEqual("OK"); + checkSimple(await client.set(key, "10")).toEqual("OK"); expect(await client.decr(key)).toEqual(9); - expect(await client.get(key)).toEqual("9"); + checkSimple(await client.get(key)).toEqual("9"); expect(await client.decrBy(key, 4)).toEqual(5); - expect(await client.get(key)).toEqual("5"); + checkSimple(await client.get(key)).toEqual("5"); }, protocol); }, config.timeout, @@ -438,10 +448,10 @@ export 
function runBaseTests(config: { /// key1 and key2 does not exist, so it set to 0 before performing the operation. expect(await client.get(key1)).toBeNull(); expect(await client.decr(key1)).toEqual(-1); - expect(await client.get(key1)).toEqual("-1"); + checkSimple(await client.get(key1)).toEqual("-1"); expect(await client.get(key2)).toBeNull(); expect(await client.decrBy(key2, 3)).toEqual(-3); - expect(await client.get(key2)).toEqual("-3"); + checkSimple(await client.get(key2)).toEqual("-3"); }, protocol); }, config.timeout, @@ -481,15 +491,15 @@ export function runBaseTests(config: { const prevTimeout = (await client.configGet([ "timeout", ])) as Record; - expect(await client.configSet({ timeout: "1000" })).toEqual( - "OK", - ); + checkSimple( + await client.configSet({ timeout: "1000" }), + ).toEqual("OK"); const currTimeout = (await client.configGet([ "timeout", ])) as Record; - expect(currTimeout).toEqual({ timeout: "1000" }); + checkSimple(currTimeout).toEqual({ timeout: "1000" }); /// Revert to the pervious configuration - expect( + checkSimple( await client.configSet({ timeout: prevTimeout["timeout"], }), @@ -512,8 +522,8 @@ export function runBaseTests(config: { [field2]: value, }; expect(await client.hset(key, fieldValueMap)).toEqual(2); - expect(await client.hget(key, field1)).toEqual(value); - expect(await client.hget(key, field2)).toEqual(value); + checkSimple(await client.hget(key, field1)).toEqual(value); + checkSimple(await client.hget(key, field2)).toEqual(value); expect(await client.hget(key, "nonExistingField")).toEqual( null, ); @@ -561,7 +571,7 @@ export function runBaseTests(config: { [field2]: value, }; expect(await client.hset(key, fieldValueMap)).toEqual(2); - expect( + checkSimple( await client.hmget(key, [ field1, "nonExistingField", @@ -614,12 +624,13 @@ export function runBaseTests(config: { }; expect(await client.hset(key, fieldValueMap)).toEqual(2); - expect( - compareMaps(await client.hgetall(key), { + expect(intoString(await 
client.hgetall(key))).toEqual( + intoString({ [field1]: value, [field2]: value, }), - ).toBe(true); + ); + expect(await client.hgetall("nonExistingKey")).toEqual({}); }, protocol); }, @@ -740,9 +751,12 @@ export function runBaseTests(config: { }; expect(await client.hset(key1, fieldValueMap)).toEqual(2); - expect(await client.hvals(key1)).toEqual(["value1", "value2"]); + checkSimple(await client.hvals(key1)).toEqual([ + "value1", + "value2", + ]); expect(await client.hdel(key1, [field1])).toEqual(1); - expect(await client.hvals(key1)).toEqual(["value2"]); + checkSimple(await client.hvals(key1)).toEqual(["value2"]); expect(await client.hvals("nonExistingHash")).toEqual([]); }, protocol); }, @@ -761,9 +775,9 @@ export function runBaseTests(config: { expect(await client.hsetnx(key1, field, "newValue")).toEqual( false, ); - expect(await client.hget(key1, field)).toEqual("value"); + checkSimple(await client.hget(key1, field)).toEqual("value"); - expect(await client.set(key2, "value")).toEqual("OK"); + checkSimple(await client.set(key2, "value")).toEqual("OK"); await expect( client.hsetnx(key2, field, "value"), ).rejects.toThrow(); @@ -779,13 +793,13 @@ export function runBaseTests(config: { const key = uuidv4(); const valueList = ["value4", "value3", "value2", "value1"]; expect(await client.lpush(key, valueList)).toEqual(4); - expect(await client.lpop(key)).toEqual("value1"); - expect(await client.lrange(key, 0, -1)).toEqual([ + checkSimple(await client.lpop(key)).toEqual("value1"); + checkSimple(await client.lrange(key, 0, -1)).toEqual([ "value2", "value3", "value4", ]); - expect(await client.lpopCount(key, 2)).toEqual([ + checkSimple(await client.lpopCount(key, 2)).toEqual([ "value2", "value3", ]); @@ -803,7 +817,7 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); try { 
expect(await client.lpush(key, ["bar"])).toThrow(); @@ -845,7 +859,7 @@ export function runBaseTests(config: { expect(await client.llen("nonExistingKey")).toEqual(0); - expect(await client.set(key2, "foo")).toEqual("OK"); + checkSimple(await client.set(key2, "foo")).toEqual("OK"); try { expect(await client.llen(key2)).toThrow(); @@ -866,17 +880,17 @@ export function runBaseTests(config: { const key = uuidv4(); const valueList = ["value4", "value3", "value2", "value1"]; expect(await client.lpush(key, valueList)).toEqual(4); - expect(await client.ltrim(key, 0, 1)).toEqual("OK"); - expect(await client.lrange(key, 0, -1)).toEqual([ + checkSimple(await client.ltrim(key, 0, 1)).toEqual("OK"); + checkSimple(await client.lrange(key, 0, -1)).toEqual([ "value1", "value2", ]); /// `start` is greater than `end` so the key will be removed. - expect(await client.ltrim(key, 4, 2)).toEqual("OK"); + checkSimple(await client.ltrim(key, 4, 2)).toEqual("OK"); expect(await client.lrange(key, 0, -1)).toEqual([]); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); try { expect(await client.ltrim(key, 0, 1)).toThrow(); @@ -904,18 +918,20 @@ export function runBaseTests(config: { ]; expect(await client.lpush(key, valueList)).toEqual(5); expect(await client.lrem(key, 2, "value1")).toEqual(2); - expect(await client.lrange(key, 0, -1)).toEqual([ + checkSimple(await client.lrange(key, 0, -1)).toEqual([ "value2", "value2", "value1", ]); expect(await client.lrem(key, -1, "value2")).toEqual(1); - expect(await client.lrange(key, 0, -1)).toEqual([ + checkSimple(await client.lrange(key, 0, -1)).toEqual([ "value2", "value1", ]); expect(await client.lrem(key, 0, "value2")).toEqual(1); - expect(await client.lrange(key, 0, -1)).toEqual(["value1"]); + checkSimple(await client.lrange(key, 0, -1)).toEqual([ + "value1", + ]); expect(await client.lrem("nonExistingKey", 2, "value")).toEqual( 0, ); @@ -931,8 +947,8 @@ export function 
runBaseTests(config: { const key = uuidv4(); const valueList = ["value1", "value2", "value3", "value4"]; expect(await client.rpush(key, valueList)).toEqual(4); - expect(await client.rpop(key)).toEqual("value4"); - expect(await client.rpopCount(key, 2)).toEqual([ + checkSimple(await client.rpop(key)).toEqual("value4"); + checkSimple(await client.rpopCount(key, 2)).toEqual([ "value3", "value2", ]); @@ -947,7 +963,7 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); try { expect(await client.rpush(key, ["bar"])).toThrow(); @@ -980,7 +996,7 @@ export function runBaseTests(config: { await client.srem(key, ["member3", "nonExistingMember"]), ).toEqual(1); /// compare the 2 sets. - expect(await client.smembers(key)).toEqual( + checkSimple(await client.smembers(key)).toEqual( new Set(["member1", "member2", "member4"]), ); expect(await client.srem(key, ["member1"])).toEqual(1); @@ -1005,19 +1021,19 @@ export function runBaseTests(config: { // move an element expect(await client.smove(key1, key2, "1")); - expect(await client.smembers(key1)).toEqual( + checkSimple(await client.smembers(key1)).toEqual( new Set(["2", "3"]), ); - expect(await client.smembers(key2)).toEqual( + checkSimple(await client.smembers(key2)).toEqual( new Set(["1", "2", "3"]), ); // moved element already exists in the destination set expect(await client.smove(key2, key1, "2")); - expect(await client.smembers(key1)).toEqual( + checkSimple(await client.smembers(key1)).toEqual( new Set(["2", "3"]), ); - expect(await client.smembers(key2)).toEqual( + checkSimple(await client.smembers(key2)).toEqual( new Set(["1", "3"]), ); @@ -1025,29 +1041,43 @@ export function runBaseTests(config: { expect(await client.smove(non_existing_key, key1, "4")).toEqual( false, ); - expect(await client.smembers(key1)).toEqual( + 
checkSimple(await client.smembers(key1)).toEqual( new Set(["2", "3"]), ); // move to a new set expect(await client.smove(key1, key3, "2")); - expect(await client.smembers(key1)).toEqual(new Set(["3"])); - expect(await client.smembers(key3)).toEqual(new Set(["2"])); + checkSimple(await client.smembers(key1)).toEqual( + new Set(["3"]), + ); + checkSimple(await client.smembers(key3)).toEqual( + new Set(["2"]), + ); // attempt to move a missing element expect(await client.smove(key1, key3, "42")).toEqual(false); - expect(await client.smembers(key1)).toEqual(new Set(["3"])); - expect(await client.smembers(key3)).toEqual(new Set(["2"])); + checkSimple(await client.smembers(key1)).toEqual( + new Set(["3"]), + ); + checkSimple(await client.smembers(key3)).toEqual( + new Set(["2"]), + ); // move missing element to missing key expect( await client.smove(key1, non_existing_key, "42"), ).toEqual(false); - expect(await client.smembers(key1)).toEqual(new Set(["3"])); - expect(await client.type(non_existing_key)).toEqual("none"); + checkSimple(await client.smembers(key1)).toEqual( + new Set(["3"]), + ); + checkSimple(await client.type(non_existing_key)).toEqual( + "none", + ); // key exists, but it is not a set - expect(await client.set(string_key, "value")).toEqual("OK"); + checkSimple(await client.set(string_key, "value")).toEqual( + "OK", + ); await expect( client.smove(string_key, key1, "_"), ).rejects.toThrow(); @@ -1077,7 +1107,7 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); try { expect(await client.sadd(key, ["bar"])).toThrow(); @@ -1128,7 +1158,7 @@ export function runBaseTests(config: { // positive test case expect(await client.sadd(key1, member1_list)).toEqual(4); expect(await client.sadd(key2, member2_list)).toEqual(3); - expect(await client.sinter([key1, key2])).toEqual( 
+ checkSimple(await client.sinter([key1, key2])).toEqual( new Set(["c", "d"]), ); @@ -1147,7 +1177,7 @@ export function runBaseTests(config: { ); // non-set key - expect(await client.set(key2, "value")).toEqual("OK"); + checkSimple(await client.set(key2, "value")).toEqual("OK"); try { expect(await client.sinter([key2])).toThrow(); @@ -1178,19 +1208,19 @@ export function runBaseTests(config: { // store union in new key expect(await client.sunionstore(key4, [key1, key2])).toEqual(5); - expect(await client.smembers(key4)).toEqual( + checkSimple(await client.smembers(key4)).toEqual( new Set(["a", "b", "c", "d", "e"]), ); // overwrite existing set expect(await client.sunionstore(key1, [key4, key2])).toEqual(5); - expect(await client.smembers(key1)).toEqual( + checkSimple(await client.smembers(key1)).toEqual( new Set(["a", "b", "c", "d", "e"]), ); // overwrite one of the source keys expect(await client.sunionstore(key2, [key4, key2])).toEqual(5); - expect(await client.smembers(key2)).toEqual( + checkSimple(await client.smembers(key2)).toEqual( new Set(["a", "b", "c", "d", "e"]), ); @@ -1204,7 +1234,7 @@ export function runBaseTests(config: { await expect(client.sunionstore(key4, [])).rejects.toThrow(); // key exists, but it is not a set - expect(await client.set(stringKey, "foo")).toEqual("OK"); + checkSimple(await client.set(stringKey, "foo")).toEqual("OK"); await expect( client.sunionstore(key4, [stringKey, key1]), ).rejects.toThrow(); @@ -1213,7 +1243,7 @@ export function runBaseTests(config: { expect( await client.sunionstore(stringKey, [key1, key3]), ).toEqual(7); - expect(await client.smembers(stringKey)).toEqual( + checkSimple(await client.smembers(stringKey)).toEqual( new Set(["a", "b", "c", "d", "e", "f", "g"]), ); }, protocol); @@ -1236,7 +1266,7 @@ export function runBaseTests(config: { await client.sismember("nonExistingKey", "member1"), ).toEqual(false); - expect(await client.set(key2, "foo")).toEqual("OK"); + checkSimple(await client.set(key2, 
"foo")).toEqual("OK"); await expect( client.sismember(key2, "member1"), ).rejects.toThrow(); @@ -1254,14 +1284,11 @@ export function runBaseTests(config: { expect(await client.sadd(key, members)).toEqual(3); const result1 = await client.spop(key); - expect(members).toContain(result1); - - members = members.filter((item) => item !== result1); - - expect(await client.spopCount(key, 2)).toEqual( - new Set(members), - ); + expect(members).toContain(intoString(result1)); + members = members.filter((item) => item != result1); + const result2 = await client.spopCount(key, 2); + expect(intoString(result2)).toEqual(intoString(members)); expect(await client.spop("nonExistingKey")).toEqual(null); expect(await client.spopCount("nonExistingKey", 1)).toEqual( new Set(), @@ -1278,9 +1305,9 @@ export function runBaseTests(config: { const key1 = uuidv4(); const key2 = uuidv4(); const value = uuidv4(); - expect(await client.set(key1, value)).toEqual("OK"); + checkSimple(await client.set(key1, value)).toEqual("OK"); expect(await client.exists([key1])).toEqual(1); - expect(await client.set(key2, value)).toEqual("OK"); + checkSimple(await client.set(key2, value)).toEqual("OK"); expect( await client.exists([key1, "nonExistingKey", key2]), ).toEqual(2); @@ -1298,9 +1325,9 @@ export function runBaseTests(config: { const key2 = "{key}" + uuidv4(); const key3 = "{key}" + uuidv4(); const value = uuidv4(); - expect(await client.set(key1, value)).toEqual("OK"); - expect(await client.set(key2, value)).toEqual("OK"); - expect(await client.set(key3, value)).toEqual("OK"); + checkSimple(await client.set(key1, value)).toEqual("OK"); + checkSimple(await client.set(key2, value)).toEqual("OK"); + checkSimple(await client.set(key3, value)).toEqual("OK"); expect( await client.unlink([key1, key2, "nonExistingKey", key3]), ).toEqual(3); @@ -1314,11 +1341,11 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await 
client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); expect(await client.expire(key, 10)).toEqual(true); expect(await client.ttl(key)).toBeLessThanOrEqual(10); /// set command clears the timeout. - expect(await client.set(key, "bar")).toEqual("OK"); + checkSimple(await client.set(key, "bar")).toEqual("OK"); const versionLessThan = await checkIfServerVersionLessThan("7.0.0"); @@ -1360,7 +1387,7 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); expect( await client.expireAt( key, @@ -1391,7 +1418,7 @@ export function runBaseTests(config: { expect(await client.ttl(key)).toBeLessThanOrEqual(50); /// set command clears the timeout. - expect(await client.set(key, "bar")).toEqual("OK"); + checkSimple(await client.set(key, "bar")).toEqual("OK"); if (!versionLessThan) { expect( @@ -1412,14 +1439,14 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); expect(await client.ttl(key)).toEqual(-1); expect(await client.expire(key, -10)).toEqual(true); expect(await client.ttl(key)).toEqual(-2); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); expect(await client.pexpire(key, -10000)).toEqual(true); expect(await client.ttl(key)).toEqual(-2); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); expect( await client.expireAt( key, @@ -1427,7 +1454,7 @@ export function runBaseTests(config: { ), ).toEqual(true); expect(await client.ttl(key)).toEqual(-2); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await 
client.set(key, "foo")).toEqual("OK"); expect( await client.pexpireAt( key, @@ -1473,12 +1500,12 @@ export function runBaseTests(config: { const key2 = uuidv4(); let script = new Script("return 'Hello'"); - expect(await client.invokeScript(script)).toEqual("Hello"); + checkSimple(await client.invokeScript(script)).toEqual("Hello"); script = new Script( "return redis.call('SET', KEYS[1], ARGV[1])", ); - expect( + checkSimple( await client.invokeScript(script, { keys: [key1], args: ["value1"], @@ -1486,7 +1513,7 @@ export function runBaseTests(config: { ).toEqual("OK"); /// Reuse the same script with different parameters. - expect( + checkSimple( await client.invokeScript(script, { keys: [key2], args: ["value2"], @@ -1494,11 +1521,11 @@ export function runBaseTests(config: { ).toEqual("OK"); script = new Script("return redis.call('GET', KEYS[1])"); - expect( + checkSimple( await client.invokeScript(script, { keys: [key1] }), ).toEqual("value1"); - expect( + checkSimple( await client.invokeScript(script, { keys: [key2] }), ).toEqual("value2"); }, protocol); @@ -1695,7 +1722,7 @@ export function runBaseTests(config: { await client.zscore("nonExistingKey", "nonExistingMember"), ).toEqual(null); - expect(await client.set(key2, "foo")).toEqual("OK"); + checkSimple(await client.set(key2, "foo")).toEqual("OK"); await expect(client.zscore(key2, "foo")).rejects.toThrow(); }, protocol); }, @@ -1749,7 +1776,7 @@ export function runBaseTests(config: { ), ).toEqual(0); - expect(await client.set(key2, "foo")).toEqual("OK"); + checkSimple(await client.set(key2, "foo")).toEqual("OK"); await expect( client.zcount(key2, "negativeInfinity", "positiveInfinity"), ).rejects.toThrow(); @@ -1766,9 +1793,9 @@ export function runBaseTests(config: { const membersScores = { one: 1, two: 2, three: 3 }; expect(await client.zadd(key, membersScores)).toEqual(3); - expect(await client.zrange(key, { start: 0, stop: 1 })).toEqual( - ["one", "two"], - ); + checkSimple( + await client.zrange(key, { 
start: 0, stop: 1 }), + ).toEqual(["one", "two"]); const result = await client.zrangeWithScores(key, { start: 0, stop: -1, @@ -1781,7 +1808,7 @@ export function runBaseTests(config: { three: 3.0, }), ).toBe(true); - expect( + checkSimple( await client.zrange(key, { start: 0, stop: 1 }, true), ).toEqual(["three", "two"]); expect(await client.zrange(key, { start: 3, stop: 1 })).toEqual( @@ -1803,7 +1830,7 @@ export function runBaseTests(config: { const membersScores = { one: 1, two: 2, three: 3 }; expect(await client.zadd(key, membersScores)).toEqual(3); - expect( + checkSimple( await client.zrange(key, { start: "negativeInfinity", stop: { value: 3, isInclusive: false }, @@ -1823,7 +1850,7 @@ export function runBaseTests(config: { three: 3.0, }), ).toBe(true); - expect( + checkSimple( await client.zrange( key, { @@ -1835,7 +1862,7 @@ export function runBaseTests(config: { ), ).toEqual(["two", "one"]); - expect( + checkSimple( await client.zrange(key, { start: "negativeInfinity", stop: "positiveInfinity", @@ -1896,7 +1923,7 @@ export function runBaseTests(config: { const membersScores = { a: 1, b: 2, c: 3 }; expect(await client.zadd(key, membersScores)).toEqual(3); - expect( + checkSimple( await client.zrange(key, { start: "negativeInfinity", stop: { value: "c", isInclusive: false }, @@ -1904,7 +1931,7 @@ export function runBaseTests(config: { }), ).toEqual(["a", "b"]); - expect( + checkSimple( await client.zrange(key, { start: "negativeInfinity", stop: "positiveInfinity", @@ -1913,7 +1940,7 @@ export function runBaseTests(config: { }), ).toEqual(["b", "c"]); - expect( + checkSimple( await client.zrange( key, { @@ -2122,25 +2149,27 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "value")).toEqual("OK"); - expect(await client.type(key)).toEqual("string"); - expect(await client.del([key])).toEqual(1); + checkSimple(await client.set(key, 
"value")).toEqual("OK"); + checkSimple(await client.type(key)).toEqual("string"); + checkSimple(await client.del([key])).toEqual(1); - expect(await client.lpush(key, ["value"])).toEqual(1); - expect(await client.type(key)).toEqual("list"); - expect(await client.del([key])).toEqual(1); + checkSimple(await client.lpush(key, ["value"])).toEqual(1); + checkSimple(await client.type(key)).toEqual("list"); + checkSimple(await client.del([key])).toEqual(1); - expect(await client.sadd(key, ["value"])).toEqual(1); - expect(await client.type(key)).toEqual("set"); - expect(await client.del([key])).toEqual(1); + checkSimple(await client.sadd(key, ["value"])).toEqual(1); + checkSimple(await client.type(key)).toEqual("set"); + checkSimple(await client.del([key])).toEqual(1); - expect(await client.zadd(key, { member: 1.0 })).toEqual(1); - expect(await client.type(key)).toEqual("zset"); - expect(await client.del([key])).toEqual(1); + checkSimple(await client.zadd(key, { member: 1.0 })).toEqual(1); + checkSimple(await client.type(key)).toEqual("zset"); + checkSimple(await client.del([key])).toEqual(1); - expect(await client.hset(key, { field: "value" })).toEqual(1); - expect(await client.type(key)).toEqual("hash"); - expect(await client.del([key])).toEqual(1); + checkSimple(await client.hset(key, { field: "value" })).toEqual( + 1, + ); + checkSimple(await client.type(key)).toEqual("hash"); + checkSimple(await client.del([key])).toEqual(1); await client.customCommand([ "XADD", @@ -2149,10 +2178,9 @@ export function runBaseTests(config: { "field", "value", ]); - expect(await client.type(key)).toEqual("stream"); - expect(await client.del([key])).toEqual(1); - - expect(await client.type(key)).toEqual("none"); + checkSimple(await client.type(key)).toEqual("stream"); + checkSimple(await client.del([key])).toEqual(1); + checkSimple(await client.type(key)).toEqual("none"); }, protocol); }, config.timeout, @@ -2163,7 +2191,7 @@ export function runBaseTests(config: { async (protocol) => { 
await runTest(async (client: BaseClient) => { const message = uuidv4(); - expect(await client.echo(message)).toEqual(message); + checkSimple(await client.echo(message)).toEqual(message); }, protocol); }, config.timeout, @@ -2176,8 +2204,8 @@ export function runBaseTests(config: { const key1 = uuidv4(); const key1Value = uuidv4(); const key1ValueLength = key1Value.length; - expect(await client.set(key1, key1Value)).toEqual("OK"); - expect(await client.strlen(key1)).toEqual(key1ValueLength); + checkSimple(await client.set(key1, key1Value)).toEqual("OK"); + checkSimple(await client.strlen(key1)).toEqual(key1ValueLength); expect(await client.strlen("nonExistKey")).toEqual(0); @@ -2211,8 +2239,12 @@ export function runBaseTests(config: { listKey2Value, ]), ).toEqual(2); - expect(await client.lindex(listName, 0)).toEqual(listKey2Value); - expect(await client.lindex(listName, 1)).toEqual(listKey1Value); + checkSimple(await client.lindex(listName, 0)).toEqual( + listKey2Value, + ); + checkSimple(await client.lindex(listName, 1)).toEqual( + listKey1Value, + ); expect(await client.lindex("notExsitingList", 1)).toEqual(null); expect(await client.lindex(listName, 3)).toEqual(null); }, protocol); @@ -2247,7 +2279,7 @@ export function runBaseTests(config: { "3.5", ), ).toEqual(6); - expect(await client.lrange(key1, 0, -1)).toEqual([ + checkSimple(await client.lrange(key1, 0, -1)).toEqual([ "1", "1.5", "2", @@ -2299,7 +2331,7 @@ export function runBaseTests(config: { }), ).toBe(true); expect(await client.zpopmin(key)).toEqual({}); - expect(await client.set(key, "value")).toEqual("OK"); + checkSimple(await client.set(key, "value")).toEqual("OK"); await expect(client.zpopmin(key)).rejects.toThrow(); expect(await client.zpopmin("notExsitingKey")).toEqual({}); }, protocol); @@ -2323,7 +2355,7 @@ export function runBaseTests(config: { }), ).toBe(true); expect(await client.zpopmax(key)).toEqual({}); - expect(await client.set(key, "value")).toEqual("OK"); + checkSimple(await 
client.set(key, "value")).toEqual("OK"); await expect(client.zpopmax(key)).rejects.toThrow(); expect(await client.zpopmax("notExsitingKey")).toEqual({}); }, protocol); @@ -2338,7 +2370,7 @@ export function runBaseTests(config: { const key = uuidv4(); expect(await client.pttl(key)).toEqual(-2); - expect(await client.set(key, "value")).toEqual("OK"); + checkSimple(await client.set(key, "value")).toEqual("OK"); expect(await client.pttl(key)).toEqual(-1); expect(await client.expire(key, 10)).toEqual(true); @@ -2414,7 +2446,7 @@ export function runBaseTests(config: { null, ); - expect(await client.set(key2, "value")).toEqual("OK"); + checkSimple(await client.set(key2, "value")).toEqual("OK"); await expect(client.zrank(key2, "member")).rejects.toThrow(); }, protocol); }, @@ -2428,7 +2460,7 @@ export function runBaseTests(config: { await client.rpush("brpop-test", ["foo", "bar", "baz"]), ).toEqual(3); // Test basic usage - expect(await client.brpop(["brpop-test"], 0.1)).toEqual([ + checkSimple(await client.brpop(["brpop-test"], 0.1)).toEqual([ "brpop-test", "baz", ]); @@ -2441,7 +2473,7 @@ export function runBaseTests(config: { await expect(client.brpop(["foo"], 0.1)).rejects.toThrow(); // Same-slot requirement - if (client instanceof RedisClusterClient) { + if (client instanceof GlideClusterClient) { try { expect( await client.brpop(["abc", "zxy", "lkn"], 0.1), @@ -2465,7 +2497,7 @@ export function runBaseTests(config: { await client.rpush("blpop-test", ["foo", "bar", "baz"]), ).toEqual(3); // Test basic usage - expect(await client.blpop(["blpop-test"], 0.1)).toEqual([ + checkSimple(await client.blpop(["blpop-test"], 0.1)).toEqual([ "blpop-test", "foo", ]); @@ -2478,7 +2510,7 @@ export function runBaseTests(config: { await expect(client.blpop(["foo"], 0.1)).rejects.toThrow(); // Same-slot requirement - if (client instanceof RedisClusterClient) { + if (client instanceof GlideClusterClient) { try { expect( await client.blpop(["abc", "zxy", "lkn"], 0.1), @@ -2499,7 +2531,7 
@@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.set(key, "foo")).toEqual("OK"); + checkSimple(await client.set(key, "foo")).toEqual("OK"); expect(await client.persist(key)).toEqual(false); expect(await client.expire(key, 10)).toEqual(true); @@ -2539,7 +2571,7 @@ export function runBaseTests(config: { ], { id: "0-1" }, ); - expect(timestamp1).toEqual("0-1"); + checkSimple(timestamp1).toEqual("0-1"); expect( await client.xadd(key, [ [field1, "foo2"], @@ -2736,7 +2768,7 @@ export function runBaseTests(config: { [timestamp_2_3 as string]: [["bar", "bar3"]], }, }; - expect(result).toEqual(expected); + checkSimple(result).toEqual(expected); }, ProtocolVersion.RESP2); }, config.timeout, @@ -2752,7 +2784,7 @@ export function runBaseTests(config: { await client.set(key, "value"); await client.rename(key, newKey); const result = await client.get(newKey); - expect(result).toEqual("value"); + checkSimple(result).toEqual("value"); // If key doesn't exist it should throw, it also test that key has successfully been renamed await expect(client.rename(key, newKey)).rejects.toThrow(); }, protocol); @@ -2779,13 +2811,13 @@ export function runBaseTests(config: { await client.set(key1, "key1"); await client.set(key3, "key3"); // Test that renamenx can rename key1 to key2 (non-existing value) - expect(await client.renamenx(key1, key2)).toEqual(true); + checkSimple(await client.renamenx(key1, key2)).toEqual(true); // sanity check - expect(await client.get(key2)).toEqual("key1"); + checkSimple(await client.get(key2)).toEqual("key1"); // Test that renamenx doesn't rename key2 to key3 (with an existing value) - expect(await client.renamenx(key2, key3)).toEqual(false); + checkSimple(await client.renamenx(key2, key3)).toEqual(false); // sanity check - expect(await client.get(key3)).toEqual("key3"); + checkSimple(await client.get(key3)).toEqual("key3"); }, protocol); }, config.timeout, @@ 
-2796,13 +2828,13 @@ export function runBaseTests(config: { async (protocol) => { await runTest(async (client: BaseClient) => { const key = uuidv4(); - expect(await client.pfadd(key, [])).toEqual(1); - expect(await client.pfadd(key, ["one", "two"])).toEqual(1); - expect(await client.pfadd(key, ["two"])).toEqual(0); - expect(await client.pfadd(key, [])).toEqual(0); + checkSimple(await client.pfadd(key, [])).toEqual(1); + checkSimple(await client.pfadd(key, ["one", "two"])).toEqual(1); + checkSimple(await client.pfadd(key, ["two"])).toEqual(0); + checkSimple(await client.pfadd(key, [])).toEqual(0); // key exists, but it is not a HyperLogLog - expect(await client.set("foo", "value")).toEqual("OK"); + checkSimple(await client.set("foo", "value")).toEqual("OK"); await expect(client.pfadd("foo", [])).rejects.toThrow(); }, protocol); }, @@ -2860,9 +2892,9 @@ export function runBaseTests(config: { count: 500, }, }); - expect(setResWithExpirySetMilli).toEqual("OK"); + checkSimple(setResWithExpirySetMilli).toEqual("OK"); const getWithExpirySetMilli = await client.get(key); - expect(getWithExpirySetMilli).toEqual(value); + checkSimple(getWithExpirySetMilli).toEqual(value); const setResWithExpirySec = await client.set(key, value, { expiry: { @@ -2870,9 +2902,9 @@ export function runBaseTests(config: { count: 1, }, }); - expect(setResWithExpirySec).toEqual("OK"); + checkSimple(setResWithExpirySec).toEqual("OK"); const getResWithExpirySec = await client.get(key); - expect(getResWithExpirySec).toEqual(value); + checkSimple(getResWithExpirySec).toEqual(value); const setWithUnixSec = await client.set(key, value, { expiry: { @@ -2880,59 +2912,59 @@ export function runBaseTests(config: { count: Math.floor(Date.now() / 1000) + 1, }, }); - expect(setWithUnixSec).toEqual("OK"); + checkSimple(setWithUnixSec).toEqual("OK"); const getWithUnixSec = await client.get(key); - expect(getWithUnixSec).toEqual(value); + checkSimple(getWithUnixSec).toEqual(value); const setResWithExpiryKeep = await 
client.set(key, value, { expiry: "keepExisting", }); - expect(setResWithExpiryKeep).toEqual("OK"); + checkSimple(setResWithExpiryKeep).toEqual("OK"); const getResWithExpiryKeep = await client.get(key); - expect(getResWithExpiryKeep).toEqual(value); + checkSimple(getResWithExpiryKeep).toEqual(value); // wait for the key to expire base on the previous set let sleep = new Promise((resolve) => setTimeout(resolve, 1000)); await sleep; const getResExpire = await client.get(key); // key should have expired - expect(getResExpire).toEqual(null); + checkSimple(getResExpire).toEqual(null); const setResWithExpiryWithUmilli = await client.set(key, value, { expiry: { type: "unixMilliseconds", count: Date.now() + 1000, }, }); - expect(setResWithExpiryWithUmilli).toEqual("OK"); + checkSimple(setResWithExpiryWithUmilli).toEqual("OK"); // wait for the key to expire sleep = new Promise((resolve) => setTimeout(resolve, 1001)); await sleep; const getResWithExpiryWithUmilli = await client.get(key); // key should have expired - expect(getResWithExpiryWithUmilli).toEqual(null); + checkSimple(getResWithExpiryWithUmilli).toEqual(null); } async function setWithOnlyIfExistOptions(client: BaseClient) { const key = uuidv4(); const value = uuidv4(); const setKey = await client.set(key, value); - expect(setKey).toEqual("OK"); + checkSimple(setKey).toEqual("OK"); const getRes = await client.get(key); - expect(getRes).toEqual(value); + checkSimple(getRes).toEqual(value); const setExistingKeyRes = await client.set(key, value, { conditionalSet: "onlyIfExists", }); - expect(setExistingKeyRes).toEqual("OK"); + checkSimple(setExistingKeyRes).toEqual("OK"); const getExistingKeyRes = await client.get(key); - expect(getExistingKeyRes).toEqual(value); + checkSimple(getExistingKeyRes).toEqual(value); const notExistingKeyRes = await client.set(key + 1, value, { conditionalSet: "onlyIfExists", }); // key does not exist, so it should not be set - expect(notExistingKeyRes).toEqual(null); + 
checkSimple(notExistingKeyRes).toEqual(null); const getNotExistingKey = await client.get(key + 1); // key should not have been set - expect(getNotExistingKey).toEqual(null); + checkSimple(getNotExistingKey).toEqual(null); } async function setWithOnlyIfNotExistOptions(client: BaseClient) { @@ -2942,19 +2974,19 @@ export function runBaseTests(config: { conditionalSet: "onlyIfDoesNotExist", }); // key does not exist, so it should be set - expect(notExistingKeyRes).toEqual("OK"); + checkSimple(notExistingKeyRes).toEqual("OK"); const getNotExistingKey = await client.get(key); // key should have been set - expect(getNotExistingKey).toEqual(value); + checkSimple(getNotExistingKey).toEqual(value); const existingKeyRes = await client.set(key, value, { conditionalSet: "onlyIfDoesNotExist", }); // key exists, so it should not be set - expect(existingKeyRes).toEqual(null); + checkSimple(existingKeyRes).toEqual(null); const getExistingKey = await client.get(key); // key should not have been set - expect(getExistingKey).toEqual(value); + checkSimple(getExistingKey).toEqual(value); } async function setWithGetOldOptions(client: BaseClient) { @@ -2965,19 +2997,19 @@ export function runBaseTests(config: { returnOldValue: true, }); // key does not exist, so old value should be null - expect(setResGetNotExistOld).toEqual(null); + checkSimple(setResGetNotExistOld).toEqual(null); // key should have been set const getResGetNotExistOld = await client.get(key); - expect(getResGetNotExistOld).toEqual(value); + checkSimple(getResGetNotExistOld).toEqual(value); const setResGetExistOld = await client.set(key, value, { returnOldValue: true, }); // key exists, so old value should be returned - expect(setResGetExistOld).toEqual(value); + checkSimple(setResGetExistOld).toEqual(value); // key should have been set const getResGetExistOld = await client.get(key); - expect(getResGetExistOld).toEqual(value); + checkSimple(getResGetExistOld).toEqual(value); } async function setWithAllOptions(client: 
BaseClient) { @@ -3031,14 +3063,14 @@ export function runBaseTests(config: { }); if (exist == false) { - expect(setRes).toEqual("OK"); + checkSimple(setRes).toEqual("OK"); exist = true; } else { - expect(setRes).toEqual(null); + checkSimple(setRes).toEqual(null); } const getRes = await client.get(key); - expect(getRes).toEqual(value); + checkSimple(getRes).toEqual(value); } for (const expiryVal of expiryCombination) { @@ -3101,30 +3133,37 @@ export function runBaseTests(config: { null, ); - expect( + checkSimple( await client.set( string_key, "a really loooooooooooooooooooooooooooooooooooooooong value", ), ).toEqual("OK"); - expect(await client.objectEncoding(string_key)).toEqual("raw"); - expect(await client.set(string_key, "2")).toEqual("OK"); - expect(await client.objectEncoding(string_key)).toEqual("int"); + checkSimple(await client.objectEncoding(string_key)).toEqual( + "raw", + ); + + checkSimple(await client.set(string_key, "2")).toEqual("OK"); + checkSimple(await client.objectEncoding(string_key)).toEqual( + "int", + ); - expect(await client.set(string_key, "value")).toEqual("OK"); - expect(await client.objectEncoding(string_key)).toEqual( + checkSimple(await client.set(string_key, "value")).toEqual( + "OK", + ); + checkSimple(await client.objectEncoding(string_key)).toEqual( "embstr", ); expect(await client.lpush(list_key, ["1"])).toEqual(1); if (versionLessThan7) { - expect(await client.objectEncoding(list_key)).toEqual( + checkSimple(await client.objectEncoding(list_key)).toEqual( "quicklist", ); } else { - expect(await client.objectEncoding(list_key)).toEqual( + checkSimple(await client.objectEncoding(list_key)).toEqual( "listpack", ); } @@ -3136,23 +3175,23 @@ export function runBaseTests(config: { ).toEqual(1); } - expect(await client.objectEncoding(hashtable_key)).toEqual( + checkSimple(await client.objectEncoding(hashtable_key)).toEqual( "hashtable", ); expect(await client.sadd(intset_key, ["1"])).toEqual(1); - expect(await 
client.objectEncoding(intset_key)).toEqual( + checkSimple(await client.objectEncoding(intset_key)).toEqual( "intset", ); expect(await client.sadd(set_listpack_key, ["foo"])).toEqual(1); if (versionLessThan72) { - expect( + checkSimple( await client.objectEncoding(set_listpack_key), ).toEqual("hashtable"); } else { - expect( + checkSimple( await client.objectEncoding(set_listpack_key), ).toEqual("listpack"); } @@ -3166,20 +3205,20 @@ export function runBaseTests(config: { ).toEqual(1); } - expect(await client.objectEncoding(hash_hashtable_key)).toEqual( - "hashtable", - ); + checkSimple( + await client.objectEncoding(hash_hashtable_key), + ).toEqual("hashtable"); expect( await client.hset(hash_listpack_key, { "1": "2" }), ).toEqual(1); if (versionLessThan7) { - expect( + checkSimple( await client.objectEncoding(hash_listpack_key), ).toEqual("ziplist"); } else { - expect( + checkSimple( await client.objectEncoding(hash_listpack_key), ).toEqual("listpack"); } @@ -3191,7 +3230,7 @@ export function runBaseTests(config: { ).toEqual(1); } - expect(await client.objectEncoding(skiplist_key)).toEqual( + checkSimple(await client.objectEncoding(skiplist_key)).toEqual( "skiplist", ); @@ -3200,11 +3239,11 @@ export function runBaseTests(config: { ).toEqual(1); if (versionLessThan7) { - expect( + checkSimple( await client.objectEncoding(zset_listpack_key), ).toEqual("ziplist"); } else { - expect( + checkSimple( await client.objectEncoding(zset_listpack_key), ).toEqual("listpack"); } @@ -3212,7 +3251,7 @@ export function runBaseTests(config: { expect( await client.xadd(stream_key, [["field", "value"]]), ).not.toBeNull(); - expect(await client.objectEncoding(stream_key)).toEqual( + checkSimple(await client.objectEncoding(stream_key)).toEqual( "stream", ); }, protocol); @@ -3349,7 +3388,7 @@ export function runCommonTests(config: { const value = "שלום hello 汉字"; await client.set(key, value); const result = await client.get(key); - expect(result).toEqual(value); + 
checkSimple(result).toEqual(value); }); }, config.timeout, @@ -3361,7 +3400,7 @@ export function runCommonTests(config: { await runTest(async (client: Client) => { const result = await client.get(uuidv4()); - expect(result).toEqual(null); + checkSimple(result).toEqual(null); }); }, config.timeout, @@ -3375,7 +3414,7 @@ export function runCommonTests(config: { await client.set(key, ""); const result = await client.get(key); - expect(result).toEqual(""); + checkSimple(result).toEqual(""); }); }, config.timeout, @@ -3402,7 +3441,7 @@ export function runCommonTests(config: { await client.set(key, value); const result = await client.get(key); - expect(result).toEqual(value); + checkSimple(result).toEqual(value); }); }, config.timeout, @@ -3417,7 +3456,7 @@ export function runCommonTests(config: { await GetAndSetRandomValue(client); } else { const result = await client.get(uuidv4()); - expect(result).toEqual(null); + checkSimple(result).toEqual(null); } }; diff --git a/node/tests/TestUtilities.ts b/node/tests/TestUtilities.ts index b5012b639b..5a78091df0 100644 --- a/node/tests/TestUtilities.ts +++ b/node/tests/TestUtilities.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { beforeAll, expect } from "@jest/globals"; @@ -10,11 +10,11 @@ import { BaseClient, BaseClientConfiguration, ClusterTransaction, + GlideClient, + GlideClusterClient, InsertPosition, Logger, ProtocolVersion, - RedisClient, - RedisClusterClient, ReturnType, Transaction, } from ".."; @@ -24,6 +24,61 @@ beforeAll(() => { Logger.init("info"); }); +/* eslint-disable @typescript-eslint/no-explicit-any */ +function intoArrayInternal(obj: any, builder: Array) { + if (obj == null) { + builder.push("null"); + } else if (typeof obj === "string") { + builder.push(obj); + } else if (obj instanceof Uint8Array) { + builder.push(obj.toString()); + } else if (obj instanceof 
Array) { + for (const item of obj) { + intoArrayInternal(item, builder); + } + } else if (obj instanceof Set) { + const arr = Array.from(obj); + arr.sort(); + + for (const item of arr) { + intoArrayInternal(item, builder); + } + } else if (obj instanceof Map) { + for (const [key, value] of obj) { + intoArrayInternal(key, builder); + intoArrayInternal(value, builder); + } + } else if (typeof obj[Symbol.iterator] === "function") { + // iterable, recurse into children + for (const item of obj) { + intoArrayInternal(item, builder); + } + } else { + for (const [k, v] of Object.entries(obj)) { + intoArrayInternal(k, builder); + intoArrayInternal(v, builder); + } + } +} + +/** + * accept any variable `v` and convert it into String, recursively + */ +export function intoString(v: any): string { + const builder: Array = []; + intoArrayInternal(v, builder); + return builder.join(""); +} + +/** + * accept any variable `v` and convert it into array of string + */ +export function intoArray(v: any): Array { + const result: Array = []; + intoArrayInternal(v, result); + return result; +} + /** * Convert array of strings into array of `Uint8Array` */ @@ -37,6 +92,23 @@ export function convertStringArrayToBuffer(value: string[]): Uint8Array[] { return bytesarr; } +export class Checker { + left: string; + + constructor(left: any) { + this.left = intoString(left); + } + + toEqual(right: any) { + right = intoString(right); + return expect(this.left).toEqual(right); + } +} + +export function checkSimple(left: any): Checker { + return new Checker(left); +} + export type Client = { set: (key: string, value: string) => Promise; get: (key: string) => Promise; @@ -47,9 +119,9 @@ export async function GetAndSetRandomValue(client: Client) { // Adding random repetition, to prevent the inputs from always having the same alignment. 
const value = uuidv4() + "0".repeat(Math.random() * 7); const setResult = await client.set(key, value); - expect(setResult).toEqual("OK"); + expect(intoString(setResult)).toEqual("OK"); const result = await client.get(key); - expect(result).toEqual(value); + expect(intoString(result)).toEqual(value); } export function flushallOnPort(port: number): Promise { @@ -146,8 +218,8 @@ export async function testTeardown( option: BaseClientConfiguration, ) { const client = cluster_mode - ? await RedisClusterClient.createClient(option) - : await RedisClient.createClient(option); + ? await GlideClusterClient.createClient(option) + : await GlideClient.createClient(option); await client.customCommand(["FLUSHALL"]); client.close(); diff --git a/node/tests/UtilsTests.test.ts b/node/tests/UtilsTests.test.ts index b5533ddaa5..2cc267812e 100644 --- a/node/tests/UtilsTests.test.ts +++ b/node/tests/UtilsTests.test.ts @@ -1,5 +1,5 @@ /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ import { describe, expect, it } from "@jest/globals"; diff --git a/python/Cargo.toml b/python/Cargo.toml index 842526ed8d..589c0c28ee 100644 --- a/python/Cargo.toml +++ b/python/Cargo.toml @@ -12,6 +12,7 @@ crate-type = ["cdylib"] [dependencies] pyo3 = { version = "^0.20", features = ["extension-module", "num-bigint"] } +bytes = { version = "1.6.0" } redis = { path = "../submodules/redis-rs/redis", features = ["aio", "tokio-comp", "connection-manager","tokio-rustls-comp"] } glide-core = { path = "../glide-core", features = ["socket-layer"] } logger_core = {path = "../logger_core"} diff --git a/python/README.md b/python/README.md index 30353a7c31..290b4699c9 100644 --- a/python/README.md +++ b/python/README.md @@ -45,11 +45,11 @@ To install GLIDE for Redis using `pip`, follow these steps: ```python: >>> import asyncio ->>> from glide import ClusterClientConfiguration, NodeAddress, 
RedisClusterClient +>>> from glide import ClusterClientConfiguration, NodeAddress, GlideClusterClient >>> async def test_cluster_client(): ... addresses = [NodeAddress("redis.example.com", 6379)] ... config = ClusterClientConfiguration(addresses) -... client = await RedisClusterClient.create(config) +... client = await GlideClusterClient.create(config) ... set_result = await client.set("foo", "bar") ... print(f"Set response is {set_result}") ... get_result = await client.get("foo") @@ -64,14 +64,14 @@ Get response is bar ```python: >>> import asyncio ->>> from glide import RedisClientConfiguration, NodeAddress, RedisClient +>>> from glide import GlideClientConfiguration, NodeAddress, GlideClient >>> async def test_standalone_client(): ... addresses = [ -... NodeAddress("redis_primary.example.com", 6379), -... NodeAddress("redis_replica.example.com", 6379) +... NodeAddress("server_primary.example.com", 6379), +... NodeAddress("server_replica.example.com", 6379) ... ] -... config = RedisClientConfiguration(addresses) -... client = await RedisClient.create(config) +... config = GlideClientConfiguration(addresses) +... client = await GlideClient.create(config) ... set_result = await client.set("foo", "bar") ... print(f"Set response is {set_result}") ... get_result = await client.get("foo") diff --git a/python/THIRD_PARTY_LICENSES_PYTHON b/python/THIRD_PARTY_LICENSES_PYTHON index cb99cb678a..885abcc102 100644 --- a/python/THIRD_PARTY_LICENSES_PYTHON +++ b/python/THIRD_PARTY_LICENSES_PYTHON @@ -2993,7 +2993,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: bitflags:2.5.0 +Package: bitflags:2.6.0 The following copyrights and licenses were found in the source code of this package: @@ -12517,7 +12517,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
---- -Package: lazy_static:1.4.0 +Package: lazy_static:1.5.0 The following copyrights and licenses were found in the source code of this package: @@ -13666,7 +13666,7 @@ The following copyrights and licenses were found in the source code of this pack ---- -Package: memchr:2.7.2 +Package: memchr:2.7.4 The following copyrights and licenses were found in the source code of this package: @@ -13743,7 +13743,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: miniz_oxide:0.7.3 +Package: miniz_oxide:0.7.4 The following copyrights and licenses were found in the source code of this package: @@ -18647,7 +18647,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: proc-macro2:1.0.85 +Package: proc-macro2:1.0.86 The following copyrights and licenses were found in the source code of this package: @@ -21018,7 +21018,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---- -Package: redox_syscall:0.5.1 +Package: redox_syscall:0.5.2 The following copyrights and licenses were found in the source code of this package: @@ -24241,7 +24241,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: subtle:2.5.0 +Package: subtle:2.6.1 The following copyrights and licenses were found in the source code of this package: @@ -24501,7 +24501,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: syn:2.0.66 +Package: syn:2.0.68 The following copyrights and licenses were found in the source code of this package: @@ -26328,7 +26328,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: tinyvec:1.6.0 +Package: tinyvec:1.6.1 The following copyrights and licenses were found in the source code of this package: @@ -36892,7 +36892,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
---- -Package: google-api-core:2.19.0 +Package: google-api-core:2.19.1 The following copyrights and licenses were found in the source code of this package: @@ -37724,7 +37724,7 @@ The following copyrights and licenses were found in the source code of this pack ---- -Package: googleapis-common-protos:1.63.1 +Package: googleapis-common-protos:1.63.2 The following copyrights and licenses were found in the source code of this package: @@ -39157,7 +39157,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ---- -Package: proto-plus:1.23.0 +Package: proto-plus:1.24.0 The following copyrights and licenses were found in the source code of this package: @@ -39396,7 +39396,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---- -Package: protobuf:5.27.1 +Package: protobuf:5.27.2 The following copyrights and licenses were found in the source code of this package: @@ -40736,7 +40736,7 @@ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
---- -Package: urllib3:2.2.1 +Package: urllib3:2.2.2 The following copyrights and licenses were found in the source code of this package: diff --git a/python/python/glide/__init__.py b/python/python/glide/__init__.py index 8236711b2a..7b0510dbb1 100644 --- a/python/python/glide/__init__.py +++ b/python/python/glide/__init__.py @@ -1,17 +1,33 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 +from glide.async_commands.bitmap import ( + BitEncoding, + BitFieldGet, + BitFieldIncrBy, + BitFieldOffset, + BitFieldOverflow, + BitFieldSet, + BitFieldSubCommands, + BitmapIndexType, + BitOffset, + BitOffsetMultiplier, + BitOverflowControl, + BitwiseOperation, + OffsetOptions, + SignedEncoding, + UnsignedEncoding, +) from glide.async_commands.command_args import Limit, ListDirection, OrderBy from glide.async_commands.core import ( ConditionalChange, ExpireOptions, + ExpiryGetEx, ExpirySet, ExpiryType, + ExpiryTypeGetEx, + FlushMode, InfoSection, InsertPosition, - StreamAddOptions, - StreamTrimOptions, - TrimByMaxLen, - TrimByMinId, UpdateOptions, ) from glide.async_commands.redis_modules import json @@ -30,30 +46,46 @@ ScoreBoundary, ScoreFilter, ) +from glide.async_commands.stream import ( + ExclusiveIdBound, + IdBound, + MaxId, + MinId, + StreamAddOptions, + StreamGroupOptions, + StreamRangeBound, + StreamReadGroupOptions, + StreamReadOptions, + StreamTrimOptions, + TrimByMaxLen, + TrimByMinId, +) from glide.async_commands.transaction import ClusterTransaction, Transaction from glide.config import ( BackoffStrategy, BaseClientConfiguration, ClusterClientConfiguration, + GlideClientConfiguration, NodeAddress, PeriodicChecksManualInterval, PeriodicChecksStatus, ProtocolVersion, ReadFrom, - RedisClientConfiguration, RedisCredentials, ) from glide.constants import OK from glide.exceptions import ( ClosingError, + ConfigurationError, + ConnectionError, ExecAbortError, 
RedisError, RequestError, TimeoutError, ) +from glide.glide_client import GlideClient, GlideClusterClient from glide.logger import Level as LogLevel from glide.logger import Logger -from glide.redis_client import RedisClient, RedisClusterClient from glide.routes import ( AllNodes, AllPrimaries, @@ -68,13 +100,13 @@ __all__ = [ # Client - "RedisClient", - "RedisClusterClient", + "GlideClient", + "GlideClusterClient", "Transaction", "ClusterTransaction", # Config "BaseClientConfiguration", - "RedisClientConfiguration", + "GlideClientConfiguration", "ClusterClientConfiguration", "BackoffStrategy", "ReadFrom", @@ -86,12 +118,30 @@ # Response "OK", # Commands + "BitEncoding", + "BitFieldGet", + "BitFieldIncrBy", + "BitFieldOffset", + "BitFieldOverflow", + "BitFieldSet", + "BitFieldSubCommands", + "BitmapIndexType", + "BitOffset", + "BitOffsetMultiplier", + "BitOverflowControl", + "BitwiseOperation", + "OffsetOptions", + "SignedEncoding", + "UnsignedEncoding", "Script", "ScoreBoundary", "ConditionalChange", "ExpireOptions", + "ExpiryGetEx", "ExpirySet", "ExpiryType", + "ExpiryTypeGetEx", + "FlushMode", "GeoSearchByBox", "GeoSearchByRadius", "GeoSearchCount", @@ -110,7 +160,15 @@ "RangeByScore", "ScoreFilter", "OrderBy", + "ExclusiveIdBound", + "IdBound", + "MaxId", + "MinId", "StreamAddOptions", + "StreamGroupOptions", + "StreamReadGroupOptions", + "StreamRangeBound", + "StreamReadOptions", "StreamTrimOptions", "TrimByMaxLen", "TrimByMinId", @@ -128,6 +186,8 @@ "SlotIdRoute", # Exceptions "ClosingError", + "ConfigurationError", + "ConnectionError", "ExecAbortError", "RedisError", "RequestError", diff --git a/python/python/glide/async_commands/__init__.py b/python/python/glide/async_commands/__init__.py index 3779cd3fa5..8aaf21baff 100644 --- a/python/python/glide/async_commands/__init__.py +++ b/python/python/glide/async_commands/__init__.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project 
Contributors - SPDX Identifier: Apache-2.0 from .core import CoreCommands diff --git a/python/python/glide/async_commands/bitmap.py b/python/python/glide/async_commands/bitmap.py new file mode 100644 index 0000000000..c5cfdca0d3 --- /dev/null +++ b/python/python/glide/async_commands/bitmap.py @@ -0,0 +1,305 @@ +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 +from abc import ABC, abstractmethod +from enum import Enum +from typing import List, Optional + + +class BitmapIndexType(Enum): + """ + Enumeration specifying if index arguments are BYTE indexes or BIT indexes. Can be specified in `OffsetOptions`, + which is an optional argument to the `BITCOUNT` command. + + Since: Redis version 7.0.0. + """ + + BYTE = "BYTE" + """ + Specifies that indexes provided to `OffsetOptions` are byte indexes. + """ + BIT = "BIT" + """ + Specifies that indexes provided to `OffsetOptions` are bit indexes. + """ + + +class OffsetOptions: + def __init__( + self, start: int, end: int, index_type: Optional[BitmapIndexType] = None + ): + """ + Represents offsets specifying a string interval to analyze in the `BITCOUNT` command. The offsets are + zero-based indexes, with `0` being the first index of the string, `1` being the next index and so on. + The offsets can also be negative numbers indicating offsets starting at the end of the string, with `-1` being + the last index of the string, `-2` being the penultimate, and so on. + + Args: + start (int): The starting offset index. + end (int): The ending offset index. + index_type (Optional[BitmapIndexType]): The index offset type. This option can only be specified if you are + using Redis version 7.0.0 or above. Could be either `BitmapIndexType.BYTE` or `BitmapIndexType.BIT`. + If no index type is provided, the indexes will be assumed to be byte indexes. 
+ """ + self.start = start + self.end = end + self.index_type = index_type + + def to_args(self) -> List[str]: + args = [str(self.start), str(self.end)] + if self.index_type is not None: + args.append(self.index_type.value) + + return args + + +class BitwiseOperation(Enum): + """ + Enumeration defining the bitwise operation to use in the `BITOP` command. Specifies the bitwise operation to + perform between the passed in keys. + """ + + AND = "AND" + OR = "OR" + XOR = "XOR" + NOT = "NOT" + + +class BitEncoding(ABC): + """ + Abstract Base Class used to specify a signed or unsigned argument encoding for the `BITFIELD` or `BITFIELD_RO` + commands. + """ + + @abstractmethod + def to_arg(self) -> str: + """ + Returns the encoding as a string argument to be used in the `BITFIELD` or `BITFIELD_RO` + commands. + """ + pass + + +class SignedEncoding(BitEncoding): + # Prefix specifying that the encoding is signed. + SIGNED_ENCODING_PREFIX = "i" + + def __init__(self, encoding_length: int): + """ + Represents a signed argument encoding. Must be less than 65 bits long. + + Args: + encoding_length (int): The bit size of the encoding. + """ + self._encoding = f"{self.SIGNED_ENCODING_PREFIX}{str(encoding_length)}" + + def to_arg(self) -> str: + return self._encoding + + +class UnsignedEncoding(BitEncoding): + # Prefix specifying that the encoding is unsigned. + UNSIGNED_ENCODING_PREFIX = "u" + + def __init__(self, encoding_length: int): + """ + Represents an unsigned argument encoding. Must be less than 64 bits long. + + Args: + encoding_length (int): The bit size of the encoding. 
+ """ + self._encoding = f"{self.UNSIGNED_ENCODING_PREFIX}{str(encoding_length)}" + + def to_arg(self) -> str: + return self._encoding + + +class BitFieldOffset(ABC): + """Abstract Base Class representing an offset for an array of bits for the `BITFIELD` or `BITFIELD_RO` commands.""" + + @abstractmethod + def to_arg(self) -> str: + """ + Returns the offset as a string argument to be used in the `BITFIELD` or `BITFIELD_RO` + commands. + """ + pass + + +class BitOffset(BitFieldOffset): + def __init__(self, offset: int): + """ + Represents an offset in an array of bits for the `BITFIELD` or `BITFIELD_RO` commands. Must be greater than or + equal to 0. + + For example, if we have the binary `01101001` with offset of 1 for an unsigned encoding of size 4, then the value + is 13 from `0(1101)001`. + + Args: + offset (int): The bit index offset in the array of bits. + """ + self._offset = str(offset) + + def to_arg(self) -> str: + return self._offset + + +class BitOffsetMultiplier(BitFieldOffset): + # Prefix specifying that the offset uses an encoding multiplier. + OFFSET_MULTIPLIER_PREFIX = "#" + + def __init__(self, offset: int): + """ + Represents an offset in an array of bits for the `BITFIELD` or `BITFIELD_RO` commands. The bit offset index is + calculated as the numerical value of the offset multiplied by the encoding value. Must be greater than or equal + to 0. + + For example, if we have the binary 01101001 with offset multiplier of 1 for an unsigned encoding of size 4, then + the value is 9 from `0110(1001)`. + + Args: + offset (int): The offset in the array of bits, which will be multiplied by the encoding value to get the + final bit index offset. 
+ """ + self._offset = f"{self.OFFSET_MULTIPLIER_PREFIX}{str(offset)}" + + def to_arg(self) -> str: + return self._offset + + +class BitFieldSubCommands(ABC): + """Abstract Base Class representing subcommands for the `BITFIELD` or `BITFIELD_RO` commands.""" + + @abstractmethod + def to_args(self) -> List[str]: + """ + Returns the subcommand as a list of string arguments to be used in the `BITFIELD` or `BITFIELD_RO` commands. + """ + pass + + +class BitFieldGet(BitFieldSubCommands): + # "GET" subcommand string for use in the `BITFIELD` or `BITFIELD_RO` commands. + GET_COMMAND_STRING = "GET" + + def __init__(self, encoding: BitEncoding, offset: BitFieldOffset): + """ + Represents the "GET" subcommand for getting a value in the binary representation of the string stored in `key`. + + Args: + encoding (BitEncoding): The bit encoding for the subcommand. + offset (BitFieldOffset): The offset in the array of bits from which to get the value. + """ + self._encoding = encoding + self._offset = offset + + def to_args(self) -> List[str]: + return [self.GET_COMMAND_STRING, self._encoding.to_arg(), self._offset.to_arg()] + + +class BitFieldSet(BitFieldSubCommands): + # "SET" subcommand string for use in the `BITFIELD` command. + SET_COMMAND_STRING = "SET" + + def __init__(self, encoding: BitEncoding, offset: BitFieldOffset, value: int): + """ + Represents the "SET" subcommand for setting bits in the binary representation of the string stored in `key`. + + Args: + encoding (BitEncoding): The bit encoding for the subcommand. + offset (BitOffset): The offset in the array of bits where the value will be set. + value (int): The value to set the bits in the binary value to. 
+ """ + self._encoding = encoding + self._offset = offset + self._value = value + + def to_args(self) -> List[str]: + return [ + self.SET_COMMAND_STRING, + self._encoding.to_arg(), + self._offset.to_arg(), + str(self._value), + ] + + +class BitFieldIncrBy(BitFieldSubCommands): + # "INCRBY" subcommand string for use in the `BITFIELD` command. + INCRBY_COMMAND_STRING = "INCRBY" + + def __init__(self, encoding: BitEncoding, offset: BitFieldOffset, increment: int): + """ + Represents the "INCRBY" subcommand for increasing or decreasing bits in the binary representation of the + string stored in `key`. + + Args: + encoding (BitEncoding): The bit encoding for the subcommand. + offset (BitOffset): The offset in the array of bits where the value will be incremented. + increment (int): The value to increment the bits in the binary value by. + """ + self._encoding = encoding + self._offset = offset + self._increment = increment + + def to_args(self) -> List[str]: + return [ + self.INCRBY_COMMAND_STRING, + self._encoding.to_arg(), + self._offset.to_arg(), + str(self._increment), + ] + + +class BitOverflowControl(Enum): + """ + Enumeration specifying bit overflow controls for the `BITFIELD` command. + """ + + WRAP = "WRAP" + """ + Performs modulo when overflows occur with unsigned encoding. When overflows occur with signed encoding, the value + restarts at the most negative value. When underflows occur with signed encoding, the value restarts at the most + positive value. + """ + SAT = "SAT" + """ + Underflows remain set to the minimum value, and overflows remain set to the maximum value. + """ + FAIL = "FAIL" + """ + Returns `None` when overflows occur. + """ + + +class BitFieldOverflow(BitFieldSubCommands): + # "OVERFLOW" subcommand string for use in the `BITFIELD` command. 
+ OVERFLOW_COMMAND_STRING = "OVERFLOW" + + def __init__(self, overflow_control: BitOverflowControl): + """ + Represents the "OVERFLOW" subcommand that determines the result of the "SET" or "INCRBY" `BITFIELD` subcommands + when an underflow or overflow occurs. + + Args: + overflow_control (BitOverflowControl): The desired overflow behavior. + """ + self._overflow_control = overflow_control + + def to_args(self) -> List[str]: + return [self.OVERFLOW_COMMAND_STRING, self._overflow_control.value] + + +def _create_bitfield_args(subcommands: List[BitFieldSubCommands]) -> List[str]: + args = [] + for subcommand in subcommands: + args.extend(subcommand.to_args()) + + return args + + +def _create_bitfield_read_only_args( + subcommands: List[BitFieldGet], +) -> List[str]: + args = [] + for subcommand in subcommands: + args.extend(subcommand.to_args()) + + return args diff --git a/python/python/glide/async_commands/cluster_commands.py b/python/python/glide/async_commands/cluster_commands.py index 761d5301ca..9294c1c8ba 100644 --- a/python/python/glide/async_commands/cluster_commands.py +++ b/python/python/glide/async_commands/cluster_commands.py @@ -1,11 +1,16 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from __future__ import annotations from typing import Dict, List, Mapping, Optional, cast from glide.async_commands.command_args import Limit, OrderBy -from glide.async_commands.core import CoreCommands, InfoSection, _build_sort_args +from glide.async_commands.core import ( + CoreCommands, + FlushMode, + InfoSection, + _build_sort_args, +) from glide.async_commands.transaction import BaseTransaction, ClusterTransaction from glide.constants import TOK, TClusterResponse, TResult, TSingleNodeRoute from glide.protobuf.redis_request_pb2 import RequestType @@ -486,3 +491,170 @@ async def sort_store( args = _build_sort_args(key, None, limit, None, order, alpha, 
store=destination) result = await self._execute_command(RequestType.Sort, args) return cast(int, result) + + async def publish(self, message: str, channel: str, sharded: bool = False) -> int: + """ + Publish a message on pubsub channel. + This command aggregates PUBLISH and SPUBLISH commands functionalities. + The mode is selected using the 'sharded' parameter + See https://valkey.io/commands/publish and https://valkey.io/commands/spublish for more details. + + Args: + message (str): Message to publish + channel (str): Channel to publish the message on. + sharded (bool): Use sharded pubsub mode. + + Returns: + int: Number of subscriptions in that shard that received the message. + + Examples: + >>> await client.publish("Hi all!", "global-channel", False) + 1 # Publishes "Hi all!" message on global-channel channel using non-sharded mode + >>> await client.publish("Hi to sharded channel1!", "channel1", True) + 2 # Publishes "Hi to sharded channel1!" message on channel1 using sharded mode + """ + result = await self._execute_command( + RequestType.SPublish if sharded else RequestType.Publish, [channel, message] + ) + return cast(int, result) + + async def flushall( + self, flush_mode: Optional[FlushMode] = None, route: Optional[Route] = None + ) -> TClusterResponse[TOK]: + """ + Deletes all the keys of all the existing databases. This command never fails. + + See https://valkey.io/commands/flushall for more details. + + Args: + flush_mode (Optional[FlushMode]): The flushing mode, could be either `SYNC` or `ASYNC`. + route (Optional[Route]): The command will be routed to all primary nodes, unless `route` is provided, + in which case the client will route the command to the nodes defined by `route`. + + Returns: + TClusterResponse[TOK]: OK. + + Examples: + >>> await client.flushall(FlushMode.ASYNC) + OK # This command never fails. + >>> await client.flushall(FlushMode.ASYNC, AllNodes()) + OK # This command never fails. 
+ """ + args = [] + if flush_mode is not None: + args.append(flush_mode.value) + + return cast( + TClusterResponse[TOK], + await self._execute_command(RequestType.FlushAll, args, route), + ) + + async def flushdb( + self, flush_mode: Optional[FlushMode] = None, route: Optional[Route] = None + ) -> TClusterResponse[TOK]: + """ + Deletes all the keys of the currently selected database. This command never fails. + + See https://valkey.io/commands/flushdb for more details. + + Args: + flush_mode (Optional[FlushMode]): The flushing mode, could be either `SYNC` or `ASYNC`. + route (Optional[Route]): The command will be routed to all primary nodes, unless `route` is provided, + in which case the client will route the command to the nodes defined by `route`. + + Returns: + TOK: OK. + + Examples: + >>> await client.flushdb() + OK # The keys of the currently selected database were deleted. + >>> await client.flushdb(FlushMode.ASYNC) + OK # The keys of the currently selected database were deleted asynchronously. + >>> await client.flushdb(FlushMode.ASYNC, AllNodes()) + OK # The keys of the currently selected database were deleted asynchronously on all nodes. + """ + args = [] + if flush_mode is not None: + args.append(flush_mode.value) + + return cast( + TClusterResponse[TOK], + await self._execute_command(RequestType.FlushDB, args, route), + ) + + async def copy( + self, + source: str, + destination: str, + replace: Optional[bool] = None, + ) -> bool: + """ + Copies the value stored at the `source` to the `destination` key. When `replace` is True, + removes the `destination` key first if it already exists, otherwise performs no action. + + See https://valkey.io/commands/copy for more details. + + Note: + Both `source` and `destination` must map to the same hash slot. + + Args: + source (str): The key to the source value. + destination (str): The key where the value should be copied to. 
+ replace (Optional[bool]): If the destination key should be removed before copying the value to it. + + Returns: + bool: True if the source was copied. Otherwise, returns False. + + Examples: + >>> await client.set("source", "sheep") + >>> await client.copy("source", "destination") + True # Source was copied + >>> await client.get("destination") + "sheep" + + Since: Redis version 6.2.0. + """ + args = [source, destination] + if replace is True: + args.append("REPLACE") + return cast( + bool, + await self._execute_command(RequestType.Copy, args), + ) + + async def lolwut( + self, + version: Optional[int] = None, + parameters: Optional[List[int]] = None, + route: Optional[Route] = None, + ) -> TClusterResponse[str]: + """ + Displays a piece of generative computer art and the Redis version. + + See https://valkey.io/commands/lolwut for more details. + + Args: + version (Optional[int]): Version of computer art to generate. + parameters (Optional[List[int]]): Additional set of arguments in order to change the output: + For version `5`, those are length of the line, number of squares per row, and number of squares per column. + For version `6`, those are number of columns and number of lines. + route (Optional[Route]): The command will be routed to a random node, unless `route` is provided, + in which case the client will route the command to the nodes defined by `route`. + + Returns: + TClusterResponse[str]: A piece of generative computer art along with the current Redis version. + + Examples: + >>> await client.lolwut(6, [40, 20], ALL_NODES); + "Redis ver. 
7.2.3" # Indicates the current Redis version + """ + args = [] + if version is not None: + args.extend(["VERSION", str(version)]) + if parameters: + for var in parameters: + args.append(str(var)) + return cast( + TClusterResponse[str], + await self._execute_command(RequestType.Lolwut, args, route), + ) diff --git a/python/python/glide/async_commands/command_args.py b/python/python/glide/async_commands/command_args.py index 39d3e4982c..ce76fd2d55 100644 --- a/python/python/glide/async_commands/command_args.py +++ b/python/python/glide/async_commands/command_args.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from enum import Enum from typing import List, Optional, Union diff --git a/python/python/glide/async_commands/core.py b/python/python/glide/async_commands/core.py index 4e33247192..330a9018d3 100644 --- a/python/python/glide/async_commands/core.py +++ b/python/python/glide/async_commands/core.py @@ -1,6 +1,7 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from abc import ABC, abstractmethod from collections.abc import Mapping +from dataclasses import dataclass from datetime import datetime, timedelta from enum import Enum from typing import ( @@ -16,6 +17,15 @@ get_args, ) +from glide.async_commands.bitmap import ( + BitFieldGet, + BitFieldSubCommands, + BitmapIndexType, + BitwiseOperation, + OffsetOptions, + _create_bitfield_args, + _create_bitfield_read_only_args, +) from glide.async_commands.command_args import Limit, ListDirection, OrderBy from glide.async_commands.sorted_set import ( AggregationType, @@ -35,6 +45,14 @@ _create_zinter_zunion_cmd_args, _create_zrange_args, ) +from glide.async_commands.stream import ( + StreamAddOptions, + StreamGroupOptions, + StreamRangeBound, + StreamReadGroupOptions, + 
StreamReadOptions, + StreamTrimOptions, +) from glide.constants import TOK, TResult from glide.protobuf.redis_request_pb2 import RequestType from glide.routes import Route @@ -70,6 +88,23 @@ class ExpiryType(Enum): KEEP_TTL = 4, Type[None] # Equivalent to `KEEPTTL` in the Redis API +class ExpiryTypeGetEx(Enum): + """GetEx option: The type of the expiry. + - EX - Set the specified expire time, in seconds. Equivalent to `EX` in the Redis API. + - PX - Set the specified expire time, in milliseconds. Equivalent to `PX` in the Redis API. + - UNIX_SEC - Set the specified Unix time at which the key will expire, in seconds. Equivalent to `EXAT` in the Redis API. + - UNIX_MILLSEC - Set the specified Unix time at which the key will expire, in milliseconds. Equivalent to `PXAT` in the + Redis API. + - PERSIST - Remove the time to live associated with the key. Equivalent to `PERSIST` in the Redis API. + """ + + SEC = 0, Union[int, timedelta] # Equivalent to `EX` in the Redis API + MILLSEC = 1, Union[int, timedelta] # Equivalent to `PX` in the Redis API + UNIX_SEC = 2, Union[int, datetime] # Equivalent to `EXAT` in the Redis API + UNIX_MILLSEC = 3, Union[int, datetime] # Equivalent to `PXAT` in the Redis API + PERSIST = 4, Type[None] # Equivalent to `PERSIST` in the Redis API + + class InfoSection(Enum): """ INFO option: a specific section of information: @@ -142,134 +177,6 @@ class UpdateOptions(Enum): GREATER_THAN = "GT" -class StreamTrimOptions(ABC): - """ - Abstract base class for stream trim options. - """ - - @abstractmethod - def __init__( - self, - exact: bool, - threshold: Union[str, int], - method: str, - limit: Optional[int] = None, - ): - """ - Initialize stream trim options. - - Args: - exact (bool): If `true`, the stream will be trimmed exactly. - Otherwise the stream will be trimmed in a near-exact manner, which is more efficient. - threshold (Union[str, int]): Threshold for trimming. - method (str): Method for trimming (e.g., MINID, MAXLEN). 
- limit (Optional[int]): Max number of entries to be trimmed. Defaults to None. - Note: If `exact` is set to `True`, `limit` cannot be specified. - """ - if exact and limit: - raise ValueError( - "If `exact` is set to `True`, `limit` cannot be specified." - ) - self.exact = exact - self.threshold = threshold - self.method = method - self.limit = limit - - def to_args(self) -> List[str]: - """ - Convert options to arguments for Redis command. - - Returns: - List[str]: List of arguments for Redis command. - """ - option_args = [ - self.method, - "=" if self.exact else "~", - str(self.threshold), - ] - if self.limit is not None: - option_args.extend(["LIMIT", str(self.limit)]) - return option_args - - -class TrimByMinId(StreamTrimOptions): - """ - Stream trim option to trim by minimum ID. - """ - - def __init__(self, exact: bool, threshold: str, limit: Optional[int] = None): - """ - Initialize trim option by minimum ID. - - Args: - exact (bool): If `true`, the stream will be trimmed exactly. - Otherwise the stream will be trimmed in a near-exact manner, which is more efficient. - threshold (str): Threshold for trimming by minimum ID. - limit (Optional[int]): Max number of entries to be trimmed. Defaults to None. - Note: If `exact` is set to `True`, `limit` cannot be specified. - """ - super().__init__(exact, threshold, "MINID", limit) - - -class TrimByMaxLen(StreamTrimOptions): - """ - Stream trim option to trim by maximum length. - """ - - def __init__(self, exact: bool, threshold: int, limit: Optional[int] = None): - """ - Initialize trim option by maximum length. - - Args: - exact (bool): If `true`, the stream will be trimmed exactly. - Otherwise the stream will be trimmed in a near-exact manner, which is more efficient. - threshold (int): Threshold for trimming by maximum length. - limit (Optional[int]): Max number of entries to be trimmed. Defaults to None. - Note: If `exact` is set to `True`, `limit` cannot be specified. 
- """ - super().__init__(exact, threshold, "MAXLEN", limit) - - -class StreamAddOptions: - """ - Options for adding entries to a stream. - """ - - def __init__( - self, - id: Optional[str] = None, - make_stream: bool = True, - trim: Optional[StreamTrimOptions] = None, - ): - """ - Initialize stream add options. - - Args: - id (Optional[str]): ID for the new entry. If set, the new entry will be added with this ID. If not specified, '*' is used. - make_stream (bool, optional): If set to False, a new stream won't be created if no stream matches the given key. - trim (Optional[StreamTrimOptions]): If set, the add operation will also trim the older entries in the stream. See `StreamTrimOptions`. - """ - self.id = id - self.make_stream = make_stream - self.trim = trim - - def to_args(self) -> List[str]: - """ - Convert options to arguments for Redis command. - - Returns: - List[str]: List of arguments for Redis command. - """ - option_args = [] - if not self.make_stream: - option_args.append("NOMKSTREAM") - if self.trim: - option_args.extend(self.trim.to_args()) - option_args.append(self.id if self.id else "*") - - return option_args - - class ExpirySet: """SET option: Represents the expiry type and value to be executed with "SET" command.""" @@ -323,11 +230,81 @@ def get_cmd_args(self) -> List[str]: return [self.cmd_arg] if self.value is None else [self.cmd_arg, self.value] +class ExpiryGetEx: + """GetEx option: Represents the expiry type and value to be executed with "GetEx" command.""" + + def __init__( + self, + expiry_type: ExpiryTypeGetEx, + value: Optional[Union[int, datetime, timedelta]], + ) -> None: + """ + Args: + - expiry_type (ExpiryType): The expiry type. + - value (Optional[Union[int, datetime, timedelta]]): The value of the expiration type. 
The type of expiration + determines the type of expiration value: + - SEC: Union[int, timedelta] + - MILLSEC: Union[int, timedelta] + - UNIX_SEC: Union[int, datetime] + - UNIX_MILLSEC: Union[int, datetime] + - PERSIST: Type[None] + """ + self.set_expiry_type_and_value(expiry_type, value) + + def set_expiry_type_and_value( + self, + expiry_type: ExpiryTypeGetEx, + value: Optional[Union[int, datetime, timedelta]], + ): + if not isinstance(value, get_args(expiry_type.value[1])): + raise ValueError( + f"The value of {expiry_type} should be of type {expiry_type.value[1]}" + ) + self.expiry_type = expiry_type + if self.expiry_type == ExpiryTypeGetEx.SEC: + self.cmd_arg = "EX" + if isinstance(value, timedelta): + value = int(value.total_seconds()) + elif self.expiry_type == ExpiryTypeGetEx.MILLSEC: + self.cmd_arg = "PX" + if isinstance(value, timedelta): + value = int(value.total_seconds() * 1000) + elif self.expiry_type == ExpiryTypeGetEx.UNIX_SEC: + self.cmd_arg = "EXAT" + if isinstance(value, datetime): + value = int(value.timestamp()) + elif self.expiry_type == ExpiryTypeGetEx.UNIX_MILLSEC: + self.cmd_arg = "PXAT" + if isinstance(value, datetime): + value = int(value.timestamp() * 1000) + elif self.expiry_type == ExpiryTypeGetEx.PERSIST: + self.cmd_arg = "PERSIST" + self.value = str(value) if value else None + + def get_cmd_args(self) -> List[str]: + return [self.cmd_arg] if self.value is None else [self.cmd_arg, self.value] + + class InsertPosition(Enum): BEFORE = "BEFORE" AFTER = "AFTER" +class FlushMode(Enum): + """ + Defines flushing mode for: + + `FLUSHALL` command and `FUNCTION FLUSH` command. + + See https://valkey.io/commands/flushall/ and https://valkey.io/commands/function-flush/ for details + + SYNC was introduced in version 6.2.0. 
+ """ + + ASYNC = "ASYNC" + SYNC = "SYNC" + + def _build_sort_args( key: str, by_pattern: Optional[str] = None, @@ -471,6 +448,43 @@ async def getdel(self, key: str) -> Optional[str]: Optional[str], await self._execute_command(RequestType.GetDel, [key]) ) + async def getrange(self, key: str, start: int, end: int) -> str: + """ + Returns the substring of the string value stored at `key`, determined by the offsets `start` and `end` (both are inclusive). + Negative offsets can be used in order to provide an offset starting from the end of the string. + So `-1` means the last character, `-2` the penultimate and so forth. + + If `key` does not exist, an empty string is returned. If `start` or `end` + are out of range, returns the substring within the valid range of the string. + + See https://valkey.io/commands/getrange/ for more details. + + Args: + key (str): The key of the string. + start (int): The starting offset. + end (int): The ending offset. + + Returns: + str: A substring extracted from the value stored at `key`. + + Examples: + >>> await client.set("mykey", "This is a string") + >>> await client.getrange("mykey", 0, 3) + "This" + >>> await client.getrange("mykey", -3, -1) + "ing" # extracted last 3 characters of a string + >>> await client.getrange("mykey", 0, 100) + "This is a string" + >>> await client.getrange("non_existing", 5, 6) + "" + """ + return cast( + str, + await self._execute_command( + RequestType.GetRange, [key, str(start), str(end)] + ), + ) + async def append(self, key: str, value: str) -> int: """ Appends a value to a key. @@ -795,6 +809,29 @@ async def decrby(self, key: str, amount: int) -> int: int, await self._execute_command(RequestType.DecrBy, [key, str(amount)]) ) + async def touch(self, keys: List[str]) -> int: + """ + Updates the last access time of specified keys. + + See https://valkey.io/commands/touch/ for details. + + Note: + When in cluster mode, the command may route to multiple nodes when `keys` map to different hash slots. 
+ + Args: + keys (List[str]): The keys to update last access time. + + Returns: + int: The number of keys that were updated, a key is ignored if it doesn't exist. + + Examples: + >>> await client.set("myKey1", "value1") + >>> await client.set("myKey2", "value2") + >>> await client.touch(["myKey1", "myKey2", "nonExistentKey"]) + 2 # Last access time of 2 keys has been updated. + """ + return cast(int, await self._execute_command(RequestType.Touch, keys)) + async def hset(self, key: str, field_value_map: Mapping[str, str]) -> int: """ Sets the specified fields to their respective values in the hash stored at `key`. @@ -834,7 +871,7 @@ async def hget(self, key: str, field: str) -> Optional[str]: Returns None if `field` is not presented in the hash or `key` does not exist. Examples: - >>> await client.hset("my_hash", "field") + >>> await client.hset("my_hash", "field", "value") >>> await client.hget("my_hash", "field") "value" >>> await client.hget("my_hash", "nonexistent_field") @@ -1148,6 +1185,29 @@ async def hrandfield_withvalues(self, key: str, count: int) -> List[List[str]]: ), ) + async def hstrlen(self, key: str, field: str) -> int: + """ + Returns the string length of the value associated with `field` in the hash stored at `key`. + + See https://valkey.io/commands/hstrlen/ for more details. + + Args: + key (str): The key of the hash. + field (str): The field in the hash. + + Returns: + int: The string length or 0 if `field` or `key` does not exist. + + Examples: + >>> await client.hset("my_hash", "field", "value") + >>> await client.hstrlen("my_hash", "field") + 5 + """ + return cast( + int, + await self._execute_command(RequestType.HStrlen, [key, field]), + ) + async def lpush(self, key: str, elements: List[str]) -> int: """ Insert all the specified values at the head of the list stored at `key`. 
@@ -1415,6 +1475,33 @@ async def lindex( await self._execute_command(RequestType.LIndex, [key, str(index)]), ) + async def lset(self, key: str, index: int, element: str) -> TOK: + """ + Sets the list element at `index` to `element`. + + The index is zero-based, so `0` means the first element, `1` the second element and so on. + Negative indices can be used to designate elements starting at the tail of the list. + Here, `-1` means the last element, `-2` means the penultimate and so forth. + + See https://valkey.io/commands/lset/ for details. + + Args: + key (str): The key of the list. + index (int): The index of the element in the list to be set. + element (str): The new element to set at the specified index. + + Returns: + TOK: A simple `OK` response. + + Examples: + >>> await client.lset("testKey", 1, "two") + OK + """ + return cast( + TOK, + await self._execute_command(RequestType.LSet, [key, str(index), element]), + ) + async def rpush(self, key: str, elements: List[str]) -> int: """ Inserts all the specified values at the tail of the list stored at `key`. @@ -1849,6 +1936,32 @@ async def smove( ), ) + async def sunion(self, keys: List[str]) -> Set[str]: + """ + Gets the union of all the given sets. + + See https://valkey.io/commands/sunion for more details. + + Note: + When in cluster mode, all `keys` must map to the same hash slot. + + Args: + keys (List[str]): The keys of the sets. + + Returns: + Set[str]: A set of members which are present in at least one of the given sets. + If none of the sets exist, an empty set will be returned. 
+ + Examples: + >>> await client.sadd("my_set1", ["member1", "member2"]) + >>> await client.sadd("my_set2", ["member2", "member3"]) + >>> await client.sunion(["my_set1", "my_set2"]) + {"member1", "member2", "member3"} # sets "my_set1" and "my_set2" have three unique members + >>> await client.sunion(["my_set1", "non_existing_set"]) + {"member1", "member2"} + """ + return cast(Set[str], await self._execute_command(RequestType.SUnion, keys)) + async def sunionstore( self, destination: str, @@ -2287,6 +2400,61 @@ async def pexpireat( ) return cast(bool, await self._execute_command(RequestType.PExpireAt, args)) + async def expiretime(self, key: str) -> int: + """ + Returns the absolute Unix timestamp (since January 1, 1970) at which + the given `key` will expire, in seconds. + To get the expiration with millisecond precision, use `pexpiretime`. + + See https://valkey.io/commands/expiretime/ for details. + + Args: + key (str): The `key` to determine the expiration value of. + + Returns: + int: The expiration Unix timestamp in seconds, -2 if `key` does not exist or -1 if `key` exists but has no associated expire. + + Examples: + >>> await client.expiretime("my_key") + -2 # 'my_key' doesn't exist. + >>> await client.set("my_key", "value") + >>> await client.expiretime("my_key") + -1 # 'my_key' has no associated expiration. + >>> await client.expire("my_key", 60) + >>> await client.expiretime("my_key") + 1718614954 + + Since: Redis version 7.0.0. + """ + return cast(int, await self._execute_command(RequestType.ExpireTime, [key])) + + async def pexpiretime(self, key: str) -> int: + """ + Returns the absolute Unix timestamp (since January 1, 1970) at which + the given `key` will expire, in milliseconds. + + See https://valkey.io/commands/pexpiretime/ for details. + + Args: + key (str): The `key` to determine the expiration value of. 
+ + Returns: + int: The expiration Unix timestamp in milliseconds, -2 if `key` does not exist, or -1 if `key` exists but has no associated expiration. + + Examples: + >>> await client.pexpiretime("my_key") + -2 # 'my_key' doesn't exist. + >>> await client.set("my_key", "value") + >>> await client.pexpiretime("my_key") + -1 # 'my_key' has no associated expiration. + >>> await client.expire("my_key", 60) + >>> await client.pexpiretime("my_key") + 1718615446670 + + Since: Redis version 7.0.0. + """ + return cast(int, await self._execute_command(RequestType.PExpireTime, [key])) + async def ttl(self, key: str) -> int: """ Returns the remaining time to live of `key` that has a timeout. @@ -2417,6 +2585,29 @@ async def xadd( return cast(Optional[str], await self._execute_command(RequestType.XAdd, args)) + async def xdel(self, key: str, ids: List[str]) -> int: + """ + Removes the specified entries by id from a stream, and returns the number of entries deleted. + + See https://valkey.io/commands/xdel for more details. + + Args: + key (str): The key of the stream. + ids (List[str]): An array of entry ids. + + Returns: + int: The number of entries removed from the stream. This number may be less than the number of entries in + `ids`, if the specified `ids` don't exist in the stream. + + Examples: + >>> await client.xdel("key", ["1538561698944-0", "1538561698944-1"]) + 2 # Stream marked 2 entries as deleted. 
+ """ + return cast( + int, + await self._execute_command(RequestType.XDel, [key] + ids), + ) + async def xtrim( self, key: str, @@ -2468,30 +2659,378 @@ async def xlen(self, key: str) -> int: await self._execute_command(RequestType.XLen, [key]), ) - async def geoadd( + async def xrange( self, key: str, - members_geospatialdata: Mapping[str, GeospatialData], - existing_options: Optional[ConditionalChange] = None, - changed: bool = False, - ) -> int: + start: StreamRangeBound, + end: StreamRangeBound, + count: Optional[int] = None, + ) -> Optional[Mapping[str, List[List[str]]]]: """ - Adds geospatial members with their positions to the specified sorted set stored at `key`. - If a member is already a part of the sorted set, its position is updated. + Returns stream entries matching a given range of IDs. - See https://valkey.io/commands/geoadd for more details. + See https://valkey.io/commands/xrange for more details. Args: - key (str): The key of the sorted set. - members_geospatialdata (Mapping[str, GeospatialData]): A mapping of member names to their corresponding positions. See `GeospatialData`. - The command will report an error when the user attempts to index coordinates outside the specified ranges. - existing_options (Optional[ConditionalChange]): Options for handling existing members. - - NX: Only add new elements. - - XX: Only update existing elements. - changed (bool): Modify the return value to return the number of changed elements, instead of the number of new elements added. + key (str): The key of the stream. + start (StreamRangeBound): The starting stream ID bound for the range. + - Use `IdBound` to specify a stream ID. + - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID. + - Use `MinId` to start with the minimum available ID. + end (StreamRangeBound): The ending stream ID bound for the range. + - Use `IdBound` to specify a stream ID. + - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID. 
+ - Use `MaxId` to end with the maximum available ID. + count (Optional[int]): An optional argument specifying the maximum count of stream entries to return. + If `count` is not provided, all stream entries in the range will be returned. + + Returns: + Optional[Mapping[str, List[List[str]]]]: A mapping of stream IDs to stream entry data, where entry data is a + list of pairings with format `[[field, entry], [field, entry], ...]`. Returns None if the range + arguments are not applicable. - Returns: - int: The number of elements added to the sorted set. + Examples: + >>> await client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="0-1")) + >>> await client.xadd("mystream", [("field2", "value2"), ("field2", "value3")], StreamAddOptions(id="0-2")) + >>> await client.xrange("mystream", MinId(), MaxId()) + { + "0-1": [["field1", "value1"]], + "0-2": [["field2", "value2"], ["field2", "value3"]], + } # Indicates the stream IDs and their associated field-value pairs for all stream entries in "mystream". + """ + args = [key, start.to_arg(), end.to_arg()] + if count is not None: + args.extend(["COUNT", str(count)]) + + return cast( + Optional[Mapping[str, List[List[str]]]], + await self._execute_command(RequestType.XRange, args), + ) + + async def xrevrange( + self, + key: str, + end: StreamRangeBound, + start: StreamRangeBound, + count: Optional[int] = None, + ) -> Optional[Mapping[str, List[List[str]]]]: + """ + Returns stream entries matching a given range of IDs in reverse order. Equivalent to `XRANGE` but returns the + entries in reverse order. + + See https://valkey.io/commands/xrevrange for more details. + + Args: + key (str): The key of the stream. + end (StreamRangeBound): The ending stream ID bound for the range. + - Use `IdBound` to specify a stream ID. + - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID. + - Use `MaxId` to end with the maximum available ID. + start (StreamRangeBound): The starting stream ID bound for the range. 
+ - Use `IdBound` to specify a stream ID. + - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID. + - Use `MinId` to start with the minimum available ID. + count (Optional[int]): An optional argument specifying the maximum count of stream entries to return. + If `count` is not provided, all stream entries in the range will be returned. + + Returns: + Optional[Mapping[str, List[List[str]]]]: A mapping of stream IDs to stream entry data, where entry data is a + list of pairings with format `[[field, entry], [field, entry], ...]`. Returns None if the range + arguments are not applicable. + + Examples: + >>> await client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="0-1")) + >>> await client.xadd("mystream", [("field2", "value2"), ("field2", "value3")], StreamAddOptions(id="0-2")) + >>> await client.xrevrange("mystream", MaxId(), MinId()) + { + "0-2": [["field2", "value2"], ["field2", "value3"]], + "0-1": [["field1", "value1"]], + } # Indicates the stream IDs and their associated field-value pairs for all stream entries in "mystream". + """ + args = [key, end.to_arg(), start.to_arg()] + if count is not None: + args.extend(["COUNT", str(count)]) + + return cast( + Optional[Mapping[str, List[List[str]]]], + await self._execute_command(RequestType.XRevRange, args), + ) + + async def xread( + self, + keys_and_ids: Mapping[str, str], + options: Optional[StreamReadOptions] = None, + ) -> Optional[Mapping[str, Mapping[str, List[List[str]]]]]: + """ + Reads entries from the given streams. + + See https://valkey.io/commands/xread for more details. + + Note: + When in cluster mode, all keys in `keys_and_ids` must map to the same hash slot. + + Args: + keys_and_ids (Mapping[str, str]): A mapping of keys and entry IDs to read from. The mapping is composed of a + stream's key and the ID of the entry after which the stream will be read. + options (Optional[StreamReadOptions]): Options detailing how to read the stream. 
+ + Returns: + Optional[Mapping[str, Mapping[str, List[List[str]]]]]: A mapping of stream keys, to a mapping of stream IDs, + to a list of pairings with format `[[field, entry], [field, entry], ...]`. + None will be returned under the following conditions: + - All key-ID pairs in `keys_and_ids` have either a non-existing key or a non-existing ID, or there are no entries after the given ID. + - The `BLOCK` option is specified and the timeout is hit. + + Examples: + >>> await client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="0-1")) + >>> await client.xadd("mystream", [("field2", "value2"), ("field2", "value3")], StreamAddOptions(id="0-2")) + >>> await client.xread({"mystream": "0-0"}, StreamReadOptions(block_ms=1000)) + { + "mystream": { + "0-1": [["field1", "value1"]], + "0-2": [["field2", "value2"], ["field2", "value3"]], + } + } + # Indicates the stream entries for "my_stream" with IDs greater than "0-0". The operation blocks up to + # 1000ms if there is no stream data. + """ + args = [] if options is None else options.to_args() + args.append("STREAMS") + args.extend([key for key in keys_and_ids.keys()]) + args.extend([value for value in keys_and_ids.values()]) + + return cast( + Optional[Mapping[str, Mapping[str, List[List[str]]]]], + await self._execute_command(RequestType.XRead, args), + ) + + async def xgroup_create( + self, + key: str, + group_name: str, + group_id: str, + options: Optional[StreamGroupOptions] = None, + ) -> TOK: + """ + Creates a new consumer group uniquely identified by `group_name` for the stream stored at `key`. + + See https://valkey.io/commands/xgroup-create for more details. + + Args: + key (str): The key of the stream. + group_name (str): The newly created consumer group name. + group_id (str): The stream entry ID that specifies the last delivered entry in the stream from the new + group’s perspective. The special ID "$" can be used to specify the last entry in the stream. 
+ options (Optional[StreamGroupOptions]): Options for creating the stream group. + + Returns: + TOK: A simple "OK" response. + + Examples: + >>> await client.xgroup_create("mystream", "mygroup", "$", StreamGroupOptions(make_stream=True)) + OK + # Created the consumer group "mygroup" for the stream "mystream", which will track entries created after + # the most recent entry. The stream was created with length 0 if it did not already exist. + """ + args = [key, group_name, group_id] + if options is not None: + args.extend(options.to_args()) + + return cast( + TOK, + await self._execute_command(RequestType.XGroupCreate, args), + ) + + async def xgroup_destroy(self, key: str, group_name: str) -> bool: + """ + Destroys the consumer group `group_name` for the stream stored at `key`. + + See https://valkey.io/commands/xgroup-destroy for more details. + + Args: + key (str): The key of the stream. + group_name (str): The consumer group name to delete. + + Returns: + bool: True if the consumer group was destroyed. Otherwise, returns False. + + Examples: + >>> await client.xgroup_destroy("mystream", "mygroup") + True # The consumer group "mygroup" for stream "mystream" was destroyed. + """ + return cast( + bool, + await self._execute_command(RequestType.XGroupDestroy, [key, group_name]), + ) + + async def xgroup_create_consumer( + self, key: str, group_name: str, consumer_name: str + ) -> bool: + """ + Creates a consumer named `consumer_name` in the consumer group `group_name` for the stream stored at `key`. + + See https://valkey.io/commands/xgroup-createconsumer for more details. + + Args: + key (str): The key of the stream. + group_name (str): The consumer group name. + consumer_name (str): The newly created consumer. + + Returns: + bool: True if the consumer is created. Otherwise, returns False. 
+ + Examples: + >>> await client.xgroup_create_consumer("mystream", "mygroup", "myconsumer") + True # The consumer "myconsumer" was created in consumer group "mygroup" for the stream "mystream". + """ + return cast( + bool, + await self._execute_command( + RequestType.XGroupCreateConsumer, [key, group_name, consumer_name] + ), + ) + + async def xgroup_del_consumer( + self, key: str, group_name: str, consumer_name: str + ) -> int: + """ + Deletes a consumer named `consumer_name` in the consumer group `group_name` for the stream stored at `key`. + + See https://valkey.io/commands/xgroup-delconsumer for more details. + + Args: + key (str): The key of the stream. + group_name (str): The consumer group name. + consumer_name (str): The consumer to delete. + + Returns: + int: The number of pending messages the `consumer` had before it was deleted. + + Examples: + >>> await client.xgroup_del_consumer("mystream", "mygroup", "myconsumer") + 5 # Consumer "myconsumer" was deleted, and had 5 pending messages unclaimed. + """ + return cast( + int, + await self._execute_command( + RequestType.XGroupDelConsumer, [key, group_name, consumer_name] + ), + ) + + async def xreadgroup( + self, + keys_and_ids: Mapping[str, str], + group_name: str, + consumer_name: str, + options: Optional[StreamReadGroupOptions] = None, + ) -> Optional[Mapping[str, Mapping[str, Optional[List[List[str]]]]]]: + """ + Reads entries from the given streams owned by a consumer group. + + See https://valkey.io/commands/xreadgroup for more details. + + Note: + When in cluster mode, all keys in `keys_and_ids` must map to the same hash slot. + + Args: + keys_and_ids (Mapping[str, str]): A mapping of stream keys to stream entry IDs to read from. The special ">" + ID returns messages that were never delivered to any other consumer. Any other valid ID will return + entries pending for the consumer with IDs greater than the one provided. + group_name (str): The consumer group name. 
+ consumer_name (str): The consumer name. The consumer will be auto-created if it does not already exist. + options (Optional[StreamReadGroupOptions]): Options detailing how to read the stream. + + Returns: + Optional[Mapping[str, Mapping[str, Optional[List[List[str]]]]]]: A mapping of stream keys, to a mapping of + stream IDs, to a list of pairings with format `[[field, entry], [field, entry], ...]`. + Returns None if the BLOCK option is given and a timeout occurs, or if there is no stream that can be served. + + Examples: + >>> await client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="1-0")) + >>> await client.xgroup_create("mystream", "mygroup", "0-0") + >>> await client.xreadgroup({"mystream": ">"}, "mygroup", "myconsumer", StreamReadGroupOptions(count=1)) + { + "mystream": { + "1-0": [["field1", "value1"]], + } + } # Read one stream entry from "mystream" using "myconsumer" in the consumer group "mygroup". + """ + args = ["GROUP", group_name, consumer_name] + if options is not None: + args.extend(options.to_args()) + + args.append("STREAMS") + args.extend([key for key in keys_and_ids.keys()]) + args.extend([value for value in keys_and_ids.values()]) + + return cast( + Optional[Mapping[str, Mapping[str, Optional[List[List[str]]]]]], + await self._execute_command(RequestType.XReadGroup, args), + ) + + async def xack( + self, + key: str, + group_name: str, + ids: List[str], + ) -> int: + """ + Removes one or multiple messages from the Pending Entries List (PEL) of a stream consumer group. + This command should be called on pending messages so that such messages do not get processed again by the + consumer group. + + See https://valkey.io/commands/xack for more details. + + Args: + key (str): The key of the stream. + group_name (str): The consumer group name. + ids (List[str]): The stream entry IDs to acknowledge and consume for the given consumer group. + + Returns: + int: The number of messages that were successfully acknowledged. 
+ + Examples: + >>> await client.xadd("mystream", [("field1", "value1")], StreamAddOptions(id="1-0")) + >>> await client.xgroup_create("mystream", "mygroup", "0-0") + >>> await client.xreadgroup({"mystream": ">"}, "mygroup", "myconsumer") + { + "mystream": { + "1-0": [["field1", "value1"]], + } + } # Read one stream entry, the entry is now in the Pending Entries List for "mygroup". + >>> await client.xack("mystream", "mygroup", ["1-0"]) + 1 # 1 pending message was acknowledged and removed from the Pending Entries List for "mygroup". + """ + + return cast( + int, + await self._execute_command(RequestType.XAck, [key, group_name] + ids), + ) + + async def geoadd( + self, + key: str, + members_geospatialdata: Mapping[str, GeospatialData], + existing_options: Optional[ConditionalChange] = None, + changed: bool = False, + ) -> int: + """ + Adds geospatial members with their positions to the specified sorted set stored at `key`. + If a member is already a part of the sorted set, its position is updated. + + See https://valkey.io/commands/geoadd for more details. + + Args: + key (str): The key of the sorted set. + members_geospatialdata (Mapping[str, GeospatialData]): A mapping of member names to their corresponding positions. See `GeospatialData`. + The command will report an error when the user attempts to index coordinates outside the specified ranges. + existing_options (Optional[ConditionalChange]): Options for handling existing members. + - NX: Only add new elements. + - XX: Only update existing elements. + changed (bool): Modify the return value to return the number of changed elements, instead of the number of new elements added. + + Returns: + int: The number of elements added to the sorted set. If `changed` is set, returns the number of elements updated in the sorted set. Examples: @@ -2684,7 +3223,7 @@ async def geosearch( Since: Redis version 6.2.0. 
""" args = _create_geosearch_args( - key, + [key], search_from, seach_by, order_by, @@ -2699,6 +3238,72 @@ async def geosearch( await self._execute_command(RequestType.GeoSearch, args), ) + async def geosearchstore( + self, + destination: str, + source: str, + search_from: Union[str, GeospatialData], + search_by: Union[GeoSearchByRadius, GeoSearchByBox], + count: Optional[GeoSearchCount] = None, + store_dist: bool = False, + ) -> int: + """ + Searches for members in a sorted set stored at `key` representing geospatial data within a circular or rectangular area and stores the result in `destination`. + If `destination` already exists, it is overwritten. Otherwise, a new sorted set will be created. + + To get the result directly, see `geosearch`. + + Note: + When in cluster mode, both `source` and `destination` must map to the same hash slot. + + Args: + destination (str): The key to store the search results. + source (str): The key of the sorted set representing geospatial data to search from. + search_from (Union[str, GeospatialData]): The location to search from. Can be specified either as a member + from the sorted set or as a geospatial data (see `GeospatialData`). + search_by (Union[GeoSearchByRadius, GeoSearchByBox]): The search criteria. + For circular area search, see `GeoSearchByRadius`. + For rectangular area search, see `GeoSearchByBox`. + count (Optional[GeoSearchCount]): Specifies the maximum number of results to store. See `GeoSearchCount`. + If not specified, stores all results. + store_dist (bool): Determines what is stored as the sorted set score. Defaults to False. + - If set to False, the geohash of the location will be stored as the sorted set score. + - If set to True, the distance from the center of the shape (circle or box) will be stored as the sorted set score. + The distance is represented as a floating-point number in the same unit specified for that shape. 
+ + Returns: + int: The number of elements in the resulting sorted set stored at `destination`. + + Examples: + >>> await client.geoadd("my_geo_sorted_set", {"Palermo": GeospatialData(13.361389, 38.115556), "Catania": GeospatialData(15.087269, 37.502669)}) + >>> await client.geosearchstore("my_dest_sorted_set", "my_geo_sorted_set", "Catania", GeoSearchByRadius(175, GeoUnit.MILES)) + 2 # Number of elements stored in "my_dest_sorted_set". + >>> await client.zrange_withscores("my_dest_sorted_set", RangeByIndex(0, -1)) + {"Palermo": 3479099956230698.0, "Catania": 3479447370796909.0} # The elements within the search area, with their geohash as score. + >>> await client.geosearchstore("my_dest_sorted_set", "my_geo_sorted_set", GeospatialData(15, 37), GeoSearchByBox(400, 400, GeoUnit.KILOMETERS), store_dist=True) + 2 # Number of elements stored in "my_dest_sorted_set", with distance as score. + >>> await client.zrange_withscores("my_dest_sorted_set", RangeByIndex(0, -1)) + {"Catania": 56.4412578701582, "Palermo": 190.44242984775784} # The elements within the search area, with the distance as score. + + Since: Redis version 6.2.0. + """ + args = _create_geosearch_args( + [destination, source], + search_from, + search_by, + None, + count, + False, + False, + False, + store_dist, + ) + + return cast( + int, + await self._execute_command(RequestType.GeoSearchStore, args), + ) + async def zadd( self, key: str, @@ -2888,6 +3493,38 @@ async def zcount( ), ) + async def zincrby(self, key: str, increment: float, member: str) -> float: + """ + Increments the score of `member` in the sorted set stored at `key` by `increment`. + If `member` does not exist in the sorted set, it is added with `increment` as its score. + If `key` does not exist, a new sorted set is created with the specified member as its sole member. + + See https://valkey.io/commands/zincrby/ for more details. + + Args: + key (str): The key of the sorted set. + increment (float): The score increment. 
+ member (str): A member of the sorted set. + + Returns: + float: The new score of `member`. + + Examples: + >>> await client.zadd("my_sorted_set", {"member": 10.5, "member2": 8.2}) + >>> await client.zincrby("my_sorted_set", 1.2, "member") + 11.7 # The member existed in the set before score was altered, the new score is 11.7. + >>> await client.zincrby("my_sorted_set", -1.7, "member") + 10.0 # Negative increment, decrements the score. + >>> await client.zincrby("my_sorted_set", 5.5, "non_existing_member") + 5.5 # A new member is added to the sorted set with the score being 5.5. + """ + return cast( + float, + await self._execute_command( + RequestType.ZIncrBy, [key, str(increment), member] + ), + ) + async def zpopmax( self, key: str, count: Optional[int] = None ) -> Mapping[str, float]: @@ -3198,6 +3835,65 @@ async def zrank_withscore( await self._execute_command(RequestType.ZRank, [key, member, "WITHSCORE"]), ) + async def zrevrank(self, key: str, member: str) -> Optional[int]: + """ + Returns the rank of `member` in the sorted set stored at `key`, where scores are ordered from the highest to + lowest, starting from `0`. + + To get the rank of `member` with its score, see `zrevrank_withscore`. + + See https://valkey.io/commands/zrevrank for more details. + + Args: + key (str): The key of the sorted set. + member (str): The member whose rank is to be retrieved. + + Returns: + Optional[int]: The rank of `member` in the sorted set, where ranks are ordered from high to low based on scores. + If `key` doesn't exist, or if `member` is not present in the set, `None` will be returned. 
+ + Examples: + >>> await client.zadd("my_sorted_set", {"member1": 10.5, "member2": 8.2, "member3": 9.6}) + >>> await client.zrevrank("my_sorted_set", "member2") + 2 # "member2" has the third-highest score in the sorted set "my_sorted_set" + """ + return cast( + Optional[int], + await self._execute_command(RequestType.ZRevRank, [key, member]), + ) + + async def zrevrank_withscore( + self, key: str, member: str + ) -> Optional[List[Union[int, float]]]: + """ + Returns the rank of `member` in the sorted set stored at `key` with its score, where scores are ordered from the + highest to lowest, starting from `0`. + + See https://valkey.io/commands/zrevrank for more details. + + Args: + key (str): The key of the sorted set. + member (str): The member whose rank is to be retrieved. + + Returns: + Optional[List[Union[int, float]]]: A list containing the rank (as `int`) and score (as `float`) of `member` + in the sorted set, where ranks are ordered from high to low based on scores. + If `key` doesn't exist, or if `member` is not present in the set, `None` will be returned. + + Examples: + >>> await client.zadd("my_sorted_set", {"member1": 10.5, "member2": 8.2, "member3": 9.6}) + >>> await client.zrevrank_withscore("my_sorted_set", "member2") + [2, 8.2] # "member2" with score 8.2 has the third-highest score in the sorted set "my_sorted_set" + + Since: Redis version 7.2.0. + """ + return cast( + Optional[List[Union[int, float]]], + await self._execute_command( + RequestType.ZRevRank, [key, member, "WITHSCORE"] + ), + ) + async def zrem( self, key: str, @@ -4114,6 +4810,41 @@ async def pfmerge(self, destination: str, source_keys: List[str]) -> TOK: ), ) + async def bitcount(self, key: str, options: Optional[OffsetOptions] = None) -> int: + """ + Counts the number of set bits (population counting) in the string stored at `key`. The `options` argument can + optionally be provided to count the number of bits in a specific string interval. 
+ + See https://valkey.io/commands/bitcount for more details. + + Args: + key (str): The key for the string to count the set bits of. + options (Optional[OffsetOptions]): The offset options. + + Returns: + int: If `options` is provided, returns the number of set bits in the string interval specified by `options`. + If `options` is not provided, returns the number of set bits in the string stored at `key`. + Otherwise, if `key` is missing, returns `0` as it is treated as an empty string. + + Examples: + >>> await client.bitcount("my_key1") + 2 # The string stored at "my_key1" contains 2 set bits. + >>> await client.bitcount("my_key2", OffsetOptions(1, 3)) + 2 # The second to fourth bytes of the string stored at "my_key2" contain 2 set bits. + >>> await client.bitcount("my_key3", OffsetOptions(1, 1, BitmapIndexType.BIT)) + 1 # Indicates that the second bit of the string stored at "my_key3" is set. + >>> await client.bitcount("my_key3", OffsetOptions(-1, -1, BitmapIndexType.BIT)) + 1 # Indicates that the last bit of the string stored at "my_key3" is set. + """ + args = [key] + if options is not None: + args = args + options.to_args() + + return cast( + int, + await self._execute_command(RequestType.BitCount, args), + ) + async def setbit(self, key: str, offset: int, value: int) -> int: """ Sets or clears the bit at `offset` in the string value stored at `key`. The `offset` is a zero-based index, @@ -4142,6 +4873,213 @@ async def setbit(self, key: str, offset: int, value: int) -> int: ), ) + async def getbit(self, key: str, offset: int) -> int: + """ + Returns the bit value at `offset` in the string value stored at `key`. + `offset` should be greater than or equal to zero. + + See https://valkey.io/commands/getbit for more details. + + Args: + key (str): The key of the string. + offset (int): The index of the bit to return. + + Returns: + int: The bit at the given `offset` of the string. 
Returns `0` if the key is empty or if the `offset` exceeds + the length of the string. + + Examples: + >>> await client.getbit("my_key", 1) + 1 # Indicates that the second bit of the string stored at "my_key" is set to 1. + """ + return cast( + int, + await self._execute_command(RequestType.GetBit, [key, str(offset)]), + ) + + async def bitpos(self, key: str, bit: int, start: Optional[int] = None) -> int: + """ + Returns the position of the first bit matching the given `bit` value. The optional starting offset + `start` is a zero-based index, with `0` being the first byte of the list, `1` being the next byte and so on. + The offset can also be a negative number indicating an offset starting at the end of the list, with `-1` being + the last byte of the list, `-2` being the penultimate, and so on. + + See https://valkey.io/commands/bitpos for more details. + + Args: + key (str): The key of the string. + bit (int): The bit value to match. Must be `0` or `1`. + start (Optional[int]): The starting offset. + + Returns: + int: The position of the first occurrence of `bit` in the binary value of the string held at `key`. + If `start` was provided, the search begins at the offset indicated by `start`. + + Examples: + >>> await client.set("key1", "A1") # "A1" has binary value 01000001 00110001 + >>> await client.bitpos("key1", 1) + 1 # The first occurrence of bit value 1 in the string stored at "key1" is at the second position. + >>> await client.bitpos("key1", 1, -1) + 10 # The first occurrence of bit value 1, starting at the last byte in the string stored at "key1", is at the eleventh position. 
+ """ + args = [key, str(bit)] if start is None else [key, str(bit), str(start)] + return cast( + int, + await self._execute_command(RequestType.BitPos, args), + ) + + async def bitpos_interval( + self, + key: str, + bit: int, + start: int, + end: int, + index_type: Optional[BitmapIndexType] = None, + ) -> int: + """ + Returns the position of the first bit matching the given `bit` value. The offsets are zero-based indexes, with + `0` being the first element of the list, `1` being the next, and so on. These offsets can also be negative + numbers indicating offsets starting at the end of the list, with `-1` being the last element of the list, `-2` + being the penultimate, and so on. + + If you are using Redis 7.0.0 or above, the optional `index_type` can also be provided to specify whether the + `start` and `end` offsets specify BIT or BYTE offsets. If `index_type` is not provided, BYTE offsets + are assumed. If BIT is specified, `start=0` and `end=2` means to look at the first three bits. If BYTE is + specified, `start=0` and `end=2` means to look at the first three bytes. + + See https://valkey.io/commands/bitpos for more details. + + Args: + key (str): The key of the string. + bit (int): The bit value to match. Must be `0` or `1`. + start (int): The starting offset. + end (int): The ending offset. + index_type (Optional[BitmapIndexType]): The index offset type. This option can only be specified if you are + using Redis version 7.0.0 or above. Could be either `BitmapIndexType.BYTE` or `BitmapIndexType.BIT`. + If no index type is provided, the indexes will be assumed to be byte indexes. + + Returns: + int: The position of the first occurrence from the `start` to the `end` offsets of the `bit` in the binary + value of the string held at `key`. 
+ + Examples: + >>> await client.set("key1", "A12") # "A12" has binary value 01000001 00110001 00110010 + >>> await client.bitpos_interval("key1", 1, 1, -1) + 10 # The first occurrence of bit value 1 in the second byte to the last byte of the string stored at "key1" is at the eleventh position. + >>> await client.bitpos_interval("key1", 1, 2, 9, BitmapIndexType.BIT) + 7 # The first occurrence of bit value 1 in the third to tenth bits of the string stored at "key1" is at the eighth position. + """ + if index_type is not None: + args = [key, str(bit), str(start), str(end), index_type.value] + else: + args = [key, str(bit), str(start), str(end)] + + return cast( + int, + await self._execute_command(RequestType.BitPos, args), + ) + + async def bitop( + self, operation: BitwiseOperation, destination: str, keys: List[str] + ) -> int: + """ + Perform a bitwise operation between multiple keys (containing string values) and store the result in the + `destination`. + + See https://valkey.io/commands/bitop for more details. + + Note: + When in cluster mode, `destination` and all `keys` must map to the same hash slot. + + Args: + operation (BitwiseOperation): The bitwise operation to perform. + destination (str): The key that will store the resulting string. + keys (List[str]): The list of keys to perform the bitwise operation on. + + Returns: + int: The size of the string stored in `destination`. 
+ + Examples: + >>> await client.set("key1", "A") # "A" has binary value 01000001 + >>> await client.set("key2", "B") # "B" has binary value 01000010 + >>> await client.bitop(BitwiseOperation.AND, "destination", ["key1", "key2"]) + 1 # The size of the resulting string stored in "destination" is 1 + >>> await client.get("destination") + "@" # "@" has binary value 01000000 + """ + return cast( + int, + await self._execute_command( + RequestType.BitOp, [operation.value, destination] + keys + ), + ) + + async def bitfield( + self, key: str, subcommands: List[BitFieldSubCommands] + ) -> List[Optional[int]]: + """ + Reads or modifies the array of bits representing the string that is held at `key` based on the specified + `subcommands`. + + See https://valkey.io/commands/bitfield for more details. + + Args: + key (str): The key of the string. + subcommands (List[BitFieldSubCommands]): The subcommands to be performed on the binary value of the string + at `key`, which could be any of the following: + - `BitFieldGet` + - `BitFieldSet` + - `BitFieldIncrBy` + - `BitFieldOverflow` + + Returns: + List[Optional[int]]: An array of results from the executed subcommands: + - `BitFieldGet` returns the value in `Offset` or `OffsetMultiplier`. + - `BitFieldSet` returns the old value in `Offset` or `OffsetMultiplier`. + - `BitFieldIncrBy` returns the new value in `Offset` or `OffsetMultiplier`. + - `BitFieldOverflow` determines the behavior of the "SET" and "INCRBY" subcommands when an overflow or + underflow occurs. "OVERFLOW" does not return a value and does not contribute a value to the list + response. + + Examples: + >>> await client.set("my_key", "A") # "A" has binary value 01000001 + >>> await client.bitfield("my_key", [BitFieldSet(UnsignedEncoding(2), Offset(1), 3), BitFieldGet(UnsignedEncoding(2), Offset(1))]) + [2, 3] # The old value at offset 1 with an unsigned encoding of 2 was 2. The new value at offset 1 with an unsigned encoding of 2 is 3.
+ """ + args = [key] + _create_bitfield_args(subcommands) + return cast( + List[Optional[int]], + await self._execute_command(RequestType.BitField, args), + ) + + async def bitfield_read_only( + self, key: str, subcommands: List[BitFieldGet] + ) -> List[int]: + """ + Reads the array of bits representing the string that is held at `key` based on the specified `subcommands`. + + See https://valkey.io/commands/bitfield_ro for more details. + + Args: + key (str): The key of the string. + subcommands (List[BitFieldGet]): The "GET" subcommands to be performed. + + Returns: + List[int]: An array of results from the "GET" subcommands. + + Examples: + >>> await client.set("my_key", "A") # "A" has binary value 01000001 + >>> await client.bitfield_read_only("my_key", [BitFieldGet(UnsignedEncoding(2), Offset(1))]) + [2] # The value at offset 1 with an unsigned encoding of 2 is 2. + + Since: Redis version 6.0.0. + """ + args = [key] + _create_bitfield_read_only_args(subcommands) + return cast( + List[int], + await self._execute_command(RequestType.BitFieldReadOnly, args), + ) + async def object_encoding(self, key: str) -> Optional[str]: """ Returns the internal encoding for the Redis object stored at `key`. @@ -4228,3 +5166,144 @@ async def object_refcount(self, key: str) -> Optional[int]: Optional[int], await self._execute_command(RequestType.ObjectRefCount, [key]), ) + + async def srandmember(self, key: str) -> Optional[str]: + """ + Returns a random element from the set value stored at 'key'. + + See https://valkey.io/commands/srandmember for more details. + + Args: + key (str): The key from which to retrieve the set member. + + Returns: + str: A random element from the set, or None if 'key' does not exist. + + Examples: + >>> await client.sadd("my_set", ["member1", "member2"]) + >>> await client.srandmember("my_set") + "member1" # "member1" is a random member of "my_set".
+ >>> await client.srandmember("non_existing_set") + None # "non_existing_set" is not an existing key, so None was returned. + """ + return cast( + Optional[str], + await self._execute_command(RequestType.SRandMember, [key]), + ) + + async def srandmember_count(self, key: str, count: int) -> List[str]: + """ + Returns one or more random elements from the set value stored at 'key'. + + See https://valkey.io/commands/srandmember for more details. + + Args: + key (str): The key of the set. + count (int): The number of members to return. + If `count` is positive, returns unique members. + If `count` is negative, allows for duplicate members. + + Returns: + List[str]: A list of members from the set. + If the set does not exist or is empty, the response will be an empty list. + + Examples: + >>> await client.sadd("my_set", ["member1", "member2"]) + >>> await client.srandmember_count("my_set", -3) + ["member1", "member1", "member2"] # "member1" and "member2" are random members of "my_set". + >>> await client.srandmember_count("non_existing_set", 3) + [] # "non_existing_set" is not an existing key, so an empty list was returned. + """ + return cast( + List[str], + await self._execute_command(RequestType.SRandMember, [key, str(count)]), + ) + + async def getex( + self, + key: str, + expiry: Optional[ExpiryGetEx] = None, + ) -> Optional[str]: + """ + Get the value of `key` and optionally set its expiration. `GETEX` is similar to `GET`. + See https://valkey.io/commands/getex for more details. + + Args: + key (str): The key to get. + expiry (Optional[ExpiryGetEx], optional): sets expiration to the given key. + Equivalent to [`EX` | `PX` | `EXAT` | `PXAT` | `PERSIST`] in the Redis API.
+ + Returns: + Optional[str]: + If `key` exists, return the value stored at `key` + If `key` does not exist, return `None` + + Examples: + >>> await client.set("key", "value") + 'OK' + >>> await client.getex("key") + 'value' + >>> await client.getex("key", ExpiryGetEx(ExpiryTypeGetEx.SEC, 1)) + 'value' + >>> time.sleep(1) + >>> await client.getex("key") + None + + Since: Redis version 6.2.0. + """ + args = [key] + if expiry is not None: + args.extend(expiry.get_cmd_args()) + return cast( + Optional[str], + await self._execute_command(RequestType.GetEx, args), + ) + + @dataclass + class PubSubMsg: + """ + Describes the incoming pubsub message + + Attributes: + message (str): Incoming message. + channel (str): Name of the channel that triggered the message. + pattern (Optional[str]): Pattern that triggered the message. + """ + + message: str + channel: str + pattern: Optional[str] + + async def get_pubsub_message(self) -> PubSubMsg: + """ + Returns the next pubsub message. + Throws WrongConfiguration in cases: + 1. No pubsub subscriptions are configured for the client + 2. Callback is configured with the pubsub subscriptions + + See https://valkey.io/docs/topics/pubsub/ for more details. + + Returns: + PubSubMsg: The next pubsub message + + Examples: + >>> pubsub_msg = await listening_client.get_pubsub_message() + """ + ... + + def try_get_pubsub_message(self) -> Optional[PubSubMsg]: + """ + Tries to return the next pubsub message. + Throws WrongConfiguration in cases: + 1. No pubsub subscriptions are configured for the client + 2. Callback is configured with the pubsub subscriptions + + See https://valkey.io/docs/topics/pubsub/ for more details. + + Returns: + Optional[PubSubMsg]: The next pubsub message or None + + Examples: + >>> pubsub_msg = listening_client.try_get_pubsub_message() + """ + ...
diff --git a/python/python/glide/async_commands/redis_modules/json.py b/python/python/glide/async_commands/redis_modules/json.py index 3bcaeedb97..5ed6d96302 100644 --- a/python/python/glide/async_commands/redis_modules/json.py +++ b/python/python/glide/async_commands/redis_modules/json.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 """module for `RedisJSON` commands. Examples: @@ -19,8 +19,8 @@ from glide.async_commands.core import ConditionalChange from glide.constants import TOK, TJsonResponse +from glide.glide_client import TGlideClient from glide.protobuf.redis_request_pb2 import RequestType -from glide.redis_client import TRedisClient class JsonGetOptions: @@ -55,7 +55,7 @@ def get_options(self) -> List[str]: async def set( - client: TRedisClient, + client: TGlideClient, key: str, path: str, value: str, @@ -67,7 +67,7 @@ async def set( See https://redis.io/commands/json.set/ for more details. Args: - client (TRedisClient): The Redis client to execute the command. + client (TGlideClient): The Redis client to execute the command. key (str): The key of the JSON document. path (str): Represents the path within the JSON document where the value will be set. The key will be modified only if `value` is added as the last child in the specified `path`, or if the specified `path` acts as the parent of a new child being added. @@ -95,7 +95,7 @@ async def set( async def get( - client: TRedisClient, + client: TGlideClient, key: str, paths: Optional[Union[str, List[str]]] = None, options: Optional[JsonGetOptions] = None, @@ -106,7 +106,7 @@ async def get( See https://redis.io/commands/json.get/ for more details. Args: - client (TRedisClient): The Redis client to execute the command. + client (TGlideClient): The Redis client to execute the command. key (str): The key of the JSON document. 
paths (Optional[Union[str, List[str]]]): The path or list of paths within the JSON document. Default is root `$`. options (Optional[JsonGetOptions]): Options for formatting the string representation of the JSON data. See `JsonGetOptions`. @@ -140,7 +140,7 @@ async def get( async def delete( - client: TRedisClient, + client: TGlideClient, key: str, path: Optional[str] = None, ) -> int: @@ -150,7 +150,7 @@ async def delete( See https://redis.io/commands/json.del/ for more details. Args: - client (TRedisClient): The Redis client to execute the command. + client (TGlideClient): The Redis client to execute the command. key (str): The key of the JSON document. path (Optional[str]): Represents the path within the JSON document where the value will be deleted. If None, deletes the entire JSON document at `key`. Defaults to None. @@ -177,7 +177,7 @@ async def delete( async def forget( - client: TRedisClient, + client: TGlideClient, key: str, path: Optional[str] = None, ) -> Optional[int]: @@ -187,7 +187,7 @@ async def forget( See https://redis.io/commands/json.forget/ for more details. Args: - client (TRedisClient): The Redis client to execute the command. + client (TGlideClient): The Redis client to execute the command. key (str): The key of the JSON document. path (Optional[str]): Represents the path within the JSON document where the value will be deleted. If None, deletes the entire JSON document at `key`. Defaults to None. @@ -215,7 +215,7 @@ async def forget( async def toggle( - client: TRedisClient, + client: TGlideClient, key: str, path: str, ) -> TJsonResponse[bool]: @@ -225,7 +225,7 @@ async def toggle( See https://redis.io/commands/json.toggle/ for more details. Args: - client (TRedisClient): The Redis client to execute the command. + client (TGlideClient): The Redis client to execute the command. key (str): The key of the JSON document. path (str): The JSONPath to specify. 
diff --git a/python/python/glide/async_commands/sorted_set.py b/python/python/glide/async_commands/sorted_set.py index d9f99a42c9..2fa7d851f9 100644 --- a/python/python/glide/async_commands/sorted_set.py +++ b/python/python/glide/async_commands/sorted_set.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from enum import Enum from typing import List, Optional, Tuple, Union @@ -356,7 +356,7 @@ def _create_zinter_zunion_cmd_args( def _create_geosearch_args( - key: str, + keys: List[str], search_from: Union[str, GeospatialData], seach_by: Union[GeoSearchByRadius, GeoSearchByBox], order_by: Optional[OrderBy] = None, @@ -364,8 +364,9 @@ def _create_geosearch_args( with_coord: bool = False, with_dist: bool = False, with_hash: bool = False, + store_dist: bool = False, ) -> List[str]: - args = [key] + args = keys if isinstance(search_from, str): args += ["FROMMEMBER", search_from] else: @@ -389,4 +390,7 @@ def _create_geosearch_args( if with_hash: args.append("WITHHASH") + if store_dist: + args.append("STOREDIST") + return args diff --git a/python/python/glide/async_commands/standalone_commands.py b/python/python/glide/async_commands/standalone_commands.py index e2757e5eb2..497fbdd901 100644 --- a/python/python/glide/async_commands/standalone_commands.py +++ b/python/python/glide/async_commands/standalone_commands.py @@ -1,13 +1,18 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from __future__ import annotations from typing import Dict, List, Mapping, Optional, cast from glide.async_commands.command_args import Limit, OrderBy -from glide.async_commands.core import CoreCommands, InfoSection, _build_sort_args +from glide.async_commands.core import ( + CoreCommands, + FlushMode, + InfoSection, + _build_sort_args, +) from 
glide.async_commands.transaction import BaseTransaction, Transaction -from glide.constants import TOK, TResult +from glide.constants import OK, TOK, TResult from glide.protobuf.redis_request_pb2 import RequestType @@ -439,3 +444,154 @@ async def sort_store( ) result = await self._execute_command(RequestType.Sort, args) return cast(int, result) + + async def publish(self, message: str, channel: str) -> TOK: + """ + Publish a message on pubsub channel. + See https://valkey.io/commands/publish for more details. + + Args: + message (str): Message to publish + channel (str): Channel to publish the message on. + + Returns: + TOK: a simple `OK` response. + + Examples: + >>> await client.publish("Hi all!", "global-channel") + "OK" + """ + await self._execute_command(RequestType.Publish, [channel, message]) + return cast(TOK, OK) + + async def flushall(self, flush_mode: Optional[FlushMode] = None) -> TOK: + """ + Deletes all the keys of all the existing databases. This command never fails. + + See https://valkey.io/commands/flushall for more details. + + Args: + flush_mode (Optional[FlushMode]): The flushing mode, could be either `SYNC` or `ASYNC`. + + Returns: + TOK: OK. + + Examples: + >>> await client.flushall(FlushMode.ASYNC) + OK # This command never fails. + """ + args = [] + if flush_mode is not None: + args.append(flush_mode.value) + + return cast( + TOK, + await self._execute_command(RequestType.FlushAll, args), + ) + + async def flushdb(self, flush_mode: Optional[FlushMode] = None) -> TOK: + """ + Deletes all the keys of the currently selected database. This command never fails. + + See https://valkey.io/commands/flushdb for more details. + + Args: + flush_mode (Optional[FlushMode]): The flushing mode, could be either `SYNC` or `ASYNC`. + + Returns: + TOK: OK. + + Examples: + >>> await client.flushdb() + OK # The keys of the currently selected database were deleted. 
+ >>> await client.flushdb(FlushMode.ASYNC) + OK # The keys of the currently selected database were deleted asynchronously. + """ + args = [] + if flush_mode is not None: + args.append(flush_mode.value) + + return cast( + TOK, + await self._execute_command(RequestType.FlushDB, args), + ) + + async def copy( + self, + source: str, + destination: str, + destinationDB: Optional[int] = None, + replace: Optional[bool] = None, + ) -> bool: + """ + Copies the value stored at the `source` to the `destination` key. If `destinationDB` + is specified, the value will be copied to the database specified by `destinationDB`, + otherwise the current database will be used. When `replace` is True, removes the + `destination` key first if it already exists, otherwise performs no action. + + See https://valkey.io/commands/copy for more details. + + Args: + source (str): The key to the source value. + destination (str): The key where the value should be copied to. + destinationDB (Optional[int]): The alternative logical database index for the destination key. + replace (Optional[bool]): If the destination key should be removed before copying the value to it. + + Returns: + bool: True if the source was copied. Otherwise, return False. + + Examples: + >>> await client.set("source", "sheep") + >>> await client.copy("source", "destination", 1, False) + True # Source was copied + >>> await client.select(1) + >>> await client.get("destination") + "sheep" + + Since: Redis version 6.2.0. + """ + args = [source, destination] + if destinationDB is not None: + args.extend(["DB", str(destinationDB)]) + if replace is True: + args.append("REPLACE") + return cast( + bool, + await self._execute_command(RequestType.Copy, args), + ) + + async def lolwut( + self, + version: Optional[int] = None, + parameters: Optional[List[int]] = None, + ) -> str: + """ + Displays a piece of generative computer art and the Redis version. + + See https://valkey.io/commands/lolwut for more details. 
+ + Args: + version (Optional[int]): Version of computer art to generate. + parameters (Optional[List[int]]): Additional set of arguments in order to change the output: + For version `5`, those are length of the line, number of squares per row, and number of squares per column. + For version `6`, those are number of columns and number of lines. + + Returns: + str: A piece of generative computer art along with the current Redis version. + + Examples: + >>> await client.lolwut(6, [40, 20]) + "Redis ver. 7.2.3" # Indicates the current Redis version + >>> await client.lolwut(5, [30, 5, 5]) + "Redis ver. 7.2.3" # Indicates the current Redis version + """ + args = [] + if version is not None: + args.extend(["VERSION", str(version)]) + if parameters: + for var in parameters: + args.append(str(var)) + return cast( + str, + await self._execute_command(RequestType.Lolwut, args), + ) diff --git a/python/python/glide/async_commands/stream.py b/python/python/glide/async_commands/stream.py new file mode 100644 index 0000000000..5a4ea33042 --- /dev/null +++ b/python/python/glide/async_commands/stream.py @@ -0,0 +1,337 @@ +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 +from __future__ import annotations + +from abc import ABC, abstractmethod +from typing import List, Optional, Union + + +class StreamTrimOptions(ABC): + """ + Abstract base class for stream trim options. + """ + + @abstractmethod + def __init__( + self, + exact: bool, + threshold: Union[str, int], + method: str, + limit: Optional[int] = None, + ): + """ + Initialize stream trim options. + + Args: + exact (bool): If `true`, the stream will be trimmed exactly. + Otherwise the stream will be trimmed in a near-exact manner, which is more efficient. + threshold (Union[str, int]): Threshold for trimming. + method (str): Method for trimming (e.g., MINID, MAXLEN). + limit (Optional[int]): Max number of entries to be trimmed. Defaults to None.
+ Note: If `exact` is set to `True`, `limit` cannot be specified. + """ + if exact and limit: + raise ValueError( + "If `exact` is set to `True`, `limit` cannot be specified." + ) + self.exact = exact + self.threshold = threshold + self.method = method + self.limit = limit + + def to_args(self) -> List[str]: + """ + Convert options to arguments for Redis command. + + Returns: + List[str]: List of arguments for Redis command. + """ + option_args = [ + self.method, + "=" if self.exact else "~", + str(self.threshold), + ] + if self.limit is not None: + option_args.extend(["LIMIT", str(self.limit)]) + return option_args + + +class TrimByMinId(StreamTrimOptions): + """ + Stream trim option to trim by minimum ID. + """ + + def __init__(self, exact: bool, threshold: str, limit: Optional[int] = None): + """ + Initialize trim option by minimum ID. + + Args: + exact (bool): If `true`, the stream will be trimmed exactly. + Otherwise the stream will be trimmed in a near-exact manner, which is more efficient. + threshold (str): Threshold for trimming by minimum ID. + limit (Optional[int]): Max number of entries to be trimmed. Defaults to None. + Note: If `exact` is set to `True`, `limit` cannot be specified. + """ + super().__init__(exact, threshold, "MINID", limit) + + +class TrimByMaxLen(StreamTrimOptions): + """ + Stream trim option to trim by maximum length. + """ + + def __init__(self, exact: bool, threshold: int, limit: Optional[int] = None): + """ + Initialize trim option by maximum length. + + Args: + exact (bool): If `true`, the stream will be trimmed exactly. + Otherwise the stream will be trimmed in a near-exact manner, which is more efficient. + threshold (int): Threshold for trimming by maximum length. + limit (Optional[int]): Max number of entries to be trimmed. Defaults to None. + Note: If `exact` is set to `True`, `limit` cannot be specified. 
+ """ + super().__init__(exact, threshold, "MAXLEN", limit) + + +class StreamAddOptions: + """ + Options for adding entries to a stream. + """ + + def __init__( + self, + id: Optional[str] = None, + make_stream: bool = True, + trim: Optional[StreamTrimOptions] = None, + ): + """ + Initialize stream add options. + + Args: + id (Optional[str]): ID for the new entry. If set, the new entry will be added with this ID. If not specified, '*' is used. + make_stream (bool, optional): If set to False, a new stream won't be created if no stream matches the given key. + trim (Optional[StreamTrimOptions]): If set, the add operation will also trim the older entries in the stream. See `StreamTrimOptions`. + """ + self.id = id + self.make_stream = make_stream + self.trim = trim + + def to_args(self) -> List[str]: + """ + Convert options to arguments for Redis command. + + Returns: + List[str]: List of arguments for Redis command. + """ + option_args = [] + if not self.make_stream: + option_args.append("NOMKSTREAM") + if self.trim: + option_args.extend(self.trim.to_args()) + option_args.append(self.id if self.id else "*") + + return option_args + + +class StreamRangeBound(ABC): + """ + Abstract Base Class used in the `XRANGE` and `XREVRANGE` commands to specify the starting and ending range bound for + the stream search by stream ID. + """ + + @abstractmethod + def to_arg(self) -> str: + """ + Returns the stream range bound as a string argument to be used in the `XRANGE` or `XREVRANGE` commands. + """ + pass + + +class MinId(StreamRangeBound): + """ + Stream ID boundary used to specify the minimum stream entry ID. Can be used in the `XRANGE` or `XREVRANGE` commands + to get the first stream ID. + """ + + MIN_RANGE_REDIS_API = "-" + + def to_arg(self) -> str: + return self.MIN_RANGE_REDIS_API + + +class MaxId(StreamRangeBound): + """ + Stream ID boundary used to specify the maximum stream entry ID. Can be used in the `XRANGE` or `XREVRANGE` commands + to get the last stream ID. 
+ """ + + MAX_RANGE_REDIS_API = "+" + + def to_arg(self) -> str: + return self.MAX_RANGE_REDIS_API + + +class IdBound(StreamRangeBound): + """ + Inclusive (closed) stream ID boundary used to specify a range of IDs to search. Stream ID bounds can be complete + with a timestamp and sequence number separated by a dash ("-"), for example "1526985054069-0". Stream ID bounds can + also be incomplete, with just a timestamp. + """ + + @staticmethod + def from_timestamp(timestamp: int) -> IdBound: + """ + Creates an incomplete stream ID boundary without the sequence number for a range search. + + Args: + timestamp (int): The stream ID timestamp. + """ + return IdBound(str(timestamp)) + + def __init__(self, stream_id: str): + """ + Creates a stream ID boundary for a range search. + + Args: + stream_id (str): The stream ID. + """ + self.stream_id = stream_id + + def to_arg(self) -> str: + return self.stream_id + + +class ExclusiveIdBound(StreamRangeBound): + """ + Exclusive (open) stream ID boundary used to specify a range of IDs to search. Stream ID bounds can be complete with + a timestamp and sequence number separated by a dash ("-"), for example "1526985054069-0". Stream ID bounds can also + be incomplete, with just a timestamp. + """ + + EXCLUSIVE_BOUND_REDIS_API = "(" + + @staticmethod + def from_timestamp(timestamp: int) -> ExclusiveIdBound: + """ + Creates an incomplete stream ID boundary without the sequence number for a range search. + + Args: + timestamp (int): The stream ID timestamp. + """ + return ExclusiveIdBound(str(timestamp)) + + def __init__(self, stream_id: str): + """ + Creates a stream ID boundary for a range search. + + Args: + stream_id (str): The stream ID. 
+ """ + self.stream_id = f"{self.EXCLUSIVE_BOUND_REDIS_API}{stream_id}" + + def to_arg(self) -> str: + return self.stream_id + + +class StreamReadOptions: + READ_COUNT_REDIS_API = "COUNT" + READ_BLOCK_REDIS_API = "BLOCK" + + def __init__(self, block_ms: Optional[int] = None, count: Optional[int] = None): + """ + Options for reading entries from streams. Can be used as an optional argument to `XREAD`. + + Args: + block_ms (Optional[int]): If provided, the request will be blocked for the set amount of milliseconds or + until the server has the required number of entries. Equivalent to `BLOCK` in the Redis API. + count (Optional[int]): The maximum number of elements requested. Equivalent to `COUNT` in the Redis API. + """ + self.block_ms = block_ms + self.count = count + + def to_args(self) -> List[str]: + """ + Returns the options as a list of string arguments to be used in the `XREAD` command. + + Returns: + List[str]: The options as a list of arguments for the `XREAD` command. + """ + args = [] + if self.block_ms is not None: + args.extend([self.READ_BLOCK_REDIS_API, str(self.block_ms)]) + + if self.count is not None: + args.extend([self.READ_COUNT_REDIS_API, str(self.count)]) + + return args + + +class StreamGroupOptions: + MAKE_STREAM_REDIS_API = "MKSTREAM" + ENTRIES_READ_REDIS_API = "ENTRIESREAD" + + def __init__( + self, make_stream: bool = False, entries_read_id: Optional[str] = None + ): + """ + Options for creating stream consumer groups. Can be used as an optional argument to `XGROUP CREATE`. + + Args: + make_stream (bool): If set to True and the stream doesn't exist, this creates a new stream with a + length of 0. + entries_read_id: (Optional[str]): An arbitrary ID (that isn't the first ID, last ID, or the zero ID ("0-0")) + used to find out how many entries are between the arbitrary ID (excluding it) and the stream's last + entry. This option can only be specified if you are using Redis version 7.0.0 or above. 
+ """ + self.make_stream = make_stream + self.entries_read_id = entries_read_id + + def to_args(self) -> List[str]: + """ + Returns the options as a list of string arguments to be used in the `XGROUP CREATE` command. + + Returns: + List[str]: The options as a list of arguments for the `XGROUP CREATE` command. + """ + args = [] + if self.make_stream is True: + args.append(self.MAKE_STREAM_REDIS_API) + + if self.entries_read_id is not None: + args.extend([self.ENTRIES_READ_REDIS_API, self.entries_read_id]) + + return args + + +class StreamReadGroupOptions(StreamReadOptions): + READ_NOACK_REDIS_API = "NOACK" + + def __init__( + self, no_ack=False, block_ms: Optional[int] = None, count: Optional[int] = None + ): + """ + Options for reading entries from streams using a consumer group. Can be used as an optional argument to + `XREADGROUP`. + + Args: + no_ack (bool): If set, messages are not added to the Pending Entries List (PEL). This is equivalent to + acknowledging the message when it is read. Equivalent to `NOACK` in the Redis API. + block_ms (Optional[int]): If provided, the request will be blocked for the set amount of milliseconds or + until the server has the required number of entries. Equivalent to `BLOCK` in the Redis API. + count (Optional[int]): The maximum number of elements requested. Equivalent to `COUNT` in the Redis API. + """ + super().__init__(block_ms=block_ms, count=count) + self.no_ack = no_ack + + def to_args(self) -> List[str]: + """ + Returns the options as a list of string arguments to be used in the `XREADGROUP` command. + + Returns: + List[str]: The options as a list of arguments for the `XREADGROUP` command. 
+ """ + args = super().to_args() + if self.no_ack: + args.append(self.READ_NOACK_REDIS_API) + + return args diff --git a/python/python/glide/async_commands/transaction.py b/python/python/glide/async_commands/transaction.py index f2f95bdb46..692edbdb3d 100644 --- a/python/python/glide/async_commands/transaction.py +++ b/python/python/glide/async_commands/transaction.py @@ -1,19 +1,28 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import threading from typing import List, Mapping, Optional, Tuple, TypeVar, Union +from glide.async_commands.bitmap import ( + BitFieldGet, + BitFieldSubCommands, + BitmapIndexType, + BitwiseOperation, + OffsetOptions, + _create_bitfield_args, + _create_bitfield_read_only_args, +) from glide.async_commands.command_args import Limit, ListDirection, OrderBy from glide.async_commands.core import ( ConditionalChange, ExpireOptions, + ExpiryGetEx, ExpirySet, + FlushMode, GeospatialData, GeoUnit, InfoSection, InsertPosition, - StreamAddOptions, - StreamTrimOptions, UpdateOptions, _build_sort_args, ) @@ -33,6 +42,14 @@ _create_zinter_zunion_cmd_args, _create_zrange_args, ) +from glide.async_commands.stream import ( + StreamAddOptions, + StreamGroupOptions, + StreamRangeBound, + StreamReadGroupOptions, + StreamReadOptions, + StreamTrimOptions, +) from glide.protobuf.redis_request_pb2 import RequestType TTransaction = TypeVar("TTransaction", bound="BaseTransaction") @@ -98,6 +115,27 @@ def getdel(self: TTransaction, key: str) -> TTransaction: """ return self.append_command(RequestType.GetDel, [key]) + def getrange(self: TTransaction, key: str, start: int, end: int) -> TTransaction: + """ + Returns the substring of the string value stored at `key`, determined by the offsets `start` and `end` (both are inclusive). + Negative offsets can be used in order to provide an offset starting from the end of the string. 
+ So `-1` means the last character, `-2` the penultimate and so forth. + + If `key` does not exist, an empty string is returned. If `start` or `end` + are out of range, returns the substring within the valid range of the string. + + See https://valkey.io/commands/getrange/ for more details. + + Args: + key (str): The key of the string. + start (int): The starting offset. + end (int): The ending offset. + + Commands response: + str: A substring extracted from the value stored at `key`. + """ + return self.append_command(RequestType.GetRange, [key, str(start), str(end)]) + def set( self: TTransaction, key: str, @@ -345,6 +383,20 @@ def mget(self: TTransaction, keys: List[str]) -> TTransaction: """ return self.append_command(RequestType.MGet, keys) + def touch(self: TTransaction, keys: List[str]) -> TTransaction: + """ + Updates the last access time of specified keys. + + See https://valkey.io/commands/touch/ for details. + + Args: + keys (List[str]): The keys to update last access time. + + Commands response: + int: The number of keys that were updated, a key is ignored if it doesn't exist. + """ + return self.append_command(RequestType.Touch, keys) + def config_rewrite(self: TTransaction) -> TTransaction: """ Rewrite the configuration file with the current configuration. @@ -740,6 +792,21 @@ def hrandfield_withvalues(self: TTransaction, key: str, count: int) -> TTransact RequestType.HRandField, [key, str(count), "WITHVALUES"] ) + def hstrlen(self: TTransaction, key: str, field: str) -> TTransaction: + """ + Returns the string length of the value associated with `field` in the hash stored at `key`. + + See https://valkey.io/commands/hstrlen/ for more details. + + Args: + key (str): The key of the hash. + field (str): The field in the hash. + + Commands response: + int: The string length or 0 if `field` or `key` does not exist. 
+ """ + return self.append_command(RequestType.HStrlen, [key, field]) + def lpush(self: TTransaction, key: str, elements: List[str]) -> TTransaction: """ Insert all the specified values at the head of the list stored at `key`. @@ -924,6 +991,26 @@ def lindex( """ return self.append_command(RequestType.LIndex, [key, str(index)]) + def lset(self: TTransaction, key: str, index: int, element: str) -> TTransaction: + """ + Sets the list element at `index` to `element`. + + The index is zero-based, so `0` means the first element, `1` the second element and so on. + Negative indices can be used to designate elements starting at the tail of the list. + Here, `-1` means the last element, `-2` means the penultimate and so forth. + + See https://valkey.io/commands/lset/ for details. + + Args: + key (str): The key of the list. + index (int): The index of the element in the list to be set. + element (str): The new element to set at the specified index. + + Commands response: + TOK: A simple `OK` response. + """ + return self.append_command(RequestType.LSet, [key, str(index), element]) + def rpush(self: TTransaction, key: str, elements: List[str]) -> TTransaction: """Inserts all the specified values at the tail of the list stored at `key`. `elements` are inserted one after the other to the tail of the list, from the leftmost element @@ -1225,6 +1312,21 @@ def smove( """ return self.append_command(RequestType.SMove, [source, destination, member]) + def sunion(self: TTransaction, keys: List[str]) -> TTransaction: + """ + Gets the union of all the given sets. + + See https://valkey.io/commands/sunion for more details. + + Args: + keys (List[str]): The keys of the sets. + + Commands response: + Set[str]: A set of members which are present in at least one of the given sets. + If none of the sets exist, an empty set will be returned. 
+ """ + return self.append_command(RequestType.SUnion, keys) + def sunionstore( self: TTransaction, destination: str, @@ -1552,6 +1654,41 @@ def pexpireat( ) return self.append_command(RequestType.PExpireAt, args) + def expiretime(self: TTransaction, key: str) -> TTransaction: + """ + Returns the absolute Unix timestamp (since January 1, 1970) at which + the given `key` will expire, in seconds. + To get the expiration with millisecond precision, use `pexpiretime`. + + See https://valkey.io/commands/expiretime/ for details. + + Args: + key (str): The `key` to determine the expiration value of. + + Commands response: + int: The expiration Unix timestamp in seconds, -2 if `key` does not exist or -1 if `key` exists but has no associated expire. + + Since: Redis version 7.0.0. + """ + return self.append_command(RequestType.ExpireTime, [key]) + + def pexpiretime(self: TTransaction, key: str) -> TTransaction: + """ + Returns the absolute Unix timestamp (since January 1, 1970) at which + the given `key` will expire, in milliseconds. + + See https://valkey.io/commands/pexpiretime/ for details. + + Args: + key (str): The `key` to determine the expiration value of. + + Commands response: + int: The expiration Unix timestamp in milliseconds, -2 if `key` does not exist, or -1 if `key` exists but has no associated expiration. + + Since: Redis version 7.0.0. + """ + return self.append_command(RequestType.PExpireTime, [key]) + def ttl(self: TTransaction, key: str) -> TTransaction: """ Returns the remaining time to live of `key` that has a timeout. @@ -1639,6 +1776,29 @@ def type(self: TTransaction, key: str) -> TTransaction: """ return self.append_command(RequestType.Type, [key]) + def function_load( + self: TTransaction, library_code: str, replace: bool = False + ) -> TTransaction: + """ + Loads a library to Redis. + + See https://valkey.io/docs/latest/commands/function-load/ for more details. + + Args: + library_code (str): The source code that implements the library. 
+ replace (bool): Whether the given library should overwrite a library with the same name if + it already exists. + + Commands response: + str: The library name that was loaded. + + Since: Redis 7.0.0. + """ + return self.append_command( + RequestType.FunctionLoad, + ["REPLACE", library_code] if replace else [library_code], + ) + def xadd( self: TTransaction, key: str, @@ -1665,6 +1825,22 @@ def xadd( return self.append_command(RequestType.XAdd, args) + def xdel(self: TTransaction, key: str, ids: List[str]) -> TTransaction: + """ + Removes the specified entries by id from a stream, and returns the number of entries deleted. + + See https://valkey.io/commands/xdel for more details. + + Args: + key (str): The key of the stream. + ids (List[str]): An array of entry ids. + + Command response: + int: The number of entries removed from the stream. This number may be less than the number of entries in + `ids`, if the specified `ids` don't exist in the stream. + """ + return self.append_command(RequestType.XDel, [key] + ids) + def xtrim( self: TTransaction, key: str, @@ -1702,29 +1878,249 @@ def xlen(self: TTransaction, key: str) -> TTransaction: """ return self.append_command(RequestType.XLen, [key]) - def function_load( - self: TTransaction, library_code: str, replace: bool = False + def xrange( + self: TTransaction, + key: str, + start: StreamRangeBound, + end: StreamRangeBound, + count: Optional[int] = None, ) -> TTransaction: """ - Loads a library to Redis. + Returns stream entries matching a given range of IDs. - See https://valkey.io/docs/latest/commands/function-load/ for more details. + See https://valkey.io/commands/xrange for more details. Args: - library_code (str): The source code that implements the library. - replace (bool): Whether the given library should overwrite a library with the same name if - it already exists. + key (str): The key of the stream. + start (StreamRangeBound): The starting stream ID bound for the range. 
+ - Use `IdBound` to specify a stream ID. + - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID. + - Use `MinId` to start with the minimum available ID. + end (StreamRangeBound): The ending stream ID bound for the range. + - Use `IdBound` to specify a stream ID. + - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID. + - Use `MaxId` to end with the maximum available ID. + count (Optional[int]): An optional argument specifying the maximum count of stream entries to return. + If `count` is not provided, all stream entries in the range will be returned. + + Command response: + Optional[Mapping[str, List[List[str]]]]: A mapping of stream IDs to stream entry data, where entry data is a + list of pairings with format `[[field, entry], [field, entry], ...]`. Returns None if the range arguments + are not applicable. + """ + args = [key, start.to_arg(), end.to_arg()] + if count is not None: + args.extend(["COUNT", str(count)]) - Commands response: - str: The library name that was loaded. + return self.append_command(RequestType.XRange, args) - Since: Redis 7.0.0. + def xrevrange( + self: TTransaction, + key: str, + end: StreamRangeBound, + start: StreamRangeBound, + count: Optional[int] = None, + ) -> TTransaction: + """ + Returns stream entries matching a given range of IDs in reverse order. Equivalent to `XRANGE` but returns the + entries in reverse order. + + See https://valkey.io/commands/xrevrange for more details. + + Args: + key (str): The key of the stream. + end (StreamRangeBound): The ending stream ID bound for the range. + - Use `IdBound` to specify a stream ID. + - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID. + - Use `MaxId` to end with the maximum available ID. + start (StreamRangeBound): The starting stream ID bound for the range. + - Use `IdBound` to specify a stream ID. + - Use `ExclusiveIdBound` to specify an exclusive bounded stream ID. + - Use `MinId` to start with the minimum available ID. 
+ count (Optional[int]): An optional argument specifying the maximum count of stream entries to return. + If `count` is not provided, all stream entries in the range will be returned. + + Command response: + Optional[Mapping[str, List[List[str]]]]: A mapping of stream IDs to stream entry data, where entry data is a + list of pairings with format `[[field, entry], [field, entry], ...]`. Returns None if the range arguments + are not applicable. + """ + args = [key, end.to_arg(), start.to_arg()] + if count is not None: + args.extend(["COUNT", str(count)]) + + return self.append_command(RequestType.XRevRange, args) + + def xread( + self: TTransaction, + keys_and_ids: Mapping[str, str], + options: Optional[StreamReadOptions] = None, + ) -> TTransaction: + """ + Reads entries from the given streams. + + See https://valkey.io/commands/xread for more details. + + Args: + keys_and_ids (Mapping[str, str]): A mapping of keys and entry IDs to read from. The mapping is composed of a + stream's key and the ID of the entry after which the stream will be read. + options (Optional[StreamReadOptions]): Options detailing how to read the stream. + + Command response: + Optional[Mapping[str, Mapping[str, List[List[str]]]]]: A mapping of stream keys, to a mapping of stream IDs, + to a list of pairings with format `[[field, entry], [field, entry], ...]`. + None will be returned under the following conditions: + - All key-ID pairs in `keys_and_ids` have either a non-existing key or a non-existing ID, or there are no entries after the given ID. + - The `BLOCK` option is specified and the timeout is hit. 
+ """ + args = [] if options is None else options.to_args() + args.append("STREAMS") + args.extend([key for key in keys_and_ids.keys()]) + args.extend([value for value in keys_and_ids.values()]) + + return self.append_command(RequestType.XRead, args) + + def xgroup_create( + self: TTransaction, + key: str, + group_name: str, + group_id: str, + options: Optional[StreamGroupOptions] = None, + ) -> TTransaction: + """ + Creates a new consumer group uniquely identified by `group_name` for the stream stored at `key`. + + See https://valkey.io/commands/xgroup-create for more details. + + Args: + key (str): The key of the stream. + group_name (str): The newly created consumer group name. + group_id (str): The stream entry ID that specifies the last delivered entry in the stream from the new + group’s perspective. The special ID "$" can be used to specify the last entry in the stream. + options (Optional[StreamGroupOptions]): Options for creating the stream group. + + Command response: + TOK: A simple "OK" response. + """ + args = [key, group_name, group_id] + if options is not None: + args.extend(options.to_args()) + + return self.append_command(RequestType.XGroupCreate, args) + + def xgroup_destroy(self: TTransaction, key: str, group_name: str) -> TTransaction: + """ + Destroys the consumer group `group_name` for the stream stored at `key`. + + See https://valkey.io/commands/xgroup-destroy for more details. + + Args: + key (str): The key of the stream. + group_name (str): The consumer group name to delete. + + Command response: + bool: True if the consumer group was destroyed. Otherwise, returns False. + """ + return self.append_command(RequestType.XGroupDestroy, [key, group_name]) + + def xgroup_create_consumer( + self: TTransaction, key: str, group_name: str, consumer_name: str + ) -> TTransaction: + """ + Creates a consumer named `consumer_name` in the consumer group `group_name` for the stream stored at `key`. 
+ + See https://valkey.io/commands/xgroup-createconsumer for more details. + + Args: + key (str): The key of the stream. + group_name (str): The consumer group name. + consumer_name (str): The newly created consumer. + + Command response: + bool: True if the consumer is created. Otherwise, returns False. """ return self.append_command( - RequestType.FunctionLoad, - ["REPLACE", library_code] if replace else [library_code], + RequestType.XGroupCreateConsumer, [key, group_name, consumer_name] + ) + + def xgroup_del_consumer( + self: TTransaction, key: str, group_name: str, consumer_name: str + ) -> TTransaction: + """ + Deletes a consumer named `consumer_name` in the consumer group `group_name` for the stream stored at `key`. + + See https://valkey.io/commands/xgroup-delconsumer for more details. + + Args: + key (str): The key of the stream. + group_name (str): The consumer group name. + consumer_name (str): The consumer to delete. + + Command response: + int: The number of pending messages the `consumer` had before it was deleted. + """ + return self.append_command( + RequestType.XGroupDelConsumer, [key, group_name, consumer_name] ) + def xreadgroup( + self: TTransaction, + keys_and_ids: Mapping[str, str], + group_name: str, + consumer_name: str, + options: Optional[StreamReadGroupOptions] = None, + ) -> TTransaction: + """ + Reads entries from the given streams owned by a consumer group. + + See https://valkey.io/commands/xreadgroup for more details. + + Args: + keys_and_ids (Mapping[str, str]): A mapping of stream keys to stream entry IDs to read from. The special ">" + ID returns messages that were never delivered to any other consumer. Any other valid ID will return + entries pending for the consumer with IDs greater than the one provided. + group_name (str): The consumer group name. + consumer_name (str): The consumer name. The consumer will be auto-created if it does not already exist. 
+ options (Optional[StreamReadGroupOptions]): Options detailing how to read the stream. + + Command response: + Optional[Mapping[str, Mapping[str, Optional[List[List[str]]]]]]: A mapping of stream keys, to a mapping of + stream IDs, to a list of pairings with format `[[field, entry], [field, entry], ...]`. + Returns None if the BLOCK option is given and a timeout occurs, or if there is no stream that can be served. + """ + args = ["GROUP", group_name, consumer_name] + if options is not None: + args.extend(options.to_args()) + + args.append("STREAMS") + args.extend([key for key in keys_and_ids.keys()]) + args.extend([value for value in keys_and_ids.values()]) + + return self.append_command(RequestType.XReadGroup, args) + + def xack( + self: TTransaction, + key: str, + group_name: str, + ids: List[str], + ) -> TTransaction: + """ + Removes one or multiple messages from the Pending Entries List (PEL) of a stream consumer group. + This command should be called on pending messages so that such messages do not get processed again by the + consumer group. + + See https://valkey.io/commands/xack for more details. + + Args: + key (str): The key of the stream. + group_name (str): The consumer group name. + ids (List[str]): The stream entry IDs to acknowledge and consume for the given consumer group. + + Command response: + int: The number of messages that were successfully acknowledged. + """ + return self.append_command(RequestType.XAck, [key, group_name] + ids) + def geoadd( self: TTransaction, key: str, @@ -1879,7 +2275,7 @@ def geosearch( Since: Redis version 6.2.0. 
""" args = _create_geosearch_args( - key, + [key], search_from, seach_by, order_by, @@ -1891,6 +2287,57 @@ def geosearch( return self.append_command(RequestType.GeoSearch, args) + def geosearchstore( + self: TTransaction, + destination: str, + source: str, + search_from: Union[str, GeospatialData], + search_by: Union[GeoSearchByRadius, GeoSearchByBox], + count: Optional[GeoSearchCount] = None, + store_dist: bool = False, + ) -> TTransaction: + """ + Searches for members in a sorted set stored at `key` representing geospatial data within a circular or rectangular area and stores the result in `destination`. + If `destination` already exists, it is overwritten. Otherwise, a new sorted set will be created. + + To get the result directly, see `geosearch`. + + See https://valkey.io/commands/geosearch/ for more details. + + Args: + destination (str): The key to store the search results. + source (str): The key of the sorted set representing geospatial data to search from. + search_from (Union[str, GeospatialData]): The location to search from. Can be specified either as a member + from the sorted set or as a geospatial data (see `GeospatialData`). + search_by (Union[GeoSearchByRadius, GeoSearchByBox]): The search criteria. + For circular area search, see `GeoSearchByRadius`. + For rectangular area search, see `GeoSearchByBox`. + count (Optional[GeoSearchCount]): Specifies the maximum number of results to store. See `GeoSearchCount`. + If not specified, stores all results. + store_dist (bool): Determines what is stored as the sorted set score. Defaults to False. + - If set to False, the geohash of the location will be stored as the sorted set score. + - If set to True, the distance from the center of the shape (circle or box) will be stored as the sorted set score. + The distance is represented as a floating-point number in the same unit specified for that shape. 
+ + Commands response: + int: The number of elements in the resulting sorted set stored at `destination`.s + + Since: Redis version 6.2.0. + """ + args = _create_geosearch_args( + [destination, source], + search_from, + search_by, + None, + count, + False, + False, + False, + store_dist, + ) + + return self.append_command(RequestType.GeoSearchStore, args) + def zadd( self: TTransaction, key: str, @@ -2045,6 +2492,26 @@ def zcount( ) return self.append_command(RequestType.ZCount, [key, score_min, score_max]) + def zincrby( + self: TTransaction, key: str, increment: float, member: str + ) -> TTransaction: + """ + Increments the score of `member` in the sorted set stored at `key` by `increment`. + If `member` does not exist in the sorted set, it is added with `increment` as its score. + If `key` does not exist, a new sorted set is created with the specified member as its sole member. + + See https://valkey.io/commands/zincrby/ for more details. + + Args: + key (str): The key of the sorted set. + increment (float): The score increment. + member (str): A member of the sorted set. + + Commands response: + float: The new score of `member`. + """ + return self.append_command(RequestType.ZIncrBy, [key, str(increment), member]) + def zpopmax( self: TTransaction, key: str, count: Optional[int] = None ) -> TTransaction: @@ -2269,6 +2736,45 @@ def zrank_withscore( """ return self.append_command(RequestType.ZRank, [key, member, "WITHSCORE"]) + def zrevrank(self: TTransaction, key: str, member: str) -> TTransaction: + """ + Returns the rank of `member` in the sorted set stored at `key`, where scores are ordered from the highest to + lowest, starting from `0`. + + To get the rank of `member` with its score, see `zrevrank_withscore`. + + See https://valkey.io/commands/zrevrank for more details. + + Args: + key (str): The key of the sorted set. + member (str): The member whose rank is to be retrieved. 
+ + Command response: + Optional[int]: The rank of `member` in the sorted set, where ranks are ordered from high to low based on scores. + If `key` doesn't exist, or if `member` is not present in the set, `None` will be returned. + """ + return self.append_command(RequestType.ZRevRank, [key, member]) + + def zrevrank_withscore(self: TTransaction, key: str, member: str) -> TTransaction: + """ + Returns the rank of `member` in the sorted set stored at `key` with its score, where scores are ordered from the + highest to lowest, starting from `0`. + + See https://valkey.io/commands/zrevrank for more details. + + Args: + key (str): The key of the sorted set. + member (str): The member whose rank is to be retrieved. + + Command response: + Optional[List[Union[int, float]]]: A list containing the rank (as `int`) and score (as `float`) of `member` + in the sorted set, where ranks are ordered from high to low based on scores. + If `key` doesn't exist, or if `member` is not present in the set, `None` will be returned. + + Since: Redis version 7.2.0. + """ + return self.append_command(RequestType.ZRevRank, [key, member, "WITHSCORE"]) + def zrem( self: TTransaction, key: str, @@ -2877,6 +3383,30 @@ def pfmerge( """ return self.append_command(RequestType.PfMerge, [destination] + source_keys) + def bitcount( + self: TTransaction, key: str, options: Optional[OffsetOptions] = None + ) -> TTransaction: + """ + Counts the number of set bits (population counting) in a string stored at `key`. The `options` argument can + optionally be provided to count the number of bits in a specific string interval. + + See https://valkey.io/commands/bitcount for more details. + + Args: + key (str): The key for the string to count the set bits of. + options (Optional[OffsetOptions]): The offset options. + + Command response: + int: If `options` is provided, returns the number of set bits in the string interval specified by `options`. 
+ If `options` is not provided, returns the number of set bits in the string stored at `key`. + Otherwise, if `key` is missing, returns `0` as it is treated as an empty string. + """ + args = [key] + if options is not None: + args = args + options.to_args() + + return self.append_command(RequestType.BitCount, args) + def setbit(self: TTransaction, key: str, offset: int, value: int) -> TTransaction: """ Sets or clears the bit at `offset` in the string value stored at `key`. The `offset` is a zero-based index, @@ -2896,6 +3426,161 @@ def setbit(self: TTransaction, key: str, offset: int, value: int) -> TTransactio """ return self.append_command(RequestType.SetBit, [key, str(offset), str(value)]) + def getbit(self: TTransaction, key: str, offset: int) -> TTransaction: + """ + Returns the bit value at `offset` in the string value stored at `key`. + `offset` should be greater than or equal to zero. + + See https://valkey.io/commands/getbit for more details. + + Args: + key (str): The key of the string. + offset (int): The index of the bit to return. + + Command response: + int: The bit at the given `offset` of the string. Returns `0` if the key is empty or if the `offset` exceeds + the length of the string. + """ + return self.append_command(RequestType.GetBit, [key, str(offset)]) + + def bitpos( + self: TTransaction, key: str, bit: int, start: Optional[int] = None + ) -> TTransaction: + """ + Returns the position of the first bit matching the given `bit` value. The optional starting offset + `start` is a zero-based index, with `0` being the first byte of the list, `1` being the next byte and so on. + The offset can also be a negative number indicating an offset starting at the end of the list, with `-1` being + the last byte of the list, `-2` being the penultimate, and so on. + + See https://valkey.io/commands/bitpos for more details. + + Args: + key (str): The key of the string. + bit (int): The bit value to match. Must be `0` or `1`. 
+ start (Optional[int]): The starting offset. + + Command response: + int: The position of the first occurrence of `bit` in the binary value of the string held at `key`. + If `start` was provided, the search begins at the offset indicated by `start`. + """ + args = [key, str(bit)] if start is None else [key, str(bit), str(start)] + return self.append_command(RequestType.BitPos, args) + + def bitpos_interval( + self: TTransaction, + key: str, + bit: int, + start: int, + end: int, + index_type: Optional[BitmapIndexType] = None, + ) -> TTransaction: + """ + Returns the position of the first bit matching the given `bit` value. The offsets are zero-based indexes, with + `0` being the first element of the list, `1` being the next, and so on. These offsets can also be negative + numbers indicating offsets starting at the end of the list, with `-1` being the last element of the list, `-2` + being the penultimate, and so on. + + If you are using Redis 7.0.0 or above, the optional `index_type` can also be provided to specify whether the + `start` and `end` offsets specify BIT or BYTE offsets. If `index_type` is not provided, BYTE offsets + are assumed. If BIT is specified, `start=0` and `end=2` means to look at the first three bits. If BYTE is + specified, `start=0` and `end=2` means to look at the first three bytes. + + See https://valkey.io/commands/bitpos for more details. + + Args: + key (str): The key of the string. + bit (int): The bit value to match. Must be `0` or `1`. + start (int): The starting offset. + end (int): The ending offset. + index_type (Optional[BitmapIndexType]): The index offset type. This option can only be specified if you are + using Redis version 7.0.0 or above. Could be either `BitmapIndexType.BYTE` or `BitmapIndexType.BIT`. + If no index type is provided, the indexes will be assumed to be byte indexes. 
+ + Command response: + int: The position of the first occurrence from the `start` to the `end` offsets of the `bit` in the binary + value of the string held at `key`. + """ + if index_type is not None: + args = [key, str(bit), str(start), str(end), index_type.value] + else: + args = [key, str(bit), str(start), str(end)] + + return self.append_command(RequestType.BitPos, args) + + def bitop( + self: TTransaction, + operation: BitwiseOperation, + destination: str, + keys: List[str], + ) -> TTransaction: + """ + Perform a bitwise operation between multiple keys (containing string values) and store the result in the + `destination`. + + See https://valkey.io/commands/bitop for more details. + + Args: + operation (BitwiseOperation): The bitwise operation to perform. + destination (str): The key that will store the resulting string. + keys (List[str]): The list of keys to perform the bitwise operation on. + + Command response: + int: The size of the string stored in `destination`. + """ + return self.append_command( + RequestType.BitOp, [operation.value, destination] + keys + ) + + def bitfield( + self: TTransaction, key: str, subcommands: List[BitFieldSubCommands] + ) -> TTransaction: + """ + Reads or modifies the array of bits representing the string that is held at `key` based on the specified + `subcommands`. + + See https://valkey.io/commands/bitfield for more details. + + Args: + key (str): The key of the string. + subcommands (List[BitFieldSubCommands]): The subcommands to be performed on the binary value of the string + at `key`, which could be any of the following: + - `BitFieldGet` + - `BitFieldSet` + - `BitFieldIncrBy` + - `BitFieldOverflow` + + Command response: + List[Optional[int]]: An array of results from the executed subcommands: + - `BitFieldGet` returns the value in `Offset` or `OffsetMultiplier`. + - `BitFieldSet` returns the old value in `Offset` or `OffsetMultiplier`. + - `BitFieldIncrBy` returns the new value in `Offset` or `OffsetMultiplier`. 
+ - `BitFieldOverflow` determines the behavior of the "SET" and "INCRBY" subcommands when an overflow or + underflow occurs. "OVERFLOW" does not return a value and does not contribute a value to the list + response. + """ + args = [key] + _create_bitfield_args(subcommands) + return self.append_command(RequestType.BitField, args) + + def bitfield_read_only( + self: TTransaction, key: str, subcommands: List[BitFieldGet] + ) -> TTransaction: + """ + Reads the array of bits representing the string that is held at `key` based on the specified `subcommands`. + + See https://valkey.io/commands/bitfield_ro for more details. + + Args: + key (str): The key of the string. + subcommands (List[BitFieldGet]): The "GET" subcommands to be performed. + + Command response: + List[int]: An array of results from the "GET" subcommands. + + Since: Redis version 6.0.0. + """ + args = [key] + _create_bitfield_read_only_args(subcommands) + return self.append_command(RequestType.BitFieldReadOnly, args) + def object_encoding(self: TTransaction, key: str) -> TTransaction: """ Returns the internal encoding for the Redis object stored at `key`. @@ -2955,6 +3640,126 @@ def object_refcount(self: TTransaction, key: str) -> TTransaction: """ return self.append_command(RequestType.ObjectRefCount, [key]) + def srandmember(self: TTransaction, key: str) -> TTransaction: + """ + Returns a random element from the set value stored at 'key'. + + See https://valkey.io/commands/srandmember for more details. + + Args: + key (str): The key from which to retrieve the set member. + + Command Response: + str: A random element from the set, or None if 'key' does not exist. + """ + return self.append_command(RequestType.SRandMember, [key]) + + def srandmember_count(self: TTransaction, key: str, count: int) -> TTransaction: + """ + Returns one or more random elements from the set value stored at 'key'. + + See https://valkey.io/commands/srandmember for more details. + + Args: + key (str): The key of the sorted set. 
+ count (int): The number of members to return. + If `count` is positive, returns unique members. + If `count` is negative, allows for duplicates members. + + Command Response: + List[str]: A list of members from the set. + If the set does not exist or is empty, the response will be an empty list. + """ + return self.append_command(RequestType.SRandMember, [key, str(count)]) + + def flushall( + self: TTransaction, flush_mode: Optional[FlushMode] = None + ) -> TTransaction: + """ + Deletes all the keys of all the existing databases. This command never fails. + See https://valkey.io/commands/flushall for more details. + + Args: + flush_mode (Optional[FlushMode]): The flushing mode, could be either `SYNC` or `ASYNC`. + + Command Response: + TOK: OK. + """ + args = [] + if flush_mode is not None: + args.append(flush_mode.value) + return self.append_command(RequestType.FlushAll, args) + + def flushdb( + self: TTransaction, flush_mode: Optional[FlushMode] = None + ) -> TTransaction: + """ + Deletes all the keys of the currently selected database. This command never fails. + + See https://valkey.io/commands/flushdb for more details. + + Args: + flush_mode (Optional[FlushMode]): The flushing mode, could be either `SYNC` or `ASYNC`. + + Command Response: + TOK: OK. + """ + args = [] + if flush_mode is not None: + args.append(flush_mode.value) + return self.append_command(RequestType.FlushDB, args) + + def getex( + self: TTransaction, key: str, expiry: Optional[ExpiryGetEx] = None + ) -> TTransaction: + """ + Get the value of `key` and optionally set its expiration. GETEX is similar to GET. + See https://valkey.io/commands/getex for more details. + + Args: + key (str): The key to get. + expiry (Optional[ExpirySet], optional): set expiriation to the given key. + Equivalent to [`EX` | `PX` | `EXAT` | `PXAT` | `PERSIST`] in the Redis API. 
+ + Command Response: + Optional[str]: + If `key` exists, return the value stored at `key` + If 'key` does not exist, return 'None' + + Since: Redis version 6.2.0. + """ + args = [key] + if expiry is not None: + args.extend(expiry.get_cmd_args()) + return self.append_command(RequestType.GetEx, args) + + def lolwut( + self: TTransaction, + version: Optional[int] = None, + parameters: Optional[List[int]] = None, + ) -> TTransaction: + """ + Displays a piece of generative computer art and the Redis version. + + See https://valkey.io/commands/lolwut for more details. + + Args: + version (Optional[int]): Version of computer art to generate. + parameters (Optional[List[int]]): Additional set of arguments in order to change the output: + For version `5`, those are length of the line, number of squares per row, and number of squares per column. + For version `6`, those are number of columns and number of lines. + + Command Response: + str: A piece of generative computer art along with the current Redis version. + """ + args = [] + if version is not None: + args.extend(["VERSION", str(version)]) + if parameters: + for var in parameters: + args.extend(str(var)) + return self.append_command(RequestType.Lolwut, args) + class Transaction(BaseTransaction): """ @@ -3102,6 +3907,40 @@ def sort_store( ) return self.append_command(RequestType.Sort, args) + def copy( + self: TTransaction, + source: str, + destination: str, + destinationDB: Optional[int] = None, + replace: Optional[bool] = None, + ) -> TTransaction: + """ + Copies the value stored at the `source` to the `destination` key. If `destinationDB` + is specified, the value will be copied to the database specified by `destinationDB`, + otherwise the current database will be used. When `replace` is True, removes the + `destination` key first if it already exists, otherwise performs no action. + + See https://valkey.io/commands/copy for more details. + + Args: + source (str): The key to the source value. 
+ destination (str): The key where the value should be copied to. + destinationDB (Optional[int]): The alternative logical database index for the destination key. + replace (Optional[bool]): If the destination key should be removed before copying the value to it. + + Command response: + bool: True if the source was copied. Otherwise, return False. + + Since: Redis version 6.2.0. + """ + args = [source, destination] + if destinationDB is not None: + args.extend(["DB", str(destinationDB)]) + if replace is not None: + args.append("REPLACE") + + return self.append_command(RequestType.Copy, args) + class ClusterTransaction(BaseTransaction): """ @@ -3169,4 +4008,32 @@ def sort_store( args = _build_sort_args(key, None, limit, None, order, alpha, store=destination) return self.append_command(RequestType.Sort, args) + def copy( + self: TTransaction, + source: str, + destination: str, + replace: Optional[bool] = None, + ) -> TTransaction: + """ + Copies the value stored at the `source` to the `destination` key. When `replace` is True, + removes the `destination` key first if it already exists, otherwise performs no action. + + See https://valkey.io/commands/copy for more details. + + Args: + source (str): The key to the source value. + destination (str): The key where the value should be copied to. + replace (Optional[bool]): If the destination key should be removed before copying the value to it. + + Command response: + bool: True if the source was copied. Otherwise, return False. + + Since: Redis version 6.2.0. 
+ """ + args = [source, destination] + if replace is not None: + args.append("REPLACE") + + return self.append_command(RequestType.Copy, args) + # TODO: add all CLUSTER commands diff --git a/python/python/glide/config.py b/python/python/glide/config.py index 5c6ba07969..8c6405e313 100644 --- a/python/python/glide/config.py +++ b/python/python/glide/config.py @@ -1,8 +1,13 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 -from enum import Enum -from typing import List, Optional, Union +from __future__ import annotations +from dataclasses import dataclass +from enum import Enum, IntEnum +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union + +from glide.async_commands.core import CoreCommands +from glide.exceptions import ConfigurationError from glide.protobuf.connection_request_pb2 import ConnectionRequest from glide.protobuf.connection_request_pb2 import ProtocolVersion as SentProtocolVersion from glide.protobuf.connection_request_pb2 import ReadFrom as ProtobufReadFrom @@ -131,7 +136,7 @@ def __init__( protocol: ProtocolVersion = ProtocolVersion.RESP3, ): """ - Represents the configuration settings for a Redis client. + Represents the configuration settings for a Glide client. Args: addresses (List[NodeAddress]): DNS Addresses and ports of known nodes in the cluster. @@ -194,10 +199,18 @@ def _create_a_protobuf_conn_request( return request + def _is_pubsub_configured(self) -> bool: + return False + + def _get_pubsub_callback_and_context( + self, + ) -> Tuple[Optional[Callable[[CoreCommands.PubSubMsg, Any], None]], Any]: + return None, None + -class RedisClientConfiguration(BaseClientConfiguration): +class GlideClientConfiguration(BaseClientConfiguration): """ - Represents the configuration settings for a Standalone Redis client. + Represents the configuration settings for a Standalone Glide client. 
Args: addresses (List[NodeAddress]): DNS Addresses and ports of known nodes in the cluster. @@ -221,8 +234,40 @@ class RedisClientConfiguration(BaseClientConfiguration): database_id (Optional[int]): index of the logical database to connect to. client_name (Optional[str]): Client name to be used for the client. Will be used with CLIENT SETNAME command during connection establishment. protocol (ProtocolVersion): The version of the Redis RESP protocol to communicate with the server. + pubsub_subscriptions (Optional[GlideClientConfiguration.PubSubSubscriptions]): Pubsub subscriptions to be used for the client. + Will be applied via SUBSCRIBE/PSUBSCRIBE commands during connection establishment. """ + class PubSubChannelModes(IntEnum): + """ + Describes pubsub subsciption modes. + See https://valkey.io/docs/topics/pubsub/ for more details + """ + + Exact = 0 + """ Use exact channel names """ + Pattern = 1 + """ Use channel name patterns """ + + @dataclass + class PubSubSubscriptions: + """Describes pubsub configuration for standalone mode client. + + Attributes: + channels_and_patterns (Dict[GlideClientConfiguration.PubSubChannelModes, Set[str]]): + Channels and patterns by modes. + callback (Optional[Callable[[CoreCommands.PubSubMsg, Any], None]]): + Optional callback to accept the incomming messages. + context (Any): + Arbitrary context to pass to the callback. 
+ """ + + channels_and_patterns: Dict[ + GlideClientConfiguration.PubSubChannelModes, Set[str] + ] + callback: Optional[Callable[[CoreCommands.PubSubMsg, Any], None]] + context: Any + def __init__( self, addresses: List[NodeAddress], @@ -234,6 +279,7 @@ def __init__( database_id: Optional[int] = None, client_name: Optional[str] = None, protocol: ProtocolVersion = ProtocolVersion.RESP3, + pubsub_subscriptions: Optional[PubSubSubscriptions] = None, ): super().__init__( addresses=addresses, @@ -246,6 +292,7 @@ def __init__( ) self.reconnect_strategy = reconnect_strategy self.database_id = database_id + self.pubsub_subscriptions = pubsub_subscriptions def _create_a_protobuf_conn_request( self, cluster_mode: bool = False @@ -263,12 +310,44 @@ def _create_a_protobuf_conn_request( if self.database_id: request.database_id = self.database_id + if self.pubsub_subscriptions: + if self.protocol == ProtocolVersion.RESP2: + raise ConfigurationError( + "PubSub subscriptions require RESP3 protocol, but RESP2 was configured." + ) + if ( + self.pubsub_subscriptions.context is not None + and not self.pubsub_subscriptions.callback + ): + raise ConfigurationError( + "PubSub subscriptions with a context require a callback function to be configured." 
+ ) + for ( + channel_type, + channels_patterns, + ) in self.pubsub_subscriptions.channels_and_patterns.items(): + entry = request.pubsub_subscriptions.channels_or_patterns_by_type[ + int(channel_type) + ] + for channel_pattern in channels_patterns: + entry.channels_or_patterns.append(str.encode(channel_pattern)) + return request + def _is_pubsub_configured(self) -> bool: + return self.pubsub_subscriptions is not None + + def _get_pubsub_callback_and_context( + self, + ) -> Tuple[Optional[Callable[[CoreCommands.PubSubMsg, Any], None]], Any]: + if self.pubsub_subscriptions: + return self.pubsub_subscriptions.callback, self.pubsub_subscriptions.context + return None, None + class ClusterClientConfiguration(BaseClientConfiguration): """ - Represents the configuration settings for a Cluster Redis client. + Represents the configuration settings for a Cluster Glide client. Args: addresses (List[NodeAddress]): DNS Addresses and ports of known nodes in the cluster. @@ -290,12 +369,46 @@ class ClusterClientConfiguration(BaseClientConfiguration): These checks evaluate changes in the cluster's topology, triggering a slot refresh when detected. Periodic checks ensure a quick and efficient process by querying a limited number of nodes. Defaults to PeriodicChecksStatus.ENABLED_DEFAULT_CONFIGS. + pubsub_subscriptions (Optional[ClusterClientConfiguration.PubSubSubscriptions]): Pubsub subscriptions to be used for the client. + Will be applied via SUBSCRIBE/PSUBSCRIBE/SSUBSCRIBE commands during connection establishment. Notes: Currently, the reconnection strategy in cluster mode is not configurable, and exponential backoff with fixed values is used. """ + class PubSubChannelModes(IntEnum): + """ + Describes pubsub subsciption modes. 
+ See https://valkey.io/docs/topics/pubsub/ for more details + """ + + Exact = 0 + """ Use exact channel names """ + Pattern = 1 + """ Use channel name patterns """ + Sharded = 2 + """ Use sharded pubsub """ + + @dataclass + class PubSubSubscriptions: + """Describes pubsub configuration for cluster mode client. + + Attributes: + channels_and_patterns (Dict[ClusterClientConfiguration.PubSubChannelModes, Set[str]]): + Channels and patterns by modes. + callback (Optional[Callable[[CoreCommands.PubSubMsg, Any], None]]): + Optional callback to accept the incoming messages. + context (Any): + Arbitrary context to pass to the callback. + """ + + channels_and_patterns: Dict[ + ClusterClientConfiguration.PubSubChannelModes, Set[str] + ] + callback: Optional[Callable[[CoreCommands.PubSubMsg, Any], None]] + context: Any + def __init__( self, addresses: List[NodeAddress], @@ -308,6 +421,7 @@ def __init__( periodic_checks: Union[ PeriodicChecksStatus, PeriodicChecksManualInterval ] = PeriodicChecksStatus.ENABLED_DEFAULT_CONFIGS, + pubsub_subscriptions: Optional[PubSubSubscriptions] = None, ): super().__init__( addresses=addresses, @@ -319,6 +433,7 @@ def __init__( protocol=protocol, ) self.periodic_checks = periodic_checks + self.pubsub_subscriptions = pubsub_subscriptions def _create_a_protobuf_conn_request( self, cluster_mode: bool = False @@ -332,4 +447,36 @@ def _create_a_protobuf_conn_request( elif self.periodic_checks == PeriodicChecksStatus.DISABLED: request.periodic_checks_disabled.SetInParent() + if self.pubsub_subscriptions: + if self.protocol == ProtocolVersion.RESP2: + raise ConfigurationError( + "PubSub subscriptions require RESP3 protocol, but RESP2 was configured." + ) + if ( + self.pubsub_subscriptions.context is not None + and not self.pubsub_subscriptions.callback + ): + raise ConfigurationError( + "PubSub subscriptions with a context require a callback function to be configured." 
+ ) + for ( + channel_type, + channels_patterns, + ) in self.pubsub_subscriptions.channels_and_patterns.items(): + entry = request.pubsub_subscriptions.channels_or_patterns_by_type[ + int(channel_type) + ] + for channel_pattern in channels_patterns: + entry.channels_or_patterns.append(str.encode(channel_pattern)) + return request + + def _is_pubsub_configured(self) -> bool: + return self.pubsub_subscriptions is not None + + def _get_pubsub_callback_and_context( + self, + ) -> Tuple[Optional[Callable[[CoreCommands.PubSubMsg, Any], None]], Any]: + if self.pubsub_subscriptions: + return self.pubsub_subscriptions.callback, self.pubsub_subscriptions.context + return None, None diff --git a/python/python/glide/constants.py b/python/python/glide/constants.py index 6c2cf47148..f78398895a 100644 --- a/python/python/glide/constants.py +++ b/python/python/glide/constants.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from typing import Dict, List, Literal, Mapping, Optional, Set, TypeVar, Union diff --git a/python/python/glide/exceptions.py b/python/python/glide/exceptions.py index 2b000e3e53..6420fa4e5d 100644 --- a/python/python/glide/exceptions.py +++ b/python/python/glide/exceptions.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from typing import Optional @@ -54,3 +54,9 @@ class ConnectionError(RequestError): """ pass + + +class ConfigurationError(RequestError): + """ + Errors that are thrown when a request cannot be completed in current configuration settings. 
+ """ diff --git a/python/python/glide/glide.pyi b/python/python/glide/glide.pyi index d155757bbd..fde1ac0d99 100644 --- a/python/python/glide/glide.pyi +++ b/python/python/glide/glide.pyi @@ -1,10 +1,11 @@ from collections.abc import Callable from enum import Enum -from typing import Optional +from typing import List, Optional from glide.constants import TResult DEFAULT_TIMEOUT_IN_MILLISECONDS: int = ... +MAX_REQUEST_ARGS_LEN: int = ... class Level(Enum): Error = 0 @@ -23,5 +24,6 @@ class Script: def start_socket_listener_external(init_callback: Callable) -> None: ... def value_from_pointer(pointer: int) -> TResult: ... def create_leaked_value(message: str) -> int: ... +def create_leaked_bytes_vec(args_vec: List[bytes]) -> int: ... def py_init(level: Optional[Level], file_name: Optional[str]) -> Level: ... def py_log(log_level: Level, log_identifier: str, message: str) -> None: ... diff --git a/python/python/glide/redis_client.py b/python/python/glide/glide_client.py similarity index 52% rename from python/python/glide/redis_client.py rename to python/python/glide/glide_client.py index 3d61d12b49..2840caf9a6 100644 --- a/python/python/glide/redis_client.py +++ b/python/python/glide/glide_client.py @@ -1,8 +1,9 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import asyncio +import sys import threading -from typing import List, Optional, Tuple, Type, Union, cast +from typing import Any, Dict, List, Optional, Tuple, Type, Union, cast import async_timeout from glide.async_commands.cluster_commands import ClusterCommands @@ -12,6 +13,7 @@ from glide.constants import DEFAULT_READ_BYTES_SIZE, OK, TRequest, TResult from glide.exceptions import ( ClosingError, + ConfigurationError, ConnectionError, ExecAbortError, RequestError, @@ -28,6 +30,8 @@ from .glide import ( DEFAULT_TIMEOUT_IN_MILLISECONDS, + MAX_REQUEST_ARGS_LEN, + create_leaked_bytes_vec, 
start_socket_listener_external, value_from_pointer, ) @@ -47,7 +51,7 @@ def get_request_error_class( return RequestError -class BaseRedisClient(CoreCommands): +class BaseClient(CoreCommands): def __init__(self, config: BaseClientConfiguration): """ To create a new client, use the `create` classmethod @@ -60,17 +64,20 @@ def __init__(self, config: BaseClientConfiguration): self.socket_path: Optional[str] = None self._reader_task: Optional[asyncio.Task] = None self._is_closed: bool = False + self._pubsub_futures: List[asyncio.Future] = [] + self._pubsub_lock = threading.Lock() + self._pending_push_notifications: List[Response] = list() @classmethod async def create(cls, config: BaseClientConfiguration) -> Self: - """Creates a Redis client. + """Creates a Glide client. Args: config (ClientConfiguration): The client configurations. If no configuration is provided, a default client to "localhost":6379 will be created. Returns: - Self: a Redis Client instance. + Self: a Glide Client instance. """ config = config self = cls(config) @@ -140,6 +147,14 @@ async def close(self, err_message: Optional[str] = None) -> None: if not response_future.done(): err_message = "" if err_message is None else err_message response_future.set_exception(ClosingError(err_message)) + try: + self._pubsub_lock.acquire() + for pubsub_future in self._pubsub_futures: + if not response_future.done() and not pubsub_future.cancelled(): + pubsub_future.set_exception(ClosingError("")) + finally: + self._pubsub_lock.release() + self._writer.close() await self._writer.wait_closed() self.__del__() @@ -182,6 +197,46 @@ async def _write_buffered_requests_to_socket(self) -> None: self._writer.write(b_arr) await self._writer.drain() + # TODO: change `str` to `TEncodable` where `TEncodable = Union[str, bytes]` + def _encode_arg(self, arg: str) -> bytes: + """ + Converts a string argument to bytes. + + Args: + arg (str): An encodable argument. + + Returns: + bytes: The encoded argument as bytes. 
+ """ + + # TODO: Allow passing different encoding options + return bytes(arg, encoding="utf8") + + # TODO: change `List[str]` to `List[TEncodable]` where `TEncodable = Union[str, bytes]` + def _encode_and_sum_size( + self, + args_list: Optional[List[str]], + ) -> Tuple[List[bytes], int]: + """ + Encodes the list and calculates the total memory size. + + Args: + args_list (Optional[List[str]]): A list of strings to be converted to bytes. + If None or empty, returns ([], 0). + + Returns: + int: The total memory size of the encoded arguments in bytes. + """ + args_size = 0 + encoded_args_list: List[bytes] = [] + if not args_list: + return (encoded_args_list, args_size) + for arg in args_list: + encoded_arg = self._encode_arg(arg) + encoded_args_list.append(encoded_arg) + args_size += sys.getsizeof(encoded_arg) + return (encoded_args_list, args_size) + async def _execute_command( self, request_type: RequestType.ValueType, @@ -195,9 +250,13 @@ async def _execute_command( request = RedisRequest() request.callback_idx = self._get_callback_index() request.single_command.request_type = request_type - request.single_command.args_array.args[:] = [ - bytes(elem, encoding="utf8") for elem in args - ] # TODO - use arg pointer + (encoded_args, args_size) = self._encode_and_sum_size(args) + if args_size < MAX_REQUEST_ARGS_LEN: + request.single_command.args_array.args[:] = encoded_args + else: + request.single_command.args_vec_pointer = create_leaked_bytes_vec( + encoded_args + ) set_protobuf_route(request, route) return await self._write_request_await_response(request) @@ -217,8 +276,12 @@ async def _execute_transaction( command = Command() command.request_type = requst_type # For now, we allow the user to pass the command as array of strings - # we convert them here into bytearray (the datatype that our rust core expects) - command.args_array.args[:] = [bytes(elem, encoding="utf8") for elem in args] + # we convert them here into bytes (the datatype that our rust core expects) + 
(encoded_args, args_size) = self._encode_and_sum_size(args) + if args_size < MAX_REQUEST_ARGS_LEN: + command.args_array.args[:] = encoded_args + else: + command.args_vec_pointer = create_leaked_bytes_vec(encoded_args) transaction_commands.append(command) request.transaction.commands.extend(transaction_commands) set_protobuf_route(request, route) @@ -243,6 +306,119 @@ async def _execute_script( set_protobuf_route(request, route) return await self._write_request_await_response(request) + async def get_pubsub_message(self) -> CoreCommands.PubSubMsg: + if self._is_closed: + raise ClosingError( + "Unable to execute requests; the client is closed. Please create a new client." + ) + + if not self.config._is_pubsub_configured(): + raise ConfigurationError( + "The operation will never complete since there was no pubsub subscriptions applied to the client." + ) + + if self.config._get_pubsub_callback_and_context()[0] is not None: + raise ConfigurationError( + "The operation will never complete since messages will be passed to the configured callback." + ) + + # locking might not be required + response_future: asyncio.Future = asyncio.Future() + try: + self._pubsub_lock.acquire() + self._pubsub_futures.append(response_future) + self._complete_pubsub_futures_safe() + finally: + self._pubsub_lock.release() + return await response_future + + def try_get_pubsub_message(self) -> Optional[CoreCommands.PubSubMsg]: + if self._is_closed: + raise ClosingError( + "Unable to execute requests; the client is closed. Please create a new client." + ) + + if not self.config._is_pubsub_configured(): + raise ConfigurationError( + "The operation will never succeed since there was no pubsbub subscriptions applied to the client." + ) + + if self.config._get_pubsub_callback_and_context()[0] is not None: + raise ConfigurationError( + "The operation will never succeed since messages will be passed to the configured callback." 
+ ) + + # locking might not be required + msg: Optional[CoreCommands.PubSubMsg] = None + try: + self._pubsub_lock.acquire() + self._complete_pubsub_futures_safe() + while len(self._pending_push_notifications) and not msg: + push_notification = self._pending_push_notifications.pop(0) + msg = self._notification_to_pubsub_message_safe(push_notification) + finally: + self._pubsub_lock.release() + return msg + + def _cancel_pubsub_futures_with_exception_safe(self, exception: ConnectionError): + while len(self._pubsub_futures): + next_future = self._pubsub_futures.pop(0) + if not next_future.cancelled(): + next_future.set_exception(exception) + + def _notification_to_pubsub_message_safe( + self, response: Response + ) -> Optional[CoreCommands.PubSubMsg]: + pubsub_message = None + push_notification = cast( + Dict[str, Any], value_from_pointer(response.resp_pointer) + ) + message_kind = push_notification["kind"] + if message_kind == "Disconnection": + ClientLogger.log( + LogLevel.WARN, + "disconnect notification", + "Transport disconnected, messages might be lost", + ) + elif ( + message_kind == "Message" + or message_kind == "PMessage" + or message_kind == "SMessage" + ): + values: List = push_notification["values"] + if message_kind == "PMessage": + pubsub_message = BaseClient.PubSubMsg( + message=values[2], channel=values[1], pattern=values[0] + ) + else: + pubsub_message = BaseClient.PubSubMsg( + message=values[1], channel=values[0], pattern=None + ) + elif ( + message_kind == "PSubscribe" + or message_kind == "Subscribe" + or message_kind == "SSubscribe" + or message_kind == "Unsubscribe" + ): + pass + else: + ClientLogger.log( + LogLevel.WARN, + "unknown notification", + f"Unknown notification message: '{message_kind}'", + ) + + return pubsub_message + + def _complete_pubsub_futures_safe(self): + while len(self._pending_push_notifications) and len(self._pubsub_futures): + next_push_notification = self._pending_push_notifications.pop(0) + pubsub_message = 
self._notification_to_pubsub_message_safe( + next_push_notification + ) + if pubsub_message: + self._pubsub_futures.pop(0).set_result(pubsub_message) + async def _write_request_await_response(self, request: RedisRequest): # Create a response future for this request and add it to the available # futures map @@ -258,6 +434,53 @@ def _get_callback_index(self) -> int: # The list is empty return len(self._available_futures) + async def _process_response(self, response: Response) -> None: + res_future = self._available_futures.pop(response.callback_idx, None) + if not res_future or response.HasField("closing_error"): + err_msg = ( + response.closing_error + if response.HasField("closing_error") + else f"Client Error - closing due to unknown error. callback index: {response.callback_idx}" + ) + if res_future is not None: + res_future.set_exception(ClosingError(err_msg)) + await self.close(err_msg) + raise ClosingError(err_msg) + else: + self._available_callback_indexes.append(response.callback_idx) + if response.HasField("request_error"): + error_type = get_request_error_class(response.request_error.type) + res_future.set_exception(error_type(response.request_error.message)) + elif response.HasField("resp_pointer"): + res_future.set_result(value_from_pointer(response.resp_pointer)) + elif response.HasField("constant_response"): + res_future.set_result(OK) + else: + res_future.set_result(None) + + async def _process_push(self, response: Response) -> None: + if response.HasField("closing_error") or not response.HasField("resp_pointer"): + err_msg = ( + response.closing_error + if response.HasField("closing_error") + else "Client Error - push notification without resp_pointer" + ) + await self.close(err_msg) + raise ClosingError(err_msg) + + try: + self._pubsub_lock.acquire() + callback, context = self.config._get_pubsub_callback_and_context() + if callback: + pubsub_message = self._notification_to_pubsub_message_safe(response) + if pubsub_message: + callback(pubsub_message, 
context) + else: + self._pending_push_notifications.append(response) + self._complete_pubsub_futures_safe() + finally: + self._pubsub_lock.release() + async def _reader_loop(self) -> None: # Socket reader loop remaining_read_bytes = bytearray() @@ -280,37 +503,15 @@ async def _reader_loop(self) -> None: remaining_read_bytes = read_bytes[offset:] break response = cast(Response, response) - res_future = self._available_futures.pop(response.callback_idx, None) - if not res_future or response.HasField("closing_error"): - err_msg = ( - response.closing_error - if response.HasField("closing_error") - else f"Client Error - closing due to unknown error. callback index: {response.callback_idx}" - ) - if res_future is not None: - res_future.set_exception(ClosingError(err_msg)) - await self.close(err_msg) - raise ClosingError(err_msg) + if response.is_push: + await self._process_push(response=response) else: - self._available_callback_indexes.append(response.callback_idx) - if response.HasField("request_error"): - error_type = get_request_error_class( - response.request_error.type - ) - res_future.set_exception( - error_type(response.request_error.message) - ) - elif response.HasField("resp_pointer"): - res_future.set_result(value_from_pointer(response.resp_pointer)) - elif response.HasField("constant_response"): - res_future.set_result(OK) - else: - res_future.set_result(None) - - -class RedisClusterClient(BaseRedisClient, ClusterCommands): + await self._process_response(response=response) + + +class GlideClusterClient(BaseClient, ClusterCommands): """ - Client used for connection to cluster Redis servers. + Client used for connection to cluster servers. 
For full documentation, see https://github.com/aws/babushka/wiki/Python-wrapper#redis-cluster """ @@ -319,14 +520,12 @@ def _get_protobuf_conn_request(self) -> ConnectionRequest: return self.config._create_a_protobuf_conn_request(cluster_mode=True) -class RedisClient(BaseRedisClient, StandaloneCommands): +class GlideClient(BaseClient, StandaloneCommands): """ - Client used for connection to standalone Redis servers. + Client used for connection to standalone servers. For full documentation, see https://github.com/aws/babushka/wiki/Python-wrapper#redis-standalone """ - pass - -TRedisClient = Union[RedisClient, RedisClusterClient] +TGlideClient = Union[GlideClient, GlideClusterClient] diff --git a/python/python/glide/logger.py b/python/python/glide/logger.py index ae8c09e9bc..2426136aad 100644 --- a/python/python/glide/logger.py +++ b/python/python/glide/logger.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from __future__ import annotations diff --git a/python/python/glide/protobuf_codec.py b/python/python/glide/protobuf_codec.py index 959637db58..859b85610e 100644 --- a/python/python/glide/protobuf_codec.py +++ b/python/python/glide/protobuf_codec.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import struct from typing import List, Tuple, Type diff --git a/python/python/glide/routes.py b/python/python/glide/routes.py index 29cf8b364c..a0c62cb70e 100644 --- a/python/python/glide/routes.py +++ b/python/python/glide/routes.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from enum import Enum from typing import Optional @@ -24,6 +24,12 @@ def __init__(self) -> None: class AllNodes(Route): + 
""" + Route request to all nodes. + Warning: + Don't use it with write commands, they could be routed to a replica (RO) node and fail. + """ + pass @@ -32,6 +38,12 @@ class AllPrimaries(Route): class RandomNode(Route): + """ + Route request to a random node. + Warning: + Don't use it with write commands, because they could be randomly routed to a replica (RO) node and fail. + """ + pass diff --git a/python/python/tests/__init__.py b/python/python/tests/__init__.py index 9d4ea1a992..fa59791e66 100644 --- a/python/python/tests/__init__.py +++ b/python/python/tests/__init__.py @@ -1 +1 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 diff --git a/python/python/tests/conftest.py b/python/python/tests/conftest.py index 7462cf9565..6006280e1d 100644 --- a/python/python/tests/conftest.py +++ b/python/python/tests/conftest.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import random from typing import AsyncGenerator, List, Optional, Union @@ -6,14 +6,14 @@ import pytest from glide.config import ( ClusterClientConfiguration, + GlideClientConfiguration, NodeAddress, ProtocolVersion, - RedisClientConfiguration, RedisCredentials, ) +from glide.glide_client import GlideClient, GlideClusterClient, TGlideClient from glide.logger import Level as logLevel from glide.logger import Logger -from glide.redis_client import RedisClient, RedisClusterClient, TRedisClient from tests.utils.cluster import RedisCluster DEFAULT_HOST = "localhost" @@ -206,7 +206,7 @@ def pytest_collection_modifyitems(config, items): @pytest.fixture() async def redis_client( request, cluster_mode: bool, protocol: ProtocolVersion -) -> AsyncGenerator[TRedisClient, None]: +) -> AsyncGenerator[TGlideClient, None]: "Get async socket client for tests" client = await 
create_client(request, cluster_mode, protocol=protocol) yield client @@ -223,7 +223,13 @@ async def create_client( client_name: Optional[str] = None, protocol: ProtocolVersion = ProtocolVersion.RESP3, timeout: Optional[int] = None, -) -> Union[RedisClient, RedisClusterClient]: + cluster_mode_pubsub: Optional[ + ClusterClientConfiguration.PubSubSubscriptions + ] = None, + standalone_mode_pubsub: Optional[ + GlideClientConfiguration.PubSubSubscriptions + ] = None, +) -> Union[GlideClient, GlideClusterClient]: # Create async socket client use_tls = request.config.getoption("--tls") if cluster_mode: @@ -238,11 +244,12 @@ async def create_client( client_name=client_name, protocol=protocol, request_timeout=timeout, + pubsub_subscriptions=cluster_mode_pubsub, ) - return await RedisClusterClient.create(cluster_config) + return await GlideClusterClient.create(cluster_config) else: assert type(pytest.standalone_cluster) is RedisCluster - config = RedisClientConfiguration( + config = GlideClientConfiguration( addresses=( pytest.standalone_cluster.nodes_addr if addresses is None else addresses ), @@ -252,8 +259,9 @@ async def create_client( client_name=client_name, protocol=protocol, request_timeout=timeout, + pubsub_subscriptions=standalone_mode_pubsub, ) - return await RedisClient.create(config) + return await GlideClient.create(config) async def test_teardown(request, cluster_mode: bool, protocol: ProtocolVersion): diff --git a/python/python/tests/test_async_client.py b/python/python/tests/test_async_client.py index 415d92a855..3364491894 100644 --- a/python/python/tests/test_async_client.py +++ b/python/python/tests/test_async_client.py @@ -1,28 +1,43 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from __future__ import annotations import asyncio +import copy import math import time from collections.abc import Mapping from datetime import date, datetime, 
timedelta, timezone -from typing import Any, Dict, Union, cast +from typing import Any, Dict, List, Union, cast import pytest from glide import ClosingError, RequestError, Script +from glide.async_commands.bitmap import ( + BitFieldGet, + BitFieldIncrBy, + BitFieldOverflow, + BitFieldSet, + BitmapIndexType, + BitOffset, + BitOffsetMultiplier, + BitOverflowControl, + BitwiseOperation, + OffsetOptions, + SignedEncoding, + UnsignedEncoding, +) from glide.async_commands.command_args import Limit, ListDirection, OrderBy from glide.async_commands.core import ( ConditionalChange, ExpireOptions, + ExpiryGetEx, ExpirySet, ExpiryType, + ExpiryTypeGetEx, + FlushMode, InfBound, InfoSection, InsertPosition, - StreamAddOptions, - TrimByMaxLen, - TrimByMinId, UpdateOptions, ) from glide.async_commands.sorted_set import ( @@ -34,17 +49,32 @@ GeoUnit, InfBound, LexBoundary, - Limit, - OrderBy, RangeByIndex, RangeByLex, RangeByScore, ScoreBoundary, ScoreFilter, ) -from glide.config import ProtocolVersion, RedisCredentials +from glide.async_commands.stream import ( + ExclusiveIdBound, + IdBound, + MaxId, + MinId, + StreamAddOptions, + StreamGroupOptions, + StreamReadGroupOptions, + StreamReadOptions, + TrimByMaxLen, + TrimByMinId, +) +from glide.config import ( + ClusterClientConfiguration, + GlideClientConfiguration, + ProtocolVersion, + RedisCredentials, +) from glide.constants import OK, TResult -from glide.redis_client import RedisClient, RedisClusterClient, TRedisClient +from glide.glide_client import GlideClient, GlideClusterClient, TGlideClient from glide.routes import ( AllNodes, AllPrimaries, @@ -68,10 +98,10 @@ @pytest.mark.asyncio -class TestRedisClients: +class TestGlideClients: @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_register_client_name_and_version(self, redis_client: TRedisClient): + async def test_register_client_name_and_version(self, redis_client: 
TGlideClient): min_version = "7.2.0" if await check_if_server_version_lt(redis_client, min_version): # TODO: change it to pytest fixture after we'll implement a sync client @@ -83,10 +113,13 @@ async def test_register_client_name_and_version(self, redis_client: TRedisClient @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_send_and_receive_large_values(self, redis_client: TRedisClient): - length = 2**16 - key = get_random_string(length) - value = get_random_string(length) + async def test_send_and_receive_large_values(self, request, cluster_mode, protocol): + redis_client = await create_client( + request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + ) + length = 2**25 # 33mb + key = "0" * length + value = "0" * length assert len(key) == length assert len(value) == length await redis_client.set(key, value) @@ -94,7 +127,7 @@ async def test_send_and_receive_large_values(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_send_and_receive_non_ascii_unicode(self, redis_client: TRedisClient): + async def test_send_and_receive_non_ascii_unicode(self, redis_client: TGlideClient): key = "foo" value = "שלום hello 汉字" assert value == "שלום hello 汉字" @@ -105,7 +138,7 @@ async def test_send_and_receive_non_ascii_unicode(self, redis_client: TRedisClie @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_client_handle_concurrent_workload_without_dropping_or_changing_values( - self, redis_client: TRedisClient, value_size + self, redis_client: TGlideClient, value_size ): num_of_concurrent_tasks = 100 running_tasks = set() @@ -126,9 +159,9 @@ async def exec_command(i): @pytest.mark.parametrize("cluster_mode", [True, 
False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_can_connect_with_auth_requirepass( - self, redis_client: TRedisClient, request + self, redis_client: TGlideClient, request ): - is_cluster = isinstance(redis_client, RedisClusterClient) + is_cluster = isinstance(redis_client, GlideClusterClient) password = "TEST_AUTH" credentials = RedisCredentials(password) try: @@ -166,9 +199,9 @@ async def test_can_connect_with_auth_requirepass( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_can_connect_with_auth_acl( - self, redis_client: Union[RedisClient, RedisClusterClient], request + self, redis_client: Union[GlideClient, GlideClusterClient], request ): - is_cluster = isinstance(redis_client, RedisClusterClient) + is_cluster = isinstance(redis_client, GlideClusterClient) username = "testuser" password = "TEST_AUTH" try: @@ -231,7 +264,7 @@ async def test_client_name(self, request, cluster_mode, protocol): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_closed_client_raises_error(self, redis_client: TRedisClient): + async def test_closed_client_raises_error(self, redis_client: TGlideClient): await redis_client.close() with pytest.raises(ClosingError) as e: await redis_client.set("foo", "bar") @@ -243,7 +276,7 @@ class TestCommands: @pytest.mark.smoke_test @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_socket_set_get(self, redis_client: TRedisClient): + async def test_socket_set_get(self, redis_client: TGlideClient): key = get_random_string(10) value = datetime.now(timezone.utc).strftime("%m/%d/%Y, %H:%M:%S") assert await redis_client.set(key, value) == OK @@ -251,21 +284,21 @@ async def 
test_socket_set_get(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP3]) - async def test_use_resp3_protocol(self, redis_client: TRedisClient): + async def test_use_resp3_protocol(self, redis_client: TGlideClient): result = cast(Dict[str, str], await redis_client.custom_command(["HELLO"])) assert int(result["proto"]) == 3 @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2]) - async def test_allow_opt_in_to_resp2_protocol(self, redis_client: TRedisClient): + async def test_allow_opt_in_to_resp2_protocol(self, redis_client: TGlideClient): result = cast(Dict[str, str], await redis_client.custom_command(["HELLO"])) assert int(result["proto"]) == 2 @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_conditional_set(self, redis_client: TRedisClient): + async def test_conditional_set(self, redis_client: TGlideClient): key = get_random_string(10) value = get_random_string(10) res = await redis_client.set( @@ -285,7 +318,7 @@ async def test_conditional_set(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_set_return_old_value(self, redis_client: TRedisClient): + async def test_set_return_old_value(self, redis_client: TGlideClient): min_version = "6.2.0" if await check_if_server_version_lt(redis_client, min_version): # TODO: change it to pytest fixture after we'll implement a sync client @@ -302,14 +335,14 @@ async def test_set_return_old_value(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_custom_command_single_arg(self, 
redis_client: TRedisClient): + async def test_custom_command_single_arg(self, redis_client: TGlideClient): # Test single arg command res = await redis_client.custom_command(["PING"]) assert res == "PONG" @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_custom_command_multi_arg(self, redis_client: TRedisClient): + async def test_custom_command_multi_arg(self, redis_client: TGlideClient): # Test multi args command client_list = await redis_client.custom_command( ["CLIENT", "LIST", "TYPE", "NORMAL"] @@ -323,7 +356,7 @@ async def test_custom_command_multi_arg(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_custom_command_lower_and_upper_case( - self, redis_client: TRedisClient + self, redis_client: TGlideClient ): # Test multi args command client_list = await redis_client.custom_command( @@ -337,7 +370,7 @@ async def test_custom_command_lower_and_upper_case( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_request_error_raises_exception(self, redis_client: TRedisClient): + async def test_request_error_raises_exception(self, redis_client: TGlideClient): key = get_random_string(10) value = get_random_string(10) await redis_client.set(key, value) @@ -347,11 +380,11 @@ async def test_request_error_raises_exception(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_info_server_replication(self, redis_client: TRedisClient): + async def test_info_server_replication(self, redis_client: TGlideClient): info = get_first_result(await redis_client.info([InfoSection.SERVER])) assert "# 
Server" in info cluster_mode = parse_info_response(info)["redis_mode"] - expected_cluster_mode = isinstance(redis_client, RedisClusterClient) + expected_cluster_mode = isinstance(redis_client, GlideClusterClient) assert cluster_mode == "cluster" if expected_cluster_mode else "standalone" info = get_first_result(await redis_client.info([InfoSection.REPLICATION])) assert "# Replication" in info @@ -359,8 +392,8 @@ async def test_info_server_replication(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_info_default(self, redis_client: TRedisClient): - cluster_mode = isinstance(redis_client, RedisClusterClient) + async def test_info_default(self, redis_client: TGlideClient): + cluster_mode = isinstance(redis_client, GlideClusterClient) info_result = await redis_client.info() if cluster_mode: cluster_nodes = await redis_client.custom_command(["CLUSTER", "NODES"]) @@ -373,7 +406,7 @@ async def test_info_default(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_select(self, redis_client: RedisClient): + async def test_select(self, redis_client: GlideClient): assert await redis_client.select(0) == OK key = get_random_string(10) value = get_random_string(10) @@ -386,7 +419,7 @@ async def test_select(self, redis_client: RedisClient): @pytest.mark.parametrize("cluster_mode", [False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_move(self, redis_client: RedisClient): + async def test_move(self, redis_client: GlideClient): key = get_random_string(10) value = get_random_string(10) @@ -406,7 +439,7 @@ async def test_move(self, redis_client: RedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", 
[ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_delete(self, redis_client: TRedisClient): + async def test_delete(self, redis_client: TGlideClient): keys = [get_random_string(10), get_random_string(10), get_random_string(10)] value = get_random_string(10) [await redis_client.set(key, value) for key in keys] @@ -419,7 +452,7 @@ async def test_delete(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_getdel(self, redis_client: TRedisClient): + async def test_getdel(self, redis_client: TGlideClient): key = get_random_string(10) value = get_random_string(10) non_existing_key = get_random_string(10) @@ -439,7 +472,41 @@ async def test_getdel(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_config_reset_stat(self, redis_client: TRedisClient): + async def test_getrange(self, redis_client: TGlideClient): + key = get_random_string(16) + value = get_random_string(10) + non_string_key = get_random_string(10) + + assert await redis_client.set(key, value) == OK + assert await redis_client.getrange(key, 0, 3) == value[:4] + assert await redis_client.getrange(key, -3, -1) == value[-3:] + assert await redis_client.getrange(key, 0, -1) == value + + # out of range + assert await redis_client.getrange(key, 10, 100) == value[10:] + assert await redis_client.getrange(key, -200, -3) == value[-200:-2] + assert await redis_client.getrange(key, 100, 200) == "" + + # incorrect range + assert await redis_client.getrange(key, -1, -3) == "" + + # a redis bug, fixed in version 8: https://github.com/redis/redis/issues/13207 + if await check_if_server_version_lt(redis_client, "8.0.0"): + assert await redis_client.getrange(key, -200, -100) == value[0] + else: + assert await redis_client.getrange(key, 
-200, -100) == "" + + if await check_if_server_version_lt(redis_client, "8.0.0"): + assert await redis_client.getrange(non_string_key, 0, -1) == "" + + # non-string key + assert await redis_client.lpush(non_string_key, ["_"]) == 1 + with pytest.raises(RequestError): + await redis_client.getrange(non_string_key, 0, -1) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_config_reset_stat(self, redis_client: TGlideClient): # we execute set and info so the commandstats will show `cmdstat_set::calls` greater than 1 # after the configResetStat call we initiate an info command and the the commandstats won't contain `cmdstat_set`. await redis_client.set("foo", "bar") @@ -455,7 +522,7 @@ async def test_config_reset_stat(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_config_rewrite(self, redis_client: TRedisClient): + async def test_config_rewrite(self, redis_client: TGlideClient): info_server = parse_info_response( get_first_result(await redis_client.info([InfoSection.SERVER])) ) @@ -469,14 +536,14 @@ async def test_config_rewrite(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_client_id(self, redis_client: TRedisClient): + async def test_client_id(self, redis_client: TGlideClient): client_id = await redis_client.client_id() assert type(client_id) is int assert client_id > 0 @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_incr_commands_existing_key(self, redis_client: TRedisClient): + async def test_incr_commands_existing_key(self, redis_client: TGlideClient): key = 
get_random_string(10) assert await redis_client.set(key, "10") == OK assert await redis_client.incr(key) == 11 @@ -488,7 +555,7 @@ async def test_incr_commands_existing_key(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_incr_commands_non_existing_key(self, redis_client: TRedisClient): + async def test_incr_commands_non_existing_key(self, redis_client: TGlideClient): key = get_random_string(10) key2 = get_random_string(10) key3 = get_random_string(10) @@ -507,7 +574,7 @@ async def test_incr_commands_non_existing_key(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_incr_commands_with_str_value(self, redis_client: TRedisClient): + async def test_incr_commands_with_str_value(self, redis_client: TGlideClient): key = get_random_string(10) assert await redis_client.set(key, "foo") == OK with pytest.raises(RequestError) as e: @@ -526,7 +593,7 @@ async def test_incr_commands_with_str_value(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_client_getname(self, redis_client: TRedisClient): + async def test_client_getname(self, redis_client: TGlideClient): assert await redis_client.client_getname() is None assert ( await redis_client.custom_command(["CLIENT", "SETNAME", "GlideConnection"]) @@ -536,7 +603,7 @@ async def test_client_getname(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_mset_mget(self, redis_client: TRedisClient): + async def test_mset_mget(self, redis_client: TGlideClient): keys = 
[get_random_string(10), get_random_string(10), get_random_string(10)] non_existing_key = get_random_string(10) key_value_pairs = {key: value for key, value in zip(keys, keys)} @@ -551,7 +618,19 @@ async def test_mset_mget(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_msetnx(self, redis_client: TRedisClient): + async def test_touch(self, redis_client: TGlideClient): + keys = [get_random_string(10), get_random_string(10)] + key_value_pairs = {key: value for key, value in zip(keys, keys)} + + assert await redis_client.mset(key_value_pairs) == OK + assert await redis_client.touch(keys) == 2 + + # 2 existing keys, one non-existing + assert await redis_client.touch([*keys, get_random_string(3)]) == 2 + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_msetnx(self, redis_client: TGlideClient): key1 = f"{{key}}-1{get_random_string(5)}" key2 = f"{{key}}-2{get_random_string(5)}" key3 = f"{{key}}-3{get_random_string(5)}" @@ -570,13 +649,13 @@ async def test_msetnx(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_ping(self, redis_client: TRedisClient): + async def test_ping(self, redis_client: TGlideClient): assert await redis_client.ping() == "PONG" assert await redis_client.ping("HELLO") == "HELLO" @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_config_get_set(self, redis_client: TRedisClient): + async def test_config_get_set(self, redis_client: TGlideClient): previous_timeout = await redis_client.config_get(["timeout"]) assert await redis_client.config_set({"timeout": 
"1000"}) == OK assert await redis_client.config_get(["timeout"]) == {"timeout": "1000"} @@ -590,7 +669,7 @@ async def test_config_get_set(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_decr_decrby_existing_key(self, redis_client: TRedisClient): + async def test_decr_decrby_existing_key(self, redis_client: TGlideClient): key = get_random_string(10) assert await redis_client.set(key, "10") == OK assert await redis_client.decr(key) == 9 @@ -600,7 +679,7 @@ async def test_decr_decrby_existing_key(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_decr_decrby_non_existing_key(self, redis_client: TRedisClient): + async def test_decr_decrby_non_existing_key(self, redis_client: TGlideClient): key = get_random_string(10) key2 = get_random_string(10) @@ -614,7 +693,7 @@ async def test_decr_decrby_non_existing_key(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_decr_with_str_value(self, redis_client: TRedisClient): + async def test_decr_with_str_value(self, redis_client: TGlideClient): key = get_random_string(10) assert await redis_client.set(key, "foo") == OK with pytest.raises(RequestError) as e: @@ -629,7 +708,7 @@ async def test_decr_with_str_value(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_setrange(self, redis_client: TRedisClient): + async def test_setrange(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) @@ -651,7 +730,7 @@ async def test_setrange(self, 
redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hset_hget_hgetall(self, redis_client: TRedisClient): + async def test_hset_hget_hgetall(self, redis_client: TGlideClient): key = get_random_string(10) field = get_random_string(5) field2 = get_random_string(5) @@ -669,7 +748,7 @@ async def test_hset_hget_hgetall(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hdel(self, redis_client: TRedisClient): + async def test_hdel(self, redis_client: TGlideClient): key = get_random_string(10) field = get_random_string(5) field2 = get_random_string(5) @@ -683,7 +762,7 @@ async def test_hdel(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hsetnx(self, redis_client: TRedisClient): + async def test_hsetnx(self, redis_client: TGlideClient): key = get_random_string(10) field = get_random_string(5) @@ -697,7 +776,7 @@ async def test_hsetnx(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hmget(self, redis_client: TRedisClient): + async def test_hmget(self, redis_client: TGlideClient): key = get_random_string(10) field = get_random_string(5) field2 = get_random_string(5) @@ -716,7 +795,7 @@ async def test_hmget(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hset_without_data(self, redis_client: TRedisClient): + async def test_hset_without_data(self, redis_client: TGlideClient): 
with pytest.raises(RequestError) as e: await redis_client.hset("key", {}) @@ -724,7 +803,7 @@ async def test_hset_without_data(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hincrby_hincrbyfloat(self, redis_client: TRedisClient): + async def test_hincrby_hincrbyfloat(self, redis_client: TGlideClient): key = get_random_string(10) field = get_random_string(5) field_value_map = {field: "10"} @@ -736,7 +815,7 @@ async def test_hincrby_hincrbyfloat(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hincrby_non_existing_key_field(self, redis_client: TRedisClient): + async def test_hincrby_non_existing_key_field(self, redis_client: TGlideClient): key = get_random_string(10) key2 = get_random_string(10) field = get_random_string(5) @@ -750,7 +829,7 @@ async def test_hincrby_non_existing_key_field(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hincrby_invalid_value(self, redis_client: TRedisClient): + async def test_hincrby_invalid_value(self, redis_client: TGlideClient): key = get_random_string(10) field = get_random_string(5) field_value_map = {field: "value"} @@ -767,7 +846,7 @@ async def test_hincrby_invalid_value(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hexist(self, redis_client: TRedisClient): + async def test_hexist(self, redis_client: TGlideClient): key = get_random_string(10) field = get_random_string(5) field2 = get_random_string(5) @@ -780,7 +859,7 @@ async def test_hexist(self, 
redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hlen(self, redis_client: TRedisClient): + async def test_hlen(self, redis_client: TGlideClient): key = get_random_string(10) key2 = get_random_string(5) field = get_random_string(5) @@ -799,7 +878,7 @@ async def test_hlen(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hvals(self, redis_client: TRedisClient): + async def test_hvals(self, redis_client: TGlideClient): key = get_random_string(10) key2 = get_random_string(5) field = get_random_string(5) @@ -818,7 +897,7 @@ async def test_hvals(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hkeys(self, redis_client: TRedisClient): + async def test_hkeys(self, redis_client: TGlideClient): key = get_random_string(10) key2 = get_random_string(5) field = get_random_string(5) @@ -837,7 +916,7 @@ async def test_hkeys(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hrandfield(self, redis_client: TRedisClient): + async def test_hrandfield(self, redis_client: TGlideClient): key = get_random_string(10) key2 = get_random_string(5) field = get_random_string(5) @@ -854,7 +933,7 @@ async def test_hrandfield(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hrandfield_count(self, redis_client: TRedisClient): + async def test_hrandfield_count(self, redis_client: TGlideClient): key = 
get_random_string(10) key2 = get_random_string(5) field = get_random_string(5) @@ -882,7 +961,7 @@ async def test_hrandfield_count(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_hrandfield_withvalues(self, redis_client: TRedisClient): + async def test_hrandfield_withvalues(self, redis_client: TGlideClient): key = get_random_string(10) key2 = get_random_string(5) field = get_random_string(5) @@ -911,7 +990,22 @@ async def test_hrandfield_withvalues(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_lpush_lpop_lrange(self, redis_client: TRedisClient): + async def test_hstrlen(self, redis_client: TGlideClient): + key = get_random_string(10) + + assert await redis_client.hstrlen(key, "field") == 0 + assert await redis_client.hset(key, {"field": "value"}) == 1 + assert await redis_client.hstrlen(key, "field") == 5 + + assert await redis_client.hstrlen(key, "field2") == 0 + + await redis_client.set(key, "value") + with pytest.raises(RequestError): + await redis_client.hstrlen(key, "field") + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_lpush_lpop_lrange(self, redis_client: TGlideClient): key = get_random_string(10) value_list = ["value4", "value3", "value2", "value1"] @@ -925,7 +1019,7 @@ async def test_lpush_lpop_lrange(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_lpush_lpop_lrange_wrong_type_raise_error( - self, redis_client: TRedisClient + self, redis_client: TGlideClient ): key = get_random_string(10) assert await 
redis_client.set(key, "foo") == OK @@ -944,7 +1038,7 @@ async def test_lpush_lpop_lrange_wrong_type_raise_error( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_lpushx(self, redis_client: TRedisClient): + async def test_lpushx(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) @@ -965,7 +1059,7 @@ async def test_lpushx(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_blpop(self, redis_client: TRedisClient): + async def test_blpop(self, redis_client: TGlideClient): key1 = f"{{test}}-1-f{get_random_string(10)}" key2 = f"{{test}}-2-f{get_random_string(10)}" value1 = "value1" @@ -992,7 +1086,7 @@ async def endless_blpop_call(): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_lmpop(self, redis_client: TRedisClient): + async def test_lmpop(self, redis_client: TGlideClient): min_version = "7.0.0" if await check_if_server_version_lt(redis_client, min_version): return pytest.mark.skip(reason=f"Redis version required >= {min_version}") @@ -1031,7 +1125,7 @@ async def test_lmpop(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_blmpop(self, redis_client: TRedisClient): + async def test_blmpop(self, redis_client: TGlideClient): min_version = "7.0.0" if await check_if_server_version_lt(redis_client, min_version): return pytest.mark.skip(reason=f"Redis version required >= {min_version}") @@ -1079,7 +1173,7 @@ async def endless_blmpop_call(): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", 
[ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_lindex(self, redis_client: TRedisClient): + async def test_lindex(self, redis_client: TGlideClient): key = get_random_string(10) value_list = [get_random_string(5), get_random_string(5)] assert await redis_client.lpush(key, value_list) == 2 @@ -1090,7 +1184,7 @@ async def test_lindex(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_rpush_rpop(self, redis_client: TRedisClient): + async def test_rpush_rpop(self, redis_client: TGlideClient): key = get_random_string(10) value_list = ["value4", "value3", "value2", "value1"] @@ -1102,7 +1196,7 @@ async def test_rpush_rpop(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_rpush_rpop_wrong_type_raise_error(self, redis_client: TRedisClient): + async def test_rpush_rpop_wrong_type_raise_error(self, redis_client: TGlideClient): key = get_random_string(10) assert await redis_client.set(key, "foo") == OK @@ -1116,7 +1210,7 @@ async def test_rpush_rpop_wrong_type_raise_error(self, redis_client: TRedisClien @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_rpushx(self, redis_client: TRedisClient): + async def test_rpushx(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) @@ -1137,7 +1231,7 @@ async def test_rpushx(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_brpop(self, redis_client: TRedisClient): + async def test_brpop(self, redis_client: TGlideClient): key1 = 
f"{{test}}-1-f{get_random_string(10)}" key2 = f"{{test}}-2-f{get_random_string(10)}" value1 = "value1" @@ -1165,7 +1259,7 @@ async def endless_brpop_call(): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_linsert(self, redis_client: TRedisClient): + async def test_linsert(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) @@ -1196,7 +1290,7 @@ async def test_linsert(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_lmove(self, redis_client: TRedisClient): + async def test_lmove(self, redis_client: TGlideClient): key1 = "{SameSlot}" + get_random_string(10) key2 = "{SameSlot}" + get_random_string(10) @@ -1265,7 +1359,7 @@ async def test_lmove(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_blmove(self, redis_client: TRedisClient): + async def test_blmove(self, redis_client: TGlideClient): key1 = "{SameSlot}" + get_random_string(10) key2 = "{SameSlot}" + get_random_string(10) @@ -1355,7 +1449,37 @@ async def endless_blmove_call(): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_sadd_srem_smembers_scard(self, redis_client: TRedisClient): + async def test_lset(self, redis_client: TGlideClient): + key = get_random_string(10) + element = get_random_string(5) + values = [get_random_string(5) for _ in range(4)] + + # key does not exist + with pytest.raises(RequestError): + await redis_client.lset("non_existing_key", 0, element) + + # pushing elements to list + await redis_client.lpush(key, values) == 4 + + # index out of range + with 
pytest.raises(RequestError): + await redis_client.lset(key, 10, element) + + # assert lset result + assert await redis_client.lset(key, 0, element) == OK + + values = [element] + values[:-1][::-1] + assert await redis_client.lrange(key, 0, -1) == values + + # assert lset with a negative index for the last element in the list + assert await redis_client.lset(key, -1, element) == OK + + values[-1] = element + assert await redis_client.lrange(key, 0, -1) == values + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_sadd_srem_smembers_scard(self, redis_client: TGlideClient): key = get_random_string(10) value_list = ["member1", "member2", "member3", "member4"] @@ -1370,7 +1494,7 @@ async def test_sadd_srem_smembers_scard(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_sadd_srem_smembers_scard_non_existing_key( - self, redis_client: TRedisClient + self, redis_client: TGlideClient ): non_existing_key = get_random_string(10) assert await redis_client.srem(non_existing_key, ["member"]) == 0 @@ -1380,7 +1504,7 @@ async def test_sadd_srem_smembers_scard_non_existing_key( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_sadd_srem_smembers_scard_wrong_type_raise_error( - self, redis_client: TRedisClient + self, redis_client: TGlideClient ): key = get_random_string(10) assert await redis_client.set(key, "foo") == OK @@ -1403,7 +1527,7 @@ async def test_sadd_srem_smembers_scard_wrong_type_raise_error( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_sismember(self, redis_client: TRedisClient): + async def 
test_sismember(self, redis_client: TGlideClient): key = get_random_string(10) member = get_random_string(5) assert await redis_client.sadd(key, [member]) == 1 @@ -1413,7 +1537,7 @@ async def test_sismember(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_spop(self, redis_client: TRedisClient): + async def test_spop(self, redis_client: TGlideClient): key = get_random_string(10) member = get_random_string(5) assert await redis_client.sadd(key, [member]) == 1 @@ -1431,7 +1555,7 @@ async def test_spop(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_smove(self, redis_client: TRedisClient): + async def test_smove(self, redis_client: TGlideClient): key1 = f"{{testKey}}:1-{get_random_string(10)}" key2 = f"{{testKey}}:2-{get_random_string(10)}" key3 = f"{{testKey}}:3-{get_random_string(10)}" @@ -1477,7 +1601,32 @@ async def test_smove(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_sunionstore(self, redis_client: TRedisClient): + async def test_sunion(self, redis_client: TGlideClient): + key1 = f"{{testKey}}:{get_random_string(10)}" + key2 = f"{{testKey}}:{get_random_string(10)}" + non_existing_key = f"{{testKey}}:non_existing_key" + member1_list = ["a", "b", "c"] + member2_list = ["b", "c", "d", "e"] + + assert await redis_client.sadd(key1, member1_list) == 3 + assert await redis_client.sadd(key2, member2_list) == 4 + assert await redis_client.sunion([key1, key2]) == {"a", "b", "c", "d", "e"} + + # invalid argument - key list must not be empty + with pytest.raises(RequestError): + await redis_client.sunion([]) + + # non-existing key returns the 
set of existing keys + assert await redis_client.sunion([key1, non_existing_key]) == set(member1_list) + + # non-set key + assert await redis_client.set(key2, "value") == OK + with pytest.raises(RequestError) as e: + await redis_client.sunion([key2]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_sunionstore(self, redis_client: TGlideClient): key1 = f"{{testKey}}:1-{get_random_string(10)}" key2 = f"{{testKey}}:2-{get_random_string(10)}" key3 = f"{{testKey}}:3-{get_random_string(10)}" @@ -1524,7 +1673,7 @@ async def test_sunionstore(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_sinter(self, redis_client: TRedisClient): + async def test_sinter(self, redis_client: TGlideClient): key1 = f"{{testKey}}:{get_random_string(10)}" key2 = f"{{testKey}}:{get_random_string(10)}" non_existing_key = f"{{testKey}}:non_existing_key" @@ -1551,7 +1700,7 @@ async def test_sinter(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_sinterstore(self, redis_client: TRedisClient): + async def test_sinterstore(self, redis_client: TGlideClient): key1 = f"{{testKey}}:{get_random_string(10)}" key2 = f"{{testKey}}:{get_random_string(10)}" key3 = f"{{testKey}}:{get_random_string(10)}" @@ -1594,7 +1743,7 @@ async def test_sinterstore(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_sintercard(self, redis_client: TRedisClient): + async def test_sintercard(self, redis_client: TGlideClient): min_version = "7.0.0" if await 
check_if_server_version_lt(redis_client, min_version): return pytest.mark.skip(reason=f"Redis version required >= {min_version}") @@ -1641,7 +1790,7 @@ async def test_sintercard(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_sdiff(self, redis_client: TRedisClient): + async def test_sdiff(self, redis_client: TGlideClient): key1 = f"{{testKey}}:1-{get_random_string(10)}" key2 = f"{{testKey}}:2-{get_random_string(10)}" string_key = f"{{testKey}}:4-{get_random_string(10)}" @@ -1667,7 +1816,7 @@ async def test_sdiff(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_sdiffstore(self, redis_client: TRedisClient): + async def test_sdiffstore(self, redis_client: TGlideClient): key1 = f"{{testKey}}:1-{get_random_string(10)}" key2 = f"{{testKey}}:2-{get_random_string(10)}" key3 = f"{{testKey}}:3-{get_random_string(10)}" @@ -1712,7 +1861,7 @@ async def test_sdiffstore(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_smismember(self, redis_client: TRedisClient): + async def test_smismember(self, redis_client: TGlideClient): key1 = get_random_string(10) string_key = get_random_string(10) non_existing_key = get_random_string(10) @@ -1733,7 +1882,7 @@ async def test_smismember(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_ltrim(self, redis_client: TRedisClient): + async def test_ltrim(self, redis_client: TGlideClient): key = get_random_string(10) value_list = ["value4", "value3", "value2", "value1"] @@ -1753,7 
+1902,7 @@ async def test_ltrim(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_lrem(self, redis_client: TRedisClient): + async def test_lrem(self, redis_client: TGlideClient): key = get_random_string(10) value_list = ["value1", "value2", "value1", "value1", "value2"] @@ -1772,7 +1921,7 @@ async def test_lrem(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_llen(self, redis_client: TRedisClient): + async def test_llen(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) value_list = ["value4", "value3", "value2", "value1"] @@ -1789,7 +1938,7 @@ async def test_llen(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_strlen(self, redis_client: TRedisClient): + async def test_strlen(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) value_list = ["value4", "value3", "value2", "value1"] @@ -1804,7 +1953,7 @@ async def test_strlen(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_rename(self, redis_client: TRedisClient): + async def test_rename(self, redis_client: TGlideClient): key1 = "{" + get_random_string(10) + "}" assert await redis_client.set(key1, "foo") == OK assert await redis_client.rename(key1, key1 + "_rename") == OK @@ -1817,7 +1966,7 @@ async def test_rename(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, 
ProtocolVersion.RESP3]) - async def test_renamenx(self, redis_client: TRedisClient): + async def test_renamenx(self, redis_client: TGlideClient): key1 = f"{{testKey}}:1-{get_random_string(10)}" key2 = f"{{testKey}}:2-{get_random_string(10)}" key3 = f"{{testKey}}:3-{get_random_string(10)}" @@ -1841,7 +1990,7 @@ async def test_renamenx(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_exists(self, redis_client: TRedisClient): + async def test_exists(self, redis_client: TGlideClient): keys = [get_random_string(10), get_random_string(10)] assert await redis_client.set(keys[0], "value") == OK @@ -1854,7 +2003,7 @@ async def test_exists(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_unlink(self, redis_client: TRedisClient): + async def test_unlink(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) key3 = get_random_string(10) @@ -1866,11 +2015,16 @@ async def test_unlink(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_expire_pexpire_ttl_with_positive_timeout( - self, redis_client: TRedisClient + async def test_expire_pexpire_ttl_expiretime_pexpiretime_with_positive_timeout( + self, redis_client: TGlideClient ): key = get_random_string(10) assert await redis_client.set(key, "foo") == OK + assert await redis_client.ttl(key) == -1 + + if not await check_if_server_version_lt(redis_client, "7.0.0"): + assert await redis_client.expiretime(key) == -1 + assert await redis_client.pexpiretime(key) == -1 assert await redis_client.expire(key, 10) == 1 assert await redis_client.ttl(key) in range(11) @@ -1887,12 
+2041,14 @@ async def test_expire_pexpire_ttl_with_positive_timeout( assert await redis_client.expire(key, 15) else: assert await redis_client.expire(key, 15, ExpireOptions.HasExistingExpiry) + assert await redis_client.expiretime(key) > int(time.time()) + assert await redis_client.pexpiretime(key) > (int(time.time()) * 1000) assert await redis_client.ttl(key) in range(16) @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_expireat_pexpireat_ttl_with_positive_timeout( - self, redis_client: TRedisClient + self, redis_client: TGlideClient ): key = get_random_string(10) assert await redis_client.set(key, "foo") == OK @@ -1921,32 +2077,48 @@ async def test_expireat_pexpireat_ttl_with_positive_timeout( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_expire_pexpire_expireat_pexpireat_past_or_negative_timeout( - self, redis_client: TRedisClient + async def test_expire_pexpire_expireat_pexpireat_expiretime_pexpiretime_past_or_negative_timeout( + self, redis_client: TGlideClient ): key = get_random_string(10) assert await redis_client.set(key, "foo") == OK assert await redis_client.ttl(key) == -1 - assert await redis_client.expire(key, -10) == 1 + if not await check_if_server_version_lt(redis_client, "7.0.0"): + assert await redis_client.expiretime(key) == -1 + assert await redis_client.pexpiretime(key) == -1 + + assert await redis_client.expire(key, -10) is True assert await redis_client.ttl(key) == -2 + if not await check_if_server_version_lt(redis_client, "7.0.0"): + assert await redis_client.expiretime(key) == -2 + assert await redis_client.pexpiretime(key) == -2 assert await redis_client.set(key, "foo") == OK assert await redis_client.pexpire(key, -10000) assert await redis_client.ttl(key) == -2 + if not await check_if_server_version_lt(redis_client, 
"7.0.0"): + assert await redis_client.expiretime(key) == -2 + assert await redis_client.pexpiretime(key) == -2 assert await redis_client.set(key, "foo") == OK assert await redis_client.expireat(key, int(time.time()) - 50) == 1 assert await redis_client.ttl(key) == -2 + if not await check_if_server_version_lt(redis_client, "7.0.0"): + assert await redis_client.expiretime(key) == -2 + assert await redis_client.pexpiretime(key) == -2 assert await redis_client.set(key, "foo") == OK assert await redis_client.pexpireat(key, int(time.time() * 1000) - 50000) assert await redis_client.ttl(key) == -2 + if not await check_if_server_version_lt(redis_client, "7.0.0"): + assert await redis_client.expiretime(key) == -2 + assert await redis_client.pexpiretime(key) == -2 @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_expire_pexpire_expireAt_pexpireAt_ttl_non_existing_key( - self, redis_client: TRedisClient + async def test_expire_pexpire_expireAt_pexpireAt_ttl_expiretime_pexpiretime_non_existing_key( + self, redis_client: TGlideClient ): key = get_random_string(10) @@ -1955,10 +2127,13 @@ async def test_expire_pexpire_expireAt_pexpireAt_ttl_non_existing_key( assert await redis_client.expireat(key, int(time.time()) + 50) == 0 assert not await redis_client.pexpireat(key, int(time.time() * 1000) + 50000) assert await redis_client.ttl(key) == -2 + if not await check_if_server_version_lt(redis_client, "7.0.0"): + assert await redis_client.expiretime(key) == -2 + assert await redis_client.pexpiretime(key) == -2 @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_pttl(self, redis_client: TRedisClient): + async def test_pttl(self, redis_client: TGlideClient): key = get_random_string(10) assert await redis_client.pttl(key) == -2 current_time = int(time.time()) @@ -1977,7 
+2152,7 @@ async def test_pttl(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_persist(self, redis_client: TRedisClient): + async def test_persist(self, redis_client: TGlideClient): key = get_random_string(10) assert await redis_client.set(key, "value") == OK assert not await redis_client.persist(key) @@ -1987,7 +2162,7 @@ async def test_persist(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_geoadd(self, redis_client: TRedisClient): + async def test_geoadd(self, redis_client: TGlideClient): key, key2 = get_random_string(10), get_random_string(10) members_coordinates = { "Palermo": GeospatialData(13.361389, 38.115556), @@ -2028,7 +2203,7 @@ async def test_geoadd(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_geoadd_invalid_args(self, redis_client: TRedisClient): + async def test_geoadd_invalid_args(self, redis_client: TGlideClient): key = get_random_string(10) with pytest.raises(RequestError): @@ -2048,7 +2223,7 @@ async def test_geoadd_invalid_args(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_geosearch_by_box(self, redis_client: TRedisClient): + async def test_geosearch_by_box(self, redis_client: TGlideClient): key = get_random_string(10) members = ["Catania", "Palermo", "edge2", "edge1"] members_coordinates = { @@ -2145,7 +2320,7 @@ async def test_geosearch_by_box(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", 
[ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_geosearch_by_radius(self, redis_client: TRedisClient): + async def test_geosearch_by_radius(self, redis_client: TGlideClient): key = get_random_string(10) members_coordinates = { "Palermo": GeospatialData(13.361389, 38.115556), @@ -2190,7 +2365,7 @@ async def test_geosearch_by_radius(self, redis_client: TRedisClient): == members[:2][::-1] ) - # Test search by radius, unit: miles, from a geospatial data, with limited count to 1 + # Test search by radius, unit: miles, from a geospatial data assert ( await redis_client.geosearch( key, @@ -2213,7 +2388,7 @@ async def test_geosearch_by_radius(self, redis_client: TRedisClient): with_dist=True, with_hash=True, ) - == result[:2] + == result ) # Test search by radius, unit: kilometers, from a geospatial data, with limited ANY count to 1 @@ -2232,7 +2407,7 @@ async def test_geosearch_by_radius(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_geosearch_no_result(self, redis_client: TRedisClient): + async def test_geosearch_no_result(self, redis_client: TGlideClient): key = get_random_string(10) members_coordinates = { "Palermo": GeospatialData(13.361389, 38.115556), @@ -2294,162 +2469,464 @@ async def test_geosearch_no_result(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_geohash(self, redis_client: TRedisClient): - key = get_random_string(10) + async def test_geosearchstore_by_box(self, redis_client: TGlideClient): + key = f"{{testKey}}:{get_random_string(10)}" + destination_key = f"{{testKey}}:{get_random_string(8)}" members_coordinates = { "Palermo": GeospatialData(13.361389, 38.115556), "Catania": GeospatialData(15.087269, 37.502669), + "edge1": GeospatialData(12.758489, 
38.788135), + "edge2": GeospatialData(17.241510, 38.788135), } - assert await redis_client.geoadd(key, members_coordinates) == 2 - assert await redis_client.geohash(key, ["Palermo", "Catania", "Place"]) == [ - "sqc8b49rny0", - "sqdtr74hyu0", - None, - ] + result = { + "Catania": [56.4412578701582, 3479447370796909.0], + "Palermo": [190.44242984775784, 3479099956230698.0], + "edge2": [279.7403417843143, 3481342659049484.0], + "edge1": [279.7404521356343, 3479273021651468.0], + } + assert await redis_client.geoadd(key, members_coordinates) == 4 + # Test storing results of a box search, unit: kilometes, from a geospatial data assert ( - await redis_client.geohash( - "non_existing_key", ["Palermo", "Catania", "Place"] + await redis_client.geosearchstore( + destination_key, + key, + GeospatialData(15, 37), + GeoSearchByBox(400, 400, GeoUnit.KILOMETERS), ) - == [None] * 3 + ) == 4 # Number of elements stored + + # Verify the stored results + zrange_map = await redis_client.zrange_withscores( + destination_key, RangeByIndex(0, -1) ) + expected_map = {member: value[1] for member, value in result.items()} + sorted_expected_map = dict(sorted(expected_map.items(), key=lambda x: x[1])) + assert compare_maps(zrange_map, sorted_expected_map) is True - # Neccessary to check since we are enforcing the user to pass a list of members while redis don't - # But when running the command with key only (and no members) the returned value will always be an empty list - # So in case of any changes, this test will fail and inform us that we should allow not passing any members. 
- assert await redis_client.geohash(key, []) == [] + # Test storing results of a box search, unit: kilometes, from a geospatial data, with distance + assert ( + await redis_client.geosearchstore( + destination_key, + key, + GeospatialData(15, 37), + GeoSearchByBox(400, 400, GeoUnit.KILOMETERS), + store_dist=True, + ) + ) == 4 # Number of elements stored - assert await redis_client.set(key, "value") == OK - with pytest.raises(RequestError): - await redis_client.geohash(key, ["Palermo", "Catania"]) + # Verify the stored results + zrange_map = await redis_client.zrange_withscores( + destination_key, RangeByIndex(0, -1) + ) + expected_map = {member: value[0] for member, value in result.items()} + sorted_expected_map = dict(sorted(expected_map.items(), key=lambda x: x[1])) + assert compare_maps(zrange_map, sorted_expected_map) is True - @pytest.mark.parametrize("cluster_mode", [True, False]) - @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_geodist(self, redis_client: TRedisClient): - key, key2 = get_random_string(10), get_random_string(10) - members_coordinates = { - "Palermo": GeospatialData(13.361389, 38.115556), - "Catania": GeospatialData(15.087269, 37.502669), - } - assert await redis_client.geoadd(key, members_coordinates) == 2 + # Test storing results of a box search, unit: kilometes, from a geospatial data, with count + assert ( + await redis_client.geosearchstore( + destination_key, + key, + GeospatialData(15, 37), + GeoSearchByBox(400, 400, GeoUnit.KILOMETERS), + count=GeoSearchCount(1), + ) + ) == 1 # Number of elements stored - assert await redis_client.geodist(key, "Palermo", "Catania") == 166274.1516 + # Verify the stored results + zrange_map = await redis_client.zrange_withscores( + destination_key, RangeByIndex(0, -1) + ) + assert compare_maps(zrange_map, {"Catania": 3479447370796909.0}) is True + + # Test storing results of a box search, unit: meters, from a member, with distance + meters = 400 * 
1000 assert ( - await redis_client.geodist(key, "Palermo", "Catania", GeoUnit.KILOMETERS) - == 166.2742 + await redis_client.geosearchstore( + destination_key, + key, + "Catania", + GeoSearchByBox(meters, meters, GeoUnit.METERS), + store_dist=True, + ) + ) == 3 # Number of elements stored + + # Verify the stored results with distances + zrange_map = await redis_client.zrange_withscores( + destination_key, RangeByIndex(0, -1) ) - assert await redis_client.geodist(key, "Palermo", "Palermo", GeoUnit.MILES) == 0 + expected_distances = { + "Catania": 0.0, + "Palermo": 166274.15156960033, + "edge2": 236529.17986494553, + } + assert compare_maps(zrange_map, expected_distances) is True + + # Test search by box, unit: feet, from a member, with limited ANY count to 2, with hash + feet = 400 * 3280.8399 assert ( - await redis_client.geodist( - key, "Palermo", "non-existing-member", GeoUnit.FEET + await redis_client.geosearchstore( + destination_key, + key, + "Palermo", + GeoSearchByBox(feet, feet, GeoUnit.FEET), + count=GeoSearchCount(2), ) - == None + == 2 ) - assert await redis_client.set(key2, "value") == OK - with pytest.raises(RequestError): - await redis_client.geodist(key2, "Palmero", "Catania") + # Verify the stored results + zrange_map = await redis_client.zrange_withscores( + destination_key, RangeByIndex(0, -1) + ) + for member in zrange_map: + assert member in result @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_geopos(self, redis_client: TRedisClient): - key = get_random_string(10) + async def test_geosearchstore_by_radius(self, redis_client: TGlideClient): + key = f"{{testKey}}:{get_random_string(10)}" + destination_key = f"{{testKey}}:{get_random_string(8)}" members_coordinates = { "Palermo": GeospatialData(13.361389, 38.115556), "Catania": GeospatialData(15.087269, 37.502669), + "edge1": GeospatialData(12.758489, 38.788135), + "edge2": 
GeospatialData(17.241510, 38.788135), } - assert await redis_client.geoadd(key, members_coordinates) == 2 - - # The comparison allows for a small tolerance level due to potential precision errors in floating-point calculations - # No worries, Python can handle it, therefore, this shouldn't fail - positions = await redis_client.geopos(key, ["Palermo", "Catania", "Place"]) - expected_positions = [ - [13.36138933897018433, 38.11555639549629859], - [15.08726745843887329, 37.50266842333162032], - ] - assert len(positions) == 3 and positions[2] is None - - assert all( - all( - math.isclose(actual_coord, expected_coord) - for actual_coord, expected_coord in zip(actual_pos, expected_pos) - ) - for actual_pos, expected_pos in zip(positions, expected_positions) - if actual_pos is not None - ) + result = { + "Catania": [56.4412578701582, 3479447370796909.0], + "Palermo": [190.44242984775784, 3479099956230698.0], + } + assert await redis_client.geoadd(key, members_coordinates) == 4 + # Test storing results of a radius search, unit: feet, from a member + feet = 200 * 3280.8399 assert ( - await redis_client.geopos( - "non_existing_key", ["Palermo", "Catania", "Place"] + await redis_client.geosearchstore( + destination_key, + key, + "Catania", + GeoSearchByRadius(feet, GeoUnit.FEET), ) - == [None] * 3 + == 2 ) - # Neccessary to check since we are enforcing the user to pass a list of members while redis don't - # But when running the command with key only (and no members) the returned value will always be an empty list - # So in case of any changes, this test will fail and inform us that we should allow not passing any members. 
- assert await redis_client.geohash(key, []) == [] - - assert await redis_client.set(key, "value") == OK - with pytest.raises(RequestError): - await redis_client.geopos(key, ["Palermo", "Catania"]) - - @pytest.mark.parametrize("cluster_mode", [True, False]) - @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zadd_zaddincr(self, redis_client: TRedisClient): - key = get_random_string(10) - members_scores = {"one": 1, "two": 2, "three": 3} - assert await redis_client.zadd(key, members_scores=members_scores) == 3 - assert await redis_client.zadd_incr(key, member="one", increment=2) == 3.0 + # Verify the stored results + zrange_map = await redis_client.zrange_withscores( + destination_key, RangeByIndex(0, -1) + ) + expected_map = {member: value[1] for member, value in result.items()} + sorted_expected_map = dict(sorted(expected_map.items(), key=lambda x: x[1])) + assert compare_maps(zrange_map, sorted_expected_map) is True - @pytest.mark.parametrize("cluster_mode", [True, False]) - @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zadd_nx_xx(self, redis_client: TRedisClient): - key = get_random_string(10) - members_scores = {"one": 1, "two": 2, "three": 3} + # Test search by radius, units: meters, from a member + meters = 200 * 1000 assert ( - await redis_client.zadd( + await redis_client.geosearchstore( + destination_key, key, - members_scores=members_scores, - existing_options=ConditionalChange.ONLY_IF_EXISTS, + "Catania", + GeoSearchByRadius(meters, GeoUnit.METERS), + store_dist=True, ) - == 0 + == 2 + ) + + # Verify the stored results + zrange_map = await redis_client.zrange_withscores( + destination_key, RangeByIndex(0, -1) ) + expected_distances = { + "Catania": 0.0, + "Palermo": 166274.15156960033, + } + assert compare_maps(zrange_map, expected_distances) is True + + # Test search by radius, unit: miles, from a geospatial data assert ( - await 
redis_client.zadd( + await redis_client.geosearchstore( + destination_key, key, - members_scores=members_scores, - existing_options=ConditionalChange.ONLY_IF_DOES_NOT_EXIST, + GeospatialData(15, 37), + GeoSearchByRadius(175, GeoUnit.MILES), ) - == 3 + == 4 ) + # Test storing results of a radius search, unit: kilometers, from a geospatial data, with limited count to 2 + kilometers = 200 assert ( - await redis_client.zadd_incr( + await redis_client.geosearchstore( + destination_key, key, - member="one", - increment=5.0, - existing_options=ConditionalChange.ONLY_IF_DOES_NOT_EXIST, + GeospatialData(15, 37), + GeoSearchByRadius(kilometers, GeoUnit.KILOMETERS), + count=GeoSearchCount(2), + store_dist=True, ) - is None + == 2 + ) + + # Verify the stored results + zrange_map = await redis_client.zrange_withscores( + destination_key, RangeByIndex(0, -1) ) + expected_map = {member: value[0] for member, value in result.items()} + sorted_expected_map = dict(sorted(expected_map.items(), key=lambda x: x[1])) + assert compare_maps(zrange_map, sorted_expected_map) is True + # Test storing results of a radius search, unit: kilometers, from a geospatial data, with limited ANY count to 1 assert ( - await redis_client.zadd_incr( + await redis_client.geosearchstore( + destination_key, key, - member="one", - increment=5.0, - existing_options=ConditionalChange.ONLY_IF_EXISTS, + GeospatialData(15, 37), + GeoSearchByRadius(kilometers, GeoUnit.KILOMETERS), + count=GeoSearchCount(1, True), ) - == 6.0 + == 1 + ) + + # Verify the stored results + zrange_map = await redis_client.zrange_withscores( + destination_key, RangeByIndex(0, -1) ) + for member in zrange_map: + assert member in result + @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zadd_gt_lt(self, redis_client: TRedisClient): - key = get_random_string(10) - members_scores = {"one": -3, "two": 2, "three": 3} - assert await 
redis_client.zadd(key, members_scores=members_scores) == 3 + async def test_geosearchstore_no_result(self, redis_client: TGlideClient): + key = f"{{testKey}}:{get_random_string(10)}" + destination_key = f"{{testKey}}:{get_random_string(8)}" + members_coordinates = { + "Palermo": GeospatialData(13.361389, 38.115556), + "Catania": GeospatialData(15.087269, 37.502669), + "edge1": GeospatialData(12.758489, 38.788135), + "edge2": GeospatialData(17.241510, 38.788135), + } + assert await redis_client.geoadd(key, members_coordinates) == 4 + + # No members within the area + assert ( + await redis_client.geosearchstore( + destination_key, + key, + GeospatialData(15, 37), + GeoSearchByBox(50, 50, GeoUnit.METERS), + ) + == 0 + ) + + assert ( + await redis_client.geosearchstore( + destination_key, + key, + GeospatialData(15, 37), + GeoSearchByRadius(10, GeoUnit.METERS), + ) + == 0 + ) + + # No members in the area (apart from the member we search from itself) + assert ( + await redis_client.geosearchstore( + destination_key, + key, + "Catania", + GeoSearchByBox(10, 10, GeoUnit.KILOMETERS), + ) + == 1 + ) + + assert ( + await redis_client.geosearchstore( + destination_key, + key, + "Catania", + GeoSearchByRadius(10, GeoUnit.METERS), + ) + == 1 + ) + + # Search from non-existing member + with pytest.raises(RequestError): + await redis_client.geosearchstore( + destination_key, + key, + "non_existing_member", + GeoSearchByBox(10, 10, GeoUnit.MILES), + ) + + assert await redis_client.set(key, "foo") == OK + with pytest.raises(RequestError): + await redis_client.geosearchstore( + destination_key, + key, + "Catania", + GeoSearchByBox(10, 10, GeoUnit.MILES), + ) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_geohash(self, redis_client: TGlideClient): + key = get_random_string(10) + members_coordinates = { + "Palermo": GeospatialData(13.361389, 38.115556), + "Catania": 
GeospatialData(15.087269, 37.502669), + } + assert await redis_client.geoadd(key, members_coordinates) == 2 + assert await redis_client.geohash(key, ["Palermo", "Catania", "Place"]) == [ + "sqc8b49rny0", + "sqdtr74hyu0", + None, + ] + + assert ( + await redis_client.geohash( + "non_existing_key", ["Palermo", "Catania", "Place"] + ) + == [None] * 3 + ) + + # Neccessary to check since we are enforcing the user to pass a list of members while redis don't + # But when running the command with key only (and no members) the returned value will always be an empty list + # So in case of any changes, this test will fail and inform us that we should allow not passing any members. + assert await redis_client.geohash(key, []) == [] + + assert await redis_client.set(key, "value") == OK + with pytest.raises(RequestError): + await redis_client.geohash(key, ["Palermo", "Catania"]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_geodist(self, redis_client: TGlideClient): + key, key2 = get_random_string(10), get_random_string(10) + members_coordinates = { + "Palermo": GeospatialData(13.361389, 38.115556), + "Catania": GeospatialData(15.087269, 37.502669), + } + assert await redis_client.geoadd(key, members_coordinates) == 2 + + assert await redis_client.geodist(key, "Palermo", "Catania") == 166274.1516 + assert ( + await redis_client.geodist(key, "Palermo", "Catania", GeoUnit.KILOMETERS) + == 166.2742 + ) + assert await redis_client.geodist(key, "Palermo", "Palermo", GeoUnit.MILES) == 0 + assert ( + await redis_client.geodist( + key, "Palermo", "non-existing-member", GeoUnit.FEET + ) + == None + ) + + assert await redis_client.set(key2, "value") == OK + with pytest.raises(RequestError): + await redis_client.geodist(key2, "Palmero", "Catania") + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, 
ProtocolVersion.RESP3]) + async def test_geopos(self, redis_client: TGlideClient): + key = get_random_string(10) + members_coordinates = { + "Palermo": GeospatialData(13.361389, 38.115556), + "Catania": GeospatialData(15.087269, 37.502669), + } + assert await redis_client.geoadd(key, members_coordinates) == 2 + + # The comparison allows for a small tolerance level due to potential precision errors in floating-point calculations + # No worries, Python can handle it, therefore, this shouldn't fail + positions = await redis_client.geopos(key, ["Palermo", "Catania", "Place"]) + expected_positions = [ + [13.36138933897018433, 38.11555639549629859], + [15.08726745843887329, 37.50266842333162032], + ] + assert len(positions) == 3 and positions[2] is None + + assert all( + all( + math.isclose(actual_coord, expected_coord) + for actual_coord, expected_coord in zip(actual_pos, expected_pos) + ) + for actual_pos, expected_pos in zip(positions, expected_positions) + if actual_pos is not None + ) + + assert ( + await redis_client.geopos( + "non_existing_key", ["Palermo", "Catania", "Place"] + ) + == [None] * 3 + ) + + # Neccessary to check since we are enforcing the user to pass a list of members while redis don't + # But when running the command with key only (and no members) the returned value will always be an empty list + # So in case of any changes, this test will fail and inform us that we should allow not passing any members. 
+ assert await redis_client.geohash(key, []) == [] + + assert await redis_client.set(key, "value") == OK + with pytest.raises(RequestError): + await redis_client.geopos(key, ["Palermo", "Catania"]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_zadd_zaddincr(self, redis_client: TGlideClient): + key = get_random_string(10) + members_scores = {"one": 1, "two": 2, "three": 3} + assert await redis_client.zadd(key, members_scores=members_scores) == 3 + assert await redis_client.zadd_incr(key, member="one", increment=2) == 3.0 + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_zadd_nx_xx(self, redis_client: TGlideClient): + key = get_random_string(10) + members_scores = {"one": 1, "two": 2, "three": 3} + assert ( + await redis_client.zadd( + key, + members_scores=members_scores, + existing_options=ConditionalChange.ONLY_IF_EXISTS, + ) + == 0 + ) + assert ( + await redis_client.zadd( + key, + members_scores=members_scores, + existing_options=ConditionalChange.ONLY_IF_DOES_NOT_EXIST, + ) + == 3 + ) + + assert ( + await redis_client.zadd_incr( + key, + member="one", + increment=5.0, + existing_options=ConditionalChange.ONLY_IF_DOES_NOT_EXIST, + ) + is None + ) + + assert ( + await redis_client.zadd_incr( + key, + member="one", + increment=5.0, + existing_options=ConditionalChange.ONLY_IF_EXISTS, + ) + == 6.0 + ) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_zadd_gt_lt(self, redis_client: TGlideClient): + key = get_random_string(10) + members_scores = {"one": -3, "two": 2, "three": 3} + assert await redis_client.zadd(key, members_scores=members_scores) == 3 members_scores["one"] = 10 assert ( await redis_client.zadd( @@ -2493,7 
+2970,33 @@ async def test_zadd_gt_lt(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrem(self, redis_client: TRedisClient): + async def test_zincrby(self, redis_client: TGlideClient): + key, member, member2 = ( + get_random_string(10), + get_random_string(5), + get_random_string(5), + ) + + # key does not exist + assert await redis_client.zincrby(key, 2.5, member) == 2.5 + assert await redis_client.zscore(key, member) == 2.5 + + # key exists, but value doesn't + assert await redis_client.zincrby(key, -3.3, member2) == -3.3 + assert await redis_client.zscore(key, member2) == -3.3 + + # updating existing value in existing key + assert await redis_client.zincrby(key, 1.0, member) == 3.5 + assert await redis_client.zscore(key, member) == 3.5 + + # Key exists, but it is not a sorted set + assert await redis_client.set(key, "_") == OK + with pytest.raises(RequestError): + await redis_client.zincrby(key, 0.5, "_") + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_zrem(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"one": 1, "two": 2, "three": 3} assert await redis_client.zadd(key, members_scores=members_scores) == 3 @@ -2505,7 +3008,7 @@ async def test_zrem(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zremrangebyscore(self, redis_client: TRedisClient): + async def test_zremrangebyscore(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"one": 1, "two": 2, "three": 3} assert await redis_client.zadd(key, members_scores) == 3 @@ -2533,7 +3036,7 @@ async def test_zremrangebyscore(self, redis_client: 
TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zremrangebylex(self, redis_client: TRedisClient): + async def test_zremrangebylex(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) range = RangeByIndex(0, -1) @@ -2578,7 +3081,7 @@ async def test_zremrangebylex(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zremrangebyrank(self, redis_client: TRedisClient): + async def test_zremrangebyrank(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) range = RangeByIndex(0, -1) @@ -2607,7 +3110,7 @@ async def test_zremrangebyrank(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zlexcount(self, redis_client: TRedisClient): + async def test_zlexcount(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) members_scores = {"a": 1.0, "b": 2.0, "c": 3.0} @@ -2651,7 +3154,7 @@ async def test_zlexcount(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zcard(self, redis_client: TRedisClient): + async def test_zcard(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"one": 1, "two": 2, "three": 3} assert await redis_client.zadd(key, members_scores=members_scores) == 3 @@ -2663,7 +3166,7 @@ async def test_zcard(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - 
async def test_zcount(self, redis_client: TRedisClient): + async def test_zcount(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"one": 1, "two": 2, "three": 3} assert await redis_client.zadd(key, members_scores=members_scores) == 3 @@ -2706,7 +3209,7 @@ async def test_zcount(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zscore(self, redis_client: TRedisClient): + async def test_zscore(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"one": 1, "two": 2, "three": 3} assert await redis_client.zadd(key, members_scores=members_scores) == 3 @@ -2719,7 +3222,7 @@ async def test_zscore(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zmscore(self, redis_client: TRedisClient): + async def test_zmscore(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) members_scores = {"one": 1, "two": 2, "three": 3} @@ -2741,7 +3244,7 @@ async def test_zmscore(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zinter_commands(self, redis_client: TRedisClient): + async def test_zinter_commands(self, redis_client: TGlideClient): key1 = "{testKey}:1-" + get_random_string(10) key2 = "{testKey}:2-" + get_random_string(10) key3 = "{testKey}:3-" + get_random_string(10) @@ -2871,7 +3374,7 @@ async def test_zinter_commands(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zunion_commands(self, redis_client: TRedisClient): + 
async def test_zunion_commands(self, redis_client: TGlideClient): key1 = "{testKey}:1-" + get_random_string(10) key2 = "{testKey}:2-" + get_random_string(10) key3 = "{testKey}:3-" + get_random_string(10) @@ -3024,7 +3527,7 @@ async def test_zunion_commands(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zpopmin(self, redis_client: TRedisClient): + async def test_zpopmin(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"a": 1.0, "b": 2.0, "c": 3.0} assert await redis_client.zadd(key, members_scores=members_scores) == 3 @@ -3043,7 +3546,7 @@ async def test_zpopmin(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_bzpopmin(self, redis_client: TRedisClient): + async def test_bzpopmin(self, redis_client: TGlideClient): key1 = f"{{testKey}}:{get_random_string(10)}" key2 = f"{{testKey}}:{get_random_string(10)}" non_existing_key = f"{{testKey}}:non_existing_key" @@ -3077,7 +3580,7 @@ async def endless_bzpopmin_call(): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zpopmax(self, redis_client: TRedisClient): + async def test_zpopmax(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"a": 1.0, "b": 2.0, "c": 3.0} assert await redis_client.zadd(key, members_scores) == 3 @@ -3096,7 +3599,7 @@ async def test_zpopmax(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_bzpopmax(self, redis_client: TRedisClient): + async def test_bzpopmax(self, redis_client: TGlideClient): key1 = 
f"{{testKey}}:{get_random_string(10)}" key2 = f"{{testKey}}:{get_random_string(10)}" non_existing_key = f"{{testKey}}:non_existing_key" @@ -3130,7 +3633,7 @@ async def endless_bzpopmax_call(): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrange_by_index(self, redis_client: TRedisClient): + async def test_zrange_by_index(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"one": 1, "two": 2, "three": 3} assert await redis_client.zadd(key, members_scores=members_scores) == 3 @@ -3161,7 +3664,7 @@ async def test_zrange_by_index(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrange_byscore(self, redis_client: TRedisClient): + async def test_zrange_byscore(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"one": 1, "two": 2, "three": 3} assert await redis_client.zadd(key, members_scores=members_scores) == 3 @@ -3243,7 +3746,7 @@ async def test_zrange_byscore(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrange_bylex(self, redis_client: TRedisClient): + async def test_zrange_bylex(self, redis_client: TGlideClient): key = get_random_string(10) members_scores = {"a": 1, "b": 2, "c": 3} assert await redis_client.zadd(key, members_scores=members_scores) == 3 @@ -3297,7 +3800,7 @@ async def test_zrange_bylex(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrange_different_types_of_keys(self, redis_client: TRedisClient): + async def test_zrange_different_types_of_keys(self, 
redis_client: TGlideClient): key = get_random_string(10) assert ( @@ -3320,7 +3823,7 @@ async def test_zrange_different_types_of_keys(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrangestore_by_index(self, redis_client: TRedisClient): + async def test_zrangestore_by_index(self, redis_client: TGlideClient): destination = f"{{testKey}}:{get_random_string(10)}" source = f"{{testKey}}:{get_random_string(10)}" string_key = f"{{testKey}}:{get_random_string(10)}" @@ -3378,7 +3881,7 @@ async def test_zrangestore_by_index(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrangestore_by_score(self, redis_client: TRedisClient): + async def test_zrangestore_by_score(self, redis_client: TGlideClient): destination = f"{{testKey}}:{get_random_string(10)}" source = f"{{testKey}}:{get_random_string(10)}" string_key = f"{{testKey}}:{get_random_string(10)}" @@ -3481,7 +3984,7 @@ async def test_zrangestore_by_score(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrangestore_by_lex(self, redis_client: TRedisClient): + async def test_zrangestore_by_lex(self, redis_client: TGlideClient): destination = f"{{testKey}}:{get_random_string(10)}" source = f"{{testKey}}:{get_random_string(10)}" string_key = f"{{testKey}}:4-{get_random_string(10)}" @@ -3584,7 +4087,7 @@ async def test_zrangestore_by_lex(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrank(self, redis_client: TRedisClient): + async def test_zrank(self, 
redis_client: TGlideClient): key = get_random_string(10) members_scores = {"one": 1.5, "two": 2, "three": 3} assert await redis_client.zadd(key, members_scores) == 3 @@ -3605,7 +4108,42 @@ async def test_zrank(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zdiff(self, redis_client: TRedisClient): + async def test_zrevrank(self, redis_client: TGlideClient): + key = get_random_string(10) + non_existing_key = get_random_string(10) + string_key = get_random_string(10) + member_scores = {"one": 1.0, "two": 2.0, "three": 3.0} + + assert await redis_client.zadd(key, member_scores) == 3 + assert await redis_client.zrevrank(key, "three") == 0 + assert await redis_client.zrevrank(key, "non_existing_member") is None + assert ( + await redis_client.zrevrank(non_existing_key, "non_existing_member") is None + ) + + if not check_if_server_version_lt(redis_client, "7.2.0"): + assert await redis_client.zrevrank_withscore(key, "one") == [2, 1.0] + assert ( + await redis_client.zrevrank_withscore(key, "non_existing_member") + is None + ) + assert ( + await redis_client.zrevrank_withscore( + non_existing_key, "non_existing_member" + ) + is None + ) + + # key exists, but it is not a sorted set + assert await redis_client.set(string_key, "foo") == OK + with pytest.raises(RequestError): + await redis_client.zrevrank(string_key, "member") + with pytest.raises(RequestError): + await redis_client.zrevrank_withscore(string_key, "member") + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_zdiff(self, redis_client: TGlideClient): key1 = f"{{testKey}}:1-{get_random_string(10)}" key2 = f"{{testKey}}:2-{get_random_string(10)}" key3 = f"{{testKey}}:3-{get_random_string(10)}" @@ -3659,7 +4197,7 @@ async def test_zdiff(self, redis_client: 
TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zdiffstore(self, redis_client: TRedisClient): + async def test_zdiffstore(self, redis_client: TGlideClient): key1 = f"{{testKey}}:1-{get_random_string(10)}" key2 = f"{{testKey}}:2-{get_random_string(10)}" key3 = f"{{testKey}}:3-{get_random_string(10)}" @@ -3698,7 +4236,7 @@ async def test_zdiffstore(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_bzmpop(self, redis_client: TRedisClient): + async def test_bzmpop(self, redis_client: TGlideClient): min_version = "7.0.0" if await check_if_server_version_lt(redis_client, min_version): return pytest.mark.skip(reason=f"Redis version required >= {min_version}") @@ -3765,7 +4303,7 @@ async def endless_bzmpop_call(): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrandmember(self, redis_client: TRedisClient): + async def test_zrandmember(self, redis_client: TGlideClient): key = get_random_string(10) string_key = get_random_string(10) scores = {"one": 1, "two": 2} @@ -3782,7 +4320,7 @@ async def test_zrandmember(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrandmember_count(self, redis_client: TRedisClient): + async def test_zrandmember_count(self, redis_client: TGlideClient): key = get_random_string(10) string_key = get_random_string(10) scores = {"one": 1, "two": 2} @@ -3809,7 +4347,7 @@ async def test_zrandmember_count(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", 
[ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zrandmember_withscores(self, redis_client: TRedisClient): + async def test_zrandmember_withscores(self, redis_client: TGlideClient): key = get_random_string(10) string_key = get_random_string(10) scores = {"one": 1, "two": 2} @@ -3838,7 +4376,7 @@ async def test_zrandmember_withscores(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zintercard(self, redis_client: TRedisClient): + async def test_zintercard(self, redis_client: TGlideClient): min_version = "7.0.0" if await check_if_server_version_lt(redis_client, min_version): return pytest.mark.skip(reason=f"Redis version required >= {min_version}") @@ -3872,7 +4410,7 @@ async def test_zintercard(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_zmpop(self, redis_client: TRedisClient): + async def test_zmpop(self, redis_client: TGlideClient): min_version = "7.0.0" if await check_if_server_version_lt(redis_client, min_version): return pytest.mark.skip(reason=f"Redis version required >= {min_version}") @@ -3925,7 +4463,7 @@ async def test_zmpop(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_type(self, redis_client: TRedisClient): + async def test_type(self, redis_client: TGlideClient): key = get_random_string(10) assert await redis_client.set(key, "value") == OK assert (await redis_client.type(key)).lower() == "string" @@ -3956,7 +4494,7 @@ async def test_type(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) 
async def test_sort_and_sort_store_with_get_or_by_args( - self, redis_client: RedisClient + self, redis_client: GlideClient ): key = "{SameSlotKey}" + get_random_string(10) store = "{SameSlotKey}" + get_random_string(10) @@ -4039,7 +4577,7 @@ async def test_sort_and_sort_store_with_get_or_by_args( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_sort_and_sort_store_without_get_or_by_args( - self, redis_client: TRedisClient + self, redis_client: TGlideClient ): key = "{SameSlotKey}" + get_random_string(10) store = "{SameSlotKey}" + get_random_string(10) @@ -4095,10 +4633,10 @@ async def test_sort_and_sort_store_without_get_or_by_args( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_echo(self, redis_client: TRedisClient): + async def test_echo(self, redis_client: TGlideClient): message = get_random_string(5) assert await redis_client.echo(message) == message - if isinstance(redis_client, RedisClusterClient): + if isinstance(redis_client, GlideClusterClient): echo_dict = await redis_client.echo(message, AllNodes()) assert isinstance(echo_dict, dict) for value in echo_dict.values(): @@ -4106,7 +4644,7 @@ async def test_echo(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_dbsize(self, redis_client: TRedisClient): + async def test_dbsize(self, redis_client: TGlideClient): assert await redis_client.custom_command(["FLUSHALL"]) == OK assert await redis_client.dbsize() == 0 @@ -4116,7 +4654,7 @@ async def test_dbsize(self, redis_client: TRedisClient): assert await redis_client.set(key, value) == OK assert await redis_client.dbsize() == 10 - if isinstance(redis_client, RedisClusterClient): + if isinstance(redis_client, 
GlideClusterClient): assert await redis_client.custom_command(["FLUSHALL"]) == OK key = get_random_string(5) assert await redis_client.set(key, value) == OK @@ -4127,7 +4665,7 @@ async def test_dbsize(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_time(self, redis_client: TRedisClient): + async def test_time(self, redis_client: TGlideClient): current_time = int(time.time()) - 1 result = await redis_client.time() assert len(result) == 2 @@ -4139,7 +4677,7 @@ async def test_time(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_lastsave(self, redis_client: TRedisClient): + async def test_lastsave(self, redis_client: TGlideClient): yesterday = date.today() - timedelta(1) yesterday_unix_time = time.mktime(yesterday.timetuple()) @@ -4147,7 +4685,7 @@ async def test_lastsave(self, redis_client: TRedisClient): assert isinstance(result, int) assert result > yesterday_unix_time - if isinstance(redis_client, RedisClusterClient): + if isinstance(redis_client, GlideClusterClient): # test with single-node route result = await redis_client.lastsave(RandomNode()) assert isinstance(result, int) @@ -4161,7 +4699,7 @@ async def test_lastsave(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_append(self, redis_client: TRedisClient): + async def test_append(self, redis_client: TGlideClient): key, value = get_random_string(10), get_random_string(5) assert await redis_client.append(key, value) == 5 @@ -4170,7 +4708,7 @@ async def test_append(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) 
@pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_xadd_xtrim_xlen(self, redis_client: TRedisClient): + async def test_xadd_xtrim_xlen(self, redis_client: TGlideClient): key = get_random_string(10) string_key = get_random_string(10) non_existing_key = get_random_string(10) @@ -4243,103 +4781,1327 @@ async def test_xadd_xtrim_xlen(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_pfadd(self, redis_client: TRedisClient): - key = get_random_string(10) - assert await redis_client.pfadd(key, []) == 1 - assert await redis_client.pfadd(key, ["one", "two"]) == 1 - assert await redis_client.pfadd(key, ["two"]) == 0 - assert await redis_client.pfadd(key, []) == 0 + async def test_xdel(self, redis_client: TGlideClient): + key1 = get_random_string(10) + string_key = get_random_string(10) + non_existing_key = get_random_string(10) + stream_id1 = "0-1" + stream_id2 = "0-2" + stream_id3 = "0-3" - assert await redis_client.set("foo", "value") == OK + assert ( + await redis_client.xadd( + key1, [("f1", "foo1"), ("f2", "foo2")], StreamAddOptions(stream_id1) + ) + == stream_id1 + ) + assert ( + await redis_client.xadd( + key1, [("f1", "foo1"), ("f2", "foo2")], StreamAddOptions(stream_id2) + ) + == stream_id2 + ) + assert await redis_client.xlen(key1) == 2 + + # deletes one stream id, and ignores anything invalid + assert await redis_client.xdel(key1, [stream_id1, stream_id3]) == 1 + assert await redis_client.xdel(non_existing_key, [stream_id3]) == 0 + + # invalid argument - id list should not be empty with pytest.raises(RequestError): - await redis_client.pfadd("foo", []) + await redis_client.xdel(key1, []) - @pytest.mark.parametrize("cluster_mode", [True, False]) + # key exists, but it is not a stream + assert await redis_client.set(string_key, "foo") == OK + with pytest.raises(RequestError): 
+ await redis_client.xdel(string_key, [stream_id3]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_xrange_and_xrevrange(self, redis_client: TGlideClient): + key = get_random_string(10) + non_existing_key = get_random_string(10) + string_key = get_random_string(10) + stream_id1 = "0-1" + stream_id2 = "0-2" + stream_id3 = "0-3" + + assert ( + await redis_client.xadd( + key, [("f1", "v1")], StreamAddOptions(id=stream_id1) + ) + == stream_id1 + ) + assert ( + await redis_client.xadd( + key, [("f2", "v2")], StreamAddOptions(id=stream_id2) + ) + == stream_id2 + ) + assert await redis_client.xlen(key) == 2 + + # get everything from the stream + assert await redis_client.xrange(key, MinId(), MaxId()) == { + stream_id1: [["f1", "v1"]], + stream_id2: [["f2", "v2"]], + } + assert await redis_client.xrevrange(key, MaxId(), MinId()) == { + stream_id2: [["f2", "v2"]], + stream_id1: [["f1", "v1"]], + } + + # returns empty mapping if + before - + assert await redis_client.xrange(key, MaxId(), MinId()) == {} + # rev search returns empty mapping if - before + + assert await redis_client.xrevrange(key, MinId(), MaxId()) == {} + + assert ( + await redis_client.xadd( + key, [("f3", "v3")], StreamAddOptions(id=stream_id3) + ) + == stream_id3 + ) + + # get the newest entry + assert await redis_client.xrange( + key, ExclusiveIdBound(stream_id2), ExclusiveIdBound.from_timestamp(5), 1 + ) == {stream_id3: [["f3", "v3"]]} + assert await redis_client.xrevrange( + key, ExclusiveIdBound.from_timestamp(5), ExclusiveIdBound(stream_id2), 1 + ) == {stream_id3: [["f3", "v3"]]} + + # xrange/xrevrange against an emptied stream + assert await redis_client.xdel(key, [stream_id1, stream_id2, stream_id3]) == 3 + assert await redis_client.xrange(key, MinId(), MaxId(), 10) == {} + assert await redis_client.xrevrange(key, MaxId(), MinId(), 10) == {} + + assert await 
redis_client.xrange(non_existing_key, MinId(), MaxId()) == {} + assert await redis_client.xrevrange(non_existing_key, MaxId(), MinId()) == {} + + # count value < 1 returns None + assert await redis_client.xrange(key, MinId(), MaxId(), 0) is None + assert await redis_client.xrange(key, MinId(), MaxId(), -1) is None + assert await redis_client.xrevrange(key, MaxId(), MinId(), 0) is None + assert await redis_client.xrevrange(key, MaxId(), MinId(), -1) is None + + # key exists, but it is not a stream + assert await redis_client.set(string_key, "foo") + with pytest.raises(RequestError): + await redis_client.xrange(string_key, MinId(), MaxId()) + with pytest.raises(RequestError): + await redis_client.xrevrange(string_key, MaxId(), MinId()) + + # invalid start bound + with pytest.raises(RequestError): + await redis_client.xrange(key, IdBound("not_a_stream_id"), MaxId()) + with pytest.raises(RequestError): + await redis_client.xrevrange(key, MaxId(), IdBound("not_a_stream_id")) + + # invalid end bound + with pytest.raises(RequestError): + await redis_client.xrange(key, MinId(), IdBound("not_a_stream_id")) + with pytest.raises(RequestError): + await redis_client.xrevrange(key, IdBound("not_a_stream_id"), MinId()) + + @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_pfcount(self, redis_client: TRedisClient): + async def test_xread( + self, redis_client: TGlideClient, cluster_mode, protocol, request + ): + key1 = f"{{testKey}}:1-{get_random_string(10)}" + key2 = f"{{testKey}}:2-{get_random_string(10)}" + non_existing_key = f"{{testKey}}:3-{get_random_string(10)}" + stream_id1_1 = "1-1" + stream_id1_2 = "1-2" + stream_id1_3 = "1-3" + stream_id2_1 = "2-1" + stream_id2_2 = "2-2" + stream_id2_3 = "2-3" + non_existing_id = "99-99" + + # setup first entries in streams key1 and key2 + assert ( + await redis_client.xadd( + key1, [("f1_1", "v1_1")], 
StreamAddOptions(id=stream_id1_1) + ) + == stream_id1_1 + ) + assert ( + await redis_client.xadd( + key2, [("f2_1", "v2_1")], StreamAddOptions(id=stream_id2_1) + ) + == stream_id2_1 + ) + + # setup second entries in streams key1 and key2 + assert ( + await redis_client.xadd( + key1, [("f1_2", "v1_2")], StreamAddOptions(id=stream_id1_2) + ) + == stream_id1_2 + ) + assert ( + await redis_client.xadd( + key2, [("f2_2", "v2_2")], StreamAddOptions(id=stream_id2_2) + ) + == stream_id2_2 + ) + + # setup third entries in streams key1 and key2 + assert ( + await redis_client.xadd( + key1, [("f1_3", "v1_3")], StreamAddOptions(id=stream_id1_3) + ) + == stream_id1_3 + ) + assert ( + await redis_client.xadd( + key2, [("f2_3", "v2_3")], StreamAddOptions(id=stream_id2_3) + ) + == stream_id2_3 + ) + + assert await redis_client.xread({key1: stream_id1_1, key2: stream_id2_1}) == { + key1: { + stream_id1_2: [["f1_2", "v1_2"]], + stream_id1_3: [["f1_3", "v1_3"]], + }, + key2: { + stream_id2_2: [["f2_2", "v2_2"]], + stream_id2_3: [["f2_3", "v2_3"]], + }, + } + + assert await redis_client.xread({non_existing_key: stream_id1_1}) is None + assert await redis_client.xread({key1: non_existing_id}) is None + + # passing an empty read options argument has no effect + assert await redis_client.xread({key1: stream_id1_1}, StreamReadOptions()) == { + key1: { + stream_id1_2: [["f1_2", "v1_2"]], + stream_id1_3: [["f1_3", "v1_3"]], + }, + } + + assert await redis_client.xread( + {key1: stream_id1_1}, StreamReadOptions(count=1) + ) == { + key1: { + stream_id1_2: [["f1_2", "v1_2"]], + }, + } + assert await redis_client.xread( + {key1: stream_id1_1}, StreamReadOptions(count=1, block_ms=1000) + ) == { + key1: { + stream_id1_2: [["f1_2", "v1_2"]], + }, + } + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_xread_edge_cases_and_failures( + self, redis_client: TGlideClient, cluster_mode, 
protocol, request + ): + key1 = f"{{testKey}}:1-{get_random_string(10)}" + string_key = f"{{testKey}}:2-{get_random_string(10)}" + stream_id0 = "0-0" + stream_id1 = "1-1" + stream_id2 = "1-2" + + assert ( + await redis_client.xadd( + key1, [("f1", "v1")], StreamAddOptions(id=stream_id1) + ) + == stream_id1 + ) + assert ( + await redis_client.xadd( + key1, [("f2", "v2")], StreamAddOptions(id=stream_id2) + ) + == stream_id2 + ) + + test_client = await create_client( + request=request, protocol=protocol, cluster_mode=cluster_mode, timeout=900 + ) + # ensure command doesn't time out even if timeout > request timeout + assert ( + await test_client.xread( + {key1: stream_id2}, StreamReadOptions(block_ms=1000) + ) + is None + ) + + async def endless_xread_call(): + await test_client.xread({key1: stream_id2}, StreamReadOptions(block_ms=0)) + + # when xread is called with a block timeout of 0, it should never timeout, but we wrap the test with a timeout + # to avoid the test getting stuck forever. 
+ with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(endless_xread_call(), timeout=3) + + # if count is non-positive, it is ignored + assert await redis_client.xread( + {key1: stream_id0}, StreamReadOptions(count=0) + ) == { + key1: { + stream_id1: [["f1", "v1"]], + stream_id2: [["f2", "v2"]], + }, + } + assert await redis_client.xread( + {key1: stream_id0}, StreamReadOptions(count=-1) + ) == { + key1: { + stream_id1: [["f1", "v1"]], + stream_id2: [["f2", "v2"]], + }, + } + + # invalid stream ID + with pytest.raises(RequestError): + await redis_client.xread({key1: "invalid_stream_id"}) + + # invalid argument - block cannot be negative + with pytest.raises(RequestError): + await redis_client.xread({key1: stream_id1}, StreamReadOptions(block_ms=-1)) + + # invalid argument - keys_and_ids must not be empty + with pytest.raises(RequestError): + await redis_client.xread({}) + + # key exists, but it is not a stream + assert await redis_client.set(string_key, "foo") + with pytest.raises(RequestError): + await redis_client.xread({string_key: stream_id1, key1: stream_id1}) + with pytest.raises(RequestError): + await redis_client.xread({key1: stream_id1, string_key: stream_id1}) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_xgroup_create_xgroup_destroy( + self, redis_client: TGlideClient, cluster_mode, protocol, request + ): + key = get_random_string(10) + non_existing_key = get_random_string(10) + string_key = get_random_string(10) + group_name1 = get_random_string(10) + group_name2 = get_random_string(10) + stream_id = "0-1" + + # trying to create a consumer group for a non-existing stream without the "MKSTREAM" arg results in error + with pytest.raises(RequestError): + await redis_client.xgroup_create(non_existing_key, group_name1, stream_id) + + # calling with the "MKSTREAM" arg should create the new stream automatically + assert ( + await 
redis_client.xgroup_create( + key, group_name1, stream_id, StreamGroupOptions(make_stream=True) + ) + == OK + ) + + # invalid arg - group names must be unique, but group_name1 already exists + with pytest.raises(RequestError): + await redis_client.xgroup_create(key, group_name1, stream_id) + + # invalid stream ID format + with pytest.raises(RequestError): + await redis_client.xgroup_create( + key, group_name2, "invalid_stream_id_format" + ) + + assert await redis_client.xgroup_destroy(key, group_name1) is True + # calling xgroup_destroy again returns False because the group was already destroyed above + assert await redis_client.xgroup_destroy(key, group_name1) is False + + # attempting to destroy a group for a non-existing key should raise an error + with pytest.raises(RequestError): + await redis_client.xgroup_destroy(non_existing_key, group_name1) + + # "ENTRIESREAD" option was added in Redis 7.0.0 + if await check_if_server_version_lt(redis_client, "7.0.0"): + with pytest.raises(RequestError): + await redis_client.xgroup_create( + key, + group_name1, + stream_id, + StreamGroupOptions(entries_read_id="10"), + ) + else: + assert ( + await redis_client.xgroup_create( + key, + group_name1, + stream_id, + StreamGroupOptions(entries_read_id="10"), + ) + == OK + ) + + # invalid entries_read_id - cannot be the zero ("0-0") ID + with pytest.raises(RequestError): + await redis_client.xgroup_create( + key, + group_name2, + stream_id, + StreamGroupOptions(entries_read_id="0-0"), + ) + + # key exists, but it is not a stream + assert await redis_client.set(string_key, "foo") == OK + with pytest.raises(RequestError): + await redis_client.xgroup_create( + string_key, group_name1, stream_id, StreamGroupOptions(make_stream=True) + ) + with pytest.raises(RequestError): + await redis_client.xgroup_destroy(string_key, group_name1) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + 
async def test_xgroup_create_consumer_xreadgroup_xgroup_del_consumer( + self, redis_client: TGlideClient, cluster_mode, protocol, request + ): + key = f"{{testKey}}:{get_random_string(10)}" + non_existing_key = f"{{testKey}}:{get_random_string(10)}" + string_key = f"{{testKey}}:{get_random_string(10)}" + group_name = get_random_string(10) + consumer_name = get_random_string(10) + stream_id0 = "0" + stream_id1_0 = "1-0" + stream_id1_1 = "1-1" + stream_id1_2 = "1-2" + stream_id1_3 = "1-3" + + # create group and consumer for the group + assert ( + await redis_client.xgroup_create( + key, group_name, stream_id0, StreamGroupOptions(make_stream=True) + ) + == OK + ) + assert ( + await redis_client.xgroup_create_consumer(key, group_name, consumer_name) + is True + ) + + # attempting to create/delete a consumer for a group that does not exist results in a NOGROUP request error + with pytest.raises(RequestError): + await redis_client.xgroup_create_consumer( + key, "non_existing_group", consumer_name + ) + with pytest.raises(RequestError): + await redis_client.xgroup_del_consumer( + key, "non_existing_group", consumer_name + ) + + # attempt to create consumer for group again + assert ( + await redis_client.xgroup_create_consumer(key, group_name, consumer_name) + is False + ) + + # attempting to delete a consumer that has not been created yet returns 0 + assert ( + await redis_client.xgroup_del_consumer( + key, group_name, "non_existing_consumer" + ) + == 0 + ) + + # add two stream entries + assert ( + await redis_client.xadd( + key, [("f1_0", "v1_0")], StreamAddOptions(stream_id1_0) + ) + == stream_id1_0 + ) + assert ( + await redis_client.xadd( + key, [("f1_1", "v1_1")], StreamAddOptions(stream_id1_1) + ) + == stream_id1_1 + ) + + # read the entire stream for the consumer and mark messages as pending + assert await redis_client.xreadgroup( + {key: ">"}, + group_name, + consumer_name, + StreamReadGroupOptions(block_ms=1000, count=10), + ) == { + key: { + stream_id1_0: 
[["f1_0", "v1_0"]], + stream_id1_1: [["f1_1", "v1_1"]], + } + } + + # delete one of the stream entries + assert await redis_client.xdel(key, [stream_id1_0]) == 1 + + # now xreadgroup yields one empty stream entry and one non-empty stream entry + assert await redis_client.xreadgroup({key: "0"}, group_name, consumer_name) == { + key: {stream_id1_0: None, stream_id1_1: [["f1_1", "v1_1"]]} + } + + assert ( + await redis_client.xadd( + key, [("f1_2", "v1_2")], StreamAddOptions(stream_id1_2) + ) + == stream_id1_2 + ) + + # delete the consumer group and expect 2 pending messages + assert ( + await redis_client.xgroup_del_consumer(key, group_name, consumer_name) == 2 + ) + + # consume the last message with the previously deleted consumer (create the consumer anew) + assert await redis_client.xreadgroup( + {key: ">"}, + group_name, + consumer_name, + StreamReadGroupOptions(count=5, block_ms=1000), + ) == {key: {stream_id1_2: [["f1_2", "v1_2"]]}} + + # delete the consumer group and expect the pending message + assert ( + await redis_client.xgroup_del_consumer(key, group_name, consumer_name) == 1 + ) + + # test NOACK option + assert ( + await redis_client.xadd( + key, [("f1_3", "v1_3")], StreamAddOptions(stream_id1_3) + ) + == stream_id1_3 + ) + # since NOACK is passed, stream entry will be consumed without being added to the pending entries + assert await redis_client.xreadgroup( + {key: ">"}, + group_name, + consumer_name, + StreamReadGroupOptions(no_ack=True, count=5, block_ms=1000), + ) == {key: {stream_id1_3: [["f1_3", "v1_3"]]}} + assert ( + await redis_client.xreadgroup( + {key: ">"}, + group_name, + consumer_name, + StreamReadGroupOptions(no_ack=False, count=5, block_ms=1000), + ) + is None + ) + assert await redis_client.xreadgroup( + {key: "0"}, + group_name, + consumer_name, + StreamReadGroupOptions(no_ack=False, count=5, block_ms=1000), + ) == {key: {}} + + # attempting to call XGROUP CREATECONSUMER or XGROUP DELCONSUMER with a non-existing key should raise an 
error + with pytest.raises(RequestError): + await redis_client.xgroup_create_consumer( + non_existing_key, group_name, consumer_name + ) + with pytest.raises(RequestError): + await redis_client.xgroup_del_consumer( + non_existing_key, group_name, consumer_name + ) + + # key exists, but it is not a stream + assert await redis_client.set(string_key, "foo") == OK + with pytest.raises(RequestError): + await redis_client.xgroup_create_consumer( + string_key, group_name, consumer_name + ) + with pytest.raises(RequestError): + await redis_client.xgroup_del_consumer( + string_key, group_name, consumer_name + ) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_xreadgroup_edge_cases_and_failures( + self, redis_client: TGlideClient, cluster_mode, protocol, request + ): + key = f"{{testKey}}:{get_random_string(10)}" + non_existing_key = f"{{testKey}}:{get_random_string(10)}" + string_key = f"{{testKey}}:{get_random_string(10)}" + group_name = get_random_string(10) + consumer_name = get_random_string(10) + stream_id0 = "0" + stream_id1_0 = "1-0" + stream_id1_1 = "1-1" + + # attempting to execute against a non-existing key results in an error + with pytest.raises(RequestError): + await redis_client.xreadgroup( + {non_existing_key: stream_id0}, group_name, consumer_name + ) + + # create group and consumer for group + assert await redis_client.xgroup_create( + key, group_name, stream_id0, StreamGroupOptions(make_stream=True) + ) + assert ( + await redis_client.xgroup_create_consumer(key, group_name, consumer_name) + is True + ) + + # read from empty stream + assert ( + await redis_client.xreadgroup({key: ">"}, group_name, consumer_name) is None + ) + assert await redis_client.xreadgroup({key: "0"}, group_name, consumer_name) == { + key: {} + } + + # setup first entry + assert ( + await redis_client.xadd(key, [("f1", "v1")], StreamAddOptions(stream_id1_1)) + == 
stream_id1_1 + ) + + # if count is non-positive, it is ignored + assert await redis_client.xreadgroup( + {key: ">"}, group_name, consumer_name, StreamReadGroupOptions(count=0) + ) == { + key: { + stream_id1_1: [["f1", "v1"]], + }, + } + assert await redis_client.xreadgroup( + {key: stream_id1_0}, + group_name, + consumer_name, + StreamReadGroupOptions(count=-1), + ) == { + key: { + stream_id1_1: [["f1", "v1"]], + }, + } + + # invalid stream ID + with pytest.raises(RequestError): + await redis_client.xreadgroup( + {key: "invalid_stream_id"}, group_name, consumer_name + ) + + # invalid argument - block cannot be negative + with pytest.raises(RequestError): + await redis_client.xreadgroup( + {key: stream_id0}, + group_name, + consumer_name, + StreamReadGroupOptions(block_ms=-1), + ) + + # invalid argument - keys_and_ids must not be empty + with pytest.raises(RequestError): + await redis_client.xreadgroup({}, group_name, consumer_name) + + # first key exists, but it is not a stream + assert await redis_client.set(string_key, "foo") == OK + with pytest.raises(RequestError): + await redis_client.xreadgroup( + {string_key: stream_id1_1, key: stream_id1_1}, group_name, consumer_name + ) + + # second key exists, but it is not a stream + with pytest.raises(RequestError): + await redis_client.xreadgroup( + {key: stream_id1_1, string_key: stream_id1_1}, group_name, consumer_name + ) + + # attempting to execute command with a non-existing group results in an error + with pytest.raises(RequestError): + await redis_client.xreadgroup( + {key: stream_id1_1}, "non_existing_group", consumer_name + ) + + test_client = await create_client( + request=request, protocol=protocol, cluster_mode=cluster_mode, timeout=900 + ) + timeout_key = f"{{testKey}}:{get_random_string(10)}" + timeout_group_name = get_random_string(10) + timeout_consumer_name = get_random_string(10) + + # create a group read with the test client + # add a single stream entry and consumer + # the first call to ">" will 
return and update consumer group + # the second call to ">" will block waiting for new entries + # using anything other than ">" won't block, but will return the empty consumer result + # see: https://github.com/redis/redis/issues/6587 + assert ( + await test_client.xgroup_create( + timeout_key, + timeout_group_name, + stream_id0, + StreamGroupOptions(make_stream=True), + ) + == OK + ) + assert ( + await test_client.xgroup_create_consumer( + timeout_key, timeout_group_name, timeout_consumer_name + ) + is True + ) + assert ( + await test_client.xadd( + timeout_key, [("f1", "v1")], StreamAddOptions(stream_id1_1) + ) + == stream_id1_1 + ) + + # read the entire stream for the consumer and mark messages as pending + assert await test_client.xreadgroup( + {timeout_key: ">"}, timeout_group_name, timeout_consumer_name + ) == {timeout_key: {stream_id1_1: [["f1", "v1"]]}} + + # subsequent calls to read ">" will block + assert ( + await test_client.xreadgroup( + {timeout_key: ">"}, + timeout_group_name, + timeout_consumer_name, + StreamReadGroupOptions(block_ms=1000), + ) + is None + ) + + # ensure that command doesn't time out even if timeout > request timeout + async def endless_xreadgroup_call(): + await test_client.xreadgroup( + {timeout_key: ">"}, + timeout_group_name, + timeout_consumer_name, + StreamReadGroupOptions(block_ms=0), + ) + + # when xreadgroup is called with a block timeout of 0, it should never timeout, but we wrap the test with a + # timeout to avoid the test getting stuck forever. 
+ with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(endless_xreadgroup_call(), timeout=3) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_xack( + self, redis_client: TGlideClient, cluster_mode, protocol, request + ): + key = f"{{testKey}}:{get_random_string(10)}" + non_existing_key = f"{{testKey}}:{get_random_string(10)}" + string_key = f"{{testKey}}:{get_random_string(10)}" + group_name = get_random_string(10) + consumer_name = get_random_string(10) + stream_id0 = "0" + stream_id1_0 = "1-0" + stream_id1_1 = "1-1" + stream_id1_2 = "1-2" + + # setup: add 2 entries to the stream, create consumer group, read to mark them as pending + assert ( + await redis_client.xadd(key, [("f0", "v0")], StreamAddOptions(stream_id1_0)) + == stream_id1_0 + ) + assert ( + await redis_client.xadd(key, [("f1", "v1")], StreamAddOptions(stream_id1_1)) + == stream_id1_1 + ) + assert await redis_client.xgroup_create(key, group_name, stream_id0) == OK + assert await redis_client.xreadgroup({key: ">"}, group_name, consumer_name) == { + key: { + stream_id1_0: [["f0", "v0"]], + stream_id1_1: [["f1", "v1"]], + } + } + + # add one more entry + assert ( + await redis_client.xadd(key, [("f2", "v2")], StreamAddOptions(stream_id1_2)) + == stream_id1_2 + ) + + # acknowledge the first 2 entries + assert ( + await redis_client.xack(key, group_name, [stream_id1_0, stream_id1_1]) == 2 + ) + # attempting to acknowledge the first 2 entries again returns 0 since they were already acknowledged + assert ( + await redis_client.xack(key, group_name, [stream_id1_0, stream_id1_1]) == 0 + ) + # read the last, unacknowledged entry + assert await redis_client.xreadgroup({key: ">"}, group_name, consumer_name) == { + key: {stream_id1_2: [["f2", "v2"]]} + } + # deleting the consumer returns 1 since the last entry still hasn't been acknowledged + assert ( + await 
redis_client.xgroup_del_consumer(key, group_name, consumer_name) == 1 + ) + + # attempting to acknowledge a non-existing key returns 0 + assert ( + await redis_client.xack(non_existing_key, group_name, [stream_id1_0]) == 0 + ) + # attempting to acknowledge a non-existing group returns 0 + assert await redis_client.xack(key, "non_existing_group", [stream_id1_0]) == 0 + # attempting to acknowledge a non-existing ID returns 0 + assert await redis_client.xack(key, group_name, ["99-99"]) == 0 + + # invalid arg - ID list must not be empty + with pytest.raises(RequestError): + await redis_client.xack(key, group_name, []) + + # invalid arg - invalid stream ID format + with pytest.raises(RequestError): + await redis_client.xack(key, group_name, ["invalid_ID_format"]) + + # key exists, but it is not a stream + assert await redis_client.set(string_key, "foo") == OK + with pytest.raises(RequestError): + await redis_client.xack(string_key, group_name, [stream_id1_0]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_pfadd(self, redis_client: TGlideClient): + key = get_random_string(10) + assert await redis_client.pfadd(key, []) == 1 + assert await redis_client.pfadd(key, ["one", "two"]) == 1 + assert await redis_client.pfadd(key, ["two"]) == 0 + assert await redis_client.pfadd(key, []) == 0 + + assert await redis_client.set("foo", "value") == OK + with pytest.raises(RequestError): + await redis_client.pfadd("foo", []) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_pfcount(self, redis_client: TGlideClient): key1 = f"{{testKey}}:1-{get_random_string(10)}" key2 = f"{{testKey}}:2-{get_random_string(10)}" key3 = f"{{testKey}}:3-{get_random_string(10)}" string_key = f"{{testKey}}:4-{get_random_string(10)}" non_existing_key = 
f"{{testKey}}:5-{get_random_string(10)}" - assert await redis_client.pfadd(key1, ["a", "b", "c"]) == 1 - assert await redis_client.pfadd(key2, ["b", "c", "d"]) == 1 - assert await redis_client.pfcount([key1]) == 3 - assert await redis_client.pfcount([key2]) == 3 - assert await redis_client.pfcount([key1, key2]) == 4 - assert await redis_client.pfcount([key1, key2, non_existing_key]) == 4 - # empty HyperLogLog data set - assert await redis_client.pfadd(key3, []) == 1 - assert await redis_client.pfcount([key3]) == 0 + assert await redis_client.pfadd(key1, ["a", "b", "c"]) == 1 + assert await redis_client.pfadd(key2, ["b", "c", "d"]) == 1 + assert await redis_client.pfcount([key1]) == 3 + assert await redis_client.pfcount([key2]) == 3 + assert await redis_client.pfcount([key1, key2]) == 4 + assert await redis_client.pfcount([key1, key2, non_existing_key]) == 4 + # empty HyperLogLog data set + assert await redis_client.pfadd(key3, []) == 1 + assert await redis_client.pfcount([key3]) == 0 + + # incorrect argument - key list cannot be empty + with pytest.raises(RequestError): + await redis_client.pfcount([]) + + # key exists, but it is not a HyperLogLog + assert await redis_client.set(string_key, "value") == OK + with pytest.raises(RequestError): + await redis_client.pfcount([string_key]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_pfmerge(self, redis_client: TGlideClient): + key1 = f"{{testKey}}:1-{get_random_string(10)}" + key2 = f"{{testKey}}:2-{get_random_string(10)}" + key3 = f"{{testKey}}:3-{get_random_string(10)}" + string_key = f"{{testKey}}:4-{get_random_string(10)}" + non_existing_key = f"{{testKey}}:5-{get_random_string(10)}" + + assert await redis_client.pfadd(key1, ["a", "b", "c"]) == 1 + assert await redis_client.pfadd(key2, ["b", "c", "d"]) == 1 + + # merge into new HyperLogLog data set + assert await redis_client.pfmerge(key3, [key1, 
key2]) == OK + assert await redis_client.pfcount([key3]) == 4 + + # merge into existing HyperLogLog data set + assert await redis_client.pfmerge(key1, [key2]) == OK + assert await redis_client.pfcount([key1]) == 4 + + # non-existing source key + assert await redis_client.pfmerge(key2, [key1, non_existing_key]) == OK + assert await redis_client.pfcount([key2]) == 4 + + # empty source key list + assert await redis_client.pfmerge(key1, []) == OK + assert await redis_client.pfcount([key1]) == 4 + + # source key exists, but it is not a HyperLogLog + assert await redis_client.set(string_key, "foo") + with pytest.raises(RequestError): + assert await redis_client.pfmerge(key3, [string_key]) + + # destination key exists, but it is not a HyperLogLog + with pytest.raises(RequestError): + assert await redis_client.pfmerge(string_key, [key3]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_bitcount(self, redis_client: TGlideClient): + key1 = get_random_string(10) + set_key = get_random_string(10) + non_existing_key = get_random_string(10) + value = "foobar" + + assert await redis_client.set(key1, value) == OK + assert await redis_client.bitcount(key1) == 26 + assert await redis_client.bitcount(key1, OffsetOptions(1, 1)) == 6 + assert await redis_client.bitcount(key1, OffsetOptions(0, -5)) == 10 + assert await redis_client.bitcount(non_existing_key, OffsetOptions(5, 30)) == 0 + assert await redis_client.bitcount(non_existing_key) == 0 + + # key exists, but it is not a string + assert await redis_client.sadd(set_key, [value]) == 1 + with pytest.raises(RequestError): + await redis_client.bitcount(set_key) + with pytest.raises(RequestError): + await redis_client.bitcount(set_key, OffsetOptions(1, 1)) + + if await check_if_server_version_lt(redis_client, "7.0.0"): + # exception thrown because BIT and BYTE options were implemented after 7.0.0 + with 
pytest.raises(RequestError): + await redis_client.bitcount( + key1, OffsetOptions(2, 5, BitmapIndexType.BYTE) + ) + with pytest.raises(RequestError): + await redis_client.bitcount( + key1, OffsetOptions(2, 5, BitmapIndexType.BIT) + ) + else: + assert ( + await redis_client.bitcount( + key1, OffsetOptions(2, 5, BitmapIndexType.BYTE) + ) + == 16 + ) + assert ( + await redis_client.bitcount( + key1, OffsetOptions(5, 30, BitmapIndexType.BIT) + ) + == 17 + ) + assert ( + await redis_client.bitcount( + key1, OffsetOptions(5, -5, BitmapIndexType.BIT) + ) + == 23 + ) + assert ( + await redis_client.bitcount( + non_existing_key, OffsetOptions(5, 30, BitmapIndexType.BIT) + ) + == 0 + ) + + # key exists but it is not a string + with pytest.raises(RequestError): + await redis_client.bitcount( + set_key, OffsetOptions(1, 1, BitmapIndexType.BIT) + ) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_setbit(self, redis_client: TGlideClient): + key = get_random_string(10) + set_key = get_random_string(10) + + assert await redis_client.setbit(key, 0, 1) == 0 + assert await redis_client.setbit(key, 0, 0) == 1 + + # invalid argument - offset can't be negative + with pytest.raises(RequestError): + assert await redis_client.setbit(key, -1, 0) == 1 + + # key exists, but it is not a string + assert await redis_client.sadd(set_key, ["foo"]) == 1 + with pytest.raises(RequestError): + await redis_client.setbit(set_key, 0, 0) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_getbit(self, redis_client: TGlideClient): + key = get_random_string(10) + non_existing_key = get_random_string(10) + set_key = get_random_string(10) + value = "foobar" + + assert await redis_client.set(key, value) == OK + assert await redis_client.getbit(key, 1) == 1 + # When offset is beyond the 
string length, the string is assumed to be a contiguous space with 0 bits. + assert await redis_client.getbit(key, 1000) == 0 + # When key does not exist it is assumed to be an empty string, so offset is always out of range and the value is + # also assumed to be a contiguous space with 0 bits. + assert await redis_client.getbit(non_existing_key, 1) == 0 + + # invalid argument - offset can't be negative + with pytest.raises(RequestError): + assert await redis_client.getbit(key, -1) == 1 + + # key exists, but it is not a string + assert await redis_client.sadd(set_key, ["foo"]) == 1 + with pytest.raises(RequestError): + await redis_client.getbit(set_key, 0) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_bitpos_and_bitpos_interval(self, redis_client: TGlideClient): + key = get_random_string(10) + non_existing_key = get_random_string(10) + set_key = get_random_string(10) + value = ( + "?f0obar" # 00111111 01100110 00110000 01101111 01100010 01100001 01110010 + ) + + assert await redis_client.set(key, value) == OK + assert await redis_client.bitpos(key, 0) == 0 + assert await redis_client.bitpos(key, 1) == 2 + assert await redis_client.bitpos(key, 1, 1) == 9 + assert await redis_client.bitpos_interval(key, 0, 3, 5) == 24 + + # `BITPOS` returns -1 for non-existing strings + assert await redis_client.bitpos(non_existing_key, 1) == -1 + assert await redis_client.bitpos_interval(non_existing_key, 1, 3, 5) == -1 + + # invalid argument - bit value must be 0 or 1 + with pytest.raises(RequestError): + await redis_client.bitpos(key, 2) + with pytest.raises(RequestError): + await redis_client.bitpos_interval(key, 2, 3, 5) + + # key exists, but it is not a string + assert await redis_client.sadd(set_key, [value]) == 1 + with pytest.raises(RequestError): + await redis_client.bitpos(set_key, 1) + with pytest.raises(RequestError): + await 
redis_client.bitpos_interval(set_key, 1, 1, -1) + + if await check_if_server_version_lt(redis_client, "7.0.0"): + # error thrown because BIT and BYTE options were implemented after 7.0.0 + with pytest.raises(RequestError): + await redis_client.bitpos_interval(key, 1, 1, -1, BitmapIndexType.BYTE) + with pytest.raises(RequestError): + await redis_client.bitpos_interval(key, 1, 1, -1, BitmapIndexType.BIT) + else: + assert ( + await redis_client.bitpos_interval(key, 0, 3, 5, BitmapIndexType.BYTE) + == 24 + ) + assert ( + await redis_client.bitpos_interval(key, 1, 43, -2, BitmapIndexType.BIT) + == 47 + ) + assert ( + await redis_client.bitpos_interval( + non_existing_key, 1, 3, 5, BitmapIndexType.BYTE + ) + == -1 + ) + assert ( + await redis_client.bitpos_interval( + non_existing_key, 1, 3, 5, BitmapIndexType.BIT + ) + == -1 + ) + + # key exists, but it is not a string + with pytest.raises(RequestError): + await redis_client.bitpos_interval( + set_key, 1, 1, -1, BitmapIndexType.BIT + ) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_bitop(self, redis_client: TGlideClient): + key1 = f"{{testKey}}:1-{get_random_string(10)}" + key2 = f"{{testKey}}:2-{get_random_string(10)}" + keys = [key1, key2] + destination = f"{{testKey}}:3-{get_random_string(10)}" + non_existing_key1 = f"{{testKey}}:4-{get_random_string(10)}" + non_existing_key2 = f"{{testKey}}:5-{get_random_string(10)}" + non_existing_keys = [non_existing_key1, non_existing_key2] + set_key = f"{{testKey}}:6-{get_random_string(10)}" + value1 = "foobar" + value2 = "abcdef" + + assert await redis_client.set(key1, value1) == OK + assert await redis_client.set(key2, value2) == OK + assert await redis_client.bitop(BitwiseOperation.AND, destination, keys) == 6 + assert await redis_client.get(destination) == "`bc`ab" + assert await redis_client.bitop(BitwiseOperation.OR, destination, keys) == 6 + assert await 
redis_client.get(destination) == "goofev" + + # reset values for simplicity of results in XOR + assert await redis_client.set(key1, "a") == OK + assert await redis_client.set(key2, "b") == OK + assert await redis_client.bitop(BitwiseOperation.XOR, destination, keys) == 1 + assert await redis_client.get(destination) == "\u0003" + + # test single source key + assert await redis_client.bitop(BitwiseOperation.AND, destination, [key1]) == 1 + assert await redis_client.get(destination) == "a" + assert await redis_client.bitop(BitwiseOperation.OR, destination, [key1]) == 1 + assert await redis_client.get(destination) == "a" + assert await redis_client.bitop(BitwiseOperation.XOR, destination, [key1]) == 1 + assert await redis_client.get(destination) == "a" + assert await redis_client.bitop(BitwiseOperation.NOT, destination, [key1]) == 1 + # currently, attempting to get the value from destination after the above NOT incorrectly raises an error + # TODO: update with a GET call once fix is implemented for https://github.com/aws/glide-for-redis/issues/1447 + + assert await redis_client.setbit(key1, 0, 1) == 0 + assert await redis_client.bitop(BitwiseOperation.NOT, destination, [key1]) == 1 + assert await redis_client.get(destination) == "\u001e" + + # stores None when all keys hold empty strings + assert ( + await redis_client.bitop( + BitwiseOperation.AND, destination, non_existing_keys + ) + == 0 + ) + assert await redis_client.get(destination) is None + assert ( + await redis_client.bitop( + BitwiseOperation.OR, destination, non_existing_keys + ) + == 0 + ) + assert await redis_client.get(destination) is None + assert ( + await redis_client.bitop( + BitwiseOperation.XOR, destination, non_existing_keys + ) + == 0 + ) + assert await redis_client.get(destination) is None + assert ( + await redis_client.bitop( + BitwiseOperation.NOT, destination, [non_existing_key1] + ) + == 0 + ) + assert await redis_client.get(destination) is None + + # invalid argument - source key list 
cannot be empty + with pytest.raises(RequestError): + await redis_client.bitop(BitwiseOperation.OR, destination, []) - # incorrect argument - key list cannot be empty + # invalid arguments - NOT cannot be passed more than 1 key with pytest.raises(RequestError): - await redis_client.pfcount([]) + await redis_client.bitop(BitwiseOperation.NOT, destination, [key1, key2]) - # key exists, but it is not a HyperLogLog - assert await redis_client.set(string_key, "value") == OK + assert await redis_client.sadd(set_key, [value1]) == 1 + # invalid argument - source key has the wrong type with pytest.raises(RequestError): - await redis_client.pfcount([string_key]) + await redis_client.bitop(BitwiseOperation.AND, destination, [set_key]) @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_pfmerge(self, redis_client: TRedisClient): - key1 = f"{{testKey}}:1-{get_random_string(10)}" - key2 = f"{{testKey}}:2-{get_random_string(10)}" - key3 = f"{{testKey}}:3-{get_random_string(10)}" - string_key = f"{{testKey}}:4-{get_random_string(10)}" - non_existing_key = f"{{testKey}}:5-{get_random_string(10)}" + async def test_bitfield(self, redis_client: TGlideClient): + key1 = get_random_string(10) + key2 = get_random_string(10) + non_existing_key = get_random_string(10) + set_key = get_random_string(10) + foobar = "foobar" + u2 = UnsignedEncoding(2) + u7 = UnsignedEncoding(7) + i3 = SignedEncoding(3) + i8 = SignedEncoding(8) + offset1 = BitOffset(1) + offset5 = BitOffset(5) + offset_multiplier4 = BitOffsetMultiplier(4) + offset_multiplier8 = BitOffsetMultiplier(8) + overflow_set = BitFieldSet(u2, offset1, -10) + overflow_get = BitFieldGet(u2, offset1) + + # binary value: 01100110 01101111 01101111 01100010 01100001 01110010 + assert await redis_client.set(key1, foobar) == OK + + # SET tests + assert await redis_client.bitfield( + key1, + [ + # binary value becomes: 0(10)00110 01101111 
01101111 01100010 01100001 01110010 + BitFieldSet(u2, offset1, 2), + # binary value becomes: 01000(011) 01101111 01101111 01100010 01100001 01110010 + BitFieldSet(i3, offset5, 3), + # binary value becomes: 01000011 01101111 01101111 0110(0010 010)00001 01110010 + BitFieldSet(u7, offset_multiplier4, 18), + # addressing with SET or INCRBY bits outside the current string length will enlarge the string, + # zero-padding it, as needed, for the minimal length needed, according to the most far bit touched. + # + # binary value becomes: + # 01000011 01101111 01101111 01100010 01000001 01110010 00000000 00000000 (00010100) + BitFieldSet(i8, offset_multiplier8, 20), + BitFieldGet(u2, offset1), + BitFieldGet(i3, offset5), + BitFieldGet(u7, offset_multiplier4), + BitFieldGet(i8, offset_multiplier8), + ], + ) == [3, -2, 19, 0, 2, 3, 18, 20] - assert await redis_client.pfadd(key1, ["a", "b", "c"]) == 1 - assert await redis_client.pfadd(key2, ["b", "c", "d"]) == 1 + # INCRBY tests + assert await redis_client.bitfield( + key1, + [ + # binary value becomes: + # 0(11)00011 01101111 01101111 01100010 01000001 01110010 00000000 00000000 00010100 + BitFieldIncrBy(u2, offset1, 1), + # binary value becomes: + # 01100(101) 01101111 01101111 01100010 01000001 01110010 00000000 00000000 00010100 + BitFieldIncrBy(i3, offset5, 2), + # binary value becomes: + # 01100101 01101111 01101111 0110(0001 111)00001 01110010 00000000 00000000 00010100 + BitFieldIncrBy(u7, offset_multiplier4, -3), + # binary value becomes: + # 01100101 01101111 01101111 01100001 11100001 01110010 00000000 00000000 (00011110) + BitFieldIncrBy(i8, offset_multiplier8, 10), + ], + ) == [3, -3, 15, 30] - # merge into new HyperLogLog data set - assert await redis_client.pfmerge(key3, [key1, key2]) == OK - assert await redis_client.pfcount([key3]) == 4 + # OVERFLOW WRAP is used by default if no OVERFLOW is specified + assert await redis_client.bitfield( + key2, + [ + overflow_set, + BitFieldOverflow(BitOverflowControl.WRAP), + 
overflow_set, + overflow_get, + ], + ) == [0, 2, 2] - # merge into existing HyperLogLog data set - assert await redis_client.pfmerge(key1, [key2]) == OK - assert await redis_client.pfcount([key1]) == 4 + # OVERFLOW affects only SET or INCRBY after OVERFLOW subcommand + assert await redis_client.bitfield( + key2, + [ + overflow_set, + BitFieldOverflow(BitOverflowControl.SAT), + overflow_set, + overflow_get, + BitFieldOverflow(BitOverflowControl.FAIL), + overflow_set, + ], + ) == [2, 2, 3, None] - # non-existing source key - assert await redis_client.pfmerge(key2, [key1, non_existing_key]) == OK - assert await redis_client.pfcount([key2]) == 4 + # if the key doesn't exist, the operation is performed as though the missing value was a string with all bits + # set to 0. + assert await redis_client.bitfield( + non_existing_key, [BitFieldSet(UnsignedEncoding(2), BitOffset(3), 2)] + ) == [0] - # empty source key list - assert await redis_client.pfmerge(key1, []) == OK - assert await redis_client.pfcount([key1]) == 4 + # empty subcommands argument returns an empty list + assert await redis_client.bitfield(key1, []) == [] - # source key exists, but it is not a HyperLogLog - assert await redis_client.set(string_key, "foo") + # invalid argument - offset must be >= 0 with pytest.raises(RequestError): - assert await redis_client.pfmerge(key3, [string_key]) + await redis_client.bitfield( + key1, [BitFieldSet(UnsignedEncoding(5), BitOffset(-1), 1)] + ) - # destination key exists, but it is not a HyperLogLog + # invalid argument - encoding size must be > 0 with pytest.raises(RequestError): - assert await redis_client.pfmerge(string_key, [key3]) + await redis_client.bitfield( + key1, [BitFieldSet(UnsignedEncoding(0), BitOffset(1), 1)] + ) + + # invalid argument - unsigned encoding size must be < 64 + with pytest.raises(RequestError): + await redis_client.bitfield( + key1, [BitFieldSet(UnsignedEncoding(64), BitOffset(1), 1)] + ) + + # invalid argument - signed encoding size must be < 
65 + with pytest.raises(RequestError): + await redis_client.bitfield( + key1, [BitFieldSet(SignedEncoding(65), BitOffset(1), 1)] + ) + + # key exists, but it is not a string + assert await redis_client.sadd(set_key, [foobar]) == 1 + with pytest.raises(RequestError): + await redis_client.bitfield( + set_key, [BitFieldSet(SignedEncoding(3), BitOffset(1), 2)] + ) @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_setbit(self, redis_client: TRedisClient): + async def test_bitfield_read_only(self, redis_client: TGlideClient): + min_version = "6.0.0" + if await check_if_server_version_lt(redis_client, min_version): + return pytest.mark.skip(reason=f"Redis version required >= {min_version}") + key = get_random_string(10) + non_existing_key = get_random_string(10) set_key = get_random_string(10) + foobar = "foobar" + unsigned_offset_get = BitFieldGet(UnsignedEncoding(2), BitOffset(1)) - assert await redis_client.setbit(key, 0, 1) == 0 - assert await redis_client.setbit(key, 0, 0) == 1 + # binary value: 01100110 01101111 01101111 01100010 01100001 01110010 + assert await redis_client.set(key, foobar) == OK + assert await redis_client.bitfield_read_only( + key, + [ + # Get value in: 0(11)00110 01101111 01101111 01100010 01100001 01110010 00010100 + unsigned_offset_get, + # Get value in: 01100(110) 01101111 01101111 01100010 01100001 01110010 00010100 + BitFieldGet(SignedEncoding(3), BitOffset(5)), + # Get value in: 01100110 01101111 01101(111 0110)0010 01100001 01110010 00010100 + BitFieldGet(UnsignedEncoding(7), BitOffsetMultiplier(3)), + # Get value in: 01100110 01101111 (01101111) 01100010 01100001 01110010 00010100 + BitFieldGet(SignedEncoding(8), BitOffsetMultiplier(2)), + ], + ) == [3, -2, 118, 111] + # offset is greater than current length of string: the operation is performed like the missing part all consists + # of bits set to 0. 
+ assert await redis_client.bitfield_read_only( + key, [BitFieldGet(UnsignedEncoding(3), BitOffset(100))] + ) == [0] + # similarly, if the key doesn't exist, the operation is performed as though the missing value was a string with + # all bits set to 0. + assert await redis_client.bitfield_read_only( + non_existing_key, [unsigned_offset_get] + ) == [0] + + # empty subcommands argument returns an empty list + assert await redis_client.bitfield_read_only(key, []) == [] + + # invalid argument - offset must be >= 0 + with pytest.raises(RequestError): + await redis_client.bitfield_read_only( + key, [BitFieldGet(UnsignedEncoding(5), BitOffset(-1))] + ) - # invalid argument - offset can't be negative + # invalid argument - encoding size must be > 0 with pytest.raises(RequestError): - assert await redis_client.setbit(key, -1, 0) == 1 + await redis_client.bitfield_read_only( + key, [BitFieldGet(UnsignedEncoding(0), BitOffset(1))] + ) + + # invalid argument - unsigned encoding size must be < 64 + with pytest.raises(RequestError): + await redis_client.bitfield_read_only( + key, [BitFieldGet(UnsignedEncoding(64), BitOffset(1))] + ) + + # invalid argument - signed encoding size must be < 65 + with pytest.raises(RequestError): + await redis_client.bitfield_read_only( + key, [BitFieldGet(SignedEncoding(65), BitOffset(1))] + ) # key exists, but it is not a string - assert await redis_client.sadd(set_key, ["foo"]) == 1 + assert await redis_client.sadd(set_key, [foobar]) == 1 with pytest.raises(RequestError): - await redis_client.setbit(set_key, 0, 0) + await redis_client.bitfield_read_only(set_key, [unsigned_offset_get]) @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_object_encoding(self, redis_client: TRedisClient): + async def test_object_encoding(self, redis_client: TGlideClient): string_key = get_random_string(10) list_key = get_random_string(10) hashtable_key = 
get_random_string(10) @@ -4412,7 +6174,7 @@ async def test_object_encoding(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_object_freq(self, redis_client: TRedisClient): + async def test_object_freq(self, redis_client: TGlideClient): key = get_random_string(10) non_existing_key = get_random_string(10) maxmemory_policy_key = "maxmemory-policy" @@ -4433,19 +6195,19 @@ async def test_object_freq(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_object_idletime(self, redis_client: TRedisClient): + async def test_object_idletime(self, redis_client: TGlideClient): string_key = get_random_string(10) non_existing_key = get_random_string(10) assert await redis_client.object_idletime(non_existing_key) is None assert await redis_client.set(string_key, "foo") == OK - time.sleep(1) + time.sleep(2) idletime = await redis_client.object_idletime(string_key) assert idletime is not None and idletime > 0 @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_object_refcount(self, redis_client: TRedisClient): + async def test_object_refcount(self, redis_client: TGlideClient): string_key = get_random_string(10) non_existing_key = get_random_string(10) @@ -4550,12 +6312,284 @@ async def test_function_load_cluster_with_route( assert await redis_client.function_load(new_code, True, route) == lib_name + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_srandmember(self, redis_client: TGlideClient): + key = get_random_string(10) + string_key = get_random_string(10) + elements = ["one", "two"] + 
assert await redis_client.sadd(key, elements) == 2 + + member = await redis_client.srandmember(key) + assert member in elements + assert await redis_client.srandmember("non_existing_key") is None + + # key exists, but it is not a set + assert await redis_client.set(string_key, "value") == OK + with pytest.raises(RequestError): + await redis_client.srandmember(string_key) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_srandmember_count(self, redis_client: TGlideClient): + key = get_random_string(10) + string_key = get_random_string(10) + elements = ["one", "two"] + assert await redis_client.sadd(key, elements) == 2 + + # unique values are expected as count is positive + members = await redis_client.srandmember_count(key, 4) + assert len(members) == 2 + assert set(members) == {"one", "two"} + + # duplicate values are expected as count is negative + members = await redis_client.srandmember_count(key, -4) + assert len(members) == 4 + for member in members: + assert member in elements + + # empty return values for non-existing or empty keys + assert await redis_client.srandmember_count(key, 0) == [] + assert await redis_client.srandmember_count("non_existing_key", 0) == [] + + # key exists, but it is not a set + assert await redis_client.set(string_key, "value") == OK + with pytest.raises(RequestError): + await redis_client.srandmember_count(string_key, 8) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_flushall(self, redis_client: TGlideClient): + min_version = "6.2.0" + key = f"{{key}}-1{get_random_string(5)}" + value = get_random_string(5) + + await redis_client.set(key, value) + assert await redis_client.dbsize() > 0 + assert await redis_client.flushall() is OK + assert await redis_client.flushall(FlushMode.ASYNC) is OK + if not await 
check_if_server_version_lt(redis_client, min_version): + assert await redis_client.flushall(FlushMode.SYNC) is OK + assert await redis_client.dbsize() == 0 + + if isinstance(redis_client, GlideClusterClient): + await redis_client.set(key, value) + assert await redis_client.flushall(route=AllPrimaries()) is OK + assert await redis_client.flushall(FlushMode.ASYNC, AllPrimaries()) is OK + if not await check_if_server_version_lt(redis_client, min_version): + assert await redis_client.flushall(FlushMode.SYNC, AllPrimaries()) is OK + assert await redis_client.dbsize() == 0 + + @pytest.mark.parametrize("cluster_mode", [False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_standalone_flushdb(self, redis_client: GlideClient): + min_version = "6.2.0" + key1 = f"{{key}}-1{get_random_string(5)}" + key2 = f"{{key}}-2{get_random_string(5)}" + value = get_random_string(5) + + # fill DB 0 and check size non-empty + assert await redis_client.select(0) is OK + await redis_client.set(key1, value) + assert await redis_client.dbsize() > 0 + + # fill DB 1 and check size non-empty + assert await redis_client.select(1) is OK + await redis_client.set(key2, value) + assert await redis_client.dbsize() > 0 + + # flush DB 1 and check again + assert await redis_client.flushdb() is OK + assert await redis_client.dbsize() == 0 + + # swith to DB 0, flush, and check + assert await redis_client.select(0) is OK + assert await redis_client.dbsize() > 0 + assert await redis_client.flushdb(FlushMode.ASYNC) is OK + assert await redis_client.dbsize() == 0 + + # verify flush SYNC + if not await check_if_server_version_lt(redis_client, min_version): + await redis_client.set(key2, value) + assert await redis_client.dbsize() > 0 + assert await redis_client.flushdb(FlushMode.SYNC) is OK + assert await redis_client.dbsize() == 0 + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, 
ProtocolVersion.RESP3]) + async def test_getex(self, redis_client: TGlideClient): + min_version = "6.2.0" + if await check_if_server_version_lt(redis_client, min_version): + return pytest.mark.skip(reason=f"Redis version required >= {min_version}") + + key1 = get_random_string(10) + non_existing_key = get_random_string(10) + value = get_random_string(10) + + assert await redis_client.set(key1, value) == OK + assert await redis_client.getex(non_existing_key) is None + assert await redis_client.getex(key1) == value + assert await redis_client.ttl(key1) == -1 + + # setting expiration timer + assert ( + await redis_client.getex(key1, ExpiryGetEx(ExpiryTypeGetEx.MILLSEC, 50)) + == value + ) + assert await redis_client.ttl(key1) != -1 + + # setting and clearing expiration timer + assert await redis_client.set(key1, value) == OK + assert ( + await redis_client.getex(key1, ExpiryGetEx(ExpiryTypeGetEx.SEC, 10)) + == value + ) + assert ( + await redis_client.getex(key1, ExpiryGetEx(ExpiryTypeGetEx.PERSIST, None)) + == value + ) + assert await redis_client.ttl(key1) == -1 + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_copy_no_database(self, redis_client: TGlideClient): + min_version = "6.2.0" + if await check_if_server_version_lt(redis_client, min_version): + return pytest.mark.skip(reason=f"Redis version required >= {min_version}") + + source = f"{{testKey}}:1-{get_random_string(10)}" + destination = f"{{testKey}}:2-{get_random_string(10)}" + value1 = get_random_string(5) + value2 = get_random_string(5) + + # neither key exists + assert await redis_client.copy(source, destination, replace=False) is False + assert await redis_client.copy(source, destination) is False + + # source exists, destination does not + await redis_client.set(source, value1) + assert await redis_client.copy(source, destination, replace=False) is True + assert await 
redis_client.get(destination) == value1 + + # new value for source key + await redis_client.set(source, value2) + + # both exists, no REPLACE + assert await redis_client.copy(source, destination) is False + assert await redis_client.copy(source, destination, replace=False) is False + assert await redis_client.get(destination) == value1 + + # both exists, with REPLACE + assert await redis_client.copy(source, destination, replace=True) is True + assert await redis_client.get(destination) == value2 + + @pytest.mark.parametrize("cluster_mode", [False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_copy_database(self, redis_client: GlideClient): + min_version = "6.2.0" + if await check_if_server_version_lt(redis_client, min_version): + return pytest.mark.skip(reason=f"Redis version required >= {min_version}") + + source = get_random_string(10) + destination = get_random_string(10) + value1 = get_random_string(5) + value2 = get_random_string(5) + index0 = 0 + index1 = 1 + index2 = 2 + + try: + assert await redis_client.select(index0) == OK + + # neither key exists + assert ( + await redis_client.copy(source, destination, index1, replace=False) + is False + ) + + # source exists, destination does not + await redis_client.set(source, value1) + assert ( + await redis_client.copy(source, destination, index1, replace=False) + is True + ) + assert await redis_client.select(index1) == OK + assert await redis_client.get(destination) == value1 + + # new value for source key + assert await redis_client.select(index0) == OK + await redis_client.set(source, value2) + + # no REPLACE, copying to existing key on DB 0 & 1, non-existing key on DB 2 + assert ( + await redis_client.copy(source, destination, index1, replace=False) + is False + ) + assert ( + await redis_client.copy(source, destination, index2, replace=False) + is True + ) + + # new value only gets copied to DB 2 + assert await redis_client.select(index1) == OK + 
assert await redis_client.get(destination) == value1 + assert await redis_client.select(index2) == OK + assert await redis_client.get(destination) == value2 + + # both exists, with REPLACE, when value isn't the same, source always get copied to destination + assert await redis_client.select(index0) == OK + assert ( + await redis_client.copy(source, destination, index1, replace=True) + is True + ) + assert await redis_client.select(index1) == OK + assert await redis_client.get(destination) == value2 + + # invalid DB index + with pytest.raises(RequestError): + await redis_client.copy(source, destination, -1, replace=True) + finally: + assert await redis_client.select(0) == OK + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_lolwut(self, redis_client: TGlideClient): + result = await redis_client.lolwut() + assert "Redis ver. " in result + result = await redis_client.lolwut(parameters=[]) + assert "Redis ver. " in result + result = await redis_client.lolwut(parameters=[50, 20]) + assert "Redis ver. " in result + result = await redis_client.lolwut(6) + assert "Redis ver. " in result + result = await redis_client.lolwut(5, [30, 4, 4]) + assert "Redis ver. " in result + + if isinstance(redis_client, GlideClusterClient): + # test with multi-node route + result = await redis_client.lolwut(route=AllNodes()) + assert isinstance(result, dict) + for node_result in result.values(): + assert "Redis ver. " in node_result + + result = await redis_client.lolwut(parameters=[10, 20], route=AllNodes()) + assert isinstance(result, dict) + for node_result in result.values(): + assert "Redis ver. " in node_result + + # test with single-node route + result = await redis_client.lolwut(2, route=RandomNode()) + assert "Redis ver. " in node_result + + result = await redis_client.lolwut(2, [10, 20], RandomNode()) + assert "Redis ver. 
" in node_result + class TestMultiKeyCommandCrossSlot: @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_multi_key_command_returns_cross_slot_error( - self, redis_client: RedisClusterClient + self, redis_client: GlideClusterClient ): promises: list[Any] = [ redis_client.blpop(["abc", "zxy", "lkn"], 0.1), @@ -4588,8 +6622,24 @@ async def test_multi_key_command_returns_cross_slot_error( "abc", "zxy", ListDirection.LEFT, ListDirection.LEFT, 1 ), redis_client.msetnx({"abc": "abc", "zxy": "zyx"}), + redis_client.sunion(["def", "ghi"]), + redis_client.bitop(BitwiseOperation.OR, "abc", ["zxy", "lkn"]), + redis_client.xread({"abc": "0-0", "zxy": "0-0"}), ] + if not await check_if_server_version_lt(redis_client, "6.2.0"): + promises.extend( + [ + redis_client.geosearchstore( + "abc", + "zxy", + GeospatialData(15, 37), + GeoSearchByBox(400, 400, GeoUnit.KILOMETERS), + ), + redis_client.copy("abc", "zxy", replace=True), + ] + ) + if not await check_if_server_version_lt(redis_client, "7.0.0"): promises.extend( [ @@ -4613,14 +6663,14 @@ async def test_multi_key_command_returns_cross_slot_error( @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_multi_key_command_routed_to_multiple_nodes( - self, redis_client: RedisClusterClient + self, redis_client: GlideClusterClient ): await redis_client.exists(["abc", "zxy", "lkn"]) await redis_client.unlink(["abc", "zxy", "lkn"]) await redis_client.delete(["abc", "zxy", "lkn"]) await redis_client.mget(["abc", "zxy", "lkn"]) await redis_client.mset({"abc": "1", "zxy": "2", "lkn": "3"}) - # TODO touch + await redis_client.touch(["abc", "zxy", "lkn"]) class TestCommandsUnitTests: @@ -4658,6 +6708,50 @@ def test_expiry_cmd_args(self): ) assert exp_unix_millisec_datetime.get_cmd_args() == ["PXAT", "1682639759342"] + def test_get_expiry_cmd_args(self): 
+ exp_sec = ExpiryGetEx(ExpiryTypeGetEx.SEC, 5) + assert exp_sec.get_cmd_args() == ["EX", "5"] + + exp_sec_timedelta = ExpiryGetEx(ExpiryTypeGetEx.SEC, timedelta(seconds=5)) + assert exp_sec_timedelta.get_cmd_args() == ["EX", "5"] + + exp_millsec = ExpiryGetEx(ExpiryTypeGetEx.MILLSEC, 5) + assert exp_millsec.get_cmd_args() == ["PX", "5"] + + exp_millsec_timedelta = ExpiryGetEx( + ExpiryTypeGetEx.MILLSEC, timedelta(seconds=5) + ) + assert exp_millsec_timedelta.get_cmd_args() == ["PX", "5000"] + + exp_millsec_timedelta = ExpiryGetEx( + ExpiryTypeGetEx.MILLSEC, timedelta(seconds=5) + ) + assert exp_millsec_timedelta.get_cmd_args() == ["PX", "5000"] + + exp_unix_sec = ExpiryGetEx(ExpiryTypeGetEx.UNIX_SEC, 1682575739) + assert exp_unix_sec.get_cmd_args() == ["EXAT", "1682575739"] + + exp_unix_sec_datetime = ExpiryGetEx( + ExpiryTypeGetEx.UNIX_SEC, + datetime(2023, 4, 27, 23, 55, 59, 342380, timezone.utc), + ) + assert exp_unix_sec_datetime.get_cmd_args() == ["EXAT", "1682639759"] + + exp_unix_millisec = ExpiryGetEx(ExpiryTypeGetEx.UNIX_MILLSEC, 1682586559964) + assert exp_unix_millisec.get_cmd_args() == ["PXAT", "1682586559964"] + + exp_unix_millisec_datetime = ExpiryGetEx( + ExpiryTypeGetEx.UNIX_MILLSEC, + datetime(2023, 4, 27, 23, 55, 59, 342380, timezone.utc), + ) + assert exp_unix_millisec_datetime.get_cmd_args() == ["PXAT", "1682639759342"] + + exp_persist = ExpiryGetEx( + ExpiryTypeGetEx.PERSIST, + None, + ) + assert exp_persist.get_cmd_args() == ["PERSIST"] + def test_expiry_raises_on_value_error(self): with pytest.raises(ValueError): ExpirySet(ExpiryType.SEC, 5.5) @@ -4675,7 +6769,7 @@ def test_is_single_response(self): class TestClusterRoutes: async def cluster_route_custom_command_multi_nodes( self, - redis_client: RedisClusterClient, + redis_client: GlideClusterClient, route: Route, ): cluster_nodes = await redis_client.custom_command(["CLUSTER", "NODES"]) @@ -4709,14 +6803,14 @@ async def cluster_route_custom_command_multi_nodes( 
@pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_cluster_route_custom_command_all_nodes( - self, redis_client: RedisClusterClient + self, redis_client: GlideClusterClient ): await self.cluster_route_custom_command_multi_nodes(redis_client, AllNodes()) @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_cluster_route_custom_command_all_primaries( - self, redis_client: RedisClusterClient + self, redis_client: GlideClusterClient ): await self.cluster_route_custom_command_multi_nodes( redis_client, AllPrimaries() @@ -4725,7 +6819,7 @@ async def test_cluster_route_custom_command_all_primaries( @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_cluster_route_custom_command_random_node( - self, redis_client: RedisClusterClient + self, redis_client: GlideClusterClient ): info_res = await redis_client.custom_command( ["INFO", "REPLICATION"], RandomNode() @@ -4734,7 +6828,7 @@ async def test_cluster_route_custom_command_random_node( assert "role:master" in info_res or "role:slave" in info_res async def cluster_route_custom_command_slot_route( - self, redis_client: RedisClusterClient, is_slot_key: bool + self, redis_client: GlideClusterClient, is_slot_key: bool ): route_class = SlotKeyRoute if is_slot_key else SlotIdRoute route_second_arg = "foo" if is_slot_key else 4000 @@ -4761,20 +6855,20 @@ async def cluster_route_custom_command_slot_route( @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_cluster_route_custom_command_slot_key_route( - self, redis_client: RedisClusterClient + self, redis_client: GlideClusterClient ): await self.cluster_route_custom_command_slot_route(redis_client, 
True) @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_cluster_route_custom_command_slot_id_route( - self, redis_client: RedisClusterClient + self, redis_client: GlideClusterClient ): await self.cluster_route_custom_command_slot_route(redis_client, False) @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_info_random_route(self, redis_client: RedisClusterClient): + async def test_info_random_route(self, redis_client: GlideClusterClient): info = await redis_client.info([InfoSection.SERVER], RandomNode()) assert isinstance(info, str) assert "# Server" in info @@ -4782,7 +6876,7 @@ async def test_info_random_route(self, redis_client: RedisClusterClient): @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_cluster_route_by_address_reaches_correct_node( - self, redis_client: RedisClusterClient + self, redis_client: GlideClusterClient ): # returns the line that contains the word "myself", up to that point. This is done because the values after it might change with time. 
def clean_result(value: TResult): @@ -4822,18 +6916,41 @@ def clean_result(value: TResult): @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_cluster_fail_routing_by_address_if_no_port_is_provided( - self, redis_client: RedisClusterClient + self, redis_client: GlideClusterClient ): with pytest.raises(RequestError): await redis_client.info(route=ByAddressRoute("foo")) + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_cluster_flushdb(self, redis_client: GlideClusterClient): + min_version = "6.2.0" + key = f"{{key}}-1{get_random_string(5)}" + value = get_random_string(5) + + await redis_client.set(key, value) + assert await redis_client.dbsize() > 0 + assert await redis_client.flushdb(route=AllPrimaries()) is OK + assert await redis_client.dbsize() == 0 + + await redis_client.set(key, value) + assert await redis_client.dbsize() > 0 + assert await redis_client.flushdb(FlushMode.ASYNC, AllPrimaries()) is OK + assert await redis_client.dbsize() == 0 + + if not await check_if_server_version_lt(redis_client, min_version): + await redis_client.set(key, value) + assert await redis_client.dbsize() > 0 + assert await redis_client.flushdb(FlushMode.SYNC, AllPrimaries()) is OK + assert await redis_client.dbsize() == 0 + @pytest.mark.asyncio class TestScripts: @pytest.mark.smoke_test @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_script(self, redis_client: TRedisClient): + async def test_script(self, redis_client: TGlideClient): key1 = get_random_string(10) key2 = get_random_string(10) script = Script("return 'Hello'") diff --git a/python/python/tests/test_config.py b/python/python/tests/test_config.py index ccd4d82a77..9c05db1199 100644 --- 
a/python/python/tests/test_config.py +++ b/python/python/tests/test_config.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 from glide.config import ( BaseClientConfiguration, diff --git a/python/python/tests/test_proto_coded.py b/python/python/tests/test_proto_coded.py index a7e7bbb5f2..2c0a248a1f 100644 --- a/python/python/tests/test_proto_coded.py +++ b/python/python/tests/test_proto_coded.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import pytest from glide.protobuf.redis_request_pb2 import RedisRequest, RequestType diff --git a/python/python/tests/test_pubsub.py b/python/python/tests/test_pubsub.py new file mode 100644 index 0000000000..613f4614da --- /dev/null +++ b/python/python/tests/test_pubsub.py @@ -0,0 +1,2135 @@ +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 + +from __future__ import annotations + +import asyncio +from enum import IntEnum +from typing import Any, Dict, List, Optional, Set, Tuple, Union, cast + +import pytest +from glide.async_commands.core import CoreCommands +from glide.config import ( + ClusterClientConfiguration, + GlideClientConfiguration, + ProtocolVersion, +) +from glide.constants import OK +from glide.exceptions import ConfigurationError +from glide.glide_client import GlideClient, GlideClusterClient, TGlideClient +from tests.conftest import create_client +from tests.utils.utils import check_if_server_version_lt, get_random_string + + +class MethodTesting(IntEnum): + """ + Enumeration for specifying the method of PUBSUB subscription. + """ + + Async = 0 + "Uses asynchronous subscription method." + Sync = 1 + "Uses synchronous subscription method." + Callback = 2 + "Uses callback-based subscription method." 
+
+
+async def create_two_clients(
+    request,
+    cluster_mode,
+    pub_sub,
+    pub_sub2: Optional[Any] = None,
+    protocol: ProtocolVersion = ProtocolVersion.RESP3,
+) -> Tuple[
+    Union[GlideClient, GlideClusterClient], Union[GlideClient, GlideClusterClient]
+]:
+    """
+    Sets up 2 clients for testing purposes.
+
+    Args:
+        request: pytest request for creating a client.
+        cluster_mode: the cluster mode.
+        pub_sub: pubsub subscription configuration applied to the second client returned (the listener).
+        pub_sub2: pubsub subscription configuration applied to the first client returned.
+        protocol: the protocol version to use; parameterized by `test_pubsub_resp2_raise_an_error`.
+    """
+    cluster_mode_pubsub, standalone_mode_pubsub = None, None
+    cluster_mode_pubsub2, standalone_mode_pubsub2 = None, None
+    if cluster_mode:
+        cluster_mode_pubsub = pub_sub
+        cluster_mode_pubsub2 = pub_sub2
+    else:
+        standalone_mode_pubsub = pub_sub
+        standalone_mode_pubsub2 = pub_sub2
+
+    client = await create_client(
+        request,
+        cluster_mode=cluster_mode,
+        cluster_mode_pubsub=cluster_mode_pubsub2,
+        standalone_mode_pubsub=standalone_mode_pubsub2,
+        protocol=protocol,
+    )
+    client2 = await create_client(
+        request,
+        cluster_mode=cluster_mode,
+        cluster_mode_pubsub=cluster_mode_pubsub,
+        standalone_mode_pubsub=standalone_mode_pubsub,
+        protocol=protocol,
+    )
+    return client, client2
+
+
+async def get_message_by_method(
+    method: MethodTesting,
+    client: TGlideClient,
+    messages: Optional[List[CoreCommands.PubSubMsg]] = None,
+    index: Optional[int] = None,
+):
+    if method == MethodTesting.Async:
+        return await client.get_pubsub_message()
+    elif method == MethodTesting.Sync:
+        return client.try_get_pubsub_message()
+    assert messages and (index is not None)
+    return messages[index]
+
+
+async def check_no_messages_left(
+    method,
+    client: TGlideClient,
+    callback: Optional[List[Any]] = None,
+    expected_callback_messages_count: int = 0,
+):
+    if method == MethodTesting.Async:
+        # assert there are no messages to read
+        with pytest.raises(asyncio.TimeoutError):
+            await asyncio.wait_for(client.get_pubsub_message(), timeout=3)
+    elif method == MethodTesting.Sync:
+        assert client.try_get_pubsub_message() is None
+    else:
+        assert callback is not None
+        assert len(callback) == expected_callback_messages_count
+
+
+def create_pubsub_subscription(
+    cluster_mode,
+    cluster_channels_and_patterns: Dict[
+        ClusterClientConfiguration.PubSubChannelModes, Set[str]
+    ],
+    standalone_channels_and_patterns: Dict[
+        GlideClientConfiguration.PubSubChannelModes, Set[str]
+    ],
+    callback=None,
+    context=None,
+):
+    if cluster_mode:
+        return ClusterClientConfiguration.PubSubSubscriptions(
+            channels_and_patterns=cluster_channels_and_patterns,
+            callback=callback,
+            context=context,
+        )
+    return GlideClientConfiguration.PubSubSubscriptions(
+        channels_and_patterns=standalone_channels_and_patterns,
+        callback=callback,
+        context=context,
+    )
+
+
+def new_message(msg: CoreCommands.PubSubMsg, context: Any):
+    received_messages: List[CoreCommands.PubSubMsg] = context
+    received_messages.append(msg)
+
+
+@pytest.mark.asyncio
+class TestPubSub:
+    @pytest.mark.parametrize("cluster_mode", [True, False])
+    @pytest.mark.parametrize(
+        "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback]
+    )
+    async def test_pubsub_exact_happy_path(
+        self,
+        request,
+        cluster_mode: bool,
+        method: MethodTesting,
+    ):
+        """
+        Tests the basic happy path for exact PUBSUB functionality.
+
+        This test covers the basic PUBSUB flow using three different methods:
+        Async, Sync, and Callback. It verifies that a message published to a
+        specific channel is correctly received by a subscriber.
+ """ + channel = get_random_string(10) + message = get_random_string(5) + publish_response = 1 if cluster_mode else OK + + callback, context = None, None + callback_messages: List[CoreCommands.PubSubMsg] = [] + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Exact: {channel}}, + {GlideClientConfiguration.PubSubChannelModes.Exact: {channel}}, + callback=callback, + context=context, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + try: + assert await publishing_client.publish(message, channel) == publish_response + # allow the message to propagate + await asyncio.sleep(1) + + pubsub_msg = await get_message_by_method( + method, listening_client, callback_messages, 0 + ) + + assert pubsub_msg.message == message + assert pubsub_msg.channel == channel + assert pubsub_msg.pattern is None + + await check_no_messages_left(method, listening_client, callback_messages, 1) + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["UNSUBSCRIBE", channel]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + async def test_pubsub_exact_happy_path_coexistence( + self, request, cluster_mode: bool + ): + """ + Tests the coexistence of async and sync message retrieval methods in exact PUBSUB. + + This test covers the scenario where messages are published to a channel + and received using both async and sync methods to ensure that both methods + can coexist and function correctly. 
+ """ + channel = get_random_string(10) + message = get_random_string(5) + message2 = get_random_string(7) + publish_response = 1 if cluster_mode else OK + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Exact: {channel}}, + {GlideClientConfiguration.PubSubChannelModes.Exact: {channel}}, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + try: + assert await publishing_client.publish(message, channel) == publish_response + assert ( + await publishing_client.publish(message2, channel) == publish_response + ) + # allow the message to propagate + await asyncio.sleep(1) + + async_msg = await listening_client.get_pubsub_message() + sync_msg = listening_client.try_get_pubsub_message() + assert sync_msg + + assert async_msg.message in [message, message2] + assert async_msg.channel == channel + assert async_msg.pattern is None + + assert sync_msg.message in [message, message2] + assert sync_msg.channel == channel + assert sync_msg.pattern is None + # we do not check the order of the messages, but we can check that we received both messages once + assert not sync_msg.message == async_msg.message + + # assert there are no messages to read + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(listening_client.get_pubsub_message(), timeout=3) + + assert listening_client.try_get_pubsub_message() is None + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["UNSUBSCRIBE", channel]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + 
async def test_pubsub_exact_happy_path_many_channels( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Tests publishing and receiving messages across many channels in exact PUBSUB. + + This test covers the scenario where multiple channels each receive their own + unique message. It verifies that messages are correctly published and received + using different retrieval methods: async, sync, and callback. + """ + NUM_CHANNELS = 256 + shard_prefix = "{same-shard}" + publish_response = 1 if cluster_mode else OK + + # Create a map of channels to random messages with shard prefix + channels_and_messages = { + f"{shard_prefix}{get_random_string(10)}": get_random_string(5) + for _ in range(NUM_CHANNELS) + } + + callback, context = None, None + callback_messages: List[CoreCommands.PubSubMsg] = [] + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages + + pub_sub = create_pubsub_subscription( + cluster_mode, + { + ClusterClientConfiguration.PubSubChannelModes.Exact: set( + channels_and_messages.keys() + ) + }, + { + GlideClientConfiguration.PubSubChannelModes.Exact: set( + channels_and_messages.keys() + ) + }, + callback=callback, + context=context, + ) + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + try: + # Publish messages to each channel + for channel, message in channels_and_messages.items(): + assert ( + await publishing_client.publish(message, channel) + == publish_response + ) + + # Allow the messages to propagate + await asyncio.sleep(1) + + # Check if all messages are received correctly + for index in range(len(channels_and_messages)): + pubsub_msg = await get_message_by_method( + method, listening_client, callback_messages, index + ) + assert pubsub_msg.channel in channels_and_messages.keys() + assert pubsub_msg.message == channels_and_messages[pubsub_msg.channel] + assert pubsub_msg.pattern is None + del 
channels_and_messages[pubsub_msg.channel] + + # check that we received all messages + assert channels_and_messages == {} + # check no messages left + await check_no_messages_left( + method, listening_client, callback_messages, NUM_CHANNELS + ) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command( + ["UNSUBSCRIBE", *list(channels_and_messages.keys())] + ) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + async def test_pubsub_exact_happy_path_many_channels_co_existence( + self, request, cluster_mode: bool + ): + """ + Tests publishing and receiving messages across many channels in exact PUBSUB, ensuring coexistence of async and sync retrieval methods. + + This test covers scenarios where multiple channels each receive their own unique message. + It verifies that messages are correctly published and received using both async and sync methods to ensure that both methods + can coexist and function correctly. 
+ """ + NUM_CHANNELS = 256 + shard_prefix = "{same-shard}" + publish_response = 1 if cluster_mode else OK + + # Create a map of channels to random messages with shard prefix + channels_and_messages = { + f"{shard_prefix}{get_random_string(10)}": get_random_string(5) + for _ in range(NUM_CHANNELS) + } + + pub_sub = create_pubsub_subscription( + cluster_mode, + { + ClusterClientConfiguration.PubSubChannelModes.Exact: set( + channels_and_messages.keys() + ) + }, + { + GlideClientConfiguration.PubSubChannelModes.Exact: set( + channels_and_messages.keys() + ) + }, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + try: + # Publish messages to each channel + for channel, message in channels_and_messages.items(): + assert ( + await publishing_client.publish(message, channel) + == publish_response + ) + + # Allow the messages to propagate + await asyncio.sleep(1) + + # Check if all messages are received correctly by each method + for index in range(len(channels_and_messages)): + method = MethodTesting.Async if index % 2 else MethodTesting.Sync + pubsub_msg = await get_message_by_method(method, listening_client) + + assert pubsub_msg.channel in channels_and_messages.keys() + assert pubsub_msg.message == channels_and_messages[pubsub_msg.channel] + assert pubsub_msg.pattern is None + del channels_and_messages[pubsub_msg.channel] + + # check that we received all messages + assert channels_and_messages == {} + # assert there are no messages to read + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(listening_client.get_pubsub_message(), timeout=3) + + assert listening_client.try_get_pubsub_message() is None + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to 
unsubscribe from the channels + await listening_client.custom_command( + ["UNSUBSCRIBE", *list(channels_and_messages.keys())] + ) + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_sharded_pubsub( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Test sharded PUBSUB functionality with different message retrieval methods. + + This test covers the sharded PUBSUB flow using three different methods: + Async, Sync, and Callback. It verifies that a message published to a + specific sharded channel is correctly received by a subscriber. + """ + channel = get_random_string(10) + message = get_random_string(5) + publish_response = 1 + + callback, context = None, None + callback_messages: List[CoreCommands.PubSubMsg] = [] + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Sharded: {channel}}, + {}, + callback=callback, + context=context, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + min_version = "7.0.0" + if await check_if_server_version_lt(publishing_client, min_version): + pytest.skip(reason=f"Redis version required >= {min_version}") + + try: + assert ( + await cast(GlideClusterClient, publishing_client).publish( + message, channel, sharded=True + ) + == publish_response + ) + # allow the message to propagate + await asyncio.sleep(1) + + pubsub_msg = await get_message_by_method( + method, listening_client, callback_messages, 0 + ) + assert pubsub_msg.message == message + assert pubsub_msg.channel == channel + assert pubsub_msg.pattern is None + + finally: + # assert there are no messages to read + await check_no_messages_left(method, listening_client, callback_messages, 1) + if cluster_mode: + # Since all tests 
run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["SUNSUBSCRIBE", channel]) + + @pytest.mark.parametrize("cluster_mode", [True]) + async def test_sharded_pubsub_co_existence(self, request, cluster_mode: bool): + """ + Test sharded PUBSUB with co-existence of multiple messages. + + This test verifies the behavior of sharded PUBSUB when multiple messages are published + to the same sharded channel. It ensures that both async and sync methods of message retrieval + function correctly in this scenario. + + It covers the scenario where messages are published to a sharded channel and received using + both async and sync methods. This ensures that the asynchronous and synchronous message + retrieval methods can coexist without interfering with each other and operate as expected. 
+ """ + channel = get_random_string(10) + message = get_random_string(5) + message2 = get_random_string(7) + publish_response = 1 if cluster_mode else OK + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Sharded: {channel}}, + {}, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + min_version = "7.0.0" + if await check_if_server_version_lt(publishing_client, min_version): + pytest.skip(reason=f"Redis version required >= {min_version}") + + try: + assert ( + await cast(GlideClusterClient, publishing_client).publish( + message, channel, sharded=True + ) + == publish_response + ) + assert ( + await cast(GlideClusterClient, publishing_client).publish( + message2, channel, sharded=True + ) + == publish_response + ) + # allow the messages to propagate + await asyncio.sleep(1) + + async_msg = await listening_client.get_pubsub_message() + sync_msg = listening_client.try_get_pubsub_message() + assert sync_msg + + assert async_msg.message == message + assert async_msg.message in [message, message2] + assert async_msg.channel == channel + assert async_msg.pattern is None + + assert sync_msg.message in [message, message2] + assert sync_msg.channel == channel + assert sync_msg.pattern is None + # we do not check the order of the messages, but we can check that we received both messages once + assert not sync_msg.message == async_msg.message + + # assert there are no messages to read + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(listening_client.get_pubsub_message(), timeout=3) + + assert listening_client.try_get_pubsub_message() is None + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the 
channels + await listening_client.custom_command(["SUNSUBSCRIBE", channel]) + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_sharded_pubsub_many_channels( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Test sharded PUBSUB with multiple channels and different message retrieval methods. + + This test verifies the behavior of sharded PUBSUB when multiple messages are published + across multiple sharded channels. It covers three different message retrieval methods: + Async, Sync, and Callback. + """ + NUM_CHANNELS = 256 + shard_prefix = "{same-shard}" + publish_response = 1 + + # Create a map of channels to random messages with shard prefix + channels_and_messages = { + f"{shard_prefix}{get_random_string(10)}": get_random_string(5) + for _ in range(NUM_CHANNELS) + } + + callback, context = None, None + callback_messages: List[CoreCommands.PubSubMsg] = [] + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages + + pub_sub = create_pubsub_subscription( + cluster_mode, + { + ClusterClientConfiguration.PubSubChannelModes.Sharded: set( + channels_and_messages.keys() + ) + }, + {}, + callback=callback, + context=context, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + min_version = "7.0.0" + if await check_if_server_version_lt(publishing_client, min_version): + pytest.skip(reason=f"Redis version required >= {min_version}") + + try: + # Publish messages to each channel + for channel, message in channels_and_messages.items(): + assert ( + await cast(GlideClusterClient, publishing_client).publish( + message, channel, sharded=True + ) + == publish_response + ) + + # Allow the messages to propagate + await asyncio.sleep(1) + + # Check if all messages are received correctly + for index in range(len(channels_and_messages)): + 
pubsub_msg = await get_message_by_method( + method, listening_client, callback_messages, index + ) + assert pubsub_msg.channel in channels_and_messages.keys() + assert pubsub_msg.message == channels_and_messages[pubsub_msg.channel] + assert pubsub_msg.pattern is None + del channels_and_messages[pubsub_msg.channel] + + # check that we received all messages + assert channels_and_messages == {} + + # Assert there are no more messages to read + await check_no_messages_left( + method, listening_client, callback_messages, NUM_CHANNELS + ) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command( + ["SUNSUBSCRIBE", *list(channels_and_messages.keys())] + ) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_pubsub_pattern( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Test PUBSUB with pattern subscription using different message retrieval methods. + + This test verifies the behavior of PUBSUB when subscribing to a pattern and receiving + messages using three different methods: Async, Sync, and Callback. 
+ """ + PATTERN = "{{{}}}:{}".format("channel", "*") + channels = { + "{{{}}}:{}".format("channel", get_random_string(5)): get_random_string(5), + "{{{}}}:{}".format("channel", get_random_string(5)): get_random_string(5), + } + publish_response = 1 if cluster_mode else OK + + callback, context = None, None + callback_messages: List[CoreCommands.PubSubMsg] = [] + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + {GlideClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + callback=callback, + context=context, + ) + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + try: + for channel, message in channels.items(): + assert ( + await publishing_client.publish(message, channel) + == publish_response + ) + + # allow the message to propagate + await asyncio.sleep(1) + + # Check if all messages are received correctly + for index in range(len(channels)): + pubsub_msg = await get_message_by_method( + method, listening_client, callback_messages, index + ) + assert pubsub_msg.channel in channels.keys() + assert pubsub_msg.message == channels[pubsub_msg.channel] + assert pubsub_msg.pattern == PATTERN + del channels[pubsub_msg.channel] + + # check that we received all messages + assert channels == {} + + await check_no_messages_left(method, listening_client, callback_messages, 2) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["PUNSUBSCRIBE", PATTERN]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + async def 
test_pubsub_pattern_co_existence(self, request, cluster_mode: bool): + """ + Tests the coexistence of async and sync message retrieval methods in pattern-based PUBSUB. + + This test covers the scenario where messages are published to a channel that match a specified pattern + and received using both async and sync methods to ensure that both methods + can coexist and function correctly. + """ + PATTERN = "{{{}}}:{}".format("channel", "*") + channels = { + "{{{}}}:{}".format("channel", get_random_string(5)): get_random_string(5), + "{{{}}}:{}".format("channel", get_random_string(5)): get_random_string(5), + } + publish_response = 1 if cluster_mode else OK + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + {GlideClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + try: + for channel, message in channels.items(): + assert ( + await publishing_client.publish(message, channel) + == publish_response + ) + + # allow the message to propagate + await asyncio.sleep(1) + + # Check if all messages are received correctly by each method + for index in range(len(channels)): + method = MethodTesting.Async if index % 2 else MethodTesting.Sync + pubsub_msg = await get_message_by_method(method, listening_client) + + assert pubsub_msg.channel in channels.keys() + assert pubsub_msg.message == channels[pubsub_msg.channel] + assert pubsub_msg.pattern == PATTERN + del channels[pubsub_msg.channel] + + # check that we received all messages + assert channels == {} + + # assert there are no more messages to read + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(listening_client.get_pubsub_message(), timeout=3) + + assert listening_client.try_get_pubsub_message() is None + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage 
collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["PUNSUBSCRIBE", PATTERN]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_pubsub_pattern_many_channels( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Tests publishing and receiving messages across many channels in pattern-based PUBSUB. + + This test covers the scenario where messages are published to multiple channels that match a specified pattern + and received. It verifies that messages are correctly published and received + using different retrieval methods: async, sync, and callback. + """ + NUM_CHANNELS = 256 + PATTERN = "{{{}}}:{}".format("channel", "*") + channels = { + "{{{}}}:{}".format("channel", get_random_string(5)): get_random_string(5) + for _ in range(NUM_CHANNELS) + } + publish_response = 1 if cluster_mode else OK + + callback, context = None, None + callback_messages: List[CoreCommands.PubSubMsg] = [] + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + {GlideClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + callback=callback, + context=context, + ) + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + try: + for channel, message in channels.items(): + assert ( + await publishing_client.publish(message, channel) + == publish_response + ) + + # allow the message to propagate + await asyncio.sleep(1) + + # Check if all messages are received correctly + for index in range(len(channels)): + pubsub_msg = await 
get_message_by_method( + method, listening_client, callback_messages, index + ) + assert pubsub_msg.channel in channels.keys() + assert pubsub_msg.message == channels[pubsub_msg.channel] + assert pubsub_msg.pattern == PATTERN + del channels[pubsub_msg.channel] + + # check that we received all messages + assert channels == {} + + await check_no_messages_left( + method, listening_client, callback_messages, NUM_CHANNELS + ) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["PUNSUBSCRIBE", PATTERN]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_pubsub_combined_exact_and_pattern_one_client( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Tests combined exact and pattern PUBSUB with one client. + + This test verifies that a single client can correctly handle both exact and pattern PUBSUB + subscriptions. It covers the following scenarios: + - Subscribing to multiple channels with exact names and verifying message reception. + - Subscribing to channels using a pattern and verifying message reception. + - Ensuring that messages are correctly published and received using different retrieval methods (async, sync, callback). 
+ """ + NUM_CHANNELS = 256 + PATTERN = "{{{}}}:{}".format("pattern", "*") + + # Create dictionaries of channels and their corresponding messages + exact_channels_and_messages = { + "{{{}}}:{}".format("channel", get_random_string(5)): get_random_string(10) + for _ in range(NUM_CHANNELS) + } + pattern_channels_and_messages = { + "{{{}}}:{}".format("pattern", get_random_string(5)): get_random_string(5) + for _ in range(NUM_CHANNELS) + } + + all_channels_and_messages = { + **exact_channels_and_messages, + **pattern_channels_and_messages, + } + + publish_response = 1 if cluster_mode else OK + + callback, context = None, None + callback_messages: List[CoreCommands.PubSubMsg] = [] + + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages + + # Setup PUBSUB for exact channels + pub_sub_exact = create_pubsub_subscription( + cluster_mode, + { + ClusterClientConfiguration.PubSubChannelModes.Exact: set( + exact_channels_and_messages.keys() + ), + ClusterClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}, + }, + { + GlideClientConfiguration.PubSubChannelModes.Exact: set( + exact_channels_and_messages.keys() + ), + GlideClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}, + }, + callback=callback, + context=context, + ) + + publishing_client, listening_client = await create_two_clients( + request, + cluster_mode, + pub_sub_exact, + ) + + try: + # Publish messages to all channels + for channel, message in all_channels_and_messages.items(): + assert ( + await publishing_client.publish(message, channel) + == publish_response + ) + + # allow the message to propagate + await asyncio.sleep(1) + + # Check if all messages are received correctly + for index in range(len(all_channels_and_messages)): + pubsub_msg = await get_message_by_method( + method, listening_client, callback_messages, index + ) + pattern = ( + PATTERN + if pubsub_msg.channel in pattern_channels_and_messages.keys() + else None + ) + assert pubsub_msg.channel in 
all_channels_and_messages.keys() + assert ( + pubsub_msg.message == all_channels_and_messages[pubsub_msg.channel] + ) + assert pubsub_msg.pattern == pattern + del all_channels_and_messages[pubsub_msg.channel] + + # check that we received all messages + assert all_channels_and_messages == {} + + await check_no_messages_left( + method, listening_client, callback_messages, NUM_CHANNELS * 2 + ) + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command( + ["UNSUBSCRIBE", *list(exact_channels_and_messages.keys())] + ) + await listening_client.custom_command(["PUNSUBSCRIBE", PATTERN]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_pubsub_combined_exact_and_pattern_multiple_clients( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Tests combined exact and pattern PUBSUB with multiple clients, one for each subscription. + + This test verifies that separate clients can correctly handle both exact and pattern PUBSUB + subscriptions. It covers the following scenarios: + - Subscribing to multiple channels with exact names and verifying message reception. + - Subscribing to channels using a pattern and verifying message reception. + - Ensuring that messages are correctly published and received using different retrieval methods (async, sync, callback). + - Verifying that no messages are left unread. + - Properly unsubscribing from all channels to avoid interference with other tests. 
+ """ + NUM_CHANNELS = 256 + PATTERN = "{{{}}}:{}".format("pattern", "*") + + # Create dictionaries of channels and their corresponding messages + exact_channels_and_messages = { + "{{{}}}:{}".format("channel", get_random_string(5)): get_random_string(10) + for _ in range(NUM_CHANNELS) + } + pattern_channels_and_messages = { + "{{{}}}:{}".format("pattern", get_random_string(5)): get_random_string(5) + for _ in range(NUM_CHANNELS) + } + + all_channels_and_messages = { + **exact_channels_and_messages, + **pattern_channels_and_messages, + } + + publish_response = 1 if cluster_mode else OK + + callback, context = None, None + callback_messages: List[CoreCommands.PubSubMsg] = [] + + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages + + # Setup PUBSUB for exact channels + pub_sub_exact = create_pubsub_subscription( + cluster_mode, + { + ClusterClientConfiguration.PubSubChannelModes.Exact: set( + exact_channels_and_messages.keys() + ) + }, + { + GlideClientConfiguration.PubSubChannelModes.Exact: set( + exact_channels_and_messages.keys() + ) + }, + callback=callback, + context=context, + ) + + publishing_client, listening_client_exact = await create_two_clients( + request, + cluster_mode, + pub_sub_exact, + ) + + callback_messages_pattern: List[CoreCommands.PubSubMsg] = [] + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages_pattern + + # Setup PUBSUB for pattern channels + pub_sub_pattern = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + {GlideClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + callback=callback, + context=context, + ) + + _, listening_client_pattern = await create_two_clients( + request, cluster_mode, pub_sub_pattern + ) + + try: + # Publish messages to all channels + for channel, message in all_channels_and_messages.items(): + assert ( + await publishing_client.publish(message, channel) + == 
publish_response + ) + + # allow the messages to propagate + await asyncio.sleep(1) + + # Verify messages for exact PUBSUB + for index in range(len(exact_channels_and_messages)): + pubsub_msg = await get_message_by_method( + method, listening_client_exact, callback_messages, index + ) + assert pubsub_msg.channel in exact_channels_and_messages.keys() + assert ( + pubsub_msg.message + == exact_channels_and_messages[pubsub_msg.channel] + ) + assert pubsub_msg.pattern is None + del exact_channels_and_messages[pubsub_msg.channel] + + # check that we received all messages + assert exact_channels_and_messages == {} + + # Verify messages for pattern PUBSUB + for index in range(len(pattern_channels_and_messages)): + pubsub_msg = await get_message_by_method( + method, listening_client_pattern, callback_messages_pattern, index + ) + assert pubsub_msg.channel in pattern_channels_and_messages.keys() + assert ( + pubsub_msg.message + == pattern_channels_and_messages[pubsub_msg.channel] + ) + assert pubsub_msg.pattern == PATTERN + del pattern_channels_and_messages[pubsub_msg.channel] + + # check that we received all messages + assert pattern_channels_and_messages == {} + + await check_no_messages_left( + method, listening_client_exact, callback_messages, NUM_CHANNELS + ) + await check_no_messages_left( + method, + listening_client_pattern, + callback_messages_pattern, + NUM_CHANNELS, + ) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client_exact.custom_command( + ["UNSUBSCRIBE", *list(exact_channels_and_messages.keys())] + ) + await listening_client_pattern.custom_command(["PUNSUBSCRIBE", PATTERN]) + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize( + "method", 
[MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_pubsub_combined_exact_pattern_and_sharded_one_client( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Tests combined exact, pattern and sharded PUBSUB with one client. + + This test verifies that a single client can correctly handle both exact, pattern and sharded PUBSUB + subscriptions. It covers the following scenarios: + - Subscribing to multiple channels with exact names and verifying message reception. + - Subscribing to channels using a pattern and verifying message reception. + - Subscribing to channels using a with sharded subscription and verifying message reception. + - Ensuring that messages are correctly published and received using different retrieval methods (async, sync, callback). + """ + NUM_CHANNELS = 256 + PATTERN = "{{{}}}:{}".format("pattern", "*") + SHARD_PREFIX = "{same-shard}" + + # Create dictionaries of channels and their corresponding messages + exact_channels_and_messages = { + "{{{}}}:{}".format("channel", get_random_string(5)): get_random_string(10) + for _ in range(NUM_CHANNELS) + } + pattern_channels_and_messages = { + "{{{}}}:{}".format("pattern", get_random_string(5)): get_random_string(5) + for _ in range(NUM_CHANNELS) + } + sharded_channels_and_messages = { + f"{SHARD_PREFIX}:{get_random_string(10)}": get_random_string(7) + for _ in range(NUM_CHANNELS) + } + + publish_response = 1 + + callback, context = None, None + callback_messages: List[CoreCommands.PubSubMsg] = [] + + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages + + # Setup PUBSUB for exact channels + pub_sub_exact = create_pubsub_subscription( + cluster_mode, + { + ClusterClientConfiguration.PubSubChannelModes.Exact: set( + exact_channels_and_messages.keys() + ), + ClusterClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}, + ClusterClientConfiguration.PubSubChannelModes.Sharded: set( + 
sharded_channels_and_messages.keys() + ), + }, + {}, + callback=callback, + context=context, + ) + + publishing_client, listening_client = await create_two_clients( + request, + cluster_mode, + pub_sub_exact, + ) + + # Setup PUBSUB for sharded channels (Redis version > 7) + if await check_if_server_version_lt(publishing_client, "7.0.0"): + pytest.skip("Redis version required >= 7.0.0") + + try: + # Publish messages to all channels + for channel, message in { + **exact_channels_and_messages, + **pattern_channels_and_messages, + }.items(): + assert ( + await publishing_client.publish(message, channel) + == publish_response + ) + + # Publish sharded messages to all channels + for channel, message in sharded_channels_and_messages.items(): + assert ( + await cast(GlideClusterClient, publishing_client).publish( + message, channel, sharded=True + ) + == publish_response + ) + + # allow the messages to propagate + await asyncio.sleep(1) + + all_channels_and_messages = { + **exact_channels_and_messages, + **pattern_channels_and_messages, + **sharded_channels_and_messages, + } + # Check if all messages are received correctly + for index in range(len(all_channels_and_messages)): + pubsub_msg = await get_message_by_method( + method, listening_client, callback_messages, index + ) + pattern = ( + PATTERN + if pubsub_msg.channel in pattern_channels_and_messages.keys() + else None + ) + assert pubsub_msg.channel in all_channels_and_messages.keys() + assert ( + pubsub_msg.message == all_channels_and_messages[pubsub_msg.channel] + ) + assert pubsub_msg.pattern == pattern + del all_channels_and_messages[pubsub_msg.channel] + + # check that we received all messages + assert all_channels_and_messages == {} + + await check_no_messages_left( + method, listening_client, callback_messages, NUM_CHANNELS * 3 + ) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In 
cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command( + ["UNSUBSCRIBE", *list(exact_channels_and_messages.keys())] + ) + await listening_client.custom_command(["PUNSUBSCRIBE", PATTERN]) + await listening_client.custom_command( + ["SUNSUBSCRIBE", *list(sharded_channels_and_messages.keys())] + ) + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_pubsub_combined_exact_pattern_and_sharded_multi_client( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Tests combined exact, pattern and sharded PUBSUB with multiple clients, one for each subscription. + + This test verifies that separate clients can correctly handle exact, pattern and sharded PUBSUB + subscriptions. It covers the following scenarios: + - Subscribing to multiple channels with exact names and verifying message reception. + - Subscribing to channels using a pattern and verifying message reception. + - Subscribing to channels using a sharded subscription and verifying message reception. + - Ensuring that messages are correctly published and received using different retrieval methods (async, sync, callback). + - Verifying that no messages are left unread. + - Properly unsubscribing from all channels to avoid interference with other tests. 
+ """ + NUM_CHANNELS = 256 + PATTERN = "{{{}}}:{}".format("pattern", "*") + SHARD_PREFIX = "{same-shard}" + + # Create dictionaries of channels and their corresponding messages + exact_channels_and_messages = { + "{{{}}}:{}".format("channel", get_random_string(5)): get_random_string(10) + for _ in range(NUM_CHANNELS) + } + pattern_channels_and_messages = { + "{{{}}}:{}".format("pattern", get_random_string(5)): get_random_string(5) + for _ in range(NUM_CHANNELS) + } + sharded_channels_and_messages = { + f"{SHARD_PREFIX}:{get_random_string(10)}": get_random_string(7) + for _ in range(NUM_CHANNELS) + } + + publish_response = 1 + + callback, context = None, None + callback_messages_exact: List[CoreCommands.PubSubMsg] = [] + callback_messages_pattern: List[CoreCommands.PubSubMsg] = [] + callback_messages_sharded: List[CoreCommands.PubSubMsg] = [] + + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages_exact + + # Setup PUBSUB for exact channels + pub_sub_exact = create_pubsub_subscription( + cluster_mode, + { + ClusterClientConfiguration.PubSubChannelModes.Exact: set( + exact_channels_and_messages.keys() + ) + }, + { + GlideClientConfiguration.PubSubChannelModes.Exact: set( + exact_channels_and_messages.keys() + ) + }, + callback=callback, + context=context, + ) + + publishing_client, listening_client_exact = await create_two_clients( + request, + cluster_mode, + pub_sub_exact, + ) + + # Setup PUBSUB for sharded channels (Redis version > 7) + if await check_if_server_version_lt(publishing_client, "7.0.0"): + pytest.skip("Redis version required >= 7.0.0") + + if method == MethodTesting.Callback: + context = callback_messages_pattern + + # Setup PUBSUB for pattern channels + pub_sub_pattern = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + {GlideClientConfiguration.PubSubChannelModes.Pattern: {PATTERN}}, + callback=callback, + context=context, + ) + + if method == 
MethodTesting.Callback: + context = callback_messages_sharded + + pub_sub_sharded = create_pubsub_subscription( + cluster_mode, + { + ClusterClientConfiguration.PubSubChannelModes.Sharded: set( + sharded_channels_and_messages.keys() + ) + }, + {}, + callback=callback, + context=context, + ) + + listening_client_sharded, listening_client_pattern = await create_two_clients( + request, cluster_mode, pub_sub_pattern, pub_sub_sharded + ) + + try: + # Publish messages to all channels + for channel, message in { + **exact_channels_and_messages, + **pattern_channels_and_messages, + }.items(): + assert ( + await publishing_client.publish(message, channel) + == publish_response + ) + + # Publish sharded messages to all channels + for channel, message in sharded_channels_and_messages.items(): + assert ( + await cast(GlideClusterClient, publishing_client).publish( + message, channel, sharded=True + ) + == publish_response + ) + + # allow the messages to propagate + await asyncio.sleep(1) + + # Verify messages for exact PUBSUB + for index in range(len(exact_channels_and_messages)): + pubsub_msg = await get_message_by_method( + method, listening_client_exact, callback_messages_exact, index + ) + assert pubsub_msg.channel in exact_channels_and_messages.keys() + assert ( + pubsub_msg.message + == exact_channels_and_messages[pubsub_msg.channel] + ) + assert pubsub_msg.pattern is None + del exact_channels_and_messages[pubsub_msg.channel] + + # check that we received all messages + assert exact_channels_and_messages == {} + + # Verify messages for pattern PUBSUB + for index in range(len(pattern_channels_and_messages)): + pubsub_msg = await get_message_by_method( + method, listening_client_pattern, callback_messages_pattern, index + ) + assert pubsub_msg.channel in pattern_channels_and_messages.keys() + assert ( + pubsub_msg.message + == pattern_channels_and_messages[pubsub_msg.channel] + ) + assert pubsub_msg.pattern == PATTERN + del pattern_channels_and_messages[pubsub_msg.channel] 
+ + # check that we received all messages + assert pattern_channels_and_messages == {} + + # Verify messages for shaded PUBSUB + for index in range(len(sharded_channels_and_messages)): + pubsub_msg = await get_message_by_method( + method, listening_client_sharded, callback_messages_sharded, index + ) + assert pubsub_msg.channel in sharded_channels_and_messages.keys() + assert ( + pubsub_msg.message + == sharded_channels_and_messages[pubsub_msg.channel] + ) + assert pubsub_msg.pattern is None + del sharded_channels_and_messages[pubsub_msg.channel] + + # check that we received all messages + assert sharded_channels_and_messages == {} + + await check_no_messages_left( + method, listening_client_exact, callback_messages_exact, NUM_CHANNELS + ) + await check_no_messages_left( + method, + listening_client_pattern, + callback_messages_pattern, + NUM_CHANNELS, + ) + await check_no_messages_left( + method, + listening_client_sharded, + callback_messages_sharded, + NUM_CHANNELS, + ) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client_exact.custom_command( + ["UNSUBSCRIBE", *list(exact_channels_and_messages.keys())] + ) + await listening_client_pattern.custom_command(["PUNSUBSCRIBE", PATTERN]) + await listening_client_sharded.custom_command( + ["SUNSUBSCRIBE", *list(sharded_channels_and_messages.keys())] + ) + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_pubsub_combined_different_channels_with_same_name( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Tests combined PUBSUB with different channel modes using the same channel name. 
+ One publishing clients, 3 listening clients, one for each mode. + + This test verifies that separate clients can correctly handle subscriptions for exact, pattern, and sharded channels with the same name. + It covers the following scenarios: + - Subscribing to an exact channel and verifying message reception. + - Subscribing to a pattern channel and verifying message reception. + - Subscribing to a sharded channel and verifying message reception. + - Ensuring that messages are correctly published and received using different retrieval methods (async, sync, callback). + - Verifying that no messages are left unread. + - Properly unsubscribing from all channels to avoid interference with other tests. + """ + CHANNEL_NAME = "same-channel-name" + MESSAGE_EXACT = get_random_string(10) + MESSAGE_PATTERN = get_random_string(7) + MESSAGE_SHARDED = get_random_string(5) + + callback, context = None, None + callback_messages_exact: List[CoreCommands.PubSubMsg] = [] + callback_messages_pattern: List[CoreCommands.PubSubMsg] = [] + callback_messages_sharded: List[CoreCommands.PubSubMsg] = [] + + if method == MethodTesting.Callback: + callback = new_message + context = callback_messages_exact + + # Setup PUBSUB for exact channel + pub_sub_exact = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Exact: {CHANNEL_NAME}}, + {GlideClientConfiguration.PubSubChannelModes.Exact: {CHANNEL_NAME}}, + callback=callback, + context=context, + ) + + publishing_client, listening_client_exact = await create_two_clients( + request, + cluster_mode, + pub_sub_exact, + ) + + # (Redis version > 7) + if await check_if_server_version_lt(publishing_client, "7.0.0"): + pytest.skip("Redis version required >= 7.0.0") + + # Setup PUBSUB for pattern channel + if method == MethodTesting.Callback: + context = callback_messages_pattern + + # Setup PUBSUB for pattern channels + pub_sub_pattern = create_pubsub_subscription( + cluster_mode, + 
{ClusterClientConfiguration.PubSubChannelModes.Pattern: {CHANNEL_NAME}}, + {GlideClientConfiguration.PubSubChannelModes.Pattern: {CHANNEL_NAME}}, + callback=callback, + context=context, + ) + + if method == MethodTesting.Callback: + context = callback_messages_sharded + + pub_sub_sharded = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Sharded: {CHANNEL_NAME}}, + {}, + callback=callback, + context=context, + ) + + listening_client_sharded, listening_client_pattern = await create_two_clients( + request, cluster_mode, pub_sub_pattern, pub_sub_sharded + ) + + try: + # Publish messages to each channel + assert await publishing_client.publish(MESSAGE_EXACT, CHANNEL_NAME) == 2 + assert await publishing_client.publish(MESSAGE_PATTERN, CHANNEL_NAME) == 2 + assert ( + await cast(GlideClusterClient, publishing_client).publish( + MESSAGE_SHARDED, CHANNEL_NAME, sharded=True + ) + == 1 + ) + + # allow the message to propagate + await asyncio.sleep(1) + + # Verify message for exact and pattern PUBSUB + for client, callback, pattern in [ # type: ignore + (listening_client_exact, callback_messages_exact, None), + (listening_client_pattern, callback_messages_pattern, CHANNEL_NAME), + ]: + pubsub_msg = await get_message_by_method(method, client, callback, 0) # type: ignore + + pubsub_msg2 = await get_message_by_method(method, client, callback, 1) # type: ignore + assert not pubsub_msg.message == pubsub_msg2.message + assert pubsub_msg2.message in [MESSAGE_PATTERN, MESSAGE_EXACT] + assert pubsub_msg.message in [MESSAGE_PATTERN, MESSAGE_EXACT] + assert pubsub_msg.channel == pubsub_msg2.channel == CHANNEL_NAME + assert pubsub_msg.pattern == pubsub_msg2.pattern == pattern + + # Verify message for sharded PUBSUB + pubsub_msg_sharded = await get_message_by_method( + method, listening_client_sharded, callback_messages_sharded, 0 + ) + assert pubsub_msg_sharded.message == MESSAGE_SHARDED + assert pubsub_msg_sharded.channel == CHANNEL_NAME + 
assert pubsub_msg_sharded.pattern is None + + await check_no_messages_left( + method, listening_client_exact, callback_messages_exact, 2 + ) + await check_no_messages_left( + method, listening_client_pattern, callback_messages_pattern, 2 + ) + await check_no_messages_left( + method, listening_client_sharded, callback_messages_sharded, 1 + ) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client_exact.custom_command( + ["UNSUBSCRIBE", CHANNEL_NAME] + ) + await listening_client_pattern.custom_command( + ["PUNSUBSCRIBE", CHANNEL_NAME] + ) + await listening_client_sharded.custom_command( + ["SUNSUBSCRIBE", CHANNEL_NAME] + ) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_pubsub_two_publishing_clients_same_name( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Tests PUBSUB with two publishing clients using the same channel name. + One client uses pattern subscription, the other uses exact. + The clients publishes messages to each other, and to thyself. + + This test verifies that two separate clients can correctly publish to and handle subscriptions + for exact and pattern channels with the same name. It covers the following scenarios: + - Subscribing to an exact channel and verifying message reception. + - Subscribing to a pattern channel and verifying message reception. + - Ensuring that messages are correctly published and received using different retrieval methods (async, sync, callback). + - Verifying that no messages are left unread. + - Properly unsubscribing from all channels to avoid interference with other tests. 
+ """ + CHANNEL_NAME = "channel-name" + MESSAGE_EXACT = get_random_string(10) + MESSAGE_PATTERN = get_random_string(7) + publish_response = 2 if cluster_mode else OK + callback, context_exact, context_pattern = None, None, None + callback_messages_exact: List[CoreCommands.PubSubMsg] = [] + callback_messages_pattern: List[CoreCommands.PubSubMsg] = [] + + if method == MethodTesting.Callback: + callback = new_message + context_exact = callback_messages_exact + context_pattern = callback_messages_pattern + + # Setup PUBSUB for exact channel + pub_sub_exact = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Exact: {CHANNEL_NAME}}, + {GlideClientConfiguration.PubSubChannelModes.Exact: {CHANNEL_NAME}}, + callback=callback, + context=context_exact, + ) + # Setup PUBSUB for pattern channels + pub_sub_pattern = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Pattern: {CHANNEL_NAME}}, + {GlideClientConfiguration.PubSubChannelModes.Pattern: {CHANNEL_NAME}}, + callback=callback, + context=context_pattern, + ) + + client_pattern, client_exact = await create_two_clients( + request, cluster_mode, pub_sub_exact, pub_sub_pattern + ) + + try: + # Publish messages to each channel - both clients publishing + assert ( + await client_pattern.publish(MESSAGE_EXACT, CHANNEL_NAME) + == publish_response + ) + assert ( + await client_exact.publish(MESSAGE_PATTERN, CHANNEL_NAME) + == publish_response + ) + + # allow the message to propagate + await asyncio.sleep(1) + + # Verify message for exact and pattern PUBSUB + for client, callback, pattern in [ # type: ignore + (client_exact, callback_messages_exact, None), + (client_pattern, callback_messages_pattern, CHANNEL_NAME), + ]: + pubsub_msg = await get_message_by_method(method, client, callback, 0) # type: ignore + + pubsub_msg2 = await get_message_by_method(method, client, callback, 1) # type: ignore + assert not pubsub_msg.message == pubsub_msg2.message 
+ assert pubsub_msg2.message in [MESSAGE_PATTERN, MESSAGE_EXACT] + assert pubsub_msg.message in [MESSAGE_PATTERN, MESSAGE_EXACT] + assert pubsub_msg.channel == pubsub_msg2.channel == CHANNEL_NAME + assert pubsub_msg.pattern == pubsub_msg2.pattern == pattern + + await check_no_messages_left( + method, client_pattern, callback_messages_pattern, 2 + ) + await check_no_messages_left( + method, client_exact, callback_messages_exact, 2 + ) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await client_exact.custom_command(["UNSUBSCRIBE", CHANNEL_NAME]) + await client_pattern.custom_command(["PUNSUBSCRIBE", CHANNEL_NAME]) + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize( + "method", [MethodTesting.Async, MethodTesting.Sync, MethodTesting.Callback] + ) + async def test_pubsub_three_publishing_clients_same_name_with_sharded( + self, request, cluster_mode: bool, method: MethodTesting + ): + """ + Tests PUBSUB with 3 publishing clients using the same channel name. + One client uses pattern subscription, one uses exact, and one uses sharded. + + This test verifies that 3 separate clients can correctly publish to and handle subscriptions + for exact, sharded and pattern channels with the same name. It covers the following scenarios: + - Subscribing to an exact channel and verifying message reception. + - Subscribing to a pattern channel and verifying message reception. + - Subscribing to a sharded channel and verifying message reception. + - Ensuring that messages are correctly published and received using different retrieval methods (async, sync, callback). + - Verifying that no messages are left unread. 
+ - Properly unsubscribing from all channels to avoid interference with other tests. + """ + CHANNEL_NAME = "same-channel-name" + MESSAGE_EXACT = get_random_string(10) + MESSAGE_PATTERN = get_random_string(7) + MESSAGE_SHARDED = get_random_string(5) + publish_response = 2 if cluster_mode else OK + callback, context_exact, context_pattern, context_sharded = ( + None, + None, + None, + None, + ) + callback_messages_exact: List[CoreCommands.PubSubMsg] = [] + callback_messages_pattern: List[CoreCommands.PubSubMsg] = [] + callback_messages_sharded: List[CoreCommands.PubSubMsg] = [] + + if method == MethodTesting.Callback: + callback = new_message + context_exact = callback_messages_exact + context_pattern = callback_messages_pattern + context_sharded = callback_messages_sharded + + # Setup PUBSUB for exact channel + pub_sub_exact = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Exact: {CHANNEL_NAME}}, + {GlideClientConfiguration.PubSubChannelModes.Exact: {CHANNEL_NAME}}, + callback=callback, + context=context_exact, + ) + # Setup PUBSUB for pattern channels + pub_sub_pattern = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Pattern: {CHANNEL_NAME}}, + {GlideClientConfiguration.PubSubChannelModes.Pattern: {CHANNEL_NAME}}, + callback=callback, + context=context_pattern, + ) + # Setup PUBSUB for pattern channels + pub_sub_sharded = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Sharded: {CHANNEL_NAME}}, + {}, + callback=callback, + context=context_sharded, + ) + + client_pattern, client_exact = await create_two_clients( + request, cluster_mode, pub_sub_exact, pub_sub_pattern + ) + _, client_sharded = await create_two_clients( + request, cluster_mode, pub_sub_sharded + ) + # (Redis version > 7) + if await check_if_server_version_lt(client_pattern, "7.0.0"): + pytest.skip("Redis version required >= 7.0.0") + + try: + # Publish messages to 
each channel - both clients publishing + assert ( + await client_pattern.publish(MESSAGE_EXACT, CHANNEL_NAME) + == publish_response + ) + assert ( + await client_sharded.publish(MESSAGE_PATTERN, CHANNEL_NAME) + == publish_response + ) + assert ( + await cast(GlideClusterClient, client_exact).publish( + MESSAGE_SHARDED, CHANNEL_NAME, sharded=True + ) + == 1 + ) + + # allow the message to propagate + await asyncio.sleep(1) + + # Verify message for exact and pattern PUBSUB + for client, callback, pattern in [ # type: ignore + (client_exact, callback_messages_exact, None), + (client_pattern, callback_messages_pattern, CHANNEL_NAME), + ]: + pubsub_msg = await get_message_by_method(method, client, callback, 0) # type: ignore + + pubsub_msg2 = await get_message_by_method(method, client, callback, 1) # type: ignore + assert not pubsub_msg.message == pubsub_msg2.message + assert pubsub_msg2.message in [MESSAGE_PATTERN, MESSAGE_EXACT] + assert pubsub_msg.message in [MESSAGE_PATTERN, MESSAGE_EXACT] + assert pubsub_msg.channel == pubsub_msg2.channel == CHANNEL_NAME + assert pubsub_msg.pattern == pubsub_msg2.pattern == pattern + + msg = await get_message_by_method( + method, client_sharded, callback_messages_sharded, 0 + ) + assert msg.message == MESSAGE_SHARDED + assert msg.channel == CHANNEL_NAME + assert msg.pattern is None + + await check_no_messages_left( + method, client_pattern, callback_messages_pattern, 2 + ) + await check_no_messages_left( + method, client_exact, callback_messages_exact, 2 + ) + await check_no_messages_left( + method, client_sharded, callback_messages_sharded, 1 + ) + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await client_exact.custom_command(["UNSUBSCRIBE", CHANNEL_NAME]) + 
await client_pattern.custom_command(["PUNSUBSCRIBE", CHANNEL_NAME]) + await client_sharded.custom_command(["SUNSUBSCRIBE", CHANNEL_NAME]) + + @pytest.mark.skip( + reason="no way of currently testing this, see https://github.com/aws/glide-for-redis/issues/1649" + ) + @pytest.mark.parametrize("cluster_mode", [True, False]) + async def test_pubsub_exact_max_size_message(self, request, cluster_mode: bool): + """ + Tests publishing and receiving maximum size messages in PUBSUB. + + This test verifies that very large messages (512MB - BulkString max size) can be published and received + correctly in both cluster and standalone modes. It ensures that the PUBSUB system + can handle maximum size messages without errors and that async and sync message + retrieval methods can coexist and function correctly. + + The test covers the following scenarios: + - Setting up PUBSUB subscription for a specific channel. + - Publishing two maximum size messages to the channel. + - Verifying that the messages are received correctly using both async and sync methods. + - Ensuring that no additional messages are left after the expected messages are received. 
+ """ + channel = get_random_string(10) + message = get_random_string(512 * 1024 * 1024) + message2 = get_random_string(512 * 1024 * 1024) + publish_response = 1 if cluster_mode else OK + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Exact: {channel}}, + {GlideClientConfiguration.PubSubChannelModes.Exact: {channel}}, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + try: + assert await publishing_client.publish(message, channel) == publish_response + assert ( + await publishing_client.publish(message2, channel) == publish_response + ) + # allow the message to propagate + await asyncio.sleep(5) + + async_msg = await listening_client.get_pubsub_message() + sync_msg = listening_client.try_get_pubsub_message() + assert sync_msg + + assert async_msg.message == message + assert async_msg.channel == channel + assert async_msg.pattern is None + + assert sync_msg.message == message2 + assert sync_msg.channel == channel + assert sync_msg.pattern is None + + # assert there are no messages to read + with pytest.raises(asyncio.TimeoutError): + await asyncio.wait_for(listening_client.get_pubsub_message(), timeout=3) + + assert listening_client.try_get_pubsub_message() is None + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["UNSUBSCRIBE", channel]) + + @pytest.mark.skip( + reason="no way of currently testing this, see https://github.com/aws/glide-for-redis/issues/1649" + ) + @pytest.mark.parametrize("cluster_mode", [True]) + async def test_pubsub_sharded_max_size_message(self, request, cluster_mode: bool): + """ + Tests publishing and receiving maximum 
size messages in sharded PUBSUB. + + This test verifies that very large messages (512MB - BulkString max size) can be published and received + correctly. It ensures that the PUBSUB system + can handle maximum size messages without errors and that async and sync message + retrieval methods can coexist and function correctly. + + The test covers the following scenarios: + - Setting up PUBSUB subscription for a specific sharded channel. + - Publishing two maximum size messages to the channel. + - Verifying that the messages are received correctly using both async and sync methods. + - Ensuring that no additional messages are left after the expected messages are received. + """ + channel = get_random_string(10) + message = get_random_string(512 * 1024 * 1024) + message2 = get_random_string(512 * 1024 * 1024) + publish_response = 1 if cluster_mode else OK + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Sharded: {channel}}, + {}, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + # (Redis version > 7) + if await check_if_server_version_lt(publishing_client, "7.0.0"): + pytest.skip("Redis version required >= 7.0.0") + + try: + assert ( + await cast(GlideClusterClient, publishing_client).publish( + message, channel, sharded=True + ) + == publish_response + ) + assert ( + await publishing_client.publish(message2, channel) == publish_response + ) + # allow the message to propagate + await asyncio.sleep(5) + + async_msg = await listening_client.get_pubsub_message() + sync_msg = listening_client.try_get_pubsub_message() + assert sync_msg + + assert async_msg.message == message + assert async_msg.channel == channel + assert async_msg.pattern is None + + assert sync_msg.message == message2 + assert sync_msg.channel == channel + assert sync_msg.pattern is None + + # assert there are no messages to read + with pytest.raises(asyncio.TimeoutError): + await 
asyncio.wait_for(listening_client.get_pubsub_message(), timeout=3) + + assert listening_client.try_get_pubsub_message() is None + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["UNSUBSCRIBE", channel]) + + @pytest.mark.skip( + reason="no way of currently testing this, see https://github.com/aws/glide-for-redis/issues/1649" + ) + @pytest.mark.parametrize("cluster_mode", [True, False]) + async def test_pubsub_exact_max_size_message_callback( + self, request, cluster_mode: bool + ): + """ + Tests publishing and receiving maximum size messages in exact PUBSUB with callback method. + + This test verifies that very large messages (512MB - BulkString max size) can be published and received + correctly in both cluster and standalone modes. It ensures that the PUBSUB system + can handle maximum size messages without errors and that the callback message + retrieval method works as expected. + + The test covers the following scenarios: + - Setting up PUBSUB subscription for a specific channel with a callback. + - Publishing a maximum size message to the channel. + - Verifying that the message is received correctly using the callback method. 
+ """ + channel = get_random_string(10) + message = get_random_string(512 * 1024 * 1024) + publish_response = 1 if cluster_mode else OK + + callback_messages: List[CoreCommands.PubSubMsg] = [] + callback, context = new_message, callback_messages + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Exact: {channel}}, + {GlideClientConfiguration.PubSubChannelModes.Exact: {channel}}, + callback=callback, + context=context, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + try: + assert await publishing_client.publish(message, channel) == publish_response + # allow the message to propagate + await asyncio.sleep(5) + + assert len(callback_messages) == 1 + + assert callback_messages[0].message == message + assert callback_messages[0].channel == channel + assert callback_messages[0].pattern is None + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["UNSUBSCRIBE", channel]) + + @pytest.mark.skip( + reason="no way of currently testing this, see https://github.com/aws/glide-for-redis/issues/1649" + ) + @pytest.mark.parametrize("cluster_mode", [True]) + async def test_pubsub_sharded_max_size_message_callback( + self, request, cluster_mode: bool + ): + """ + Tests publishing and receiving maximum size messages in sharded PUBSUB with callback method. + + This test verifies that very large messages (512MB - BulkString max size) can be published and received + correctly. It ensures that the PUBSUB system + can handle maximum size messages without errors and that the callback message + retrieval method works as expected. 
+ + The test covers the following scenarios: + - Setting up PUBSUB subscription for a specific sharded channel with a callback. + - Publishing a maximum size message to the channel. + - Verifying that the message is received correctly using the callback method. + """ + channel = get_random_string(10) + message = get_random_string(512 * 1024 * 1024) + publish_response = 1 if cluster_mode else OK + + callback_messages: List[CoreCommands.PubSubMsg] = [] + callback, context = new_message, callback_messages + + pub_sub = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Sharded: {channel}}, + {}, + callback=callback, + context=context, + ) + + publishing_client, listening_client = await create_two_clients( + request, cluster_mode, pub_sub + ) + + # (Redis version > 7) + if await check_if_server_version_lt(publishing_client, "7.0.0"): + pytest.skip("Redis version required >= 7.0.0") + + try: + assert ( + await cast(GlideClusterClient, publishing_client).publish( + message, channel, sharded=True + ) + == publish_response + ) + # allow the message to propagate + await asyncio.sleep(5) + + assert len(callback_messages) == 1 + + assert callback_messages[0].message == message + assert callback_messages[0].channel == channel + assert callback_messages[0].pattern is None + + finally: + if cluster_mode: + # Since all tests run on the same cluster, when closing the client, garbage collector can be called after another test will start running + # In cluster mode, we check how many subscriptions received the message + # So to avoid flakiness, we make sure to unsubscribe from the channels + await listening_client.custom_command(["UNSUBSCRIBE", channel]) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + async def test_pubsub_resp2_raise_an_error(self, request, cluster_mode: bool): + """Tests that when creating a resp2 client with PUBSUB - an error will be raised""" + channel = get_random_string(5) + + pub_sub_exact = 
create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Exact: {channel}}, + {GlideClientConfiguration.PubSubChannelModes.Exact: {channel}}, + ) + + with pytest.raises(ConfigurationError): + await create_two_clients( + request, cluster_mode, pub_sub_exact, protocol=ProtocolVersion.RESP2 + ) + + @pytest.mark.parametrize("cluster_mode", [True, False]) + async def test_pubsub_context_with_no_callback_raise_error( + self, request, cluster_mode: bool + ): + """Tests that when creating a PUBSUB client in callback method with context but no callback raises an error""" + channel = get_random_string(5) + context: List[CoreCommands.PubSubMsg] = [] + pub_sub_exact = create_pubsub_subscription( + cluster_mode, + {ClusterClientConfiguration.PubSubChannelModes.Exact: {channel}}, + {GlideClientConfiguration.PubSubChannelModes.Exact: {channel}}, + context=context, + ) + + with pytest.raises(ConfigurationError): + await create_two_clients(request, cluster_mode, pub_sub_exact) diff --git a/python/python/tests/test_transaction.py b/python/python/tests/test_transaction.py index d9a4c2a3a3..684188656a 100644 --- a/python/python/tests/test_transaction.py +++ b/python/python/tests/test_transaction.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import time from datetime import date, datetime, timedelta, timezone @@ -6,10 +6,27 @@ import pytest from glide import RequestError +from glide.async_commands.bitmap import ( + BitFieldGet, + BitFieldSet, + BitmapIndexType, + BitOffset, + BitOffsetMultiplier, + BitwiseOperation, + OffsetOptions, + SignedEncoding, + UnsignedEncoding, +) from glide.async_commands.command_args import Limit, ListDirection, OrderBy -from glide.async_commands.core import InsertPosition, StreamAddOptions, TrimByMinId +from glide.async_commands.core import ( + ExpiryGetEx, + ExpiryTypeGetEx, + FlushMode, + 
InsertPosition, +) from glide.async_commands.sorted_set import ( AggregationType, + GeoSearchByBox, GeoSearchByRadius, GeospatialData, GeoUnit, @@ -20,6 +37,13 @@ ScoreBoundary, ScoreFilter, ) +from glide.async_commands.stream import ( + IdBound, + StreamAddOptions, + StreamGroupOptions, + StreamReadGroupOptions, + TrimByMinId, +) from glide.async_commands.transaction import ( BaseTransaction, ClusterTransaction, @@ -27,7 +51,7 @@ ) from glide.config import ProtocolVersion from glide.constants import OK, TResult -from glide.redis_client import RedisClient, RedisClusterClient, TRedisClient +from glide.glide_client import GlideClient, GlideClusterClient, TGlideClient from tests.conftest import create_client from tests.utils.utils import ( check_if_server_version_lt, @@ -39,7 +63,7 @@ async def transaction_test( transaction: Union[Transaction, ClusterTransaction], keyslot: str, - redis_client: TRedisClient, + redis_client: TGlideClient, ) -> List[TResult]: key = "{{{}}}:{}".format(keyslot, get_random_string(3)) # to get the same slot key2 = "{{{}}}:{}".format(keyslot, get_random_string(3)) # to get the same slot @@ -60,6 +84,8 @@ async def transaction_test( key17 = "{{{}}}:{}".format(keyslot, get_random_string(3)) # sort key18 = "{{{}}}:{}".format(keyslot, get_random_string(3)) # sort key19 = "{{{}}}:{}".format(keyslot, get_random_string(3)) # bitmap + key20 = "{{{}}}:{}".format(keyslot, get_random_string(3)) # bitmap + key22 = "{{{}}}:{}".format(keyslot, get_random_string(3)) # getex value = datetime.now(timezone.utc).strftime("%m/%d/%Y, %H:%M:%S") value2 = get_random_string(5) @@ -95,12 +121,25 @@ async def transaction_test( transaction.persist(key) args.append(False) + transaction.ttl(key) + args.append(-1) + if not await check_if_server_version_lt(redis_client, "7.0.0"): + transaction.expiretime(key) + args.append(-1) + transaction.pexpiretime(key) + args.append(-1) + + if not await check_if_server_version_lt(redis_client, "6.2.0"): + transaction.copy(key, key2, 
replace=True) + args.append(True) transaction.rename(key, key2) args.append(OK) transaction.exists([key2]) args.append(1) + transaction.touch([key2]) + args.append(1) transaction.delete([key2]) args.append(1) @@ -109,6 +148,8 @@ async def transaction_test( transaction.set(key, value) args.append(OK) + transaction.getrange(key, 0, -1) + args.append(value) transaction.getdel(key) args.append(value) transaction.getdel(key) @@ -179,6 +220,8 @@ async def transaction_test( args.append([key3]) transaction.hrandfield_withvalues(key4, 1) args.append([[key3, "10.5"]]) + transaction.hstrlen(key4, key3) + args.append(4) transaction.client_getname() args.append(None) @@ -230,6 +273,8 @@ async def transaction_test( args.append([key9, value3]) transaction.brpop([key9], 1) args.append([key9, value]) + transaction.lset(key9, 0, value2) + args.append(OK) transaction.sadd(key7, ["foo", "bar"]) args.append(2) @@ -253,6 +298,8 @@ async def transaction_test( args.append(2) transaction.sinter([key7, key7]) args.append({"foo", "bar"}) + transaction.sunion([key7, key7]) + args.append({"foo", "bar"}) transaction.sinterstore(key7, [key7, key7]) args.append(2) if not await check_if_server_version_lt(redis_client, "7.0.0"): @@ -271,11 +318,17 @@ async def transaction_test( args.append(4) transaction.zrank(key8, "one") args.append(0) + transaction.zrevrank(key8, "one") + args.append(3) if not await check_if_server_version_lt(redis_client, "7.2.0"): transaction.zrank_withscore(key8, "one") args.append([0, 1]) + transaction.zrevrank_withscore(key8, "one") + args.append([3, 1]) transaction.zadd_incr(key8, "one", 3) args.append(4) + transaction.zincrby(key8, 3, "one") + args.append(7) transaction.zrem(key8, ["one"]) args.append(1) transaction.zcard(key8) @@ -361,9 +414,43 @@ async def transaction_test( transaction.setbit(key19, 1, 1) args.append(0) - transaction.setbit(key19, 1, 0) + transaction.getbit(key19, 1) args.append(1) + transaction.set(key20, "foobar") + args.append(OK) + 
transaction.bitcount(key20) + args.append(26) + transaction.bitcount(key20, OffsetOptions(1, 1)) + args.append(6) + transaction.bitpos(key20, 1) + args.append(1) + + if not await check_if_server_version_lt(redis_client, "6.0.0"): + transaction.bitfield_read_only( + key20, [BitFieldGet(SignedEncoding(5), BitOffset(3))] + ) + args.append([6]) + + transaction.set(key19, "abcdef") + args.append(OK) + transaction.bitop(BitwiseOperation.AND, key19, [key19, key20]) + args.append(6) + transaction.get(key19) + args.append("`bc`ab") + transaction.bitfield( + key20, [BitFieldSet(UnsignedEncoding(10), BitOffsetMultiplier(3), 4)] + ) + args.append([609]) + + if not await check_if_server_version_lt(redis_client, "7.0.0"): + transaction.set(key20, "foobar") + args.append(OK) + transaction.bitcount(key20, OffsetOptions(5, 30, BitmapIndexType.BIT)) + args.append(17) + transaction.bitpos_interval(key20, 1, 44, 50, BitmapIndexType.BIT) + args.append(46) + transaction.geoadd( key12, { @@ -386,10 +473,19 @@ async def transaction_test( None, ] ) + transaction.geosearch( key12, "Catania", GeoSearchByRadius(200, GeoUnit.KILOMETERS), OrderBy.ASC ) args.append(["Catania", "Palermo"]) + transaction.geosearchstore( + key12, + key12, + GeospatialData(15, 37), + GeoSearchByBox(400, 400, GeoUnit.KILOMETERS), + store_dist=True, + ) + args.append(2) transaction.xadd(key11, [("foo", "bar")], StreamAddOptions(id="0-1")) args.append("0-1") @@ -397,9 +493,42 @@ async def transaction_test( args.append("0-2") transaction.xlen(key11) args.append(2) + transaction.xread({key11: "0-1"}) + args.append({key11: {"0-2": [["foo", "bar"]]}}) + transaction.xrange(key11, IdBound("0-1"), IdBound("0-1")) + args.append({"0-1": [["foo", "bar"]]}) + transaction.xrevrange(key11, IdBound("0-1"), IdBound("0-1")) + args.append({"0-1": [["foo", "bar"]]}) transaction.xtrim(key11, TrimByMinId(threshold="0-2", exact=True)) args.append(1) + group_name1 = get_random_string(10) + group_name2 = get_random_string(10) + consumer = 
get_random_string(10) + transaction.xgroup_create(key11, group_name1, "0-1") + args.append(OK) + transaction.xgroup_create( + key11, group_name2, "0-0", StreamGroupOptions(make_stream=True) + ) + args.append(OK) + transaction.xgroup_create_consumer(key11, group_name1, consumer) + args.append(True) + transaction.xreadgroup( + {key11: ">"}, group_name1, consumer, StreamReadGroupOptions(count=5) + ) + args.append({key11: {"0-2": [["foo", "bar"]]}}) + transaction.xack(key11, group_name1, ["0-2"]) + args.append(1) + transaction.xgroup_del_consumer(key11, group_name1, consumer) + args.append(0) + transaction.xgroup_destroy(key11, group_name1) + args.append(True) + transaction.xgroup_destroy(key11, group_name2) + args.append(True) + + transaction.xdel(key11, ["0-2", "0-3"]) + args.append(1) + transaction.lpush(key17, ["2", "1", "4", "3", "a"]) args.append(5) transaction.sort( @@ -417,6 +546,36 @@ async def transaction_test( alpha=True, ) args.append(4) + transaction.sadd(key7, ["one"]) + args.append(1) + transaction.srandmember(key7) + args.append("one") + transaction.srandmember_count(key7, 1) + args.append(["one"]) + transaction.flushall(FlushMode.ASYNC) + args.append(OK) + transaction.flushall() + args.append(OK) + transaction.flushdb(FlushMode.ASYNC) + args.append(OK) + transaction.flushdb() + args.append(OK) + + min_version = "6.2.0" + if not await check_if_server_version_lt(redis_client, min_version): + transaction.flushall(FlushMode.SYNC) + args.append(OK) + transaction.flushdb(FlushMode.SYNC) + args.append(OK) + + min_version = "6.2.0" + if not await check_if_server_version_lt(redis_client, min_version): + transaction.set(key22, "value") + args.append(OK) + transaction.getex(key22) + args.append("value") + transaction.getex(key22, ExpiryGetEx(ExpiryTypeGetEx.SEC, 1)) + args.append("value") min_version = "7.0.0" if not await check_if_server_version_lt(redis_client, min_version): @@ -434,10 +593,10 @@ async def transaction_test( class TestTransaction: 
@pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_transaction_with_different_slots(self, redis_client: TRedisClient): + async def test_transaction_with_different_slots(self, redis_client: TGlideClient): transaction = ( Transaction() - if isinstance(redis_client, RedisClient) + if isinstance(redis_client, GlideClient) else ClusterTransaction() ) transaction.set("key1", "value1") @@ -447,11 +606,11 @@ async def test_transaction_with_different_slots(self, redis_client: TRedisClient @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_transaction_custom_command(self, redis_client: TRedisClient): + async def test_transaction_custom_command(self, redis_client: TGlideClient): key = get_random_string(10) transaction = ( Transaction() - if isinstance(redis_client, RedisClient) + if isinstance(redis_client, GlideClient) else ClusterTransaction() ) transaction.custom_command(["HSET", key, "foo", "bar"]) @@ -462,12 +621,12 @@ async def test_transaction_custom_command(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_transaction_custom_unsupported_command( - self, redis_client: TRedisClient + self, redis_client: TGlideClient ): key = get_random_string(10) transaction = ( Transaction() - if isinstance(redis_client, RedisClient) + if isinstance(redis_client, GlideClient) else ClusterTransaction() ) transaction.custom_command(["WATCH", key]) @@ -479,12 +638,12 @@ async def test_transaction_custom_unsupported_command( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_transaction_discard_command(self, redis_client: TRedisClient): + 
async def test_transaction_discard_command(self, redis_client: TGlideClient): key = get_random_string(10) await redis_client.set(key, "1") transaction = ( Transaction() - if isinstance(redis_client, RedisClient) + if isinstance(redis_client, GlideClient) else ClusterTransaction() ) @@ -498,7 +657,7 @@ async def test_transaction_discard_command(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_transaction_exec_abort(self, redis_client: TRedisClient): + async def test_transaction_exec_abort(self, redis_client: TGlideClient): key = get_random_string(10) transaction = BaseTransaction() transaction.custom_command(["INCR", key, key, key]) @@ -510,7 +669,7 @@ async def test_transaction_exec_abort(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_cluster_transaction(self, redis_client: RedisClusterClient): + async def test_cluster_transaction(self, redis_client: GlideClusterClient): assert await redis_client.custom_command(["FLUSHALL"]) == OK keyslot = get_random_string(3) transaction = ClusterTransaction() @@ -525,9 +684,9 @@ async def test_cluster_transaction(self, redis_client: RedisClusterClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_can_return_null_on_watch_transaction_failures( - self, redis_client: TRedisClient, request + self, redis_client: TGlideClient, request ): - is_cluster = isinstance(redis_client, RedisClusterClient) + is_cluster = isinstance(redis_client, GlideClusterClient) client2 = await create_client( request, is_cluster, @@ -546,9 +705,26 @@ async def test_can_return_null_on_watch_transaction_failures( await client2.close() + 
@pytest.mark.parametrize("cluster_mode", [True, False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_transaction_large_values(self, request, cluster_mode, protocol): + redis_client = await create_client( + request, cluster_mode=cluster_mode, protocol=protocol, timeout=5000 + ) + length = 2**25 # 33mb + key = "0" * length + value = "0" * length + transaction = Transaction() + transaction.set(key, value) + transaction.get(key) + result = await redis_client.exec(transaction) + assert isinstance(result, list) + assert result[0] == OK + assert result[1] == value + @pytest.mark.parametrize("cluster_mode", [False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_standalone_transaction(self, redis_client: RedisClient): + async def test_standalone_transaction(self, redis_client: GlideClient): assert await redis_client.custom_command(["FLUSHALL"]) == OK keyslot = get_random_string(3) key = "{{{}}}:{}".format(keyslot, get_random_string(3)) # to get the same slot @@ -596,10 +772,31 @@ def test_transaction_clear(self): transaction.clear() assert len(transaction.commands) == 0 + @pytest.mark.parametrize("cluster_mode", [False]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_standalone_copy_transaction(self, redis_client: GlideClient): + min_version = "6.2.0" + if await check_if_server_version_lt(redis_client, min_version): + return pytest.mark.skip(reason=f"Redis version required >= {min_version}") + + keyslot = get_random_string(3) + key = "{{{}}}:{}".format(keyslot, get_random_string(3)) # to get the same slot + key1 = "{{{}}}:{}".format(keyslot, get_random_string(3)) # to get the same slot + value = get_random_string(5) + transaction = Transaction() + transaction.select(1) + transaction.set(key, value) + transaction.copy(key, key1, 1, replace=True) + transaction.get(key1) + result = await 
redis_client.exec(transaction) + assert result is not None + assert result[2] == True + assert result[3] == value + @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_transaction_chaining_calls(self, redis_client: TRedisClient): - cluster_mode = isinstance(redis_client, RedisClusterClient) + async def test_transaction_chaining_calls(self, redis_client: TGlideClient): + cluster_mode = isinstance(redis_client, GlideClusterClient) key = get_random_string(3) transaction = ClusterTransaction() if cluster_mode else Transaction() @@ -614,7 +811,7 @@ async def test_transaction_chaining_calls(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_transaction_object_commands( - self, redis_client: TRedisClient, cluster_mode: bool + self, redis_client: TGlideClient, cluster_mode: bool ): string_key = get_random_string(10) maxmemory_policy_key = "maxmemory-policy" @@ -652,7 +849,7 @@ async def test_transaction_object_commands( @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) async def test_transaction_lastsave( - self, redis_client: TRedisClient, cluster_mode: bool + self, redis_client: TGlideClient, cluster_mode: bool ): yesterday = date.today() - timedelta(1) yesterday_unix_time = time.mktime(yesterday.timetuple()) @@ -663,3 +860,15 @@ async def test_transaction_lastsave( lastsave_time = response[0] assert isinstance(lastsave_time, int) assert lastsave_time > yesterday_unix_time + + @pytest.mark.parametrize("cluster_mode", [True]) + @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) + async def test_lolwut_transaction(self, redis_client: GlideClusterClient): + transaction = Transaction() + 
transaction.lolwut().lolwut(5).lolwut(parameters=[1, 2]).lolwut(6, [42]) + results = await redis_client.exec(transaction) + assert results is not None + + for element in results: + assert isinstance(element, str) + assert "Redis ver. " in element diff --git a/python/python/tests/test_utils.py b/python/python/tests/test_utils.py index 25e50e53eb..3191db0867 100644 --- a/python/python/tests/test_utils.py +++ b/python/python/tests/test_utils.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import pytest from glide.logger import Level, Logger diff --git a/python/python/tests/tests_redis_modules/test_json.py b/python/python/tests/tests_redis_modules/test_json.py index e1a4dd381f..a6ace91b2d 100644 --- a/python/python/tests/tests_redis_modules/test_json.py +++ b/python/python/tests/tests_redis_modules/test_json.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import json as OuterJson @@ -9,7 +9,7 @@ from glide.config import ProtocolVersion from glide.constants import OK from glide.exceptions import RequestError -from glide.redis_client import TRedisClient +from glide.glide_client import TGlideClient from tests.test_async_client import get_random_string, parse_info_response @@ -17,13 +17,13 @@ class TestJson: @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_json_module_is_loaded(self, redis_client: TRedisClient): + async def test_json_module_is_loaded(self, redis_client: TGlideClient): res = parse_info_response(await redis_client.info([InfoSection.MODULES])) assert "ReJSON" in res["module"] @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, 
ProtocolVersion.RESP3]) - async def test_json_set_get(self, redis_client: TRedisClient): + async def test_json_set_get(self, redis_client: TGlideClient): key = get_random_string(5) json_value = {"a": 1.0, "b": 2} @@ -42,7 +42,7 @@ async def test_json_set_get(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_json_set_get_multiple_values(self, redis_client: TRedisClient): + async def test_json_set_get_multiple_values(self, redis_client: TGlideClient): key = get_random_string(5) assert ( @@ -70,7 +70,7 @@ async def test_json_set_get_multiple_values(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_json_set_conditional_set(self, redis_client: TRedisClient): + async def test_json_set_conditional_set(self, redis_client: TGlideClient): key = get_random_string(5) value = OuterJson.dumps({"a": 1.0, "b": 2}) assert ( @@ -122,7 +122,7 @@ async def test_json_set_conditional_set(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_json_get_formatting(self, redis_client: TRedisClient): + async def test_json_get_formatting(self, redis_client: TGlideClient): key = get_random_string(5) assert ( await json.set( @@ -152,7 +152,7 @@ async def test_json_get_formatting(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_del(self, redis_client: TRedisClient): + async def test_del(self, redis_client: TGlideClient): key = get_random_string(5) json_value = {"a": 1.0, "b": {"a": 1, "b": 2.5, "c": True}} @@ -171,7 +171,7 @@ async def 
test_del(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_forget(self, redis_client: TRedisClient): + async def test_forget(self, redis_client: TGlideClient): key = get_random_string(5) json_value = {"a": 1.0, "b": {"a": 1, "b": 2.5, "c": True}} @@ -190,7 +190,7 @@ async def test_forget(self, redis_client: TRedisClient): @pytest.mark.parametrize("cluster_mode", [True, False]) @pytest.mark.parametrize("protocol", [ProtocolVersion.RESP2, ProtocolVersion.RESP3]) - async def test_json_toggle(self, redis_client: TRedisClient): + async def test_json_toggle(self, redis_client: TGlideClient): key = get_random_string(10) json_value = {"bool": True, "nested": {"bool": False, "nested": {"bool": 10}}} assert await json.set(redis_client, key, "$", OuterJson.dumps(json_value)) == OK diff --git a/python/python/tests/utils/cluster.py b/python/python/tests/utils/cluster.py index 9fbf03d3ed..a00ec2d625 100644 --- a/python/python/tests/utils/cluster.py +++ b/python/python/tests/utils/cluster.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import os import subprocess diff --git a/python/python/tests/utils/utils.py b/python/python/tests/utils/utils.py index 05bb68ca0e..f97bcc14e6 100644 --- a/python/python/tests/utils/utils.py +++ b/python/python/tests/utils/utils.py @@ -5,7 +5,7 @@ from glide.async_commands.core import InfoSection from glide.constants import TResult -from glide.redis_client import TRedisClient +from glide.glide_client import TGlideClient from packaging import version T = TypeVar("T") @@ -70,7 +70,7 @@ def get_random_string(length): return result_str -async def check_if_server_version_lt(client: TRedisClient, min_version: str) -> bool: +async def check_if_server_version_lt(client: TGlideClient, 
min_version: str) -> bool: # TODO: change it to pytest fixture after we'll implement a sync client info_str = await client.info([InfoSection.SERVER]) redis_version = parse_info_response(info_str).get("redis_version") diff --git a/python/src/lib.rs b/python/src/lib.rs index e1a799a0dd..143d706f99 100644 --- a/python/src/lib.rs +++ b/python/src/lib.rs @@ -1,15 +1,18 @@ +use bytes::Bytes; /** - * Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 + * Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 */ use glide_core::start_socket_listener; +use glide_core::MAX_REQUEST_ARGS_LENGTH; use pyo3::prelude::*; -use pyo3::types::{PyBool, PyDict, PyFloat, PyList, PySet}; +use pyo3::types::{PyBool, PyBytes, PyDict, PyFloat, PyList, PySet}; use pyo3::Python; use redis::Value; pub const DEFAULT_TIMEOUT_IN_MILLISECONDS: u32 = glide_core::client::DEFAULT_RESPONSE_TIMEOUT.as_millis() as u32; +pub const MAX_REQUEST_ARGS_LEN: u32 = MAX_REQUEST_ARGS_LENGTH as u32; #[pyclass] #[derive(PartialEq, Eq, PartialOrd, Clone)] @@ -60,6 +63,7 @@ fn glide(_py: Python, m: &PyModule) -> PyResult<()> { "DEFAULT_TIMEOUT_IN_MILLISECONDS", DEFAULT_TIMEOUT_IN_MILLISECONDS, )?; + m.add("MAX_REQUEST_ARGS_LEN", MAX_REQUEST_ARGS_LEN)?; #[pyfn(m)] fn py_log(log_level: Level, log_identifier: String, message: String) { @@ -145,7 +149,13 @@ fn glide(_py: Python, m: &PyModule) -> PyResult<()> { Value::Boolean(boolean) => Ok(PyBool::new(py, boolean).into_py(py)), Value::VerbatimString { format: _, text } => Ok(text.into_py(py)), Value::BigNumber(bigint) => Ok(bigint.into_py(py)), - Value::Push { kind: _, data: _ } => todo!(), + Value::Push { kind, data } => { + let dict = PyDict::new(py); + dict.set_item("kind", format!("{kind:?}"))?; + let values: &PyList = PyList::new(py, iter_to_value(py, data)?); + dict.set_item("values", values)?; + Ok(dict.into_py(py)) + } } } @@ -162,6 +172,19 @@ fn glide(_py: Python, m: &PyModule) -> PyResult<()> { let value = 
Value::SimpleString(message); Box::leak(Box::new(value)) as *mut Value as usize } + + #[pyfn(m)] + pub fn create_leaked_bytes_vec(args_vec: Vec<&PyBytes>) -> usize { + // Convert the bytes vec -> Bytes vector + let bytes_vec: Vec = args_vec + .iter() + .map(|v| { + let bytes = v.as_bytes(); + Bytes::from(bytes.to_vec()) + }) + .collect(); + Box::leak(Box::new(bytes_vec)) as *mut Vec as usize + } Ok(()) } diff --git a/submodules/redis-rs b/submodules/redis-rs index c80fe144fd..cb81fb77b0 160000 --- a/submodules/redis-rs +++ b/submodules/redis-rs @@ -1 +1 @@ -Subproject commit c80fe144fd2127e95d4befb15091618d358a190a +Subproject commit cb81fb77b0dde6d57e3127158a17f6f81eac5a23 diff --git a/utils/cluster_manager.py b/utils/cluster_manager.py index ffa9f9af1e..8eedcb0e4d 100644 --- a/utils/cluster_manager.py +++ b/utils/cluster_manager.py @@ -1,6 +1,6 @@ #!/usr/bin/python3 -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import argparse import logging @@ -497,7 +497,7 @@ def wait_for_a_message_in_redis_logs( continue log_file = f"{dir}/redis.log" - if server_ports and str(dir) not in server_ports: + if server_ports and os.path.basename(os.path.normpath(dir)) not in server_ports: continue if not wait_for_message(log_file, message, 10): raise Exception( diff --git a/utils/get_licenses_from_ort.py b/utils/get_licenses_from_ort.py index 46d62e429d..19721e5138 100644 --- a/utils/get_licenses_from_ort.py +++ b/utils/get_licenses_from_ort.py @@ -1,4 +1,4 @@ -# Copyright GLIDE-for-Redis Project Contributors - SPDX Identifier: Apache-2.0 +# Copyright Valkey GLIDE Project Contributors - SPDX Identifier: Apache-2.0 import json import os @@ -65,6 +65,7 @@ def __str__(self): OrtResults("Python", "python/ort_results"), OrtResults("Node", "node/ort_results"), OrtResults("Rust", "glide-core/ort_results"), + OrtResults("Java", "java/ort_results"), ] all_licenses_set: Set = set()