diff --git a/.github/workflows/build-proxy.yml b/.github/workflows/build-proxy.yml
deleted file mode 100644
index 47ee21021..000000000
--- a/.github/workflows/build-proxy.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-name: Build and upload proxy client to JFrog
-
-on:
- push:
- branches:
- - stage
- # TODO: snapshots_private has been removed from base parent pom.xml. Need to add workflow code to write snapshots_private to local pipeline pom.xml (this workflow will not work until that is done)
-
-jobs:
- build:
- runs-on: ubuntu-latest
- steps:
- - name: Checkout Java client
- uses: actions/checkout@v2
-
- - name: Set up settings.xml for Maven
- uses: s4u/maven-settings-action@v2.8.0
- with:
- servers: '[{"id": "snapshots_private", "username": "${{ secrets.JFROG_USERNAME }}", "password": "${{ secrets.JFROG_MAVEN_TOKEN }}"}]'
-
- - name: Build Java client
- run: mvn install
-
- - name: Upload to JFrog
- run: mvn deploy
diff --git a/README.md b/README.md
index e5927a7bf..d5c313b63 100644
--- a/README.md
+++ b/README.md
@@ -4,7 +4,6 @@ Aerospike Java Client Package
Aerospike Java client. This package contains full source code for these projects.
* client: Java native client library.
-* proxy: Java proxy client library for dbaas (database as a service).
* examples: Java client examples.
* benchmarks: Java client benchmarks.
* test: Java client unit tests.
diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml
index cb4b52b73..15c28e7c3 100644
--- a/benchmarks/pom.xml
+++ b/benchmarks/pom.xml
@@ -6,7 +6,7 @@
com.aerospikeaerospike-parent
- 8.1.4
+ 9.0.0aerospike-benchmarksjar
@@ -18,11 +18,6 @@
aerospike-client-jdk8
-
- com.aerospike
- aerospike-proxy-client
-
-
io.nettynetty-transport
@@ -66,40 +61,34 @@
maven-compiler-plugin
- org.apache.maven.plugins
- maven-shade-plugin
- 3.4.1
+ maven-assembly-plugin
+
+
+ jar-with-dependencies
+
+
+
+ com.aerospike.benchmarks.Main
+
+
+
+ make-my-jar-with-dependencies
+ package
- shade
+ single
-
-
-
- *:*
-
- META-INF/*.SF
- META-INF/*.DSA
- META-INF/*.RSA
-
-
-
- true
- jar-with-dependencies
-
-
-
-
- com.aerospike.benchmarks.Main
-
-
-
-
+
+
+ resources
+ true
+
+
diff --git a/benchmarks/src/com/aerospike/benchmarks/Main.java b/benchmarks/src/com/aerospike/benchmarks/Main.java
index 1107393fc..130c9085b 100644
--- a/benchmarks/src/com/aerospike/benchmarks/Main.java
+++ b/benchmarks/src/com/aerospike/benchmarks/Main.java
@@ -37,6 +37,7 @@
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
+import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Host;
import com.aerospike.client.IAerospikeClient;
import com.aerospike.client.Key;
@@ -62,11 +63,9 @@
import com.aerospike.client.policy.Replica;
import com.aerospike.client.policy.TlsPolicy;
import com.aerospike.client.policy.WritePolicy;
-import com.aerospike.client.proxy.AerospikeClientFactory;
import com.aerospike.client.util.Util;
import io.netty.channel.EventLoopGroup;
-import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.kqueue.KQueueEventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
@@ -105,7 +104,6 @@ public static void main(String[] args) {
private int nThreads;
private int asyncMaxCommands = 100;
private int eventLoopSize = 1;
- private boolean useProxyClient;
private boolean asyncEnabled;
private boolean initialize;
private boolean batchShowNodes;
@@ -352,13 +350,11 @@ public Main(String[] commandLineArgs) throws Exception {
"Value: DIRECT_NIO | NETTY_NIO | NETTY_EPOLL | NETTY_KQUEUE | NETTY_IOURING"
);
- options.addOption("proxy", false, "Use proxy client.");
-
options.addOption("upn", "udfPackageName", true, "Specify the package name where the udf function is located");
options.addOption("ufn", "udfFunctionName", true, "Specify the udf function name that must be used in the udf benchmarks");
options.addOption("ufv","udfFunctionValues",true, "The udf argument values comma separated");
options.addOption("sendKey", false, "Send key to server");
-
+
options.addOption("pids", "partitionIds", true, "Specify the list of comma seperated partition IDs the primary keys must belong to");
// parse the command line arguments
@@ -385,10 +381,6 @@ public Main(String[] commandLineArgs) throws Exception {
this.asyncEnabled = true;
}
- if (line.hasOption("proxy")) {
- this.useProxyClient = true;
- }
-
args.readPolicy = clientPolicy.readPolicyDefault;
args.writePolicy = clientPolicy.writePolicyDefault;
args.batchPolicy = clientPolicy.batchPolicyDefault;
@@ -415,13 +407,6 @@ public Main(String[] commandLineArgs) throws Exception {
this.port = 3000;
}
- // If the Aerospike server's default port (3000) is used and the proxy client is used,
- // Reset the port to the proxy server's default port (4000).
- if (port == 3000 && useProxyClient) {
- System.out.println("Change proxy server port to 4000");
- port = 4000;
- }
-
if (line.hasOption("hosts")) {
this.hosts = Host.parseHosts(line.getOptionValue("hosts"), this.port);
}
@@ -999,12 +984,12 @@ else if (! level.equals("all")) {
if (line.hasOption("sendKey")) {
args.writePolicy.sendKey = true;
}
-
+
if (line.hasOption("partitionIds")) {
String[] pids = line.getOptionValue("partitionIds").split(",");
-
+
Set partitionIds = new HashSet<>();
-
+
for (String pid : pids) {
int partitionId = -1;
@@ -1021,7 +1006,7 @@ else if (! level.equals("all")) {
partitionIds.add(partitionId);
}
-
+
args.partitionIds = partitionIds;
}
@@ -1180,16 +1165,6 @@ public void runBenchmarks() throws Exception {
eventPolicy.minTimeout = args.writePolicy.socketTimeout;
}
- if (this.useProxyClient && this.eventLoopType == EventLoopType.DIRECT_NIO) {
- // Proxy client requires netty event loops.
- if (Epoll.isAvailable()) {
- this.eventLoopType = EventLoopType.NETTY_EPOLL;
- }
- else {
- this.eventLoopType = EventLoopType.NETTY_NIO;
- }
- }
-
switch (this.eventLoopType) {
default:
case DIRECT_NIO: {
@@ -1229,7 +1204,7 @@ public void runBenchmarks() throws Exception {
clientPolicy.asyncMaxConnsPerNode = this.asyncMaxCommands;
}
- IAerospikeClient client = AerospikeClientFactory.getClient(clientPolicy, useProxyClient, hosts);
+ IAerospikeClient client = new AerospikeClient(clientPolicy, hosts);
try {
if (initialize) {
@@ -1249,7 +1224,7 @@ public void runBenchmarks() throws Exception {
}
}
else {
- IAerospikeClient client = AerospikeClientFactory.getClient(clientPolicy, useProxyClient, hosts);
+ IAerospikeClient client = new AerospikeClient(clientPolicy, hosts);
try {
if (initialize) {
diff --git a/client/pom.xml b/client/pom.xml
index dfe67163a..889ef685e 100644
--- a/client/pom.xml
+++ b/client/pom.xml
@@ -6,7 +6,7 @@
com.aerospikeaerospike-parent
- 8.1.4
+ 9.0.0aerospike-client-jdk8jar
diff --git a/client/src/com/aerospike/client/AbortStatus.java b/client/src/com/aerospike/client/AbortStatus.java
new file mode 100644
index 000000000..c55f59b66
--- /dev/null
+++ b/client/src/com/aerospike/client/AbortStatus.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client;
+
+/**
+ * Multi-record transaction (MRT) abort status code.
+ */
+public enum AbortStatus {
+ OK("Abort succeeded"),
+ ALREADY_COMMITTED("Already committed"),
+ ALREADY_ABORTED("Already aborted"),
+ ROLL_BACK_ABANDONED("MRT client roll back abandoned. Server will eventually abort the MRT."),
+ CLOSE_ABANDONED("MRT has been rolled back, but MRT client close was abandoned. Server will eventually close the MRT.");
+
+ public final String str;
+
+ AbortStatus(String str) {
+ this.str = str;
+ }
+}
diff --git a/client/src/com/aerospike/client/AerospikeClient.java b/client/src/com/aerospike/client/AerospikeClient.java
index df7c8dbd2..c22a1a925 100644
--- a/client/src/com/aerospike/client/AerospikeClient.java
+++ b/client/src/com/aerospike/client/AerospikeClient.java
@@ -37,13 +37,16 @@
import com.aerospike.client.async.AsyncExists;
import com.aerospike.client.async.AsyncIndexTask;
import com.aerospike.client.async.AsyncInfoCommand;
-import com.aerospike.client.async.AsyncOperate;
+import com.aerospike.client.async.AsyncOperateRead;
+import com.aerospike.client.async.AsyncOperateWrite;
import com.aerospike.client.async.AsyncQueryExecutor;
import com.aerospike.client.async.AsyncQueryPartitionExecutor;
import com.aerospike.client.async.AsyncRead;
import com.aerospike.client.async.AsyncReadHeader;
import com.aerospike.client.async.AsyncScanPartitionExecutor;
import com.aerospike.client.async.AsyncTouch;
+import com.aerospike.client.async.AsyncTxnMonitor;
+import com.aerospike.client.async.AsyncTxnRoll;
import com.aerospike.client.async.AsyncWrite;
import com.aerospike.client.async.EventLoop;
import com.aerospike.client.cdt.CTX;
@@ -66,12 +69,15 @@
import com.aerospike.client.command.ExistsCommand;
import com.aerospike.client.command.IBatchCommand;
import com.aerospike.client.command.OperateArgs;
-import com.aerospike.client.command.OperateCommand;
+import com.aerospike.client.command.OperateCommandRead;
+import com.aerospike.client.command.OperateCommandWrite;
import com.aerospike.client.command.ReadCommand;
import com.aerospike.client.command.ReadHeaderCommand;
import com.aerospike.client.command.RegisterCommand;
import com.aerospike.client.command.ScanExecutor;
import com.aerospike.client.command.TouchCommand;
+import com.aerospike.client.command.TxnMonitor;
+import com.aerospike.client.command.TxnRoll;
import com.aerospike.client.command.WriteCommand;
import com.aerospike.client.exp.Expression;
import com.aerospike.client.listener.BatchListListener;
@@ -90,6 +96,8 @@
import com.aerospike.client.listener.RecordArrayListener;
import com.aerospike.client.listener.RecordListener;
import com.aerospike.client.listener.RecordSequenceListener;
+import com.aerospike.client.listener.AbortListener;
+import com.aerospike.client.listener.CommitListener;
import com.aerospike.client.listener.WriteListener;
import com.aerospike.client.metrics.MetricsPolicy;
import com.aerospike.client.policy.AdminPolicy;
@@ -102,6 +110,8 @@
import com.aerospike.client.policy.Policy;
import com.aerospike.client.policy.QueryPolicy;
import com.aerospike.client.policy.ScanPolicy;
+import com.aerospike.client.policy.TxnRollPolicy;
+import com.aerospike.client.policy.TxnVerifyPolicy;
import com.aerospike.client.policy.WritePolicy;
import com.aerospike.client.query.IndexCollectionType;
import com.aerospike.client.query.IndexType;
@@ -187,7 +197,7 @@ public class AerospikeClient implements IAerospikeClient, Closeable {
public final BatchDeletePolicy batchDeletePolicyDefault;
/**
- * Default user defined function policy used in batch UDF excecute commands.
+ * Default user defined function policy used in batch UDF execute commands.
*/
public final BatchUDFPolicy batchUDFPolicyDefault;
@@ -196,6 +206,17 @@ public class AerospikeClient implements IAerospikeClient, Closeable {
*/
public final InfoPolicy infoPolicyDefault;
+ /**
+ * Default multi-record transaction (MRT) policy when verifying record versions in a batch on a commit.
+ */
+ public final TxnVerifyPolicy txnVerifyPolicyDefault;
+
+ /**
+ * Default multi-record transaction (MRT) policy when rolling the transaction records forward (commit)
+ * or back (abort) in a batch.
+ */
+ public final TxnRollPolicy txnRollPolicyDefault;
+
private final WritePolicy operatePolicyReadDefault;
//-------------------------------------------------------
@@ -294,6 +315,8 @@ public AerospikeClient(ClientPolicy policy, Host... hosts)
this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault;
this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault;
this.infoPolicyDefault = policy.infoPolicyDefault;
+ this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault;
+ this.txnRollPolicyDefault = policy.txnRollPolicyDefault;
this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault);
cluster = new Cluster(this, policy, hosts);
@@ -318,6 +341,8 @@ protected AerospikeClient(ClientPolicy policy) {
this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault;
this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault;
this.infoPolicyDefault = policy.infoPolicyDefault;
+ this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault;
+ this.txnRollPolicyDefault = policy.txnRollPolicyDefault;
this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault);
}
else {
@@ -331,6 +356,8 @@ protected AerospikeClient(ClientPolicy policy) {
this.batchDeletePolicyDefault = new BatchDeletePolicy();
this.batchUDFPolicyDefault = new BatchUDFPolicy();
this.infoPolicyDefault = new InfoPolicy();
+ this.txnVerifyPolicyDefault = new TxnVerifyPolicy();
+ this.txnRollPolicyDefault = new TxnRollPolicy();
this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault);
}
}
@@ -340,145 +367,159 @@ protected AerospikeClient(ClientPolicy policy) {
//-------------------------------------------------------
/**
- * Return read policy default. Use when the policy will not be modified.
+ * Copy read policy default to avoid problems if this shared instance is later modified.
*/
public final Policy getReadPolicyDefault() {
- return readPolicyDefault;
+ return new Policy(readPolicyDefault);
}
/**
- * Copy read policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy read policy default.
*/
public final Policy copyReadPolicyDefault() {
return new Policy(readPolicyDefault);
}
/**
- * Return write policy default. Use when the policy will not be modified.
+ * Copy write policy default to avoid problems if this shared instance is later modified.
*/
public final WritePolicy getWritePolicyDefault() {
- return writePolicyDefault;
+ return new WritePolicy(writePolicyDefault);
}
/**
- * Copy write policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy write policy default.
*/
public final WritePolicy copyWritePolicyDefault() {
return new WritePolicy(writePolicyDefault);
}
/**
- * Return scan policy default. Use when the policy will not be modified.
+ * Copy scan policy default to avoid problems if this shared instance is later modified.
*/
public final ScanPolicy getScanPolicyDefault() {
- return scanPolicyDefault;
+ return new ScanPolicy(scanPolicyDefault);
}
/**
- * Copy scan policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy scan policy default.
*/
public final ScanPolicy copyScanPolicyDefault() {
return new ScanPolicy(scanPolicyDefault);
}
/**
- * Return query policy default. Use when the policy will not be modified.
+ * Copy query policy default to avoid problems if this shared instance is later modified.
*/
public final QueryPolicy getQueryPolicyDefault() {
- return queryPolicyDefault;
+ return new QueryPolicy(queryPolicyDefault);
}
/**
- * Copy query policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy query policy default.
*/
public final QueryPolicy copyQueryPolicyDefault() {
return new QueryPolicy(queryPolicyDefault);
}
/**
- * Return batch header read policy default. Use when the policy will not be modified.
+ * Copy batch header read policy default to avoid problems if this shared instance is later modified.
*/
public final BatchPolicy getBatchPolicyDefault() {
- return batchPolicyDefault;
+ return new BatchPolicy(batchPolicyDefault);
}
/**
- * Copy batch header read policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch header read policy default.
*/
public final BatchPolicy copyBatchPolicyDefault() {
return new BatchPolicy(batchPolicyDefault);
}
/**
- * Return batch header write policy default. Use when the policy will not be modified.
+ * Copy batch header write policy default to avoid problems if this shared instance is later modified.
*/
public final BatchPolicy getBatchParentPolicyWriteDefault() {
- return batchParentPolicyWriteDefault;
+ return new BatchPolicy(batchParentPolicyWriteDefault);
}
/**
- * Copy batch header write policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch header write policy default.
*/
public final BatchPolicy copyBatchParentPolicyWriteDefault() {
return new BatchPolicy(batchParentPolicyWriteDefault);
}
/**
- * Return batch detail write policy default. Use when the policy will not be modified.
+ * Copy batch detail write policy default to avoid problems if this shared instance is later modified.
*/
public final BatchWritePolicy getBatchWritePolicyDefault() {
- return batchWritePolicyDefault;
+ return new BatchWritePolicy(batchWritePolicyDefault);
}
/**
- * Copy batch detail write policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch detail write policy default.
*/
public final BatchWritePolicy copyBatchWritePolicyDefault() {
return new BatchWritePolicy(batchWritePolicyDefault);
}
/**
- * Return batch detail delete policy default. Use when the policy will not be modified.
+ * Copy batch detail delete policy default to avoid problems if this shared instance is later modified.
*/
public final BatchDeletePolicy getBatchDeletePolicyDefault() {
- return batchDeletePolicyDefault;
+ return new BatchDeletePolicy(batchDeletePolicyDefault);
}
/**
- * Copy batch detail delete policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch detail delete policy default.
*/
public final BatchDeletePolicy copyBatchDeletePolicyDefault() {
return new BatchDeletePolicy(batchDeletePolicyDefault);
}
/**
- * Return batch detail UDF policy default. Use when the policy will not be modified.
+ * Copy batch detail UDF policy default to avoid problems if this shared instance is later modified.
*/
public final BatchUDFPolicy getBatchUDFPolicyDefault() {
- return batchUDFPolicyDefault;
+ return new BatchUDFPolicy(batchUDFPolicyDefault);
}
/**
- * Copy batch detail UDF policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch detail UDF policy default.
*/
public final BatchUDFPolicy copyBatchUDFPolicyDefault() {
return new BatchUDFPolicy(batchUDFPolicyDefault);
}
/**
- * Return info command policy default. Use when the policy will not be modified.
+ * Copy info command policy default to avoid problems if this shared instance is later modified.
*/
public final InfoPolicy getInfoPolicyDefault() {
- return infoPolicyDefault;
+ return new InfoPolicy(infoPolicyDefault);
}
/**
- * Copy info command policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy info command policy default.
*/
public final InfoPolicy copyInfoPolicyDefault() {
return new InfoPolicy(infoPolicyDefault);
}
+ /**
+ * Copy MRT record version verify policy default.
+ */
+ public final TxnVerifyPolicy copyTxnVerifyPolicyDefault() {
+ return new TxnVerifyPolicy(txnVerifyPolicyDefault);
+ }
+
+ /**
+ * Copy MRT roll forward/back policy default.
+ */
+ public final TxnRollPolicy copyTxnRollPolicyDefault() {
+ return new TxnRollPolicy(txnRollPolicyDefault);
+ }
+
//-------------------------------------------------------
// Cluster Connection Management
//-------------------------------------------------------
@@ -574,13 +615,160 @@ public final Cluster getCluster() {
return cluster;
}
+ //-------------------------------------------------------
+ // Multi-Record Transactions
+ //-------------------------------------------------------
+
+ /**
+ * Attempt to commit the given multi-record transaction. First, the expected record versions are
+ * sent to the server nodes for verification. If all nodes return success, the transaction is
+ * committed. Otherwise, the transaction is aborted.
+ *
+ * Requires server version 8.0+
+ *
+ * @param txn multi-record transaction
+ * @return status of the commit on success
+ * @throws AerospikeException.Commit if the transaction verify or commit fails
+ */
+ public final CommitStatus commit(Txn txn)
+ throws AerospikeException.Commit {
+
+ TxnRoll tr = new TxnRoll(cluster, txn);
+
+ switch (txn.getState()) {
+ default:
+ case OPEN:
+ tr.verify(txnVerifyPolicyDefault, txnRollPolicyDefault);
+ return tr.commit(txnRollPolicyDefault);
+
+ case VERIFIED:
+ return tr.commit(txnRollPolicyDefault);
+
+ case COMMITTED:
+ return CommitStatus.ALREADY_COMMITTED;
+
+ case ABORTED:
+ return CommitStatus.ALREADY_ABORTED;
+ }
+ }
+
+ /**
+ * Asynchronously attempt to commit the given multi-record transaction. First, the expected
+ * record versions are sent to the server nodes for verification. If all nodes return success,
+ * the transaction is committed. Otherwise, the transaction is aborted.
+ *
+ * This method registers the command with an event loop and returns.
+ * The event loop thread will process the command and send the results to the listener.
+ *
+ * Requires server version 8.0+
+ *
+ * @param eventLoop event loop that will process the command. If NULL, the event
+ * loop will be chosen by round-robin.
+ * @param listener where to send results
+ * @param txn multi-record transaction
+ * @throws AerospikeException if event loop registration fails
+ */
+ public final void commit(EventLoop eventLoop, CommitListener listener, Txn txn)
+ throws AerospikeException {
+ if (eventLoop == null) {
+ eventLoop = cluster.eventLoops.next();
+ }
+
+ AsyncTxnRoll atr = new AsyncTxnRoll(
+ cluster, eventLoop, txnVerifyPolicyDefault, txnRollPolicyDefault, txn
+ );
+
+ switch (txn.getState()) {
+ default:
+ case OPEN:
+ atr.verify(listener);
+ break;
+
+ case VERIFIED:
+ atr.commit(listener);
+ break;
+
+ case COMMITTED:
+ listener.onSuccess(CommitStatus.ALREADY_COMMITTED);
+ break;
+
+ case ABORTED:
+ listener.onSuccess(CommitStatus.ALREADY_ABORTED);
+ break;
+ }
+ }
+
+ /**
+ * Abort and rollback the given multi-record transaction.
+ *
+ * Requires server version 8.0+
+ *
+ * @param txn multi-record transaction
+ * @return status of the abort
+ */
+ public final AbortStatus abort(Txn txn) {
+ TxnRoll tr = new TxnRoll(cluster, txn);
+
+ switch (txn.getState()) {
+ default:
+ case OPEN:
+ case VERIFIED:
+ return tr.abort(txnRollPolicyDefault);
+
+ case COMMITTED:
+ return AbortStatus.ALREADY_COMMITTED;
+
+ case ABORTED:
+ return AbortStatus.ALREADY_ABORTED;
+ }
+ }
+
+ /**
+ * Asynchronously abort and rollback the given multi-record transaction.
+ *
+ * This method registers the command with an event loop and returns.
+ * The event loop thread will process the command and send the results to the listener.
+ *
+ * Requires server version 8.0+
+ *
+ * @param eventLoop event loop that will process the command. If NULL, the event
+ * loop will be chosen by round-robin.
+ * @param listener where to send results
+ * @param txn multi-record transaction
+ * @throws AerospikeException if event loop registration fails
+ */
+ public final void abort(EventLoop eventLoop, AbortListener listener, Txn txn)
+ throws AerospikeException {
+ if (eventLoop == null) {
+ eventLoop = cluster.eventLoops.next();
+ }
+
+ AsyncTxnRoll atr = new AsyncTxnRoll(cluster, eventLoop, null, txnRollPolicyDefault, txn);
+
+ switch (txn.getState()) {
+ default:
+ case OPEN:
+ case VERIFIED:
+ atr.abort(listener);
+ break;
+
+ case COMMITTED:
+ listener.onSuccess(AbortStatus.ALREADY_COMMITTED);
+ break;
+
+ case ABORTED:
+ listener.onSuccess(AbortStatus.ALREADY_ABORTED);
+ break;
+ }
+ }
+
//-------------------------------------------------------
// Write Record Operations
//-------------------------------------------------------
/**
* Write record bin(s).
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
*
* @param policy write configuration parameters, pass in null for defaults
@@ -593,6 +781,11 @@ public final void put(WritePolicy policy, Key key, Bin... bins)
if (policy == null) {
policy = writePolicyDefault;
}
+
+ if (policy.txn != null) {
+ TxnMonitor.addKey(cluster, policy, key);
+ }
+
WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.WRITE);
command.execute();
}
@@ -602,7 +795,7 @@ public final void put(WritePolicy policy, Key key, Bin... bins)
* This method registers the command with an event loop and returns.
* The event loop thread will process the command and send the results to the listener.
*
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
*
* @param eventLoop event loop that will process the command. If NULL, the event
@@ -622,8 +815,9 @@ public final void put(EventLoop eventLoop, WriteListener listener, WritePolicy p
if (policy == null) {
policy = writePolicyDefault;
}
+
AsyncWrite command = new AsyncWrite(cluster, listener, policy, key, bins, Operation.Type.WRITE);
- eventLoop.execute(cluster, command);
+ AsyncTxnMonitor.execute(eventLoop, cluster, policy, command);
}
//-------------------------------------------------------
@@ -632,7 +826,7 @@ public final void put(EventLoop eventLoop, WriteListener listener, WritePolicy p
/**
* Append bin string values to existing record bin values.
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call only works for string values.
*
@@ -646,6 +840,11 @@ public final void append(WritePolicy policy, Key key, Bin... bins)
if (policy == null) {
policy = writePolicyDefault;
}
+
+ if (policy.txn != null) {
+ TxnMonitor.addKey(cluster, policy, key);
+ }
+
WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.APPEND);
command.execute();
}
@@ -655,7 +854,7 @@ public final void append(WritePolicy policy, Key key, Bin... bins)
* This method registers the command with an event loop and returns.
* The event loop thread will process the command and send the results to the listener.
*
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call only works for string values.
*
@@ -676,13 +875,14 @@ public final void append(EventLoop eventLoop, WriteListener listener, WritePolic
if (policy == null) {
policy = writePolicyDefault;
}
+
AsyncWrite command = new AsyncWrite(cluster, listener, policy, key, bins, Operation.Type.APPEND);
- eventLoop.execute(cluster, command);
+ AsyncTxnMonitor.execute(eventLoop, cluster, policy, command);
}
/**
* Prepend bin string values to existing record bin values.
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call works only for string values.
*
@@ -696,6 +896,11 @@ public final void prepend(WritePolicy policy, Key key, Bin... bins)
if (policy == null) {
policy = writePolicyDefault;
}
+
+ if (policy.txn != null) {
+ TxnMonitor.addKey(cluster, policy, key);
+ }
+
WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.PREPEND);
command.execute();
}
@@ -705,7 +910,7 @@ public final void prepend(WritePolicy policy, Key key, Bin... bins)
* This method registers the command with an event loop and returns.
* The event loop thread will process the command and send the results to the listener.
*
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call only works for string values.
*
@@ -726,8 +931,9 @@ public final void prepend(EventLoop eventLoop, WriteListener listener, WritePoli
if (policy == null) {
policy = writePolicyDefault;
}
+
AsyncWrite command = new AsyncWrite(cluster, listener, policy, key, bins, Operation.Type.PREPEND);
- eventLoop.execute(cluster, command);
+ AsyncTxnMonitor.execute(eventLoop, cluster, policy, command);
}
//-------------------------------------------------------
@@ -737,7 +943,7 @@ public final void prepend(EventLoop eventLoop, WriteListener listener, WritePoli
/**
* Add integer/double bin values to record bin values. If the record or bin does not exist, the
* record/bin will be created by default with the value to be added. The policy specifies the
- * transaction timeout, record expiration and how the transaction is handled when the record
+ * command timeout, record expiration and how the command is handled when the record
* already exists.
*
* @param policy write configuration parameters, pass in null for defaults
@@ -750,6 +956,11 @@ public final void add(WritePolicy policy, Key key, Bin... bins)
if (policy == null) {
policy = writePolicyDefault;
}
+
+ if (policy.txn != null) {
+ TxnMonitor.addKey(cluster, policy, key);
+ }
+
WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.ADD);
command.execute();
}
@@ -757,7 +968,7 @@ public final void add(WritePolicy policy, Key key, Bin... bins)
/**
* Asynchronously add integer/double bin values to record bin values. If the record or bin does
* not exist, the record/bin will be created by default with the value to be added. The policy
- * specifies the transaction timeout, record expiration and how the transaction is handled when
+ * specifies the command timeout, record expiration and how the command is handled when
* the record already exists.
*
* This method registers the command with an event loop and returns.
@@ -780,8 +991,9 @@ public final void add(EventLoop eventLoop, WriteListener listener, WritePolicy p
if (policy == null) {
policy = writePolicyDefault;
}
+
AsyncWrite command = new AsyncWrite(cluster, listener, policy, key, bins, Operation.Type.ADD);
- eventLoop.execute(cluster, command);
+ AsyncTxnMonitor.execute(eventLoop, cluster, policy, command);
}
//-------------------------------------------------------
@@ -790,7 +1002,7 @@ public final void add(EventLoop eventLoop, WriteListener listener, WritePolicy p
/**
* Delete record for specified key.
- * The policy specifies the transaction timeout.
+ * The policy specifies the command timeout.
*
* @param policy delete configuration parameters, pass in null for defaults
* @param key unique record identifier
@@ -802,6 +1014,11 @@ public final boolean delete(WritePolicy policy, Key key)
if (policy == null) {
policy = writePolicyDefault;
}
+
+ if (policy.txn != null) {
+ TxnMonitor.addKey(cluster, policy, key);
+ }
+
DeleteCommand command = new DeleteCommand(cluster, policy, key);
command.execute();
return command.existed();
@@ -812,7 +1029,7 @@ public final boolean delete(WritePolicy policy, Key key)
* This method registers the command with an event loop and returns.
* The event loop thread will process the command and send the results to the listener.
*
- * The policy specifies the transaction timeout.
+ * The policy specifies the command timeout.
*
* @param eventLoop event loop that will process the command. If NULL, the event
* loop will be chosen by round-robin.
@@ -830,8 +1047,9 @@ public final void delete(EventLoop eventLoop, DeleteListener listener, WritePoli
if (policy == null) {
policy = writePolicyDefault;
}
+
AsyncDelete command = new AsyncDelete(cluster, listener, policy, key);
- eventLoop.execute(cluster, command);
+ AsyncTxnMonitor.execute(eventLoop, cluster, policy, command);
}
/**
@@ -859,6 +1077,10 @@ public final BatchResults delete(BatchPolicy batchPolicy, BatchDeletePolicy dele
deletePolicy = batchDeletePolicyDefault;
}
+ if (batchPolicy.txn != null) {
+ TxnMonitor.addKeys(cluster, batchPolicy, keys);
+ }
+
BatchAttr attr = new BatchAttr();
attr.setDelete(deletePolicy);
@@ -962,7 +1184,7 @@ public final void delete(
executor, bn, batchPolicy, keys, null, records, attr);
}
}
- executor.execute(commands);
+ AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys);
}
/**
@@ -1029,7 +1251,7 @@ public final void delete(
executor, bn, batchPolicy, keys, null, sent, listener, attr);
}
}
- executor.execute(commands);
+ AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys);
}
/**
@@ -1102,6 +1324,11 @@ public final void touch(WritePolicy policy, Key key)
if (policy == null) {
policy = writePolicyDefault;
}
+
+ if (policy.txn != null) {
+ TxnMonitor.addKey(cluster, policy, key);
+ }
+
TouchCommand command = new TouchCommand(cluster, policy, key);
command.execute();
}
@@ -1129,8 +1356,9 @@ public final void touch(EventLoop eventLoop, WriteListener listener, WritePolicy
if (policy == null) {
policy = writePolicyDefault;
}
+
AsyncTouch command = new AsyncTouch(cluster, listener, policy, key);
- eventLoop.execute(cluster, command);
+ AsyncTxnMonitor.execute(eventLoop, cluster, policy, command);
}
//-------------------------------------------------------
@@ -1151,6 +1379,11 @@ public final boolean exists(Policy policy, Key key)
if (policy == null) {
policy = readPolicyDefault;
}
+
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
ExistsCommand command = new ExistsCommand(cluster, policy, key);
command.execute();
return command.exists();
@@ -1179,6 +1412,11 @@ public final void exists(EventLoop eventLoop, ExistsListener listener, Policy po
if (policy == null) {
policy = readPolicyDefault;
}
+
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
AsyncExists command = new AsyncExists(cluster, listener, policy, key);
eventLoop.execute(cluster, command);
}
@@ -1202,6 +1440,10 @@ public final boolean[] exists(BatchPolicy policy, Key[] keys)
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
boolean[] existsArray = new boolean[keys.length];
try {
@@ -1258,6 +1500,10 @@ public final void exists(EventLoop eventLoop, ExistsArrayListener listener, Batc
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
boolean[] existsArray = new boolean[keys.length];
AsyncBatchExecutor.ExistsArray executor = new AsyncBatchExecutor.ExistsArray(
eventLoop, cluster, listener, keys, existsArray);
@@ -1308,6 +1554,10 @@ public final void exists(EventLoop eventLoop, ExistsSequenceListener listener, B
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
AsyncBatchExecutor.ExistsSequence executor = new AsyncBatchExecutor.ExistsSequence(
eventLoop, cluster, listener);
List bns = BatchNodeList.generate(cluster, policy, keys, null, false, executor);
@@ -1346,6 +1596,11 @@ public final Record get(Policy policy, Key key)
if (policy == null) {
policy = readPolicyDefault;
}
+
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
ReadCommand command = new ReadCommand(cluster, policy, key);
command.execute();
return command.getRecord();
@@ -1374,6 +1629,11 @@ public final void get(EventLoop eventLoop, RecordListener listener, Policy polic
if (policy == null) {
policy = readPolicyDefault;
}
+
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
AsyncRead command = new AsyncRead(cluster, listener, policy, key, null);
eventLoop.execute(cluster, command);
}
@@ -1393,6 +1653,11 @@ public final Record get(Policy policy, Key key, String... binNames)
if (policy == null) {
policy = readPolicyDefault;
}
+
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
ReadCommand command = new ReadCommand(cluster, policy, key, binNames);
command.execute();
return command.getRecord();
@@ -1422,6 +1687,11 @@ public final void get(EventLoop eventLoop, RecordListener listener, Policy polic
if (policy == null) {
policy = readPolicyDefault;
}
+
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
AsyncRead command = new AsyncRead(cluster, listener, policy, key, binNames);
eventLoop.execute(cluster, command);
}
@@ -1440,6 +1710,11 @@ public final Record getHeader(Policy policy, Key key)
if (policy == null) {
policy = readPolicyDefault;
}
+
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
ReadHeaderCommand command = new ReadHeaderCommand(cluster, policy, key);
command.execute();
return command.getRecord();
@@ -1468,6 +1743,11 @@ public final void getHeader(EventLoop eventLoop, RecordListener listener, Policy
if (policy == null) {
policy = readPolicyDefault;
}
+
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
AsyncReadHeader command = new AsyncReadHeader(cluster, listener, policy, key);
eventLoop.execute(cluster, command);
}
@@ -1498,6 +1778,10 @@ public final boolean get(BatchPolicy policy, List records)
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(records);
+ }
+
BatchStatus status = new BatchStatus(true);
List bns = BatchNodeList.generate(cluster, policy, records, status);
IBatchCommand[] commands = new IBatchCommand[bns.size()];
@@ -1548,6 +1832,10 @@ public final void get(EventLoop eventLoop, BatchListListener listener, BatchPoli
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(records);
+ }
+
AsyncBatchExecutor.ReadList executor = new AsyncBatchExecutor.ReadList(eventLoop, cluster, listener, records);
List bns = BatchNodeList.generate(cluster, policy, records, executor);
AsyncCommand[] commands = new AsyncCommand[bns.size()];
@@ -1597,6 +1885,10 @@ public final void get(EventLoop eventLoop, BatchSequenceListener listener, Batch
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(records);
+ }
+
AsyncBatchExecutor.ReadSequence executor = new AsyncBatchExecutor.ReadSequence(eventLoop, cluster, listener);
List bns = BatchNodeList.generate(cluster, policy, records, executor);
AsyncCommand[] commands = new AsyncCommand[bns.size()];
@@ -1636,6 +1928,10 @@ public final Record[] get(BatchPolicy policy, Key[] keys)
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
Record[] records = new Record[keys.length];
try {
@@ -1694,6 +1990,10 @@ public final void get(EventLoop eventLoop, RecordArrayListener listener, BatchPo
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
Record[] records = new Record[keys.length];
AsyncBatchExecutor.GetArray executor = new AsyncBatchExecutor.GetArray(
eventLoop, cluster, listener, keys, records);
@@ -1745,6 +2045,10 @@ public final void get(EventLoop eventLoop, RecordSequenceListener listener, Batc
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
AsyncBatchExecutor.GetSequence executor = new AsyncBatchExecutor.GetSequence(eventLoop, cluster, listener);
List bns = BatchNodeList.generate(cluster, policy, keys, null, false, executor);
AsyncCommand[] commands = new AsyncCommand[bns.size()];
@@ -1786,6 +2090,10 @@ public final Record[] get(BatchPolicy policy, Key[] keys, String... binNames)
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
int readAttr = (binNames == null || binNames.length == 0)?
Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ;
@@ -1847,6 +2155,10 @@ public final void get(EventLoop eventLoop, RecordArrayListener listener, BatchPo
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
int readAttr = (binNames == null || binNames.length == 0)?
Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ;
@@ -1902,6 +2214,10 @@ public final void get(EventLoop eventLoop, RecordSequenceListener listener, Batc
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
int readAttr = (binNames == null || binNames.length == 0)?
Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ;
@@ -1945,6 +2261,10 @@ public final Record[] get(BatchPolicy policy, Key[] keys, Operation... ops)
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
Record[] records = new Record[keys.length];
try {
@@ -2003,6 +2323,10 @@ public final void get(EventLoop eventLoop, RecordArrayListener listener, BatchPo
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
Record[] records = new Record[keys.length];
AsyncBatchExecutor.GetArray executor = new AsyncBatchExecutor.GetArray(
eventLoop, cluster, listener, keys, records);
@@ -2055,6 +2379,10 @@ public final void get(EventLoop eventLoop, RecordSequenceListener listener, Batc
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
AsyncBatchExecutor.GetSequence executor = new AsyncBatchExecutor.GetSequence(eventLoop, cluster, listener);
List bns = BatchNodeList.generate(cluster, policy, keys, null, false, executor);
AsyncCommand[] commands = new AsyncCommand[bns.size()];
@@ -2094,6 +2422,10 @@ public final Record[] getHeader(BatchPolicy policy, Key[] keys)
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
Record[] records = new Record[keys.length];
try {
@@ -2152,6 +2484,10 @@ public final void getHeader(EventLoop eventLoop, RecordArrayListener listener, B
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
Record[] records = new Record[keys.length];
AsyncBatchExecutor.GetArray executor = new AsyncBatchExecutor.GetArray(
eventLoop, cluster, listener, keys, records);
@@ -2204,6 +2540,10 @@ public final void getHeader(EventLoop eventLoop, RecordSequenceListener listener
policy = batchPolicyDefault;
}
+ if (policy.txn != null) {
+ policy.txn.prepareRead(keys);
+ }
+
AsyncBatchExecutor.GetSequence executor = new AsyncBatchExecutor.GetSequence(eventLoop, cluster, listener);
List bns = BatchNodeList.generate(cluster, policy, keys, null, false, executor);
AsyncCommand[] commands = new AsyncCommand[bns.size()];
@@ -2249,9 +2589,26 @@ public final void getHeader(EventLoop eventLoop, RecordSequenceListener listener
public final Record operate(WritePolicy policy, Key key, Operation... operations)
throws AerospikeException {
OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations);
- OperateCommand command = new OperateCommand(cluster, key, args);
- command.execute();
- return command.getRecord();
+ policy = args.writePolicy;
+
+ if (args.hasWrite) {
+ if (policy.txn != null) {
+ TxnMonitor.addKey(cluster, policy, key);
+ }
+
+ OperateCommandWrite command = new OperateCommandWrite(cluster, key, args);
+ command.execute();
+ return command.getRecord();
+ }
+ else {
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
+ OperateCommandRead command = new OperateCommandRead(cluster, key, args);
+ command.execute();
+ return command.getRecord();
+ }
}
/**
@@ -2284,8 +2641,20 @@ public final void operate(EventLoop eventLoop, RecordListener listener, WritePol
}
OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations);
- AsyncOperate command = new AsyncOperate(cluster, listener, key, args);
- eventLoop.execute(cluster, command);
+ policy = args.writePolicy;
+
+ if (args.hasWrite) {
+ AsyncOperateWrite command = new AsyncOperateWrite(cluster, listener, key, args);
+ AsyncTxnMonitor.execute(eventLoop, cluster, args.writePolicy, command);
+ }
+ else {
+ if (policy.txn != null) {
+ policy.txn.prepareRead(key.namespace);
+ }
+
+ AsyncOperateRead command = new AsyncOperateRead(cluster, listener, key, args);
+ eventLoop.execute(cluster, command);
+ }
}
//-------------------------------------------------------
@@ -2317,6 +2686,10 @@ public final boolean operate(BatchPolicy policy, List records)
policy = batchParentPolicyWriteDefault;
}
+ if (policy.txn != null) {
+ TxnMonitor.addKeys(cluster, policy, records);
+ }
+
BatchStatus status = new BatchStatus(true);
List bns = BatchNodeList.generate(cluster, policy, records, status);
IBatchCommand[] commands = new IBatchCommand[bns.size()];
@@ -2481,7 +2854,7 @@ public final void operate(
commands[count++] = new AsyncBatch.OperateListCommand(executor, bn, policy, records);
}
}
- executor.execute(commands);
+ AsyncTxnMonitor.executeBatch(policy, executor, commands, records);
}
/**
@@ -2586,7 +2959,7 @@ public final void operate(
commands[count++] = new AsyncBatch.OperateSequenceCommand(executor, bn, policy, listener, records);
}
}
- executor.execute(commands);
+ AsyncTxnMonitor.executeBatch(policy, executor, commands, records);
}
/**
@@ -2622,6 +2995,10 @@ public final BatchResults operate(
writePolicy = batchWritePolicyDefault;
}
+ if (batchPolicy.txn != null) {
+ TxnMonitor.addKeys(cluster, batchPolicy, keys);
+ }
+
BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops);
BatchRecord[] records = new BatchRecord[keys.length];
@@ -2731,7 +3108,7 @@ public final void operate(
executor, bn, batchPolicy, keys, ops, records, attr);
}
}
- executor.execute(commands);
+ AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys);
}
/**
@@ -2801,7 +3178,7 @@ public final void operate(
executor, bn, batchPolicy, keys, ops, sent, listener, attr);
}
}
- executor.execute(commands);
+ AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys);
}
//-------------------------------------------------------
@@ -3095,13 +3472,18 @@ public final void removeUdf(InfoPolicy policy, String serverPath)
* @param functionName user defined function
* @param functionArgs arguments passed in to user defined function
* @return return value of user defined function
- * @throws AerospikeException if transaction fails
+ * @throws AerospikeException if command fails
*/
public final Object execute(WritePolicy policy, Key key, String packageName, String functionName, Value... functionArgs)
throws AerospikeException {
if (policy == null) {
policy = writePolicyDefault;
}
+
+ if (policy.txn != null) {
+ TxnMonitor.addKey(cluster, policy, key);
+ }
+
ExecuteCommand command = new ExecuteCommand(cluster, policy, key, packageName, functionName, functionArgs);
command.execute();
@@ -3168,8 +3550,9 @@ public final void execute(
if (policy == null) {
policy = writePolicyDefault;
}
+
AsyncExecute command = new AsyncExecute(cluster, listener, policy, key, packageName, functionName, functionArgs);
- eventLoop.execute(cluster, command);
+ AsyncTxnMonitor.execute(eventLoop, cluster, policy, command);
}
/**
@@ -3208,6 +3591,10 @@ public final BatchResults execute(
udfPolicy = batchUDFPolicyDefault;
}
+ if (batchPolicy.txn != null) {
+ TxnMonitor.addKeys(cluster, batchPolicy, keys);
+ }
+
byte[] argBytes = Packer.pack(functionArgs);
BatchAttr attr = new BatchAttr();
@@ -3323,7 +3710,7 @@ public final void execute(
executor, bn, batchPolicy, keys, packageName, functionName, argBytes, records, attr);
}
}
- executor.execute(commands);
+ AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys);
}
/**
@@ -3399,7 +3786,7 @@ public final void execute(
executor, bn, batchPolicy, keys, packageName, functionName, argBytes, sent, listener, attr);
}
}
- executor.execute(commands);
+ AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys);
}
//----------------------------------------------------------
@@ -3432,7 +3819,7 @@ public final ExecuteTask execute(
}
statement.setAggregateFunction(packageName, functionName, functionArgs);
- cluster.addTran();
+ cluster.addCommandCount();
long taskId = statement.prepareTaskId();
Node[] nodes = cluster.validateNodes();
@@ -3471,7 +3858,7 @@ public final ExecuteTask execute(
statement.setOperations(operations);
}
- cluster.addTran();
+ cluster.addCommandCount();
long taskId = statement.prepareTaskId();
Node[] nodes = cluster.validateNodes();
diff --git a/client/src/com/aerospike/client/AerospikeException.java b/client/src/com/aerospike/client/AerospikeException.java
index db1c77420..7a092a086 100644
--- a/client/src/com/aerospike/client/AerospikeException.java
+++ b/client/src/com/aerospike/client/AerospikeException.java
@@ -150,14 +150,14 @@ public final void setNode(Node node) {
}
/**
- * Get transaction policy. Will be null for non-transaction exceptions.
+ * Get command policy. Will be null for non-command exceptions.
*/
public final Policy getPolicy() {
return policy;
}
/**
- * Set transaction policy.
+ * Set command policy.
*/
public final void setPolicy(Policy policy) {
this.policy = policy;
@@ -199,14 +199,14 @@ public final void setIteration(int iteration) {
}
/**
- * Is it possible that write transaction may have completed.
+ * Is it possible that write command may have completed.
*/
public final boolean getInDoubt() {
return inDoubt;
}
/**
- * Set whether it is possible that the write transaction may have completed
+ * Set whether it is possible that the write command may have completed
* even though this exception was generated. This may be the case when a
* client error occurs (like timeout) after the command was sent to the server.
*/
@@ -434,6 +434,11 @@ public BatchRecordArray(BatchRecord[] records, Throwable e) {
super(ResultCode.BATCH_FAILED, "Batch failed", e);
this.records = records;
}
+
+ public BatchRecordArray(BatchRecord[] records, String message, Throwable e) {
+ super(ResultCode.BATCH_FAILED, message, e);
+ this.records = records;
+ }
}
/**
@@ -489,4 +494,83 @@ public Backoff(int resultCode) {
super(resultCode);
}
}
+
+ /**
+ * Exception thrown when {@link AerospikeClient#commit(com.aerospike.client.Txn)} fails.
+ */
+ public static final class Commit extends AerospikeException {
+ private static final long serialVersionUID = 1L;
+
+ /**
+ * Error status of the attempted commit.
+ */
+ public final CommitError error;
+
+ /**
+ * Verify result for each read key in the MRT. May be null if failure occurred before verify.
+ */
+ public final BatchRecord[] verifyRecords;
+
+ /**
+ * Roll forward/backward result for each write key in the MRT. May be null if failure occurred before
+ * roll forward/backward.
+ */
+ public final BatchRecord[] rollRecords;
+
+ public Commit(CommitError error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords) {
+ super(ResultCode.TXN_FAILED, error.str);
+ this.error = error;
+ this.verifyRecords = verifyRecords;
+ this.rollRecords = rollRecords;
+ }
+
+ public Commit(CommitError error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords, Throwable cause) {
+ super(ResultCode.TXN_FAILED, error.str, cause);
+ this.error = error;
+ this.verifyRecords = verifyRecords;
+ this.rollRecords = rollRecords;
+ }
+
+ @Override
+ public String getMessage() {
+ String msg = super.getMessage();
+ StringBuilder sb = new StringBuilder(1024);
+ recordsToString(sb, "verify errors:", verifyRecords);
+ recordsToString(sb, "roll errors:", rollRecords);
+ return msg + sb.toString();
+ }
+ }
+
+ private static void recordsToString(StringBuilder sb, String title, BatchRecord[] records) {
+ if (records == null) {
+ return;
+ }
+
+ int count = 0;
+
+ for (BatchRecord br : records) {
+ // Only show results with an error response.
+ if (!(br.resultCode == ResultCode.OK || br.resultCode == ResultCode.NO_RESPONSE)) {
+ // Only show first 3 errors.
+ if (count >= 3) {
+ sb.append(System.lineSeparator());
+ sb.append("...");
+ break;
+ }
+
+ if (count == 0) {
+ sb.append(System.lineSeparator());
+ sb.append(title);
+ }
+
+ sb.append(System.lineSeparator());
+ sb.append(br.key);
+ sb.append(',');
+ sb.append(br.resultCode);
+ sb.append(',');
+ sb.append(br.inDoubt);
+ count++;
+ }
+ }
+ }
}
diff --git a/client/src/com/aerospike/client/BatchRecord.java b/client/src/com/aerospike/client/BatchRecord.java
index 33a7b94ea..cdb9dcd45 100644
--- a/client/src/com/aerospike/client/BatchRecord.java
+++ b/client/src/com/aerospike/client/BatchRecord.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2022 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -40,7 +40,7 @@ public class BatchRecord {
public int resultCode;
/**
- * Is it possible that the write transaction may have completed even though an error
+ * Is it possible that the write command may have completed even though an error
* occurred for this record. This may be the case when a client error occurs (like timeout)
* after the command was sent to the server.
*/
diff --git a/client/src/com/aerospike/client/CommitError.java b/client/src/com/aerospike/client/CommitError.java
new file mode 100644
index 000000000..05d6ddb34
--- /dev/null
+++ b/client/src/com/aerospike/client/CommitError.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client;
+
+/**
+ * Multi-record transaction (MRT) error status.
+ */
+public enum CommitError {
+ VERIFY_FAIL("MRT verify failed. MRT aborted."),
+ VERIFY_FAIL_CLOSE_ABANDONED("MRT verify failed. MRT aborted. MRT client close abandoned. Server will eventually close the MRT."),
+ VERIFY_FAIL_ABORT_ABANDONED("MRT verify failed. MRT client abort abandoned. Server will eventually abort the MRT."),
+ MARK_ROLL_FORWARD_ABANDONED("MRT client mark roll forward abandoned. Server will eventually abort the MRT.");
+
+ public final String str;
+
+ CommitError(String str) {
+ this.str = str;
+ }
+}
diff --git a/client/src/com/aerospike/client/CommitStatus.java b/client/src/com/aerospike/client/CommitStatus.java
new file mode 100644
index 000000000..7a973ae45
--- /dev/null
+++ b/client/src/com/aerospike/client/CommitStatus.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client;
+
+/**
+ * Multi-record transaction (MRT) commit status code.
+ */
+public enum CommitStatus {
+ OK("Commit succeeded"),
+ ALREADY_COMMITTED("Already committed"),
+ ALREADY_ABORTED("Already aborted"),
+ ROLL_FORWARD_ABANDONED("MRT client roll forward abandoned. Server will eventually commit the MRT."),
+ CLOSE_ABANDONED("MRT has been rolled forward, but MRT client close was abandoned. Server will eventually close the MRT.");
+
+ public final String str;
+
+ CommitStatus(String str) {
+ this.str = str;
+ }
+}
diff --git a/client/src/com/aerospike/client/IAerospikeClient.java b/client/src/com/aerospike/client/IAerospikeClient.java
index 161ea58bb..bf3af145f 100644
--- a/client/src/com/aerospike/client/IAerospikeClient.java
+++ b/client/src/com/aerospike/client/IAerospikeClient.java
@@ -45,6 +45,8 @@
import com.aerospike.client.listener.RecordArrayListener;
import com.aerospike.client.listener.RecordListener;
import com.aerospike.client.listener.RecordSequenceListener;
+import com.aerospike.client.listener.AbortListener;
+import com.aerospike.client.listener.CommitListener;
import com.aerospike.client.listener.WriteListener;
import com.aerospike.client.metrics.MetricsPolicy;
import com.aerospike.client.policy.AdminPolicy;
@@ -56,6 +58,8 @@
import com.aerospike.client.policy.Policy;
import com.aerospike.client.policy.QueryPolicy;
import com.aerospike.client.policy.ScanPolicy;
+import com.aerospike.client.policy.TxnRollPolicy;
+import com.aerospike.client.policy.TxnVerifyPolicy;
import com.aerospike.client.policy.WritePolicy;
import com.aerospike.client.query.IndexCollectionType;
import com.aerospike.client.query.IndexType;
@@ -78,105 +82,115 @@ public interface IAerospikeClient extends Closeable {
//-------------------------------------------------------
/**
- * Return read policy default. Use when the policy will not be modified.
+ * Copy read policy default to avoid problems if this shared instance is later modified.
*/
public Policy getReadPolicyDefault();
/**
- * Copy read policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy read policy default.
*/
public Policy copyReadPolicyDefault();
/**
- * Return write policy default. Use when the policy will not be modified.
+ * Copy write policy default to avoid problems if this shared instance is later modified.
*/
public WritePolicy getWritePolicyDefault();
/**
- * Copy write policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy write policy default.
*/
public WritePolicy copyWritePolicyDefault();
/**
- * Return scan policy default. Use when the policy will not be modified.
+ * Copy scan policy default to avoid problems if this shared instance is later modified.
*/
public ScanPolicy getScanPolicyDefault();
/**
- * Copy scan policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy scan policy default.
*/
public ScanPolicy copyScanPolicyDefault();
/**
- * Return query policy default. Use when the policy will not be modified.
+ * Copy query policy default to avoid problems if this shared instance is later modified.
*/
public QueryPolicy getQueryPolicyDefault();
/**
- * Copy query policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy query policy default.
*/
public QueryPolicy copyQueryPolicyDefault();
/**
- * Return batch header read policy default. Use when the policy will not be modified.
+ * Copy batch header read policy default to avoid problems if this shared instance is later modified.
*/
public BatchPolicy getBatchPolicyDefault();
/**
- * Copy batch header read policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch header read policy default.
*/
public BatchPolicy copyBatchPolicyDefault();
/**
- * Return batch header write policy default. Use when the policy will not be modified.
+ * Copy batch header write policy default to avoid problems if this shared instance is later modified.
*/
public BatchPolicy getBatchParentPolicyWriteDefault();
/**
- * Copy batch header write policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch header write policy default.
*/
public BatchPolicy copyBatchParentPolicyWriteDefault();
/**
- * Return batch detail write policy default. Use when the policy will not be modified.
+ * Copy batch detail write policy default to avoid problems if this shared instance is later modified.
*/
public BatchWritePolicy getBatchWritePolicyDefault();
/**
- * Copy batch detail write policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch detail write policy default.
*/
public BatchWritePolicy copyBatchWritePolicyDefault();
/**
- * Return batch detail delete policy default. Use when the policy will not be modified.
+ * Copy batch detail delete policy default to avoid problems if this shared instance is later modified.
*/
public BatchDeletePolicy getBatchDeletePolicyDefault();
/**
- * Copy batch detail delete policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch detail delete policy default.
*/
public BatchDeletePolicy copyBatchDeletePolicyDefault();
/**
- * Return batch detail UDF policy default. Use when the policy will not be modified.
+ * Copy batch detail UDF policy default to avoid problems if this shared instance is later modified.
*/
public BatchUDFPolicy getBatchUDFPolicyDefault();
/**
- * Copy batch detail UDF policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy batch detail UDF policy default.
*/
public BatchUDFPolicy copyBatchUDFPolicyDefault();
/**
- * Return info command policy default. Use when the policy will not be modified.
+ * Copy info command policy default to avoid problems if this shared instance is later modified.
*/
public InfoPolicy getInfoPolicyDefault();
/**
- * Copy info command policy default. Use when the policy will be modified for use in a specific transaction.
+ * Copy info command policy default.
*/
public InfoPolicy copyInfoPolicyDefault();
+ /**
+ * Copy MRT record version verify policy default.
+ */
+ public TxnVerifyPolicy copyTxnVerifyPolicyDefault();
+
+ /**
+ * Copy MRT roll forward/back policy default.
+ */
+ public TxnRollPolicy copyTxnRollPolicyDefault();
+
//-------------------------------------------------------
// Cluster Connection Management
//-------------------------------------------------------
@@ -246,13 +260,77 @@ public Node getNode(String nodeName)
*/
public Cluster getCluster();
+ //-------------------------------------------------------
+ // Multi-Record Transactions
+ //-------------------------------------------------------
+
+ /**
+ * Attempt to commit the given multi-record transaction. First, the expected record versions are
+ * sent to the server nodes for verification. If all nodes return success, the transaction is
+ * committed. Otherwise, the transaction is aborted.
+ *
+ * Requires server version 8.0+
+ *
+ * @param txn multi-record transaction
+ * @return status of the commit on success
+ * @throws AerospikeException.Commit if verify commit fails
+ */
+ CommitStatus commit(Txn txn)
+ throws AerospikeException.Commit;
+
+ /**
+ * Asynchronously attempt to commit the given multi-record transaction. First, the expected
+ * record versions are sent to the server nodes for verification. If all nodes return success,
+ * the transaction is committed. Otherwise, the transaction is aborted.
+ *
+ * This method registers the command with an event loop and returns.
+ * The event loop thread will process the command and send the results to the listener.
+ *
+ * Requires server version 8.0+
+ *
+ * @param eventLoop event loop that will process the command. If NULL, the event
+ * loop will be chosen by round-robin.
+ * @param listener where to send results
+ * @param txn multi-record transaction
+ * @throws AerospikeException if event loop registration fails
+ */
+ void commit(EventLoop eventLoop, CommitListener listener, Txn txn)
+ throws AerospikeException;
+
+ /**
+ * Abort and rollback the given multi-record transaction.
+ *
+ * Requires server version 8.0+
+ *
+ * @param txn multi-record transaction
+ * @return status of the abort
+ */
+ AbortStatus abort(Txn txn);
+
+ /**
+ * Asynchronously abort and rollback the given multi-record transaction.
+ *
+ * This method registers the command with an event loop and returns.
+ * The event loop thread will process the command and send the results to the listener.
+ *
+ * Requires server version 8.0+
+ *
+ * @param eventLoop event loop that will process the command. If NULL, the event
+ * loop will be chosen by round-robin.
+ * @param listener where to send results
+ * @param txn multi-record transaction
+ * @throws AerospikeException if event loop registration fails
+ */
+ void abort(EventLoop eventLoop, AbortListener listener, Txn txn)
+ throws AerospikeException;
+
//-------------------------------------------------------
// Write Record Operations
//-------------------------------------------------------
/**
* Write record bin(s).
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
*
* @param policy write configuration parameters, pass in null for defaults
@@ -268,7 +346,7 @@ public void put(WritePolicy policy, Key key, Bin... bins)
* This method registers the command with an event loop and returns.
* The event loop thread will process the command and send the results to the listener.
*
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
*
* @param eventLoop event loop that will process the command. If NULL, the event
@@ -288,7 +366,7 @@ public void put(EventLoop eventLoop, WriteListener listener, WritePolicy policy,
/**
* Append bin string values to existing record bin values.
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call only works for string values.
*
@@ -305,7 +383,7 @@ public void append(WritePolicy policy, Key key, Bin... bins)
* This method registers the command with an event loop and returns.
* The event loop thread will process the command and send the results to the listener.
*
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call only works for string values.
*
@@ -322,7 +400,7 @@ public void append(EventLoop eventLoop, WriteListener listener, WritePolicy poli
/**
* Prepend bin string values to existing record bin values.
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call works only for string values.
*
@@ -339,7 +417,7 @@ public void prepend(WritePolicy policy, Key key, Bin... bins)
* This method registers the command with an event loop and returns.
* The event loop thread will process the command and send the results to the listener.
*
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call only works for string values.
*
@@ -360,7 +438,7 @@ public void prepend(EventLoop eventLoop, WriteListener listener, WritePolicy pol
/**
* Add integer bin values to existing record bin values.
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call only works for integer values.
*
@@ -377,7 +455,7 @@ public void add(WritePolicy policy, Key key, Bin... bins)
* This method registers the command with an event loop and returns.
* The event loop thread will process the command and send the results to the listener.
*
- * The policy specifies the transaction timeout, record expiration and how the transaction is
+ * The policy specifies the command timeout, record expiration and how the command is
* handled when the record already exists.
* This call only works for integer values.
*
@@ -398,7 +476,7 @@ public void add(EventLoop eventLoop, WriteListener listener, WritePolicy policy,
/**
* Delete record for specified key.
- * The policy specifies the transaction timeout.
+ * The policy specifies the command timeout.
*
* @param policy delete configuration parameters, pass in null for defaults
* @param key unique record identifier
@@ -413,7 +491,7 @@ public boolean delete(WritePolicy policy, Key key)
* This method registers the command with an event loop and returns.
* The event loop thread will process the command and send the results to the listener.
*
- * The policy specifies the transaction timeout.
+ * The policy specifies the command timeout.
*
* @param eventLoop event loop that will process the command. If NULL, the event
* loop will be chosen by round-robin.
@@ -1379,7 +1457,7 @@ public void removeUdf(InfoPolicy policy, String serverPath)
* @param functionName user defined function
* @param args arguments passed in to user defined function
* @return return value of user defined function
- * @throws AerospikeException if transaction fails
+ * @throws AerospikeException if command fails
*/
public Object execute(WritePolicy policy, Key key, String packageName, String functionName, Value... args)
throws AerospikeException;
diff --git a/client/src/com/aerospike/client/Key.java b/client/src/com/aerospike/client/Key.java
index 242ebcb0a..f03e098c0 100644
--- a/client/src/com/aerospike/client/Key.java
+++ b/client/src/com/aerospike/client/Key.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2021 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -276,9 +276,10 @@ public Key(String namespace, byte[] digest, String setName, Value userKey) {
*/
@Override
public int hashCode() {
- final int prime = 31;
- int result = prime + Arrays.hashCode(digest);
- return prime * result + namespace.hashCode();
+ // The digest is already a hash, so pick 4 bytes from the 20 byte digest at a
+ // random offset (in this case 8).
+ final int result = Buffer.littleBytesToInt(digest, 8) + 31;
+ return result * 31 + namespace.hashCode();
}
/**
@@ -289,14 +290,16 @@ public boolean equals(Object obj) {
if (this == obj) {
return true;
}
+
if (obj == null || getClass() != obj.getClass()) {
return false;
}
+
Key other = (Key) obj;
- if (! Arrays.equals(digest, other.digest))
+ if (! Arrays.equals(digest, other.digest)) {
return false;
-
+ }
return namespace.equals(other.namespace);
}
diff --git a/client/src/com/aerospike/client/ResultCode.java b/client/src/com/aerospike/client/ResultCode.java
index 54a3ce096..5d454f80b 100644
--- a/client/src/com/aerospike/client/ResultCode.java
+++ b/client/src/com/aerospike/client/ResultCode.java
@@ -21,6 +21,11 @@
* side file proto.h.
*/
public final class ResultCode {
+ /**
+ * Multi-record transaction failed
+ */
+ public static final int TXN_FAILED = -17;
+
/**
* One or more keys failed in a batch.
*/
@@ -224,7 +229,7 @@ public final class ResultCode {
public static final int OP_NOT_APPLICABLE = 26;
/**
- * The transaction was not performed because the filter was false.
+ * The command was not performed because the filter was false.
*/
public static final int FILTERED_OUT = 27;
@@ -233,11 +238,37 @@ public final class ResultCode {
*/
public static final int LOST_CONFLICT = 28;
+ /**
+ * MRT record blocked by a different transaction.
+ */
+ public static final int MRT_BLOCKED = 29;
+
+ /**
+ * MRT read version mismatch identified during commit.
+ * Some other command changed the record outside of the transaction.
+ */
+ public static final int MRT_VERSION_MISMATCH = 30;
+
+ /**
+ * MRT deadline reached without a successful commit or abort.
+ */
+ public static final int MRT_EXPIRED = 31;
+
/**
* Write can't complete until XDR finishes shipping.
*/
public static final int XDR_KEY_BUSY = 32;
+ /**
+ * MRT was already committed.
+ */
+ public static final int MRT_COMMITTED = 33;
+
+ /**
+ * MRT was already aborted.
+ */
+ public static final int MRT_ABORTED = 34;
+
/**
* There are no more records left for query.
*/
@@ -460,6 +491,8 @@ public static boolean keepConnection(int resultCode) {
*/
public static String getResultString(int resultCode) {
switch (resultCode) {
+ case TXN_FAILED:
+ return "Multi-record transaction failed";
case BATCH_FAILED:
return "One or more keys failed in a batch";
@@ -582,14 +615,29 @@ public static String getResultString(int resultCode) {
return "Operation not applicable";
case FILTERED_OUT:
- return "Transaction filtered out";
+ return "Command filtered out";
case LOST_CONFLICT:
- return "Transaction failed due to conflict with XDR";
+ return "Command failed due to conflict with XDR";
+
+ case MRT_BLOCKED:
+ return "MRT record blocked by a different transaction";
+
+ case MRT_VERSION_MISMATCH:
+ return "MRT version mismatch";
+
+ case MRT_EXPIRED:
+ return "MRT expired";
case XDR_KEY_BUSY:
return "Write can't complete until XDR finishes shipping";
+ case MRT_COMMITTED:
+ return "MRT already committed";
+
+ case MRT_ABORTED:
+ return "MRT already aborted";
+
case QUERY_END:
return "Query end";
diff --git a/client/src/com/aerospike/client/Txn.java b/client/src/com/aerospike/client/Txn.java
new file mode 100644
index 000000000..d8e0e0330
--- /dev/null
+++ b/client/src/com/aerospike/client/Txn.java
@@ -0,0 +1,326 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Multi-record transaction (MRT). Each command in the MRT must use the same namespace.
+ */
+public final class Txn {
+ /**
+ * MRT state.
+ */
+ public static enum State {
+ OPEN,
+ VERIFIED,
+ COMMITTED,
+ ABORTED;
+ }
+
+ private static AtomicLong randomState = new AtomicLong(System.nanoTime());
+
+ private final long id;
+ private final ConcurrentHashMap<Key, Long> reads;
+ private final Set<Key> writes;
+ private Txn.State state;
+ private String namespace;
+ private int timeout;
+ private int deadline;
+ private boolean monitorInDoubt;
+ private boolean inDoubt;
+
+ /**
+ * Create MRT, assign random transaction id and initialize reads/writes hashmaps with default
+ * capacities. The default MRT timeout is 10 seconds.
+ */
+ public Txn() {
+ id = createId();
+ reads = new ConcurrentHashMap<>();
+ writes = ConcurrentHashMap.newKeySet();
+ state = Txn.State.OPEN;
+ timeout = 10; // seconds
+ }
+
+ /**
+ * Create MRT, assign random transaction id and initialize reads/writes hashmaps with given
+ * capacities. The default MRT timeout is 10 seconds.
+ *
+ * @param readsCapacity expected number of record reads in the MRT. Minimum value is 16.
+ * @param writesCapacity expected number of record writes in the MRT. Minimum value is 16.
+ */
+ public Txn(int readsCapacity, int writesCapacity) {
+ if (readsCapacity < 16) {
+ readsCapacity = 16;
+ }
+
+ if (writesCapacity < 16) {
+ writesCapacity = 16;
+ }
+
+ id = createId();
+ reads = new ConcurrentHashMap<>(readsCapacity);
+ writes = ConcurrentHashMap.newKeySet(writesCapacity);
+ state = Txn.State.OPEN;
+ timeout = 10; // seconds
+ }
+
+ private static long createId() {
+ // xorshift64* doesn't generate zeroes.
+ long oldState;
+ long newState;
+
+ do {
+ oldState = randomState.get();
+ newState = oldState;
+ newState ^= newState >>> 12;
+ newState ^= newState << 25;
+ newState ^= newState >>> 27;
+ } while (!randomState.compareAndSet(oldState, newState));
+
+ return newState * 0x2545f4914f6cdd1dl;
+ }
+
+ /**
+ * Return MRT ID.
+ */
+ public long getId() {
+ return id;
+ }
+
+ /**
+ * Set MRT timeout in seconds. The timer starts when the MRT monitor record is created.
+ * This occurs when the first command in the MRT is executed. If the timeout is reached before
+ * a commit or abort is called, the server will expire and rollback the MRT.
+ */
+ public void setTimeout(int timeout) {
+ this.timeout = timeout;
+ }
+
+ /**
+ * Return MRT timeout in seconds.
+ */
+ public int getTimeout() {
+ return timeout;
+ }
+
+ /**
+ * Verify current MRT state and namespace for a future read command.
+ */
+ void prepareRead(String ns) {
+ verifyCommand();
+ setNamespace(ns);
+ }
+
+ /**
+ * Verify current MRT state and namespaces for a future batch read command.
+ */
+ void prepareRead(Key[] keys) {
+ verifyCommand();
+ setNamespace(keys);
+ }
+
+ /**
+ * Verify current MRT state and namespaces for a future batch read command.
+ */
+ void prepareRead(List<BatchRead> records) {
+ verifyCommand();
+ setNamespace(records);
+ }
+
+ /**
+ * Verify that the MRT state allows future commands.
+ */
+ public void verifyCommand() {
+ if (state != Txn.State.OPEN) {
+ throw new AerospikeException("Command not allowed in current MRT state: " + state);
+ }
+ }
+
+ /**
+ * Process the results of a record read. For internal use only.
+ */
+ public void onRead(Key key, Long version) {
+ if (version != null) {
+ reads.put(key, version);
+ }
+ }
+
+ /**
+ * Get record version for a given key.
+ */
+ public Long getReadVersion(Key key) {
+ return reads.get(key);
+ }
+
+ /**
+ * Get all read keys and their versions.
+ */
+ public Set<Map.Entry<Key, Long>> getReads() {
+ return reads.entrySet();
+ }
+
+ /**
+ * Process the results of a record write. For internal use only.
+ */
+ public void onWrite(Key key, Long version, int resultCode) {
+ if (version != null) {
+ reads.put(key, version);
+ }
+ else {
+ if (resultCode == ResultCode.OK) {
+ reads.remove(key);
+ writes.add(key);
+ }
+ }
+ }
+
+ /**
+ * Add key to write hash when write command is in doubt (usually caused by timeout).
+ */
+ public void onWriteInDoubt(Key key) {
+ reads.remove(key);
+ writes.add(key);
+ }
+
+ /**
+ * Get all write keys and their versions.
+ */
+ public Set<Key> getWrites() {
+ return writes;
+ }
+
+ /**
+ * Set MRT namespace only if it doesn't already exist.
+ * If namespace already exists, verify new namespace is the same.
+ */
+ public void setNamespace(String ns) {
+ if (namespace == null) {
+ namespace = ns;
+ }
+ else if (! namespace.equals(ns)) {
+ throw new AerospikeException("Namespace must be the same for all commands in the MRT. orig: " +
+ namespace + " new: " + ns);
+ }
+ }
+
+ /**
+ * Set MRT namespaces for each key only if the namespace doesn't already exist.
+ * If namespace already exists, verify new namespace is the same.
+ */
+ private void setNamespace(Key[] keys) {
+ for (Key key : keys) {
+ setNamespace(key.namespace);
+ }
+ }
+
+ /**
+ * Set MRT namespaces for each key only if the namespace doesn't already exist.
+ * If namespace already exists, verify new namespace is the same.
+ */
+ private void setNamespace(List<BatchRead> records) {
+ for (BatchRead br : records) {
+ setNamespace(br.key.namespace);
+ }
+ }
+
+ /**
+ * Return MRT namespace.
+ */
+ public String getNamespace() {
+ return namespace;
+ }
+
+ /**
+ * Set MRT deadline. The deadline is a wall clock time calculated by the server from the
+ * MRT timeout that is sent by the client when creating the MRT monitor record. This deadline
+ * is used to avoid client/server clock skew issues. For internal use only.
+ */
+ public void setDeadline(int deadline) {
+ this.deadline = deadline;
+ }
+
+ /**
+ * Get MRT deadline. For internal use only.
+ */
+ public int getDeadline() {
+ return deadline;
+ }
+
+ /**
+ * Set that the MRT monitor existence is in doubt. For internal use only.
+ */
+ public void setMonitorInDoubt() {
+ this.monitorInDoubt = true;
+ }
+
+ /**
+ * Does MRT monitor record exist or is in doubt.
+ */
+ public boolean monitorMightExist() {
+ return deadline != 0 || monitorInDoubt;
+ }
+
+ /**
+ * Does MRT monitor record exist.
+ */
+ public boolean monitorExists() {
+ return deadline != 0;
+ }
+
+ /**
+ * Set MRT state. For internal use only.
+ */
+ public void setState(Txn.State state) {
+ this.state = state;
+ }
+
+ /**
+ * Return MRT state.
+ */
+ public Txn.State getState() {
+ return state;
+ }
+
+ /**
+ * Set MRT inDoubt flag. For internal use only.
+ */
+ public void setInDoubt(boolean inDoubt) {
+ this.inDoubt = inDoubt;
+ }
+
+ /**
+ * Return if MRT is inDoubt.
+ */
+ public boolean getInDoubt() {
+ return inDoubt;
+ }
+
+ /**
+ * Clear MRT. Remove all tracked keys.
+ */
+ public void clear() {
+ namespace = null;
+ deadline = 0;
+ reads.clear();
+ writes.clear();
+ }
+}
diff --git a/client/src/com/aerospike/client/admin/Role.java b/client/src/com/aerospike/client/admin/Role.java
index f521643b3..c1b8639a7 100644
--- a/client/src/com/aerospike/client/admin/Role.java
+++ b/client/src/com/aerospike/client/admin/Role.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2022 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -48,22 +48,22 @@ public final class Role {
public static final String SIndexAdmin = "sindex-admin";
/**
- * Allow read transactions.
+ * Allow read commands.
*/
public static final String Read = "read";
/**
- * Allow read and write transactions.
+ * Allow read and write commands.
*/
public static final String ReadWrite = "read-write";
/**
- * Allow read and write transactions within user defined functions.
+ * Allow read and write commands within user defined functions.
*/
public static final String ReadWriteUdf = "read-write-udf";
/**
- * Allow write transactions.
+ * Allow write commands.
*/
public static final String Write = "write";
@@ -110,18 +110,23 @@ public int hashCode() {
}
public boolean equals(Object obj) {
- if (this == obj)
+ if (this == obj) {
return true;
- if (obj == null)
+ }
+ if (obj == null) {
return false;
- if (getClass() != obj.getClass())
+ }
+ if (getClass() != obj.getClass()) {
return false;
+ }
Role other = (Role) obj;
if (name == null) {
- if (other.name != null)
+ if (other.name != null) {
return false;
- } else if (!name.equals(other.name))
+ }
+ } else if (!name.equals(other.name)) {
return false;
+ }
return true;
}
}
diff --git a/client/src/com/aerospike/client/async/AsyncBatch.java b/client/src/com/aerospike/client/async/AsyncBatch.java
index 22d4fecf6..ac9a834b6 100644
--- a/client/src/com/aerospike/client/async/AsyncBatch.java
+++ b/client/src/com/aerospike/client/async/AsyncBatch.java
@@ -27,6 +27,7 @@
import com.aerospike.client.Operation;
import com.aerospike.client.Record;
import com.aerospike.client.ResultCode;
+import com.aerospike.client.Txn;
import com.aerospike.client.command.BatchAttr;
import com.aerospike.client.command.BatchNode;
import com.aerospike.client.command.BatchNodeList;
@@ -71,10 +72,10 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
BatchRead record = records.get(batchIndex);
+ parseFieldsRead(record.key);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
}
@@ -126,10 +127,10 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
BatchRead record = records.get(batchIndex);
+ parseFieldsRead(record.key);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
}
@@ -193,7 +194,7 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
+ parseFieldsRead(keys[batchIndex]);
if (resultCode == 0) {
records[batchIndex] = parseRecord();
@@ -254,10 +255,10 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
Key keyOrig = keys[batchIndex];
+ parseFieldsRead(keyOrig);
+
if (resultCode == 0) {
Record record = parseRecord();
listener.onRecord(keyOrig, record);
@@ -311,12 +312,7 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
- if (opCount > 0) {
- throw new AerospikeException.Parse("Received bins that were not requested!");
- }
-
+ parseFieldsRead(keys[batchIndex]);
existsArray[batchIndex] = resultCode == 0;
}
@@ -364,13 +360,8 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
- if (opCount > 0) {
- throw new AerospikeException.Parse("Received bins that were not requested!");
- }
-
Key keyOrig = keys[batchIndex];
+ parseFieldsRead(keyOrig);
listener.onExists(keyOrig, resultCode == 0);
}
@@ -418,10 +409,10 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
BatchRecord record = records.get(batchIndex);
+ parseFields(record.key, record.hasWrite);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
return;
@@ -456,6 +447,10 @@ protected void setInDoubt(boolean inDoubt) {
if (record.resultCode == ResultCode.NO_RESPONSE) {
record.inDoubt = record.hasWrite;
+
+ if (record.inDoubt && policy.txn != null) {
+ policy.txn.onWriteInDoubt(record.key);
+ }
}
}
}
@@ -507,10 +502,10 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
BatchRecord record = records.get(batchIndex);
+ parseFields(record.key, record.hasWrite);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
}
@@ -547,6 +542,10 @@ protected void setInDoubt(boolean inDoubt) {
// Set inDoubt, but do not call onRecord() because user already has access to full
// BatchRecord list and can examine each record for inDoubt when the exception occurs.
record.inDoubt = record.hasWrite;
+
+ if (record.inDoubt && policy.txn != null) {
+ policy.txn.onWriteInDoubt(record.key);
+ }
}
}
}
@@ -600,10 +599,10 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
BatchRecord record = records[batchIndex];
+ parseFields(record.key, record.hasWrite);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
}
@@ -623,7 +622,11 @@ protected void setInDoubt(boolean inDoubt) {
BatchRecord record = records[index];
if (record.resultCode == ResultCode.NO_RESPONSE) {
- record.inDoubt = inDoubt;
+ record.inDoubt = true;
+
+ if (policy.txn != null) {
+ policy.txn.onWriteInDoubt(record.key);
+ }
}
}
}
@@ -680,9 +683,10 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
Key keyOrig = keys[batchIndex];
+
+ parseFields(keyOrig, attr.hasWrite);
+
BatchRecord record;
if (resultCode == 0) {
@@ -691,6 +695,7 @@ record = new BatchRecord(keyOrig, parseRecord(), attr.hasWrite);
else {
record = new BatchRecord(keyOrig, null, resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
}
+
sent[batchIndex] = true;
AsyncBatch.onRecord(listener, record, batchIndex);
}
@@ -703,6 +708,11 @@ protected void setInDoubt(boolean inDoubt) {
Key key = keys[index];
BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite);
sent[index] = true;
+
+ if (record.inDoubt && policy.txn != null) {
+ policy.txn.onWriteInDoubt(key);
+ }
+
AsyncBatch.onRecord(listener, record, index);
}
}
@@ -763,10 +773,10 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
BatchRecord record = records[batchIndex];
+ parseFields(record.key, record.hasWrite);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
return;
@@ -800,7 +810,11 @@ protected void setInDoubt(boolean inDoubt) {
BatchRecord record = records[index];
if (record.resultCode == ResultCode.NO_RESPONSE) {
- record.inDoubt = inDoubt;
+ record.inDoubt = true;
+
+ if (policy.txn != null) {
+ policy.txn.onWriteInDoubt(record.key);
+ }
}
}
}
@@ -863,9 +877,10 @@ protected void writeBuffer() {
@Override
protected void parseRow() {
- skipKey(fieldCount);
-
Key keyOrig = keys[batchIndex];
+
+ parseFields(keyOrig, attr.hasWrite);
+
BatchRecord record;
if (resultCode == 0) {
@@ -886,6 +901,7 @@ record = new BatchRecord(keyOrig, null, resultCode, Command.batchInDoubt(attr.ha
else {
record = new BatchRecord(keyOrig, null, resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite);
}
+
sent[batchIndex] = true;
AsyncBatch.onRecord(listener, record, batchIndex);
}
@@ -898,6 +914,10 @@ protected void setInDoubt(boolean inDoubt) {
Key key = keys[index];
BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite);
sent[index] = true;
+
+ if (record.inDoubt && policy.txn != null) {
+ policy.txn.onWriteInDoubt(record.key);
+ }
AsyncBatch.onRecord(listener, record, index);
}
}
@@ -914,6 +934,113 @@ protected List generateBatchNodes() {
}
}
+ //-------------------------------------------------------
+ // MRT
+ //-------------------------------------------------------
+
+ public static final class TxnVerify extends AsyncBatchCommand {
+ private final Key[] keys;
+ private final Long[] versions;
+ private final BatchRecord[] records;
+
+ public TxnVerify(
+ AsyncBatchExecutor parent,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ Long[] versions,
+ BatchRecord[] records
+ ) {
+ super(parent, batch, batchPolicy, false);
+ this.keys = keys;
+ this.versions = versions;
+ this.records = records;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setBatchTxnVerify(batchPolicy, keys, versions, batch);
+ }
+
+ @Override
+ protected void parseRow() {
+ skipKey(fieldCount);
+
+ BatchRecord record = records[batchIndex];
+
+ if (resultCode == ResultCode.OK) {
+ record.resultCode = resultCode;
+ }
+ else {
+ record.setError(resultCode, false);
+ parent.setRowError();
+ }
+ }
+
+ @Override
+ protected AsyncBatchCommand createCommand(BatchNode batchNode) {
+ return new TxnVerify(parent, batchNode, batchPolicy, keys, versions, records);
+ }
+
+ @Override
+ protected List<BatchNode> generateBatchNodes() {
+ return BatchNodeList.generate(parent.cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent);
+ }
+ }
+
+ public static final class TxnRoll extends AsyncBatchCommand {
+ private final Txn txn;
+ private final Key[] keys;
+ private final BatchRecord[] records;
+ private final BatchAttr attr;
+
+ public TxnRoll(
+ AsyncBatchExecutor parent,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Txn txn,
+ Key[] keys,
+ BatchRecord[] records,
+ BatchAttr attr
+ ) {
+ super(parent, batch, batchPolicy, false);
+ this.txn = txn;
+ this.keys = keys;
+ this.records = records;
+ this.attr = attr;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setBatchTxnRoll(batchPolicy, txn, keys, batch, attr);
+ }
+
+ @Override
+ protected void parseRow() {
+ skipKey(fieldCount);
+
+ BatchRecord record = records[batchIndex];
+
+ if (resultCode == ResultCode.OK) {
+ record.resultCode = resultCode;
+ }
+ else {
+ record.setError(resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter));
+ parent.setRowError();
+ }
+ }
+
+ @Override
+ protected AsyncBatchCommand createCommand(BatchNode batchNode) {
+ return new TxnRoll(parent, batchNode, batchPolicy, txn, keys, records, attr);
+ }
+
+ @Override
+ protected List<BatchNode> generateBatchNodes() {
+ return BatchNodeList.generate(parent.cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, true, parent);
+ }
+ }
+
//-------------------------------------------------------
// Batch Base Command
//-------------------------------------------------------
@@ -942,6 +1069,32 @@ void addSubException(AerospikeException ae) {
parent.addSubException(ae);
}
+ final void parseFieldsRead(Key key) {
+ if (policy.txn != null) {
+ Long version = parseVersion(fieldCount);
+ policy.txn.onRead(key, version);
+ }
+ else {
+ skipKey(fieldCount);
+ }
+ }
+
+ final void parseFields(Key key, boolean hasWrite) {
+ if (policy.txn != null) {
+ Long version = parseVersion(fieldCount);
+
+ if (hasWrite) {
+ policy.txn.onWrite(key, version, resultCode);
+ }
+ else {
+ policy.txn.onRead(key, version);
+ }
+ }
+ else {
+ skipKey(fieldCount);
+ }
+ }
+
@Override
protected boolean prepareRetry(boolean timeout) {
if (parent.done || ! (policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK)) {
diff --git a/client/src/com/aerospike/client/async/AsyncBatchExecutor.java b/client/src/com/aerospike/client/async/AsyncBatchExecutor.java
index 9d5d7cca1..6feb85409 100644
--- a/client/src/com/aerospike/client/async/AsyncBatchExecutor.java
+++ b/client/src/com/aerospike/client/async/AsyncBatchExecutor.java
@@ -314,7 +314,7 @@ protected AsyncBatchExecutor(EventLoop eventLoop, Cluster cluster, boolean hasRe
this.eventLoop = eventLoop;
this.cluster = cluster;
this.hasResultCode = hasResultCode;
- cluster.addTran();
+ cluster.addCommandCount();
}
public void execute(AsyncCommand[] cmds) {
diff --git a/client/src/com/aerospike/client/async/AsyncBatchSingle.java b/client/src/com/aerospike/client/async/AsyncBatchSingle.java
index 59e9c379e..e5ff3a07e 100644
--- a/client/src/com/aerospike/client/async/AsyncBatchSingle.java
+++ b/client/src/com/aerospike/client/async/AsyncBatchSingle.java
@@ -26,6 +26,7 @@
import com.aerospike.client.Operation;
import com.aerospike.client.Record;
import com.aerospike.client.ResultCode;
+import com.aerospike.client.Txn;
import com.aerospike.client.async.AsyncBatchExecutor.BatchRecordSequence;
import com.aerospike.client.cluster.Cluster;
import com.aerospike.client.cluster.Node;
@@ -63,8 +64,8 @@ public ReadGetSequence(
}
@Override
- protected final boolean parseResult() {
- super.parseResult();
+ protected void parseResult(RecordParser rp) {
+ super.parseResult(rp);
try {
listener.onRecord(record);
@@ -72,7 +73,6 @@ protected final boolean parseResult() {
catch (Throwable e) {
Log.error("Unexpected exception from onRecord(): " + Util.getErrorMessage(e));
}
- return true;
}
}
@@ -95,10 +95,9 @@ public ReadSequence(
}
@Override
- protected boolean parseResult() {
- super.parseResult();
+ protected void parseResult(RecordParser rp) {
+ super.parseResult(rp);
AsyncBatch.onRecord(listener, record, index);
- return true;
}
}
@@ -122,9 +121,7 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
+ protected void parseResult(RecordParser rp) {
if (rp.resultCode == ResultCode.OK) {
record.setRecord(rp.parseRecord(record.ops != null));
}
@@ -132,7 +129,6 @@ protected boolean parseResult() {
record.setError(rp.resultCode, false);
executor.setRowError();
}
- return true;
}
}
@@ -193,13 +189,10 @@ protected void writeBuffer() {
}
@Override
- protected final boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
+ protected void parseResult(RecordParser rp) {
if (rp.resultCode == ResultCode.OK) {
records[index] = rp.parseRecord(isOperation);
}
- return true;
}
}
@@ -252,15 +245,13 @@ protected void writeBuffer() {
}
@Override
- protected final boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ protected void parseResult(RecordParser rp) {
Record record = null;
if (rp.resultCode == ResultCode.OK) {
record = rp.parseRecord(isOperation);
}
AsyncBatch.onRecord(listener, key, record);
- return true;
}
}
@@ -289,15 +280,13 @@ protected void writeBuffer() {
}
@Override
- protected final boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ protected void parseResult(RecordParser rp) {
Record record = null;
if (rp.resultCode == ResultCode.OK) {
record = rp.parseRecord(false);
}
AsyncBatch.onRecord(listener, key, record);
- return true;
}
}
@@ -325,13 +314,10 @@ protected void writeBuffer() {
}
@Override
- protected final boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
+ protected void parseResult(RecordParser rp) {
if (rp.resultCode == ResultCode.OK) {
records[index] = rp.parseRecord(false);
}
- return true;
}
}
@@ -360,20 +346,13 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
- if (rp.opCount > 0) {
- throw new AerospikeException.Parse("Received bins that were not requested!");
- }
-
+ protected void parseResult(RecordParser rp) {
try {
listener.onExists(key, rp.resultCode == 0);
}
catch (Throwable e) {
Log.error("Unexpected exception from onExists(): " + Util.getErrorMessage(e));
}
- return true;
}
}
@@ -401,15 +380,8 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
- if (rp.opCount > 0) {
- throw new AerospikeException.Parse("Received bins that were not requested!");
- }
-
+ protected void parseResult(RecordParser rp) {
existsArray[index] = rp.resultCode == 0;
- return true;
}
}
@@ -449,8 +421,7 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ protected void parseResult(RecordParser rp) {
BatchRecord record;
if (rp.resultCode == 0) {
@@ -463,7 +434,6 @@ record = new BatchRecord(key, null, rp.resultCode,
}
parent.setSent(index);
AsyncBatch.onRecord(listener, record, index);
- return true;
}
@Override
@@ -501,9 +471,7 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
+ protected void parseResult(RecordParser rp) {
if (rp.resultCode == ResultCode.OK) {
record.setRecord(rp.parseRecord(true));
}
@@ -511,7 +479,6 @@ protected boolean parseResult() {
record.setError(rp.resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter));
executor.setRowError();
}
- return true;
}
@Override
@@ -546,10 +513,9 @@ public WriteSequence(
}
@Override
- protected boolean parseResult() {
- super.parseResult();
+ protected void parseResult(RecordParser rp) {
+ super.parseResult(rp);
AsyncBatch.onRecord(listener, record, index);
- return true;
}
// setInDoubt() is not overridden to call onRecord() because user already has access to full
@@ -579,9 +545,7 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
+ protected void parseResult(RecordParser rp) {
if (rp.resultCode == ResultCode.OK) {
record.setRecord(rp.parseRecord(true));
}
@@ -589,7 +553,6 @@ protected boolean parseResult() {
record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter));
executor.setRowError();
}
- return true;
}
@Override
@@ -624,10 +587,9 @@ public UDFSequence(
}
@Override
- protected boolean parseResult() {
- super.parseResult();
+ protected void parseResult(RecordParser rp) {
+ super.parseResult(rp);
AsyncBatch.onRecord(listener, record, index);
- return true;
}
// setInDoubt() is not overridden to call onRecord() because user already has access to full
@@ -657,9 +619,7 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
+ protected void parseResult(RecordParser rp) {
if (rp.resultCode == ResultCode.OK) {
record.setRecord(rp.parseRecord(false));
}
@@ -679,7 +639,6 @@ else if (rp.resultCode == ResultCode.UDF_BAD_RESPONSE) {
record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter));
executor.setRowError();
}
- return true;
}
@Override
@@ -728,8 +687,7 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ protected void parseResult(RecordParser rp) {
BatchRecord record;
if (rp.resultCode == ResultCode.OK) {
@@ -754,7 +712,6 @@ record = new BatchRecord(key, null, rp.resultCode, Command.batchInDoubt(true, co
}
parent.setSent(index);
AsyncBatch.onRecord(listener, record, index);
- return true;
}
@Override
@@ -798,9 +755,7 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
+ protected void parseResult(RecordParser rp) {
if (rp.resultCode == ResultCode.OK) {
record.setRecord(rp.parseRecord(false));
}
@@ -820,7 +775,6 @@ else if (rp.resultCode == ResultCode.UDF_BAD_RESPONSE) {
record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter));
executor.setRowError();
}
- return true;
}
@Override
@@ -864,8 +818,7 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ protected void parseResult(RecordParser rp) {
BatchRecord record;
if (rp.resultCode == 0) {
@@ -877,7 +830,6 @@ record = new BatchRecord(key, null, rp.resultCode, Command.batchInDoubt(true, co
}
parent.setSent(index);
AsyncBatch.onRecord(listener, record, index);
- return true;
}
@Override
@@ -909,10 +861,9 @@ public DeleteSequence(
}
@Override
- protected boolean parseResult() {
- super.parseResult();
+ protected void parseResult(RecordParser rp) {
+ super.parseResult(rp);
AsyncBatch.onRecord(listener, record, index);
- return true;
}
// setInDoubt() is not overridden to call onRecord() because user already has access to full
@@ -942,9 +893,7 @@ protected void writeBuffer() {
}
@Override
- protected boolean parseResult() {
- RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
-
+ protected void parseResult(RecordParser rp) {
if (rp.resultCode == 0) {
record.setRecord(new Record(null, rp.generation, rp.expiration));
}
@@ -952,7 +901,6 @@ protected boolean parseResult() {
record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter));
executor.setRowError();
}
- return true;
}
@Override
@@ -963,6 +911,95 @@ public void setInDoubt() {
}
}
+ //-------------------------------------------------------
+ // MRT
+ //-------------------------------------------------------
+
+ public static class TxnVerify extends AsyncBaseCommand {
+ private final long version;
+ private final BatchRecord record;
+
+ public TxnVerify(
+ AsyncBatchExecutor executor,
+ Cluster cluster,
+ BatchPolicy policy,
+ long version,
+ BatchRecord record,
+ Node node
+ ) {
+ super(executor, cluster, policy, record.key, node, false);
+ this.version = version;
+ this.record = record;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setTxnVerify(record.key, version);
+ }
+
+ @Override
+ protected boolean parseResult() {
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+
+ if (rp.resultCode == ResultCode.OK) {
+ record.resultCode = rp.resultCode;
+ }
+ else {
+ record.setError(rp.resultCode, false);
+ executor.setRowError();
+ }
+ return true;
+ }
+
+ @Override
+ protected void parseResult(RecordParser rp) {
+ }
+ }
+
+ public static class TxnRoll extends AsyncBaseCommand {
+ private final Txn txn;
+ private final BatchRecord record;
+ private final int attr;
+
+ public TxnRoll(
+ AsyncBatchExecutor executor,
+ Cluster cluster,
+ BatchPolicy policy,
+ Txn txn,
+ BatchRecord record,
+ Node node,
+ int attr
+ ) {
+ super(executor, cluster, policy, record.key, node, true);
+ this.txn = txn;
+ this.record = record;
+ this.attr = attr;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setTxnRoll(record.key, txn, attr);
+ }
+
+ @Override
+ protected boolean parseResult() {
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+
+ if (rp.resultCode == ResultCode.OK) {
+ record.resultCode = rp.resultCode;
+ }
+ else {
+ record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter));
+ executor.setRowError();
+ }
+ return true;
+ }
+
+ @Override
+ protected void parseResult(RecordParser rp) {
+ }
+ }
+
//-------------------------------------------------------
// Async Batch Base Command
//-------------------------------------------------------
@@ -1011,6 +1048,16 @@ void addSubException(AerospikeException ae) {
executor.addSubException(ae);
}
+ @Override
+ protected boolean parseResult() {
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ rp.parseFields(policy.txn, key, hasWrite);
+ parseResult(rp);
+ return true;
+ }
+
+ protected abstract void parseResult(RecordParser rp);
+
@Override
protected boolean prepareRetry(boolean timeout) {
if (hasWrite) {
diff --git a/client/src/com/aerospike/client/async/AsyncCommand.java b/client/src/com/aerospike/client/async/AsyncCommand.java
index 90dc63f08..ace22ec28 100644
--- a/client/src/com/aerospike/client/async/AsyncCommand.java
+++ b/client/src/com/aerospike/client/async/AsyncCommand.java
@@ -139,12 +139,6 @@ protected void putBuffer(byte[] buffer) {
}
}
- final void validateHeaderSize() {
- if (receiveSize < Command.MSG_REMAINING_HEADER_SIZE) {
- throw new AerospikeException.Parse("Invalid receive size: " + receiveSize);
- }
- }
-
boolean parseCommandResult() {
if (compressed) {
int usize = (int)Buffer.bytesToLong(dataBuffer, 0);
@@ -183,6 +177,14 @@ final void stop() {
valid = false;
}
+ final void onRetryException(Node node, int iteration, AerospikeException ae) {
+ ae.setNode(node);
+ ae.setPolicy(policy);
+ ae.setIteration(iteration);
+ ae.setInDoubt(isWrite(), commandSentCounter);
+ addSubException(ae);
+ }
+
void addSubException(AerospikeException ae) {
if (subExceptions == null) {
subExceptions = new ArrayList(policy.maxRetries);
@@ -190,11 +192,30 @@ void addSubException(AerospikeException ae) {
subExceptions.add(ae);
}
+ final void onFinalException(Node node, int iteration, AerospikeException ae) {
+ ae.setNode(node);
+ ae.setPolicy(policy);
+ ae.setIteration(iteration);
+ ae.setInDoubt(isWrite(), commandSentCounter);
+ ae.setSubExceptions(subExceptions);
+
+ if (ae.getInDoubt()) {
+ onInDoubt();
+ }
+
+ onFailure(ae);
+ }
+
+ void onInDoubt() {
+ // Write commands will override this method.
+ }
+
boolean retryBatch(Runnable command, long deadline) {
// Override this method in batch to regenerate node assignments.
return false;
}
+	// TODO: Make abstract.
boolean isWrite() {
return false;
}
diff --git a/client/src/com/aerospike/client/async/AsyncDelete.java b/client/src/com/aerospike/client/async/AsyncDelete.java
index 45278c7d5..862afc109 100644
--- a/client/src/com/aerospike/client/async/AsyncDelete.java
+++ b/client/src/com/aerospike/client/async/AsyncDelete.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -20,41 +20,16 @@
import com.aerospike.client.Key;
import com.aerospike.client.ResultCode;
import com.aerospike.client.cluster.Cluster;
-import com.aerospike.client.cluster.Node;
-import com.aerospike.client.cluster.Partition;
import com.aerospike.client.listener.DeleteListener;
-import com.aerospike.client.metrics.LatencyType;
import com.aerospike.client.policy.WritePolicy;
-public final class AsyncDelete extends AsyncCommand {
+public final class AsyncDelete extends AsyncWriteBase {
private final DeleteListener listener;
- private final WritePolicy writePolicy;
- private final Key key;
- private final Partition partition;
private boolean existed;
public AsyncDelete(Cluster cluster, DeleteListener listener, WritePolicy writePolicy, Key key) {
- super(writePolicy, true);
+ super(cluster, writePolicy, key);
this.listener = listener;
- this.writePolicy = writePolicy;
- this.key = key;
- this.partition = Partition.write(cluster, writePolicy, key);
- cluster.addTran();
- }
-
- @Override
- boolean isWrite() {
- return true;
- }
-
- @Override
- protected Node getNode(Cluster cluster) {
- return partition.getNodeWrite(cluster);
- }
-
- @Override
- protected LatencyType getLatencyType() {
- return LatencyType.WRITE;
}
@Override
@@ -64,11 +39,9 @@ protected void writeBuffer() {
@Override
protected boolean parseResult() {
- validateHeaderSize();
-
- int resultCode = dataBuffer[5] & 0xFF;
+ int resultCode = parseHeader();
- if (resultCode == 0) {
+ if (resultCode == ResultCode.OK) {
existed = true;
return true;
}
@@ -89,12 +62,6 @@ protected boolean parseResult() {
throw new AerospikeException(resultCode);
}
- @Override
- protected boolean prepareRetry(boolean timeout) {
- partition.prepareRetryWrite(timeout);
- return true;
- }
-
@Override
protected void onSuccess() {
if (listener != null) {
diff --git a/client/src/com/aerospike/client/async/AsyncExecute.java b/client/src/com/aerospike/client/async/AsyncExecute.java
index d0cc414cd..36183747f 100644
--- a/client/src/com/aerospike/client/async/AsyncExecute.java
+++ b/client/src/com/aerospike/client/async/AsyncExecute.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -20,20 +20,20 @@
import com.aerospike.client.AerospikeException;
import com.aerospike.client.Key;
+import com.aerospike.client.Record;
+import com.aerospike.client.ResultCode;
import com.aerospike.client.Value;
import com.aerospike.client.cluster.Cluster;
-import com.aerospike.client.cluster.Node;
-import com.aerospike.client.cluster.Partition;
+import com.aerospike.client.command.RecordParser;
import com.aerospike.client.listener.ExecuteListener;
-import com.aerospike.client.metrics.LatencyType;
import com.aerospike.client.policy.WritePolicy;
-public final class AsyncExecute extends AsyncRead {
+public final class AsyncExecute extends AsyncWriteBase {
private final ExecuteListener executeListener;
- private final WritePolicy writePolicy;
private final String packageName;
private final String functionName;
private final Value[] args;
+ private Record record;
public AsyncExecute(
Cluster cluster,
@@ -44,43 +44,65 @@ public AsyncExecute(
String functionName,
Value[] args
) {
- super(cluster, null, writePolicy, key, Partition.write(cluster, writePolicy, key), false);
+ super(cluster, writePolicy, key);
this.executeListener = listener;
- this.writePolicy = writePolicy;
this.packageName = packageName;
this.functionName = functionName;
this.args = args;
}
@Override
- boolean isWrite() {
- return true;
+ protected void writeBuffer() {
+ setUdf(writePolicy, key, packageName, functionName, args);
}
@Override
- protected Node getNode(Cluster cluster) {
- return partition.getNodeWrite(cluster);
- }
+ protected boolean parseResult() {
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ rp.parseFields(policy.txn, key, true);
- @Override
- protected LatencyType getLatencyType() {
- return LatencyType.WRITE;
- }
+ if (rp.resultCode == ResultCode.OK) {
+ record = rp.parseRecord(false);
+ return true;
+ }
- @Override
- protected void writeBuffer() throws AerospikeException {
- setUdf(writePolicy, key, packageName, functionName, args);
- }
+ if (rp.resultCode == ResultCode.UDF_BAD_RESPONSE) {
+ record = rp.parseRecord(false);
+ handleUdfError(rp.resultCode);
+ return true;
+ }
- @Override
- protected void handleNotFound(int resultCode) {
- throw new AerospikeException(resultCode);
+ if (rp.resultCode == ResultCode.FILTERED_OUT) {
+ if (policy.failOnFilteredOut) {
+ throw new AerospikeException(rp.resultCode);
+ }
+ return true;
+ }
+
+ throw new AerospikeException(rp.resultCode);
}
- @Override
- protected boolean prepareRetry(boolean timeout) {
- partition.prepareRetryWrite(timeout);
- return true;
+ private void handleUdfError(int resultCode) {
+ String ret = (String)record.bins.get("FAILURE");
+
+ if (ret == null) {
+ throw new AerospikeException(resultCode);
+ }
+
+ String message;
+ int code;
+
+ try {
+ String[] list = ret.split(":");
+ code = Integer.parseInt(list[2].trim());
+ message = list[0] + ':' + list[1] + ' ' + list[3];
+ }
+ catch (Throwable e) {
+ // Use generic exception if parse error occurs.
+ throw new AerospikeException(resultCode, ret);
+ }
+
+ throw new AerospikeException(code, message);
}
@Override
diff --git a/client/src/com/aerospike/client/async/AsyncExists.java b/client/src/com/aerospike/client/async/AsyncExists.java
index 812b7de9e..81171902e 100644
--- a/client/src/com/aerospike/client/async/AsyncExists.java
+++ b/client/src/com/aerospike/client/async/AsyncExists.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -20,34 +20,17 @@
import com.aerospike.client.Key;
import com.aerospike.client.ResultCode;
import com.aerospike.client.cluster.Cluster;
-import com.aerospike.client.cluster.Node;
-import com.aerospike.client.cluster.Partition;
+import com.aerospike.client.command.RecordParser;
import com.aerospike.client.listener.ExistsListener;
-import com.aerospike.client.metrics.LatencyType;
import com.aerospike.client.policy.Policy;
-public final class AsyncExists extends AsyncCommand {
+public final class AsyncExists extends AsyncReadBase {
private final ExistsListener listener;
- private final Key key;
- private final Partition partition;
private boolean exists;
public AsyncExists(Cluster cluster, ExistsListener listener, Policy policy, Key key) {
- super(policy, true);
+ super(cluster, policy, key);
this.listener = listener;
- this.key = key;
- this.partition = Partition.read(cluster, policy, key);
- cluster.addTran();
- }
-
- @Override
- Node getNode(Cluster cluster) {
- return partition.getNodeRead(cluster);
- }
-
- @Override
- protected LatencyType getLatencyType() {
- return LatencyType.READ;
}
@Override
@@ -57,35 +40,28 @@ protected void writeBuffer() {
@Override
protected boolean parseResult() {
- validateHeaderSize();
-
- int resultCode = dataBuffer[5] & 0xFF;
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ rp.parseFields(policy.txn, key, false);
- if (resultCode == 0) {
+ if (rp.resultCode == ResultCode.OK) {
exists = true;
return true;
}
- if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) {
+ if (rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) {
exists = false;
return true;
}
- if (resultCode == ResultCode.FILTERED_OUT) {
+ if (rp.resultCode == ResultCode.FILTERED_OUT) {
if (policy.failOnFilteredOut) {
- throw new AerospikeException(resultCode);
+ throw new AerospikeException(rp.resultCode);
}
exists = true;
return true;
}
- throw new AerospikeException(resultCode);
- }
-
- @Override
- protected boolean prepareRetry(boolean timeout) {
- partition.prepareRetryRead(timeout);
- return true;
+ throw new AerospikeException(rp.resultCode);
}
@Override
diff --git a/proxy/src/com/aerospike/client/proxy/RecordProxy.java b/client/src/com/aerospike/client/async/AsyncOperateRead.java
similarity index 54%
rename from proxy/src/com/aerospike/client/proxy/RecordProxy.java
rename to client/src/com/aerospike/client/async/AsyncOperateRead.java
index 2e5d5ea78..5dd629693 100644
--- a/proxy/src/com/aerospike/client/proxy/RecordProxy.java
+++ b/client/src/com/aerospike/client/async/AsyncOperateRead.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -14,38 +14,23 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
-
-package com.aerospike.client.proxy;
+package com.aerospike.client.async;
import com.aerospike.client.Key;
-import com.aerospike.client.Record;
-import com.aerospike.client.query.BVal;
-
-public class RecordProxy {
- /**
- * Optional Key.
- */
- public final Key key;
+import com.aerospike.client.cluster.Cluster;
+import com.aerospike.client.command.OperateArgs;
+import com.aerospike.client.listener.RecordListener;
- /**
- * Optional Record result after command has completed.
- */
- public final Record record;
+public final class AsyncOperateRead extends AsyncRead {
+ private final OperateArgs args;
- /**
- * Optional bVal.
- */
- public final BVal bVal;
-
- /**
- * The result code from proxy server.
- */
- public final int resultCode;
+ public AsyncOperateRead(Cluster cluster, RecordListener listener, Key key, OperateArgs args) {
+ super(cluster, listener, args.writePolicy, key, true);
+ this.args = args;
+ }
- public RecordProxy(int resultCode, Key key, Record record, BVal bVal) {
- this.resultCode = resultCode;
- this.key = key;
- this.record = record;
- this.bVal = bVal;
+ @Override
+ protected void writeBuffer() {
+ setOperate(args.writePolicy, key, args);
}
}
diff --git a/client/src/com/aerospike/client/async/AsyncOperate.java b/client/src/com/aerospike/client/async/AsyncOperateWrite.java
similarity index 51%
rename from client/src/com/aerospike/client/async/AsyncOperate.java
rename to client/src/com/aerospike/client/async/AsyncOperateWrite.java
index 29d009171..4d0326d59 100644
--- a/client/src/com/aerospike/client/async/AsyncOperate.java
+++ b/client/src/com/aerospike/client/async/AsyncOperateWrite.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -18,57 +18,60 @@
import com.aerospike.client.AerospikeException;
import com.aerospike.client.Key;
+import com.aerospike.client.Record;
+import com.aerospike.client.ResultCode;
import com.aerospike.client.cluster.Cluster;
-import com.aerospike.client.cluster.Node;
import com.aerospike.client.command.OperateArgs;
+import com.aerospike.client.command.RecordParser;
import com.aerospike.client.listener.RecordListener;
-import com.aerospike.client.metrics.LatencyType;
-public final class AsyncOperate extends AsyncRead {
+public final class AsyncOperateWrite extends AsyncWriteBase {
+ private final RecordListener listener;
private final OperateArgs args;
+ private Record record;
- public AsyncOperate(Cluster cluster, RecordListener listener, Key key, OperateArgs args) {
- super(cluster, listener, args.writePolicy, key, args.getPartition(cluster, key), true);
+ public AsyncOperateWrite(Cluster cluster, RecordListener listener, Key key, OperateArgs args) {
+ super(cluster, args.writePolicy, key);
+ this.listener = listener;
this.args = args;
}
@Override
- boolean isWrite() {
- return args.hasWrite;
+ protected void writeBuffer() {
+ setOperate(args.writePolicy, key, args);
}
@Override
- protected Node getNode(Cluster cluster) {
- return args.hasWrite ? partition.getNodeWrite(cluster) : partition.getNodeRead(cluster);
- }
+ protected boolean parseResult() {
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ rp.parseFields(policy.txn, key, true);
- @Override
- protected LatencyType getLatencyType() {
- return args.hasWrite ? LatencyType.WRITE : LatencyType.READ;
- }
+ if (rp.resultCode == ResultCode.OK) {
+ record = rp.parseRecord(true);
+ return true;
+ }
- @Override
- protected void writeBuffer() {
- setOperate(args.writePolicy, key, args);
+ if (rp.resultCode == ResultCode.FILTERED_OUT) {
+ if (policy.failOnFilteredOut) {
+ throw new AerospikeException(rp.resultCode);
+ }
+ return true;
+ }
+
+ throw new AerospikeException(rp.resultCode);
}
@Override
- protected void handleNotFound(int resultCode) {
- // Only throw not found exception for command with write operations.
- // Read-only command operations return a null record.
- if (args.hasWrite) {
- throw new AerospikeException(resultCode);
+ protected void onSuccess() {
+ if (listener != null) {
+ listener.onSuccess(key, record);
}
}
@Override
- protected boolean prepareRetry(boolean timeout) {
- if (args.hasWrite) {
- partition.prepareRetryWrite(timeout);
- }
- else {
- partition.prepareRetryRead(timeout);
+ protected void onFailure(AerospikeException e) {
+ if (listener != null) {
+ listener.onFailure(e);
}
- return true;
}
}
diff --git a/client/src/com/aerospike/client/async/AsyncQueryPartitionExecutor.java b/client/src/com/aerospike/client/async/AsyncQueryPartitionExecutor.java
index 21e9d126b..e8367448e 100644
--- a/client/src/com/aerospike/client/async/AsyncQueryPartitionExecutor.java
+++ b/client/src/com/aerospike/client/async/AsyncQueryPartitionExecutor.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -49,7 +49,7 @@ public AsyncQueryPartitionExecutor(
this.statement = statement;
this.tracker = tracker;
- cluster.addTran();
+ cluster.addCommandCount();
taskId = statement.prepareTaskId();
tracker.setSleepBetweenRetries(0);
queryPartitions();
diff --git a/client/src/com/aerospike/client/async/AsyncRead.java b/client/src/com/aerospike/client/async/AsyncRead.java
index 2b2959ffe..d95c8b378 100644
--- a/client/src/com/aerospike/client/async/AsyncRead.java
+++ b/client/src/com/aerospike/client/async/AsyncRead.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -21,50 +21,28 @@
import com.aerospike.client.Record;
import com.aerospike.client.ResultCode;
import com.aerospike.client.cluster.Cluster;
-import com.aerospike.client.cluster.Node;
-import com.aerospike.client.cluster.Partition;
-import com.aerospike.client.command.Buffer;
-import com.aerospike.client.command.Command;
+import com.aerospike.client.command.RecordParser;
import com.aerospike.client.listener.RecordListener;
-import com.aerospike.client.metrics.LatencyType;
import com.aerospike.client.policy.Policy;
-public class AsyncRead extends AsyncCommand {
+public class AsyncRead extends AsyncReadBase {
private final RecordListener listener;
- protected final Key key;
private final String[] binNames;
private final boolean isOperation;
- protected final Partition partition;
protected Record record;
public AsyncRead(Cluster cluster, RecordListener listener, Policy policy, Key key, String[] binNames) {
- super(policy, true);
+ super(cluster, policy, key);
this.listener = listener;
- this.key = key;
this.binNames = binNames;
this.isOperation = false;
- this.partition = Partition.read(cluster, policy, key);
- cluster.addTran();
}
- public AsyncRead(Cluster cluster, RecordListener listener, Policy policy, Key key, Partition partition, boolean isOperation) {
- super(policy, true);
+ public AsyncRead(Cluster cluster, RecordListener listener, Policy policy, Key key, boolean isOperation) {
+ super(cluster, policy, key);
this.listener = listener;
- this.key = key;
this.binNames = null;
this.isOperation = isOperation;
- this.partition = partition;
- cluster.addTran();
- }
-
- @Override
- Node getNode(Cluster cluster) {
- return partition.getNodeRead(cluster);
- }
-
- @Override
- protected LatencyType getLatencyType() {
- return LatencyType.READ;
}
@Override
@@ -74,79 +52,26 @@ protected void writeBuffer() {
@Override
protected final boolean parseResult() {
- validateHeaderSize();
-
- int resultCode = dataBuffer[dataOffset + 5] & 0xFF;
- int generation = Buffer.bytesToInt(dataBuffer, dataOffset + 6);
- int expiration = Buffer.bytesToInt(dataBuffer, dataOffset + 10);
- int fieldCount = Buffer.bytesToShort(dataBuffer, dataOffset + 18);
- int opCount = Buffer.bytesToShort(dataBuffer, dataOffset + 20);
- dataOffset += Command.MSG_REMAINING_HEADER_SIZE;
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ rp.parseFields(policy.txn, key, false);
- if (resultCode == 0) {
- if (opCount == 0) {
- // Bin data was not returned.
- record = new Record(null, generation, expiration);
- return true;
- }
- skipKey(fieldCount);
- record = parseRecord(opCount, generation, expiration, isOperation);
+ if (rp.resultCode == ResultCode.OK) {
+ this.record = rp.parseRecord(isOperation);
return true;
}
- if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) {
- handleNotFound(resultCode);
+ if (rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) {
return true;
}
- if (resultCode == ResultCode.FILTERED_OUT) {
+ if (rp.resultCode == ResultCode.FILTERED_OUT) {
if (policy.failOnFilteredOut) {
- throw new AerospikeException(resultCode);
+ throw new AerospikeException(rp.resultCode);
}
return true;
}
- if (resultCode == ResultCode.UDF_BAD_RESPONSE) {
- skipKey(fieldCount);
- record = parseRecord(opCount, generation, expiration, isOperation);
- handleUdfError(resultCode);
- return true;
- }
-
- throw new AerospikeException(resultCode);
- }
-
- @Override
- protected boolean prepareRetry(boolean timeout) {
- partition.prepareRetryRead(timeout);
- return true;
- }
-
- protected void handleNotFound(int resultCode) {
- // Do nothing in default case. Record will be null.
- }
-
- private final void handleUdfError(int resultCode) {
- String ret = (String)record.bins.get("FAILURE");
-
- if (ret == null) {
- throw new AerospikeException(resultCode);
- }
-
- String message;
- int code;
-
- try {
- String[] list = ret.split(":");
- code = Integer.parseInt(list[2].trim());
- message = list[0] + ':' + list[1] + ' ' + list[3];
- }
- catch (Throwable e) {
- // Use generic exception if parse error occurs.
- throw new AerospikeException(resultCode, ret);
- }
-
- throw new AerospikeException(code, message);
+ throw new AerospikeException(rp.resultCode);
}
@Override
diff --git a/client/src/com/aerospike/client/async/AsyncReadBase.java b/client/src/com/aerospike/client/async/AsyncReadBase.java
new file mode 100644
index 000000000..58b6d4b3a
--- /dev/null
+++ b/client/src/com/aerospike/client/async/AsyncReadBase.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client.async;
+
+import com.aerospike.client.Key;
+import com.aerospike.client.cluster.Cluster;
+import com.aerospike.client.cluster.Node;
+import com.aerospike.client.cluster.Partition;
+import com.aerospike.client.metrics.LatencyType;
+import com.aerospike.client.policy.Policy;
+
+public abstract class AsyncReadBase extends AsyncCommand {
+ protected final Key key;
+ protected final Partition partition;
+
+ public AsyncReadBase(Cluster cluster, Policy policy, Key key) {
+ super(policy, true);
+ this.key = key;
+ this.partition = Partition.read(cluster, policy, key);
+ cluster.addCommandCount();
+ }
+
+ @Override
+ boolean isWrite() {
+ return false;
+ }
+
+ @Override
+ Node getNode(Cluster cluster) {
+ return partition.getNodeRead(cluster);
+ }
+
+ @Override
+ protected LatencyType getLatencyType() {
+ return LatencyType.READ;
+ }
+
+ @Override
+ protected boolean prepareRetry(boolean timeout) {
+ partition.prepareRetryRead(timeout);
+ return true;
+ }
+}
diff --git a/client/src/com/aerospike/client/async/AsyncReadHeader.java b/client/src/com/aerospike/client/async/AsyncReadHeader.java
index 654b027dc..7ab259aab 100644
--- a/client/src/com/aerospike/client/async/AsyncReadHeader.java
+++ b/client/src/com/aerospike/client/async/AsyncReadHeader.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -21,35 +21,17 @@
import com.aerospike.client.Record;
import com.aerospike.client.ResultCode;
import com.aerospike.client.cluster.Cluster;
-import com.aerospike.client.cluster.Node;
-import com.aerospike.client.cluster.Partition;
-import com.aerospike.client.command.Buffer;
+import com.aerospike.client.command.RecordParser;
import com.aerospike.client.listener.RecordListener;
-import com.aerospike.client.metrics.LatencyType;
import com.aerospike.client.policy.Policy;
-public final class AsyncReadHeader extends AsyncCommand {
+public final class AsyncReadHeader extends AsyncReadBase {
private final RecordListener listener;
- private final Key key;
- private final Partition partition;
private Record record;
public AsyncReadHeader(Cluster cluster, RecordListener listener, Policy policy, Key key) {
- super(policy, true);
+ super(cluster, policy, key);
this.listener = listener;
- this.key = key;
- this.partition = Partition.read(cluster, policy, key);
- cluster.addTran();
- }
-
- @Override
- Node getNode(Cluster cluster) {
- return partition.getNodeRead(cluster);
- }
-
- @Override
- protected LatencyType getLatencyType() {
- return LatencyType.READ;
}
@Override
@@ -59,35 +41,26 @@ protected void writeBuffer() {
@Override
protected boolean parseResult() {
- validateHeaderSize();
-
- int resultCode = dataBuffer[5] & 0xFF;
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ rp.parseFields(policy.txn, key, false);
- if (resultCode == 0) {
- int generation = Buffer.bytesToInt(dataBuffer, 6);
- int expiration = Buffer.bytesToInt(dataBuffer, 10);
- record = new Record(null, generation, expiration);
+ if (rp.resultCode == ResultCode.OK) {
+ record = new Record(null, rp.generation, rp.expiration);
return true;
}
- if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) {
+ if (rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) {
return true;
}
- if (resultCode == ResultCode.FILTERED_OUT) {
+ if (rp.resultCode == ResultCode.FILTERED_OUT) {
if (policy.failOnFilteredOut) {
- throw new AerospikeException(resultCode);
+ throw new AerospikeException(rp.resultCode);
}
return true;
}
- throw new AerospikeException(resultCode);
- }
-
- @Override
- protected boolean prepareRetry(boolean timeout) {
- partition.prepareRetryRead(timeout);
- return true;
+ throw new AerospikeException(rp.resultCode);
}
@Override
diff --git a/client/src/com/aerospike/client/async/AsyncScanPartitionExecutor.java b/client/src/com/aerospike/client/async/AsyncScanPartitionExecutor.java
index 624d5f912..7ad3911e1 100644
--- a/client/src/com/aerospike/client/async/AsyncScanPartitionExecutor.java
+++ b/client/src/com/aerospike/client/async/AsyncScanPartitionExecutor.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -53,7 +53,7 @@ public AsyncScanPartitionExecutor(
this.binNames = binNames;
this.tracker = tracker;
- cluster.addTran();
+ cluster.addCommandCount();
tracker.setSleepBetweenRetries(0);
scanPartitions();
}
diff --git a/client/src/com/aerospike/client/async/AsyncTouch.java b/client/src/com/aerospike/client/async/AsyncTouch.java
index e4f988133..ff24608a3 100644
--- a/client/src/com/aerospike/client/async/AsyncTouch.java
+++ b/client/src/com/aerospike/client/async/AsyncTouch.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -20,40 +20,15 @@
import com.aerospike.client.Key;
import com.aerospike.client.ResultCode;
import com.aerospike.client.cluster.Cluster;
-import com.aerospike.client.cluster.Node;
-import com.aerospike.client.cluster.Partition;
import com.aerospike.client.listener.WriteListener;
-import com.aerospike.client.metrics.LatencyType;
import com.aerospike.client.policy.WritePolicy;
-public final class AsyncTouch extends AsyncCommand {
+public final class AsyncTouch extends AsyncWriteBase {
private final WriteListener listener;
- private final WritePolicy writePolicy;
- private final Key key;
- private final Partition partition;
public AsyncTouch(Cluster cluster, WriteListener listener, WritePolicy writePolicy, Key key) {
- super(writePolicy, true);
+ super(cluster, writePolicy, key);
this.listener = listener;
- this.writePolicy = writePolicy;
- this.key = key;
- this.partition = Partition.write(cluster, writePolicy, key);
- cluster.addTran();
- }
-
- @Override
- boolean isWrite() {
- return true;
- }
-
- @Override
- Node getNode(Cluster cluster) {
- return partition.getNodeWrite(cluster);
- }
-
- @Override
- protected LatencyType getLatencyType() {
- return LatencyType.WRITE;
}
@Override
@@ -63,11 +38,9 @@ protected void writeBuffer() {
@Override
protected boolean parseResult() {
- validateHeaderSize();
-
- int resultCode = dataBuffer[5] & 0xFF;
+ int resultCode = parseHeader();
- if (resultCode == 0) {
+ if (resultCode == ResultCode.OK) {
return true;
}
@@ -81,12 +54,6 @@ protected boolean parseResult() {
throw new AerospikeException(resultCode);
}
- @Override
- boolean prepareRetry(boolean timeout) {
- partition.prepareRetryWrite(timeout);
- return true;
- }
-
@Override
protected void onSuccess() {
if (listener != null) {
diff --git a/client/src/com/aerospike/client/async/AsyncTxnAddKeys.java b/client/src/com/aerospike/client/async/AsyncTxnAddKeys.java
new file mode 100644
index 000000000..3ac91732c
--- /dev/null
+++ b/client/src/com/aerospike/client/async/AsyncTxnAddKeys.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client.async;
+
+import com.aerospike.client.AerospikeException;
+import com.aerospike.client.Key;
+import com.aerospike.client.ResultCode;
+import com.aerospike.client.cluster.Cluster;
+import com.aerospike.client.command.OperateArgs;
+import com.aerospike.client.command.RecordParser;
+import com.aerospike.client.listener.RecordListener;
+
+public final class AsyncTxnAddKeys extends AsyncWriteBase {
+ private final RecordListener listener;
+ private final OperateArgs args;
+
+ public AsyncTxnAddKeys(Cluster cluster, RecordListener listener, Key key, OperateArgs args) {
+ super(cluster, args.writePolicy, key);
+ this.listener = listener;
+ this.args = args;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setTxnAddKeys(args.writePolicy, key, args);
+ }
+
+ @Override
+ protected boolean parseResult() {
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ rp.parseTranDeadline(policy.txn);
+
+ if (rp.resultCode == ResultCode.OK) {
+ return true;
+ }
+
+ throw new AerospikeException(rp.resultCode);
+ }
+
+ @Override
+ void onInDoubt() {
+ // The MRT monitor record might exist if AsyncTxnAddKeys command is inDoubt.
+ policy.txn.setMonitorInDoubt();
+ }
+
+ @Override
+ protected void onSuccess() {
+ if (listener != null) {
+ listener.onSuccess(key, null);
+ }
+ }
+
+ @Override
+ protected void onFailure(AerospikeException e) {
+ if (listener != null) {
+ listener.onFailure(e);
+ }
+ }
+}
diff --git a/client/src/com/aerospike/client/async/AsyncTxnClose.java b/client/src/com/aerospike/client/async/AsyncTxnClose.java
new file mode 100644
index 000000000..c06df23b6
--- /dev/null
+++ b/client/src/com/aerospike/client/async/AsyncTxnClose.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client.async;
+
+import com.aerospike.client.AerospikeException;
+import com.aerospike.client.Key;
+import com.aerospike.client.ResultCode;
+import com.aerospike.client.Txn;
+import com.aerospike.client.cluster.Cluster;
+import com.aerospike.client.listener.DeleteListener;
+import com.aerospike.client.policy.WritePolicy;
+
+public final class AsyncTxnClose extends AsyncWriteBase {
+ private final Txn txn;
+ private final DeleteListener listener;
+
+ public AsyncTxnClose(
+ Cluster cluster,
+ Txn txn,
+ DeleteListener listener,
+ WritePolicy writePolicy,
+ Key key
+ ) {
+ super(cluster, writePolicy, key);
+ this.txn = txn;
+ this.listener = listener;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setTxnClose(txn, key);
+ }
+
+ @Override
+ protected boolean parseResult() {
+ int resultCode = parseHeader();
+
+ if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) {
+ return true;
+ }
+
+ throw new AerospikeException(resultCode);
+ }
+
+ @Override
+ void onInDoubt() {
+ }
+
+ @Override
+ protected void onSuccess() {
+ listener.onSuccess(key, true);
+ }
+
+ @Override
+ protected void onFailure(AerospikeException e) {
+ listener.onFailure(e);
+ }
+}
diff --git a/proxy/src/com/aerospike/client/proxy/TouchCommandProxy.java b/client/src/com/aerospike/client/async/AsyncTxnMarkRollForward.java
similarity index 50%
rename from proxy/src/com/aerospike/client/proxy/TouchCommandProxy.java
rename to client/src/com/aerospike/client/async/AsyncTxnMarkRollForward.java
index b75fd4c74..8e640aa77 100644
--- a/proxy/src/com/aerospike/client/proxy/TouchCommandProxy.java
+++ b/client/src/com/aerospike/client/async/AsyncTxnMarkRollForward.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -14,67 +14,57 @@
* License for the specific language governing permissions and limitations under
* the License.
*/
-package com.aerospike.client.proxy;
+package com.aerospike.client.async;
import com.aerospike.client.AerospikeException;
import com.aerospike.client.Key;
import com.aerospike.client.ResultCode;
-import com.aerospike.client.command.Command;
+import com.aerospike.client.cluster.Cluster;
import com.aerospike.client.listener.WriteListener;
import com.aerospike.client.policy.WritePolicy;
-import com.aerospike.client.proxy.grpc.GrpcCallExecutor;
-import com.aerospike.proxy.client.KVSGrpc;
-public final class TouchCommandProxy extends SingleCommandProxy {
+public final class AsyncTxnMarkRollForward extends AsyncWriteBase {
private final WriteListener listener;
- private final WritePolicy writePolicy;
- private final Key key;
- public TouchCommandProxy(
- GrpcCallExecutor executor,
+ public AsyncTxnMarkRollForward(
+ Cluster cluster,
WriteListener listener,
WritePolicy writePolicy,
Key key
) {
- super(KVSGrpc.getTouchStreamingMethod(), executor, writePolicy);
+ super(cluster, writePolicy, key);
this.listener = listener;
- this.writePolicy = writePolicy;
- this.key = key;
}
@Override
- void writeCommand(Command command) {
- command.setTouch(writePolicy, key);
+ protected void writeBuffer() {
+ setTxnMarkRollForward(key);
}
@Override
- void parseResult(Parser parser) {
- int resultCode = parser.parseResultCode();
+ protected boolean parseResult() {
+ int resultCode = parseHeader();
- switch (resultCode) {
- case ResultCode.OK:
- break;
+ // MRT_COMMITTED is considered a success because it means a previous attempt already
+ // succeeded in notifying the server that the MRT will be rolled forward.
+ if (resultCode == ResultCode.OK || resultCode == ResultCode.MRT_COMMITTED) {
+ return true;
+ }
- case ResultCode.FILTERED_OUT:
- if (policy.failOnFilteredOut) {
- throw new AerospikeException(resultCode);
- }
- break;
+ throw new AerospikeException(resultCode);
+ }
- default:
- throw new AerospikeException(resultCode);
- }
+ @Override
+ void onInDoubt() {
+ }
- try {
- listener.onSuccess(key);
- }
- catch (Throwable t) {
- logOnSuccessError(t);
- }
+ @Override
+ protected void onSuccess() {
+ listener.onSuccess(key);
}
@Override
- void onFailure(AerospikeException ae) {
- listener.onFailure(ae);
+ protected void onFailure(AerospikeException e) {
+ listener.onFailure(e);
}
}
diff --git a/client/src/com/aerospike/client/async/AsyncTxnMonitor.java b/client/src/com/aerospike/client/async/AsyncTxnMonitor.java
new file mode 100644
index 000000000..332e51b64
--- /dev/null
+++ b/client/src/com/aerospike/client/async/AsyncTxnMonitor.java
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client.async;
+
+import com.aerospike.client.AerospikeException;
+import com.aerospike.client.BatchRecord;
+import com.aerospike.client.Key;
+import com.aerospike.client.Log;
+import com.aerospike.client.Operation;
+import com.aerospike.client.Record;
+import com.aerospike.client.ResultCode;
+import com.aerospike.client.Txn;
+import com.aerospike.client.cluster.Cluster;
+import com.aerospike.client.command.OperateArgs;
+import com.aerospike.client.command.TxnMonitor;
+import com.aerospike.client.listener.RecordListener;
+import com.aerospike.client.policy.BatchPolicy;
+import com.aerospike.client.policy.Policy;
+import com.aerospike.client.policy.WritePolicy;
+import com.aerospike.client.util.Util;
+import java.util.List;
+
+public abstract class AsyncTxnMonitor {
+ public static void execute(EventLoop eventLoop, Cluster cluster, WritePolicy policy, AsyncWriteBase command) {
+ if (policy.txn == null) {
+ // Command is not run under a MRT monitor. Run original command.
+ eventLoop.execute(cluster, command);
+ return;
+ }
+
+ Txn txn = policy.txn;
+ Key cmdKey = command.key;
+
+ if (txn.getWrites().contains(cmdKey)) {
+ // MRT monitor already contains this key. Run original command.
+ eventLoop.execute(cluster, command);
+ return;
+ }
+
+ // Add key to MRT monitor and then run original command.
+ Operation[] ops = TxnMonitor.getTranOps(txn, cmdKey);
+ AsyncTxnMonitor.Single ate = new AsyncTxnMonitor.Single(eventLoop, cluster, command);
+ ate.execute(policy, ops);
+ }
+
+ public static void executeBatch(
+ BatchPolicy policy,
+ AsyncBatchExecutor executor,
+ AsyncCommand[] commands,
+ Key[] keys
+ ) {
+ if (policy.txn == null) {
+ // Command is not run under a MRT monitor. Run original command.
+ executor.execute(commands);
+ return;
+ }
+
+ // Add write keys to MRT monitor and then run original command.
+ Operation[] ops = TxnMonitor.getTranOps(policy.txn, keys);
+ AsyncTxnMonitor.Batch ate = new AsyncTxnMonitor.Batch(executor, commands);
+ ate.execute(policy, ops);
+ }
+
+ public static void executeBatch(
+ BatchPolicy policy,
+ AsyncBatchExecutor executor,
+ AsyncCommand[] commands,
+ List<BatchRecord> records
+ ) {
+ if (policy.txn == null) {
+ // Command is not run under a MRT monitor. Run original command.
+ executor.execute(commands);
+ return;
+ }
+
+ // Add write keys to MRT monitor and then run original command.
+ Operation[] ops = TxnMonitor.getTranOps(policy.txn, records);
+
+ if (ops == null) {
+ // Readonly batch does not need to add key digests. Run original command.
+ executor.execute(commands);
+ return;
+ }
+
+ AsyncTxnMonitor.Batch ate = new AsyncTxnMonitor.Batch(executor, commands);
+ ate.execute(policy, ops);
+ }
+
+ private static class Single extends AsyncTxnMonitor {
+ private final AsyncWriteBase command;
+
+ private Single(EventLoop eventLoop, Cluster cluster, AsyncWriteBase command) {
+ super(eventLoop, cluster);
+ this.command = command;
+ }
+
+ @Override
+ void runCommand() {
+ eventLoop.execute(cluster, command);
+ }
+
+ @Override
+ void onFailure(AerospikeException ae) {
+ command.onFailure(ae);
+ }
+ }
+
+ private static class Batch extends AsyncTxnMonitor {
+ private final AsyncBatchExecutor executor;
+ private final AsyncCommand[] commands;
+
+ private Batch(AsyncBatchExecutor executor, AsyncCommand[] commands) {
+ super(executor.eventLoop, executor.cluster);
+ this.executor = executor;
+ this.commands = commands;
+ }
+
+ @Override
+ void runCommand() {
+ executor.execute(commands);
+ }
+
+ @Override
+ void onFailure(AerospikeException ae) {
+ executor.onFailure(ae);
+ }
+ }
+
+ final EventLoop eventLoop;
+ final Cluster cluster;
+
+ private AsyncTxnMonitor(EventLoop eventLoop, Cluster cluster) {
+ this.eventLoop = eventLoop;
+ this.cluster = cluster;
+ }
+
+ void execute(Policy policy, Operation[] ops) {
+ Key tranKey = TxnMonitor.getTxnMonitorKey(policy.txn);
+ WritePolicy wp = TxnMonitor.copyTimeoutPolicy(policy);
+
+ RecordListener tranListener = new RecordListener() {
+ @Override
+ public void onSuccess(Key key, Record record) {
+ try {
+ // Run original command.
+ runCommand();
+ }
+ catch (AerospikeException ae) {
+ notifyFailure(ae);
+ }
+ catch (Throwable t) {
+ notifyFailure(new AerospikeException(t));
+ }
+ }
+
+ @Override
+ public void onFailure(AerospikeException ae) {
+ notifyFailure(new AerospikeException(ResultCode.TXN_FAILED, "Failed to add key(s) to MRT monitor", ae));
+ }
+ };
+
+ // Add write key(s) to MRT monitor.
+ OperateArgs args = new OperateArgs(wp, null, null, ops);
+ AsyncTxnAddKeys tranCommand = new AsyncTxnAddKeys(cluster, tranListener, tranKey, args);
+ eventLoop.execute(cluster, tranCommand);
+ }
+
+ private void notifyFailure(AerospikeException ae) {
+ try {
+ onFailure(ae);
+ }
+ catch (Throwable t) {
+ Log.error("notifyCommandFailure onFailure() failed: " + Util.getStackTrace(t));
+ }
+ }
+
+ abstract void onFailure(AerospikeException ae);
+ abstract void runCommand();
+}
diff --git a/client/src/com/aerospike/client/async/AsyncTxnRoll.java b/client/src/com/aerospike/client/async/AsyncTxnRoll.java
new file mode 100644
index 000000000..0e0f20686
--- /dev/null
+++ b/client/src/com/aerospike/client/async/AsyncTxnRoll.java
@@ -0,0 +1,484 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client.async;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.aerospike.client.AbortStatus;
+import com.aerospike.client.AerospikeException;
+import com.aerospike.client.BatchRecord;
+import com.aerospike.client.CommitError;
+import com.aerospike.client.CommitStatus;
+import com.aerospike.client.Key;
+import com.aerospike.client.Log;
+import com.aerospike.client.ResultCode;
+import com.aerospike.client.Txn;
+import com.aerospike.client.cluster.Cluster;
+import com.aerospike.client.command.BatchAttr;
+import com.aerospike.client.command.BatchNode;
+import com.aerospike.client.command.BatchNodeList;
+import com.aerospike.client.command.Command;
+import com.aerospike.client.command.TxnMonitor;
+import com.aerospike.client.listener.AbortListener;
+import com.aerospike.client.listener.BatchRecordArrayListener;
+import com.aerospike.client.listener.CommitListener;
+import com.aerospike.client.listener.DeleteListener;
+import com.aerospike.client.listener.WriteListener;
+import com.aerospike.client.policy.BatchPolicy;
+import com.aerospike.client.policy.WritePolicy;
+import com.aerospike.client.util.Util;
+
+public final class AsyncTxnRoll {
+ private final Cluster cluster;
+ private final EventLoop eventLoop;
+ private final BatchPolicy verifyPolicy;
+ private final BatchPolicy rollPolicy;
+ private final WritePolicy writePolicy;
+ private final Txn txn;
+ private final Key tranKey;
+ private CommitListener commitListener;
+ private AbortListener abortListener;
+ private BatchRecord[] verifyRecords;
+ private BatchRecord[] rollRecords;
+ private AerospikeException verifyException;
+
+ public AsyncTxnRoll(
+ Cluster cluster,
+ EventLoop eventLoop,
+ BatchPolicy verifyPolicy,
+ BatchPolicy rollPolicy,
+ Txn txn
+ ) {
+ this.cluster = cluster;
+ this.eventLoop = eventLoop;
+ this.verifyPolicy = verifyPolicy;
+ this.rollPolicy = rollPolicy;
+ this.writePolicy = new WritePolicy(rollPolicy);
+ this.txn = txn;
+ this.tranKey = TxnMonitor.getTxnMonitorKey(txn);
+ }
+
+ public void verify(CommitListener listener) {
+ commitListener = listener;
+
+ BatchRecordArrayListener verifyListener = new BatchRecordArrayListener() {
+ @Override
+ public void onSuccess(BatchRecord[] records, boolean status) {
+ verifyRecords = records;
+
+ if (status) {
+ txn.setState(Txn.State.VERIFIED);
+ commit();
+ }
+ else {
+ txn.setState(Txn.State.ABORTED);
+ rollBack();
+ }
+ }
+
+ @Override
+ public void onFailure(BatchRecord[] records, AerospikeException ae) {
+ verifyRecords = records;
+ verifyException = ae;
+ txn.setState(Txn.State.ABORTED);
+ rollBack();
+ }
+ };
+
+ verify(verifyListener);
+ }
+
+ public void commit(CommitListener listener) {
+ commitListener = listener;
+ commit();
+ }
+
+ private void commit() {
+ if (txn.monitorExists()) {
+ markRollForward();
+ }
+ else {
+ // There is nothing to roll-forward.
+ txn.setState(Txn.State.COMMITTED);
+ closeOnCommit(true);
+ }
+ }
+
+ public void abort(AbortListener listener) {
+ abortListener = listener;
+ txn.setState(Txn.State.ABORTED);
+
+ BatchRecordArrayListener rollListener = new BatchRecordArrayListener() {
+ @Override
+ public void onSuccess(BatchRecord[] records, boolean status) {
+ rollRecords = records;
+
+ if (status) {
+ closeOnAbort();
+ }
+ else {
+ notifyAbortSuccess(AbortStatus.ROLL_BACK_ABANDONED);
+ }
+ }
+
+ @Override
+ public void onFailure(BatchRecord[] records, AerospikeException ae) {
+ rollRecords = records;
+ notifyAbortSuccess(AbortStatus.ROLL_BACK_ABANDONED);
+ }
+ };
+
+ roll(rollListener, Command.INFO4_MRT_ROLL_BACK);
+ }
+
+ private void verify(BatchRecordArrayListener verifyListener) {
+ // Validate record versions in a batch.
+ Set<Map.Entry<Key, Long>> reads = txn.getReads();
+ int max = reads.size();
+
+ if (max == 0) {
+ verifyListener.onSuccess(new BatchRecord[0], true);
+ return;
+ }
+
+ BatchRecord[] records = new BatchRecord[max];
+ Key[] keys = new Key[max];
+ Long[] versions = new Long[max];
+ int count = 0;
+
+ for (Map.Entry<Key, Long> entry : reads) {
+ Key key = entry.getKey();
+ keys[count] = key;
+ records[count] = new BatchRecord(key, false);
+ versions[count] = entry.getValue();
+ count++;
+ }
+
+ AsyncBatchExecutor.BatchRecordArray executor = new AsyncBatchExecutor.BatchRecordArray(
+ eventLoop, cluster, verifyListener, records);
+
+ List<BatchNode> bns = BatchNodeList.generate(cluster, verifyPolicy, keys, records, false, executor);
+ AsyncCommand[] commands = new AsyncCommand[bns.size()];
+
+ count = 0;
+
+ for (BatchNode bn : bns) {
+ if (bn.offsetsSize == 1) {
+ int i = bn.offsets[0];
+ commands[count++] = new AsyncBatchSingle.TxnVerify(
+ executor, cluster, verifyPolicy, versions[i], records[i], bn.node);
+ }
+ else {
+ commands[count++] = new AsyncBatch.TxnVerify(
+ executor, bn, verifyPolicy, keys, versions, records);
+ }
+ }
+ executor.execute(commands);
+ }
+
+ private void markRollForward() {
+ // Tell MRT monitor that a roll-forward will commence.
+ try {
+ WriteListener writeListener = new WriteListener() {
+ @Override
+ public void onSuccess(Key key) {
+ txn.setState(Txn.State.COMMITTED);
+ txn.setInDoubt(false);
+ rollForward();
+ }
+
+ @Override
+ public void onFailure(AerospikeException ae) {
+ notifyMarkRollForwardFailure(CommitError.MARK_ROLL_FORWARD_ABANDONED, ae);
+ }
+ };
+
+ AsyncTxnMarkRollForward command = new AsyncTxnMarkRollForward(cluster, writeListener, writePolicy, tranKey);
+ eventLoop.execute(cluster, command);
+ }
+ catch (Throwable t) {
+ notifyMarkRollForwardFailure(CommitError.MARK_ROLL_FORWARD_ABANDONED, t);
+ }
+ }
+
+ private void rollForward() {
+ try {
+ BatchRecordArrayListener rollListener = new BatchRecordArrayListener() {
+ @Override
+ public void onSuccess(BatchRecord[] records, boolean status) {
+ rollRecords = records;
+
+ if (status) {
+ closeOnCommit(true);
+ }
+ else {
+ notifyCommitSuccess(CommitStatus.ROLL_FORWARD_ABANDONED);
+ }
+ }
+
+ @Override
+ public void onFailure(BatchRecord[] records, AerospikeException ae) {
+ rollRecords = records;
+ notifyCommitSuccess(CommitStatus.ROLL_FORWARD_ABANDONED);
+ }
+ };
+
+ roll(rollListener, Command.INFO4_MRT_ROLL_FORWARD);
+ }
+ catch (Throwable t) {
+ notifyCommitSuccess(CommitStatus.ROLL_FORWARD_ABANDONED);
+ }
+ }
+
+ private void rollBack() {
+ try {
+ BatchRecordArrayListener rollListener = new BatchRecordArrayListener() {
+ @Override
+ public void onSuccess(BatchRecord[] records, boolean status) {
+ rollRecords = records;
+
+ if (status) {
+ closeOnCommit(false);
+ }
+ else {
+ notifyCommitFailure(CommitError.VERIFY_FAIL_ABORT_ABANDONED, null);
+ }
+ }
+
+ @Override
+ public void onFailure(BatchRecord[] records, AerospikeException ae) {
+ rollRecords = records;
+ notifyCommitFailure(CommitError.VERIFY_FAIL_ABORT_ABANDONED, ae);
+ }
+ };
+
+ roll(rollListener, Command.INFO4_MRT_ROLL_BACK);
+ }
+ catch (Throwable t) {
+ notifyCommitFailure(CommitError.VERIFY_FAIL_ABORT_ABANDONED, t);
+ }
+ }
+
+ private void roll(BatchRecordArrayListener rollListener, int txnAttr) {
+ Set<Key> keySet = txn.getWrites();
+
+ if (keySet.isEmpty()) {
+ rollListener.onSuccess(new BatchRecord[0], true);
+ return;
+ }
+
+ Key[] keys = keySet.toArray(new Key[keySet.size()]);
+ BatchRecord[] records = new BatchRecord[keys.length];
+
+ for (int i = 0; i < keys.length; i++) {
+ records[i] = new BatchRecord(keys[i], true);
+ }
+
+ BatchAttr attr = new BatchAttr();
+ attr.setTxn(txnAttr);
+
+ AsyncBatchExecutor.BatchRecordArray executor = new AsyncBatchExecutor.BatchRecordArray(
+ eventLoop, cluster, rollListener, records);
+
+ List<BatchNode> bns = BatchNodeList.generate(cluster, rollPolicy, keys, records, true, executor);
+ AsyncCommand[] commands = new AsyncCommand[bns.size()];
+ int count = 0;
+
+ for (BatchNode bn : bns) {
+ if (bn.offsetsSize == 1) {
+ int i = bn.offsets[0];
+ commands[count++] = new AsyncBatchSingle.TxnRoll(
+ executor, cluster, rollPolicy, txn, records[i], bn.node, txnAttr);
+ }
+ else {
+ commands[count++] = new AsyncBatch.TxnRoll(
+ executor, bn, rollPolicy, txn, keys, records, attr);
+ }
+ }
+ executor.execute(commands);
+ }
+
+ private void closeOnCommit(boolean verified) {
+ if (! txn.monitorMightExist()) {
+ // There is no MRT monitor to remove.
+ if (verified) {
+ notifyCommitSuccess(CommitStatus.OK);
+ }
+ else {
+ // Record verification failed and MRT was aborted.
+ notifyCommitFailure(CommitError.VERIFY_FAIL, null);
+ }
+ return;
+ }
+
+ try {
+ DeleteListener deleteListener = new DeleteListener() {
+ @Override
+ public void onSuccess(Key key, boolean existed) {
+ if (verified) {
+ notifyCommitSuccess(CommitStatus.OK);
+ }
+ else {
+ // Record verification failed and MRT was aborted.
+ notifyCommitFailure(CommitError.VERIFY_FAIL, null);
+ }
+ }
+
+ @Override
+ public void onFailure(AerospikeException ae) {
+ if (verified) {
+ notifyCommitSuccess(CommitStatus.CLOSE_ABANDONED);
+ }
+ else {
+ notifyCommitFailure(CommitError.VERIFY_FAIL_CLOSE_ABANDONED, ae);
+ }
+ }
+ };
+
+ AsyncTxnClose command = new AsyncTxnClose(cluster, txn, deleteListener, writePolicy, tranKey);
+ eventLoop.execute(cluster, command);
+ }
+ catch (Throwable t) {
+ if (verified) {
+ notifyCommitSuccess(CommitStatus.CLOSE_ABANDONED);
+ }
+ else {
+ notifyCommitFailure(CommitError.VERIFY_FAIL_CLOSE_ABANDONED, t);
+ }
+ }
+ }
+
+ private void closeOnAbort() {
+ if (! txn.monitorMightExist()) {
+ // There is no MRT monitor record to remove.
+ notifyAbortSuccess(AbortStatus.OK);
+ return;
+ }
+
+ try {
+ DeleteListener deleteListener = new DeleteListener() {
+ @Override
+ public void onSuccess(Key key, boolean existed) {
+ notifyAbortSuccess(AbortStatus.OK);
+ }
+
+ @Override
+ public void onFailure(AerospikeException ae) {
+ notifyAbortSuccess(AbortStatus.CLOSE_ABANDONED);
+ }
+ };
+
+ AsyncTxnClose command = new AsyncTxnClose(cluster, txn, deleteListener, writePolicy, tranKey);
+ eventLoop.execute(cluster, command);
+ }
+ catch (Throwable t) {
+ notifyAbortSuccess(AbortStatus.CLOSE_ABANDONED);
+ }
+ }
+
+ private void notifyCommitSuccess(CommitStatus status) {
+ txn.clear();
+
+ try {
+ commitListener.onSuccess(status);
+ }
+ catch (Throwable t) {
+ Log.error("CommitListener onSuccess() failed: " + Util.getStackTrace(t));
+ }
+ }
+
+ private void notifyCommitFailure(CommitError error, Throwable cause) {
+ AerospikeException.Commit aec = createCommitException(error, cause);
+
+ if (verifyException != null) {
+ aec.addSuppressed(verifyException);
+ }
+
+ notifyCommitFailure(aec);
+ }
+
+ private void notifyMarkRollForwardFailure(CommitError error, Throwable cause) {
+ AerospikeException.Commit aec = createCommitException(error, cause);
+
+ if (cause instanceof AerospikeException) {
+ AerospikeException ae = (AerospikeException)cause;
+
+ if (ae.getResultCode() == ResultCode.MRT_ABORTED) {
+ aec.setInDoubt(false);
+ txn.setInDoubt(false);
+ txn.setState(Txn.State.ABORTED);
+ }
+ else if (txn.getInDoubt()) {
+ // The transaction was already inDoubt and just failed again,
+ // so the new exception should also be inDoubt.
+ aec.setInDoubt(true);
+ }
+ else if (ae.getInDoubt()){
+ // The current exception is inDoubt.
+ aec.setInDoubt(true);
+ txn.setInDoubt(true);
+ }
+ }
+ else {
+ if (txn.getInDoubt()) {
+ aec.setInDoubt(true);
+ }
+ }
+
+ notifyCommitFailure(aec);
+ }
+
+ private AerospikeException.Commit createCommitException(CommitError error, Throwable cause) {
+ if (cause != null) {
+ AerospikeException.Commit aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, cause);
+
+ if (cause instanceof AerospikeException) {
+ AerospikeException src = (AerospikeException)cause;
+ aec.setNode(src.getNode());
+ aec.setPolicy(src.getPolicy());
+ aec.setIteration(src.getIteration());
+ aec.setInDoubt(src.getInDoubt());
+ }
+ return aec;
+ }
+ else {
+ return new AerospikeException.Commit(error, verifyRecords, rollRecords);
+ }
+ }
+
+ private void notifyCommitFailure(AerospikeException.Commit aec) {
+ try {
+ commitListener.onFailure(aec);
+ }
+ catch (Throwable t) {
+ Log.error("CommitListener onFailure() failed: " + Util.getStackTrace(t));
+ }
+ }
+
+ private void notifyAbortSuccess(AbortStatus status) {
+ txn.clear();
+
+ try {
+ abortListener.onSuccess(status);
+ }
+ catch (Throwable t) {
+ Log.error("AbortListener onSuccess() failed: " + Util.getStackTrace(t));
+ }
+ }
+}
diff --git a/client/src/com/aerospike/client/async/AsyncWrite.java b/client/src/com/aerospike/client/async/AsyncWrite.java
index fd81048dd..a2f2883d2 100644
--- a/client/src/com/aerospike/client/async/AsyncWrite.java
+++ b/client/src/com/aerospike/client/async/AsyncWrite.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -22,17 +22,11 @@
import com.aerospike.client.Operation;
import com.aerospike.client.ResultCode;
import com.aerospike.client.cluster.Cluster;
-import com.aerospike.client.cluster.Node;
-import com.aerospike.client.cluster.Partition;
import com.aerospike.client.listener.WriteListener;
-import com.aerospike.client.metrics.LatencyType;
import com.aerospike.client.policy.WritePolicy;
-public final class AsyncWrite extends AsyncCommand {
+public final class AsyncWrite extends AsyncWriteBase {
private final WriteListener listener;
- private final WritePolicy writePolicy;
- private final Key key;
- private final Partition partition;
private final Bin[] bins;
private final Operation.Type operation;
@@ -44,29 +38,10 @@ public AsyncWrite(
Bin[] bins,
Operation.Type operation
) {
- super(writePolicy, true);
+ super(cluster, writePolicy, key);
this.listener = listener;
- this.writePolicy = writePolicy;
- this.key = key;
- this.partition = Partition.write(cluster, writePolicy, key);
this.bins = bins;
this.operation = operation;
- cluster.addTran();
- }
-
- @Override
- boolean isWrite() {
- return true;
- }
-
- @Override
- Node getNode(Cluster cluster) {
- return partition.getNodeWrite(cluster);
- }
-
- @Override
- protected LatencyType getLatencyType() {
- return LatencyType.WRITE;
}
@Override
@@ -76,11 +51,9 @@ protected void writeBuffer() {
@Override
protected boolean parseResult() {
- validateHeaderSize();
-
- int resultCode = dataBuffer[5] & 0xFF;
+ int resultCode = parseHeader();
- if (resultCode == 0) {
+ if (resultCode == ResultCode.OK) {
return true;
}
@@ -94,12 +67,6 @@ protected boolean parseResult() {
throw new AerospikeException(resultCode);
}
- @Override
- boolean prepareRetry(boolean timeout) {
- partition.prepareRetryWrite(timeout);
- return true;
- }
-
@Override
protected void onSuccess() {
if (listener != null) {
diff --git a/client/src/com/aerospike/client/async/AsyncWriteBase.java b/client/src/com/aerospike/client/async/AsyncWriteBase.java
new file mode 100644
index 000000000..54e1c63ba
--- /dev/null
+++ b/client/src/com/aerospike/client/async/AsyncWriteBase.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2012-2024 Aerospike, Inc.
+ *
+ * Portions may be licensed to Aerospike, Inc. under one or more contributor
+ * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.aerospike.client.async;
+
+import com.aerospike.client.Key;
+import com.aerospike.client.cluster.Cluster;
+import com.aerospike.client.cluster.Node;
+import com.aerospike.client.cluster.Partition;
+import com.aerospike.client.command.RecordParser;
+import com.aerospike.client.metrics.LatencyType;
+import com.aerospike.client.policy.WritePolicy;
+
+public abstract class AsyncWriteBase extends AsyncCommand {
+ final WritePolicy writePolicy;
+ final Key key;
+ final Partition partition;
+
+ public AsyncWriteBase(Cluster cluster, WritePolicy writePolicy, Key key) {
+ super(writePolicy, true);
+ this.writePolicy = writePolicy;
+ this.key = key;
+ this.partition = Partition.write(cluster, writePolicy, key);
+ cluster.addCommandCount();
+ }
+
+ @Override
+ boolean isWrite() {
+ return true;
+ }
+
+ @Override
+ Node getNode(Cluster cluster) {
+ return partition.getNodeWrite(cluster);
+ }
+
+ @Override
+ protected LatencyType getLatencyType() {
+ return LatencyType.WRITE;
+ }
+
+ @Override
+ boolean prepareRetry(boolean timeout) {
+ partition.prepareRetryWrite(timeout);
+ return true;
+ }
+
+ @Override
+ void onInDoubt() {
+ if (writePolicy.txn != null) {
+ writePolicy.txn.onWriteInDoubt(key);
+ }
+ }
+
+ protected int parseHeader() {
+ RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize);
+ rp.parseFields(policy.txn, key, true);
+ return rp.resultCode;
+ }
+}
diff --git a/client/src/com/aerospike/client/async/NettyCommand.java b/client/src/com/aerospike/client/async/NettyCommand.java
index 4f1ff82d7..00d06bb9a 100644
--- a/client/src/com/aerospike/client/async/NettyCommand.java
+++ b/client/src/com/aerospike/client/async/NettyCommand.java
@@ -593,7 +593,7 @@ private void parseAuthBody() {
if (resultCode != 0 && resultCode != ResultCode.SECURITY_NOT_ENABLED) {
// Authentication failed. Session token probably expired.
// Signal tend thread to perform node login, so future
- // transactions do not fail.
+ // commands do not fail.
node.signalLogin();
// This is a rare event because the client tracks session
@@ -1041,11 +1041,7 @@ public void run() {
}
private void retry(AerospikeException ae, long deadline) {
- ae.setNode(node);
- ae.setPolicy(command.policy);
- ae.setIteration(iteration);
- ae.setInDoubt(command.isWrite(), command.commandSentCounter);
- command.addSubException(ae);
+ command.onRetryException(node, iteration, ae);
if (! command.prepareRetry(ae.getResultCode() != ResultCode.SERVER_NOT_AVAILABLE)) {
// Batch may be retried in separate commands.
@@ -1093,15 +1089,10 @@ private void onFatalError(AerospikeException ae) {
private void notifyFailure(AerospikeException ae) {
try {
- ae.setNode(node);
- ae.setPolicy(command.policy);
- ae.setIteration(iteration);
- ae.setInDoubt(command.isWrite(), command.commandSentCounter);
- ae.setSubExceptions(command.subExceptions);
- command.onFailure(ae);
+ command.onFinalException(node, iteration, ae);
}
catch (Throwable e) {
- logError("onFailure() error", e);
+ logError("onFinalException() error", e);
}
}
diff --git a/client/src/com/aerospike/client/async/NettyConnection.java b/client/src/com/aerospike/client/async/NettyConnection.java
index 49923061e..068854449 100644
--- a/client/src/com/aerospike/client/async/NettyConnection.java
+++ b/client/src/com/aerospike/client/async/NettyConnection.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2021 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -39,7 +39,7 @@ public NettyConnection(SocketChannel channel) {
}
/**
- * Validate connection in a transaction.
+ * Validate connection in a command.
*/
@Override
public boolean isValid(ByteBuffer notUsed) {
diff --git a/client/src/com/aerospike/client/async/NettyConnector.java b/client/src/com/aerospike/client/async/NettyConnector.java
index 6a77a213d..fa57b8288 100644
--- a/client/src/com/aerospike/client/async/NettyConnector.java
+++ b/client/src/com/aerospike/client/async/NettyConnector.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2022 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -209,7 +209,7 @@ private void parseAuthBody() {
if (resultCode != 0 && resultCode != ResultCode.SECURITY_NOT_ENABLED) {
// Authentication failed. Session token probably expired.
// Signal tend thread to perform node login, so future
- // transactions do not fail.
+ // commands do not fail.
node.signalLogin();
// This is a rare event because the client tracks session
diff --git a/client/src/com/aerospike/client/async/NettyRecover.java b/client/src/com/aerospike/client/async/NettyRecover.java
index 895fe229b..0409bfe2b 100644
--- a/client/src/com/aerospike/client/async/NettyRecover.java
+++ b/client/src/com/aerospike/client/async/NettyRecover.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2021 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -105,7 +105,7 @@ public NettyRecover(NettyCommand cmd) {
public final void timeout() {
//System.out.println("" + tranId + " timeout expired. close connection");
- // Transaction has been delayed long enough.
+ // Command has been delayed long enough.
// User has already been notified.
// timeoutTask has already been removed, so avoid cancel.
abort(false);
diff --git a/client/src/com/aerospike/client/async/NioCommand.java b/client/src/com/aerospike/client/async/NioCommand.java
index d25be5ac4..d06b1c5e4 100644
--- a/client/src/com/aerospike/client/async/NioCommand.java
+++ b/client/src/com/aerospike/client/async/NioCommand.java
@@ -524,7 +524,7 @@ private final void readAuthBody() {
if (resultCode != 0 && resultCode != ResultCode.SECURITY_NOT_ENABLED) {
// Authentication failed. Session token probably expired.
// Signal tend thread to perform node login, so future
- // transactions do not fail.
+ // commands do not fail.
node.signalLogin();
// This is a rare event because the client tracks session
@@ -935,11 +935,7 @@ public void run() {
}
private final void retry(AerospikeException ae, long deadline) {
- ae.setNode(node);
- ae.setPolicy(command.policy);
- ae.setIteration(iteration);
- ae.setInDoubt(command.isWrite(), command.commandSentCounter);
- command.addSubException(ae);
+ command.onRetryException(node, iteration, ae);
if (! command.prepareRetry(ae.getResultCode() != ResultCode.SERVER_NOT_AVAILABLE)) {
// Batch may be retried in separate commands.
@@ -976,15 +972,10 @@ protected final void onApplicationError(AerospikeException ae) {
private final void notifyFailure(AerospikeException ae) {
try {
- ae.setNode(node);
- ae.setPolicy(command.policy);
- ae.setIteration(iteration);
- ae.setInDoubt(command.isWrite(), command.commandSentCounter);
- ae.setSubExceptions(command.subExceptions);
- command.onFailure(ae);
+ command.onFinalException(node, iteration, ae);
}
catch (Throwable e) {
- Log.error("onFailure() error: " + Util.getErrorMessage(e));
+ Log.error("onFinalException() error: " + Util.getErrorMessage(e));
}
}
diff --git a/client/src/com/aerospike/client/async/NioConnection.java b/client/src/com/aerospike/client/async/NioConnection.java
index a06c89581..487b80385 100644
--- a/client/src/com/aerospike/client/async/NioConnection.java
+++ b/client/src/com/aerospike/client/async/NioConnection.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -116,7 +116,7 @@ public boolean read(ByteBuffer byteBuffer) throws IOException {
}
/**
- * Validate connection in a transaction. Return true if socket is connected and
+ * Validate connection in a command. Return true if socket is connected and
* has no data in it's buffer. Return false, if not connected, socket read error
* or has data in it's buffer.
*/
diff --git a/client/src/com/aerospike/client/async/NioConnector.java b/client/src/com/aerospike/client/async/NioConnector.java
index 623ce0806..acbbc7c0a 100644
--- a/client/src/com/aerospike/client/async/NioConnector.java
+++ b/client/src/com/aerospike/client/async/NioConnector.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -159,7 +159,7 @@ private final void readAuthBody() {
if (resultCode != 0 && resultCode != ResultCode.SECURITY_NOT_ENABLED) {
// Authentication failed. Session token probably expired.
// Signal tend thread to perform node login, so future
- // transactions do not fail.
+ // commands do not fail.
node.signalLogin();
// This is a rare event because the client tracks session
diff --git a/client/src/com/aerospike/client/async/NioRecover.java b/client/src/com/aerospike/client/async/NioRecover.java
index f640713b5..ff59812f7 100644
--- a/client/src/com/aerospike/client/async/NioRecover.java
+++ b/client/src/com/aerospike/client/async/NioRecover.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -118,7 +118,7 @@ public NioRecover(NioCommand cmd) {
public final void timeout() {
//System.out.println("" + tranId + " timeout expired. close connection");
- // Transaction has been delayed long enough.
+ // Command has been delayed long enough.
// User has already been notified.
// timeoutTask has already been removed, so avoid cancel.
abort(false);
diff --git a/client/src/com/aerospike/client/cluster/Cluster.java b/client/src/com/aerospike/client/cluster/Cluster.java
index 10e601a4c..f8ae07562 100644
--- a/client/src/com/aerospike/client/cluster/Cluster.java
+++ b/client/src/com/aerospike/client/cluster/Cluster.java
@@ -128,7 +128,7 @@ public class Cluster implements Runnable, Closeable {
// Extra event loop state for this cluster.
public final EventState[] eventState;
- // Maximum socket idle to validate connections in transactions.
+	// Maximum socket idle to validate connections in commands.
private final long maxSocketIdleNanosTran;
// Maximum socket idle to trim peak connections to min connections.
@@ -207,7 +207,7 @@ public class Cluster implements Runnable, Closeable {
MetricsPolicy metricsPolicy;
private volatile MetricsListener metricsListener;
private final AtomicLong retryCount = new AtomicLong();
- private final AtomicLong tranCount = new AtomicLong();
+ private final AtomicLong commandCount = new AtomicLong();
private final AtomicLong delayQueueTimeoutCount = new AtomicLong();
public Cluster(AerospikeClient client, ClientPolicy policy, Host[] hosts) {
@@ -1382,37 +1382,45 @@ public final boolean isActive() {
}
/**
- * Increment transaction count when metrics are enabled.
+ * Increment command count when metrics are enabled.
*/
- public final void addTran() {
+ public final void addCommandCount() {
if (metricsEnabled) {
- tranCount.getAndIncrement();
+ commandCount.getAndIncrement();
}
}
/**
- * Return transaction count. The value is cumulative and not reset per metrics interval.
+ * Return command count. The value is cumulative and not reset per metrics interval.
+ */
+ public final long getCommandCount() {
+ return commandCount.get();
+ }
+
+ /**
+ * Return command count. The value is cumulative and not reset per metrics interval.
+ * This function is left for backwards compatibility. Use {@link #getCommandCount()} instead.
*/
public final long getTranCount() {
- return tranCount.get();
+ return commandCount.get();
}
/**
- * Increment transaction retry count. There can be multiple retries for a single transaction.
+ * Increment command retry count. There can be multiple retries for a single command.
*/
public final void addRetry() {
retryCount.getAndIncrement();
}
/**
- * Add transaction retry count. There can be multiple retries for a single transaction.
+ * Add command retry count. There can be multiple retries for a single command.
*/
public final void addRetries(int count) {
retryCount.getAndAdd(count);
}
/**
- * Return transaction retry count. The value is cumulative and not reset per metrics interval.
+ * Return command retry count. The value is cumulative and not reset per metrics interval.
*/
public final long getRetryCount() {
return retryCount.get();
diff --git a/client/src/com/aerospike/client/cluster/ClusterStats.java b/client/src/com/aerospike/client/cluster/ClusterStats.java
index a0f27c224..e75f4849a 100644
--- a/client/src/com/aerospike/client/cluster/ClusterStats.java
+++ b/client/src/com/aerospike/client/cluster/ClusterStats.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -49,7 +49,7 @@ public final class ClusterStats {
public final int invalidNodeCount;
/**
- * Count of transaction retries since the client was started.
+ * Count of command retries since the client was started.
*/
public final long retryCount;
diff --git a/client/src/com/aerospike/client/cluster/Node.java b/client/src/com/aerospike/client/cluster/Node.java
index 8fa6be5a7..61730040b 100644
--- a/client/src/com/aerospike/client/cluster/Node.java
+++ b/client/src/com/aerospike/client/cluster/Node.java
@@ -955,7 +955,7 @@ public final boolean putAsyncConnection(AsyncConnection conn, int index) {
// This should not happen since connection slots are reserved in advance
// and total connections should never exceed maxSize. If it does happen,
// it's highly likely that total count was decremented twice for the same
- // transaction, causing the connection balancer to create more connections
+ // command, causing the connection balancer to create more connections
// than necessary. Attempt to correct situation by not decrementing total
// when this excess connection is closed.
conn.close();
diff --git a/client/src/com/aerospike/client/cluster/NodeStats.java b/client/src/com/aerospike/client/cluster/NodeStats.java
index 8cf78da92..02c3f3094 100644
--- a/client/src/com/aerospike/client/cluster/NodeStats.java
+++ b/client/src/com/aerospike/client/cluster/NodeStats.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -36,14 +36,14 @@ public final class NodeStats {
public ConnectionStats async;
/**
- * Transaction error count since node was initialized. If the error is retryable, multiple errors per
- * transaction may occur.
+ * Command error count since node was initialized. If the error is retryable, multiple errors per
+ * command may occur.
*/
public final long errorCount;
/**
- * Transaction timeout count since node was initialized. If the timeout is retryable (ie socketTimeout),
- * multiple timeouts per transaction may occur.
+	 * Command timeout count since node was initialized. If the timeout is retryable (i.e. socketTimeout),
+ * multiple timeouts per command may occur.
*/
public final long timeoutCount;
diff --git a/client/src/com/aerospike/client/command/Batch.java b/client/src/com/aerospike/client/command/Batch.java
index fecf05ed4..6a2a40732 100644
--- a/client/src/com/aerospike/client/command/Batch.java
+++ b/client/src/com/aerospike/client/command/Batch.java
@@ -26,6 +26,7 @@
import com.aerospike.client.Operation;
import com.aerospike.client.Record;
import com.aerospike.client.ResultCode;
+import com.aerospike.client.Txn;
import com.aerospike.client.cluster.Cluster;
import com.aerospike.client.metrics.LatencyType;
import com.aerospike.client.policy.BatchPolicy;
@@ -63,10 +64,10 @@ protected void writeBuffer() {
@Override
protected boolean parseRow() {
- skipKey(fieldCount);
-
BatchRead record = records.get(batchIndex);
+ parseFieldsRead(record.key);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
}
@@ -132,7 +133,7 @@ protected void writeBuffer() {
@Override
protected boolean parseRow() {
- skipKey(fieldCount);
+ parseFieldsRead(keys[batchIndex]);
if (resultCode == 0) {
records[batchIndex] = parseRecord();
@@ -185,12 +186,7 @@ protected void writeBuffer() {
@Override
protected boolean parseRow() {
- skipKey(fieldCount);
-
- if (opCount > 0) {
- throw new AerospikeException.Parse("Received bins that were not requested!");
- }
-
+ parseFieldsRead(keys[batchIndex]);
existsArray[batchIndex] = resultCode == 0;
return true;
}
@@ -240,10 +236,10 @@ protected void writeBuffer() {
@Override
protected boolean parseRow() {
- skipKey(fieldCount);
-
BatchRecord record = records.get(batchIndex);
+ parseFields(record);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
return true;
@@ -275,6 +271,10 @@ protected void inDoubt() {
if (record.resultCode == ResultCode.NO_RESPONSE) {
record.inDoubt = record.hasWrite;
+
+ if (record.inDoubt && policy.txn != null) {
+ policy.txn.onWriteInDoubt(record.key);
+ }
}
}
}
@@ -329,10 +329,10 @@ protected void writeBuffer() {
@Override
protected boolean parseRow() {
- skipKey(fieldCount);
-
BatchRecord record = records[batchIndex];
+ parseFields(record);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
}
@@ -354,6 +354,10 @@ protected void inDoubt() {
if (record.resultCode == ResultCode.NO_RESPONSE) {
record.inDoubt = true;
+
+ if (policy.txn != null) {
+ policy.txn.onWriteInDoubt(record.key);
+ }
}
}
}
@@ -414,10 +418,10 @@ protected void writeBuffer() {
@Override
protected boolean parseRow() {
- skipKey(fieldCount);
-
BatchRecord record = records[batchIndex];
+ parseFields(record);
+
if (resultCode == 0) {
record.setRecord(parseRecord());
return true;
@@ -453,6 +457,10 @@ protected void inDoubt() {
if (record.resultCode == ResultCode.NO_RESPONSE) {
record.inDoubt = true;
+
+ if (policy.txn != null) {
+ policy.txn.onWriteInDoubt(record.key);
+ }
}
}
}
@@ -468,6 +476,142 @@ protected List generateBatchNodes() {
}
}
+ //-------------------------------------------------------
+ // MRT
+ //-------------------------------------------------------
+
+ public static final class TxnVerify extends BatchCommand {
+ private final Key[] keys;
+ private final Long[] versions;
+ private final BatchRecord[] records;
+
+ public TxnVerify(
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Key[] keys,
+ Long[] versions,
+ BatchRecord[] records,
+ BatchStatus status
+ ) {
+ super(cluster, batch, batchPolicy, status, false);
+ this.keys = keys;
+ this.versions = versions;
+ this.records = records;
+ }
+
+ @Override
+ protected boolean isWrite() {
+ return false;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setBatchTxnVerify(batchPolicy, keys, versions, batch);
+ }
+
+ @Override
+ protected boolean parseRow() {
+ skipKey(fieldCount);
+
+ BatchRecord record = records[batchIndex];
+
+ if (resultCode == 0) {
+ record.resultCode = resultCode;
+ }
+ else {
+ record.setError(resultCode, false);
+ status.setRowError();
+ }
+ return true;
+ }
+
+ @Override
+ protected BatchCommand createCommand(BatchNode batchNode) {
+ return new TxnVerify(cluster, batchNode, batchPolicy, keys, versions, records, status);
+ }
+
+ @Override
+ protected List generateBatchNodes() {
+ return BatchNodeList.generate(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, false, status);
+ }
+ }
+
+ public static final class TxnRoll extends BatchCommand {
+ private final Txn txn;
+ private final Key[] keys;
+ private final BatchRecord[] records;
+ private final BatchAttr attr;
+
+ public TxnRoll(
+ Cluster cluster,
+ BatchNode batch,
+ BatchPolicy batchPolicy,
+ Txn txn,
+ Key[] keys,
+ BatchRecord[] records,
+ BatchAttr attr,
+ BatchStatus status
+ ) {
+ super(cluster, batch, batchPolicy, status, false);
+ this.txn = txn;
+ this.keys = keys;
+ this.records = records;
+ this.attr = attr;
+ }
+
+ @Override
+ protected boolean isWrite() {
+ return attr.hasWrite;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setBatchTxnRoll(batchPolicy, txn, keys, batch, attr);
+ }
+
+ @Override
+ protected boolean parseRow() {
+ skipKey(fieldCount);
+
+ BatchRecord record = records[batchIndex];
+
+ if (resultCode == 0) {
+ record.resultCode = resultCode;
+ }
+ else {
+ record.setError(resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter));
+ status.setRowError();
+ }
+ return true;
+ }
+
+ @Override
+ protected void inDoubt() {
+ if (!attr.hasWrite) {
+ return;
+ }
+
+ for (int index : batch.offsets) {
+ BatchRecord record = records[index];
+
+ if (record.resultCode == ResultCode.NO_RESPONSE) {
+ record.inDoubt = true;
+ }
+ }
+ }
+
+ @Override
+ protected BatchCommand createCommand(BatchNode batchNode) {
+ return new TxnRoll(cluster, batchNode, batchPolicy, txn, keys, records, attr, status);
+ }
+
+ @Override
+ protected List generateBatchNodes() {
+ return BatchNodeList.generate(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status);
+ }
+ }
+
//-------------------------------------------------------
// Batch Base Command
//-------------------------------------------------------
@@ -519,6 +663,32 @@ public void run() {
}
}
+ protected final void parseFieldsRead(Key key) {
+ if (policy.txn != null) {
+ Long version = parseVersion(fieldCount);
+ policy.txn.onRead(key, version);
+ }
+ else {
+ skipKey(fieldCount);
+ }
+ }
+
+ protected final void parseFields(BatchRecord br) {
+ if (policy.txn != null) {
+ Long version = parseVersion(fieldCount);
+
+ if (br.hasWrite) {
+ policy.txn.onWrite(br.key, version, resultCode);
+ }
+ else {
+ policy.txn.onRead(br.key, version);
+ }
+ }
+ else {
+ skipKey(fieldCount);
+ }
+ }
+
@Override
protected void addSubException(AerospikeException ae) {
status.addSubException(ae);
diff --git a/client/src/com/aerospike/client/command/BatchAttr.java b/client/src/com/aerospike/client/command/BatchAttr.java
index 7f063537e..f589aae9d 100644
--- a/client/src/com/aerospike/client/command/BatchAttr.java
+++ b/client/src/com/aerospike/client/command/BatchAttr.java
@@ -32,6 +32,7 @@ public final class BatchAttr {
public int readAttr;
public int writeAttr;
public int infoAttr;
+ public int txnAttr;
public int expiration;
public int opSize;
public short generation;
@@ -321,4 +322,16 @@ public void setOpSize(Operation[] ops) {
}
opSize = dataOffset;
}
+
+ public void setTxn(int attr) {
+ filterExp = null;
+ readAttr = 0;
+ writeAttr = Command.INFO2_WRITE | Command.INFO2_RESPOND_ALL_OPS | Command.INFO2_DURABLE_DELETE;
+ infoAttr = 0;
+ txnAttr = attr;
+ expiration = 0;
+ generation = 0;
+ hasWrite = true;
+ sendKey = false;
+ }
}
diff --git a/client/src/com/aerospike/client/command/BatchExecutor.java b/client/src/com/aerospike/client/command/BatchExecutor.java
index 7bdb473df..a38f76451 100644
--- a/client/src/com/aerospike/client/command/BatchExecutor.java
+++ b/client/src/com/aerospike/client/command/BatchExecutor.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -27,7 +27,7 @@
public final class BatchExecutor {
public static void execute(Cluster cluster, BatchPolicy policy, IBatchCommand[] commands, BatchStatus status) {
- cluster.addTran();
+ cluster.addCommandCount();
if (policy.maxConcurrentThreads == 1 || commands.length <= 1) {
// Run batch requests sequentially in same thread.
diff --git a/client/src/com/aerospike/client/command/BatchSingle.java b/client/src/com/aerospike/client/command/BatchSingle.java
index a2ee0942d..7157f2747 100644
--- a/client/src/com/aerospike/client/command/BatchSingle.java
+++ b/client/src/com/aerospike/client/command/BatchSingle.java
@@ -25,6 +25,7 @@
import com.aerospike.client.Operation;
import com.aerospike.client.Record;
import com.aerospike.client.ResultCode;
+import com.aerospike.client.Txn;
import com.aerospike.client.Value;
import com.aerospike.client.cluster.Cluster;
import com.aerospike.client.cluster.Connection;
@@ -92,6 +93,7 @@ protected void writeBuffer() {
@Override
protected void parseResult(Connection conn) throws IOException {
RecordParser rp = new RecordParser(conn, dataBuffer);
+ rp.parseFields(policy.txn, key, false);
if (rp.resultCode == ResultCode.OK) {
records[index] = rp.parseRecord(isOperation);
@@ -127,6 +129,7 @@ protected void writeBuffer() {
@Override
protected void parseResult(Connection conn) throws IOException {
RecordParser rp = new RecordParser(conn, dataBuffer);
+ rp.parseFields(policy.txn, key, false);
if (rp.resultCode == 0) {
records[index] = new Record(null, rp.generation, rp.expiration);
@@ -156,6 +159,7 @@ protected void writeBuffer() {
@Override
protected void parseResult(Connection conn) throws IOException {
RecordParser rp = new RecordParser(conn, dataBuffer);
+ rp.parseFields(policy.txn, key, false);
if (rp.resultCode == ResultCode.OK) {
record.setRecord(rp.parseRecord(true));
@@ -195,7 +199,7 @@ protected void writeBuffer() {
@Override
protected void parseResult(Connection conn) throws IOException {
RecordParser rp = new RecordParser(conn, dataBuffer);
-
+ rp.parseFields(policy.txn, key, false);
existsArray[index] = rp.resultCode == 0;
}
}
@@ -228,6 +232,7 @@ protected void writeBuffer() {
@Override
protected void parseResult(Connection conn) throws IOException {
RecordParser rp = new RecordParser(conn, dataBuffer);
+ rp.parseFields(policy.txn, key, record.hasWrite);
if (rp.resultCode == ResultCode.OK) {
record.setRecord(rp.parseRecord(true));
@@ -271,6 +276,7 @@ protected void writeBuffer() {
@Override
protected void parseResult(Connection conn) throws IOException {
RecordParser rp = new RecordParser(conn, dataBuffer);
+ rp.parseFields(policy.txn, key, true);
if (rp.resultCode == ResultCode.OK || rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) {
record.setRecord(new Record(null, rp.generation, rp.expiration));
@@ -323,6 +329,7 @@ protected void writeBuffer() {
@Override
protected void parseResult(Connection conn) throws IOException {
RecordParser rp = new RecordParser(conn, dataBuffer);
+ rp.parseFields(policy.txn, key, true);
if (rp.resultCode == ResultCode.OK) {
record.setRecord(rp.parseRecord(false));
@@ -353,6 +360,92 @@ public void setInDoubt() {
}
}
+ //-------------------------------------------------------
+ // MRT
+ //-------------------------------------------------------
+
+ public static final class TxnVerify extends BaseCommand {
+ private final long version;
+ private final BatchRecord record;
+
+ public TxnVerify(
+ Cluster cluster,
+ BatchPolicy policy,
+ long version,
+ BatchRecord record,
+ BatchStatus status,
+ Node node
+ ) {
+ super(cluster, policy, status, record.key, node, false);
+ this.version = version;
+ this.record = record;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setTxnVerify(record.key, version);
+ }
+
+ @Override
+ protected void parseResult(Connection conn) throws IOException {
+ RecordParser rp = new RecordParser(conn, dataBuffer);
+
+ if (rp.resultCode == ResultCode.OK) {
+ record.resultCode = rp.resultCode;
+ }
+ else {
+ record.setError(rp.resultCode, false);
+ status.setRowError();
+ }
+ }
+ }
+
+ public static final class TxnRoll extends BaseCommand {
+ private final Txn txn;
+ private final BatchRecord record;
+ private final int attr;
+
+ public TxnRoll(
+ Cluster cluster,
+ BatchPolicy policy,
+ Txn txn,
+ BatchRecord record,
+ BatchStatus status,
+ Node node,
+ int attr
+ ) {
+ super(cluster, policy, status, record.key, node, true);
+ this.txn = txn;
+ this.record = record;
+ this.attr = attr;
+ }
+
+ @Override
+ protected void writeBuffer() {
+ setTxnRoll(record.key, txn, attr);
+ }
+
+ @Override
+ protected void parseResult(Connection conn) throws IOException {
+ RecordParser rp = new RecordParser(conn, dataBuffer);
+
+ if (rp.resultCode == ResultCode.OK) {
+ record.resultCode = rp.resultCode;
+ }
+ else {
+ record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter));
+ status.setRowError();
+ }
+ }
+
+ @Override
+ public void setInDoubt() {
+ if (record.resultCode == ResultCode.NO_RESPONSE) {
+ record.inDoubt = true;
+ }
+ }
+ }
+
public static abstract class BaseCommand extends SyncCommand implements IBatchCommand {
BatchExecutor parent;
BatchStatus status;
diff --git a/client/src/com/aerospike/client/command/Buffer.java b/client/src/com/aerospike/client/command/Buffer.java
index d419df6d2..95dae6ef5 100644
--- a/client/src/com/aerospike/client/command/Buffer.java
+++ b/client/src/com/aerospike/client/command/Buffer.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -276,7 +276,7 @@ public static int utf8DigitsToInt(byte[] buf, int begin, int end) {
int mult = 1;
for (int i = end - 1; i >= begin; i--) {
- val += ((int)buf[i] - 48) * mult;
+ val += (buf[i] - 48) * mult;
mult *= 10;
}
return val;
@@ -451,6 +451,38 @@ public static long littleBytesToLong(byte[] buf, int offset) {
);
}
+ //-------------------------------------------------------
+ // Transaction version conversions.
+ //-------------------------------------------------------
+
+ /**
+ * Convert long to a 7 byte record version for MRT.
+ */
+ public static void longToVersionBytes(long v, byte[] buf, int offset) {
+ buf[offset++] = (byte)(v >>> 0);
+ buf[offset++] = (byte)(v >>> 8);
+ buf[offset++] = (byte)(v >>> 16);
+ buf[offset++] = (byte)(v >>> 24);
+ buf[offset++] = (byte)(v >>> 32);
+ buf[offset++] = (byte)(v >>> 40);
+ buf[offset] = (byte)(v >>> 48);
+ }
+
+ /**
+ * Convert 7 byte record version to a long for MRT.
+ */
+ public static long versionBytesToLong(byte[] buf, int offset) {
+ return (
+ ((long)(buf[offset] & 0xFF) << 0) |
+ ((long)(buf[offset+1] & 0xFF) << 8) |
+ ((long)(buf[offset+2] & 0xFF) << 16) |
+ ((long)(buf[offset+3] & 0xFF) << 24) |
+ ((long)(buf[offset+4] & 0xFF) << 32) |
+ ((long)(buf[offset+5] & 0xFF) << 40) |
+ ((long)(buf[offset+6] & 0xFF) << 48)
+ );
+ }
+
//-------------------------------------------------------
// 32 bit number conversions.
//-------------------------------------------------------
diff --git a/client/src/com/aerospike/client/command/Command.java b/client/src/com/aerospike/client/command/Command.java
index 7ae5e92d1..1481f4e9f 100644
--- a/client/src/com/aerospike/client/command/Command.java
+++ b/client/src/com/aerospike/client/command/Command.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2023 Aerospike, Inc.
+ * Copyright 2012-2024 Aerospike, Inc.
*
* Portions may be licensed to Aerospike, Inc. under one or more contributor
* license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0.
@@ -54,6 +54,7 @@
import com.aerospike.client.query.PartitionStatus;
import com.aerospike.client.query.PartitionTracker.NodePartitions;
import com.aerospike.client.query.Statement;
+import com.aerospike.client.Txn;
import com.aerospike.client.util.Packer;
import com.aerospike.client.util.ThreadLocalData;
@@ -71,7 +72,7 @@ public class Command {
public static final int INFO2_DELETE = (1 << 1); // Fling a record into the belly of Moloch.
public static final int INFO2_GENERATION = (1 << 2); // Update if expected generation == old.
public static final int INFO2_GENERATION_GT = (1 << 3); // Update if new generation >= old, good for restore.
- public static final int INFO2_DURABLE_DELETE = (1 << 4); // Transaction resulting in record deletion leaves tombstone (Enterprise only).
+ public static final int INFO2_DURABLE_DELETE = (1 << 4); // Command resulting in record deletion leaves tombstone (Enterprise only).
public static final int INFO2_CREATE_ONLY = (1 << 5); // Create only. Fail if record already exists.
public static final int INFO2_RELAX_AP_LONG_QUERY = (1 << 6); // Treat as long query, but relax read consistency.
public static final int INFO2_RESPOND_ALL_OPS = (1 << 7); // Return a result for every operation.
@@ -100,6 +101,10 @@ public class Command {
// 1 0 allow replica
// 1 1 allow unavailable
+ public static final int INFO4_MRT_VERIFY_READ = (1 << 0); // Send MRT version to the server to be verified.
+ public static final int INFO4_MRT_ROLL_FORWARD = (1 << 1); // Roll forward MRT.
+ public static final int INFO4_MRT_ROLL_BACK = (1 << 2); // Roll back MRT.
+
public static final byte STATE_READ_AUTH_HEADER = 1;
public static final byte STATE_READ_HEADER = 2;
public static final byte STATE_READ_DETAIL = 3;
@@ -110,12 +115,12 @@ public class Command {
public static final byte BATCH_MSG_INFO = 0x2;
public static final byte BATCH_MSG_GEN = 0x4;
public static final byte BATCH_MSG_TTL = 0x8;
+ public static final byte BATCH_MSG_INFO4 = 0x10;
public static final int MSG_TOTAL_HEADER_SIZE = 30;
public static final int FIELD_HEADER_SIZE = 5;
public static final int OPERATION_HEADER_SIZE = 8;
public static final int MSG_REMAINING_HEADER_SIZE = 22;
- public static final int DIGEST_SIZE = 20;
public static final int COMPRESS_THRESHOLD = 128;
public static final long CL_MSG_VERSION = 2L;
public static final long AS_MSG_TYPE = 3L;
@@ -127,6 +132,7 @@ public class Command {
public final int serverTimeout;
public int socketTimeout;
public int totalTimeout;
+ public Long version;
public Command(int socketTimeout, int totalTimeout, int maxRetries) {
this.maxRetries = maxRetries;
@@ -142,13 +148,328 @@ public Command(int socketTimeout, int totalTimeout, int maxRetries) {
}
}
+ //--------------------------------------------------
+ // Multi-record Transactions
+ //--------------------------------------------------
+
+ public final void setTxnAddKeys(WritePolicy policy, Key key, OperateArgs args) {
+ begin();
+ int fieldCount = estimateKeySize(key);
+ dataOffset += args.size;
+
+ sizeBuffer();
+
+ dataBuffer[8] = MSG_REMAINING_HEADER_SIZE;
+ dataBuffer[9] = (byte)args.readAttr;
+ dataBuffer[10] = (byte)args.writeAttr;
+ dataBuffer[11] = (byte)0;
+ dataBuffer[12] = 0;
+ dataBuffer[13] = 0;
+ Buffer.intToBytes(0, dataBuffer, 14);
+ Buffer.intToBytes(policy.expiration, dataBuffer, 18);
+ Buffer.intToBytes(serverTimeout, dataBuffer, 22);
+ Buffer.shortToBytes(fieldCount, dataBuffer, 26);
+ Buffer.shortToBytes(args.operations.length, dataBuffer, 28);
+ dataOffset = MSG_TOTAL_HEADER_SIZE;
+
+ writeKey(key);
+
+ for (Operation operation : args.operations) {
+ writeOperation(operation);
+ }
+ end();
+ compress(policy);
+ }
+
+ public final void setTxnVerify(Key key, long ver) {
+ begin();
+ int fieldCount = estimateKeySize(key);
+
+ // Version field.
+ dataOffset += 7 + FIELD_HEADER_SIZE;
+ fieldCount++;
+
+ sizeBuffer();
+ dataBuffer[8] = MSG_REMAINING_HEADER_SIZE;
+ dataBuffer[9] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ dataBuffer[10] = (byte)0;
+ dataBuffer[11] = (byte)Command.INFO3_SC_READ_TYPE;
+ dataBuffer[12] = (byte)Command.INFO4_MRT_VERIFY_READ;
+ dataBuffer[13] = 0;
+ Buffer.intToBytes(0, dataBuffer, 14);
+ Buffer.intToBytes(0, dataBuffer, 18);
+ Buffer.intToBytes(serverTimeout, dataBuffer, 22);
+ Buffer.shortToBytes(fieldCount, dataBuffer, 26);
+ Buffer.shortToBytes(0, dataBuffer, 28);
+ dataOffset = MSG_TOTAL_HEADER_SIZE;
+
+ writeKey(key);
+ writeFieldVersion(ver);
+ end();
+ }
+
+ public final void setBatchTxnVerify(
+ BatchPolicy policy,
+ Key[] keys,
+ Long[] versions,
+ BatchNode batch
+ ) {
+ // Estimate buffer size.
+ begin();
+
+ // Batch field
+ dataOffset += FIELD_HEADER_SIZE + 5;
+
+ Key keyPrev = null;
+ Long verPrev = null;
+ int max = batch.offsetsSize;
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ Long ver = versions[offset];
+
+ dataOffset += key.digest.length + 4;
+
+ if (canRepeat(key, keyPrev, ver, verPrev)) {
+ // Can set repeat previous namespace/bin names to save space.
+ dataOffset++;
+ }
+ else {
+ // Write full header and namespace/set/bin names.
+ dataOffset += 9; // header(4) + info4(1) + fieldCount(2) + opCount(2) = 9
+ dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE;
+ dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+
+ if (ver != null) {
+ dataOffset += 7 + FIELD_HEADER_SIZE;
+ }
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ sizeBuffer();
+
+ writeBatchHeader(policy, totalTimeout, 1);
+
+ int fieldSizeOffset = dataOffset;
+ writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
+
+ Buffer.intToBytes(max, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = getBatchFlags(policy);
+ keyPrev = null;
+ verPrev = null;
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ Long ver = versions[offset];
+
+ Buffer.intToBytes(offset, dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ byte[] digest = key.digest;
+ System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length);
+ dataOffset += digest.length;
+
+ if (canRepeat(key, keyPrev, ver, verPrev)) {
+ // Can set repeat previous namespace/bin names to save space.
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
+ }
+ else {
+ // Write full message.
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4);
+ dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA);
+ dataBuffer[dataOffset++] = (byte)0;
+ dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE;
+ dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ;
+
+ int fieldCount = 0;
+
+ if (ver != null) {
+ fieldCount++;
+ }
+
+ writeBatchFields(key, fieldCount, 0);
+
+ if (ver != null) {
+ writeFieldVersion(ver);
+ }
+
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ // Write real field size.
+ Buffer.intToBytes(dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset);
+ end();
+ compress(policy);
+ }
+
+ public final void setTxnMarkRollForward(Key key) {
+ Bin bin = new Bin("fwd", true);
+
+ begin();
+ int fieldCount = estimateKeySize(key);
+ estimateOperationSize(bin);
+ writeTxnMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1);
+ writeOperation(bin, Operation.Type.WRITE);
+ end();
+ }
+
+ public final void setTxnRoll(Key key, Txn txn, int txnAttr) {
+ begin();
+ int fieldCount = estimateKeySize(key);
+
+ fieldCount += sizeTxn(key, txn, false);
+
+ sizeBuffer();
+ dataBuffer[8] = MSG_REMAINING_HEADER_SIZE;
+ dataBuffer[9] = (byte)0;
+ dataBuffer[10] = (byte)Command.INFO2_WRITE | Command.INFO2_DURABLE_DELETE;
+ dataBuffer[11] = (byte)0;
+ dataBuffer[12] = (byte)txnAttr;
+ dataBuffer[13] = 0; // clear the result code
+ Buffer.intToBytes(0, dataBuffer, 14);
+ Buffer.intToBytes(0, dataBuffer, 18);
+ Buffer.intToBytes(serverTimeout, dataBuffer, 22);
+ Buffer.shortToBytes(fieldCount, dataBuffer, 26);
+ Buffer.shortToBytes(0, dataBuffer, 28);
+ dataOffset = MSG_TOTAL_HEADER_SIZE;
+
+ writeKey(key);
+ writeTxn(txn, false);
+ end();
+ }
+
+ public final void setBatchTxnRoll(
+ BatchPolicy policy,
+ Txn txn,
+ Key[] keys,
+ BatchNode batch,
+ BatchAttr attr
+ ) {
+ // Estimate buffer size.
+ begin();
+ int fieldCount = 1;
+ int max = batch.offsetsSize;
+ Long[] versions = new Long[max];
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ versions[i] = txn.getReadVersion(key);
+ }
+
+ // Batch field
+ dataOffset += FIELD_HEADER_SIZE + 5;
+
+ Key keyPrev = null;
+ Long verPrev = null;
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ Long ver = versions[i];
+
+ dataOffset += key.digest.length + 4;
+
+ if (canRepeat(key, keyPrev, ver, verPrev)) {
+ // Can set repeat previous namespace/bin names to save space.
+ dataOffset++;
+ }
+ else {
+ // Write full header and namespace/set/bin names.
+ dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
+ dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE;
+ dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+ sizeTxnBatch(txn, ver, attr.hasWrite);
+ dataOffset += 2; // gen(2) = 2
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ sizeBuffer();
+
+ writeBatchHeader(policy, totalTimeout, fieldCount);
+
+ int fieldSizeOffset = dataOffset;
+ writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
+
+ Buffer.intToBytes(max, dataBuffer, dataOffset);
+ dataOffset += 4;
+ dataBuffer[dataOffset++] = getBatchFlags(policy);
+ keyPrev = null;
+ verPrev = null;
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ Long ver = versions[i];
+
+ Buffer.intToBytes(offset, dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ byte[] digest = key.digest;
+ System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length);
+ dataOffset += digest.length;
+
+ if (canRepeat(key, keyPrev, ver, verPrev)) {
+ // Can set repeat previous namespace/bin names to save space.
+ dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
+ }
+ else {
+ // Write full message.
+ writeBatchWrite(key, txn, ver, attr, null, 0, 0);
+ keyPrev = key;
+ verPrev = ver;
+ }
+ }
+
+ // Write real field size.
+ Buffer.intToBytes(dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset);
+ end();
+ compress(policy);
+ }
+
+ public void setTxnClose(Txn txn, Key key) {
+ begin();
+ int fieldCount = estimateKeySize(key);
+ writeTxnMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE,
+ fieldCount, 0);
+ end();
+ }
+
+ private void writeTxnMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) {
+ sizeBuffer();
+
+ dataBuffer[8] = MSG_REMAINING_HEADER_SIZE;
+ dataBuffer[9] = (byte)readAttr;
+ dataBuffer[10] = (byte)writeAttr;
+ dataBuffer[11] = (byte)0;
+ dataBuffer[12] = 0;
+ dataBuffer[13] = 0;
+ Buffer.intToBytes(0, dataBuffer, 14);
+ Buffer.intToBytes(0, dataBuffer, 18);
+ Buffer.intToBytes(serverTimeout, dataBuffer, 22);
+ Buffer.shortToBytes(fieldCount, dataBuffer, 26);
+ Buffer.shortToBytes(opCount, dataBuffer, 28);
+ dataOffset = MSG_TOTAL_HEADER_SIZE;
+
+ writeKey(key);
+ }
+
//--------------------------------------------------
// Writes
//--------------------------------------------------
public final void setWrite(WritePolicy policy, Operation.Type operation, Key key, Bin[] bins) {
begin();
- int fieldCount = estimateKeySize(policy, key);
+ int fieldCount = estimateKeySize(policy, key, true);
if (policy.filterExp != null) {
dataOffset += policy.filterExp.size();
@@ -160,7 +481,7 @@ public final void setWrite(WritePolicy policy, Operation.Type operation, Key key
}
sizeBuffer();
writeHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.length);
- writeKey(policy, key);
+ writeKey(policy, key, true);
if (policy.filterExp != null) {
policy.filterExp.write(this);
@@ -175,7 +496,7 @@ public final void setWrite(WritePolicy policy, Operation.Type operation, Key key
public void setDelete(WritePolicy policy, Key key) {
begin();
- int fieldCount = estimateKeySize(policy, key);
+ int fieldCount = estimateKeySize(policy, key, true);
if (policy.filterExp != null) {
dataOffset += policy.filterExp.size();
@@ -183,7 +504,7 @@ public void setDelete(WritePolicy policy, Key key) {
}
sizeBuffer();
writeHeaderWrite(policy, Command.INFO2_WRITE | Command.INFO2_DELETE, fieldCount, 0);
- writeKey(policy, key);
+ writeKey(policy, key, true);
if (policy.filterExp != null) {
policy.filterExp.write(this);
@@ -194,15 +515,15 @@ public void setDelete(WritePolicy policy, Key key) {
public void setDelete(Policy policy, Key key, BatchAttr attr) {
begin();
Expression exp = getBatchExpression(policy, attr);
- int fieldCount = estimateKeyAttrSize(key, attr, exp);
+ int fieldCount = estimateKeyAttrSize(policy, key, attr, exp);
sizeBuffer();
- writeKeyAttr(key, attr, exp, fieldCount, 0);
+ writeKeyAttr(policy, key, attr, exp, fieldCount, 0);
end();
}
public final void setTouch(WritePolicy policy, Key key) {
begin();
- int fieldCount = estimateKeySize(policy, key);
+ int fieldCount = estimateKeySize(policy, key, true);
if (policy.filterExp != null) {
dataOffset += policy.filterExp.size();
@@ -211,7 +532,7 @@ public final void setTouch(WritePolicy policy, Key key) {
estimateOperationSize();
sizeBuffer();
writeHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 1);
- writeKey(policy, key);
+ writeKey(policy, key, true);
if (policy.filterExp != null) {
policy.filterExp.write(this);
@@ -226,7 +547,7 @@ public final void setTouch(WritePolicy policy, Key key) {
public final void setExists(Policy policy, Key key) {
begin();
- int fieldCount = estimateKeySize(policy, key);
+ int fieldCount = estimateKeySize(policy, key, false);
if (policy.filterExp != null) {
dataOffset += policy.filterExp.size();
@@ -234,7 +555,7 @@ public final void setExists(Policy policy, Key key) {
}
sizeBuffer();
writeHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0);
- writeKey(policy, key);
+ writeKey(policy, key, false);
if (policy.filterExp != null) {
policy.filterExp.write(this);
@@ -254,7 +575,7 @@ public final void setRead(Policy policy, Key key, String[] binNames) {
}
begin();
- int fieldCount = estimateKeySize(policy, key);
+ int fieldCount = estimateKeySize(policy, key, false);
if (policy.filterExp != null) {
dataOffset += policy.filterExp.size();
@@ -269,7 +590,7 @@ public final void setRead(Policy policy, Key key, String[] binNames) {
sizeBuffer();
writeHeaderRead(policy, serverTimeout, readAttr, 0, 0, fieldCount, opCount);
- writeKey(policy, key);
+ writeKey(policy, key, false);
if (policy.filterExp != null) {
policy.filterExp.write(this);
@@ -323,10 +644,10 @@ else if (br.ops != null) {
opCount = 0;
}
- int fieldCount = estimateKeyAttrSize(br.key, attr, exp);
+ int fieldCount = estimateKeyAttrSize(policy, br.key, attr, exp);
sizeBuffer();
- writeKeyAttr(br.key, attr, exp, fieldCount, opCount);
+ writeKeyAttr(policy, br.key, attr, exp, fieldCount, opCount);
if (br.binNames != null) {
for (String binName : br.binNames) {
@@ -348,7 +669,7 @@ public final void setRead(Policy policy, Key key, Operation[] ops) {
attr.setRead(policy);
attr.adjustRead(ops);
- int fieldCount = estimateKeyAttrSize(key, attr, policy.filterExp);
+ int fieldCount = estimateKeyAttrSize(policy, key, attr, policy.filterExp);
for (Operation op : ops) {
if (op.type.isWrite) {
@@ -358,7 +679,7 @@ public final void setRead(Policy policy, Key key, Operation[] ops) {
}
sizeBuffer();
- writeKeyAttr(key, attr, policy.filterExp, fieldCount, ops.length);
+ writeKeyAttr(policy, key, attr, policy.filterExp, fieldCount, ops.length);
for (Operation op : ops) {
writeOperation(op);
@@ -368,16 +689,15 @@ public final void setRead(Policy policy, Key key, Operation[] ops) {
public final void setReadHeader(Policy policy, Key key) {
begin();
- int fieldCount = estimateKeySize(policy, key);
+ int fieldCount = estimateKeySize(policy, key, false);
if (policy.filterExp != null) {
dataOffset += policy.filterExp.size();
fieldCount++;
}
- estimateOperationSize((String)null);
sizeBuffer();
writeHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0);
- writeKey(policy, key);
+ writeKey(policy, key, false);
if (policy.filterExp != null) {
policy.filterExp.write(this);
@@ -391,7 +711,7 @@ public final void setReadHeader(Policy policy, Key key) {
public final void setOperate(WritePolicy policy, Key key, OperateArgs args) {
begin();
- int fieldCount = estimateKeySize(policy, key);
+ int fieldCount = estimateKeySize(policy, key, args.hasWrite);
if (policy.filterExp != null) {
dataOffset += policy.filterExp.size();
@@ -401,7 +721,7 @@ public final void setOperate(WritePolicy policy, Key key, OperateArgs args) {
sizeBuffer();
writeHeaderReadWrite(policy, args, fieldCount);
- writeKey(policy, key);
+ writeKey(policy, key, args.hasWrite);
if (policy.filterExp != null) {
policy.filterExp.write(this);
@@ -417,11 +737,11 @@ public final void setOperate(WritePolicy policy, Key key, OperateArgs args) {
public final void setOperate(Policy policy, BatchAttr attr, Key key, Operation[] ops) {
begin();
Expression exp = getBatchExpression(policy, attr);
- int fieldCount = estimateKeyAttrSize(key, attr, exp);
+ int fieldCount = estimateKeyAttrSize(policy, key, attr, exp);
dataOffset += attr.opSize;
sizeBuffer();
- writeKeyAttr(key, attr, exp, fieldCount, ops.length);
+ writeKeyAttr(policy, key, attr, exp, fieldCount, ops.length);
for (Operation op : ops) {
writeOperation(op);
@@ -436,7 +756,7 @@ public final void setOperate(Policy policy, BatchAttr attr, Key key, Operation[]
public final void setUdf(WritePolicy policy, Key key, String packageName, String functionName, Value[] args) {
begin();
- int fieldCount = estimateKeySize(policy, key);
+ int fieldCount = estimateKeySize(policy, key, true);
if (policy.filterExp != null) {
dataOffset += policy.filterExp.size();
@@ -448,7 +768,7 @@ public final void setUdf(WritePolicy policy, Key key, String packageName, String
sizeBuffer();
writeHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0);
- writeKey(policy, key);
+ writeKey(policy, key, true);
if (policy.filterExp != null) {
policy.filterExp.write(this);
@@ -469,11 +789,11 @@ public final void setUdf(Policy policy, BatchAttr attr, Key key, String packageN
public final void setUdf(Policy policy, BatchAttr attr, Key key, String packageName, String functionName, byte[] argBytes) {
begin();
Expression exp = getBatchExpression(policy, attr);
- int fieldCount = estimateKeyAttrSize(key, attr, exp);
+ int fieldCount = estimateKeyAttrSize(policy, key, attr, exp);
fieldCount += estimateUdfSize(packageName, functionName, argBytes);
sizeBuffer();
- writeKeyAttr(key, attr, exp, fieldCount, 0);
+ writeKeyAttr(policy, key, attr, exp, fieldCount, 0);
writeField(packageName, FieldType.UDF_PACKAGE_NAME);
writeField(functionName, FieldType.UDF_FUNCTION);
writeField(argBytes, FieldType.UDF_ARGLIST);
@@ -746,21 +1066,21 @@ public final void setBatchOperate(
List extends BatchRecord> records,
BatchNode batch
) {
- final BatchRecordIterNative iter = new BatchRecordIterNative(records, batch);
- setBatchOperate(policy, writePolicy, udfPolicy, deletePolicy, iter);
- }
+ begin();
+ int max = batch.offsetsSize;
+ Txn txn = policy.txn;
+ Long[] versions = null;
- public final void setBatchOperate(
- BatchPolicy policy,
- BatchWritePolicy writePolicy,
- BatchUDFPolicy udfPolicy,
- BatchDeletePolicy deletePolicy,
- KeyIter iter
- ) {
- BatchRecord record;
- BatchRecord prev = null;
+ if (txn != null) {
+ versions = new Long[max];
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ BatchRecord record = records.get(offset);
+ versions[i] = txn.getReadVersion(record.key);
+ }
+ }
- begin();
int fieldCount = 1;
if (policy.filterExp != null) {
@@ -770,17 +1090,18 @@ public final void setBatchOperate(
dataOffset += FIELD_HEADER_SIZE + 5;
- while ((record = iter.next()) != null) {
- final Key key = record.key;
+ BatchRecord prev = null;
+ Long verPrev = null;
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ BatchRecord record = records.get(offset);
+ Key key = record.key;
+ Long ver = (versions != null)? versions[i] : null;
dataOffset += key.digest.length + 4;
- // Avoid relatively expensive full equality checks for performance reasons.
- // Use reference equality only in hope that common namespaces/bin names are set from
- // fixed variables. It's fine if equality not determined correctly because it just
- // results in more space used. The batch will still be correct.
- if (!policy.sendKey && prev != null && prev.key.namespace == key.namespace && prev.key.setName == key.setName &&
- record.equals(prev)) {
+ if (canRepeat(policy, key, record, prev, ver, verPrev)) {
// Can set repeat previous namespace/bin names to save space.
dataOffset++;
}
@@ -789,8 +1110,10 @@ public final void setBatchOperate(
dataOffset += 12;
dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE;
dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+ sizeTxnBatch(txn, ver, record.hasWrite);
dataOffset += record.size(policy);
prev = record;
+ verPrev = ver;
}
}
sizeBuffer();
@@ -804,29 +1127,28 @@ public final void setBatchOperate(
final int fieldSizeOffset = dataOffset;
writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
- Buffer.intToBytes(iter.size(), dataBuffer, dataOffset);
+ Buffer.intToBytes(max, dataBuffer, dataOffset);
dataOffset += 4;
dataBuffer[dataOffset++] = getBatchFlags(policy);
BatchAttr attr = new BatchAttr();
prev = null;
- iter.reset();
+ verPrev = null;
- while ((record = iter.next()) != null) {
- Buffer.intToBytes(iter.offset(), dataBuffer, dataOffset);
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ BatchRecord record = records.get(offset);
+ Long ver = (versions != null)? versions[i] : null;
+
+ Buffer.intToBytes(offset, dataBuffer, dataOffset);
dataOffset += 4;
- final Key key = record.key;
+ Key key = record.key;
final byte[] digest = key.digest;
System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length);
dataOffset += digest.length;
- // Avoid relatively expensive full equality checks for performance reasons.
- // Use reference equality only in hope that common namespaces/bin names are set from
- // fixed variables. It's fine if equality not determined correctly because it just
- // results in more space used. The batch will still be correct.
- if (!policy.sendKey && prev != null && prev.key.namespace == key.namespace && prev.key.setName == key.setName &&
- record.equals(prev)) {
+ if (canRepeat(policy, key, record, prev, ver, verPrev)) {
// Can set repeat previous namespace/bin names to save space.
dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
}
@@ -845,20 +1167,20 @@ public final void setBatchOperate(
if (br.binNames != null) {
if (br.binNames.length > 0) {
- writeBatchBinNames(key, br.binNames, attr, attr.filterExp);
+ writeBatchBinNames(key, txn, ver, br.binNames, attr, attr.filterExp);
}
else {
attr.adjustRead(true);
- writeBatchRead(key, attr, attr.filterExp, 0);
+ writeBatchRead(key, txn, ver, attr, attr.filterExp, 0);
}
}
else if (br.ops != null) {
attr.adjustRead(br.ops);
- writeBatchOperations(key, br.ops, attr, attr.filterExp);
+ writeBatchOperations(key, txn, ver, br.ops, attr, attr.filterExp);
}
else {
attr.adjustRead(br.readAllBins);
- writeBatchRead(key, attr, attr.filterExp, 0);
+ writeBatchRead(key, txn, ver, attr, attr.filterExp, 0);
}
break;
}
@@ -869,7 +1191,7 @@ else if (br.ops != null) {
attr.setWrite(bwp);
attr.adjustWrite(bw.ops);
- writeBatchOperations(key, bw.ops, attr, attr.filterExp);
+ writeBatchOperations(key, txn, ver, bw.ops, attr, attr.filterExp);
break;
}
@@ -878,7 +1200,7 @@ else if (br.ops != null) {
BatchUDFPolicy bup = (bu.policy != null)? bu.policy : udfPolicy;
attr.setUDF(bup);
- writeBatchWrite(key, attr, attr.filterExp, 3, 0);
+ writeBatchWrite(key, txn, ver, attr, attr.filterExp, 3, 0);
writeField(bu.packageName, FieldType.UDF_PACKAGE_NAME);
writeField(bu.functionName, FieldType.UDF_FUNCTION);
writeField(bu.argBytes, FieldType.UDF_ARGLIST);
@@ -890,11 +1212,12 @@ else if (br.ops != null) {
BatchDeletePolicy bdp = (bd.policy != null)? bd.policy : deletePolicy;
attr.setDelete(bdp);
- writeBatchWrite(key, attr, attr.filterExp, 0, 0);
+ writeBatchWrite(key, txn, ver, attr, attr.filterExp, 0, 0);
break;
}
}
prev = record;
+ verPrev = ver;
}
}
@@ -911,22 +1234,25 @@ public final void setBatchOperate(
String[] binNames,
Operation[] ops,
BatchAttr attr
- ) {
- final KeyIterNative iter = new KeyIterNative(keys, batch);
- setBatchOperate(policy, iter, binNames, ops, attr);
- }
-
- public final void setBatchOperate(
- BatchPolicy policy,
- KeyIter iter,
- String[] binNames,
- Operation[] ops,
- BatchAttr attr
) {
// Estimate buffer size.
begin();
- int fieldCount = 1;
+ int max = batch.offsetsSize;
+ Txn txn = policy.txn;
+ Long[] versions = null;
+
+ if (txn != null) {
+ versions = new Long[max];
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ versions[i] = txn.getReadVersion(key);
+ }
+ }
+
Expression exp = getBatchExpression(policy, attr);
+ int fieldCount = 1;
if (exp != null) {
dataOffset += exp.size();
@@ -935,22 +1261,26 @@ public final void setBatchOperate(
dataOffset += FIELD_HEADER_SIZE + 5;
- Key key;
- Key prev = null;
+ Key keyPrev = null;
+ Long verPrev = null;
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ Long ver = (versions != null)? versions[i] : null;
- while ((key = iter.next()) != null) {
dataOffset += key.digest.length + 4;
- // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if (!attr.sendKey && prev != null && prev.namespace == key.namespace && prev.setName == key.setName) {
+ if (canRepeat(attr, key, keyPrev, ver, verPrev)) {
// Can set repeat previous namespace/bin names to save space.
dataOffset++;
}
else {
// Write full header and namespace/set/bin names.
- dataOffset += 12; // header(4) + ttl(4) + fielCount(2) + opCount(2) = 12
+ dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE;
dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+ sizeTxnBatch(txn, ver, attr.hasWrite);
if (attr.sendKey) {
dataOffset += key.userKey.estimateSize() + FIELD_HEADER_SIZE + 1;
@@ -975,7 +1305,8 @@ else if (ops != null) {
else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) {
dataOffset += 2; // Extra write specific fields.
}
- prev = key;
+ keyPrev = key;
+ verPrev = ver;
}
}
@@ -990,40 +1321,44 @@ else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) {
int fieldSizeOffset = dataOffset;
writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
- Buffer.intToBytes(iter.size(), dataBuffer, dataOffset);
+ Buffer.intToBytes(max, dataBuffer, dataOffset);
dataOffset += 4;
dataBuffer[dataOffset++] = getBatchFlags(policy);
- prev = null;
- iter.reset();
+ keyPrev = null;
+ verPrev = null;
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ Long ver = (versions != null)? versions[i] : null;
- while ((key = iter.next()) != null) {
- Buffer.intToBytes(iter.offset(), dataBuffer, dataOffset);
+ Buffer.intToBytes(offset, dataBuffer, dataOffset);
dataOffset += 4;
byte[] digest = key.digest;
System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length);
dataOffset += digest.length;
- // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if (!attr.sendKey && prev != null && prev.namespace == key.namespace && prev.setName == key.setName) {
+ if (canRepeat(attr, key, keyPrev, ver, verPrev)) {
// Can set repeat previous namespace/bin names to save space.
dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
}
else {
// Write full message.
if (binNames != null) {
- writeBatchBinNames(key, binNames, attr, null);
+ writeBatchBinNames(key, txn, ver, binNames, attr, null);
}
else if (ops != null) {
- writeBatchOperations(key, ops, attr, null);
+ writeBatchOperations(key, txn, ver, ops, attr, null);
}
else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) {
- writeBatchWrite(key, attr, null, 0, 0);
+ writeBatchWrite(key, txn, ver, attr, null, 0, 0);
}
else {
- writeBatchRead(key, attr, null, 0);
+ writeBatchRead(key, txn, ver, attr, null, 0);
}
- prev = key;
+ keyPrev = key;
+ verPrev = ver;
}
}
@@ -1041,23 +1376,25 @@ public final void setBatchUDF(
String functionName,
byte[] argBytes,
BatchAttr attr
- ) {
- final KeyIterNative iter = new KeyIterNative(keys, batch);
- setBatchUDF(policy, iter, packageName, functionName, argBytes, attr);
- }
-
- public final void setBatchUDF(
- BatchPolicy policy,
- KeyIter iter,
- String packageName,
- String functionName,
- byte[] argBytes,
- BatchAttr attr
) {
// Estimate buffer size.
begin();
- int fieldCount = 1;
+ int max = batch.offsetsSize;
+ Txn txn = policy.txn;
+ Long[] versions = null;
+
+ if (txn != null) {
+ versions = new Long[max];
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ versions[i] = txn.getReadVersion(key);
+ }
+ }
+
Expression exp = getBatchExpression(policy, attr);
+ int fieldCount = 1;
if (exp != null) {
dataOffset += exp.size();
@@ -1066,14 +1403,17 @@ public final void setBatchUDF(
dataOffset += FIELD_HEADER_SIZE + 5;
- Key key;
- Key prev = null;
+ Key keyPrev = null;
+ Long verPrev = null;
+
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ Long ver = (versions != null)? versions[i] : null;
- while ((key = iter.next()) != null) {
dataOffset += key.digest.length + 4;
- // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if (!attr.sendKey && prev != null && prev.namespace == key.namespace && prev.setName == key.setName) {
+ if (canRepeat(attr, key, keyPrev, ver, verPrev)) {
// Can set repeat previous namespace/bin names to save space.
dataOffset++;
}
@@ -1082,13 +1422,15 @@ public final void setBatchUDF(
dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE;
dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE;
+ sizeTxnBatch(txn, ver, attr.hasWrite);
if (attr.sendKey) {
dataOffset += key.userKey.estimateSize() + FIELD_HEADER_SIZE + 1;
}
dataOffset += 2; // gen(2) = 2
estimateUdfSize(packageName, functionName, argBytes);
- prev = key;
+ keyPrev = key;
+ verPrev = ver;
}
}
@@ -1103,32 +1445,36 @@ public final void setBatchUDF(
int fieldSizeOffset = dataOffset;
writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end
- Buffer.intToBytes(iter.size(), dataBuffer, dataOffset);
+ Buffer.intToBytes(max, dataBuffer, dataOffset);
dataOffset += 4;
dataBuffer[dataOffset++] = getBatchFlags(policy);
- prev = null;
- iter.reset();
+ keyPrev = null;
+ verPrev = null;
- while ((key = iter.next()) != null) {
- Buffer.intToBytes(iter.offset(), dataBuffer, dataOffset);
+ for (int i = 0; i < max; i++) {
+ int offset = batch.offsets[i];
+ Key key = keys[offset];
+ Long ver = (versions != null)? versions[i] : null;
+
+ Buffer.intToBytes(offset, dataBuffer, dataOffset);
dataOffset += 4;
byte[] digest = key.digest;
System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length);
dataOffset += digest.length;
- // Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if (!attr.sendKey && prev != null && prev.namespace == key.namespace && prev.setName == key.setName) {
+ if (canRepeat(attr, key, keyPrev, ver, verPrev)) {
// Can set repeat previous namespace/bin names to save space.
dataBuffer[dataOffset++] = BATCH_MSG_REPEAT;
}
else {
// Write full message.
- writeBatchWrite(key, attr, null, 3, 0);
+ writeBatchWrite(key, txn, ver, attr, null, 3, 0);
writeField(packageName, FieldType.UDF_PACKAGE_NAME);
writeField(functionName, FieldType.UDF_FUNCTION);
writeField(argBytes, FieldType.UDF_ARGLIST);
- prev = key;
+ keyPrev = key;
+ verPrev = ver;
}
}
@@ -1138,6 +1484,33 @@ public final void setBatchUDF(
compress(policy);
}
+ private static boolean canRepeat(
+ Policy policy,
+ Key key,
+ BatchRecord record,
+ BatchRecord prev,
+ Long ver,
+ Long verPrev
+ ) {
+ // Avoid relatively expensive full equality checks for performance reasons.
+ // Use reference equality only in hope that common namespaces/bin names are set from
+ // fixed variables. It's fine if equality not determined correctly because it just
+ // results in more space used. The batch will still be correct.
+ // Same goes for ver reference equality check.
+ return !policy.sendKey && verPrev == ver && prev != null && prev.key.namespace == key.namespace &&
+ prev.key.setName == key.setName && record.equals(prev);
+ }
+
+ private static boolean canRepeat(BatchAttr attr, Key key, Key keyPrev, Long ver, Long verPrev) {
+ return !attr.sendKey && verPrev == ver && keyPrev != null && keyPrev.namespace == key.namespace &&
+ keyPrev.setName == key.setName;
+ }
+
+ private static boolean canRepeat(Key key, Key keyPrev, Long ver, Long verPrev) {
+ return verPrev == ver && keyPrev != null && keyPrev.namespace == key.namespace &&
+ keyPrev.setName == key.setName;
+ }
+
private static final Expression getBatchExpression(Policy policy, BatchAttr attr) {
return (attr.filterExp != null) ? attr.filterExp : policy.filterExp;
}
@@ -1159,6 +1532,21 @@ private static byte getBatchFlags(BatchPolicy policy) {
return flags;
}
+ private void sizeTxnBatch(Txn txn, Long ver, boolean hasWrite) {
+ if (txn != null) {
+ dataOffset++; // Add info4 byte for MRT.
+ dataOffset += 8 + FIELD_HEADER_SIZE;
+
+ if (ver != null) {
+ dataOffset += 7 + FIELD_HEADER_SIZE;
+ }
+
+ if (hasWrite && txn.getDeadline() != 0) {
+ dataOffset += 4 + FIELD_HEADER_SIZE;
+ }
+ }
+ }
+
private void writeBatchHeader(Policy policy, int timeout, int fieldCount) {
int readAttr = Command.INFO1_BATCH;
@@ -1181,20 +1569,20 @@ private void writeBatchHeader(Policy policy, int timeout, int fieldCount) {
dataOffset = MSG_TOTAL_HEADER_SIZE;
}
- private void writeBatchBinNames(Key key, String[] binNames, BatchAttr attr, Expression filter) {
- writeBatchRead(key, attr, filter, binNames.length);
+ private void writeBatchBinNames(Key key, Txn txn, Long ver, String[] binNames, BatchAttr attr, Expression filter) {
+ writeBatchRead(key, txn, ver, attr, filter, binNames.length);
for (String binName : binNames) {
writeOperation(binName, Operation.Type.READ);
}
}
- private void writeBatchOperations(Key key, Operation[] ops, BatchAttr attr, Expression filter) {
+ private void writeBatchOperations(Key key, Txn txn, Long ver, Operation[] ops, BatchAttr attr, Expression filter) {
if (attr.hasWrite) {
- writeBatchWrite(key, attr, filter, 0, ops.length);
+ writeBatchWrite(key, txn, ver, attr, filter, 0, ops.length);
}
else {
- writeBatchRead(key, attr, filter, ops.length);
+ writeBatchRead(key, txn, ver, attr, filter, ops.length);
}
for (Operation op : ops) {
@@ -1202,44 +1590,133 @@ private void writeBatchOperations(Key key, Operation[] ops, BatchAttr attr, Expr
}
}
- private void writeBatchRead(Key key, BatchAttr attr, Expression filter, int opCount) {
- dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL);
- dataBuffer[dataOffset++] = (byte)attr.readAttr;
- dataBuffer[dataOffset++] = (byte)attr.writeAttr;
- dataBuffer[dataOffset++] = (byte)attr.infoAttr;
- Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset);
- dataOffset += 4;
- writeBatchFields(key, filter, 0, opCount);
+ private void writeBatchRead(Key key, Txn txn, Long ver, BatchAttr attr, Expression filter, int opCount) {
+ if (txn != null) {
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL);
+ dataBuffer[dataOffset++] = (byte)attr.readAttr;
+ dataBuffer[dataOffset++] = (byte)attr.writeAttr;
+ dataBuffer[dataOffset++] = (byte)attr.infoAttr;
+ dataBuffer[dataOffset++] = (byte)attr.txnAttr;
+ Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset);
+ dataOffset += 4;
+ writeBatchFieldsTxn(key, txn, ver, attr, filter, 0, opCount);
+ }
+ else {
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL);
+ dataBuffer[dataOffset++] = (byte)attr.readAttr;
+ dataBuffer[dataOffset++] = (byte)attr.writeAttr;
+ dataBuffer[dataOffset++] = (byte)attr.infoAttr;
+ Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset);
+ dataOffset += 4;
+ writeBatchFieldsReg(key, attr, filter, 0, opCount);
+ }
}
- private void writeBatchWrite(Key key, BatchAttr attr, Expression filter, int fieldCount, int opCount) {
- dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL);
- dataBuffer[dataOffset++] = (byte)attr.readAttr;
- dataBuffer[dataOffset++] = (byte)attr.writeAttr;
- dataBuffer[dataOffset++] = (byte)attr.infoAttr;
- Buffer.shortToBytes(attr.generation, dataBuffer, dataOffset);
- dataOffset += 2;
- Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset);
- dataOffset += 4;
+ private void writeBatchWrite(
+ Key key,
+ Txn txn,
+ Long ver,
+ BatchAttr attr,
+ Expression filter,
+ int fieldCount,
+ int opCount
+ ) {
+ if (txn != null) {
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL);
+ dataBuffer[dataOffset++] = (byte)attr.readAttr;
+ dataBuffer[dataOffset++] = (byte)attr.writeAttr;
+ dataBuffer[dataOffset++] = (byte)attr.infoAttr;
+ dataBuffer[dataOffset++] = (byte)attr.txnAttr;
+ Buffer.shortToBytes(attr.generation, dataBuffer, dataOffset);
+ dataOffset += 2;
+ Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset);
+ dataOffset += 4;
+ writeBatchFieldsTxn(key, txn, ver, attr, filter, fieldCount, opCount);
+ }
+ else {
+ dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL);
+ dataBuffer[dataOffset++] = (byte)attr.readAttr;
+ dataBuffer[dataOffset++] = (byte)attr.writeAttr;
+ dataBuffer[dataOffset++] = (byte)attr.infoAttr;
+ Buffer.shortToBytes(attr.generation, dataBuffer, dataOffset);
+ dataOffset += 2;
+ Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset);
+ dataOffset += 4;
+ writeBatchFieldsReg(key, attr, filter, fieldCount, opCount);
+ }
+ }
+
+ private void writeBatchFieldsTxn(
+ Key key,
+ Txn txn,
+ Long ver,
+ BatchAttr attr,
+ Expression filter,
+ int fieldCount,
+ int opCount
+ ) {
+ fieldCount++;
+
+ if (ver != null) {
+ fieldCount++;
+ }
+
+ if (attr.hasWrite && txn.getDeadline() != 0) {
+ fieldCount++;
+ }
+
+ if (filter != null) {
+ fieldCount++;
+ }
if (attr.sendKey) {
fieldCount++;
- writeBatchFields(key, filter, fieldCount, opCount);
- writeField(key.userKey, FieldType.KEY);
}
- else {
- writeBatchFields(key, filter, fieldCount, opCount);
+
+ writeBatchFields(key, fieldCount, opCount);
+
+ writeFieldLE(txn.getId(), FieldType.MRT_ID);
+
+ if (ver != null) {
+ writeFieldVersion(ver);
+ }
+
+ if (attr.hasWrite && txn.getDeadline() != 0) {
+ writeFieldLE(txn.getDeadline(), FieldType.MRT_DEADLINE);
+ }
+
+ if (filter != null) {
+ filter.write(this);
+ }
+
+ if (attr.sendKey) {
+ writeField(key.userKey, FieldType.KEY);
}
}
- private void writeBatchFields(Key key, Expression filter, int fieldCount, int opCount) {
+ private void writeBatchFieldsReg(
+ Key key,
+ BatchAttr attr,
+ Expression filter,
+ int fieldCount,
+ int opCount
+ ) {
if (filter != null) {
fieldCount++;
- writeBatchFields(key, fieldCount, opCount);
+ }
+
+ if (attr.sendKey) {
+ fieldCount++;
+ }
+
+ writeBatchFields(key, fieldCount, opCount);
+
+ if (filter != null) {
filter.write(this);
}
- else {
- writeBatchFields(key, fieldCount, opCount);
+
+ if (attr.sendKey) {
+ writeField(key.userKey, FieldType.KEY);
}
}
@@ -1374,7 +1851,7 @@ public final void setScan(
writeField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT);
// Write taskId field
- writeField(taskId, FieldType.TRAN_ID);
+ writeField(taskId, FieldType.QUERY_ID);
if (binNames != null) {
for (String binName : binNames) {
@@ -1602,7 +2079,7 @@ else if (qp.expectedDuration == QueryDuration.LONG_RELAX_AP) {
writeField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT);
// Write taskId field
- writeField(taskId, FieldType.TRAN_ID);
+ writeField(taskId, FieldType.QUERY_ID);
if (filter != null) {
IndexCollectionType type = filter.getCollectionType();
@@ -1699,13 +2176,8 @@ else if (binNames != null && (isNew || filter == null)) {
// Command Sizing
//--------------------------------------------------
- private final int estimateKeyAttrSize(Key key, BatchAttr attr, Expression filterExp) {
- int fieldCount = estimateKeySize(key);
-
- if (attr.sendKey) {
- dataOffset += key.userKey.estimateSize() + FIELD_HEADER_SIZE + 1;
- fieldCount++;
- }
+ private final int estimateKeyAttrSize(Policy policy, Key key, BatchAttr attr, Expression filterExp) {
+ int fieldCount = estimateKeySize(policy, key, attr.hasWrite);
if (filterExp != null) {
dataOffset += filterExp.size();
@@ -1714,9 +2186,11 @@ private final int estimateKeyAttrSize(Key key, BatchAttr attr, Expression filter
return fieldCount;
}
- private final int estimateKeySize(Policy policy, Key key) {
+ private int estimateKeySize(Policy policy, Key key, boolean hasWrite) {
int fieldCount = estimateKeySize(key);
+ fieldCount += sizeTxn(key, policy.txn, hasWrite);
+
if (policy.sendKey) {
dataOffset += key.userKey.estimateSize() + FIELD_HEADER_SIZE + 1;
fieldCount++;
@@ -1835,7 +2309,7 @@ private final void writeHeaderWrite(WritePolicy policy, int writeAttr, int field
dataBuffer[9] = (byte)readAttr;
dataBuffer[10] = (byte)writeAttr;
dataBuffer[11] = (byte)infoAttr;
- dataBuffer[12] = 0; // unused
+ dataBuffer[12] = 0;
dataBuffer[13] = 0; // clear the result code
Buffer.intToBytes(generation, dataBuffer, 14);
Buffer.intToBytes(policy.expiration, dataBuffer, 18);
@@ -2033,7 +2507,14 @@ private final void writeHeaderReadHeader(Policy policy, int readAttr, int fieldC
/**
* Header write for batch single commands.
*/
- private final void writeKeyAttr(Key key, BatchAttr attr, Expression filterExp, int fieldCount, int operationCount) {
+ private void writeKeyAttr(
+ Policy policy,
+ Key key,
+ BatchAttr attr,
+ Expression filterExp,
+ int fieldCount,
+ int operationCount
+ ) {
// Write all header data except total size which must be written last.
dataBuffer[8] = MSG_REMAINING_HEADER_SIZE; // Message header length.
dataBuffer[9] = (byte)attr.readAttr;
@@ -2048,19 +2529,16 @@ private final void writeKeyAttr(Key key, BatchAttr attr, Expression filterExp, i
Buffer.shortToBytes(operationCount, dataBuffer, 28);
dataOffset = MSG_TOTAL_HEADER_SIZE;
- writeKey(key);
-
- if (attr.sendKey) {
- writeField(key.userKey, FieldType.KEY);
- }
+ writeKey(policy, key, attr.hasWrite);
if (filterExp != null) {
filterExp.write(this);
}
}
- private final void writeKey(Policy policy, Key key) {
+ private void writeKey(Policy policy, Key key, boolean sendDeadline) {
writeKey(key);
+ writeTxn(policy.txn, sendDeadline);
if (policy.sendKey) {
writeField(key.userKey, FieldType.KEY);
@@ -2157,7 +2635,49 @@ private final void writeOperation(Operation.Type operation) {
dataBuffer[dataOffset++] = 0;
}
- private final void writeField(Value value, int type) {
+ private int sizeTxn(Key key, Txn txn, boolean hasWrite) {
+ int fieldCount = 0;
+
+ if (txn != null) {
+ dataOffset += 8 + FIELD_HEADER_SIZE;
+ fieldCount++;
+
+ version = txn.getReadVersion(key);
+
+ if (version != null) {
+ dataOffset += 7 + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+
+ if (hasWrite && txn.getDeadline() != 0) {
+ dataOffset += 4 + FIELD_HEADER_SIZE;
+ fieldCount++;
+ }
+ }
+ return fieldCount;
+ }
+
+ private void writeTxn(Txn txn, boolean sendDeadline) {
+ if (txn != null) {
+ writeFieldLE(txn.getId(), FieldType.MRT_ID);
+
+ if (version != null) {
+ writeFieldVersion(version);
+ }
+
+ if (sendDeadline && txn.getDeadline() != 0) {
+ writeFieldLE(txn.getDeadline(), FieldType.MRT_DEADLINE);
+ }
+ }
+ }
+
+ private void writeFieldVersion(long ver) {
+ writeFieldHeader(7, FieldType.RECORD_VERSION);
+ Buffer.longToVersionBytes(ver, dataBuffer, dataOffset);
+ dataOffset += 7;
+ }
+
+ private void writeField(Value value, int type) {
int offset = dataOffset + FIELD_HEADER_SIZE;
dataBuffer[offset++] = (byte)value.getType();
int len = value.write(dataBuffer, offset) + 1;
@@ -2165,31 +2685,43 @@ private final void writeField(Value value, int type) {
dataOffset += len;
}
- private final void writeField(String str, int type) {
+ private void writeField(String str, int type) {
int len = Buffer.stringToUtf8(str, dataBuffer, dataOffset + FIELD_HEADER_SIZE);
writeFieldHeader(len, type);
dataOffset += len;
}
- private final void writeField(byte[] bytes, int type) {
+ private void writeField(byte[] bytes, int type) {
System.arraycopy(bytes, 0, dataBuffer, dataOffset + FIELD_HEADER_SIZE, bytes.length);
writeFieldHeader(bytes.length, type);
dataOffset += bytes.length;
}
- private final void writeField(int val, int type) {
+ private void writeField(int val, int type) {
writeFieldHeader(4, type);
Buffer.intToBytes(val, dataBuffer, dataOffset);
dataOffset += 4;
}
- private final void writeField(long val, int type) {
+ private void writeFieldLE(int val, int type) {
+ writeFieldHeader(4, type);
+ Buffer.intToLittleBytes(val, dataBuffer, dataOffset);
+ dataOffset += 4;
+ }
+
+ private void writeField(long val, int type) {
writeFieldHeader(8, type);
Buffer.longToBytes(val, dataBuffer, dataOffset);
dataOffset += 8;
}
- private final void writeFieldHeader(int size, int type) {
+ private void writeFieldLE(long val, int type) {
+ writeFieldHeader(8, type);
+ Buffer.longToLittleBytes(val, dataBuffer, dataOffset);
+ dataOffset += 8;
+ }
+
+ private void writeFieldHeader(int size, int type) {
Buffer.intToBytes(size+1, dataBuffer, dataOffset);
dataOffset += 4;
dataBuffer[dataOffset++] = (byte)type;
@@ -2211,9 +2743,8 @@ protected final void end() {
private final void compress(Policy policy) {
if (policy.compress && dataOffset > COMPRESS_THRESHOLD) {
- Deflater def = new Deflater();
+ Deflater def = new Deflater(Deflater.BEST_SPEED);
try {
- def.setLevel(Deflater.BEST_SPEED);
def.setInput(dataBuffer, 0, dataOffset);
def.finish();
@@ -2297,6 +2828,24 @@ protected final Key parseKey(int fieldCount, BVal bval) {
return new Key(namespace, digest, setName, userKey);
}
+ public Long parseVersion(int fieldCount) {
+ Long version = null;
+
+ for (int i = 0; i < fieldCount; i++) {
+ int len = Buffer.bytesToInt(dataBuffer, dataOffset);
+ dataOffset += 4;
+
+ int type = dataBuffer[dataOffset++];
+ int size = len - 1;
+
+ if (type == FieldType.RECORD_VERSION && size == 7) {
+ version = Buffer.versionBytesToLong(dataBuffer, dataOffset);
+ }
+ dataOffset += size;
+ }
+ return version;
+ }
+
protected final Record parseRecord(
int opCount,
int generation,
@@ -2352,77 +2901,4 @@ public static boolean batchInDoubt(boolean isWrite, int commandSentCounter) {
public static class OpResults extends ArrayList
- * If threadPool is shared, threadPool will not be shutdown when the client instance is
- * closed. This shared threadPool should be shutdown manually before the program
- * terminates. Shutdown is recommended, but not absolutely required if threadPool is
- * constructed to use daemon threads.
- *
- * Default: false
- */
- private final boolean sharedThreadPool;
-
- /**
- * Underlying thread pool used in synchronous batch, scan, and query commands. These commands
- * are often sent to multiple server nodes in parallel threads. A thread pool improves
- * performance because threads do not have to be created/destroyed for each command.
- * The default, null, indicates that the following daemon thread pool will be used:
- *
- * Daemon threads automatically terminate when the program terminates.
- *
- * Default: null (use Executors.newCachedThreadPool)
- */
- private final ExecutorService threadPool;
-
- /**
- * Upper limit of proxy server connection.
- */
- private static final int MAX_CONNECTIONS = 8;
-
- private static final String NotSupported = "Method not supported in proxy client: ";
-
- //-------------------------------------------------------
- // Member variables.
- //-------------------------------------------------------
-
- /**
- * Default read policy that is used when read command policy is null.
- */
- public final Policy readPolicyDefault;
-
- /**
- * Default write policy that is used when write command policy is null.
- */
- public final WritePolicy writePolicyDefault;
-
- /**
- * Default scan policy that is used when scan command policy is null.
- */
- public final ScanPolicy scanPolicyDefault;
-
- /**
- * Default query policy that is used when query command policy is null.
- */
- public final QueryPolicy queryPolicyDefault;
-
- /**
- * Default parent policy used in batch read commands. Parent policy fields
- * include socketTimeout, totalTimeout, maxRetries, etc...
- */
- public final BatchPolicy batchPolicyDefault;
-
- /**
- * Default parent policy used in batch write commands. Parent policy fields
- * include socketTimeout, totalTimeout, maxRetries, etc...
- */
- public final BatchPolicy batchParentPolicyWriteDefault;
-
- /**
- * Default write policy used in batch operate commands.
- * Write policy fields include generation, expiration, durableDelete, etc...
- */
- public final BatchWritePolicy batchWritePolicyDefault;
-
- /**
- * Default delete policy used in batch delete commands.
- */
- public final BatchDeletePolicy batchDeletePolicyDefault;
-
- /**
- * Default user defined function policy used in batch UDF execute commands.
- */
- public final BatchUDFPolicy batchUDFPolicyDefault;
-
- /**
- * Default info policy that is used when info command policy is null.
- */
- public final InfoPolicy infoPolicyDefault;
-
- private final WritePolicy operatePolicyReadDefault;
- private final AuthTokenManager authTokenManager;
- private final GrpcCallExecutor executor;
-
- //-------------------------------------------------------
- // Constructors
- //-------------------------------------------------------
-
- /**
- * Initialize proxy client with suitable hosts to seed the cluster map.
- * The client policy is used to set defaults and size internal data structures.
- *
- * In most cases, only one host is necessary to seed the cluster. The remaining hosts
- * are added as future seeds in case of a complete network failure.
- *
- * @param policy client configuration parameters, pass in null for defaults
- * @param hosts array of potential hosts to seed the cluster
- * @throws AerospikeException if all host connections fail
- */
- public AerospikeClientProxy(ClientPolicy policy, Host... hosts) {
- if (policy == null) {
- policy = new ClientPolicy();
- policy.minConnsPerNode = 1;
- policy.maxConnsPerNode = 8;
- policy.asyncMaxConnsPerNode = 8;
- policy.timeout = 5000;
- }
-
- if (policy.threadPool == null) {
- threadPool = Executors.newCachedThreadPool(new ThreadDaemonFactory());
- }
- else {
- threadPool = policy.threadPool;
- }
- sharedThreadPool = policy.sharedThreadPool;
-
- this.readPolicyDefault = policy.readPolicyDefault;
- this.writePolicyDefault = policy.writePolicyDefault;
- this.scanPolicyDefault = policy.scanPolicyDefault;
- this.queryPolicyDefault = policy.queryPolicyDefault;
- this.batchPolicyDefault = policy.batchPolicyDefault;
- this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault;
- this.batchWritePolicyDefault = policy.batchWritePolicyDefault;
- this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault;
- this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault;
- this.infoPolicyDefault = policy.infoPolicyDefault;
- this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault);
-
- GrpcChannelProvider channelProvider = new GrpcChannelProvider();
-
- if (policy.user != null || policy.password != null) {
- authTokenManager = new AuthTokenManager(policy, channelProvider);
- }
- else {
- authTokenManager = null;
- }
-
- try {
- // The gRPC client policy transformed from the client policy.
- GrpcClientPolicy grpcClientPolicy = toGrpcClientPolicy(policy);
- executor = new GrpcCallExecutor(grpcClientPolicy, authTokenManager, hosts);
- channelProvider.setCallExecutor(executor);
-
- // Warmup after the call executor in the channel provider has
- // been set. The channel provider is used to fetch auth tokens
- // required for the warm up calls.
- executor.warmupChannels();
- }
- catch (Throwable e) {
- if(authTokenManager != null) {
- authTokenManager.close();
- }
- throw e;
- }
- }
-
- /**
- * Return client version string.
- */
- private static String getVersion() {
- final Properties properties = new Properties();
- String version = null;
-
- try {
- properties.load(AerospikeClientProxy.class.getClassLoader().getResourceAsStream("project.properties"));
- version = properties.getProperty("version");
- }
- catch (Exception e) {
- Log.warn("Failed to retrieve client version: " + Util.getErrorMessage(e));
- }
- return version;
- }
-
- //-------------------------------------------------------
- // Default Policies
- //-------------------------------------------------------
-
- /**
- * Return read policy default. Use when the policy will not be modified.
- */
- public final Policy getReadPolicyDefault() {
- return readPolicyDefault;
- }
-
- /**
- * Copy read policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final Policy copyReadPolicyDefault() {
- return new Policy(readPolicyDefault);
- }
-
- /**
- * Return write policy default. Use when the policy will not be modified.
- */
- public final WritePolicy getWritePolicyDefault() {
- return writePolicyDefault;
- }
-
- /**
- * Copy write policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final WritePolicy copyWritePolicyDefault() {
- return new WritePolicy(writePolicyDefault);
- }
-
- /**
- * Return scan policy default. Use when the policy will not be modified.
- */
- public final ScanPolicy getScanPolicyDefault() {
- return scanPolicyDefault;
- }
-
- /**
- * Copy scan policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final ScanPolicy copyScanPolicyDefault() {
- return new ScanPolicy(scanPolicyDefault);
- }
-
- /**
- * Return query policy default. Use when the policy will not be modified.
- */
- public final QueryPolicy getQueryPolicyDefault() {
- return queryPolicyDefault;
- }
-
- /**
- * Copy query policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final QueryPolicy copyQueryPolicyDefault() {
- return new QueryPolicy(queryPolicyDefault);
- }
-
- /**
- * Return batch header read policy default. Use when the policy will not be modified.
- */
- public final BatchPolicy getBatchPolicyDefault() {
- return batchPolicyDefault;
- }
-
- /**
- * Copy batch header read policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final BatchPolicy copyBatchPolicyDefault() {
- return new BatchPolicy(batchPolicyDefault);
- }
-
- /**
- * Return batch header write policy default. Use when the policy will not be modified.
- */
- public final BatchPolicy getBatchParentPolicyWriteDefault() {
- return batchParentPolicyWriteDefault;
- }
-
- /**
- * Copy batch header write policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final BatchPolicy copyBatchParentPolicyWriteDefault() {
- return new BatchPolicy(batchParentPolicyWriteDefault);
- }
-
- /**
- * Return batch detail write policy default. Use when the policy will not be modified.
- */
- public final BatchWritePolicy getBatchWritePolicyDefault() {
- return batchWritePolicyDefault;
- }
-
- /**
- * Copy batch detail write policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final BatchWritePolicy copyBatchWritePolicyDefault() {
- return new BatchWritePolicy(batchWritePolicyDefault);
- }
-
- /**
- * Return batch detail delete policy default. Use when the policy will not be modified.
- */
- public final BatchDeletePolicy getBatchDeletePolicyDefault() {
- return batchDeletePolicyDefault;
- }
-
- /**
- * Copy batch detail delete policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final BatchDeletePolicy copyBatchDeletePolicyDefault() {
- return new BatchDeletePolicy(batchDeletePolicyDefault);
- }
-
- /**
- * Return batch detail UDF policy default. Use when the policy will not be modified.
- */
- public final BatchUDFPolicy getBatchUDFPolicyDefault() {
- return batchUDFPolicyDefault;
- }
-
- /**
- * Copy batch detail UDF policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final BatchUDFPolicy copyBatchUDFPolicyDefault() {
- return new BatchUDFPolicy(batchUDFPolicyDefault);
- }
-
- /**
- * Return info command policy default. Use when the policy will not be modified.
- */
- public final InfoPolicy getInfoPolicyDefault() {
- return infoPolicyDefault;
- }
-
- /**
- * Copy info command policy default. Use when the policy will be modified for use in a specific transaction.
- */
- public final InfoPolicy copyInfoPolicyDefault() {
- return new InfoPolicy(infoPolicyDefault);
- }
-
- //-------------------------------------------------------
- // Client Management
- //-------------------------------------------------------
-
- /**
- * Close GRPC executor and associated resources. The client instance should not
- * be used after this call.
- */
- @Override
- public void close() {
- try {
- executor.close();
- }
- catch (Throwable e) {
- Log.warn("Failed to close grpcCallExecutor: " + Util.getErrorMessage(e));
- }
-
- try {
- if (authTokenManager != null) {
- authTokenManager.close();
- }
- }
- catch (Throwable e) {
- Log.warn("Failed to close authTokenManager: " + Util.getErrorMessage(e));
- }
-
- if (! sharedThreadPool) {
- // Shutdown synchronous thread pool.
- threadPool.shutdown();
- }
- }
-
- /**
- * This method will always return true in the proxy client.
- */
- @Override
- public boolean isConnected() {
- return executor != null;
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public Node[] getNodes() {
- throw new AerospikeException(NotSupported + "getNodes");
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public List getNodeNames() {
- throw new AerospikeException(NotSupported + "getNodeNames");
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public Node getNode(String nodeName) {
- throw new AerospikeException(NotSupported + "getNode");
- }
-
- /**
- * Not supported in proxy client.
- */
- public final void enableMetrics(MetricsPolicy policy) {
- throw new AerospikeException(NotSupported + "enableMetrics");
- }
-
- /**
- * Not supported in proxy client.
- */
- public final void disableMetrics() {
- throw new AerospikeException(NotSupported + "disableMetrics");
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public ClusterStats getClusterStats() {
- throw new AerospikeException(NotSupported + "getClusterStats");
- }
-
- /**
- * Not supported in proxy client.
- */
- public final void getClusterStats(ClusterStatsListener listener) {
- throw new AerospikeException(NotSupported + "getClusterStats");
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public Cluster getCluster() {
- throw new AerospikeException(NotSupported + "getCluster");
- }
-
- //-------------------------------------------------------
- // Write Record Operations
- //-------------------------------------------------------
-
- /**
- * Write record bin(s).
- *
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param bins array of bin name/value pairs
- * @throws AerospikeException if write fails
- */
- @Override
- public void put(WritePolicy policy, Key key, Bin... bins) {
- CompletableFuture future = new CompletableFuture<>();
- WriteListener listener = prepareWriteListener(future);
- put(null, listener, policy, key, bins);
- getFuture(future);
- }
-
- /**
- * Asynchronously write record bin(s).
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results, pass in null for fire and forget
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param bins array of bin name/value pairs
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void put(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key, Bin... bins) {
- if (policy == null) {
- policy = writePolicyDefault;
- }
- WriteCommandProxy command = new WriteCommandProxy(executor, listener, policy, key, bins, Operation.Type.WRITE);
- command.execute();
- }
-
- //-------------------------------------------------------
- // String Operations
- //-------------------------------------------------------
-
- /**
- * Append bin string values to existing record bin values.
- * This call only works for string values.
- *
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param bins array of bin name/value pairs
- * @throws AerospikeException if append fails
- */
- @Override
- public void append(WritePolicy policy, Key key, Bin... bins) {
- CompletableFuture future = new CompletableFuture<>();
- WriteListener listener = prepareWriteListener(future);
- append(null, listener, policy, key, bins);
- getFuture(future);
- }
-
- /**
- * Asynchronously append bin string values to existing record bin values.
- * This call only works for string values.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results, pass in null for fire and forget
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param bins array of bin name/value pairs
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void append(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key, Bin... bins) {
- if (policy == null) {
- policy = writePolicyDefault;
- }
- WriteCommandProxy command = new WriteCommandProxy(executor, listener, policy, key, bins, Operation.Type.APPEND);
- command.execute();
- }
-
- /**
- * Prepend bin string values to existing record bin values.
- * This call works only for string values.
- *
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param bins array of bin name/value pairs
- * @throws AerospikeException if prepend fails
- */
- @Override
- public void prepend(WritePolicy policy, Key key, Bin... bins) {
- CompletableFuture future = new CompletableFuture<>();
- WriteListener listener = prepareWriteListener(future);
- prepend(null, listener, policy, key, bins);
- getFuture(future);
- }
-
- /**
- * Asynchronously prepend bin string values to existing record bin values.
- * This call only works for string values.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results, pass in null for fire and forget
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param bins array of bin name/value pairs
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void prepend(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key, Bin... bins) {
- if (policy == null) {
- policy = writePolicyDefault;
- }
- WriteCommandProxy command = new WriteCommandProxy(executor, listener, policy, key, bins, Operation.Type.PREPEND);
- command.execute();
- }
-
- //-------------------------------------------------------
- // Arithmetic Operations
- //-------------------------------------------------------
-
- /**
- * Add integer/double bin values to existing record bin values.
- *
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param bins array of bin name/value pairs
- * @throws AerospikeException if add fails
- */
- @Override
- public void add(WritePolicy policy, Key key, Bin... bins) {
- CompletableFuture future = new CompletableFuture<>();
- WriteListener listener = prepareWriteListener(future);
- add(null, listener, policy, key, bins);
- getFuture(future);
- }
-
- /**
- * Asynchronously add integer/double bin values to existing record bin values.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results, pass in null for fire and forget
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param bins array of bin name/value pairs
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void add(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key, Bin... bins) {
- if (policy == null) {
- policy = writePolicyDefault;
- }
- WriteCommandProxy command = new WriteCommandProxy(executor, listener, policy, key, bins, Operation.Type.ADD);
- command.execute();
- }
-
- //-------------------------------------------------------
- // Delete Operations
- //-------------------------------------------------------
-
- /**
- * Delete record for specified key.
- *
- * @param policy delete configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @return whether record existed on server before deletion
- * @throws AerospikeException if delete fails
- */
- @Override
- public boolean delete(WritePolicy policy, Key key) {
- CompletableFuture future = new CompletableFuture<>();
- DeleteListener listener = prepareDeleteListener(future);
- delete(null, listener, policy, key);
- return getFuture(future);
- }
-
- /**
- * Asynchronously delete record for specified key.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results, pass in null for fire and forget
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void delete(EventLoop eventLoop, DeleteListener listener, WritePolicy policy, Key key) {
- if (policy == null) {
- policy = writePolicyDefault;
- }
- DeleteCommandProxy command = new DeleteCommandProxy(executor, listener, policy, key);
- command.execute();
- }
-
- /**
- * Delete records for specified keys. If a key is not found, the corresponding result
- * {@link BatchRecord#resultCode} will be {@link ResultCode#KEY_NOT_FOUND_ERROR}.
- *
- * @param batchPolicy batch configuration parameters, pass in null for defaults
- * @param deletePolicy delete configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @throws AerospikeException.BatchRecordArray which contains results for keys that did complete
- */
- @Override
- public BatchResults delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Key[] keys) {
- CompletableFuture future = new CompletableFuture<>();
- BatchRecordArrayListener listener = prepareBatchRecordArrayListener(future);
- delete(null, listener, batchPolicy, deletePolicy, keys);
- return getFuture(future);
- }
-
- /**
- * Asynchronously delete records for specified keys.
- *
- * If a key is not found, the corresponding result {@link BatchRecord#resultCode} will be
- * {@link ResultCode#KEY_NOT_FOUND_ERROR}.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param batchPolicy batch configuration parameters, pass in null for defaults
- * @param deletePolicy delete configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void delete(
- EventLoop eventLoop,
- BatchRecordArrayListener listener,
- BatchPolicy batchPolicy,
- BatchDeletePolicy deletePolicy,
- Key[] keys
- ) {
- if (keys.length == 0) {
- listener.onSuccess(new BatchRecord[0], true);
- return;
- }
-
- if (batchPolicy == null) {
- batchPolicy = batchParentPolicyWriteDefault;
- }
-
- if (deletePolicy == null) {
- deletePolicy = batchDeletePolicyDefault;
- }
-
- BatchAttr attr = new BatchAttr();
- attr.setDelete(deletePolicy);
-
- CommandProxy command = new BatchProxy.OperateRecordArrayCommand(executor,
- batchPolicy, keys, null, listener, attr);
-
- command.execute();
- }
-
- /**
- * Asynchronously delete records for specified keys.
- *
- * Each record result is returned in separate onRecord() calls.
- * If a key is not found, the corresponding result {@link BatchRecord#resultCode} will be
- * {@link ResultCode#KEY_NOT_FOUND_ERROR}.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param batchPolicy batch configuration parameters, pass in null for defaults
- * @param deletePolicy delete configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void delete(
- EventLoop eventLoop,
- BatchRecordSequenceListener listener,
- BatchPolicy batchPolicy,
- BatchDeletePolicy deletePolicy,
- Key[] keys
- ) {
- if (keys.length == 0) {
- listener.onSuccess();
- return;
- }
-
- if (batchPolicy == null) {
- batchPolicy = batchParentPolicyWriteDefault;
- }
-
- if (deletePolicy == null) {
- deletePolicy = batchDeletePolicyDefault;
- }
-
- BatchAttr attr = new BatchAttr();
- attr.setDelete(deletePolicy);
-
- CommandProxy command = new BatchProxy.OperateRecordSequenceCommand(executor,
- batchPolicy, keys, null, listener, attr);
-
- command.execute();
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public void truncate(InfoPolicy policy, String ns, String set, Calendar beforeLastUpdate) {
- throw new AerospikeException(NotSupported + "truncate");
- }
-
- //-------------------------------------------------------
- // Touch Operations
- //-------------------------------------------------------
-
- /**
- * Reset record's time to expiration using the policy's expiration.
- * Fail if the record does not exist.
- *
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @throws AerospikeException if touch fails
- */
- @Override
- public void touch(WritePolicy policy, Key key) {
- CompletableFuture future = new CompletableFuture<>();
- WriteListener listener = prepareWriteListener(future);
- touch(null, listener, policy, key);
- getFuture(future);
- }
-
- /**
- * Asynchronously reset record's time to expiration using the policy's expiration.
- * Fail if the record does not exist.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results, pass in null for fire and forget
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void touch(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key) {
- if (policy == null) {
- policy = writePolicyDefault;
- }
- TouchCommandProxy command = new TouchCommandProxy(executor, listener, policy, key);
- command.execute();
- }
-
- //-------------------------------------------------------
- // Existence-Check Operations
- //-------------------------------------------------------
-
- /**
- * Determine if a record key exists.
- *
- * @param policy generic configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @return whether record exists or not
- * @throws AerospikeException if command fails
- */
- @Override
- public boolean exists(Policy policy, Key key) {
- CompletableFuture future = new CompletableFuture<>();
- ExistsListener listener = prepareExistsListener(future);
- exists(null, listener, policy, key);
- return getFuture(future);
- }
-
- /**
- * Asynchronously determine if a record key exists.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy generic configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void exists(EventLoop eventLoop, ExistsListener listener, Policy policy, Key key) {
- if (policy == null) {
- policy = readPolicyDefault;
- }
- ExistsCommandProxy command = new ExistsCommandProxy(executor, listener, policy, key);
- command.execute();
- }
-
- /**
- * Check if multiple record keys exist in one batch call.
- * The returned boolean array is in positional order with the original key array order.
- *
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @return array key/existence status pairs
- * @throws AerospikeException.BatchExists which contains results for keys that did complete
- */
- @Override
- public boolean[] exists(BatchPolicy policy, Key[] keys) {
- CompletableFuture future = new CompletableFuture<>();
- ExistsArrayListener listener = prepareExistsArrayListener(future);
- exists(null, listener, policy, keys);
- return getFuture(future);
- }
-
- /**
- * Asynchronously check if multiple record keys exist in one batch call.
- *
- * The returned boolean array is in positional order with the original key array order.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys unique record identifiers
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void exists(EventLoop eventLoop, ExistsArrayListener listener, BatchPolicy policy, Key[] keys) {
- if (keys.length == 0) {
- listener.onSuccess(keys, new boolean[0]);
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CommandProxy command = new BatchProxy.ExistsArrayCommand(executor, policy, listener, keys);
- command.execute();
- }
-
- /**
- * Asynchronously check if multiple record keys exist in one batch call.
- *
- * Each key's result is returned in separate onExists() calls.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys unique record identifiers
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void exists(EventLoop eventLoop, ExistsSequenceListener listener, BatchPolicy policy, Key[] keys) {
- if (keys.length == 0) {
- listener.onSuccess();
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CommandProxy command = new BatchProxy.ExistsSequenceCommand(executor, policy, listener, keys);
- command.execute();
- }
-
- //-------------------------------------------------------
- // Read Record Operations
- //-------------------------------------------------------
-
- /**
- * Read entire record for specified key.
- *
- * @param policy generic configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @return if found, return record instance. If not found, return null.
- * @throws AerospikeException if read fails
- */
- @Override
- public Record get(Policy policy, Key key) {
- return get(policy, key, (String[])null);
- }
-
- /**
- * Asynchronously read entire record for specified key.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy generic configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, RecordListener listener, Policy policy, Key key) {
- get(eventLoop, listener, policy, key, (String[])null);
- }
-
- /**
- * Read record header and bins for specified key.
- *
- * @param policy generic configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param binNames bins to retrieve
- * @return if found, return record instance. If not found, return null.
- * @throws AerospikeException if read fails
- */
- @Override
- public Record get(Policy policy, Key key, String... binNames) {
- CompletableFuture future = new CompletableFuture<>();
- RecordListener listener = prepareRecordListener(future);
- get(null, listener, policy, key, binNames);
- return getFuture(future);
- }
-
- /**
- * Asynchronously read record header and bins for specified key.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy generic configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param binNames bins to retrieve
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, RecordListener listener, Policy policy, Key key, String... binNames) {
- if (policy == null) {
- policy = readPolicyDefault;
- }
- ReadCommandProxy command = new ReadCommandProxy(executor, listener, policy, key, binNames);
- command.execute();
- }
-
- /**
- * Read record generation and expiration only for specified key. Bins are not read.
- *
- * @param policy generic configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @return if found, return record instance. If not found, return null.
- * @throws AerospikeException if read fails
- */
- @Override
- public Record getHeader(Policy policy, Key key) {
- CompletableFuture future = new CompletableFuture<>();
- RecordListener listener = prepareRecordListener(future);
- getHeader(null, listener, policy, key);
- return getFuture(future);
- }
-
- /**
- * Asynchronously read record generation and expiration only for specified key. Bins are not read.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy generic configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void getHeader(EventLoop eventLoop, RecordListener listener, Policy policy, Key key) {
- if (policy == null) {
- policy = readPolicyDefault;
- }
- ReadHeaderCommandProxy command = new ReadHeaderCommandProxy(executor, listener, policy, key);
- command.execute();
- }
-
- //-------------------------------------------------------
- // Batch Read Operations
- //-------------------------------------------------------
-
- /**
- * Read multiple records for specified batch keys in one batch call.
- * This method allows different namespaces/bins to be requested for each key in the batch.
- * The returned records are located in the same list.
- * If the BatchRead key field is not found, the corresponding record field will be null.
- *
- * @param policy batch configuration parameters, pass in null for defaults
- * @param records list of unique record identifiers and the bins to retrieve.
- * The returned records are located in the same list.
- * @return true if all batch key requests succeeded
- * @throws AerospikeException if read fails
- */
- @Override
- public boolean get(BatchPolicy policy, List records) {
- if (records.size() == 0) {
- return true;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CompletableFuture future = new CompletableFuture<>();
- BatchListListenerSync listener = prepareBatchListListenerSync(future);
-
- CommandProxy command = new BatchProxy.ReadListCommandSync(executor, policy, listener, records);
- command.execute();
-
- return getFuture(future);
- }
-
- /**
- * Asynchronously read multiple records for specified batch keys in one batch call.
- *
- * This method allows different namespaces/bins to be requested for each key in the batch.
- * The returned records are located in the same list.
- * If the BatchRead key field is not found, the corresponding record field will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param records list of unique record identifiers and the bins to retrieve.
- * The returned records are located in the same list.
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, BatchListListener listener, BatchPolicy policy, List records) {
- if (records.size() == 0) {
- listener.onSuccess(records);
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
- CommandProxy command = new BatchProxy.ReadListCommand(executor, policy, listener, records);
- command.execute();
- }
-
- /**
- * Asynchronously read multiple records for specified batch keys in one batch call.
- *
- * This method allows different namespaces/bins to be requested for each key in the batch.
- * Each record result is returned in separate onRecord() calls.
- * If the BatchRead key field is not found, the corresponding record field will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param records list of unique record identifiers and the bins to retrieve.
- * The returned records are located in the same list.
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, BatchSequenceListener listener, BatchPolicy policy, List records) {
- if (records.size() == 0) {
- listener.onSuccess();
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CommandProxy command = new BatchProxy.ReadSequenceCommand(executor, policy, listener, records);
- command.execute();
- }
-
- /**
- * Read multiple records for specified keys in one batch call.
- * The returned records are in positional order with the original key array order.
- * If a key is not found, the positional record will be null.
- *
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @return array of records
- * @throws AerospikeException.BatchRecords which contains results for keys that did complete
- */
- @Override
- public Record[] get(BatchPolicy policy, Key[] keys) {
- CompletableFuture future = new CompletableFuture<>();
- RecordArrayListener listener = prepareRecordArrayListener(future);
- get(null, listener, policy, keys);
- return getFuture(future);
- }
-
- /**
- * Asynchronously read multiple records for specified keys in one batch call.
- *
- * The returned records are in positional order with the original key array order.
- * If a key is not found, the positional record will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, RecordArrayListener listener, BatchPolicy policy, Key[] keys) {
- if (keys.length == 0) {
- listener.onSuccess(keys, new Record[0]);
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CommandProxy command = new BatchProxy.GetArrayCommand(executor, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false);
- command.execute();
- }
-
- /**
- * Asynchronously read multiple records for specified keys in one batch call.
- *
- * Each record result is returned in separate onRecord() calls.
- * If a key is not found, the record will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, RecordSequenceListener listener, BatchPolicy policy, Key[] keys) {
- if (keys.length == 0) {
- listener.onSuccess();
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CommandProxy command = new BatchProxy.GetSequenceCommand(executor, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false);
- command.execute();
- }
-
- /**
- * Read multiple record headers and bins for specified keys in one batch call.
- * The returned records are in positional order with the original key array order.
- * If a key is not found, the positional record will be null.
- *
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @param binNames array of bins to retrieve
- * @return array of records
- * @throws AerospikeException.BatchRecords which contains results for keys that did complete
- */
- @Override
- public Record[] get(BatchPolicy policy, Key[] keys, String... binNames) {
- CompletableFuture future = new CompletableFuture<>();
- RecordArrayListener listener = prepareRecordArrayListener(future);
- get(null, listener, policy, keys, binNames);
- return getFuture(future);
- }
-
- /**
- * Asynchronously read multiple record headers and bins for specified keys in one batch call.
- *
- * The returned records are in positional order with the original key array order.
- * If a key is not found, the positional record will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @param binNames array of bins to retrieve
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, RecordArrayListener listener, BatchPolicy policy, Key[] keys, String... binNames) {
- if (keys.length == 0) {
- listener.onSuccess(keys, new Record[0]);
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- int readAttr = (binNames == null || binNames.length == 0)? Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ;
-
- CommandProxy command = new BatchProxy.GetArrayCommand(executor, policy, listener, keys, binNames, null, readAttr, false);
- command.execute();
- }
-
- /**
- * Asynchronously read multiple record headers and bins for specified keys in one batch call.
- *
- * Each record result is returned in separate onRecord() calls.
- * If a key is not found, the record will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @param binNames array of bins to retrieve
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, RecordSequenceListener listener, BatchPolicy policy, Key[] keys, String... binNames) {
- if (keys.length == 0) {
- listener.onSuccess();
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- int readAttr = (binNames == null || binNames.length == 0)? Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ;
-
- CommandProxy command = new BatchProxy.GetSequenceCommand(executor, policy, listener, keys, binNames, null, readAttr, false);
- command.execute();
- }
-
- /**
- * Read multiple records for specified keys using read operations in one batch call.
- * The returned records are in positional order with the original key array order.
- * If a key is not found, the positional record will be null.
- *
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @param ops array of read operations on record
- * @return array of records
- * @throws AerospikeException.BatchRecords which contains results for keys that did complete
- */
- @Override
- public Record[] get(BatchPolicy policy, Key[] keys, Operation... ops) {
- CompletableFuture future = new CompletableFuture<>();
- RecordArrayListener listener = prepareRecordArrayListener(future);
- get(null, listener, policy, keys, ops);
- return getFuture(future);
- }
-
- /**
- * Asynchronously read multiple records for specified keys using read operations in one batch call.
- *
- * The returned records are in positional order with the original key array order.
- * If a key is not found, the positional record will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @param ops array of read operations on record
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, RecordArrayListener listener, BatchPolicy policy, Key[] keys, Operation... ops) {
- if (keys.length == 0) {
- listener.onSuccess(keys, new Record[0]);
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CommandProxy command = new BatchProxy.GetArrayCommand(executor, policy, listener, keys, null, ops, Command.INFO1_READ, true);
- command.execute();
- }
-
- /**
- * Asynchronously read multiple records for specified keys using read operations in one batch call.
- *
- * Each record result is returned in separate onRecord() calls.
- * If a key is not found, the record will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @param ops array of read operations on record
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void get(EventLoop eventLoop, RecordSequenceListener listener, BatchPolicy policy, Key[] keys, Operation... ops) {
- if (keys.length == 0) {
- listener.onSuccess();
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CommandProxy command = new BatchProxy.GetSequenceCommand(executor, policy, listener, keys, null, ops, Command.INFO1_READ, true);
- command.execute();
- }
-
- /**
- * Read multiple record header data for specified keys in one batch call.
- * The returned records are in positional order with the original key array order.
- * If a key is not found, the positional record will be null.
- *
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @return array of records
- * @throws AerospikeException.BatchRecords which contains results for keys that did complete
- */
- @Override
- public Record[] getHeader(BatchPolicy policy, Key[] keys) {
- CompletableFuture future = new CompletableFuture<>();
- RecordArrayListener listener = prepareRecordArrayListener(future);
- getHeader(null, listener, policy, keys);
- return getFuture(future);
- }
-
- /**
- * Asynchronously read multiple record header data for specified keys in one batch call.
- *
- * The returned records are in positional order with the original key array order.
- * If a key is not found, the positional record will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void getHeader(EventLoop eventLoop, RecordArrayListener listener, BatchPolicy policy, Key[] keys) {
- if (keys.length == 0) {
- listener.onSuccess(keys, new Record[0]);
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CommandProxy command = new BatchProxy.GetArrayCommand(executor, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false);
- command.execute();
- }
-
- /**
- * Asynchronously read multiple record header data for specified keys in one batch call.
- *
- * Each record result is returned in separate onRecord() calls.
- * If a key is not found, the record will be null.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void getHeader(EventLoop eventLoop, RecordSequenceListener listener, BatchPolicy policy, Key[] keys) {
- if (keys.length == 0) {
- listener.onSuccess();
- return;
- }
-
- if (policy == null) {
- policy = batchPolicyDefault;
- }
-
- CommandProxy command = new BatchProxy.GetSequenceCommand(executor, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false);
- command.execute();
- }
-
- //-------------------------------------------------------
- // Generic Database Operations
- //-------------------------------------------------------
-
- /**
- * Perform multiple read/write operations on a single key in one batch call.
- * An example would be to add an integer value to an existing record and then
- * read the result, all in one database call.
- *
- * The server executes operations in the same order as the operations array.
- * Both scalar bin operations (Operation) and CDT bin operations (ListOperation,
- * MapOperation) can be performed in same call.
- *
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param operations database operations to perform
- * @return record if there is a read in the operations list
- * @throws AerospikeException if command fails
- */
- @Override
- public Record operate(WritePolicy policy, Key key, Operation... operations) {
- CompletableFuture future = new CompletableFuture<>();
- RecordListener listener = prepareRecordListener(future);
- operate(null, listener, policy, key, operations);
- return getFuture(future);
- }
-
- /**
- * Asynchronously perform multiple read/write operations on a single key in one batch call.
- *
- * An example would be to add an integer value to an existing record and then
- * read the result, all in one database call.
- *
- * The server executes operations in the same order as the operations array.
- * Both scalar bin operations (Operation) and CDT bin operations (ListOperation,
- * MapOperation) can be performed in same call.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results, pass in null for fire and forget
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param operations database operations to perform
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void operate(EventLoop eventLoop, RecordListener listener, WritePolicy policy, Key key, Operation... operations) {
- OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations);
- OperateCommandProxy command = new OperateCommandProxy(executor, listener, args.writePolicy, key, args);
- command.execute();
- }
-
- //-------------------------------------------------------
- // Batch Read/Write Operations
- //-------------------------------------------------------
-
- /**
- * Read/Write multiple records for specified batch keys in one batch call.
- * This method allows different namespaces/bins for each key in the batch.
- * The returned records are located in the same list.
- *
- * {@link BatchRecord} can be {@link BatchRead}, {@link BatchWrite}, {@link BatchDelete} or
- * {@link BatchUDF}.
- *
- * @param policy batch configuration parameters, pass in null for defaults
- * @param records list of unique record identifiers and read/write operations
- * @return true if all batch sub-commands succeeded
- * @throws AerospikeException if command fails
- */
- @Override
- public boolean operate(BatchPolicy policy, List records) {
- CompletableFuture future = new CompletableFuture<>();
- BatchOperateListListener listener = prepareBatchOperateListListener(future);
- operate(null, listener, policy, records);
- return getFuture(future);
- }
-
- /**
- * Asynchronously read/write multiple records for specified batch keys in one batch call.
- *
- * This method allows different namespaces/bins to be requested for each key in the batch.
- * The returned records are located in the same list.
- *
- * {@link BatchRecord} can be {@link BatchRead}, {@link BatchWrite}, {@link BatchDelete} or
- * {@link BatchUDF}.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param records list of unique record identifiers and read/write operations
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void operate(
- EventLoop eventLoop,
- BatchOperateListListener listener,
- BatchPolicy policy,
- List records
- ) {
- if (records.size() == 0) {
- listener.onSuccess(records, true);
- return;
- }
-
- if (policy == null) {
- policy = batchParentPolicyWriteDefault;
- }
-
- CommandProxy command = new BatchProxy.OperateListCommand(this, executor, policy, listener, records);
- command.execute();
- }
-
- /**
- * Asynchronously read/write multiple records for specified batch keys in one batch call.
- *
- * This method allows different namespaces/bins to be requested for each key in the batch.
- * Each record result is returned in separate onRecord() calls.
- *
- * {@link BatchRecord} can be {@link BatchRead}, {@link BatchWrite}, {@link BatchDelete} or
- * {@link BatchUDF}.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy batch configuration parameters, pass in null for defaults
- * @param records list of unique record identifiers and read/write operations
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void operate(
- EventLoop eventLoop,
- BatchRecordSequenceListener listener,
- BatchPolicy policy,
- List records
- ) {
- if (records.size() == 0) {
- listener.onSuccess();
- return;
- }
-
- if (policy == null) {
- policy = batchParentPolicyWriteDefault;
- }
-
- CommandProxy command = new BatchProxy.OperateSequenceCommand(this, executor, policy, listener, records);
- command.execute();
- }
-
- /**
- * Perform read/write operations on multiple keys. If a key is not found, the corresponding result
- * {@link BatchRecord#resultCode} will be {@link ResultCode#KEY_NOT_FOUND_ERROR}.
- *
- * @param batchPolicy batch configuration parameters, pass in null for defaults
- * @param writePolicy write configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @param ops
- * read/write operations to perform. {@link Operation#get()} is not allowed because it returns a
- * variable number of bins and makes it difficult (sometimes impossible) to lineup operations
- * with results. Instead, use {@link Operation#get(String)} for each bin name.
- * @throws AerospikeException.BatchRecordArray which contains results for keys that did complete
- */
- @Override
- public BatchResults operate(
- BatchPolicy batchPolicy,
- BatchWritePolicy writePolicy,
- Key[] keys,
- Operation... ops
- ) {
- CompletableFuture future = new CompletableFuture<>();
- BatchRecordArrayListener listener = prepareBatchRecordArrayListener(future);
- operate(null, listener, batchPolicy, writePolicy, keys, ops);
- return getFuture(future);
- }
-
- /**
- * Asynchronously perform read/write operations on multiple keys.
- *
- * If a key is not found, the corresponding result {@link BatchRecord#resultCode} will be
- * {@link ResultCode#KEY_NOT_FOUND_ERROR}.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param batchPolicy batch configuration parameters, pass in null for defaults
- * @param writePolicy write configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @param ops
- * read/write operations to perform. {@link Operation#get()} is not allowed because it returns a
- * variable number of bins and makes it difficult (sometimes impossible) to lineup operations
- * with results. Instead, use {@link Operation#get(String)} for each bin name.
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void operate(
- EventLoop eventLoop,
- BatchRecordArrayListener listener,
- BatchPolicy batchPolicy,
- BatchWritePolicy writePolicy,
- Key[] keys,
- Operation... ops
- ) {
- if (keys.length == 0) {
- listener.onSuccess(new BatchRecord[0], true);
- return;
- }
-
- if (batchPolicy == null) {
- batchPolicy = batchParentPolicyWriteDefault;
- }
-
- if (writePolicy == null) {
- writePolicy = batchWritePolicyDefault;
- }
-
- BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops);
-
- CommandProxy command = new BatchProxy.OperateRecordArrayCommand(executor,
- batchPolicy, keys, ops, listener, attr);
-
- command.execute();
- }
-
- /**
- * Asynchronously perform read/write operations on multiple keys.
- *
- * Each record result is returned in separate onRecord() calls.
- * If a key is not found, the corresponding result {@link BatchRecord#resultCode} will be
- * {@link ResultCode#KEY_NOT_FOUND_ERROR}.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param batchPolicy batch configuration parameters, pass in null for defaults
- * @param writePolicy write configuration parameters, pass in null for defaults
- * @param keys array of unique record identifiers
- * @param ops
- * read/write operations to perform. {@link Operation#get()} is not allowed because it returns a
- * variable number of bins and makes it difficult (sometimes impossible) to lineup operations
- * with results. Instead, use {@link Operation#get(String)} for each bin name.
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void operate(
- EventLoop eventLoop,
- BatchRecordSequenceListener listener,
- BatchPolicy batchPolicy,
- BatchWritePolicy writePolicy,
- Key[] keys,
- Operation... ops
- ) {
- if (keys.length == 0) {
- listener.onSuccess();
- return;
- }
-
- if (batchPolicy == null) {
- batchPolicy = batchParentPolicyWriteDefault;
- }
-
- if (writePolicy == null) {
- writePolicy = batchWritePolicyDefault;
- }
-
- BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops);
-
- CommandProxy command = new BatchProxy.OperateRecordSequenceCommand(executor,
- batchPolicy, keys, ops, listener, attr);
-
- command.execute();
- }
-
- //-------------------------------------------------------
- // Scan Operations
- //-------------------------------------------------------
-
- /**
- * Read all records in specified namespace and set.
- *
- * This call will block until the scan is complete - callbacks are made
- * within the scope of this call.
- *
- * @param policy scan configuration parameters, pass in null for defaults
- * @param namespace namespace - equivalent to database name
- * @param setName optional set name - equivalent to database table
- * @param callback read callback method - called with record data
- * @param binNames optional bin to retrieve. All bins will be returned if not specified.
- * @throws AerospikeException if scan fails
- */
- @Override
- public void scanAll(
- ScanPolicy policy,
- String namespace,
- String setName,
- ScanCallback callback,
- String... binNames
- ) {
- CompletableFuture future = new CompletableFuture<>();
- RecordSequenceListener listener = new RecordSequenceListenerToCallback(callback, future);
- scanPartitions(null, listener, policy, null, namespace, setName, binNames);
- getFuture(future);
- }
-
- /**
- * Asynchronously read all records in specified namespace and set.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy scan configuration parameters, pass in null for defaults
- * @param namespace namespace - equivalent to database name
- * @param setName optional set name - equivalent to database table
- * @param binNames optional bin to retrieve. All bins will be returned if not specified.
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void scanAll(
- EventLoop eventLoop,
- RecordSequenceListener listener,
- ScanPolicy policy,
- String namespace,
- String setName,
- String... binNames
- ) {
- scanPartitions(eventLoop, listener, policy, null, namespace, setName, binNames);
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public void scanNode(
- ScanPolicy policy,
- String nodeName,
- String namespace,
- String setName,
- ScanCallback callback,
- String... binNames
- ) {
- throw new AerospikeException(NotSupported + "scanNode");
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public void scanNode(
- ScanPolicy policy,
- Node node,
- String namespace,
- String setName,
- ScanCallback callback,
- String... binNames
- ) {
- throw new AerospikeException(NotSupported + "scanNode");
- }
-
- /**
- * Read records in specified namespace, set and partition filter.
- *
- * This call will block until the scan is complete - callbacks are made
- * within the scope of this call.
- *
- * @param policy scan configuration parameters, pass in null for defaults
- * @param partitionFilter filter on a subset of data partitions
- * @param namespace namespace - equivalent to database name
- * @param setName optional set name - equivalent to database table
- * @param callback read callback method - called with record data
- * @param binNames optional bin to retrieve. All bins will be returned if not specified
- * @throws AerospikeException if scan fails
- */
- @Override
- public void scanPartitions(
- ScanPolicy policy,
- PartitionFilter partitionFilter,
- String namespace,
- String setName,
- ScanCallback callback,
- String... binNames
- ) {
- CompletableFuture future = new CompletableFuture<>();
- RecordSequenceListener listener = new RecordSequenceListenerToCallback(callback, future);
- scanPartitions(null, listener, policy, partitionFilter, namespace, setName, binNames);
- getFuture(future);
- }
-
- /**
- * Asynchronously read records in specified namespace, set and partition filter.
- *
- * @param eventLoop ignored, pass in null
- * @param listener where to send results
- * @param policy scan configuration parameters, pass in null for defaults
- * @param partitionFilter filter on a subset of data partitions
- * @param namespace namespace - equivalent to database name
- * @param setName optional set name - equivalent to database table
- * @param binNames optional bin to retrieve. All bins will be returned if not specified.
- * @throws AerospikeException if event loop registration fails
- */
- @Override
- public void scanPartitions(
- EventLoop eventLoop,
- RecordSequenceListener listener,
- ScanPolicy policy,
- PartitionFilter partitionFilter,
- String namespace,
- String setName,
- String... binNames
- ) {
- if (policy == null) {
- policy = scanPolicyDefault;
- }
-
- PartitionTracker tracker = null;
-
- if (partitionFilter != null) {
- tracker = new PartitionTracker(policy, 1, partitionFilter);
- }
-
- ScanCommandProxy command = new ScanCommandProxy(executor, policy, listener, namespace,
- setName, binNames, partitionFilter, tracker);
- command.execute();
- }
-
- //---------------------------------------------------------------
- // User defined functions
- //---------------------------------------------------------------
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public RegisterTask register(Policy policy, String clientPath, String serverPath, Language language) {
- throw new AerospikeException(NotSupported + "register");
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public RegisterTask register(
- Policy policy,
- ClassLoader resourceLoader,
- String resourcePath,
- String serverPath,
- Language language
- ) {
- throw new AerospikeException(NotSupported + "register");
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public RegisterTask registerUdfString(Policy policy, String code, String serverPath, Language language) {
- throw new AerospikeException(NotSupported + "registerUdfString");
- }
-
- /**
- * Not supported in proxy client.
- */
- @Override
- public void removeUdf(InfoPolicy policy, String serverPath) {
- throw new AerospikeException(NotSupported + "removeUdf");
- }
-
- /**
- * Execute user defined function on server and return results.
- * The function operates on a single record.
- * The package name is used to locate the udf file location:
- *
- * {@code udf file = /.lua}
- *
- * @param policy write configuration parameters, pass in null for defaults
- * @param key unique record identifier
- * @param packageName server package name where user defined function resides
- * @param functionName user defined function
- * @param functionArgs arguments passed in to user defined function
- * @return return value of user defined function
- * @throws AerospikeException if transaction fails
- */
- @Override
- public Object execute(WritePolicy policy, Key key, String packageName, String functionName, Value... functionArgs) {
- CompletableFuture