diff --git a/.palantir/revapi.yml b/.palantir/revapi.yml
index fad4288dbe4..ba24a13f368 100644
--- a/.palantir/revapi.yml
+++ b/.palantir/revapi.yml
@@ -277,6 +277,34 @@ acceptedBreaks:
         \ ===java.util.Map<com.palantir.atlasdb.keyvalue.api.Cell, byte[]>===)"
       justification: "KeyValueSnapshotReader is a relatively new API and is not widely\
         \ used"
+  "0.1142.0":
+    com.palantir.atlasdb:atlasdb-api:
+    - code: "java.annotation.removed"
+      old: "parameter void com.palantir.atlasdb.transaction.api.precommit.PreCommitRequirementValidator::throwIfImmutableTsOrCommitLocksExpired(===com.palantir.lock.v2.LockToken===)"
+      new: "parameter void com.palantir.atlasdb.transaction.api.precommit.PreCommitRequirementValidator::throwIfImmutableTsOrCommitLocksExpired(===java.util.Set<com.palantir.lock.v2.LockToken>===)"
+      justification: "Prototyping"
+    - code: "java.annotation.removed"
+      old: "parameter void com.palantir.atlasdb.transaction.api.precommit.PreCommitRequirementValidator::throwIfPreCommitRequirementsNotMet(===com.palantir.lock.v2.LockToken===,\
+        \ long)"
+      new: "parameter void com.palantir.atlasdb.transaction.api.precommit.PreCommitRequirementValidator::throwIfPreCommitRequirementsNotMet(===java.util.Set<com.palantir.lock.v2.LockToken>===,\
+        \ long)"
+      justification: "Prototyping"
+    - code: "java.method.addedToInterface"
+      new: "method long com.palantir.atlasdb.transaction.api.TransactionManager::getCommitImmutableTimestamp()"
+      justification: "Prototyping"
+    - code: "java.method.addedToInterface"
+      new: "method void com.palantir.atlasdb.transaction.api.Transaction::putDelayed(java.util.List<com.palantir.atlasdb.transaction.api.DelayedWrite>)"
+      justification: "Prototyping"
+    - code: "java.method.parameterTypeChanged"
+      old: "parameter void com.palantir.atlasdb.transaction.api.precommit.PreCommitRequirementValidator::throwIfImmutableTsOrCommitLocksExpired(===com.palantir.lock.v2.LockToken===)"
+      new: "parameter void com.palantir.atlasdb.transaction.api.precommit.PreCommitRequirementValidator::throwIfImmutableTsOrCommitLocksExpired(===java.util.Set<com.palantir.lock.v2.LockToken>===)"
+      justification: "Prototyping"
+    - code: "java.method.parameterTypeChanged"
+      old: "parameter void com.palantir.atlasdb.transaction.api.precommit.PreCommitRequirementValidator::throwIfPreCommitRequirementsNotMet(===com.palantir.lock.v2.LockToken===,\
+        \ long)"
+      new: "parameter void com.palantir.atlasdb.transaction.api.precommit.PreCommitRequirementValidator::throwIfPreCommitRequirementsNotMet(===java.util.Set<com.palantir.lock.v2.LockToken>===,\
+        \ long)"
+      justification: "Prototyping"
   "0.770.0":
     com.palantir.atlasdb:atlasdb-api:
     - code: "java.class.removed"
diff --git a/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/DelayedWrite.java b/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/DelayedWrite.java
new file mode 100644
index 00000000000..74700e990f9
--- /dev/null
+++ b/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/DelayedWrite.java
@@ -0,0 +1,34 @@
+/*
+ * (c) Copyright 2024 Palantir Technologies Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.palantir.atlasdb.transaction.api;
+
+import com.palantir.atlasdb.keyvalue.api.Cell;
+import com.palantir.atlasdb.keyvalue.api.TableReference;
+import java.util.Map;
+import java.util.function.LongFunction;
+
+/**
+ * Curious creature, but it allows the user to delay deciding the particular cell to write to until the actual commit,
+ * at which point the user will be provided with a unique long value that can be used to generate the cell.
+ *
+ * This is a very specific feature that few users are likely to require, or should attempt to use.
+ *
+ * Right now the interface allows you to do TOO MUCH, you don't even have to specify the tables you write to.
+ * This can be improved and locked down, but this allows for maximum experimentation.
+ */
+@FunctionalInterface
+public interface DelayedWrite extends LongFunction<Map<TableReference, Map<Cell, byte[]>>> {}
diff --git a/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/Transaction.java b/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/Transaction.java
index 4285083b3a6..abf7a2136fa 100644
--- a/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/Transaction.java
+++ b/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/Transaction.java
@@ -33,6 +33,7 @@
 import com.palantir.lock.watch.ChangeMetadata;
 import com.palantir.util.result.Result;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.Set;
@@ -323,6 +324,9 @@ Stream<BatchingVisitable<RowResult<byte[]>>> getRangesLazy(
     @Idempotent
     void putWithMetadata(TableReference tableRef, Map<Cell, ValueAndChangeMetadata> valuesAndMetadata);
 
+    @Idempotent
+    void putDelayed(List<DelayedWrite> values);
+
     /**
      * Deletes values from the key-value store.
      * @param tableRef the table from which to delete the values
diff --git a/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/TransactionManager.java b/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/TransactionManager.java
index e407ac46827..fd420044ae3 100644
--- a/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/TransactionManager.java
+++ b/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/TransactionManager.java
@@ -309,6 +309,8 @@ <T, C extends PreCommitCondition, E extends Exception> T runTaskWithConditionRea
      */
     long getImmutableTimestamp();
 
+    long getCommitImmutableTimestamp();
+
     /**
      * Returns the lock service used by this transaction manager.
      *
diff --git a/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/precommit/PreCommitRequirementValidator.java b/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/precommit/PreCommitRequirementValidator.java
index 7b6ff0a459f..66e034b5d31 100644
--- a/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/precommit/PreCommitRequirementValidator.java
+++ b/atlasdb-api/src/main/java/com/palantir/atlasdb/transaction/api/precommit/PreCommitRequirementValidator.java
@@ -21,7 +21,7 @@
 import com.palantir.atlasdb.transaction.api.TransactionFailedException;
 import com.palantir.lock.v2.LockToken;
 import java.util.Map;
-import javax.annotation.Nullable;
+import java.util.Set;
 
 public interface PreCommitRequirementValidator {
     /**
@@ -41,10 +41,10 @@ void throwIfPreCommitConditionInvalidAtCommitOnWriteTransaction(
      * user pre-commit condition is no longer valid, or possibly because of other internal state such as commit
      * locks having expired.
      */
-    void throwIfPreCommitRequirementsNotMet(@Nullable LockToken commitLocksToken, long timestamp);
+    void throwIfPreCommitRequirementsNotMet(Set<LockToken> commitLocksToken, long timestamp);
 
     /**
      * Throws a {@link TransactionFailedException} if the immutable timestamp lock or commit locks have expired.
      */
-    void throwIfImmutableTsOrCommitLocksExpired(@Nullable LockToken commitLocksToken);
+    void throwIfImmutableTsOrCommitLocksExpired(Set<LockToken> commitLocksToken);
 }
diff --git a/atlasdb-autobatch/build.gradle b/atlasdb-autobatch/build.gradle
index 71b0ef4e2ae..43b47cbb0b4 100644
--- a/atlasdb-autobatch/build.gradle
+++ b/atlasdb-autobatch/build.gradle
@@ -18,6 +18,7 @@ dependencies {
     implementation 'com.palantir.conjure.java.api:errors'
     implementation 'com.palantir.safe-logging:preconditions'
     implementation 'com.palantir.tracing:tracing'
+    implementation 'com.palantir.tracing:tracing-api'
     implementation 'io.dropwizard.metrics:metrics-core'
     implementation project(':commons-executors')
 
diff --git a/atlasdb-autobatch/src/main/java/com/palantir/atlasdb/autobatch/Autobatchers.java b/atlasdb-autobatch/src/main/java/com/palantir/atlasdb/autobatch/Autobatchers.java
index bf713b70f2d..db49d94ba7b 100644
--- a/atlasdb-autobatch/src/main/java/com/palantir/atlasdb/autobatch/Autobatchers.java
+++ b/atlasdb-autobatch/src/main/java/com/palantir/atlasdb/autobatch/Autobatchers.java
@@ -29,6 +29,9 @@
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.tracing.Observability;
+import com.palantir.tracing.Tracer;
+import com.palantir.tracing.Tracers;
+import com.palantir.tracing.api.SpanType;
 import java.time.Duration;
 import java.util.List;
 import java.util.Map;
@@ -97,7 +100,25 @@ public static <O> AutobatcherBuilder<SupplierKey, O> coalescing(Supplier<O> supp
      */
     public static <I, O> AutobatcherBuilder<I, O> independent(Consumer<List<BatchElement<I, O>>> batchFunction) {
         return new AutobatcherBuilder<>(parameters -> new IndependentBatchingEventHandler<>(
-                maybeWrapWithTimeout(batchFunction, parameters), parameters.batchSize()));
+                maybeWrapWithTimeout(
+                        input -> {
+                            long start = System.nanoTime();
+                            try {
+                                Tracer.initTraceWithSpan(
+                                        Observability.SAMPLE,
+                                        Tracers.randomId(),
+                                        parameters.safeLoggablePurpose(),
+                                        SpanType.LOCAL);
+                                batchFunction.accept(input);
+                            } finally {
+                                log.info(
+                                        "Finished batch",
+                                        SafeArg.of("duration", Duration.ofNanos(System.nanoTime() - start)));
+                                Tracer.fastCompleteSpan();
+                            }
+                        },
+                        parameters),
+                parameters.batchSize()));
     }
 
     private static <I, O> Consumer<List<BatchElement<I, O>>> maybeWrapWithTimeout(
diff --git a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraClientPoolImpl.java b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraClientPoolImpl.java
index f4ca377d456..08c7d8980ec 100644
--- a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraClientPoolImpl.java
+++ b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraClientPoolImpl.java
@@ -30,6 +30,7 @@
 import com.palantir.atlasdb.keyvalue.cassandra.pool.CassandraClientPoolMetrics;
 import com.palantir.atlasdb.keyvalue.cassandra.pool.CassandraServer;
 import com.palantir.atlasdb.keyvalue.cassandra.pool.CassandraService;
+import com.palantir.atlasdb.tracing.Tracing;
 import com.palantir.atlasdb.util.MetricsManager;
 import com.palantir.common.base.FunctionCheckedException;
 import com.palantir.common.concurrent.InitializeableScheduledExecutorServiceSupplier;
@@ -42,6 +43,7 @@
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.refreshable.Refreshable;
+import com.palantir.tracing.CloseableTracer;
 import java.time.Duration;
 import java.util.HashMap;
 import java.util.Map;
@@ -638,36 +640,45 @@ public <V, K extends Exception> V runWithRetryOnServer(
         RetryableCassandraRequest<V, K> req = new RetryableCassandraRequest<>(specifiedServer, fn);
 
         while (true) {
-            if (log.isTraceEnabled()) {
-                log.trace("Running function on host {}.", SafeArg.of("server", req.getCassandraServer()));
-            }
-            CassandraClientPoolingContainer hostPool = getPreferredHostOrFallBack(req);
+            try (CloseableTracer trace =
+                    Tracing.startLocalTrace("CassandraClientPoolImpl.runWithRetryOnServer", tagConsumer -> {
+                        tagConsumer.integer("try", req.getNumberOfAttempts());
+                    })) {
+                if (log.isTraceEnabled()) {
+                    log.trace("Running function on host {}.", SafeArg.of("server", req.getCassandraServer()));
+                }
+                CassandraClientPoolingContainer hostPool = getPreferredHostOrFallBack(req);
 
-            try {
-                V response = runWithPooledResourceRecordingMetrics(hostPool, req.getFunction());
-                removeFromBlacklistAfterResponse(hostPool.getCassandraServer());
-                return response;
-            } catch (Exception ex) {
-                exceptionHandler.handleExceptionFromRequest(req, hostPool.getCassandraServer(), ex);
+                try {
+                    V response = runWithPooledResourceRecordingMetrics(hostPool, req.getFunction());
+                    removeFromBlacklistAfterResponse(hostPool.getCassandraServer());
+                    return response;
+                } catch (Exception ex) {
+                    exceptionHandler.handleExceptionFromRequest(req, hostPool.getCassandraServer(), ex);
+                }
             }
         }
     }
 
     private <V, K extends Exception> CassandraClientPoolingContainer getPreferredHostOrFallBack(
             RetryableCassandraRequest<V, K> req) {
-        CassandraClientPoolingContainer hostPool = cassandra.getPools().get(req.getCassandraServer());
-
-        if (blacklist.contains(req.getCassandraServer()) || hostPool == null || req.shouldGiveUpOnPreferredHost()) {
-            CassandraServer previousHost = hostPool == null ? req.getCassandraServer() : hostPool.getCassandraServer();
-            Optional<CassandraClientPoolingContainer> hostPoolCandidate = cassandra.getRandomGoodHostForPredicate(
-                    address -> !req.alreadyTriedOnHost(address), req.getTriedHosts());
-            hostPool = hostPoolCandidate.orElseGet(cassandra::getRandomGoodHost);
-            log.warn(
-                    "Randomly redirected a query intended for host {} to {}.",
-                    SafeArg.of("previousHost", previousHost),
-                    SafeArg.of("randomHost", hostPool.getCassandraServer()));
+        try (CloseableTracer trace =
+                Tracing.startLocalTrace("CassandraClientPoolImpl.getPreferredHostOrFallBack", tagConsumer -> {})) {
+            CassandraClientPoolingContainer hostPool = cassandra.getPools().get(req.getCassandraServer());
+
+            if (blacklist.contains(req.getCassandraServer()) || hostPool == null || req.shouldGiveUpOnPreferredHost()) {
+                CassandraServer previousHost =
+                        hostPool == null ? req.getCassandraServer() : hostPool.getCassandraServer();
+                Optional<CassandraClientPoolingContainer> hostPoolCandidate = cassandra.getRandomGoodHostForPredicate(
+                        address -> !req.alreadyTriedOnHost(address), req.getTriedHosts());
+                hostPool = hostPoolCandidate.orElseGet(cassandra::getRandomGoodHost);
+                log.warn(
+                        "Randomly redirected a query intended for host {} to {}.",
+                        SafeArg.of("previousHost", previousHost),
+                        SafeArg.of("randomHost", hostPool.getCassandraServer()));
+            }
+            return hostPool;
         }
-        return hostPool;
     }
 
     @Override
@@ -696,15 +707,20 @@ private void removeFromBlacklistAfterResponse(CassandraServer cassandraServer) {
     private <V, K extends Exception> V runWithPooledResourceRecordingMetrics(
             CassandraClientPoolingContainer hostPool, FunctionCheckedException<CassandraClient, V, K> fn) throws K {
 
-        metrics.recordRequestOnHost(hostPool);
-        try {
-            return hostPool.runWithPooledResource(fn);
-        } catch (Exception e) {
-            metrics.recordExceptionOnHost(hostPool);
-            if (CassandraRequestExceptionHandler.isConnectionException(e)) {
-                metrics.recordConnectionExceptionOnHost(hostPool);
+        try (CloseableTracer trace = Tracing.startLocalTrace(
+                "CassandraClientPoolImpl.runWithPooledResourceRecordingMetrics", tagConsumer -> {
+                    tagConsumer.accept("host", hostPool.getCassandraServer().cassandraHostName());
+                })) {
+            metrics.recordRequestOnHost(hostPool);
+            try {
+                return hostPool.runWithPooledResource(fn);
+            } catch (Exception e) {
+                metrics.recordExceptionOnHost(hostPool);
+                if (CassandraRequestExceptionHandler.isConnectionException(e)) {
+                    metrics.recordConnectionExceptionOnHost(hostPool);
+                }
+                throw e;
             }
-            throw e;
         }
     }
 
diff --git a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraClientPoolingContainer.java b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraClientPoolingContainer.java
index 54d082e752f..8dc223231ad 100644
--- a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraClientPoolingContainer.java
+++ b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraClientPoolingContainer.java
@@ -16,6 +16,7 @@
 package com.palantir.atlasdb.keyvalue.cassandra;
 
 import com.codahale.metrics.Gauge;
+import com.codahale.metrics.Timer;
 import com.google.common.base.Function;
 import com.google.common.base.MoreObjects;
 import com.palantir.atlasdb.cassandra.CassandraKeyValueServiceConfig;
@@ -24,6 +25,7 @@
 import com.palantir.atlasdb.keyvalue.cassandra.pool.CassandraClientPoolHostLevelMetric;
 import com.palantir.atlasdb.keyvalue.cassandra.pool.CassandraClientPoolMetrics;
 import com.palantir.atlasdb.keyvalue.cassandra.pool.CassandraServer;
+import com.palantir.atlasdb.tracing.Tracing;
 import com.palantir.atlasdb.util.MetricsManager;
 import com.palantir.common.base.FunctionCheckedException;
 import com.palantir.common.pooling.PoolingContainer;
@@ -32,6 +34,7 @@
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
 import com.palantir.nylon.threads.ThreadNames;
+import com.palantir.tracing.CloseableTracer;
 import com.palantir.util.TimedRunner;
 import com.palantir.util.TimedRunner.TaskContext;
 import java.lang.management.ManagementFactory;
@@ -72,6 +75,7 @@ public class CassandraClientPoolingContainer implements PoolingContainer<Cassand
     private final int poolNumber;
     private final CassandraClientPoolMetrics poolMetrics;
     private final TimedRunner timedRunner;
+    private final Timer getResourceTimer;
 
     public CassandraClientPoolingContainer(
             MetricsManager metricsManager,
@@ -80,6 +84,7 @@ public CassandraClientPoolingContainer(
             int poolNumber,
             CassandraClientPoolMetrics poolMetrics,
             CassandraClientMetrics cassandraClientMetrics) {
+        getResourceTimer = metricsManager.registerOrGetTimer(CassandraClientPoolingContainer.class, "getResource");
         this.metricsManager = metricsManager;
         this.cassandraServer = cassandraServer;
         this.proxy = cassandraServer.proxy();
@@ -117,35 +122,38 @@ public int getPoolSize() {
     @Override
     public <V, K extends Exception> V runWithPooledResource(FunctionCheckedException<CassandraClient, V, K> fn)
             throws K {
-        final String origName = Thread.currentThread().getName();
-        String newThreadName = origName + " to cassandra " + proxy.getHostString() + ':' + proxy.getPort() + " - "
-                + count.getAndIncrement();
-        if (log.isDebugEnabled()) {
-            // the timestamp is expensive to stringify, only add to thread names when debugging
-            newThreadName += " started at " + Instant.now();
-        }
-        ThreadNames.setThreadName(Thread.currentThread(), newThreadName);
-        try {
-            openRequests.getAndIncrement();
-            return runWithGoodResource(fn);
-        } catch (Throwable t) {
-            log.warn("Error occurred talking to host '{}'", SafeArg.of("host", cassandraServer), t);
-            if (t instanceof NoSuchElementException && t.getMessage().contains("Pool exhausted")) {
-                log.warn(
-                        "Extra information about exhausted pool",
-                        SafeArg.of("numActive", clientPool.getNumActive()),
-                        SafeArg.of("maxTotal", clientPool.getMaxTotal()),
-                        SafeArg.of("meanActiveTimeMillis", clientPool.getMeanActiveTimeMillis()),
-                        SafeArg.of("meanIdleTimeMillis", clientPool.getMeanIdleTimeMillis()));
-                poolMetrics.recordPoolExhaustion();
-                if (log.isDebugEnabled()) {
-                    logThreadStates();
+        try (CloseableTracer trace =
+                Tracing.startLocalTrace("CassandraClientPoolingContainer.runWithPooledResource", tagConsumer -> {})) {
+            final String origName = Thread.currentThread().getName();
+            String newThreadName = origName + " to cassandra " + proxy.getHostString() + ':' + proxy.getPort() + " - "
+                    + count.getAndIncrement();
+            if (log.isDebugEnabled()) {
+                // the timestamp is expensive to stringify, only add to thread names when debugging
+                newThreadName += " started at " + Instant.now();
+            }
+            ThreadNames.setThreadName(Thread.currentThread(), newThreadName);
+            try {
+                openRequests.getAndIncrement();
+                return runWithGoodResource(fn);
+            } catch (Throwable t) {
+                log.warn("Error occurred talking to host '{}'", SafeArg.of("host", cassandraServer), t);
+                if (t instanceof NoSuchElementException && t.getMessage().contains("Pool exhausted")) {
+                    log.warn(
+                            "Extra information about exhausted pool",
+                            SafeArg.of("numActive", clientPool.getNumActive()),
+                            SafeArg.of("maxTotal", clientPool.getMaxTotal()),
+                            SafeArg.of("meanActiveTimeMillis", clientPool.getMeanActiveTimeMillis()),
+                            SafeArg.of("meanIdleTimeMillis", clientPool.getMeanIdleTimeMillis()));
+                    poolMetrics.recordPoolExhaustion();
+                    if (log.isDebugEnabled()) {
+                        logThreadStates();
+                    }
                 }
+                throw t;
+            } finally {
+                openRequests.getAndDecrement();
+                ThreadNames.setThreadName(Thread.currentThread(), origName);
             }
-            throw t;
-        } finally {
-            openRequests.getAndDecrement();
-            ThreadNames.setThreadName(Thread.currentThread(), origName);
         }
     }
 
@@ -158,54 +166,65 @@ public <V> V runWithPooledResource(Function<CassandraClient, V> fn) {
     @SuppressWarnings("unchecked")
     private <V, K extends Exception> V runWithGoodResource(FunctionCheckedException<CassandraClient, V, K> fn)
             throws K {
-        boolean shouldReuse = true;
-        CassandraClient resource = null;
-        try {
-            resource = clientPool.borrowObject();
-            CassandraClient finalResource = resource;
-            TaskContext<V> taskContext = TaskContext.create(() -> fn.apply(finalResource), () -> {});
-            return timedRunner.run(taskContext);
-        } catch (Exception e) {
-            if (isInvalidClientConnection(resource)) {
-                log.warn(
-                        "Not reusing resource due to {} of host {}",
-                        UnsafeArg.of("exception", e.toString()),
-                        SafeArg.of("cassandraHostName", cassandraServer.cassandraHostName()),
-                        SafeArg.of("proxy", CassandraLogHelper.host(proxy)),
-                        e);
-                shouldReuse = false;
-            }
-            if (e instanceof TTransportException
-                    && ((TTransportException) e).getType() == TTransportException.END_OF_FILE) {
-                // If we have an end of file this is most likely due to this cassandra node being bounced.
-                log.warn(
-                        "Clearing client pool due to exception",
-                        SafeArg.of("cassandraHostName", cassandraServer.cassandraHostName()),
-                        SafeArg.of("proxy", CassandraLogHelper.host(proxy)),
-                        e);
-                clientPool.clear();
-            }
-            throw (K) e;
-        } finally {
-            if (resource != null) {
-                if (shouldReuse) {
-                    if (log.isDebugEnabled()) {
-                        log.debug(
-                                "Returning resource to pool of host {}",
-                                SafeArg.of("cassandraHostname", cassandraServer.cassandraHostName()),
-                                SafeArg.of("proxy", CassandraLogHelper.host(proxy)));
+        try (CloseableTracer trace =
+                Tracing.startLocalTrace("CassandraClientPoolingContainer.runWithGoodResource", tagConsumer -> {})) {
+            boolean shouldReuse = true;
+            CassandraClient resource = null;
+            try {
+                resource = getResource();
+                CassandraClient finalResource = resource;
+                TaskContext<V> taskContext = TaskContext.create(() -> fn.apply(finalResource), () -> {});
+                return timedRunner.run(taskContext);
+            } catch (Exception e) {
+                if (isInvalidClientConnection(resource)) {
+                    log.warn(
+                            "Not reusing resource due to {} of host {}",
+                            UnsafeArg.of("exception", e.toString()),
+                            SafeArg.of("cassandraHostName", cassandraServer.cassandraHostName()),
+                            SafeArg.of("proxy", CassandraLogHelper.host(proxy)),
+                            e);
+                    shouldReuse = false;
+                }
+                if (e instanceof TTransportException
+                        && ((TTransportException) e).getType() == TTransportException.END_OF_FILE) {
+                    // If we have an end of file this is most likely due to this cassandra node being bounced.
+                    log.warn(
+                            "Clearing client pool due to exception",
+                            SafeArg.of("cassandraHostName", cassandraServer.cassandraHostName()),
+                            SafeArg.of("proxy", CassandraLogHelper.host(proxy)),
+                            e);
+                    clientPool.clear();
+                }
+                throw (K) e;
+            } finally {
+                if (resource != null) {
+                    if (shouldReuse) {
+                        if (log.isDebugEnabled()) {
+                            log.debug(
+                                    "Returning resource to pool of host {}",
+                                    SafeArg.of("cassandraHostname", cassandraServer.cassandraHostName()),
+                                    SafeArg.of("proxy", CassandraLogHelper.host(proxy)));
+                        }
+                        eagerlyCleanupReadBuffersFromIdleConnection(resource, cassandraServer);
+                        clientPool.returnObject(resource);
+                    } else {
+                        invalidateQuietly(resource);
                     }
-                    eagerlyCleanupReadBuffersFromIdleConnection(resource, cassandraServer);
-                    clientPool.returnObject(resource);
                 } else {
-                    invalidateQuietly(resource);
+                    log.warn("Failed to acquire Cassandra resource from object pool");
                 }
-            } else {
-                log.warn("Failed to acquire Cassandra resource from object pool");
             }
         }
     }
 
+    private CassandraClient getResource() throws Exception {
+        try (Timer.Context timer = getResourceTimer.time();
+                CloseableTracer trace =
+                        Tracing.startLocalTrace("CassandraClientPoolingContainer.getResource", tagConsumer -> {})) {
+            return clientPool.borrowObject();
+        }
+    }
+
     private static void eagerlyCleanupReadBuffersFromIdleConnection(
             CassandraClient idleClient, CassandraServer server) {
         // eagerly cleanup idle-connection read buffer to keep a smaller memory footprint
diff --git a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraRequestExceptionHandler.java b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraRequestExceptionHandler.java
index 98fb7d83867..55946220cd5 100644
--- a/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraRequestExceptionHandler.java
+++ b/atlasdb-cassandra/src/main/java/com/palantir/atlasdb/keyvalue/cassandra/CassandraRequestExceptionHandler.java
@@ -18,11 +18,13 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.palantir.atlasdb.keyvalue.api.InsufficientConsistencyException;
 import com.palantir.atlasdb.keyvalue.cassandra.pool.CassandraServer;
+import com.palantir.atlasdb.tracing.Tracing;
 import com.palantir.common.exception.AtlasDbDependencyException;
 import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.UnsafeArg;
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
+import com.palantir.tracing.CloseableTracer;
 import java.net.SocketTimeoutException;
 import java.time.Duration;
 import java.util.NoSuchElementException;
@@ -72,27 +74,30 @@ static CassandraRequestExceptionHandler withNoBackoffForTest(
     @SuppressWarnings("unchecked")
     <K extends Exception> void handleExceptionFromRequest(
             RetryableCassandraRequest<?, K> req, CassandraServer serverTried, Exception ex) throws K {
-        if (!isRetryable(ex)) {
-            throw (K) ex;
+        try (CloseableTracer trace = Tracing.startLocalTrace(
+                "CassandraRequestExceptionHandler.handleExceptionFromRequest", tagConsumer -> {})) {
+            if (!isRetryable(ex)) {
+                throw (K) ex;
+            }
+
+            RequestExceptionHandlerStrategy strategy = getStrategy();
+            req.triedOnHost(serverTried);
+            req.registerException(ex);
+            int numberOfAttempts = req.getNumberOfAttempts();
+            int numberOfAttemptsOnHost = req.getNumberOfAttemptsOnHost(serverTried);
+
+            if (numberOfAttempts >= maxTriesTotal.get()) {
+                throw logAndThrowException(numberOfAttempts, numberOfAttemptsOnHost, ex, serverTried, req);
+            }
+
+            if (shouldBlacklist(ex, numberOfAttemptsOnHost)) {
+                blacklist.add(serverTried);
+            }
+
+            logNumberOfAttempts(ex, numberOfAttempts);
+            handleBackoff(req, serverTried, ex, strategy);
+            handleRetryOnDifferentHosts(req, serverTried, ex, strategy);
         }
-
-        RequestExceptionHandlerStrategy strategy = getStrategy();
-        req.triedOnHost(serverTried);
-        req.registerException(ex);
-        int numberOfAttempts = req.getNumberOfAttempts();
-        int numberOfAttemptsOnHost = req.getNumberOfAttemptsOnHost(serverTried);
-
-        if (numberOfAttempts >= maxTriesTotal.get()) {
-            throw logAndThrowException(numberOfAttempts, numberOfAttemptsOnHost, ex, serverTried, req);
-        }
-
-        if (shouldBlacklist(ex, numberOfAttemptsOnHost)) {
-            blacklist.add(serverTried);
-        }
-
-        logNumberOfAttempts(ex, numberOfAttempts);
-        handleBackoff(req, serverTried, ex, strategy);
-        handleRetryOnDifferentHosts(req, serverTried, ex, strategy);
     }
 
     @VisibleForTesting
diff --git a/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/ForwardingTransaction.java b/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/ForwardingTransaction.java
index 6caba9c9fcb..04cdc2eb4da 100644
--- a/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/ForwardingTransaction.java
+++ b/atlasdb-client/src/main/java/com/palantir/atlasdb/transaction/impl/ForwardingTransaction.java
@@ -25,6 +25,7 @@
 import com.palantir.atlasdb.keyvalue.api.RowResult;
 import com.palantir.atlasdb.keyvalue.api.TableReference;
 import com.palantir.atlasdb.transaction.api.ConstraintCheckable;
+import com.palantir.atlasdb.transaction.api.DelayedWrite;
 import com.palantir.atlasdb.transaction.api.GetRangesQuery;
 import com.palantir.atlasdb.transaction.api.Transaction;
 import com.palantir.atlasdb.transaction.api.TransactionFailedException;
@@ -37,6 +38,7 @@
 import com.palantir.lock.watch.ChangeMetadata;
 import com.palantir.util.result.Result;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.Set;
@@ -144,6 +146,11 @@ public void putWithMetadata(TableReference tableRef, Map<Cell, ValueAndChangeMet
         delegate().putWithMetadata(tableRef, valuesAndMetadata);
     }
 
+    @Override
+    public void putDelayed(List<DelayedWrite> values) {
+        delegate().putDelayed(values);
+    }
+
     @Override
     public void delete(TableReference tableRef, Set<Cell> keys) {
         delegate().delete(tableRef, keys);
diff --git a/atlasdb-commons/src/main/java/com/palantir/util/TimedRunner.java b/atlasdb-commons/src/main/java/com/palantir/util/TimedRunner.java
index 8d77890737d..6f5a68137bd 100644
--- a/atlasdb-commons/src/main/java/com/palantir/util/TimedRunner.java
+++ b/atlasdb-commons/src/main/java/com/palantir/util/TimedRunner.java
@@ -20,6 +20,7 @@
 import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
+import com.palantir.tracing.CloseableTracer;
 import java.time.Duration;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -45,34 +46,40 @@ public static TimedRunner create(Duration timeoutDuration) {
     }
 
     public <T> T run(TaskContext<T> taskContext) throws Exception {
-        Future<T> future = executor.submit(() -> taskContext.task().call());
-        final Exception failure;
-        try {
-            return future.get(timeoutDuration.toMillis(), TimeUnit.MILLISECONDS);
-        } catch (InterruptedException e) {
-            log.warn("Execution failed with exception", e);
-            Thread.currentThread().interrupt();
-            failure = e;
-        } catch (ExecutionException e) {
-            log.warn("Execution failed with exception", e);
-            if (e.getCause() instanceof Error) {
-                throw (Error) e.getCause();
-            } else {
-                failure = (Exception) e.getCause();
+        try (CloseableTracer trace = CloseableTracer.startSpan("TimedRunner.run")) {
+            Future<T> future = executor.submit(() -> {
+                try (CloseableTracer trace1 = CloseableTracer.startSpan("TimedRunner.executorRun")) {
+                    return taskContext.task().call();
+                }
+            });
+            final Exception failure;
+            try {
+                return future.get(timeoutDuration.toMillis(), TimeUnit.MILLISECONDS);
+            } catch (InterruptedException e) {
+                log.warn("Execution failed with exception", e);
+                Thread.currentThread().interrupt();
+                failure = e;
+            } catch (ExecutionException e) {
+                log.warn("Execution failed with exception", e);
+                if (e.getCause() instanceof Error) {
+                    throw (Error) e.getCause();
+                } else {
+                    failure = (Exception) e.getCause();
+                }
+            } catch (TimeoutException e) {
+                log.warn("Execution timed out", SafeArg.of("timeoutDuration", timeoutDuration), e);
+                future.cancel(true);
+                failure = e;
             }
-        } catch (TimeoutException e) {
-            log.warn("Execution timed out", SafeArg.of("timeoutDuration", timeoutDuration), e);
-            future.cancel(true);
-            failure = e;
-        }
 
-        try {
-            taskContext.taskFailureHandler().run();
-        } catch (Throwable t) {
-            log.warn("Shutdown failure handler threw an exception itself!", t);
-            failure.addSuppressed(t);
+            try {
+                taskContext.taskFailureHandler().run();
+            } catch (Throwable t) {
+                log.warn("Shutdown failure handler threw an exception itself!", t);
+                failure.addSuppressed(t);
+            }
+            throw failure;
         }
-        throw failure;
     }
 
     @Value.Immutable
diff --git a/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/DefaultLockAndTimestampServiceFactory.java b/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/DefaultLockAndTimestampServiceFactory.java
index df7d0609086..b3172b21dd4 100644
--- a/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/DefaultLockAndTimestampServiceFactory.java
+++ b/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/DefaultLockAndTimestampServiceFactory.java
@@ -441,7 +441,11 @@ private static LockAndTimestampServices createRawRemoteServices(
                 .lock(lockService)
                 .timestamp(timeService)
                 .timestampManagement(timestampManagementService)
-                .timelock(new LegacyTimelockService(timeService, lockService, TransactionManagers.LOCK_CLIENT))
+                .timelock(new LegacyTimelockService(
+                        timeService,
+                        lockService,
+                        TransactionManagers.LOCK_CLIENT,
+                        TransactionManagers.COMMIT_TIMESTAMP_LOCK_CLIENT))
                 .build();
     }
 
@@ -463,7 +467,11 @@ private static LockAndTimestampServices createRawEmbeddedServices(
                 .lock(lockService)
                 .timestamp(timeService)
                 .timestampManagement(timestampManagementService)
-                .timelock(new LegacyTimelockService(timeService, lockService, TransactionManagers.LOCK_CLIENT))
+                .timelock(new LegacyTimelockService(
+                        timeService,
+                        lockService,
+                        TransactionManagers.LOCK_CLIENT,
+                        TransactionManagers.COMMIT_TIMESTAMP_LOCK_CLIENT))
                 .build();
     }
 }
diff --git a/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/TransactionManagers.java b/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/TransactionManagers.java
index 9df0b53032a..937cd6930a0 100644
--- a/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/TransactionManagers.java
+++ b/atlasdb-config/src/main/java/com/palantir/atlasdb/factory/TransactionManagers.java
@@ -176,6 +176,7 @@
 public abstract class TransactionManagers {
     private static final SafeLogger log = SafeLoggerFactory.get(TransactionManagers.class);
     public static final LockClient LOCK_CLIENT = LockClient.of("atlas instance");
+    public static final LockClient COMMIT_TIMESTAMP_LOCK_CLIENT = LockClient.of("commitTimestamp");
 
     abstract AtlasDbConfig config();
 
@@ -335,6 +336,7 @@ private static TransactionManager createInMemory(
                 .userAgent(AtlasDbRemotingConstants.DEFAULT_USER_AGENT)
                 .globalMetricsRegistry(new MetricRegistry())
                 .globalTaggedMetricRegistry(DefaultTaggedMetricRegistry.getDefault())
+                .defaultTimelockClientFactory((service, _management) -> TimeLockClient.withSynchronousUnlocker(service))
                 .lockAndTimestampServiceFactory(maybeFactory)
                 .addAllSchemas(schemas)
                 .build()
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/CacheStore.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/CacheStore.java
index 203a77403f3..4d27509fac2 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/CacheStore.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/CacheStore.java
@@ -18,6 +18,7 @@
 
 import com.palantir.atlasdb.keyvalue.api.watch.StartTimestamp;
 import com.palantir.lock.watch.CommitUpdate;
+import java.util.function.Supplier;
 
 interface CacheStore {
     void createCache(StartTimestamp timestamp);
@@ -28,7 +29,7 @@ interface CacheStore {
 
     void reset();
 
-    void createReadOnlyCache(StartTimestamp timestamp, CommitUpdate commitUpdate);
+    void createReadOnlyCache(StartTimestamp timestamp, Supplier<CommitUpdate> commitUpdate);
 
     TransactionScopedCache getReadOnlyCache(StartTimestamp timestamp);
 }
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/CacheStoreImpl.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/CacheStoreImpl.java
index c1dbdd5de06..db94bb9078a 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/CacheStoreImpl.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/CacheStoreImpl.java
@@ -16,6 +16,7 @@
 
 package com.palantir.atlasdb.keyvalue.api.cache;
 
+import com.google.common.base.Suppliers;
 import com.palantir.atlasdb.keyvalue.api.watch.StartTimestamp;
 import com.palantir.lock.watch.CommitUpdate;
 import com.palantir.logsafe.Preconditions;
@@ -23,10 +24,12 @@
 import com.palantir.logsafe.exceptions.SafeIllegalStateException;
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
+import com.palantir.tracing.CloseableTracer;
 import java.util.Comparator;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.function.Supplier;
 import javax.annotation.concurrent.ThreadSafe;
 import org.immutables.value.Value;
 
@@ -85,15 +88,19 @@ public void reset() {
     }
 
     @Override
-    public void createReadOnlyCache(StartTimestamp timestamp, CommitUpdate commitUpdate) {
-        cacheMap.computeIfPresent(timestamp, (_startTs, cache) -> cache.withReadOnlyCache(commitUpdate));
+    public void createReadOnlyCache(StartTimestamp timestamp, Supplier<CommitUpdate> commitUpdate) {
+        try (CloseableTracer tracer = CloseableTracer.startSpan("CacheStoreImpl#createReadOnlyCache")) {
+            cacheMap.computeIfPresent(timestamp, (_startTs, cache) -> cache.withComputeCache(commitUpdate));
+        }
     }
 
     @Override
     public TransactionScopedCache getReadOnlyCache(StartTimestamp timestamp) {
         return getCacheInternal(timestamp)
                 .flatMap(Caches::readOnlyCache)
-                .orElseGet(() -> NoOpTransactionScopedCache.create().createReadOnlyCache(CommitUpdate.invalidateAll()));
+                .orElseGet(() ->
+                        (() -> NoOpTransactionScopedCache.create().createReadOnlyCache(CommitUpdate.invalidateAll())))
+                .get();
     }
 
     private void validateStateSize() {
@@ -117,17 +124,17 @@ private Optional<Caches> getCacheInternal(StartTimestamp timestamp) {
     interface Caches {
         TransactionScopedCache mainCache();
 
-        Optional<TransactionScopedCache> readOnlyCache();
+        Optional<Supplier<TransactionScopedCache>> readOnlyCache();
 
         static Caches create(TransactionScopedCache mainCache) {
             return ImmutableCaches.builder().mainCache(mainCache).build();
         }
 
-        default Caches withReadOnlyCache(CommitUpdate commitUpdate) {
+        default Caches withComputeCache(Supplier<CommitUpdate> commitUpdate) {
             Preconditions.checkState(!readOnlyCache().isPresent(), "Read-only cache is already present");
             return ImmutableCaches.builder()
                     .from(this)
-                    .readOnlyCache(mainCache().createReadOnlyCache(commitUpdate))
+                    .readOnlyCache(Suppliers.memoize(() -> mainCache().createReadOnlyCache(commitUpdate.get())))
                     .build();
         }
     }
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/FilteringValueCacheSnapshot.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/FilteringValueCacheSnapshot.java
index bbba1444952..f81c8c0b3f3 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/FilteringValueCacheSnapshot.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/FilteringValueCacheSnapshot.java
@@ -20,11 +20,14 @@
 import com.palantir.atlasdb.keyvalue.api.AtlasLockDescriptorUtils;
 import com.palantir.atlasdb.keyvalue.api.CellReference;
 import com.palantir.atlasdb.keyvalue.api.TableReference;
+import com.palantir.conjure.java.client.config.ImmutablesStyle;
 import com.palantir.lock.LockDescriptor;
 import com.palantir.lock.watch.CommitUpdate;
 import com.palantir.lock.watch.CommitUpdate.Visitor;
 import com.palantir.logsafe.Unsafe;
+import com.palantir.tracing.CloseableTracer;
 import java.util.List;
+import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Collectors;
@@ -41,7 +44,9 @@ private FilteringValueCacheSnapshot(ValueCacheSnapshot delegate, LockedCells loc
     }
 
     static ValueCacheSnapshot create(ValueCacheSnapshot delegate, CommitUpdate commitUpdate) {
-        return new FilteringValueCacheSnapshot(delegate, toLockedCells(commitUpdate));
+        try (CloseableTracer tracer = CloseableTracer.startSpan("FilteringValueCacheSnapshot#create")) {
+            return new FilteringValueCacheSnapshot(delegate, toLockedCells(commitUpdate));
+        }
     }
 
     @Override
@@ -79,20 +84,23 @@ public String toString() {
     }
 
     private static LockedCells toLockedCells(CommitUpdate commitUpdate) {
-        return commitUpdate.accept(new Visitor<LockedCells>() {
-            @Override
-            public LockedCells invalidateAll() {
-                return LockedCells.invalidateAll();
-            }
-
-            @Override
-            public LockedCells invalidateSome(Set<LockDescriptor> invalidatedLocks) {
-                return LockedCells.invalidateSome(invalidatedLocks);
-            }
-        });
+        try (CloseableTracer tracer = CloseableTracer.startSpan("FilteringValueCacheSnapshot#toLockedCells")) {
+            return commitUpdate.accept(new Visitor<LockedCells>() {
+                @Override
+                public LockedCells invalidateAll() {
+                    return LockedCells.invalidateAll();
+                }
+
+                @Override
+                public LockedCells invalidateSome(Set<LockDescriptor> invalidatedLocks) {
+                    return LockedCells.invalidateSome(invalidatedLocks);
+                }
+            });
+        }
     }
 
     @Value.Immutable
+    @ImmutablesStyle
     interface LockedCells {
         boolean allLocked();
 
@@ -103,13 +111,17 @@ static LockedCells invalidateAll() {
         }
 
         static LockedCells invalidateSome(Set<LockDescriptor> descriptors) {
-            return ImmutableLockedCells.builder()
-                    .allLocked(false)
-                    .lockedCells(descriptors.stream()
-                            .map(AtlasLockDescriptorUtils::candidateCells)
-                            .flatMap(List::stream)
-                            .collect(Collectors.toSet()))
-                    .build();
+            try (CloseableTracer tracer = CloseableTracer.startSpan(
+                    "FilteringValueCacheSnapshot#invalidateSome",
+                    Map.of("size", Integer.toString(descriptors.size())))) {
+                return ImmutableLockedCells.builder()
+                        .allLocked(false)
+                        .lockedCells(descriptors.stream()
+                                .map(AtlasLockDescriptorUtils::candidateCells)
+                                .flatMap(List::stream)
+                                .collect(Collectors.toSet()))
+                        .build();
+            }
         }
 
         default boolean isUnlocked(CellReference cellReference) {
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/LockWatchValueScopingCacheImpl.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/LockWatchValueScopingCacheImpl.java
index f2264774e9b..3b004fd7265 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/LockWatchValueScopingCacheImpl.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/LockWatchValueScopingCacheImpl.java
@@ -40,6 +40,7 @@
 import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
+import com.palantir.tracing.CloseableTracer;
 import com.palantir.util.RateLimitedLogger;
 import java.util.Comparator;
 import java.util.List;
@@ -112,22 +113,32 @@ public static LockWatchValueScopingCache create(
 
     @Override
     public synchronized void processStartTransactions(Set<Long> startTimestamps) {
-        TransactionsLockWatchUpdate updateForTransactions =
-                eventCache.getUpdateForTransactions(startTimestamps, currentVersion);
+        try (CloseableTracer trace = CloseableTracer.startSpan("processStartTransactions")) {
+            TransactionsLockWatchUpdate updateForTransactions =
+                    eventCache.getUpdateForTransactions(startTimestamps, currentVersion);
 
-        Optional<LockWatchVersion> latestVersionFromUpdate = computeMaxUpdateVersion(updateForTransactions);
+            Optional<LockWatchVersion> latestVersionFromUpdate = computeMaxUpdateVersion(updateForTransactions);
 
-        if (updateForTransactions.clearCache()) {
-            clearCache(updateForTransactions, latestVersionFromUpdate);
-        }
+            if (updateForTransactions.clearCache()) {
+                clearCache(updateForTransactions, latestVersionFromUpdate);
+            }
 
-        updateStores(updateForTransactions);
-        updateCurrentVersion(latestVersionFromUpdate);
+            try (CloseableTracer tracer = CloseableTracer.startSpan("LockWatchValueScopingCacheImpl#updateStores")) {
+                updateStores(updateForTransactions);
+            }
+            try (CloseableTracer tracer =
+                    CloseableTracer.startSpan("LockWatchValueScopingCacheImpl#updateCurrentVersion")) {
+                updateCurrentVersion(latestVersionFromUpdate);
+            }
+        }
     }
 
     @Override
-    public synchronized void updateCacheWithCommitTimestampsInformation(Set<Long> startTimestamps) {
-        startTimestamps.forEach(this::processCommitUpdate);
+    public void updateCacheWithCommitTimestampsInformation(Set<Long> startTimestamps) {
+        try (CloseableTracer tracer = CloseableTracer.startSpan(
+                "LockWatchValueScopingCacheImpl#updateCacheWithCommitTimestampsInformation")) {
+            startTimestamps.forEach(this::processCommitUpdate);
+        }
     }
 
     @Override
@@ -139,8 +150,27 @@ public void requestStateRemoved(long startTs) {
 
     @Override
     public synchronized void ensureStateRemoved(long startTimestamp) {
+        ensureStateRemovedImpl(startTimestamp);
+    }
+
+    private synchronized void ensureStateRemoved(Set<Long> elements) {
+        try (CloseableTracer tracer = CloseableTracer.startSpan("ensureStateRemoved(Set<Long>)")) {
+            for (long startTimestamp : elements) {
+                ensureStateRemovedImpl(startTimestamp);
+            }
+        }
+    }
+
+    private synchronized void ensureStateRemovedImpl(long startTimestamp) {
+        // Not very concurrency safe, but should be fast, just removal from
+        // 2 maps. Once a second it runs some cleanup on internal data structures,
+        // which will block, but should also be fairly quick.
+        // I think the requirement here is that sometimes we need to
+        // do computation on the whole data structure, but we should figure out
+        // a way to not have to do that.
         StartTimestamp startTs = StartTimestamp.of(startTimestamp);
         snapshotStore.removeTimestamp(startTs);
+        // Concurrency safe
         cacheStore.removeCache(startTs);
     }
 
@@ -214,12 +244,16 @@ public TransactionScopedCache getReadOnlyTransactionScopedCacheForCommit(long st
         return cacheStore.getReadOnlyCache(StartTimestamp.of(startTs));
     }
 
-    private synchronized void processCommitUpdate(long startTimestamp) {
-        StartTimestamp startTs = StartTimestamp.of(startTimestamp);
-        TransactionScopedCache cache = cacheStore.getCache(startTs);
-        cache.finalise();
-        CommitUpdate commitUpdate = eventCache.getCommitUpdate(startTimestamp);
-        cacheStore.createReadOnlyCache(startTs, commitUpdate);
+    private void processCommitUpdate(long startTimestamp) {
+        try (CloseableTracer tracer = CloseableTracer.startSpan("LockWatchValueScopingCacheImpl#processCommitUpdate")) {
+            StartTimestamp startTs = StartTimestamp.of(startTimestamp);
+            // Just a lookup into a map: thread-safe and doesn't really need to be synchronized.
+            TransactionScopedCache cache = cacheStore.getCache(startTs);
+            cache.finalise();
+
+            // Just #computeIfPresent on a thread-safe map
+            cacheStore.createReadOnlyCache(startTs, () -> eventCache.getCommitUpdate(startTimestamp));
+        }
     }
 
     /**
@@ -350,9 +384,7 @@ public void accept(Set<Long> elements) {
             // TODO (jkong): Implement a nicer version of multi-removal.
             // The current version for instance may, in between timestamps, spam the rate limiter or clean up snapshots
             // multiple times when it could just do the check once at the end.
-            for (long element : elements) {
-                ensureStateRemoved(element);
-            }
+            ensureStateRemoved(elements);
         }
     }
 }
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/TransactionScopedCacheImpl.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/TransactionScopedCacheImpl.java
index 949ef84aa10..cc89555ad5d 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/TransactionScopedCacheImpl.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/cache/TransactionScopedCacheImpl.java
@@ -33,6 +33,7 @@
 import com.palantir.atlasdb.transaction.api.TransactionLockWatchFailedException;
 import com.palantir.common.streams.KeyedStream;
 import com.palantir.lock.watch.CommitUpdate;
+import com.palantir.tracing.CloseableTracer;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -181,8 +182,10 @@ public synchronized HitDigest getHitDigest() {
 
     @Override
     public TransactionScopedCache createReadOnlyCache(CommitUpdate commitUpdate) {
-        return ReadOnlyTransactionScopedCache.create(
-                new TransactionScopedCacheImpl(valueStore.createWithFilteredSnapshot(commitUpdate), metrics));
+        try (CloseableTracer tracer = CloseableTracer.startSpan("TransactionScopedCacheImpl#createReadOnlyCache")) {
+            return ReadOnlyTransactionScopedCache.create(
+                    new TransactionScopedCacheImpl(valueStore.createWithFilteredSnapshot(commitUpdate), metrics));
+        }
     }
 
     @Override
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/LockWatchEventCacheImpl.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/LockWatchEventCacheImpl.java
index c1b8d44f4e0..83c08900497 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/LockWatchEventCacheImpl.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/LockWatchEventCacheImpl.java
@@ -34,6 +34,7 @@
 import com.palantir.logsafe.UnsafeArg;
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
+import com.palantir.tracing.CloseableTracer;
 import java.util.Collection;
 import java.util.Optional;
 import java.util.Set;
@@ -88,25 +89,28 @@ public synchronized void processGetCommitTimestampsUpdate(
 
     @Override
     public CommitUpdate getCommitUpdate(long startTs) {
-        Optional<TimestampStateStore.TimestampVersionInfo> timestampInfo =
-                timestampStateStore.getTimestampInfo(startTs);
-        Optional<LockWatchVersion> startVersion = timestampInfo.map(TimestampStateStore.TimestampVersionInfo::version);
-        Optional<CommitInfo> maybeCommitInfo =
-                timestampInfo.flatMap(TimestampStateStore.TimestampVersionInfo::commitInfo);
-
-        assertTrue(
-                maybeCommitInfo.isPresent() && startVersion.isPresent(),
-                "start or commit info not processed for start timestamp");
-
-        CommitInfo commitInfo = maybeCommitInfo.get();
-
-        VersionBounds versionBounds = VersionBounds.builder()
-                .startVersion(startVersion)
-                .endVersion(commitInfo.commitVersion())
-                .build();
-
-        return eventLog.getEventsBetweenVersions(versionBounds)
-                .toCommitUpdate(startVersion.get(), commitInfo.commitVersion(), maybeCommitInfo);
+        try (CloseableTracer tracer = CloseableTracer.startSpan("LockWatchEventCacheImpl#getCommitUpdate")) {
+            Optional<TimestampStateStore.TimestampVersionInfo> timestampInfo =
+                    timestampStateStore.getTimestampInfo(startTs);
+            Optional<LockWatchVersion> startVersion =
+                    timestampInfo.map(TimestampStateStore.TimestampVersionInfo::version);
+            Optional<CommitInfo> maybeCommitInfo =
+                    timestampInfo.flatMap(TimestampStateStore.TimestampVersionInfo::commitInfo);
+
+            assertTrue(
+                    maybeCommitInfo.isPresent() && startVersion.isPresent(),
+                    "start or commit info not processed for start timestamp");
+
+            CommitInfo commitInfo = maybeCommitInfo.get();
+
+            VersionBounds versionBounds = VersionBounds.builder()
+                    .startVersion(startVersion)
+                    .endVersion(commitInfo.commitVersion())
+                    .build();
+
+            return eventLog.getEventsBetweenVersions(versionBounds)
+                    .toCommitUpdate(startVersion.get(), commitInfo.commitVersion(), maybeCommitInfo);
+        }
     }
 
     @Override
@@ -129,17 +133,19 @@ public CommitUpdate getEventUpdate(long startTs) {
     @Override
     public synchronized TransactionsLockWatchUpdate getUpdateForTransactions(
             Set<Long> startTimestamps, Optional<LockWatchVersion> lastKnownVersion) {
-        Preconditions.checkArgument(!startTimestamps.isEmpty(), "Cannot get update for empty set of transactions");
-        TimestampMapping timestampMapping = getTimestampMappings(startTimestamps);
-
-        VersionBounds versionBounds = VersionBounds.builder()
-                .startVersion(lastKnownVersion)
-                .endVersion(timestampMapping.lastVersion())
-                .earliestSnapshotVersion(timestampMapping.versionRange().lowerEndpoint())
-                .build();
-
-        return eventLog.getEventsBetweenVersions(versionBounds)
-                .toTransactionsLockWatchUpdate(timestampMapping, lastKnownVersion);
+        try (CloseableTracer tracer = CloseableTracer.startSpan("LockWatchEventCacheImpl#getUpdateForTransactions")) {
+            Preconditions.checkArgument(!startTimestamps.isEmpty(), "Cannot get update for empty set of transactions");
+            TimestampMapping timestampMapping = getTimestampMappings(startTimestamps);
+
+            VersionBounds versionBounds = VersionBounds.builder()
+                    .startVersion(lastKnownVersion)
+                    .endVersion(timestampMapping.lastVersion())
+                    .earliestSnapshotVersion(timestampMapping.versionRange().lowerEndpoint())
+                    .build();
+
+            return eventLog.getEventsBetweenVersions(versionBounds)
+                    .toTransactionsLockWatchUpdate(timestampMapping, lastKnownVersion);
+        }
     }
 
     @Override
@@ -165,13 +171,15 @@ synchronized LockWatchEventCacheState getStateForDiagnostics() {
     }
 
     private synchronized TimestampMapping getTimestampMappings(Set<Long> startTimestamps) {
-        ImmutableTimestampMapping.Builder mappingBuilder = TimestampMapping.builder();
-        startTimestamps.forEach(timestamp -> {
-            Optional<LockWatchVersion> entry = timestampStateStore.getStartVersion(timestamp);
-            assertTrue(entry.isPresent(), "start timestamp missing from map");
-            mappingBuilder.putTimestampMapping(timestamp, entry.get());
-        });
-        return mappingBuilder.build();
+        try (CloseableTracer trace = CloseableTracer.startSpan("LockWatchEventCacheImpl#getTimestampMappings")) {
+            ImmutableTimestampMapping.Builder mappingBuilder = TimestampMapping.builder();
+            startTimestamps.forEach(timestamp -> {
+                Optional<LockWatchVersion> entry = timestampStateStore.getStartVersion(timestamp);
+                assertTrue(entry.isPresent(), "start timestamp missing from map");
+                mappingBuilder.putTimestampMapping(timestamp, entry.get());
+            });
+            return mappingBuilder.build();
+        }
     }
 
     private synchronized Optional<LockWatchVersion> processEventLogUpdate(LockWatchStateUpdate update) {
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/LockWatchEventLog.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/LockWatchEventLog.java
index cabaaf7388e..de83f3b80c2 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/LockWatchEventLog.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/LockWatchEventLog.java
@@ -25,6 +25,7 @@
 import com.palantir.lock.watch.LockWatchVersion;
 import com.palantir.logsafe.Preconditions;
 import com.palantir.logsafe.Unsafe;
+import com.palantir.tracing.CloseableTracer;
 import java.util.Collection;
 import java.util.Optional;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -80,7 +81,14 @@ CacheUpdate processUpdate(LockWatchStateUpdate update) {
      *         condensed.
      */
     ClientLogEvents getEventsBetweenVersions(VersionBounds versionBounds) {
-        return runWithReadLock(() -> getEventsBetweenVersionsInternal(versionBounds));
+        try (CloseableTracer tracer = CloseableTracer.startSpan("LockWatchEventLog#getEventsBetweenVersions")) {
+            return runWithReadLock(() -> {
+                try (CloseableTracer tracer1 =
+                        CloseableTracer.startSpan("LockWatchEventLog#getEventsBetweenVersionsInternal")) {
+                    return getEventsBetweenVersionsInternal(versionBounds);
+                }
+            });
+        }
     }
 
     /**
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/TimestampStateStore.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/TimestampStateStore.java
index b654e5cf72a..ff0dca61370 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/TimestampStateStore.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/keyvalue/api/watch/TimestampStateStore.java
@@ -34,6 +34,7 @@
 import com.palantir.logsafe.exceptions.SafeIllegalStateException;
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
+import com.palantir.tracing.CloseableTracer;
 import java.util.Collection;
 import java.util.Comparator;
 import java.util.NavigableMap;
@@ -138,7 +139,9 @@ Optional<LockWatchVersion> getStartVersion(long startTimestamp) {
     }
 
     Optional<TimestampVersionInfo> getTimestampInfo(long startTimestamp) {
-        return Optional.ofNullable(timestampMap.get(StartTimestamp.of(startTimestamp)));
+        try (CloseableTracer tracer = CloseableTracer.startSpan("TimestampStateStore#getTimestampInfo")) {
+            return Optional.ofNullable(timestampMap.get(StartTimestamp.of(startTimestamp)));
+        }
     }
 
     Optional<Sequence> getEarliestLiveSequence() {
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/DefaultCommitTimestampLoader.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/DefaultCommitTimestampLoader.java
index 14d79f3e6b4..ae6808a36e4 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/DefaultCommitTimestampLoader.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/DefaultCommitTimestampLoader.java
@@ -44,6 +44,7 @@
 import com.palantir.logsafe.UnsafeArg;
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
+import com.palantir.tracing.CloseableTracer;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
@@ -197,20 +198,22 @@ private void waitForCommitToComplete(LongIterable startTimestamps) {
     }
 
     private void waitFor(Set<LockDescriptor> lockDescriptors) {
-        TransactionConfig currentTransactionConfig = transactionConfig.get();
+        try (CloseableTracer tracer = CloseableTracer.startSpan("waitFor")) {
+            TransactionConfig currentTransactionConfig = transactionConfig.get();
 
-        // TODO(fdesouza): Revert this once PDS-95791 is resolved.
-        long lockAcquireTimeoutMillis = currentTransactionConfig.getLockAcquireTimeoutMillis();
-        WaitForLocksRequest request = WaitForLocksRequest.of(lockDescriptors, lockAcquireTimeoutMillis);
-        WaitForLocksResponse response = timelockService.waitForLocks(request);
-        if (!response.wasSuccessful()) {
-            log.error(
-                    "Timed out waiting for commits to complete. Timeout was {} ms. First ten locks were {}.",
-                    SafeArg.of("requestId", request.getRequestId()),
-                    SafeArg.of("acquireTimeoutMs", lockAcquireTimeoutMillis),
-                    SafeArg.of("numberOfDescriptors", lockDescriptors.size()),
-                    UnsafeArg.of("firstTenLockDescriptors", Iterables.limit(lockDescriptors, 10)));
-            throw new TransactionLockAcquisitionTimeoutException("Timed out waiting for commits to complete.");
+            // TODO(fdesouza): Revert this once PDS-95791 is resolved.
+            long lockAcquireTimeoutMillis = currentTransactionConfig.getLockAcquireTimeoutMillis();
+            WaitForLocksRequest request = WaitForLocksRequest.of(lockDescriptors, lockAcquireTimeoutMillis);
+            WaitForLocksResponse response = timelockService.waitForLocks(request);
+            if (!response.wasSuccessful()) {
+                log.error(
+                        "Timed out waiting for commits to complete. Timeout was {} ms. First ten locks were {}.",
+                        SafeArg.of("requestId", request.getRequestId()),
+                        SafeArg.of("acquireTimeoutMs", lockAcquireTimeoutMillis),
+                        SafeArg.of("numberOfDescriptors", lockDescriptors.size()),
+                        UnsafeArg.of("firstTenLockDescriptors", Iterables.limit(lockDescriptors, 10)));
+                throw new TransactionLockAcquisitionTimeoutException("Timed out waiting for commits to complete.");
+            }
         }
     }
 
@@ -236,14 +239,16 @@ private Timer getTimer(String name) {
 
     private ListenableFuture<Map<Long, TransactionStatus>> loadCommitTimestamps(LongSet startTimestamps) {
         // distinguish between a single timestamp and a batch, for more granular metrics
-        if (startTimestamps.size() == 1) {
-            long singleTs = startTimestamps.longIterator().next();
-            return Futures.transform(
-                    transactionService.getAsyncV2(singleTs),
-                    commitState -> ImmutableMap.of(singleTs, commitState),
-                    MoreExecutors.directExecutor());
-        } else {
-            return transactionService.getAsyncV2(startTimestamps.collect(Long::valueOf));
+        try (CloseableTracer tracer = CloseableTracer.startSpan("loadCommitTimestamps")) {
+            if (startTimestamps.size() == 1) {
+                long singleTs = startTimestamps.longIterator().next();
+                return Futures.transform(
+                        transactionService.getAsyncV2(singleTs),
+                        commitState -> ImmutableMap.of(singleTs, commitState),
+                        MoreExecutors.directExecutor());
+            } else {
+                return transactionService.getAsyncV2(startTimestamps.collect(Long::valueOf));
+            }
         }
     }
 
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/ImmutableTimestampLockManager.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/ImmutableTimestampLockManager.java
index 0052e555e79..d14c555a456 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/ImmutableTimestampLockManager.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/ImmutableTimestampLockManager.java
@@ -36,9 +36,13 @@ public ImmutableTimestampLockManager(
         this.lockValidityChecker = lockValidityChecker;
     }
 
-    public Optional<ExpiredLocks> getExpiredImmutableTimestampAndCommitLocks(Optional<LockToken> commitLocksToken) {
+    public Optional<ExpiredLocks> getExpiredImmutableTimestampAndCommitLocks(Set<LockToken> commitLocksToken) {
         Set<LockToken> toRefresh = new HashSet<>();
-        commitLocksToken.ifPresent(toRefresh::add);
+
+        // TODO(jakubk): Handle this upstream
+        if (commitLocksToken != null) {
+            toRefresh.addAll(commitLocksToken);
+        }
         immutableTimestampLock.ifPresent(toRefresh::add);
 
         if (toRefresh.isEmpty()) {
@@ -55,13 +59,13 @@ public Optional<ExpiredLocks> getExpiredImmutableTimestampAndCommitLocks(Optiona
     }
 
     public SummarizedLockCheckResult getExpiredImmutableTimestampAndCommitLocksWithFullSummary(
-            LockToken commitLocksToken) {
+            Set<LockToken> commitLocksToken) {
         Preconditions.checkNotNull(
                 commitLocksToken,
                 "commitLocksToken was null, not expected to be in a call to"
                         + " getExpiredImmutableTimestampAndCommitLocksWithFullSummary",
                 SafeArg.of("immutableTimestampLock", immutableTimestampLock));
-        Optional<ExpiredLocks> expiredLocks = getExpiredImmutableTimestampAndCommitLocks(Optional.of(commitLocksToken));
+        Optional<ExpiredLocks> expiredLocks = getExpiredImmutableTimestampAndCommitLocks(commitLocksToken);
         return SummarizedLockCheckResult.builder()
                 .expiredLocks(expiredLocks)
                 .immutableTimestampLock(immutableTimestampLock)
@@ -69,7 +73,7 @@ public SummarizedLockCheckResult getExpiredImmutableTimestampAndCommitLocksWithF
                 .build();
     }
 
-    private String getExpiredLocksErrorString(Optional<LockToken> commitLocksToken, Set<LockToken> expiredLocks) {
+    private String getExpiredLocksErrorString(Set<LockToken> commitLocksToken, Set<LockToken> expiredLocks) {
         return "The following immutable timestamp lock was required: " + immutableTimestampLock
                 + "; the following commit locks were required: " + commitLocksToken
                 + "; the following locks are no longer valid: " + expiredLocks;
@@ -95,7 +99,7 @@ public interface SummarizedLockCheckResult {
 
         Optional<LockToken> immutableTimestampLock();
 
-        LockToken userProvidedLock();
+        Set<LockToken> userProvidedLock();
 
         static ImmutableSummarizedLockCheckResult.Builder builder() {
             return ImmutableSummarizedLockCheckResult.builder();
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/InstrumentedTimelockService.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/InstrumentedTimelockService.java
index 352c7d0b1f7..a7cd9940f6d 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/InstrumentedTimelockService.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/InstrumentedTimelockService.java
@@ -19,6 +19,7 @@
 import com.palantir.atlasdb.AtlasDbMetricNames;
 import com.palantir.atlasdb.util.MetricsManager;
 import com.palantir.lock.v2.ClientLockingOptions;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockRequest;
 import com.palantir.lock.v2.LockResponse;
@@ -69,7 +70,7 @@ public long getFreshTimestamp() {
     }
 
     @Override
-    public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
+    public GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken) {
         return executeWithRecord(() -> timelockService.getCommitTimestamp(startTs, commitLocksToken));
     }
 
@@ -93,6 +94,11 @@ public long getImmutableTimestamp() {
         return executeWithRecord(timelockService::getImmutableTimestamp);
     }
 
+    @Override
+    public long getCommitImmutableTimestamp() {
+        return executeWithRecord(timelockService::getCommitImmutableTimestamp);
+    }
+
     @Override
     public LockResponse lock(LockRequest request) {
         return executeWithRecord(() -> timelockService.lock(request));
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/LocalWriteBuffer.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/LocalWriteBuffer.java
index 103bfaf35b6..d5861956267 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/LocalWriteBuffer.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/LocalWriteBuffer.java
@@ -23,14 +23,17 @@
 import com.palantir.atlasdb.keyvalue.api.TableReference;
 import com.palantir.atlasdb.keyvalue.impl.Cells;
 import com.palantir.atlasdb.logging.LoggingArgs;
+import com.palantir.atlasdb.transaction.api.DelayedWrite;
 import com.palantir.lock.watch.ChangeMetadata;
 import com.palantir.logsafe.SafeArg;
 import com.palantir.logsafe.UnsafeArg;
 import com.palantir.logsafe.exceptions.SafeIllegalStateException;
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
@@ -44,6 +47,7 @@ class LocalWriteBuffer {
 
     private final ConcurrentMap<TableReference, ConcurrentNavigableMap<Cell, byte[]>> writesByTable =
             new ConcurrentHashMap<>();
+    private final List<DelayedWrite> delayedWritesByTable = Collections.synchronizedList(new ArrayList<>());
     private final ConcurrentMap<TableReference, Map<Cell, ChangeMetadata>> metadataByTable = new ConcurrentHashMap<>();
     private final ConcurrentMap<TableReference, Object> locksByTable = new ConcurrentHashMap<>();
     private final AtomicLong valuesByteCount = new AtomicLong();
@@ -92,6 +96,10 @@ public void putLocalWritesAndMetadata(
         }
     }
 
+    public void putDelayed(List<DelayedWrite> values) {
+        delayedWritesByTable.addAll(values);
+    }
+
     /**
      * Returns all local writes that have been buffered.
      */
@@ -99,6 +107,10 @@ public ConcurrentMap<TableReference, ConcurrentNavigableMap<Cell, byte[]>> getLo
         return writesByTable;
     }
 
+    public List<DelayedWrite> getDelayedWrites() {
+        return delayedWritesByTable;
+    }
+
     /**
      * Returns the local writes for cells of the given table.
      */
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransaction.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransaction.java
index 57202ef647d..41a6d8765c5 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransaction.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransaction.java
@@ -82,6 +82,7 @@
 import com.palantir.atlasdb.transaction.api.ConflictHandler;
 import com.palantir.atlasdb.transaction.api.ConstraintCheckable;
 import com.palantir.atlasdb.transaction.api.ConstraintCheckingTransaction;
+import com.palantir.atlasdb.transaction.api.DelayedWrite;
 import com.palantir.atlasdb.transaction.api.DeleteExecutor;
 import com.palantir.atlasdb.transaction.api.GetRangesQuery;
 import com.palantir.atlasdb.transaction.api.ImmutableGetRangesQuery;
@@ -147,6 +148,7 @@
 import com.palantir.lock.AtlasRowLockDescriptor;
 import com.palantir.lock.LockDescriptor;
 import com.palantir.lock.v2.ClientLockingOptions;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockRequest;
 import com.palantir.lock.v2.LockResponse;
 import com.palantir.lock.v2.LockToken;
@@ -162,6 +164,7 @@
 import com.palantir.logsafe.exceptions.SafeRuntimeException;
 import com.palantir.logsafe.logger.SafeLogger;
 import com.palantir.logsafe.logger.SafeLoggerFactory;
+import com.palantir.timestamp.TimestampRange;
 import com.palantir.tracing.CloseableTracer;
 import com.palantir.util.AssertUtils;
 import com.palantir.util.RateLimitedLogger;
@@ -200,6 +203,7 @@
 import java.util.function.Function;
 import java.util.function.LongSupplier;
 import java.util.function.Supplier;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import java.util.stream.StreamSupport;
 import javax.annotation.Nullable;
@@ -1749,6 +1753,26 @@ public void putWithMetadata(TableReference tableRef, Map<Cell, ValueAndChangeMet
                 Maps.transformValues(valuesAndMetadata, ValueAndChangeMetadata::metadata));
     }
 
+    @Override
+    public void putDelayed(List<DelayedWrite> values) {
+        //        Preconditions.checkArgument(!AtlasDbConstants.HIDDEN_TABLES.contains(tableRef));
+        //        markTableAsInvolvedInThisTransaction(tableRef);
+
+        if (values.isEmpty()) {
+            return;
+        }
+
+        numWriters.incrementAndGet();
+        try {
+            // We need to check the status after incrementing writers to ensure that we fail if we are committing.
+            ensureUncommitted();
+
+            localWriteBuffer.putDelayed(values);
+        } finally {
+            numWriters.decrementAndGet();
+        }
+    }
+
     private void putWithMetadataInternal(
             TableReference tableRef, Map<Cell, byte[]> values, Map<Cell, ChangeMetadata> metadata) {
         ensureNoEmptyValues(values);
@@ -1990,7 +2014,13 @@ private void commitWrites(TransactionService transactionService) {
             // This must happen before conflict checking, otherwise we could complete the checks and then have someone
             // else write underneath us before we proceed (thus missing a write/write conflict).
             // Timing still useful to distinguish bad lock percentiles from user-generated lock requests.
-            LockToken commitLocksToken = timedAndTraced("commitAcquireLocks", this::acquireLocksForCommit);
+            Set<LockToken> commitLocksToken = new HashSet<>();
+            commitLocksToken.add(timedAndTraced("commitAcquireLocks", this::acquireLocksForCommit));
+
+            // We could technically grab the commit timestamp as part of grabbing the locks and we would probably
+            // call it something slightly different then.
+            // The idea is the same though: have an idea of "beyond this point, writes are unsettled and therefore
+            // should not be read"
             try {
                 // Conflict checking. We can actually do this later without compromising correctness, but there is no
                 // reason to postpone this check - we waste resources writing unnecessarily if these are going to fail.
@@ -2029,9 +2059,14 @@ private void commitWrites(TransactionService transactionService) {
                 // We must do this before we check that our locks are still valid to ensure that other transactions that
                 // will hold these locks are sure to have start timestamps after our commit timestamp.
                 // Timing is still useful, as this may perform operations pertaining to lock watches.
-                long commitTimestamp = timedAndTraced(
+                // TODO(jakubk): Prototyping. commitLocksToken contains exactly one token here, so getOnlyElement is safe.
+                GetCommitTimestampResponse commitTimestampResponse = timedAndTraced(
                         "getCommitTimestamp",
-                        () -> timelockService.getCommitTimestamp(getStartTimestamp(), commitLocksToken));
+                        () -> timelockService.getCommitTimestamp(
+                                getStartTimestamp(), Iterables.getOnlyElement(commitLocksToken)));
+                commitLocksToken.add(
+                        commitTimestampResponse.immutableTimestamp().getLock());
+                long commitTimestamp = commitTimestampResponse.timestamp();
                 commitTsForScrubbing = commitTimestamp;
 
                 // Punch on commit so that if hard delete is the only thing happening on a system,
@@ -2051,7 +2086,11 @@ private void commitWrites(TransactionService transactionService) {
                         "transactionKvsValidityCheck",
                         () -> throwIfTransactionKeyValueServiceNoLongerValid(commitTimestamp));
 
-                // Serializable transactions need to check their reads haven't changed, by reading again at
+                // TODO(jakubk): Add the special writes here. This can probably run in parallel with everything else.
+                // TODO(jakubk): Eventually we'd fold this into some other RPC, but for now just grab some fresh
+                // timestamps.
+                timedAndTraced("writeDelayedWrites", this::writeDelayedWrites);
+                // Serializable transactions need to check their reads haven't changed, by reading again at
                 // commitTs + 1. This must happen before the lock check for thorough tables, because the lock check
                 // verifies the immutable timestamp hasn't moved forward - thorough sweep might sweep a conflict out
                 // from underneath us.
@@ -2087,11 +2126,46 @@ private void commitWrites(TransactionService transactionService) {
                         .update(localWriteBuffer.getValuesByteCount());
             } finally {
                 // Not timed because tryUnlock() is an asynchronous operation.
-                traced("postCommitUnlock", () -> timelockService.tryUnlock(ImmutableSet.of(commitLocksToken)));
+                traced("postCommitUnlock", () -> timelockService.tryUnlock(commitLocksToken));
             }
         });
     }
 
+    private void writeDelayedWrites() {
+        List<DelayedWrite> delayedWrites = localWriteBuffer.getDelayedWrites();
+        if (delayedWrites.isEmpty()) {
+            return;
+        }
+        TimestampRange freshTimestamps = timelockService.getFreshTimestamps(delayedWrites.size());
+        AtomicLong curFreshTimestamp = new AtomicLong(freshTimestamps.getLowerBound());
+        LongSupplier freshTimestampsSupplier = () -> {
+            long freshTimestamp = curFreshTimestamp.getAndIncrement();
+            Preconditions.checkState(freshTimestamp <= freshTimestamps.getUpperBound(), "Oops");
+            return freshTimestamp;
+        };
+        Map<TableReference, Map<Cell, byte[]>> materializedWrites = new HashMap<>();
+        for (DelayedWrite writeAction : delayedWrites) {
+            long freshTimestamp = freshTimestampsSupplier.getAsLong();
+            writeAction.apply(freshTimestamp).forEach((tableReference, writes) -> {
+                Map<Cell, byte[]> writesForTable =
+                        materializedWrites.computeIfAbsent(tableReference, k -> new HashMap<>());
+                writesForTable.putAll(writes);
+            });
+        }
+
+        // This is likely not 100% reliable and will require work to actually prove out/figure out
+        // safe way to not drop these writes on failed transactions.
+        // However, this is the art of the possible and I'm sure we can figure something out.
+        // UPDATE: this will not work, because, at least in the default configuration, the sweepable timestamps
+        // value will be written twice.
+        //        timedAndTraced(
+        //                "writeDelayedWritesWritingToSweepQueue",
+        //                () -> sweepQueue.enqueue(materializedWrites, getStartTimestamp()));
+        timedAndTraced(
+                "writeDelayedWritesCommitWrite",
+                () -> transactionKeyValueService.multiPut(materializedWrites, getStartTimestamp()));
+    }
+
     private void throwIfTransactionKeyValueServiceNoLongerValid(long commitTimestamp) {
         if (!transactionKeyValueService.isValid(commitTimestamp)) {
             throw new SafeTransactionFailedRetriableException(
@@ -2127,9 +2201,10 @@ protected void throwIfReadWriteConflictForSerializable(long commitTimestamp) {
     }
 
     private boolean hasWrites() {
-        return !localWriteBuffer.getLocalWrites().isEmpty()
-                && localWriteBuffer.getLocalWrites().values().stream()
-                        .anyMatch(writesForTable -> !writesForTable.isEmpty());
+        return !localWriteBuffer.getDelayedWrites().isEmpty()
+                || (!localWriteBuffer.getLocalWrites().isEmpty()
+                        && localWriteBuffer.getLocalWrites().values().stream()
+                                .anyMatch(writesForTable -> !writesForTable.isEmpty()));
     }
 
     protected boolean hasReads() {
@@ -2148,7 +2223,7 @@ protected ConflictHandler getConflictHandlerForTable(TableReference tableRef) {
     /**
      * Make sure we have all the rows we are checking already locked before calling this.
      */
-    protected void throwIfConflictOnCommit(LockToken commitLocksToken, TransactionService transactionService)
+    protected void throwIfConflictOnCommit(Set<LockToken> commitLocksToken, TransactionService transactionService)
             throws TransactionConflictException {
         for (Map.Entry<TableReference, ConcurrentNavigableMap<Cell, byte[]>> write :
                 localWriteBuffer.getLocalWrites().entrySet()) {
@@ -2162,7 +2237,7 @@ protected void throwIfWriteAlreadyCommitted(
             TableReference tableRef,
             Map<Cell, byte[]> writes,
             ConflictHandler conflictHandler,
-            LockToken commitLocksToken,
+            Set<LockToken> commitLocksToken,
             TransactionService transactionService)
             throws TransactionConflictException {
         if (writes.isEmpty() || !conflictHandler.checkWriteWriteConflicts()) {
@@ -2202,7 +2277,7 @@ private void throwIfValueChangedConflict(
             Map<Cell, byte[]> writes,
             Set<CellConflict> spanningWrites,
             Set<CellConflict> dominatingWrites,
-            LockToken commitLocksToken) {
+            Set<LockToken> commitLocksToken) {
         Map<Cell, CellConflict> cellToConflict = new HashMap<>();
         Map<Cell, Long> cellToTs = new HashMap<>();
         for (CellConflict c : Sets.union(spanningWrites, dominatingWrites)) {
@@ -2547,7 +2622,8 @@ private LongSet getStartTimestampsForValues(Iterable<Value> values) {
      * @throws TransactionLockTimeoutException If our locks timed out while trying to commit.
      * @throws TransactionCommitFailedException failed when committing in a way that isn't retriable
      */
-    private void putCommitTimestamp(long commitTimestamp, LockToken locksToken, TransactionService transactionService)
+    private void putCommitTimestamp(
+            long commitTimestamp, Set<LockToken> locksToken, TransactionService transactionService)
             throws TransactionFailedException {
         Preconditions.checkArgument(commitTimestamp > getStartTimestamp(), "commitTs must be greater than startTs");
         try {
@@ -2566,7 +2642,7 @@ private void putCommitTimestamp(long commitTimestamp, LockToken locksToken, Tran
     }
 
     private void handleKeyAlreadyExistsException(
-            long commitTs, KeyAlreadyExistsException ex, LockToken commitLocksToken) {
+            long commitTs, KeyAlreadyExistsException ex, Set<LockToken> commitLocksToken) {
         try {
             if (wasCommitSuccessful(commitTs)) {
                 // We did actually commit successfully.  This case could happen if the impl
@@ -2592,7 +2668,11 @@ private void handleKeyAlreadyExistsException(
                                 .immutableTimestampLock()
                                 .map(token -> token.toSafeArg("immutableTimestampLock"))
                                 .orElseGet(() -> SafeArg.of("immutableTimestampLock", null)),
-                        lockCheckResult.userProvidedLock().toSafeArg("commitLocksToken"));
+                        SafeArg.of(
+                                "commitLocksToken",
+                                lockCheckResult.userProvidedLock().stream()
+                                        .map(LockToken::getRequestId)
+                                        .collect(Collectors.toSet())));
             }
         } catch (TransactionFailedException e1) {
             throw e1;
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransactionManager.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransactionManager.java
index 870a1ce961d..41f681f75c4 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransactionManager.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/SnapshotTransactionManager.java
@@ -512,6 +512,11 @@ public long getImmutableTimestamp() {
         return immutableTs;
     }
 
+    @Override
+    public long getCommitImmutableTimestamp() {
+        return timelockService.getCommitImmutableTimestamp();
+    }
+
     private void recordImmutableTimestamp(long immutableTs) {
         recentImmutableTs.updateAndGet(current -> Math.max(current, immutableTs));
     }
diff --git a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/precommit/DefaultPreCommitRequirementValidator.java b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/precommit/DefaultPreCommitRequirementValidator.java
index 5bca6ed9770..50bdc1774bf 100644
--- a/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/precommit/DefaultPreCommitRequirementValidator.java
+++ b/atlasdb-impl-shared/src/main/java/com/palantir/atlasdb/transaction/impl/precommit/DefaultPreCommitRequirementValidator.java
@@ -31,6 +31,7 @@
 import com.palantir.logsafe.logger.SafeLoggerFactory;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 
 public final class DefaultPreCommitRequirementValidator implements PreCommitRequirementValidator {
     private static final SafeLogger log = SafeLoggerFactory.get(DefaultPreCommitRequirementValidator.class);
@@ -70,15 +71,15 @@ public void throwIfPreCommitConditionInvalidAtCommitOnWriteTransaction(
     }
 
     @Override
-    public void throwIfPreCommitRequirementsNotMet(LockToken commitLocksToken, long timestamp) {
+    public void throwIfPreCommitRequirementsNotMet(Set<LockToken> commitLocksToken, long timestamp) {
         throwIfImmutableTsOrCommitLocksExpired(commitLocksToken);
         throwIfPreCommitConditionInvalid(timestamp);
     }
 
     @Override
-    public void throwIfImmutableTsOrCommitLocksExpired(LockToken commitLocksToken) {
-        Optional<ExpiredLocks> expiredLocks = immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(
-                Optional.ofNullable(commitLocksToken));
+    public void throwIfImmutableTsOrCommitLocksExpired(Set<LockToken> commitLocksToken) {
+        Optional<ExpiredLocks> expiredLocks =
+                immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(commitLocksToken);
         if (expiredLocks.isPresent()) {
             throw createDefaultTransactionLockTimeoutException(expiredLocks.get());
         }
diff --git a/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/keyvalue/api/cache/LockWatchValueScopingCacheImplTest.java b/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/keyvalue/api/cache/LockWatchValueScopingCacheImplTest.java
index c9d593abe79..3c258377f4e 100644
--- a/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/keyvalue/api/cache/LockWatchValueScopingCacheImplTest.java
+++ b/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/keyvalue/api/cache/LockWatchValueScopingCacheImplTest.java
@@ -183,7 +183,7 @@ public void updateCacheOnCommitThrowsOnLeaderElection() {
 
         // Throws this message because the leader election cleared the info entirely (as opposed to us knowing that
         // there was an election)
-        assertThatThrownBy(() -> valueCache.updateCacheWithCommitTimestampsInformation(ImmutableSet.of(TIMESTAMP_1)))
+        assertThatThrownBy(() -> eventCache.getCommitUpdate(TIMESTAMP_1))
                 .isExactlyInstanceOf(TransactionLockWatchFailedException.class)
                 .hasMessage("start or commit info not processed for start timestamp");
     }
diff --git a/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/transaction/impl/ImmutableTimestampLockManagerTest.java b/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/transaction/impl/ImmutableTimestampLockManagerTest.java
index d47843ef382..9840112969c 100644
--- a/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/transaction/impl/ImmutableTimestampLockManagerTest.java
+++ b/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/transaction/impl/ImmutableTimestampLockManagerTest.java
@@ -27,7 +27,9 @@
 import com.palantir.atlasdb.transaction.impl.precommit.LockValidityChecker;
 import com.palantir.lock.v2.LockToken;
 import java.util.Optional;
+import java.util.Set;
 import java.util.UUID;
+import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.junit.jupiter.api.Assumptions;
 import org.junit.jupiter.api.Test;
@@ -51,7 +53,8 @@ public void returnsNothingExpiredWhenAllLocksStillValid(
         when(validityChecker.getStillValidLockTokens(anySet()))
                 .thenAnswer(invocation -> invocation.getArguments()[0]);
 
-        assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(userCommitLock))
+        assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(
+                        userCommitLock.stream().collect(Collectors.toSet())))
                 .isEmpty();
     }
 
@@ -67,12 +70,12 @@ public void returnsLocksThatWereCheckedWhenGettingFullSummaryAndStillValid(
                 .thenAnswer(invocation -> invocation.getArguments()[0]);
 
         assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocksWithFullSummary(
-                        DEFAULT_COMMIT_LOCK_TOKEN))
+                        Set.of(DEFAULT_COMMIT_LOCK_TOKEN)))
                 .satisfies(summarizedLockCheckResult -> {
                     assertThat(summarizedLockCheckResult.expiredLocks()).isEmpty();
                     assertThat(summarizedLockCheckResult.immutableTimestampLock())
                             .isEqualTo(immutableTimestampLock);
-                    assertThat(summarizedLockCheckResult.userProvidedLock()).isEqualTo(DEFAULT_COMMIT_LOCK_TOKEN);
+                    assertThat(summarizedLockCheckResult.userProvidedLock()).containsExactly(DEFAULT_COMMIT_LOCK_TOKEN);
                 });
     }
 
@@ -90,10 +93,12 @@ public void returnsExpiredLocksWhenLocksNoLongerValid(
 
         when(validityChecker.getStillValidLockTokens(anySet())).thenReturn(ImmutableSet.of());
 
-        assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(userCommitLock))
+        assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(
+                        userCommitLock.stream().collect(Collectors.toSet())))
                 .hasValueSatisfying(expiredLocks -> {
                     assertThat(expiredLocks.errorDescription()).contains(immutableTimestampLock.toString());
-                    assertThat(expiredLocks.errorDescription()).contains(userCommitLock.toString());
+                    userCommitLock.ifPresent(lockToken ->
+                            assertThat(expiredLocks.errorDescription()).contains(lockToken.toString()));
                 });
     }
 
@@ -108,12 +113,12 @@ public void returnsLocksThatWereCheckedWhenGettingFullSummaryAndExpired(
         when(validityChecker.getStillValidLockTokens(anySet())).thenReturn(ImmutableSet.of());
 
         assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocksWithFullSummary(
-                        DEFAULT_COMMIT_LOCK_TOKEN))
+                        Set.of(DEFAULT_COMMIT_LOCK_TOKEN)))
                 .satisfies(summarizedLockCheckResult -> {
                     assertThat(summarizedLockCheckResult.expiredLocks()).isPresent();
                     assertThat(summarizedLockCheckResult.immutableTimestampLock())
                             .isEqualTo(immutableTimestampLock);
-                    assertThat(summarizedLockCheckResult.userProvidedLock()).isEqualTo(DEFAULT_COMMIT_LOCK_TOKEN);
+                    assertThat(summarizedLockCheckResult.userProvidedLock()).containsExactly(DEFAULT_COMMIT_LOCK_TOKEN);
                 });
     }
 
@@ -127,7 +132,7 @@ public void throwsIfOnlyCommitLockExpiredWhenCheckingBoth() {
                 .thenReturn(ImmutableSet.of(DEFAULT_IMMUTABLE_TIMESTAMP_LOCK_TOKEN));
 
         assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(
-                        Optional.of(DEFAULT_COMMIT_LOCK_TOKEN)))
+                        Set.of(DEFAULT_COMMIT_LOCK_TOKEN)))
                 .hasValueSatisfying(expiredLocks -> {
                     // This is a bit fragile, but emphasising readability here
                     assertThat(expiredLocks.errorDescription())
@@ -144,7 +149,7 @@ public void throwsIfOnlyImmutableTimestampLockExpiredWhenCheckingBoth() {
         when(validityChecker.getStillValidLockTokens(anySet())).thenReturn(ImmutableSet.of(DEFAULT_COMMIT_LOCK_TOKEN));
 
         assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(
-                        Optional.of(DEFAULT_COMMIT_LOCK_TOKEN)))
+                        Set.of(DEFAULT_COMMIT_LOCK_TOKEN)))
                 .hasValueSatisfying(expiredLocks -> {
                     // This is a bit fragile, but emphasising readability here
                     assertThat(expiredLocks.errorDescription())
@@ -159,7 +164,7 @@ public void doesNotCallLockRefresherIfNothingToCheck() {
         ImmutableTimestampLockManager immutableTimestampLockManager =
                 new ImmutableTimestampLockManager(Optional.empty(), validityChecker);
 
-        assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(Optional.empty()))
+        assertThat(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(Set.of()))
                 .isEmpty();
         verify(validityChecker, never()).getStillValidLockTokens(anySet());
     }
diff --git a/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/transaction/impl/precommit/DefaultPreCommitRequirementValidatorTest.java b/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/transaction/impl/precommit/DefaultPreCommitRequirementValidatorTest.java
index cedebaf7ac3..0f7fd697aa2 100644
--- a/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/transaction/impl/precommit/DefaultPreCommitRequirementValidatorTest.java
+++ b/atlasdb-impl-shared/src/test/java/com/palantir/atlasdb/transaction/impl/precommit/DefaultPreCommitRequirementValidatorTest.java
@@ -20,6 +20,7 @@
 import static org.assertj.core.api.Assertions.assertThatThrownBy;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.nullable;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -37,6 +38,7 @@
 import com.palantir.lock.v2.LockToken;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.UUID;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -57,7 +59,7 @@ public final class DefaultPreCommitRequirementValidatorTest {
                     PtBytes.toBytes("two"),
                     Cell.create(PtBytes.toBytes("suggestion"), PtBytes.toBytes("moot")),
                     PtBytes.toBytes("three")));
-    private static final LockToken LOCK_TOKEN = LockToken.of(UUID.randomUUID());
+    private static final Set<LockToken> LOCK_TOKEN = Set.of(LockToken.of(UUID.randomUUID()));
 
     @Mock
     private PreCommitCondition userPreCommitCondition;
@@ -105,10 +107,10 @@ public void throwIfPreCommitConditionInvalidAtCommitOnWriteTransactionPropagates
 
     @Test
     public void throwIfImmutableTsOrCommitLocksExpiredDelegatesEmptyRequestToLockManager() {
-        when(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(any()))
+        when(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(nullable(Set.class)))
                 .thenReturn(Optional.empty());
         validator.throwIfImmutableTsOrCommitLocksExpired(null);
-        verify(immutableTimestampLockManager).getExpiredImmutableTimestampAndCommitLocks(Optional.empty());
+        verify(immutableTimestampLockManager).getExpiredImmutableTimestampAndCommitLocks(null);
     }
 
     @Test
@@ -116,7 +118,7 @@ public void throwIfImmutableTsOrCommitLocksExpiredDelegatesRequestWithLockTokenT
         when(immutableTimestampLockManager.getExpiredImmutableTimestampAndCommitLocks(any()))
                 .thenReturn(Optional.empty());
         validator.throwIfImmutableTsOrCommitLocksExpired(LOCK_TOKEN);
-        verify(immutableTimestampLockManager).getExpiredImmutableTimestampAndCommitLocks(Optional.of(LOCK_TOKEN));
+        verify(immutableTimestampLockManager).getExpiredImmutableTimestampAndCommitLocks(LOCK_TOKEN);
     }
 
     @Test
@@ -150,6 +152,6 @@ public void throwIfPreCommitRequirementsNotMetDoesNotThrowIfNoLocksExpiredAndPre
         assertThatCode(() -> validator.throwIfPreCommitRequirementsNotMet(LOCK_TOKEN, TIMESTAMP))
                 .doesNotThrowAnyException();
         verify(userPreCommitCondition).throwIfConditionInvalid(TIMESTAMP);
-        verify(immutableTimestampLockManager).getExpiredImmutableTimestampAndCommitLocks(Optional.of(LOCK_TOKEN));
+        verify(immutableTimestampLockManager).getExpiredImmutableTimestampAndCommitLocks(LOCK_TOKEN);
     }
 }
diff --git a/atlasdb-tests-shared/src/main/java/com/palantir/timelock/paxos/InMemoryNamespacedTimelockRpcClient.java b/atlasdb-tests-shared/src/main/java/com/palantir/timelock/paxos/InMemoryNamespacedTimelockRpcClient.java
index fa78fd2f406..92d63b00900 100644
--- a/atlasdb-tests-shared/src/main/java/com/palantir/timelock/paxos/InMemoryNamespacedTimelockRpcClient.java
+++ b/atlasdb-tests-shared/src/main/java/com/palantir/timelock/paxos/InMemoryNamespacedTimelockRpcClient.java
@@ -32,6 +32,11 @@ public long getImmutableTimestamp() {
         return delegate.getImmutableTimestamp();
     }
 
+    @Override
+    public long getCommitImmutableTimestamp() {
+        return delegate.getCommitImmutableTimestamp();
+    }
+
     @Override
     public long currentTimeMillis() {
         return delegate.currentTimeMillis();
diff --git a/atlasdb-tests-shared/src/test/java/com/palantir/atlasdb/transaction/impl/AbstractSnapshotTransactionTest.java b/atlasdb-tests-shared/src/test/java/com/palantir/atlasdb/transaction/impl/AbstractSnapshotTransactionTest.java
index ed53c138309..71bef66d9cc 100644
--- a/atlasdb-tests-shared/src/test/java/com/palantir/atlasdb/transaction/impl/AbstractSnapshotTransactionTest.java
+++ b/atlasdb-tests-shared/src/test/java/com/palantir/atlasdb/transaction/impl/AbstractSnapshotTransactionTest.java
@@ -140,6 +140,7 @@
 import com.palantir.lock.LockService;
 import com.palantir.lock.SimpleTimeDuration;
 import com.palantir.lock.TimeDuration;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockResponse;
 import com.palantir.lock.v2.LockToken;
@@ -1612,7 +1613,9 @@ public void commitDoesNotThrowIfAlreadySuccessfullyCommitted() {
                 getSnapshotTransactionWith(spiedTimeLockService, () -> transactionTs, res, PreCommitConditions.NO_OP);
 
         when(spiedTimeLockService.getFreshTimestamp()).thenReturn(transactionTs + 1);
-        doReturn(transactionTs + 1).when(spiedTimeLockService).getCommitTimestamp(anyLong(), any());
+        doReturn(GetCommitTimestampResponse.of(res, transactionTs + 1))
+                .when(spiedTimeLockService)
+                .getCommitTimestamp(anyLong(), any());
 
         // forcing to try to commit a transaction that is already committed
         transactionService.putUnlessExists(transactionTs, spiedTimeLockService.getFreshTimestamp());
diff --git a/atlasdb-workload-server/src/main/java/com/palantir/atlasdb/workload/store/UnreliableTimestampManager.java b/atlasdb-workload-server/src/main/java/com/palantir/atlasdb/workload/store/UnreliableTimestampManager.java
index a9fb97891d8..70733ed6e12 100644
--- a/atlasdb-workload-server/src/main/java/com/palantir/atlasdb/workload/store/UnreliableTimestampManager.java
+++ b/atlasdb-workload-server/src/main/java/com/palantir/atlasdb/workload/store/UnreliableTimestampManager.java
@@ -19,6 +19,7 @@
 import com.google.common.annotations.VisibleForTesting;
 import com.palantir.atlasdb.buggify.impl.DefaultNativeSamplingSecureRandomFactory;
 import com.palantir.lock.client.RandomizedTimestampManager;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockToken;
 import com.palantir.lock.v2.TimelockService;
 import com.palantir.logsafe.SafeArg;
@@ -96,7 +97,7 @@ public long getFreshTimestamp() {
     }
 
     @Override
-    public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
+    public GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken) {
         return runWithReadLock(() -> delegate.getCommitTimestamp(startTs, commitLocksToken));
     }
 
diff --git a/lock-api-objects/src/main/java/com/palantir/lock/v2/GetCommitTimestampResponse.java b/lock-api-objects/src/main/java/com/palantir/lock/v2/GetCommitTimestampResponse.java
new file mode 100644
index 00000000000..1e3a2d1ae95
--- /dev/null
+++ b/lock-api-objects/src/main/java/com/palantir/lock/v2/GetCommitTimestampResponse.java
@@ -0,0 +1,32 @@
+/*
+ * (c) Copyright 2024 Palantir Technologies Inc. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.palantir.lock.v2;
+
+import org.immutables.value.Value;
+
+@Value.Immutable
+public interface GetCommitTimestampResponse {
+    @Value.Parameter
+    LockImmutableTimestampResponse immutableTimestamp();
+
+    @Value.Parameter
+    long timestamp();
+
+    static GetCommitTimestampResponse of(LockImmutableTimestampResponse immutableTimestamp, long timestamp) {
+        return ImmutableGetCommitTimestampResponse.of(immutableTimestamp, timestamp);
+    }
+}
diff --git a/lock-api-objects/src/main/java/com/palantir/lock/v2/TimelockService.java b/lock-api-objects/src/main/java/com/palantir/lock/v2/TimelockService.java
index 84d9529103f..c761d269738 100644
--- a/lock-api-objects/src/main/java/com/palantir/lock/v2/TimelockService.java
+++ b/lock-api-objects/src/main/java/com/palantir/lock/v2/TimelockService.java
@@ -42,7 +42,7 @@ default boolean isInitialized() {
 
     long getFreshTimestamp();
 
-    long getCommitTimestamp(long startTs, LockToken commitLocksToken);
+    GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken);
 
     TimestampRange getFreshTimestamps(@Safe @QueryParam("number") int numTimestampsRequested);
 
@@ -76,6 +76,8 @@ default List<StartIdentifiedAtlasDbTransactionResponse> startIdentifiedAtlasDbTr
 
     long getImmutableTimestamp();
 
+    long getCommitImmutableTimestamp();
+
     LockResponse lock(LockRequest request);
 
     /**
diff --git a/lock-api/src/main/java/com/palantir/lock/client/BatchingCommitTimestampGetter.java b/lock-api/src/main/java/com/palantir/lock/client/BatchingCommitTimestampGetter.java
index 356b02e92dd..dc85d736003 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/BatchingCommitTimestampGetter.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/BatchingCommitTimestampGetter.java
@@ -18,11 +18,15 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Streams;
+import com.google.common.primitives.Ints;
 import com.palantir.atlasdb.autobatch.Autobatchers;
 import com.palantir.atlasdb.autobatch.BatchElement;
 import com.palantir.atlasdb.autobatch.DisruptorAutobatcher;
 import com.palantir.atlasdb.futures.AtlasFutures;
 import com.palantir.atlasdb.timelock.api.GetCommitTimestampsResponse;
+import com.palantir.common.base.Throwables;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
+import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockToken;
 import com.palantir.lock.watch.LockWatchCache;
 import com.palantir.lock.watch.LockWatchVersion;
@@ -34,20 +38,22 @@
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import java.util.stream.LongStream;
+import java.util.stream.Stream;
 import org.immutables.value.Value;
 
 /**
  * This class batches getCommitTimestamps requests to TimeLock server for a single client/namespace.
- * */
+ */
 final class BatchingCommitTimestampGetter implements CommitTimestampGetter {
-    private final DisruptorAutobatcher<Request, Long> autobatcher;
+    private final DisruptorAutobatcher<Request, GetCommitTimestampResponse> autobatcher;
 
-    private BatchingCommitTimestampGetter(DisruptorAutobatcher<Request, Long> autobatcher) {
+    private BatchingCommitTimestampGetter(DisruptorAutobatcher<Request, GetCommitTimestampResponse> autobatcher) {
         this.autobatcher = autobatcher;
     }
 
     public static BatchingCommitTimestampGetter create(LockLeaseService leaseService, LockWatchCache cache) {
-        DisruptorAutobatcher<Request, Long> autobatcher = Autobatchers.independent(consumer(leaseService, cache))
+        DisruptorAutobatcher<Request, GetCommitTimestampResponse> autobatcher = Autobatchers.independent(
+                        consumer(leaseService, cache))
                 .safeLoggablePurpose("get-commit-timestamp")
                 .batchFunctionTimeout(Duration.ofSeconds(30))
                 .build();
@@ -55,7 +61,7 @@ public static BatchingCommitTimestampGetter create(LockLeaseService leaseService
     }
 
     @Override
-    public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
+    public GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken) {
         return AtlasFutures.getUnchecked(autobatcher.apply(ImmutableRequest.builder()
                 .startTs(startTs)
                 .commitLocksToken(commitLocksToken)
@@ -63,10 +69,24 @@ public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
     }
 
     @VisibleForTesting
-    static Consumer<List<BatchElement<Request, Long>>> consumer(LockLeaseService leaseService, LockWatchCache cache) {
+    static Consumer<List<BatchElement<Request, GetCommitTimestampResponse>>> consumer(
+            LockLeaseService leaseService, LockWatchCache cache) {
         return batch -> {
             int count = batch.size();
-            List<Long> commitTimestamps = new ArrayList<>();
+            List<GetCommitTimestampResponse> commitTimestamps = getResponses(leaseService, cache, batch, count);
+            for (int i = 0; i < count; i++) {
+                batch.get(i).result().set(commitTimestamps.get(i));
+            }
+        };
+    }
+
+    private static List<GetCommitTimestampResponse> getResponses(
+            LockLeaseService leaseService,
+            LockWatchCache cache,
+            List<BatchElement<Request, GetCommitTimestampResponse>> batch,
+            int count) {
+        List<GetCommitTimestampResponse> commitTimestamps = new ArrayList<>();
+        try {
             while (commitTimestamps.size() < count) {
                 Optional<LockWatchVersion> requestedVersion =
                         cache.getEventCache().lastKnownVersion();
@@ -74,15 +94,31 @@ static Consumer<List<BatchElement<Request, Long>>> consumer(LockLeaseService lea
                         leaseService.getCommitTimestamps(requestedVersion, count - commitTimestamps.size());
                 commitTimestamps.addAll(process(batch.subList(commitTimestamps.size(), count), response, cache));
             }
-
-            for (int i = 0; i < count; i++) {
-                batch.get(i).result().set(commitTimestamps.get(i));
-            }
-        };
+            return commitTimestamps;
+        } catch (Throwable t) {
+            // Cache cleanup on failure is handled elsewhere; here we only need to release the commit
+            // immutable timestamp locks already acquired for responses we can no longer deliver.
+            // Unlocking through TransactionStarterHelper mirrors the approach taken by
+            // BatchingIdentifiedAtlasDbTransactionStarter for the equivalent failure mode.
+            TransactionStarterHelper.unlock(
+                    commitTimestamps.stream()
+                            .map(response -> response.immutableTimestamp().getLock())
+                            .collect(Collectors.toSet()),
+                    leaseService);
+            throw Throwables.throwUncheckedException(t);
+        }
     }
 
-    private static List<Long> process(
-            List<BatchElement<Request, Long>> requests, GetCommitTimestampsResponse response, LockWatchCache cache) {
+    private static List<GetCommitTimestampResponse> process(
+            List<BatchElement<Request, GetCommitTimestampResponse>> requests,
+            GetCommitTimestampsResponse response,
+            LockWatchCache cache) {
+        LockToken immutableTsLock = response.getCommitImmutableTimestamp().getLock();
+        long commitImmutableTs = response.getCommitImmutableTimestamp().getImmutableTimestamp();
+        Stream<LockImmutableTimestampResponse> immutableTsAndLocks = LockTokenShare.share(
+                        immutableTsLock,
+                        Ints.checkedCast(response.getInclusiveUpper() - response.getInclusiveLower() + 1))
+                .map(tokenShare -> LockImmutableTimestampResponse.of(commitImmutableTs, tokenShare));
         List<Long> timestamps = LongStream.rangeClosed(response.getInclusiveLower(), response.getInclusiveUpper())
                 .boxed()
                 .collect(Collectors.toList());
@@ -94,7 +130,8 @@ private static List<Long> process(
                                 .build())
                 .collect(Collectors.toList());
         cache.processCommitTimestampsUpdate(transactionUpdates, response.getLockWatchUpdate());
-        return timestamps;
+        return Streams.zip(immutableTsAndLocks, timestamps.stream(), GetCommitTimestampResponse::of)
+                .collect(Collectors.toList());
     }
 
     @Override
diff --git a/lock-api/src/main/java/com/palantir/lock/client/CommitTimestampGetter.java b/lock-api/src/main/java/com/palantir/lock/client/CommitTimestampGetter.java
index 890e34f51c8..ddcaec21e16 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/CommitTimestampGetter.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/CommitTimestampGetter.java
@@ -16,10 +16,11 @@
 
 package com.palantir.lock.client;
 
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockToken;
 
 public interface CommitTimestampGetter extends AutoCloseable {
-    long getCommitTimestamp(long startTs, LockToken commitLocksToken);
+    GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken);
 
     @Override
     void close();
diff --git a/lock-api/src/main/java/com/palantir/lock/client/LockLeaseService.java b/lock-api/src/main/java/com/palantir/lock/client/LockLeaseService.java
index c732c2e253b..772764ded3b 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/LockLeaseService.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/LockLeaseService.java
@@ -126,7 +126,21 @@ GetCommitTimestampsResponse getCommitTimestamps(Optional<LockWatchVersion> maybe
                 .numTimestamps(batchSize)
                 .lastKnownVersion(ConjureLockRequests.toConjure(maybeVersion))
                 .build();
-        return delegate.getCommitTimestamps(request);
+        return assignLeasedLockTokenToImmutableCommitTimestampLock(delegate.getCommitTimestamps(request));
+    }
+
+    public static GetCommitTimestampsResponse assignLeasedLockTokenToImmutableCommitTimestampLock(
+            GetCommitTimestampsResponse response) {
+        Lease lease = response.getLease();
+        LeasedLockToken leasedLockToken = LeasedLockToken.of(
+                ConjureLockToken.of(
+                        response.getCommitImmutableTimestamp().getLock().getRequestId()),
+                lease);
+        long immutableTs = response.getCommitImmutableTimestamp().getImmutableTimestamp();
+        return GetCommitTimestampsResponse.builder()
+                .from(response)
+                .commitImmutableTimestamp(LockImmutableTimestampResponse.of(immutableTs, leasedLockToken))
+                .build();
     }
 
     LockResponse lock(LockRequest request) {
diff --git a/lock-api/src/main/java/com/palantir/lock/client/MultiClientCommitTimestampGetter.java b/lock-api/src/main/java/com/palantir/lock/client/MultiClientCommitTimestampGetter.java
index 1e4928458ad..92f480f1a5e 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/MultiClientCommitTimestampGetter.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/MultiClientCommitTimestampGetter.java
@@ -20,6 +20,7 @@
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.Streams;
+import com.google.common.primitives.Ints;
 import com.palantir.atlasdb.autobatch.Autobatchers;
 import com.palantir.atlasdb.autobatch.BatchElement;
 import com.palantir.atlasdb.autobatch.DisruptorAutobatcher;
@@ -28,6 +29,8 @@
 import com.palantir.atlasdb.timelock.api.GetCommitTimestampsResponse;
 import com.palantir.atlasdb.timelock.api.Namespace;
 import com.palantir.common.streams.KeyedStream;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
+import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockToken;
 import com.palantir.lock.watch.LockWatchCache;
 import com.palantir.lock.watch.LockWatchStateUpdate;
@@ -43,24 +46,27 @@
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import java.util.stream.LongStream;
+import java.util.stream.Stream;
 import org.immutables.value.Value;
 
 public final class MultiClientCommitTimestampGetter implements AutoCloseable {
-    private final DisruptorAutobatcher<NamespacedRequest, Long> autobatcher;
+    private final DisruptorAutobatcher<NamespacedRequest, GetCommitTimestampResponse> autobatcher;
 
-    private MultiClientCommitTimestampGetter(DisruptorAutobatcher<NamespacedRequest, Long> autobatcher) {
+    private MultiClientCommitTimestampGetter(
+            DisruptorAutobatcher<NamespacedRequest, GetCommitTimestampResponse> autobatcher) {
         this.autobatcher = autobatcher;
     }
 
     public static MultiClientCommitTimestampGetter create(InternalMultiClientConjureTimelockService delegate) {
-        DisruptorAutobatcher<NamespacedRequest, Long> autobatcher = Autobatchers.independent(consumer(delegate))
+        DisruptorAutobatcher<NamespacedRequest, GetCommitTimestampResponse> autobatcher = Autobatchers.independent(
+                        consumer(delegate))
                 .safeLoggablePurpose("multi-client-commit-timestamp-getter")
                 .batchFunctionTimeout(Duration.ofSeconds(30))
                 .build();
         return new MultiClientCommitTimestampGetter(autobatcher);
     }
 
-    public long getCommitTimestamp(
+    public GetCommitTimestampResponse getCommitTimestamp(
             Namespace namespace, long startTs, LockToken commitLocksToken, LockWatchCache cache) {
         return AtlasFutures.getUnchecked(autobatcher.apply(ImmutableNamespacedRequest.builder()
                 .namespace(namespace)
@@ -71,12 +77,15 @@ public long getCommitTimestamp(
     }
 
     @VisibleForTesting
-    static Consumer<List<BatchElement<NamespacedRequest, Long>>> consumer(
+    static Consumer<List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>>> consumer(
             InternalMultiClientConjureTimelockService delegate) {
         return batch -> {
             BatchStateManager batchStateManager = BatchStateManager.createFromRequestBatch(batch);
             while (batchStateManager.hasPendingRequests()) {
-                batchStateManager.processResponse(delegate.getCommitTimestamps(batchStateManager.getRequests()));
+                batchStateManager.processResponse(
+                        KeyedStream.stream(delegate.getCommitTimestamps(batchStateManager.getRequests()))
+                                .map(LockLeaseService::assignLeasedLockTokenToImmutableCommitTimestampLock)
+                                .collectToMap());
             }
         };
     }
@@ -88,10 +97,11 @@ private BatchStateManager(Map<Namespace, NamespacedBatchStateManager> requestMap
             this.requestMap = requestMap;
         }
 
-        static BatchStateManager createFromRequestBatch(List<BatchElement<NamespacedRequest, Long>> batch) {
+        static BatchStateManager createFromRequestBatch(
+                List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> batch) {
             Map<Namespace, NamespacedBatchStateManager> requestMap = new HashMap<>();
 
-            for (BatchElement<NamespacedRequest, Long> elem : batch) {
+            for (BatchElement<NamespacedRequest, GetCommitTimestampResponse> elem : batch) {
                 NamespacedRequest argument = elem.argument();
                 Namespace namespace = argument.namespace();
                 NamespacedBatchStateManager namespacedBatchStateManager = requestMap.computeIfAbsent(
@@ -120,7 +130,7 @@ private void processResponse(Map<Namespace, GetCommitTimestampsResponse> respons
     }
 
     private static final class NamespacedBatchStateManager {
-        private final Queue<BatchElement<NamespacedRequest, Long>> pendingRequestQueue;
+        private final Queue<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> pendingRequestQueue;
         private final LockWatchCache cache;
         private Optional<LockWatchVersion> lastKnownVersion;
 
@@ -134,7 +144,7 @@ private boolean hasPendingRequests() {
             return !pendingRequestQueue.isEmpty();
         }
 
-        private void addRequest(BatchElement<NamespacedRequest, Long> elem) {
+        private void addRequest(BatchElement<NamespacedRequest, GetCommitTimestampResponse> elem) {
             pendingRequestQueue.add(elem);
         }
 
@@ -151,22 +161,35 @@ private Optional<LockWatchVersion> updateAndGetLastKnownVersion() {
         }
 
         private void serviceRequests(GetCommitTimestampsResponse commitTimestampsResponse) {
-            List<Long> commitTimestamps = getCommitTimestampValues(commitTimestampsResponse);
+            LockToken immutableTsLock =
+                    commitTimestampsResponse.getCommitImmutableTimestamp().getLock();
+            long immutableTs =
+                    commitTimestampsResponse.getCommitImmutableTimestamp().getImmutableTimestamp();
+            Stream<LockImmutableTimestampResponse> immutableTsAndLocks = LockTokenShare.share(
+                            immutableTsLock,
+                            Ints.checkedCast(commitTimestampsResponse.getInclusiveUpper()
+                                    - commitTimestampsResponse.getInclusiveLower()
+                                    + 1))
+                    .map(tokenShare -> LockImmutableTimestampResponse.of(immutableTs, tokenShare));
+            List<Long> timestamps = LongStream.rangeClosed(
+                            commitTimestampsResponse.getInclusiveLower(), commitTimestampsResponse.getInclusiveUpper())
+                    .boxed()
+                    .collect(Collectors.toList());
+            List<GetCommitTimestampResponse> commitTimestamps = Streams.zip(
+                            immutableTsAndLocks, timestamps.stream(), GetCommitTimestampResponse::of)
+                    .collect(Collectors.toList());
 
-            processLockWatchUpdate(commitTimestamps, commitTimestampsResponse.getLockWatchUpdate());
+            processLockWatchUpdate(
+                    commitTimestamps.stream()
+                            .map(GetCommitTimestampResponse::timestamp)
+                            .collect(Collectors.toList()),
+                    commitTimestampsResponse.getLockWatchUpdate());
 
-            for (Long commitTimestamp : commitTimestamps) {
+            for (GetCommitTimestampResponse commitTimestamp : commitTimestamps) {
                 pendingRequestQueue.poll().result().set(commitTimestamp);
             }
         }
 
-        private List<Long> getCommitTimestampValues(GetCommitTimestampsResponse commitTimestampsResponse) {
-            return LongStream.rangeClosed(
-                            commitTimestampsResponse.getInclusiveLower(), commitTimestampsResponse.getInclusiveUpper())
-                    .boxed()
-                    .collect(Collectors.toList());
-        }
-
         private void processLockWatchUpdate(List<Long> timestamps, LockWatchStateUpdate lockWatchUpdate) {
             List<TransactionUpdate> transactionUpdates = Streams.zip(
                             timestamps.stream(),
diff --git a/lock-api/src/main/java/com/palantir/lock/client/NamespacedCommitTimestampGetter.java b/lock-api/src/main/java/com/palantir/lock/client/NamespacedCommitTimestampGetter.java
index 08a7f4b3ec7..76d14dba5e9 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/NamespacedCommitTimestampGetter.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/NamespacedCommitTimestampGetter.java
@@ -17,6 +17,7 @@
 package com.palantir.lock.client;
 
 import com.palantir.atlasdb.timelock.api.Namespace;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockToken;
 import com.palantir.lock.watch.LockWatchCache;
 
@@ -35,7 +36,7 @@ public NamespacedCommitTimestampGetter(
     }
 
     @Override
-    public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
+    public GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken) {
         return referenceTrackingBatcher.getDelegate().getCommitTimestamp(namespace, startTs, commitLocksToken, cache);
     }
 
diff --git a/lock-api/src/main/java/com/palantir/lock/client/ProfilingTimelockService.java b/lock-api/src/main/java/com/palantir/lock/client/ProfilingTimelockService.java
index bfa07884c79..ad1c62c0a8b 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/ProfilingTimelockService.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/ProfilingTimelockService.java
@@ -21,6 +21,7 @@
 import com.google.common.util.concurrent.RateLimiter;
 import com.palantir.common.base.Throwables;
 import com.palantir.lock.v2.ClientLockingOptions;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockRequest;
 import com.palantir.lock.v2.LockResponse;
@@ -50,11 +51,11 @@
  * operation every {@link ProfilingTimelockService#LOGGING_TIME_WINDOW}. It will log what the longest operation that
  * completed in the past {@link ProfilingTimelockService#LOGGING_TIME_WINDOW} window was, and how long it took. If no
  * operation took longer than {@link ProfilingTimelockService#SLOW_THRESHOLD} it will not log.
- *
+ * <p>
  * The {@link ProfilingTimelockService} does not cover specific operations which are at risk for taking long
  * times owing to contention - in particular, this includes {@link #lock(LockRequest)} and
  * {@link #waitForLocks(WaitForLocksRequest)}.
- *
+ * <p>
  * Profiling is done explicitly at this level (and not at {@link com.palantir.lock.v2.TimelockRpcClient} level)
  * to reflect the impact of timelock operations and cluster state changes (e.g. leader elections, rolling bounces) on
  * clients.
@@ -102,7 +103,7 @@ public long getFreshTimestamp() {
     }
 
     @Override
-    public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
+    public GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken) {
         return runTaskTimed("getCommitTimestamp", () -> delegate.getCommitTimestamp(startTs, commitLocksToken));
     }
 
@@ -128,6 +129,11 @@ public long getImmutableTimestamp() {
         return runTaskTimed("getImmutableTimestamp", delegate::getImmutableTimestamp);
     }
 
+    @Override
+    public long getCommitImmutableTimestamp() {
+        return runTaskTimed("getCommitImmutableTimestamp", delegate::getCommitImmutableTimestamp);
+    }
+
     @Override
     public LockResponse lock(LockRequest request) {
         // Don't profile this, as it may be skewed by user contention on locks.
diff --git a/lock-api/src/main/java/com/palantir/lock/client/RandomizedTimestampManager.java b/lock-api/src/main/java/com/palantir/lock/client/RandomizedTimestampManager.java
index 09638018012..8d30b6560d8 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/RandomizedTimestampManager.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/RandomizedTimestampManager.java
@@ -16,6 +16,7 @@
 
 package com.palantir.lock.client;
 
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockToken;
 import com.palantir.timestamp.TimestampRange;
 
@@ -28,7 +29,7 @@ public interface RandomizedTimestampManager {
 
     long getFreshTimestamp();
 
-    long getCommitTimestamp(long startTs, LockToken commitLocksToken);
+    GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken);
 
     TimestampRange getFreshTimestamps(int numTimestampsRequested);
 }
diff --git a/lock-api/src/main/java/com/palantir/lock/client/RemoteTimelockServiceAdapter.java b/lock-api/src/main/java/com/palantir/lock/client/RemoteTimelockServiceAdapter.java
index 1db5f4894ae..91fd3dba25f 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/RemoteTimelockServiceAdapter.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/RemoteTimelockServiceAdapter.java
@@ -21,6 +21,7 @@
 import com.palantir.atlasdb.timelock.api.ConjureTimestampRange;
 import com.palantir.atlasdb.timelock.api.Namespace;
 import com.palantir.lock.v2.ClientLockingOptions;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockRequest;
 import com.palantir.lock.v2.LockResponse;
@@ -102,7 +103,7 @@ public long getFreshTimestamp() {
     }
 
     @Override
-    public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
+    public GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken) {
         return commitTimestampGetter.getCommitTimestamp(startTs, commitLocksToken);
     }
 
@@ -119,6 +120,11 @@ public long getImmutableTimestamp() {
         return rpcClient.getImmutableTimestamp();
     }
 
+    @Override
+    public long getCommitImmutableTimestamp() {
+        return rpcClient.getCommitImmutableTimestamp();
+    }
+
     @Override
     public WaitForLocksResponse waitForLocks(WaitForLocksRequest request) {
         return lockLeaseService.waitForLocks(request);
diff --git a/lock-api/src/main/java/com/palantir/lock/client/TimeLockClient.java b/lock-api/src/main/java/com/palantir/lock/client/TimeLockClient.java
index fd5b31c1b0e..3c2ea9433b6 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/TimeLockClient.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/TimeLockClient.java
@@ -22,6 +22,7 @@
 import com.palantir.common.concurrent.PTExecutors;
 import com.palantir.leader.NotCurrentLeaderException;
 import com.palantir.lock.v2.ClientLockingOptions;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockLeaseRefresher;
 import com.palantir.lock.v2.LockRequest;
@@ -102,7 +103,7 @@ public long getFreshTimestamp() {
     }
 
     @Override
-    public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
+    public GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken) {
         return delegate.getCommitTimestamp(startTs, commitLocksToken);
     }
 
@@ -139,6 +140,11 @@ public long getImmutableTimestamp() {
         return executeOnTimeLock(delegate::getImmutableTimestamp);
     }
 
+    @Override
+    public long getCommitImmutableTimestamp() {
+        return executeOnTimeLock(delegate::getCommitImmutableTimestamp);
+    }
+
     @Override
     public LockResponse lock(LockRequest request) {
         return lock(request, ClientLockingOptions.getDefault());
diff --git a/lock-api/src/main/java/com/palantir/lock/client/UnreliableTimeLockService.java b/lock-api/src/main/java/com/palantir/lock/client/UnreliableTimeLockService.java
index e45252af686..4107202417a 100644
--- a/lock-api/src/main/java/com/palantir/lock/client/UnreliableTimeLockService.java
+++ b/lock-api/src/main/java/com/palantir/lock/client/UnreliableTimeLockService.java
@@ -20,6 +20,7 @@
 import com.palantir.atlasdb.buggify.api.BuggifyFactory;
 import com.palantir.atlasdb.buggify.impl.DefaultBuggifyFactory;
 import com.palantir.lock.v2.ClientLockingOptions;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockRequest;
 import com.palantir.lock.v2.LockResponse;
@@ -76,7 +77,7 @@ public long getFreshTimestamp() {
     }
 
     @Override
-    public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
+    public GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken) {
         maybeRandomlyIncreaseTimestamp();
         return timestampManager.getCommitTimestamp(startTs, commitLocksToken);
     }
@@ -102,6 +103,12 @@ public long getImmutableTimestamp() {
         return delegate.getImmutableTimestamp();
     }
 
+    @Override
+    public long getCommitImmutableTimestamp() {
+        // Feature not active
+        return Long.MAX_VALUE;
+    }
+
     @Override
     public LockResponse lock(LockRequest request) {
         LockResponse response = delegate.lock(request);
diff --git a/lock-api/src/main/java/com/palantir/lock/v2/DefaultNamespacedTimelockRpcClient.java b/lock-api/src/main/java/com/palantir/lock/v2/DefaultNamespacedTimelockRpcClient.java
index 3335a2d3549..f7f0e415173 100644
--- a/lock-api/src/main/java/com/palantir/lock/v2/DefaultNamespacedTimelockRpcClient.java
+++ b/lock-api/src/main/java/com/palantir/lock/v2/DefaultNamespacedTimelockRpcClient.java
@@ -30,6 +30,11 @@ public long getImmutableTimestamp() {
         return timelockRpcClient.getImmutableTimestamp(namespace);
     }
 
+    @Override
+    public long getCommitImmutableTimestamp() {
+        return timelockRpcClient.getCommitImmutableTimestamp(namespace);
+    }
+
     @Override
     public long currentTimeMillis() {
         return timelockRpcClient.currentTimeMillis(namespace);
diff --git a/lock-api/src/main/java/com/palantir/lock/v2/NamespacedTimelockRpcClient.java b/lock-api/src/main/java/com/palantir/lock/v2/NamespacedTimelockRpcClient.java
index ac8d4c5f15c..6f8622fd0e7 100644
--- a/lock-api/src/main/java/com/palantir/lock/v2/NamespacedTimelockRpcClient.java
+++ b/lock-api/src/main/java/com/palantir/lock/v2/NamespacedTimelockRpcClient.java
@@ -23,5 +23,7 @@
 public interface NamespacedTimelockRpcClient {
     long getImmutableTimestamp();
 
+    long getCommitImmutableTimestamp();
+
     long currentTimeMillis();
 }
diff --git a/lock-api/src/main/java/com/palantir/lock/v2/TimelockRpcClient.java b/lock-api/src/main/java/com/palantir/lock/v2/TimelockRpcClient.java
index c242825cc74..d1f39e491a2 100644
--- a/lock-api/src/main/java/com/palantir/lock/v2/TimelockRpcClient.java
+++ b/lock-api/src/main/java/com/palantir/lock/v2/TimelockRpcClient.java
@@ -40,6 +40,10 @@ public interface TimelockRpcClient {
     @Path("immutable-timestamp")
     long getImmutableTimestamp(@PathParam("namespace") String namespace);
 
+    @POST
+    @Path("commit-immutable-timestamp")
+    long getCommitImmutableTimestamp(@PathParam("namespace") String namespace);
+
     @POST
     @Path("current-time-millis")
     long currentTimeMillis(@PathParam("namespace") String namespace);
diff --git a/lock-api/src/test/java/com/palantir/lock/client/BatchingCommitTimestampGetterTest.java b/lock-api/src/test/java/com/palantir/lock/client/BatchingCommitTimestampGetterTest.java
index 7c787703f79..039bb9d46df 100644
--- a/lock-api/src/test/java/com/palantir/lock/client/BatchingCommitTimestampGetterTest.java
+++ b/lock-api/src/test/java/com/palantir/lock/client/BatchingCommitTimestampGetterTest.java
@@ -31,7 +31,13 @@
 import com.palantir.atlasdb.autobatch.BatchElement;
 import com.palantir.atlasdb.autobatch.DisruptorAutobatcher;
 import com.palantir.atlasdb.timelock.api.GetCommitTimestampsResponse;
+import com.palantir.common.time.NanoTime;
 import com.palantir.lock.StringLockDescriptor;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
+import com.palantir.lock.v2.LeaderTime;
+import com.palantir.lock.v2.LeadershipId;
+import com.palantir.lock.v2.Lease;
+import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockToken;
 import com.palantir.lock.watch.LockEvent;
 import com.palantir.lock.watch.LockWatchCache;
@@ -42,11 +48,13 @@
 import com.palantir.lock.watch.LockWatchVersion;
 import com.palantir.lock.watch.TransactionUpdate;
 import com.palantir.tritium.metrics.registry.DefaultTaggedMetricRegistry;
+import java.time.Duration;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Optional;
 import java.util.UUID;
 import java.util.function.Consumer;
+import java.util.stream.Collectors;
 import org.junit.jupiter.api.Test;
 import org.mockito.InOrder;
 import org.mockito.Mockito;
@@ -68,13 +76,19 @@ public final class BatchingCommitTimestampGetterTest {
     private static final Optional<LockWatchVersion> IDENTIFIED_VERSION_1 = Optional.empty();
     private static final Optional<LockWatchVersion> IDENTIFIED_VERSION_2 =
             Optional.of(LockWatchVersion.of(UUID.randomUUID(), -1));
+    private static final LeadershipId LEADERSHIP_ID = LeadershipId.random();
+    private static final LeaderTime LEADER_TIME = LeaderTime.of(LEADERSHIP_ID, NanoTime.createForTests(1L));
+    private static final Lease LEASE = Lease.of(LEADER_TIME, Duration.ofSeconds(977));
+    private static final LockToken COMMIT_TS_LOCK_TOKEN = mock(LockToken.class);
+    private static final LockImmutableTimestampResponse COMMIT_TS_RESPONSE =
+            LockImmutableTimestampResponse.of(1L, COMMIT_TS_LOCK_TOKEN);
 
     private final LockLeaseService lockLeaseService = mock(LockLeaseService.class);
     private final LockWatchEventCache eventCache = mock(LockWatchEventCache.class);
     private final LockWatchValueCache valueCache = mock(LockWatchValueCache.class);
     private final LockWatchCache cache = spy(new LockWatchCacheImpl(eventCache, valueCache));
-    private final Consumer<List<BatchElement<BatchingCommitTimestampGetter.Request, Long>>> batchProcessor =
-            BatchingCommitTimestampGetter.consumer(lockLeaseService, cache);
+    private final Consumer<List<BatchElement<BatchingCommitTimestampGetter.Request, GetCommitTimestampResponse>>>
+            batchProcessor = BatchingCommitTimestampGetter.consumer(lockLeaseService, cache);
 
     @Test
     public void consumerFillsTheWholeBatch() {
@@ -87,7 +101,10 @@ public void consumerFillsTheWholeBatch() {
         whenGetCommitTimestamps(IDENTIFIED_VERSION_1, 4, 5, 6, UPDATE_1);
         whenGetCommitTimestamps(IDENTIFIED_VERSION_2, 2, 7, 8, UPDATE_2);
 
-        assertThat(processBatch(request1, request2, request3, request4)).containsExactly(5L, 6L, 7L, 8L);
+        assertThat(processBatch(request1, request2, request3, request4).stream()
+                        .map(GetCommitTimestampResponse::timestamp)
+                        .collect(Collectors.toList()))
+                .containsExactly(5L, 6L, 7L, 8L);
 
         InOrder inOrder = Mockito.inOrder(lockLeaseService, cache);
         inOrder.verify(lockLeaseService).getCommitTimestamps(IDENTIFIED_VERSION_1, 4);
@@ -109,16 +126,22 @@ private void whenGetCommitTimestamps(
                         .inclusiveLower(start)
                         .inclusiveUpper(end)
                         .lockWatchUpdate(update)
+                        .commitImmutableTimestamp(COMMIT_TS_RESPONSE)
+                        .lease(LEASE)
                         .build());
     }
 
-    private List<Long> processBatch(BatchingCommitTimestampGetter.Request... requests) {
-        List<BatchElement<BatchingCommitTimestampGetter.Request, Long>> elements = Arrays.stream(requests)
-                .map(request -> ImmutableTestBatchElement.<BatchingCommitTimestampGetter.Request, Long>builder()
-                        .argument(request)
-                        .result(new DisruptorAutobatcher.DisruptorFuture<>(
-                                AutobatcherTelemetryComponents.create("test", new DefaultTaggedMetricRegistry())))
-                        .build())
+    private List<GetCommitTimestampResponse> processBatch(BatchingCommitTimestampGetter.Request... requests) {
+        List<BatchElement<BatchingCommitTimestampGetter.Request, GetCommitTimestampResponse>> elements = Arrays.stream(
+                        requests)
+                .map(request ->
+                        ImmutableTestBatchElement
+                                .<BatchingCommitTimestampGetter.Request, GetCommitTimestampResponse>builder()
+                                .argument(request)
+                                .result(new DisruptorAutobatcher.DisruptorFuture<>(
+                                        AutobatcherTelemetryComponents.create(
+                                                "test", new DefaultTaggedMetricRegistry())))
+                                .build())
                 .collect(toList());
         batchProcessor.accept(elements);
         return Futures.getUnchecked(Futures.allAsList(Lists.transform(elements, BatchElement::result)));
diff --git a/lock-api/src/test/java/com/palantir/lock/client/MultiClientCommitTimestampGetterTest.java b/lock-api/src/test/java/com/palantir/lock/client/MultiClientCommitTimestampGetterTest.java
index 4f425f8cfe4..0e5266651ae 100644
--- a/lock-api/src/test/java/com/palantir/lock/client/MultiClientCommitTimestampGetterTest.java
+++ b/lock-api/src/test/java/com/palantir/lock/client/MultiClientCommitTimestampGetterTest.java
@@ -40,13 +40,20 @@
 import com.palantir.atlasdb.timelock.api.GetCommitTimestampsResponse;
 import com.palantir.atlasdb.timelock.api.Namespace;
 import com.palantir.common.streams.KeyedStream;
+import com.palantir.common.time.NanoTime;
 import com.palantir.lock.client.MultiClientCommitTimestampGetter.NamespacedRequest;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
+import com.palantir.lock.v2.LeaderTime;
+import com.palantir.lock.v2.LeadershipId;
+import com.palantir.lock.v2.Lease;
+import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockToken;
 import com.palantir.lock.watch.LockWatchCache;
 import com.palantir.lock.watch.LockWatchCacheImpl;
 import com.palantir.lock.watch.LockWatchStateUpdate;
 import com.palantir.logsafe.exceptions.SafeIllegalStateException;
 import com.palantir.tritium.metrics.registry.DefaultTaggedMetricRegistry;
+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -61,6 +68,12 @@
 public class MultiClientCommitTimestampGetterTest {
     private static final int COMMIT_TS_LIMIT_PER_REQUEST = 5;
     private static final SafeIllegalStateException EXCEPTION = new SafeIllegalStateException("Something went wrong!");
+    private static final LeadershipId LEADERSHIP_ID = LeadershipId.random();
+    private static final LeaderTime LEADER_TIME = LeaderTime.of(LEADERSHIP_ID, NanoTime.createForTests(1L));
+    private static final Lease LEASE = Lease.of(LEADER_TIME, Duration.ofSeconds(977));
+    private static final LockToken COMMIT_TS_LOCK_TOKEN = LockToken.of(UUID.randomUUID());
+    private static final LockImmutableTimestampResponse COMMIT_TS_RESPONSE =
+            LockImmutableTimestampResponse.of(1L, COMMIT_TS_LOCK_TOKEN);
 
     private final Map<Namespace, Long> lowestStartTsMap = new HashMap<>();
     private final Map<Namespace, LockWatchCache> lockWatchCacheMap = new HashMap<>();
@@ -69,7 +82,7 @@ public class MultiClientCommitTimestampGetterTest {
     private final InternalMultiClientConjureTimelockService timelockService =
             mock(InternalMultiClientConjureTimelockService.class);
 
-    private final Consumer<List<BatchElement<NamespacedRequest, Long>>> consumer =
+    private final Consumer<List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>>> consumer =
             MultiClientCommitTimestampGetter.consumer(timelockService);
 
     private final LockWatchStateUpdate lockWatchStateUpdate =
@@ -102,7 +115,8 @@ public void canServiceMultipleClientsWithMultipleServerCalls() {
     @Test
     public void updatesCacheWhileProcessingResponse() {
         Namespace client = Namespace.of("Kitty");
-        List<BatchElement<NamespacedRequest, Long>> batchElements = IntStream.range(0, COMMIT_TS_LIMIT_PER_REQUEST * 2)
+        List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> batchElements = IntStream.range(
+                        0, COMMIT_TS_LIMIT_PER_REQUEST * 2)
                 .mapToObj(ind -> batchElementForNamespace(client))
                 .collect(toList());
         setupServiceAndAssertSanityOfResponse(batchElements);
@@ -116,11 +130,13 @@ public void doesNotUpdateCacheIfClientNotServed() {
         Namespace alpha = Namespace.of("alpha" + UUID.randomUUID());
         Namespace beta = Namespace.of("beta" + UUID.randomUUID());
 
-        BatchElement<NamespacedRequest, Long> requestForAlpha = batchElementForNamespace(alpha);
-        BatchElement<NamespacedRequest, Long> requestForBeta = batchElementForNamespace(beta);
+        BatchElement<NamespacedRequest, GetCommitTimestampResponse> requestForAlpha = batchElementForNamespace(alpha);
+        BatchElement<NamespacedRequest, GetCommitTimestampResponse> requestForBeta = batchElementForNamespace(beta);
 
-        List<BatchElement<NamespacedRequest, Long>> allRequests = ImmutableList.of(requestForAlpha, requestForBeta);
-        List<BatchElement<NamespacedRequest, Long>> alphaRequestList = ImmutableList.of(requestForAlpha);
+        List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> allRequests =
+                ImmutableList.of(requestForAlpha, requestForBeta);
+        List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> alphaRequestList =
+                ImmutableList.of(requestForAlpha);
         Map<Namespace, GetCommitTimestampsResponse> responseMap = getCommitTimestamps(alphaRequestList);
 
         when(timelockService.getCommitTimestamps(any())).thenReturn(responseMap).thenThrow(EXCEPTION);
@@ -141,7 +157,8 @@ public void doesNotUpdateCacheIfClientNotServed() {
         verify(betaCache, never()).processCommitTimestampsUpdate(any(), any());
     }
 
-    private void setupServiceAndAssertSanityOfResponse(List<BatchElement<NamespacedRequest, Long>> batch) {
+    private void setupServiceAndAssertSanityOfResponse(
+            List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> batch) {
         Map<Namespace, List<GetCommitTimestampsResponse>> expectedResponseMap = new HashMap<>();
 
         when(timelockService.getCommitTimestamps(any())).thenAnswer(invocation -> {
@@ -160,13 +177,13 @@ private void setupServiceAndAssertSanityOfResponse(List<BatchElement<NamespacedR
     }
 
     private void assertSanityOfResponse(
-            List<BatchElement<NamespacedRequest, Long>> batch,
+            List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> batch,
             Map<Namespace, List<GetCommitTimestampsResponse>> expectedResponseMap) {
         assertThat(batch.stream().filter(elem -> !elem.result().isDone()).collect(Collectors.toSet()))
                 .as("All requests must be served")
                 .isEmpty();
 
-        Map<Namespace, List<Long>> partitionedResponseMap = batch.stream()
+        Map<Namespace, List<GetCommitTimestampResponse>> partitionedResponseMap = batch.stream()
                 .collect(groupingBy(
                         elem -> elem.argument().namespace(),
                         Collectors.mapping(elem -> Futures.getUnchecked(elem.result()), toList())));
@@ -177,14 +194,18 @@ private void assertSanityOfResponse(
 
     private static void assertCorrectnessOfCompletedRequests(
             Map<Namespace, List<GetCommitTimestampsResponse>> expectedResponseMap,
-            Map<Namespace, List<Long>> partitionedResponseMap) {
+            Map<Namespace, List<GetCommitTimestampResponse>> partitionedResponseMap) {
         KeyedStream.stream(partitionedResponseMap)
                 .forEach((namespace, commitTsList) ->
                         assertCorrectnessOfServedTimestamps(expectedResponseMap.get(namespace), commitTsList));
     }
 
     private static void assertCorrectnessOfServedTimestamps(
-            List<GetCommitTimestampsResponse> expectedCommitTimestampsResponses, List<Long> commitTsList) {
+            List<GetCommitTimestampsResponse> expectedCommitTimestampsResponses,
+            List<GetCommitTimestampResponse> responsesList) {
+        List<Long> commitTsList = responsesList.stream()
+                .map(GetCommitTimestampResponse::timestamp)
+                .collect(Collectors.toList());
         long requestedCommitTsCount = expectedCommitTimestampsResponses.stream()
                 .mapToLong(resp -> resp.getInclusiveUpper() - resp.getInclusiveLower() + 1)
                 .sum();
@@ -202,8 +223,8 @@ private static void assertCorrectnessOfServedTimestamps(
     }
 
     private Map<Namespace, GetCommitTimestampsResponse> getCommitTimestamps(
-            List<BatchElement<NamespacedRequest, Long>> batch) {
-        Map<Namespace, List<BatchElement<NamespacedRequest, Long>>> partitionedRequests =
+            List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> batch) {
+        Map<Namespace, List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>>> partitionedRequests =
                 batch.stream().collect(groupingBy(elem -> elem.argument().namespace(), toList()));
         return getCommitTimestampResponse(KeyedStream.stream(partitionedRequests)
                 .map(requestList -> GetCommitTimestampsRequest.builder()
@@ -226,6 +247,8 @@ private Map<Namespace, GetCommitTimestampsResponse> getCommitTimestampResponse(
                                     .inclusiveLower(inclusiveLower)
                                     .inclusiveUpper(exclusiveUpper - 1)
                                     .lockWatchUpdate(lockWatchStateUpdate)
+                                    .commitImmutableTimestamp(COMMIT_TS_RESPONSE)
+                                    .lease(LEASE)
                                     .build());
                 })
                 .collectToMap();
@@ -239,15 +262,15 @@ private void updateLowerBound(Namespace namespace, long numTimestamps) {
         lowestStartTsMap.put(namespace, lowestStartTsMap.getOrDefault(namespace, 1L) + numTimestamps);
     }
 
-    private List<BatchElement<NamespacedRequest, Long>> getCommitTimestampRequestsForClients(
+    private List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> getCommitTimestampRequestsForClients(
             int clientCount, int requestCount) {
-        List<BatchElement<NamespacedRequest, Long>> test = IntStream.range(0, requestCount)
+        List<BatchElement<NamespacedRequest, GetCommitTimestampResponse>> test = IntStream.range(0, requestCount)
                 .mapToObj(ind -> batchElementForNamespace(Namespace.of("Test_" + (ind % clientCount))))
                 .collect(Collectors.toList());
         return test;
     }
 
-    private BatchElement<NamespacedRequest, Long> batchElementForNamespace(Namespace namespace) {
+    private BatchElement<NamespacedRequest, GetCommitTimestampResponse> batchElementForNamespace(Namespace namespace) {
         return BatchElement.of(
                 ImmutableNamespacedRequest.builder()
                         .namespace(namespace)
@@ -255,7 +278,7 @@ private BatchElement<NamespacedRequest, Long> batchElementForNamespace(Namespace
                         .cache(lockWatchCacheMap.computeIfAbsent(namespace, _unused -> spy(LockWatchCacheImpl.noOp())))
                         .commitLocksToken(lockToken)
                         .build(),
-                new DisruptorFuture<Long>(
+                new DisruptorFuture<>(
                         AutobatcherTelemetryComponents.create("test", new DefaultTaggedMetricRegistry())));
     }
 }
diff --git a/lock-api/src/test/java/com/palantir/lock/client/TimestampCorroboratingTimelockServiceTest.java b/lock-api/src/test/java/com/palantir/lock/client/TimestampCorroboratingTimelockServiceTest.java
index 036b4a76918..3e6575218c5 100644
--- a/lock-api/src/test/java/com/palantir/lock/client/TimestampCorroboratingTimelockServiceTest.java
+++ b/lock-api/src/test/java/com/palantir/lock/client/TimestampCorroboratingTimelockServiceTest.java
@@ -168,7 +168,13 @@ public void startTransactionsThrowsIfSpanningBound() {
     @Test
     public void getCommitTimestampsShouldFail() {
         when(rawTimelockService.getCommitTimestamps(any()))
-                .thenReturn(GetCommitTimestampsResponse.of(1L, 3L, LOCK_WATCH_UPDATE));
+                .thenReturn(GetCommitTimestampsResponse.builder()
+                        .inclusiveLower(1L)
+                        .inclusiveUpper(3L)
+                        .lockWatchUpdate(LOCK_WATCH_UPDATE)
+                        .commitImmutableTimestamp(LOCK_IMMUTABLE_TIMESTAMP_RESPONSE)
+                        .lease(Lease.of(LeaderTime.of(LeadershipId.random(), NanoTime.now()), Duration.ZERO))
+                        .build());
         assertThrowsOnSecondCall(() -> timelockService.getCommitTimestamps(
                 GetCommitTimestampsRequest.of(3, ConjureIdentifiedVersion.of(UUID.randomUUID(), 3L))));
         assertThat(timelockService
diff --git a/lock-impl/src/main/java/com/palantir/lock/impl/LegacyTimelockService.java b/lock-impl/src/main/java/com/palantir/lock/impl/LegacyTimelockService.java
index 0ff9978a39a..7ae30923948 100644
--- a/lock-impl/src/main/java/com/palantir/lock/impl/LegacyTimelockService.java
+++ b/lock-impl/src/main/java/com/palantir/lock/impl/LegacyTimelockService.java
@@ -25,6 +25,7 @@
 import com.palantir.lock.LockService;
 import com.palantir.lock.SimpleTimeDuration;
 import com.palantir.lock.v2.ClientLockingOptions;
+import com.palantir.lock.v2.GetCommitTimestampResponse;
 import com.palantir.lock.v2.LockImmutableTimestampResponse;
 import com.palantir.lock.v2.LockRequest;
 import com.palantir.lock.v2.LockResponse;
@@ -49,12 +50,17 @@ public class LegacyTimelockService implements TimelockService {
     private final TimestampService timestampService;
     private final LockService lockService;
     private final LockClient immutableTsLockClient;
+    private final LockClient commitImmutableTsLockClient;
 
     public LegacyTimelockService(
-            TimestampService timestampService, LockService lockService, LockClient immutableTsLockClient) {
+            TimestampService timestampService,
+            LockService lockService,
+            LockClient immutableTsLockClient,
+            LockClient commitImmutableTsLockClient) {
         this.timestampService = timestampService;
         this.lockService = lockService;
         this.immutableTsLockClient = immutableTsLockClient;
+        this.commitImmutableTsLockClient = commitImmutableTsLockClient;
     }
 
     @Override
@@ -68,8 +74,10 @@ public long getFreshTimestamp() {
     }
 
     @Override
-    public long getCommitTimestamp(long startTs, LockToken commitLocksToken) {
-        return getFreshTimestamp();
+    public GetCommitTimestampResponse getCommitTimestamp(long startTs, LockToken commitLocksToken) {
+        LockImmutableTimestampResponse lockImmutableTimestampResponse =
+                getLockImmutableTimestampResponse(commitImmutableTsLockClient);
+        return GetCommitTimestampResponse.of(lockImmutableTimestampResponse, getFreshTimestamp());
     }
 
     @Override
@@ -79,39 +87,19 @@ public TimestampRange getFreshTimestamps(int numTimestampsRequested) {
 
     @Override
     public LockImmutableTimestampResponse lockImmutableTimestamp() {
-        long immutableLockTs = timestampService.getFreshTimestamp();
-        LockDescriptor lockDesc = AtlasTimestampLockDescriptor.of(immutableLockTs);
-        com.palantir.lock.LockRequest lockRequest = com.palantir.lock.LockRequest.builder(
-                        ImmutableSortedMap.of(lockDesc, LockMode.READ))
-                .withLockedInVersionId(immutableLockTs)
-                .build();
-        LockRefreshToken lock;
-
-        try {
-            lock = lockService.lock(immutableTsLockClient.getClientId(), lockRequest);
-        } catch (InterruptedException e) {
-            throw Throwables.throwUncheckedException(e);
-        }
-
-        try {
-            return LockImmutableTimestampResponse.of(
-                    getImmutableTimestampInternal(immutableLockTs), LockTokenConverter.toTokenV2(lock));
-        } catch (Throwable e) {
-            if (lock != null) {
-                try {
-                    lockService.unlock(lock);
-                } catch (Throwable unlockThrowable) {
-                    e.addSuppressed(unlockThrowable);
-                }
-            }
-            throw Throwables.rewrapAndThrowUncheckedException(e);
-        }
+        return getLockImmutableTimestampResponse(immutableTsLockClient);
     }
 
     @Override
     public long getImmutableTimestamp() {
         long ts = timestampService.getFreshTimestamp();
-        return getImmutableTimestampInternal(ts);
+        return getImmutableTimestampInternal(ts, immutableTsLockClient);
+    }
+
+    @Override
+    public long getCommitImmutableTimestamp() {
+        long ts = timestampService.getFreshTimestamp();
+        return getImmutableTimestampInternal(ts, commitImmutableTsLockClient);
     }
 
     @Override
@@ -181,11 +169,6 @@ public long currentTimeMillis() {
         return lockService.currentTimeMillis();
     }
 
-    private long getImmutableTimestampInternal(long ts) {
-        Long minLocked = lockService.getMinLockedInVersionId(immutableTsLockClient.getClientId());
-        return minLocked == null ? ts : minLocked;
-    }
-
     private LockRefreshToken lockAnonymous(com.palantir.lock.LockRequest request) {
         try {
             return lockService.lock(LockClient.ANONYMOUS.getClientId(), request);
@@ -202,4 +185,39 @@ private SortedMap<LockDescriptor, LockMode> buildLockMap(Set<LockDescriptor> loc
         }
         return locks;
     }
+
+    private LockImmutableTimestampResponse getLockImmutableTimestampResponse(LockClient lockClient) {
+        long immutableLockTs = timestampService.getFreshTimestamp();
+        LockDescriptor lockDesc = AtlasTimestampLockDescriptor.of(immutableLockTs);
+        com.palantir.lock.LockRequest lockRequest = com.palantir.lock.LockRequest.builder(
+                        ImmutableSortedMap.of(lockDesc, LockMode.READ))
+                .withLockedInVersionId(immutableLockTs)
+                .build();
+        LockRefreshToken lock;
+
+        try {
+            lock = lockService.lock(lockClient.getClientId(), lockRequest);
+        } catch (InterruptedException e) {
+            throw Throwables.throwUncheckedException(e);
+        }
+
+        try {
+            return LockImmutableTimestampResponse.of(
+                    getImmutableTimestampInternal(immutableLockTs, lockClient), LockTokenConverter.toTokenV2(lock));
+        } catch (Throwable e) {
+            if (lock != null) {
+                try {
+                    lockService.unlock(lock);
+                } catch (Throwable unlockThrowable) {
+                    e.addSuppressed(unlockThrowable);
+                }
+            }
+            throw Throwables.rewrapAndThrowUncheckedException(e);
+        }
+    }
+
+    private long getImmutableTimestampInternal(long ts, LockClient lockClient) {
+        Long minLocked = lockService.getMinLockedInVersionId(lockClient.getClientId());
+        return minLocked == null ? ts : minLocked;
+    }
 }
diff --git a/lock-impl/src/test/java/com/palantir/lock/impl/LegacyTimelockServiceTest.java b/lock-impl/src/test/java/com/palantir/lock/impl/LegacyTimelockServiceTest.java
index a291826694d..d1943f47568 100644
--- a/lock-impl/src/test/java/com/palantir/lock/impl/LegacyTimelockServiceTest.java
+++ b/lock-impl/src/test/java/com/palantir/lock/impl/LegacyTimelockServiceTest.java
@@ -54,6 +54,7 @@
 public class LegacyTimelockServiceTest {
 
     private static final LockClient LOCK_CLIENT = LockClient.of("foo");
+    private static final LockClient COMMIT_LOCK_CLIENT = LockClient.of("foo1");
 
     private static final long FRESH_TIMESTAMP = 5L;
 
@@ -69,7 +70,7 @@ public class LegacyTimelockServiceTest {
     private static final long TIMEOUT = 10_000;
 
     private final LegacyTimelockService timelock =
-            new LegacyTimelockService(timestampService, lockService, LOCK_CLIENT);
+            new LegacyTimelockService(timestampService, lockService, LOCK_CLIENT, COMMIT_LOCK_CLIENT);
 
     @BeforeEach
     public void before() {
diff --git a/timelock-api/src/main/conjure/timelock-api.yml b/timelock-api/src/main/conjure/timelock-api.yml
index bb3e8d6fbb7..77de1f936d6 100644
--- a/timelock-api/src/main/conjure/timelock-api.yml
+++ b/timelock-api/src/main/conjure/timelock-api.yml
@@ -204,6 +204,8 @@ types:
           inclusiveLower: Long
           inclusiveUpper: Long
           lockWatchUpdate: LockWatchStateUpdate
+          commitImmutableTimestamp: LockImmutableTimestampResponse
+          lease: Lease
       GetCommitTimestampRequest:
         fields:
           lastKnownVersion: optional<ConjureIdentifiedVersion>
diff --git a/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/AsyncTimelockService.java b/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/AsyncTimelockService.java
index ccd79d47354..d760425c9f9 100644
--- a/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/AsyncTimelockService.java
+++ b/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/AsyncTimelockService.java
@@ -57,6 +57,8 @@ public interface AsyncTimelockService
 
     long getImmutableTimestamp();
 
+    long getCommitImmutableTimestamp();
+
     StartAtlasDbTransactionResponse deprecatedStartTransaction(IdentifiedTimeLockRequest request);
 
     StartAtlasDbTransactionResponseV3 startTransaction(StartIdentifiedAtlasDbTransactionRequest request);
diff --git a/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/AsyncTimelockServiceImpl.java b/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/AsyncTimelockServiceImpl.java
index ada32c9f8a1..f387265980f 100644
--- a/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/AsyncTimelockServiceImpl.java
+++ b/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/AsyncTimelockServiceImpl.java
@@ -55,6 +55,7 @@
 import com.palantir.lock.watch.LockWatchVersion;
 import com.palantir.timestamp.ManagedTimestampService;
 import com.palantir.timestamp.TimestampRange;
+import com.palantir.tritium.ids.UniqueIds;
 import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
@@ -106,6 +107,12 @@ public long getImmutableTimestamp() {
         return lockService.getImmutableTimestamp().orElse(timestamp);
     }
 
+    @Override
+    public long getCommitImmutableTimestamp() {
+        long timestamp = timestampService.getFreshTimestamp();
+        return lockService.getCommitImmutableTimestamp().orElse(timestamp);
+    }
+
     @Override
     public ListenableFuture<LockResponseV2> lock(IdentifiedLockRequest request) {
         AsyncResult<Leased<LockToken>> result = lockService.lock(
@@ -206,6 +213,18 @@ private Leased<LockImmutableTimestampResponse> lockImmutableTimestampWithLease(U
         return Leased.of(lockImmutableTimestampResponse, leasedLock.lease());
     }
 
+    private Leased<LockImmutableTimestampResponse> lockCommitImmutableTimestampWithLease(long timestamp) {
+        Leased<LockToken> leasedLock = lockService
+                .lockCommitImmutableTimestamp(UniqueIds.pseudoRandomUuidV4(), timestamp)
+                .get();
+        long immutableTs = lockService.getCommitImmutableTimestamp().orElse(timestamp);
+
+        LockImmutableTimestampResponse lockImmutableTimestampResponse =
+                LockImmutableTimestampResponse.of(immutableTs, leasedLock.value());
+
+        return Leased.of(lockImmutableTimestampResponse, leasedLock.lease());
+    }
+
     @Override
     public ListenableFuture<ConjureStartTransactionsResponse> startTransactionsWithWatches(
             ConjureStartTransactionsRequest request) {
@@ -234,11 +253,19 @@ private ConjureStartTransactionsResponse startTransactionsWithWatchesSync(Conjur
     @Override
     public ListenableFuture<GetCommitTimestampsResponse> getCommitTimestamps(
             int numTimestamps, Optional<LockWatchVersion> lastKnownVersion) {
+        // TODO(jakubk): There is an obvious race between grabbing the fresh timestamps and
+        // locking the commit immutable timestamp, but the same race already exists for the
+        // normal immutable timestamp, so let's not worry about it for now.
         TimestampRange freshTimestamps = getFreshTimestamps(numTimestamps);
-        return Futures.immediateFuture(GetCommitTimestampsResponse.of(
-                freshTimestamps.getLowerBound(),
-                freshTimestamps.getUpperBound(),
-                getWatchStateUpdate(lastKnownVersion)));
+        Leased<LockImmutableTimestampResponse> leasedLockImmutableCommitTimestampResponse =
+                lockCommitImmutableTimestampWithLease(freshTimestamps.getLowerBound());
+        return Futures.immediateFuture(GetCommitTimestampsResponse.builder()
+                .inclusiveLower(freshTimestamps.getLowerBound())
+                .inclusiveUpper(freshTimestamps.getUpperBound())
+                .lockWatchUpdate(getWatchStateUpdate(lastKnownVersion))
+                .commitImmutableTimestamp(leasedLockImmutableCommitTimestampResponse.value())
+                .lease(leasedLockImmutableCommitTimestampResponse.lease())
+                .build());
     }
 
     @Override
diff --git a/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/UndertowAsyncTimelockResource.java b/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/UndertowAsyncTimelockResource.java
index a207eb721f1..0429cb34fda 100644
--- a/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/UndertowAsyncTimelockResource.java
+++ b/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/UndertowAsyncTimelockResource.java
@@ -138,6 +138,16 @@ public long getImmutableTimestamp(
         return getAsyncTimelockService(namespace, userAgent).getImmutableTimestamp();
     }
 
+    @Handle(method = HttpMethod.POST, path = "/{namespace}/timelock/commit-immutable-timestamp")
+    public long getCommitImmutableTimestamp(
+            @Safe @Handle.PathParam String namespace,
+            @Safe
+                    @HeaderParam(TimelockNamespaces.USER_AGENT_HEADER)
+                    @Handle.Header(TimelockNamespaces.USER_AGENT_HEADER)
+                    Optional<String> userAgent) {
+        return getAsyncTimelockService(namespace, userAgent).getCommitImmutableTimestamp();
+    }
+
     @Handle(method = HttpMethod.POST, path = "/{namespace}/timelock/lock")
     public ListenableFuture<LockResponse> deprecatedLock(
             @Safe @Handle.PathParam String namespace,
diff --git a/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/lock/AsyncLockService.java b/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/lock/AsyncLockService.java
index d8fdf17d283..74ff8336e81 100644
--- a/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/lock/AsyncLockService.java
+++ b/timelock-impl/src/main/java/com/palantir/atlasdb/timelock/lock/AsyncLockService.java
@@ -44,6 +44,7 @@ public class AsyncLockService implements Closeable {
     private final HeldLocksCollection heldLocks;
     private final AwaitedLocksCollection awaitedLocks;
     private final ImmutableTimestampTracker immutableTsTracker;
+    private final ImmutableTimestampTracker immutableCommitTsTracker;
     private final LeaderClock leaderClock;
     private final LockLog lockLog;
     private final LockWatchingService lockWatchingService;
@@ -73,6 +74,7 @@ public static AsyncLockService createDefault(
         return new AsyncLockService(
                 new LockCollection(),
                 new ImmutableTimestampTracker(),
+                new ImmutableTimestampTracker(),
                 lockAcquirer,
                 heldLocks,
                 new AwaitedLocksCollection(),
@@ -86,6 +88,7 @@ public static AsyncLockService createDefault(
     AsyncLockService(
             LockCollection locks,
             ImmutableTimestampTracker immutableTimestampTracker,
+            ImmutableTimestampTracker immutableCommitTimestampTracker,
             LockAcquirer acquirer,
             HeldLocksCollection heldLocks,
             AwaitedLocksCollection awaitedLocks,
@@ -96,6 +99,7 @@ public static AsyncLockService createDefault(
             LockLog lockLog) {
         this.locks = locks;
         this.immutableTsTracker = immutableTimestampTracker;
+        this.immutableCommitTsTracker = immutableCommitTimestampTracker;
         this.heldLocks = heldLocks;
         this.awaitedLocks = awaitedLocks;
         this.reaperExecutor = reaperExecutor;
@@ -142,6 +146,14 @@ public AsyncResult<Leased<LockToken>> lockImmutableTimestamp(UUID requestId, lon
         return immutableTimestampLockResult;
     }
 
+    public AsyncResult<Leased<LockToken>> lockCommitImmutableTimestamp(UUID requestId, long timestamp) {
+        AsyncResult<Leased<LockToken>> immutableTimestampLockResult = heldLocks.getExistingOrAcquire(
+                requestId, () -> acquireCommitImmutableTimestampLock(requestId, timestamp));
+        // TODO(fdesouza): Reinstate lock log registration once PDS-95791 is resolved.
+        //        lockLog.registerLockImmutableTimestampRequest(requestId, timestamp, immutableTimestampLockResult);
+        return immutableTimestampLockResult;
+    }
+
     public AsyncResult<Void> waitForLocks(UUID requestId, Set<LockDescriptor> lockDescriptors, TimeLimit timeout) {
         return awaitedLocks.getExistingOrAwait(requestId, () -> awaitLocks(requestId, lockDescriptors, timeout));
     }
@@ -150,6 +162,10 @@ public Optional<Long> getImmutableTimestamp() {
         return immutableTsTracker.getImmutableTimestamp();
     }
 
+    public Optional<Long> getCommitImmutableTimestamp() {
+        return immutableCommitTsTracker.getImmutableTimestamp();
+    }
+
     private AsyncResult<HeldLocks> acquireLocks(
             UUID requestId,
             Set<LockDescriptor> lockDescriptors,
@@ -169,6 +185,11 @@ private AsyncResult<HeldLocks> acquireImmutableTimestampLock(UUID requestId, lon
         return lockAcquirer.acquireLocks(requestId, OrderedLocks.fromSingleLock(immutableTsLock), TimeLimit.zero());
     }
 
+    private AsyncResult<HeldLocks> acquireCommitImmutableTimestampLock(UUID requestId, long timestamp) {
+        AsyncLock immutableTsLock = immutableCommitTsTracker.getLockFor(timestamp);
+        return lockAcquirer.acquireLocks(requestId, OrderedLocks.fromSingleLock(immutableTsLock), TimeLimit.zero());
+    }
+
     public boolean unlock(LockToken token) {
         return unlock(ImmutableSet.of(token)).contains(token);
     }
diff --git a/timelock-impl/src/test/java/com/palantir/atlasdb/timelock/batch/MultiClientConjureTimelockResourceTest.java b/timelock-impl/src/test/java/com/palantir/atlasdb/timelock/batch/MultiClientConjureTimelockResourceTest.java
index 46ffc5805bb..a915a9713c0 100644
--- a/timelock-impl/src/test/java/com/palantir/atlasdb/timelock/batch/MultiClientConjureTimelockResourceTest.java
+++ b/timelock-impl/src/test/java/com/palantir/atlasdb/timelock/batch/MultiClientConjureTimelockResourceTest.java
@@ -72,6 +72,12 @@ public class MultiClientConjureTimelockResourceTest {
     private static final RedirectRetryTargeter TARGETER =
             RedirectRetryTargeter.create(LOCAL, ImmutableList.of(LOCAL, REMOTE));
     private static final int DUMMY_COMMIT_TS_COUNT = 5;
+    private static final LeadershipId LEADERSHIP_ID = LeadershipId.random();
+    private static final LeaderTime LEADER_TIME = LeaderTime.of(LEADERSHIP_ID, NanoTime.createForTests(1L));
+    private static final Lease LEASE = Lease.of(LEADER_TIME, Duration.ofSeconds(977));
+    private static final LockToken COMMIT_TS_LOCK_TOKEN = mock(LockToken.class);
+    private static final LockImmutableTimestampResponse COMMIT_TS_RESPONSE =
+            LockImmutableTimestampResponse.of(1L, COMMIT_TS_LOCK_TOKEN);
 
     private Map<String, AsyncTimelockService> namespaces = new HashMap<>();
     private Map<String, LeadershipId> namespaceToLeaderMap = new HashMap<>();
@@ -234,8 +240,13 @@ private AsyncTimelockService createAsyncTimeLockServiceForClient(String client)
 
     private GetCommitTimestampsResponse getCommitTimestampResponse(String namespace) {
         int inclusiveLower = getInclusiveLowerCommitTs(namespace);
-        return GetCommitTimestampsResponse.of(
-                inclusiveLower, inclusiveLower + DUMMY_COMMIT_TS_COUNT, lockWatchStateUpdate);
+        return GetCommitTimestampsResponse.builder()
+                .inclusiveLower(inclusiveLower)
+                .inclusiveUpper(inclusiveLower + DUMMY_COMMIT_TS_COUNT)
+                .lockWatchUpdate(lockWatchStateUpdate)
+                .commitImmutableTimestamp(COMMIT_TS_RESPONSE)
+                .lease(LEASE)
+                .build();
     }
 
     private Integer getInclusiveLowerCommitTs(String namespace) {
diff --git a/timelock-server/src/test/java/com/palantir/atlasdb/timelock/lock/AsyncLockServiceEteTest.java b/timelock-server/src/test/java/com/palantir/atlasdb/timelock/lock/AsyncLockServiceEteTest.java
index 75295e575f1..2f4f2b72044 100644
--- a/timelock-server/src/test/java/com/palantir/atlasdb/timelock/lock/AsyncLockServiceEteTest.java
+++ b/timelock-server/src/test/java/com/palantir/atlasdb/timelock/lock/AsyncLockServiceEteTest.java
@@ -69,6 +69,7 @@ public class AsyncLockServiceEteTest {
     private final AsyncLockService service = new AsyncLockService(
             new LockCollection(),
             new ImmutableTimestampTracker(),
+            new ImmutableTimestampTracker(),
             new LockAcquirer(
                     new LockLog(new MetricRegistry(), () -> 2L),
                     Executors.newSingleThreadScheduledExecutor(),
diff --git a/timelock-server/src/test/java/com/palantir/atlasdb/timelock/lock/AsyncLockServiceTest.java b/timelock-server/src/test/java/com/palantir/atlasdb/timelock/lock/AsyncLockServiceTest.java
index dd82982f47e..0c9c7ab98c5 100644
--- a/timelock-server/src/test/java/com/palantir/atlasdb/timelock/lock/AsyncLockServiceTest.java
+++ b/timelock-server/src/test/java/com/palantir/atlasdb/timelock/lock/AsyncLockServiceTest.java
@@ -58,10 +58,12 @@ public class AsyncLockServiceTest {
     private final HeldLocksCollection heldLocks = spy(HeldLocksCollection.create(leaderClock));
     private final AwaitedLocksCollection awaitedLocks = spy(new AwaitedLocksCollection());
     private final ImmutableTimestampTracker immutableTimestampTracker = mock(ImmutableTimestampTracker.class);
+    private final ImmutableTimestampTracker commitImmutableTimestampTracker = mock(ImmutableTimestampTracker.class);
     private final DeterministicScheduler reaperExecutor = new DeterministicScheduler();
     private final AsyncLockService lockService = new AsyncLockService(
             locks,
             immutableTimestampTracker,
+            commitImmutableTimestampTracker,
             acquirer,
             heldLocks,
             awaitedLocks,