diff --git a/broker/src/main/java/com/automq/rocketmq/broker/BrokerController.java b/broker/src/main/java/com/automq/rocketmq/broker/BrokerController.java index 05233983b..31495cbc8 100644 --- a/broker/src/main/java/com/automq/rocketmq/broker/BrokerController.java +++ b/broker/src/main/java/com/automq/rocketmq/broker/BrokerController.java @@ -27,6 +27,8 @@ import com.automq.rocketmq.metadata.DefaultStoreMetadataService; import com.automq.rocketmq.metadata.api.ProxyMetadataService; import com.automq.rocketmq.metadata.api.StoreMetadataService; +import com.automq.rocketmq.metadata.s3.DefaultS3MetadataService; +import com.automq.rocketmq.metadata.api.S3MetadataService; import com.automq.rocketmq.proxy.config.ProxyConfiguration; import com.automq.rocketmq.proxy.grpc.GrpcProtocolServer; import com.automq.rocketmq.proxy.processor.ExtendMessagingProcessor; @@ -64,7 +66,9 @@ public BrokerController(BrokerConfig brokerConfig) throws Exception { metadataStore = MetadataStoreBuilder.build(brokerConfig); proxyMetadataService = new DefaultProxyMetadataService(metadataStore); - storeMetadataService = new DefaultStoreMetadataService(metadataStore); + S3MetadataService s3MetadataService = new DefaultS3MetadataService(metadataStore.config(), + metadataStore.sessionFactory(), metadataStore.asyncExecutor()); + storeMetadataService = new DefaultStoreMetadataService(metadataStore, s3MetadataService); dlqService = new DeadLetterService(brokerConfig, proxyMetadataService); diff --git a/common/src/main/java/com/automq/rocketmq/common/config/ControllerConfig.java b/common/src/main/java/com/automq/rocketmq/common/config/ControllerConfig.java index cf5456453..b5f1e29dd 100644 --- a/common/src/main/java/com/automq/rocketmq/common/config/ControllerConfig.java +++ b/common/src/main/java/com/automq/rocketmq/common/config/ControllerConfig.java @@ -70,10 +70,6 @@ default int workloadTolerance() { return 1; } - default boolean circuitStreamMetadata() { - return true; - } - String dbUrl(); String dbUserName(); diff --git a/controller/src/main/java/com/automq/rocketmq/controller/ControllerClient.java b/controller/src/main/java/com/automq/rocketmq/controller/ControllerClient.java index bfd5309b1..c275381a1 100644 --- a/controller/src/main/java/com/automq/rocketmq/controller/ControllerClient.java +++ b/controller/src/main/java/com/automq/rocketmq/controller/ControllerClient.java @@ -20,10 +20,6 @@ import apache.rocketmq.controller.v1.CloseStreamReply; import apache.rocketmq.controller.v1.CloseStreamRequest; import apache.rocketmq.controller.v1.Cluster; -import apache.rocketmq.controller.v1.CommitStreamObjectReply; -import apache.rocketmq.controller.v1.CommitStreamObjectRequest; -import apache.rocketmq.controller.v1.CommitWALObjectReply; -import apache.rocketmq.controller.v1.CommitWALObjectRequest; import apache.rocketmq.controller.v1.ConsumerGroup; import apache.rocketmq.controller.v1.CreateGroupReply; import apache.rocketmq.controller.v1.CreateGroupRequest; @@ -36,13 +32,9 @@ import apache.rocketmq.controller.v1.ListTopicsRequest; import apache.rocketmq.controller.v1.OpenStreamReply; import apache.rocketmq.controller.v1.OpenStreamRequest; -import apache.rocketmq.controller.v1.PrepareS3ObjectsReply; -import apache.rocketmq.controller.v1.PrepareS3ObjectsRequest; import apache.rocketmq.controller.v1.TerminateNodeReply; import apache.rocketmq.controller.v1.TerminateNodeRequest; import apache.rocketmq.controller.v1.Topic; -import apache.rocketmq.controller.v1.TrimStreamReply; -import apache.rocketmq.controller.v1.TrimStreamRequest; 
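// The four client methods removed below (trimStream, prepareS3Objects, commitStreamObject,
// commitWALObject) all repeat the same ListenableFuture-to-CompletableFuture bridge in
// GrpcControllerClient. A generic helper capturing that pattern could look like the sketch
// below (illustrative only, not part of the patch; imports assumed:
// com.google.common.util.concurrent.FutureCallback / Futures / ListenableFuture /
// MoreExecutors, java.util.concurrent.CompletableFuture):
static <T> CompletableFuture<T> toCompletableFuture(ListenableFuture<T> grpcFuture) {
    CompletableFuture<T> future = new CompletableFuture<>();
    Futures.addCallback(grpcFuture, new FutureCallback<T>() {
        @Override
        public void onSuccess(T result) {
            future.complete(result); // hand the gRPC reply to the CompletableFuture as-is
        }

        @Override
        public void onFailure(Throwable t) {
            future.completeExceptionally(t); // surface transport/stub failures to the caller
        }
    }, MoreExecutors.directExecutor());
    return future;
}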
import apache.rocketmq.controller.v1.CreateTopicRequest; import apache.rocketmq.controller.v1.UpdateGroupRequest; import apache.rocketmq.controller.v1.UpdateTopicRequest; @@ -91,14 +83,6 @@ public interface ControllerClient extends Closeable { CompletableFuture<ListOpenStreamsReply> listOpenStreams(String target, ListOpenStreamsRequest request); - CompletableFuture<TrimStreamReply> trimStream(String target, TrimStreamRequest request); - - CompletableFuture<PrepareS3ObjectsReply> prepareS3Objects(String target, PrepareS3ObjectsRequest request); - - CompletableFuture<CommitStreamObjectReply> commitStreamObject(String target, CommitStreamObjectRequest request); - - CompletableFuture<CommitWALObjectReply> commitWALObject(String target, CommitWALObjectRequest request); - CompletableFuture<Topic> updateTopic(String target, UpdateTopicRequest request); void terminateNode(String target, TerminateNodeRequest request, StreamObserver<TerminateNodeReply> observer); diff --git a/controller/src/main/java/com/automq/rocketmq/controller/MetadataStore.java b/controller/src/main/java/com/automq/rocketmq/controller/MetadataStore.java index 8c7257e6f..7deedade6 100644 --- a/controller/src/main/java/com/automq/rocketmq/controller/MetadataStore.java +++ b/controller/src/main/java/com/automq/rocketmq/controller/MetadataStore.java @@ -23,8 +23,6 @@ import apache.rocketmq.controller.v1.CreateGroupRequest; import apache.rocketmq.controller.v1.CreateTopicRequest; import apache.rocketmq.controller.v1.DescribeClusterRequest; -import apache.rocketmq.controller.v1.S3StreamObject; -import apache.rocketmq.controller.v1.S3WALObject; import apache.rocketmq.controller.v1.StreamMetadata; import apache.rocketmq.controller.v1.StreamRole; import apache.rocketmq.controller.v1.TerminationStage; @@ -48,8 +46,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutorService; -import org.apache.commons.lang3.tuple.Pair; import org.apache.ibatis.session.SqlSession; +import org.apache.ibatis.session.SqlSessionFactory; public interface MetadataStore extends Closeable { @@ -67,6 +65,8 @@ public interface MetadataStore extends Closeable { */ SqlSession openSession(); + SqlSessionFactory sessionFactory(); + ControllerClient controllerClient(); void addBrokerNode(Node node); @@ -177,35 +177,17 @@ CompletableFuture<List<QueueAssignment>> listAssignments(Long topicId, Integer s */ CompletableFuture<Void> onQueueClosed(long topicId, int queueId); - CompletableFuture<Void> trimStream(long streamId, long streamEpoch, long newStartOffset); - CompletableFuture<StreamMetadata> openStream(long streamId, long streamEpoch, int nodeId); CompletableFuture<Void> closeStream(long streamId, long streamEpoch, int nodeId); CompletableFuture<List<StreamMetadata>> listOpenStreams(int nodeId); - CompletableFuture<Long> prepareS3Objects(int count, int ttlInMinutes); - - CompletableFuture<Void> commitWalObject(S3WALObject walObject, List<S3StreamObject> streamObjects, - List<Long> compactedObjects); - - CompletableFuture<Void> commitStreamObject(S3StreamObject streamObject, - List<Long> compactedObjects) throws ControllerException; - - CompletableFuture<List<S3WALObject>> listWALObjects(); - - CompletableFuture<List<S3WALObject>> listWALObjects(long streamId, long startOffset, long endOffset, int limit); - - CompletableFuture<List<S3StreamObject>> listStreamObjects(long streamId, long startOffset, long endOffset, - int limit); CompletableFuture<Long> getConsumerOffset(long consumerGroupId, long topicId, int queueId); CompletableFuture<String> addressOfNode(int nodeId); - CompletableFuture<Pair<List<S3StreamObject>, List<S3WALObject>>> listObjects(long streamId, long startOffset, - long endOffset, int limit); boolean maintainLeadershipWithSharedLock(SqlSession session);
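// Every S3-metadata mutation this interface used to expose (trim/prepare/commit) followed
// one skeleton in the S3MetadataManager deleted further down: short-circuit to the local DB
// when circuitStreamMetadata() is enabled or this node is the leader, re-validate leadership
// inside the transaction, otherwise forward the request to the leader. Simplified sketch of
// that control flow (illustrative; mutation and the forwarding comment are placeholders, the
// MetadataStore calls are the ones declared above; assumes java.util.function.Consumer):
static void mutateWithLeaderCheck(MetadataStore store, Consumer<SqlSession> mutation) {
    for (;;) {
        if (store.config().circuitStreamMetadata() || store.isLeader()) {
            try (SqlSession session = store.openSession()) {
                if (!store.config().circuitStreamMetadata()
                    && !store.maintainLeadershipWithSharedLock(session)) {
                    continue; // leadership moved mid-flight: re-check and retry
                }
                mutation.accept(session); // apply the change via MyBatis mappers
                session.commit();
            }
        } else {
            // Not the leader: forward the request to store.leaderAddress() via ControllerClient.
        }
        break;
    }
}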
diff --git a/controller/src/main/java/com/automq/rocketmq/controller/client/GrpcControllerClient.java b/controller/src/main/java/com/automq/rocketmq/controller/client/GrpcControllerClient.java index 5b557ad05..75a5d62a7 100644 --- a/controller/src/main/java/com/automq/rocketmq/controller/client/GrpcControllerClient.java +++ b/controller/src/main/java/com/automq/rocketmq/controller/client/GrpcControllerClient.java @@ -22,10 +22,6 @@ import apache.rocketmq.controller.v1.Cluster; import apache.rocketmq.controller.v1.CommitOffsetReply; import apache.rocketmq.controller.v1.CommitOffsetRequest; -import apache.rocketmq.controller.v1.CommitStreamObjectReply; -import apache.rocketmq.controller.v1.CommitStreamObjectRequest; -import apache.rocketmq.controller.v1.CommitWALObjectReply; -import apache.rocketmq.controller.v1.CommitWALObjectRequest; import apache.rocketmq.controller.v1.ConsumerGroup; import apache.rocketmq.controller.v1.CreateGroupReply; import apache.rocketmq.controller.v1.CreateGroupRequest; @@ -58,15 +54,11 @@ import apache.rocketmq.controller.v1.NotifyMessageQueuesAssignableRequest; import apache.rocketmq.controller.v1.OpenStreamReply; import apache.rocketmq.controller.v1.OpenStreamRequest; -import apache.rocketmq.controller.v1.PrepareS3ObjectsReply; -import apache.rocketmq.controller.v1.PrepareS3ObjectsRequest; import apache.rocketmq.controller.v1.ReassignMessageQueueReply; import apache.rocketmq.controller.v1.ReassignMessageQueueRequest; import apache.rocketmq.controller.v1.TerminateNodeReply; import apache.rocketmq.controller.v1.TerminateNodeRequest; import apache.rocketmq.controller.v1.Topic; -import apache.rocketmq.controller.v1.TrimStreamReply; -import apache.rocketmq.controller.v1.TrimStreamRequest; import apache.rocketmq.controller.v1.UpdateGroupReply; import apache.rocketmq.controller.v1.UpdateGroupRequest; import apache.rocketmq.controller.v1.UpdateTopicReply; @@ -744,104 +736,6 @@ public void onFailure(@Nonnull Throwable t) { return future; } - @Override - public CompletableFuture<TrimStreamReply> trimStream(String target, - TrimStreamRequest request) { - ControllerServiceGrpc.ControllerServiceFutureStub stub; - try { - stub = getOrCreateStubForTarget(target); - } catch (ControllerException e) { - return CompletableFuture.failedFuture(e); - } - - CompletableFuture<TrimStreamReply> future = new CompletableFuture<>(); - Futures.addCallback(stub.trimStream(request), new FutureCallback<>() { - @Override - public void onSuccess(TrimStreamReply result) { - future.complete(result); - } - - @Override - public void onFailure(@Nonnull Throwable t) { - future.completeExceptionally(t); - } - }, MoreExecutors.directExecutor()); - return future; - } - - @Override - public CompletableFuture<PrepareS3ObjectsReply> prepareS3Objects(String target, - PrepareS3ObjectsRequest request) { - ControllerServiceGrpc.ControllerServiceFutureStub stub; - try { - stub = getOrCreateStubForTarget(target); - } catch (ControllerException e) { - return CompletableFuture.failedFuture(e); - } - - CompletableFuture<PrepareS3ObjectsReply> future = new CompletableFuture<>(); - Futures.addCallback(stub.prepareS3Objects(request), new FutureCallback<>() { - @Override - public void onSuccess(PrepareS3ObjectsReply result) { - future.complete(result); - } - - @Override - public void onFailure(@Nonnull Throwable t) { - future.completeExceptionally(t); - } - }, MoreExecutors.directExecutor()); - return future; - } - - @Override - public CompletableFuture<CommitStreamObjectReply> commitStreamObject(String target, - CommitStreamObjectRequest request) { - ControllerServiceGrpc.ControllerServiceFutureStub stub; - try { - stub =
getOrCreateStubForTarget(target); - } catch (ControllerException e) { - return CompletableFuture.failedFuture(e); - } - - CompletableFuture<CommitStreamObjectReply> future = new CompletableFuture<>(); - Futures.addCallback(stub.commitStreamObject(request), new FutureCallback<>() { - @Override - public void onSuccess(CommitStreamObjectReply result) { - future.complete(result); - } - - @Override - public void onFailure(@Nonnull Throwable t) { - future.completeExceptionally(t); - } - }, MoreExecutors.directExecutor()); - return future; - } - - @Override - public CompletableFuture<CommitWALObjectReply> commitWALObject(String target, CommitWALObjectRequest request) { - ControllerServiceGrpc.ControllerServiceFutureStub stub; - try { - stub = getOrCreateStubForTarget(target); - } catch (ControllerException e) { - return CompletableFuture.failedFuture(e); - } - CompletableFuture<CommitWALObjectReply> future = new CompletableFuture<>(); - Futures.addCallback(stub.commitWALObject(request), new FutureCallback<>() { - @Override - public void onSuccess(CommitWALObjectReply result) { - future.complete(result); - } - - @Override - public void onFailure(@Nonnull Throwable t) { - future.completeExceptionally(t); - } - }, MoreExecutors.directExecutor()); - return future; - } - @Override public void close() throws IOException { for (Map.Entry<String, ControllerServiceGrpc.ControllerServiceFutureStub> entry : stubs.entrySet()) { diff --git a/controller/src/main/java/com/automq/rocketmq/controller/server/ControllerServiceImpl.java b/controller/src/main/java/com/automq/rocketmq/controller/server/ControllerServiceImpl.java index 88df19343..d6f5b2cef 100644 --- a/controller/src/main/java/com/automq/rocketmq/controller/server/ControllerServiceImpl.java +++ b/controller/src/main/java/com/automq/rocketmq/controller/server/ControllerServiceImpl.java @@ -22,10 +22,6 @@ import apache.rocketmq.controller.v1.Code; import apache.rocketmq.controller.v1.CommitOffsetReply; import apache.rocketmq.controller.v1.CommitOffsetRequest; -import apache.rocketmq.controller.v1.CommitStreamObjectReply; -import apache.rocketmq.controller.v1.CommitStreamObjectRequest; -import apache.rocketmq.controller.v1.CommitWALObjectReply; -import apache.rocketmq.controller.v1.CommitWALObjectRequest; import apache.rocketmq.controller.v1.ConsumerGroup; import apache.rocketmq.controller.v1.ControllerServiceGrpc; import apache.rocketmq.controller.v1.CreateGroupReply; @@ -61,8 +57,6 @@ import apache.rocketmq.controller.v1.NotifyMessageQueuesAssignableRequest; import apache.rocketmq.controller.v1.OpenStreamReply; import apache.rocketmq.controller.v1.OpenStreamRequest; -import apache.rocketmq.controller.v1.PrepareS3ObjectsReply; -import apache.rocketmq.controller.v1.PrepareS3ObjectsRequest; import apache.rocketmq.controller.v1.ReassignMessageQueueReply; import apache.rocketmq.controller.v1.ReassignMessageQueueRequest; import apache.rocketmq.controller.v1.Status; @@ -71,8 +65,6 @@ import apache.rocketmq.controller.v1.TerminateNodeRequest; import apache.rocketmq.controller.v1.TerminationStage; import apache.rocketmq.controller.v1.Topic; -import apache.rocketmq.controller.v1.TrimStreamReply; -import apache.rocketmq.controller.v1.TrimStreamRequest; import apache.rocketmq.controller.v1.UpdateGroupReply; import apache.rocketmq.controller.v1.UpdateGroupRequest; import apache.rocketmq.controller.v1.UpdateTopicReply; @@ -580,22 +572,6 @@ public void closeStream(CloseStreamRequest request, StreamObserver<CloseStreamReply> responseObserver) { - @Override - public void trimStream(TrimStreamRequest request, StreamObserver<TrimStreamReply> responseObserver) { - metadataStore.trimStream(request.getStreamId(), request.getStreamEpoch(), request.getNewStartOffset()) - .whenComplete((res, e) -> { - if (null != e) { -
responseObserver.onError(e); - } else { - TrimStreamReply reply = TrimStreamReply.newBuilder() - .setStatus(Status.newBuilder().setCode(Code.OK).build()) - .build(); - responseObserver.onNext(reply); - responseObserver.onCompleted(); - } - }); - } - @Override public void listOpenStreams(ListOpenStreamsRequest request, StreamObserver<ListOpenStreamsReply> responseObserver) { @@ -616,92 +592,6 @@ public void listOpenStreams(ListOpenStreamsRequest request, }); } - @Override - public void prepareS3Objects(PrepareS3ObjectsRequest request, - StreamObserver<PrepareS3ObjectsReply> responseObserver) { - metadataStore.prepareS3Objects(request.getPreparedCount(), (int) request.getTimeToLiveMinutes()) - .whenComplete((objectId, e) -> { - if (null != e) { - responseObserver.onError(e); - return; - } - - PrepareS3ObjectsReply reply = PrepareS3ObjectsReply.newBuilder() - .setStatus(Status.newBuilder() - .setCode(Code.OK).build()) - .setFirstObjectId(objectId) - .build(); - responseObserver.onNext(reply); - responseObserver.onCompleted(); - }); - } - - @Override - public void commitWALObject(CommitWALObjectRequest - request, StreamObserver<CommitWALObjectReply> responseObserver) { - metadataStore.commitWalObject(request.getS3WalObject(), request.getS3StreamObjectsList(), request.getCompactedObjectIdsList()) - .whenComplete((res, e) -> { - if (null != e) { - if (e instanceof ControllerException ex) { - CommitWALObjectReply reply = CommitWALObjectReply.newBuilder() - .setStatus(Status.newBuilder() - .setCode(Code.forNumber(ex.getErrorCode())) - .setMessage(e.getMessage()).build()) - .build(); - responseObserver.onNext(reply); - responseObserver.onCompleted(); - } else { - responseObserver.onError(e); - } - } else { - CommitWALObjectReply reply = CommitWALObjectReply.newBuilder() - .setStatus(Status.newBuilder().setCode(Code.OK).build()) - .build(); - responseObserver.onNext(reply); - responseObserver.onCompleted(); - } - }); - } - - @SuppressWarnings("checkstyle:Indentation") - @Override - public void commitStreamObject(CommitStreamObjectRequest request, - StreamObserver<CommitStreamObjectReply> responseObserver) { - - try { - metadataStore.commitStreamObject(request.getS3StreamObject(), request.getCompactedObjectIdsList()) - .whenComplete((res, e) -> { - if (null != e) { - if (e instanceof ControllerException ex) { - CommitStreamObjectReply reply = CommitStreamObjectReply.newBuilder() - .setStatus(Status.newBuilder() - .setCode(Code.forNumber(ex.getErrorCode())) - .setMessage(e.getMessage()).build()) - .build(); - responseObserver.onNext(reply); - responseObserver.onCompleted(); - } else { - responseObserver.onError(e); - } - } else { - CommitStreamObjectReply reply = CommitStreamObjectReply.newBuilder() - .setStatus(Status.newBuilder().setCode(Code.OK).build()) - .build(); - responseObserver.onNext(reply); - responseObserver.onCompleted(); - } - }); - } catch (ControllerException e) { - CommitStreamObjectReply reply = CommitStreamObjectReply.newBuilder() - .setStatus(Status.newBuilder() - .setCode(Code.forNumber(e.getErrorCode())) - .setMessage(e.getMessage()).build()) - .build(); - responseObserver.onNext(reply); - responseObserver.onCompleted(); - } - } - @Override public void terminateNode(TerminateNodeRequest request, StreamObserver<TerminateNodeReply> responseObserver) { if (request.getNodeId() != metadataStore.config().nodeId()) { diff --git a/controller/src/main/java/com/automq/rocketmq/controller/server/store/DefaultMetadataStore.java b/controller/src/main/java/com/automq/rocketmq/controller/server/store/DefaultMetadataStore.java index cd1fdfa12..cf06f1a1d 100644 ---
a/controller/src/main/java/com/automq/rocketmq/controller/server/store/DefaultMetadataStore.java +++ b/controller/src/main/java/com/automq/rocketmq/controller/server/store/DefaultMetadataStore.java @@ -31,8 +31,6 @@ import apache.rocketmq.controller.v1.ListOpenStreamsRequest; import apache.rocketmq.controller.v1.OpenStreamReply; import apache.rocketmq.controller.v1.OpenStreamRequest; -import apache.rocketmq.controller.v1.S3StreamObject; -import apache.rocketmq.controller.v1.S3WALObject; import apache.rocketmq.controller.v1.StreamMetadata; import apache.rocketmq.controller.v1.StreamRole; import apache.rocketmq.controller.v1.StreamState; @@ -46,7 +44,6 @@ import com.automq.rocketmq.controller.exception.ControllerException; import com.automq.rocketmq.controller.MetadataStore; import com.automq.rocketmq.controller.server.store.impl.GroupManager; -import com.automq.rocketmq.controller.server.store.impl.S3MetadataManager; import com.automq.rocketmq.controller.server.store.impl.TopicManager; import com.automq.rocketmq.controller.server.tasks.ScanGroupTask; import com.automq.rocketmq.controller.server.tasks.ScanStreamTask; @@ -98,7 +95,6 @@ import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; -import org.apache.commons.lang3.tuple.Pair; import org.apache.ibatis.session.SqlSession; import org.apache.ibatis.session.SqlSessionFactory; import org.slf4j.Logger; @@ -129,8 +125,6 @@ public class DefaultMetadataStore implements MetadataStore { private final GroupManager groupManager; - private final S3MetadataManager s3MetadataManager; - private DataStore dataStore; public DefaultMetadataStore(ControllerClient client, SqlSessionFactory sessionFactory, ControllerConfig config) { @@ -143,7 +137,6 @@ public DefaultMetadataStore(ControllerClient client, SqlSessionFactory sessionFa new PrefixThreadFactory("Controller")); this.asyncExecutorService = Executors.newFixedThreadPool(10, new PrefixThreadFactory("Controller-Async")); this.topicManager = new TopicManager(this); - this.s3MetadataManager = new S3MetadataManager(this); this.groupManager = new GroupManager(this); } @@ -162,6 +155,11 @@ public SqlSession openSession() { return sessionFactory.openSession(false); } + @Override + public SqlSessionFactory sessionFactory() { + return sessionFactory; + } + @Override public ControllerClient controllerClient() { return controllerClient; @@ -1180,49 +1178,4 @@ public void applyGroupChange(List groups) { public void applyStreamChange(List streams) { this.topicManager.getStreamCache().apply(streams); } - - @Override - public CompletableFuture<Void> trimStream(long streamId, long streamEpoch, long newStartOffset) { - return s3MetadataManager.trimStream(streamId, streamEpoch, newStartOffset); - } - - @Override - public CompletableFuture<Long> prepareS3Objects(int count, int ttlInMinutes) { - return s3MetadataManager.prepareS3Objects(count, ttlInMinutes); - } - - @Override - public CompletableFuture<Void> commitWalObject(S3WALObject walObject, List<S3StreamObject> streamObjects, - List<Long> compactedObjects) { - return s3MetadataManager.commitWalObject(walObject, streamObjects, compactedObjects); - } - - @Override - public CompletableFuture<Void> commitStreamObject(S3StreamObject streamObject, - List<Long> compactedObjects) throws ControllerException { - return s3MetadataManager.commitStreamObject(streamObject, compactedObjects); - } - - @Override - public CompletableFuture<List<S3WALObject>> listWALObjects() { - return s3MetadataManager.listWALObjects(); - } - - @Override - public
CompletableFuture<List<S3WALObject>> listWALObjects(long streamId, long startOffset, long endOffset, - int limit) { - return s3MetadataManager.listWALObjects(streamId, startOffset, endOffset, limit); - } - - @Override - public CompletableFuture<List<S3StreamObject>> listStreamObjects(long streamId, long startOffset, long endOffset, - int limit) { - return s3MetadataManager.listStreamObjects(streamId, startOffset, endOffset, limit); - } - - @Override - public CompletableFuture<Pair<List<S3StreamObject>, List<S3WALObject>>> listObjects(long streamId, long startOffset, - long endOffset, int limit) { - return s3MetadataManager.listObjects(streamId, startOffset, endOffset, limit); - } } \ No newline at end of file diff --git a/controller/src/main/java/com/automq/rocketmq/controller/server/store/impl/S3MetadataManager.java b/controller/src/main/java/com/automq/rocketmq/controller/server/store/impl/S3MetadataManager.java deleted file mode 100644 index 4e8df6e24..000000000 --- a/controller/src/main/java/com/automq/rocketmq/controller/server/store/impl/S3MetadataManager.java +++ /dev/null @@ -1,849 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package com.automq.rocketmq.controller.server.store.impl; - -import apache.rocketmq.controller.v1.Code; -import apache.rocketmq.controller.v1.CommitStreamObjectRequest; -import apache.rocketmq.controller.v1.CommitWALObjectRequest; -import apache.rocketmq.controller.v1.PrepareS3ObjectsReply; -import apache.rocketmq.controller.v1.PrepareS3ObjectsRequest; -import apache.rocketmq.controller.v1.S3ObjectState; -import apache.rocketmq.controller.v1.S3StreamObject; -import apache.rocketmq.controller.v1.S3WALObject; -import apache.rocketmq.controller.v1.StreamState; -import apache.rocketmq.controller.v1.SubStream; -import apache.rocketmq.controller.v1.SubStreams; -import apache.rocketmq.controller.v1.TrimStreamRequest; -import com.automq.rocketmq.common.system.S3Constants; -import com.automq.rocketmq.common.system.StreamConstants; -import com.automq.rocketmq.controller.exception.ControllerException; -import com.automq.rocketmq.controller.MetadataStore; -import com.automq.rocketmq.controller.server.store.impl.cache.S3StreamObjectCache; -import com.automq.rocketmq.metadata.dao.Range; -import com.automq.rocketmq.metadata.dao.S3Object; -import com.automq.rocketmq.metadata.dao.S3WalObject; -import com.automq.rocketmq.metadata.dao.Stream; -import com.automq.rocketmq.metadata.mapper.RangeMapper; -import com.automq.rocketmq.metadata.mapper.S3ObjectMapper; -import com.automq.rocketmq.metadata.mapper.S3StreamObjectMapper; -import com.automq.rocketmq.metadata.mapper.S3WalObjectMapper; -import com.automq.rocketmq.metadata.mapper.SequenceMapper; -import com.automq.rocketmq.metadata.mapper.StreamMapper; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.TextFormat; -import com.google.protobuf.util.JsonFormat; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.apache.commons.lang3.tuple.ImmutablePair; -import org.apache.commons.lang3.tuple.Pair; -import org.apache.ibatis.session.SqlSession; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class S3MetadataManager { - - private static final Logger LOGGER = LoggerFactory.getLogger(S3MetadataManager.class); - - private final MetadataStore metadataStore; - - private final S3StreamObjectCache s3StreamObjectCache; - - public S3MetadataManager(MetadataStore metadataStore) { - this.metadataStore = metadataStore; - this.s3StreamObjectCache = new S3StreamObjectCache(); - } - - public CompletableFuture prepareS3Objects(int count, int ttlInMinutes) { - CompletableFuture future = new CompletableFuture<>(); - for (; ; ) { - if (metadataStore.config().circuitStreamMetadata() || metadataStore.isLeader()) { - try (SqlSession session = metadataStore.openSession()) { - if (!metadataStore.config().circuitStreamMetadata() && - !metadataStore.maintainLeadershipWithSharedLock(session)) { - continue; - } - - // Get and update sequence - SequenceMapper sequenceMapper = session.getMapper(SequenceMapper.class); - long next = sequenceMapper.next(S3ObjectMapper.SEQUENCE_NAME); - sequenceMapper.update(S3ObjectMapper.SEQUENCE_NAME, next + count); - - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - Calendar calendar = Calendar.getInstance(); - calendar.add(Calendar.MINUTE, ttlInMinutes); - IntStream.range(0, 
count).forEach(i -> { - S3Object object = new S3Object(); - object.setId(next + i); - object.setState(S3ObjectState.BOS_PREPARED); - object.setExpiredTimestamp(calendar.getTime()); - s3ObjectMapper.prepare(object); - }); - session.commit(); - future.complete(next); - } catch (Exception e) { - LOGGER.error("PrepareS3Objects failed", e); - ControllerException ex = new ControllerException(Code.INTERNAL_VALUE, "PrepareS3Objects failed" + e.getMessage()); - future.completeExceptionally(ex); - } - } else { - PrepareS3ObjectsRequest request = PrepareS3ObjectsRequest.newBuilder() - .setPreparedCount(count) - .setTimeToLiveMinutes(ttlInMinutes) - .build(); - try { - metadataStore.controllerClient().prepareS3Objects(metadataStore.leaderAddress(), request) - .thenApply(PrepareS3ObjectsReply::getFirstObjectId); - } catch (ControllerException e) { - future.completeExceptionally(e); - } - } - break; - } - return future; - } - - public CompletableFuture commitWalObject(S3WALObject walObject, - List streamObjects, List compactedObjects) { - if (Objects.isNull(walObject)) { - LOGGER.error("S3WALObject is unexpectedly null"); - ControllerException e = new ControllerException(Code.INTERNAL_VALUE, "S3WALObject is unexpectedly null"); - return CompletableFuture.failedFuture(e); - } - - LOGGER.info("commitWalObject with walObject: {}, streamObjects: {}, compactedObjects: {}", - TextFormat.shortDebugString(walObject), - streamObjects.stream() - .map(TextFormat::shortDebugString) - .collect(Collectors.joining()), compactedObjects - ); - - CompletableFuture future = new CompletableFuture<>(); - for (; ; ) { - if (metadataStore.config().circuitStreamMetadata() || metadataStore.isLeader()) { - try (SqlSession session = metadataStore.openSession()) { - if (!metadataStore.config().circuitStreamMetadata() && - !metadataStore.maintainLeadershipWithSharedLock(session)) { - continue; - } - - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - - int brokerId = walObject.getBrokerId(); - long objectId = walObject.getObjectId(); - - if (Objects.isNull(compactedObjects) || compactedObjects.isEmpty()) { - // verify stream continuity - List offsets = java.util.stream.Stream.concat( - streamObjects.stream() - .map(s3StreamObject -> new long[] {s3StreamObject.getStreamId(), s3StreamObject.getStartOffset(), s3StreamObject.getEndOffset()}), - walObject.getSubStreams().getSubStreamsMap().entrySet() - .stream() - .map(obj -> new long[] {obj.getKey(), obj.getValue().getStartOffset(), obj.getValue().getEndOffset()}) - ).toList(); - - if (!checkStreamAdvance(session, offsets)) { - LOGGER.error("S3WALObject[object-id={}]'s stream advance check failed", walObject.getObjectId()); - ControllerException e = new ControllerException(Code.NOT_FOUND_VALUE, String.format("S3WALObject[object-id=%d]'s stream advance check failed", walObject.getObjectId())); - future.completeExceptionally(e); - return future; - } - } - - // commit S3 object - if (objectId != S3Constants.NOOP_OBJECT_ID && !commitObject(objectId, StreamConstants.NOOP_STREAM_ID, walObject.getObjectSize(), session)) { - ControllerException e = new ControllerException(Code.ILLEGAL_STATE_VALUE, - String.format("S3WALObject[object-id=%d] is not ready for commit", walObject.getObjectId())); - future.completeExceptionally(e); - return future; - } - - long dataTs = System.currentTimeMillis(); - 
long sequenceId = objectId; - if (!Objects.isNull(compactedObjects) && !compactedObjects.isEmpty()) { - List s3WalObjects = compactedObjects.stream() - .map(id -> { - // mark destroy compacted object - S3Object object = s3ObjectMapper.getById(id); - object.setState(S3ObjectState.BOS_WILL_DELETE); - object.setMarkedForDeletionTimestamp(new Date()); - s3ObjectMapper.markToDelete(object.getId(), new Date()); - - return s3WALObjectMapper.getByObjectId(id); - }) - .toList(); - - if (!s3WalObjects.isEmpty()) { - // update dataTs to the min compacted object's dataTs - dataTs = s3WalObjects.stream() - .map(S3WalObject::getBaseDataTimestamp) - .map(Date::getTime) - .min(Long::compareTo).get(); - // update sequenceId to the min compacted object's sequenceId - sequenceId = s3WalObjects.stream().mapToLong(S3WalObject::getSequenceId).min().getAsLong(); - } - } - - Map> toCache = - new HashMap<>(); - - // commit stream objects; - if (!streamObjects.isEmpty()) { - for (apache.rocketmq.controller.v1.S3StreamObject s3StreamObject : streamObjects) { - long oId = s3StreamObject.getObjectId(); - long objectSize = s3StreamObject.getObjectSize(); - long streamId = s3StreamObject.getStreamId(); - if (!commitObject(oId, streamId, objectSize, session)) { - String msg = String.format("S3StreamObject[object-id=%d] is not ready to commit", oId); - ControllerException e = new ControllerException(Code.ILLEGAL_STATE_VALUE, msg); - future.completeExceptionally(e); - return future; - } - } - // create stream object records - streamObjects.forEach(s3StreamObject -> { - com.automq.rocketmq.metadata.dao.S3StreamObject object = - new com.automq.rocketmq.metadata.dao.S3StreamObject(); - object.setStreamId(s3StreamObject.getStreamId()); - object.setObjectId(s3StreamObject.getObjectId()); - object.setCommittedTimestamp(new Date()); - object.setStartOffset(s3StreamObject.getStartOffset()); - object.setBaseDataTimestamp(new Date()); - object.setEndOffset(s3StreamObject.getEndOffset()); - object.setObjectSize(s3StreamObject.getObjectSize()); - s3StreamObjectMapper.commit(object); - if (toCache.containsKey(object.getStreamId())) { - toCache.get(object.getStreamId()).add(object); - } else { - toCache.put(object.getStreamId(), List.of(object)); - } - }); - } - - // generate compacted objects' remove record ... 
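// Why the two min() reductions above: when a new WAL object replaces compacted ones, its
// record inherits the oldest baseDataTimestamp and the smallest sequenceId among the objects
// it absorbs, so age-based retention and recovery ordering still reflect the oldest data the
// object now carries. Equivalent standalone sketch (hypothetical helper; dao types as in this file):
static long minBaseDataTimestamp(List<S3WalObject> compacted) {
    return compacted.stream()
        .map(S3WalObject::getBaseDataTimestamp) // java.util.Date per the dao
        .mapToLong(Date::getTime)
        .min()
        .orElseThrow(); // callers only reduce non-empty lists, matching the guard above
}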
- if (!Objects.isNull(compactedObjects) && !compactedObjects.isEmpty()) { - compactedObjects.forEach(id -> s3WALObjectMapper.delete(id, null, null)); - } - - // update broker's wal object - if (objectId != S3Constants.NOOP_OBJECT_ID) { - // generate broker's wal object record - S3WalObject s3WALObject = new S3WalObject(); - s3WALObject.setObjectId(objectId); - s3WALObject.setObjectSize(walObject.getObjectSize()); - s3WALObject.setBaseDataTimestamp(new Date(dataTs)); - s3WALObject.setCommittedTimestamp(new Date()); - s3WALObject.setNodeId(brokerId); - s3WALObject.setSequenceId(sequenceId); - String subStreams = JsonFormat.printer().print(walObject.getSubStreams()); - s3WALObject.setSubStreams(subStreams); - s3WALObjectMapper.create(s3WALObject); - } - session.commit(); - - // Update Cache - for (Map.Entry> entry - : toCache.entrySet()) { - s3StreamObjectCache.cache(entry.getKey(), entry.getValue()); - } - LOGGER.info("broker[broke-id={}] commit wal object[object-id={}] success, compacted objects[{}], stream objects[{}]", - brokerId, walObject.getObjectId(), compactedObjects, streamObjects); - future.complete(null); - } catch (Exception e) { - LOGGER.error("CommitWalObject failed", e); - ControllerException ex = new ControllerException(Code.INTERNAL_VALUE, "CommitWalObject failed" + e.getMessage()); - future.completeExceptionally(ex); - } - } else { - CommitWALObjectRequest request = CommitWALObjectRequest.newBuilder() - .setS3WalObject(walObject) - .addAllS3StreamObjects(streamObjects) - .addAllCompactedObjectIds(compactedObjects) - .build(); - try { - metadataStore.controllerClient().commitWALObject(metadataStore.leaderAddress(), request).whenComplete(((reply, e) -> { - if (null != e) { - future.completeExceptionally(e); - } else { - future.complete(null); - } - })); - } catch (ControllerException e) { - future.completeExceptionally(e); - } - } - break; - } - return future; - } - - public CompletableFuture commitStreamObject(apache.rocketmq.controller.v1.S3StreamObject streamObject, - List compactedObjects) { - LOGGER.info("commitStreamObject with streamObject: {}, compactedObjects: {}", TextFormat.shortDebugString(streamObject), - compactedObjects); - - CompletableFuture future = new CompletableFuture<>(); - for (; ; ) { - if (metadataStore.config().circuitStreamMetadata() || metadataStore.isLeader()) { - try (SqlSession session = metadataStore.openSession()) { - if (!metadataStore.config().circuitStreamMetadata() && - !metadataStore.maintainLeadershipWithSharedLock(session)) { - continue; - } - if (streamObject.getObjectId() == S3Constants.NOOP_OBJECT_ID) { - LOGGER.error("S3StreamObject[object-id={}] is null or objectId is unavailable", streamObject.getObjectId()); - String msg = String.format("S3StreamObject[object-id=%d] is null or objectId is unavailable", - streamObject.getObjectId()); - ControllerException e = new ControllerException(Code.NOT_FOUND_VALUE, msg); - future.completeExceptionally(e); - return future; - } - - long committedTs = System.currentTimeMillis(); - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - - // commit object - if (!commitObject(streamObject.getObjectId(), streamObject.getStreamId(), streamObject.getObjectSize(), session)) { - String msg = String.format("S3StreamObject[object-id=%d] is not ready for commit", - streamObject.getObjectId()); - ControllerException e = new ControllerException(Code.ILLEGAL_STATE_VALUE, msg); - 
future.completeExceptionally(e); - return future; - } - long dataTs = committedTs; - if (!Objects.isNull(compactedObjects) && !compactedObjects.isEmpty()) { - dataTs = compactedObjects.stream() - .map(id -> { - // mark destroy compacted object - S3Object object = s3ObjectMapper.getById(id); - object.setState(S3ObjectState.BOS_WILL_DELETE); - object.setMarkedForDeletionTimestamp(new Date()); - s3ObjectMapper.markToDelete(object.getId(), new Date()); - - // update dataTs to the min compacted object's dataTs - com.automq.rocketmq.metadata.dao.S3StreamObject s3StreamObject = - s3StreamObjectMapper.getByObjectId(id); - return s3StreamObject.getBaseDataTimestamp().getTime(); - }) - .min(Long::compareTo).get(); - } - - List - toCache = new ArrayList<>(); - - // create a new S3StreamObject to replace committed ones - if (streamObject.getObjectId() != S3Constants.NOOP_OBJECT_ID) { - com.automq.rocketmq.metadata.dao.S3StreamObject newS3StreamObj = - new com.automq.rocketmq.metadata.dao.S3StreamObject(); - newS3StreamObj.setStreamId(streamObject.getStreamId()); - newS3StreamObj.setObjectId(streamObject.getObjectId()); - newS3StreamObj.setObjectSize(streamObject.getObjectSize()); - newS3StreamObj.setStartOffset(streamObject.getStartOffset()); - newS3StreamObj.setEndOffset(streamObject.getEndOffset()); - newS3StreamObj.setBaseDataTimestamp(new Date(dataTs)); - newS3StreamObj.setCommittedTimestamp(new Date(committedTs)); - s3StreamObjectMapper.create(newS3StreamObj); - toCache.add(newS3StreamObj); - } - - // delete the compactedObjects of S3Stream - if (!Objects.isNull(compactedObjects) && !compactedObjects.isEmpty()) { - compactedObjects.forEach(id -> s3StreamObjectMapper.delete(null, null, id)); - } - session.commit(); - - // Update Cache - s3StreamObjectCache.cache(streamObject.getStreamId(), toCache); - s3StreamObjectCache.onCompact(streamObject.getStreamId(), compactedObjects); - - LOGGER.info("S3StreamObject[object-id={}] commit success, compacted objects: {}", - streamObject.getObjectId(), compactedObjects); - future.complete(null); - } catch (Exception e) { - LOGGER.error("CommitStream failed", e); - ControllerException ex = new ControllerException(Code.INTERNAL_VALUE, "CommitStream failed" + e.getMessage()); - future.completeExceptionally(ex); - } - } else { - CommitStreamObjectRequest request = CommitStreamObjectRequest.newBuilder() - .setS3StreamObject(streamObject) - .addAllCompactedObjectIds(compactedObjects) - .build(); - try { - metadataStore.controllerClient().commitStreamObject(metadataStore.leaderAddress(), request) - .whenComplete(((reply, e) -> { - if (null != e) { - future.completeExceptionally(e); - } else { - future.complete(null); - } - })); - } catch (ControllerException e) { - future.completeExceptionally(e); - } - } - break; - } - return future; - } - - public CompletableFuture> listWALObjects() { - CompletableFuture> future = new CompletableFuture<>(); - try (SqlSession session = metadataStore.openSession()) { - S3WalObjectMapper s3WalObjectMapper = session.getMapper(S3WalObjectMapper.class); - List walObjects = s3WalObjectMapper.list(metadataStore.config().nodeId(), null).stream() - .map(s3WALObject -> { - try { - return buildS3WALObject(s3WALObject, decode(s3WALObject.getSubStreams())); - } catch (InvalidProtocolBufferException e) { - LOGGER.error("Failed to deserialize SubStreams", e); - return null; - } - }) - .filter(Objects::nonNull) - .toList(); - future.complete(walObjects); - } - return future; - } - - public CompletableFuture> listWALObjects(long streamId, long 
startOffset, - long endOffset, int limit) { - CompletableFuture> future = new CompletableFuture<>(); - try (SqlSession session = metadataStore.openSession()) { - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - - List nodes = rangeMapper.listByStreamId(streamId) - .stream() - .filter(range -> range.getEndOffset() > startOffset && range.getStartOffset() < endOffset) - .mapToInt(Range::getNodeId) - .distinct() - .boxed() - .toList(); - - S3WalObjectMapper s3WalObjectMapper = session.getMapper(S3WalObjectMapper.class); - List s3WALObjects = new ArrayList<>(); - for (int nodeId : nodes) { - List s3WalObjects = s3WalObjectMapper.list(nodeId, null); - s3WalObjects.stream() - .map(s3WalObject -> { - try { - Map subStreams = decode(s3WalObject.getSubStreams()).getSubStreamsMap(); - Map streamsRecords = new HashMap<>(); - if (subStreams.containsKey(streamId)) { - SubStream subStream = subStreams.get(streamId); - if (subStream.getStartOffset() <= endOffset && subStream.getEndOffset() > startOffset) { - streamsRecords.put(streamId, subStream); - } - } - if (!streamsRecords.isEmpty()) { - return buildS3WALObject(s3WalObject, SubStreams.newBuilder() - .putAllSubStreams(streamsRecords) - .build()); - } - } catch (InvalidProtocolBufferException e) { - LOGGER.error("Failed to deserialize SubStreams", e); - } - return null; - }) - .filter(Objects::nonNull) - .forEach(s3WALObjects::add); - } - - // Sort by start-offset of the given stream - s3WALObjects.sort((l, r) -> { - long lhs = l.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset(); - long rhs = r.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset(); - return Long.compare(lhs, rhs); - }); - - future.complete(s3WALObjects.stream().limit(limit).toList()); - } - return future; - } - - public CompletableFuture> listStreamObjects0( - long streamId, long startOffset, long endOffset, int limit) { - boolean skipCache = false; - // Serve with cache - if (s3StreamObjectCache.streamExclusive(streamId)) { - List list = - s3StreamObjectCache.listStreamObjects(streamId, startOffset, endOffset, limit); - if (!list.isEmpty()) { - return CompletableFuture.completedFuture(list.stream().toList()); - } - skipCache = true; - } - - CompletableFuture> future = - new CompletableFuture<>(); - try (SqlSession session = metadataStore.openSession()) { - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - S3WalObjectMapper s3WalObjectMapper = session.getMapper(S3WalObjectMapper.class); - if (!skipCache && s3WalObjectMapper.streamExclusive(metadataStore.config().nodeId(), streamId)) { - s3StreamObjectCache.makeStreamExclusive(streamId); - List list = - s3StreamObjectMapper.listByStreamId(streamId); - s3StreamObjectCache.initStream(streamId, list); - return listStreamObjects0(streamId, startOffset, endOffset, limit); - } - List streamObjects = s3StreamObjectMapper - .list(null, streamId, startOffset, endOffset, limit); - future.complete(streamObjects); - } - return future; - } - - public CompletableFuture> listStreamObjects(long streamId, long startOffset, long endOffset, - int limit) { - return listStreamObjects0(streamId, startOffset, endOffset, limit) - .thenApply(list -> list.stream().map(this::buildS3StreamObject).toList()); - } - - private S3StreamObject buildS3StreamObject( - com.automq.rocketmq.metadata.dao.S3StreamObject originalObject) { - return S3StreamObject.newBuilder() - .setStreamId(originalObject.getStreamId()) - .setObjectSize(originalObject.getObjectSize()) - 
.setObjectId(originalObject.getObjectId()) - .setStartOffset(originalObject.getStartOffset()) - .setEndOffset(originalObject.getEndOffset()) - .setBaseDataTimestamp(originalObject.getBaseDataTimestamp().getTime()) - .setCommittedTimestamp(originalObject.getCommittedTimestamp() != null ? - originalObject.getCommittedTimestamp().getTime() : S3Constants.NOOP_OBJECT_COMMIT_TIMESTAMP) - .build(); - } - - private S3WALObject buildS3WALObject( - S3WalObject originalObject, - SubStreams subStreams) { - return S3WALObject.newBuilder() - .setObjectId(originalObject.getObjectId()) - .setObjectSize(originalObject.getObjectSize()) - .setBrokerId(originalObject.getNodeId()) - .setSequenceId(originalObject.getSequenceId()) - .setBaseDataTimestamp(originalObject.getBaseDataTimestamp().getTime()) - .setCommittedTimestamp(originalObject.getCommittedTimestamp() != null ? - originalObject.getCommittedTimestamp().getTime() : S3Constants.NOOP_OBJECT_COMMIT_TIMESTAMP) - .setSubStreams(subStreams) - .build(); - } - - private SubStreams decode(String json) throws InvalidProtocolBufferException { - SubStreams.Builder builder = SubStreams.newBuilder(); - JsonFormat.parser().ignoringUnknownFields().merge(json, builder); - return builder.build(); - } - - private boolean commitObject(Long objectId, long streamId, long objectSize, SqlSession session) { - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3Object s3Object = s3ObjectMapper.getById(objectId); - if (Objects.isNull(s3Object)) { - LOGGER.error("object[object-id={}] not exist", objectId); - return false; - } - // verify the state - if (s3Object.getState() == S3ObjectState.BOS_COMMITTED) { - LOGGER.warn("object[object-id={}] already committed", objectId); - return false; - } - if (s3Object.getState() != S3ObjectState.BOS_PREPARED) { - LOGGER.error("object[object-id={}] is not prepared but try to commit", objectId); - return false; - } - - Date commitData = new Date(); - if (s3Object.getExpiredTimestamp().getTime() < commitData.getTime()) { - LOGGER.error("object[object-id={}] is expired", objectId); - return false; - } - - s3Object.setCommittedTimestamp(commitData); - s3Object.setStreamId(streamId); - s3Object.setObjectSize(objectSize); - s3Object.setState(S3ObjectState.BOS_COMMITTED); - s3ObjectMapper.commit(s3Object); - return true; - } - - private boolean checkStreamAdvance(SqlSession session, List offsets) { - if (offsets == null || offsets.isEmpty()) { - return true; - } - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - for (long[] offset : offsets) { - long streamId = offset[0], startOffset = offset[1], endOffset = offset[2]; - // verify the stream exists and is open - Stream stream = streamMapper.getByStreamId(streamId); - if (stream.getState() != StreamState.OPEN) { - LOGGER.warn("Stream[stream-id={}] not opened", streamId); - return false; - } - - Range range = rangeMapper.get(stream.getRangeId(), streamId, null); - if (Objects.isNull(range)) { - // should not happen - LOGGER.error("Stream[stream-id={}]'s current range[range-id={}] not exist when stream has been created", - streamId, stream.getRangeId()); - return false; - } - - if (range.getEndOffset() != startOffset) { - LOGGER.warn("Stream[stream-id={}]'s current range[range-id={}]'s end offset[{}] is not equal to request start offset[{}]", - streamId, range.getRangeId(), range.getEndOffset(), startOffset); - return false; - } - - range.setEndOffset(endOffset); - rangeMapper.update(range); 
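// The sub-stream map travels through the database as protobuf-JSON: commitWalObject stores it
// with JsonFormat.printer().print(...) and decode() above parses it back. Round-trip sketch
// using the same calls (values illustrative; assumes putSubStreams is the generated accessor
// for the SubStreams map field, consistent with the getSubStreamsMap calls in this file):
static SubStreams roundTrip() throws InvalidProtocolBufferException {
    SubStreams in = SubStreams.newBuilder()
        .putSubStreams(1L, SubStream.newBuilder().setStartOffset(0L).setEndOffset(100L).build())
        .build();
    String json = JsonFormat.printer().print(in); // what gets persisted in the s3_wal_object row
    SubStreams.Builder builder = SubStreams.newBuilder();
    JsonFormat.parser().ignoringUnknownFields().merge(json, builder); // tolerant of new fields
    return builder.build();
}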
- } - return true; - } - - public CompletableFuture, List>> listObjects( - long streamId, long startOffset, long endOffset, int limit) { - return CompletableFuture.supplyAsync(() -> { - try (SqlSession session = metadataStore.openSession()) { - S3WalObjectMapper s3WalObjectMapper = session.getMapper(S3WalObjectMapper.class); - - List s3StreamObjects = - listStreamObjects(streamId, startOffset, endOffset, limit).join(); - - List walObjects = new ArrayList<>(); - s3WalObjectMapper.list(null, null) - .stream() - .map(s3WalObject -> { - try { - Map subStreams = decode(s3WalObject.getSubStreams()).getSubStreamsMap(); - Map streamsRecords = new HashMap<>(); - subStreams.entrySet().stream() - .filter(entry -> !Objects.isNull(entry) && entry.getKey().equals(streamId)) - .filter(entry -> entry.getValue().getStartOffset() <= endOffset && entry.getValue().getEndOffset() > startOffset) - .forEach(entry -> streamsRecords.put(entry.getKey(), entry.getValue())); - return streamsRecords.isEmpty() ? null : buildS3WALObject(s3WalObject, - SubStreams.newBuilder().putAllSubStreams(streamsRecords).build()); - } catch (InvalidProtocolBufferException e) { - LOGGER.error("Failed to deserialize SubStreams", e); - return null; - } - }) - .filter(Objects::nonNull) - .limit(limit) - .forEach(walObjects::add); - - if (!walObjects.isEmpty()) { - walObjects.sort((l, r) -> { - long lhs = l.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset(); - long rhs = r.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset(); - return Long.compare(lhs, rhs); - }); - } - - // apply limit in whole. - Set objectIds = java.util.stream.Stream.concat( - s3StreamObjects.stream() - .map(s3StreamObject -> new long[] { - s3StreamObject.getObjectId(), - s3StreamObject.getStartOffset(), - s3StreamObject.getEndOffset() - }), - walObjects.stream() - .map(s3WALObject -> new long[] { - s3WALObject.getObjectId(), - s3WALObject.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset(), - s3WALObject.getSubStreams().getSubStreamsMap().get(streamId).getEndOffset() - }) - ).sorted((l, r) -> { - if (l[1] == r[1]) { - return Long.compare(l[0], r[0]); - } - return Long.compare(l[1], r[1]); - }).limit(limit) - .map(offset -> offset[0]) - .collect(Collectors.toSet()); - - List limitedStreamObjects = s3StreamObjects.stream() - .filter(s3StreamObject -> objectIds.contains(s3StreamObject.getObjectId())) - .toList(); - - List limitedWalObjectList = walObjects.stream() - .filter(s3WALObject -> objectIds.contains(s3WALObject.getObjectId())) - .toList(); - - return new ImmutablePair<>(limitedStreamObjects, limitedWalObjectList); - } - }, metadataStore.asyncExecutor()); - } - - public CompletableFuture trimStream(long streamId, long streamEpoch, long newStartOffset) { - CompletableFuture future = new CompletableFuture<>(); - for (; ; ) { - if (metadataStore.config().circuitStreamMetadata() || metadataStore.isLeader()) { - try (SqlSession session = metadataStore.openSession()) { - if (!metadataStore.config().circuitStreamMetadata() && - !metadataStore.maintainLeadershipWithSharedLock(session)) { - continue; - } - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - - Stream stream = 
streamMapper.getByStreamId(streamId); - if (null == stream) { - ControllerException e = new ControllerException(Code.NOT_FOUND_VALUE, - String.format("Stream[stream-id=%d] is not found", streamId) - ); - future.completeExceptionally(e); - return future; - } - if (stream.getState() == StreamState.CLOSED) { - LOGGER.warn("Stream[{}]‘s state is CLOSED, can't trim", streamId); - return null; - } - if (stream.getStartOffset() > newStartOffset) { - LOGGER.warn("Stream[{}]‘s start offset {} is larger than request new start offset {}", - streamId, stream.getStartOffset(), newStartOffset); - return null; - } - if (stream.getStartOffset() == newStartOffset) { - // regard it as redundant trim operation, just return success - return null; - } - - // now the request is valid - // update the stream metadata start offset - stream.setEpoch(streamEpoch); - stream.setStartOffset(newStartOffset); - streamMapper.update(stream); - - // remove range or update range's start offset - rangeMapper.listByStreamId(streamId).forEach(range -> { - if (newStartOffset <= range.getStartOffset()) { - return; - } - if (stream.getRangeId().equals(range.getRangeId())) { - // current range, update start offset - // if current range is [50, 100) - // 1. try to trim to 40, then current range will be [50, 100) - // 2. try to trim to 60, then current range will be [60, 100) - // 3. try to trim to 100, then current range will be [100, 100) - // 4. try to trim to 110, then current range will be [100, 100) - long newRangeStartOffset = newStartOffset < range.getEndOffset() ? newStartOffset : range.getEndOffset(); - range.setStartOffset(newRangeStartOffset); - rangeMapper.update(range); - return; - } - if (newStartOffset >= range.getEndOffset()) { - // remove range - rangeMapper.delete(range.getRangeId(), streamId); - return; - } - // update range's start offset - range.setStartOffset(newStartOffset); - rangeMapper.update(range); - }); - // remove stream object - s3StreamObjectMapper.listByStreamId(streamId).forEach(streamObject -> { - long streamStartOffset = streamObject.getStartOffset(); - long streamEndOffset = streamObject.getEndOffset(); - if (newStartOffset <= streamStartOffset) { - return; - } - if (newStartOffset >= streamEndOffset) { - // stream object - s3StreamObjectMapper.delete(null, streamId, streamObject.getObjectId()); - // markDestroyObjects - S3Object s3Object = s3ObjectMapper.getById(streamObject.getObjectId()); - s3Object.setMarkedForDeletionTimestamp(new Date()); - s3ObjectMapper.markToDelete(s3Object.getId(), new Date()); - } - }); - - // remove wal object or remove sub-stream range in wal object - s3WALObjectMapper.list(stream.getDstNodeId(), null).stream() - .map(s3WALObject -> { - try { - return buildS3WALObject(s3WALObject, decode(s3WALObject.getSubStreams())); - } catch (InvalidProtocolBufferException e) { - LOGGER.error("Failed to decode"); - return null; - } - }) - .filter(Objects::nonNull) - .filter(s3WALObject -> s3WALObject.getSubStreams().getSubStreamsMap().containsKey(streamId)) - .filter(s3WALObject -> s3WALObject.getSubStreams().getSubStreamsMap().get(streamId).getEndOffset() <= newStartOffset) - .forEach(s3WALObject -> { - if (s3WALObject.getSubStreams().getSubStreamsMap().size() == 1) { - // only this range, but we will remove this range, so now we can remove this wal object - S3Object s3Object = s3ObjectMapper.getById(s3WALObject.getObjectId()); - s3Object.setMarkedForDeletionTimestamp(new Date()); - s3ObjectMapper.markToDelete(s3Object.getId(), new Date()); - } - - // remove offset range about 
sub-stream ... - }); - session.commit(); - - // Update cache - s3StreamObjectCache.onTrim(streamId, newStartOffset); - - LOGGER.info("Node[node-id={}] trim stream [stream-id={}] with epoch={} and newStartOffset={}", - metadataStore.config().nodeId(), streamId, streamEpoch, newStartOffset); - future.complete(null); - } catch (Exception e) { - LOGGER.error("TrimStream failed", e); - ControllerException ex = new ControllerException(Code.INTERNAL_VALUE, "TrimStream failed" + e.getMessage()); - future.completeExceptionally(ex); - } - } else { - TrimStreamRequest request = TrimStreamRequest.newBuilder() - .setStreamId(streamId) - .setStreamEpoch(streamEpoch) - .setNewStartOffset(newStartOffset) - .build(); - try { - metadataStore.controllerClient().trimStream(metadataStore.leaderAddress(), request).whenComplete(((reply, e) -> { - if (null != e) { - future.completeExceptionally(e); - } else { - future.complete(null); - } - })); - } catch (ControllerException e) { - future.completeExceptionally(e); - } - } - break; - } - return future; - } -} diff --git a/controller/src/test/java/com/automq/rocketmq/controller/ControllerServiceImplTest.java b/controller/src/test/java/com/automq/rocketmq/controller/ControllerServiceImplTest.java index 8c35e03dd..b98b5b3a7 100644 --- a/controller/src/test/java/com/automq/rocketmq/controller/ControllerServiceImplTest.java +++ b/controller/src/test/java/com/automq/rocketmq/controller/ControllerServiceImplTest.java @@ -22,10 +22,6 @@ import apache.rocketmq.controller.v1.CloseStreamReply; import apache.rocketmq.controller.v1.CloseStreamRequest; import apache.rocketmq.controller.v1.Code; -import apache.rocketmq.controller.v1.CommitStreamObjectReply; -import apache.rocketmq.controller.v1.CommitStreamObjectRequest; -import apache.rocketmq.controller.v1.CommitWALObjectReply; -import apache.rocketmq.controller.v1.CommitWALObjectRequest; import apache.rocketmq.controller.v1.ConsumerGroup; import apache.rocketmq.controller.v1.ControllerServiceGrpc; import apache.rocketmq.controller.v1.CreateGroupReply; @@ -45,20 +41,14 @@ import apache.rocketmq.controller.v1.NodeRegistrationRequest; import apache.rocketmq.controller.v1.OpenStreamReply; import apache.rocketmq.controller.v1.OpenStreamRequest; -import apache.rocketmq.controller.v1.PrepareS3ObjectsReply; -import apache.rocketmq.controller.v1.PrepareS3ObjectsRequest; -import apache.rocketmq.controller.v1.S3ObjectState; import apache.rocketmq.controller.v1.StreamMetadata; import apache.rocketmq.controller.v1.StreamRole; import apache.rocketmq.controller.v1.StreamState; -import apache.rocketmq.controller.v1.SubStream; import apache.rocketmq.controller.v1.SubscriptionMode; import apache.rocketmq.controller.v1.TopicStatus; -import apache.rocketmq.controller.v1.TrimStreamRequest; import apache.rocketmq.controller.v1.UpdateGroupRequest; import apache.rocketmq.controller.v1.UpdateTopicReply; import apache.rocketmq.controller.v1.UpdateTopicRequest; -import com.automq.rocketmq.common.system.StreamConstants; import com.automq.rocketmq.controller.exception.ControllerException; import com.automq.rocketmq.controller.server.ControllerServiceImpl; import com.automq.rocketmq.controller.store.DatabaseTestBase; @@ -70,9 +60,6 @@ import com.automq.rocketmq.metadata.dao.Node; import com.automq.rocketmq.metadata.dao.QueueAssignment; import com.automq.rocketmq.metadata.dao.Range; -import com.automq.rocketmq.metadata.dao.S3Object; -import com.automq.rocketmq.metadata.dao.S3StreamObject; -import com.automq.rocketmq.metadata.dao.S3WalObject; import 
com.automq.rocketmq.metadata.dao.Stream; import com.automq.rocketmq.metadata.dao.StreamCriteria; import com.automq.rocketmq.metadata.dao.Topic; @@ -81,9 +68,6 @@ import com.automq.rocketmq.metadata.mapper.NodeMapper; import com.automq.rocketmq.metadata.mapper.QueueAssignmentMapper; import com.automq.rocketmq.metadata.mapper.RangeMapper; -import com.automq.rocketmq.metadata.mapper.S3ObjectMapper; -import com.automq.rocketmq.metadata.mapper.S3StreamObjectMapper; -import com.automq.rocketmq.metadata.mapper.S3WalObjectMapper; import com.automq.rocketmq.metadata.mapper.StreamMapper; import com.automq.rocketmq.metadata.mapper.TopicMapper; import com.google.protobuf.util.JsonFormat; @@ -93,13 +77,8 @@ import io.grpc.stub.StreamObserver; import java.io.IOException; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.Collections; -import java.util.Date; import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -943,243 +922,23 @@ public void testCloseStream_NotFound() throws IOException, ExecutionException, I } @Test - public void testTrimStream() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - - long topicId = 1; - int queueId = 2; - int srcNodeId = 1; - int dstNodeId = 1; - long streamId; - long newStartOffset = 40; - - try (SqlSession session = getSessionFactory().openSession()) { - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - - Stream stream = new Stream(); - stream.setStreamRole(StreamRole.STREAM_ROLE_DATA); - stream.setTopicId(topicId); - stream.setQueueId(queueId); - stream.setEpoch(1L); - stream.setState(StreamState.OPEN); - stream.setStartOffset(0L); - stream.setRangeId(0); - stream.setSrcNodeId(srcNodeId); - stream.setDstNodeId(dstNodeId); - streamMapper.create(stream); - streamId = stream.getId(); - - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - Range range = new Range(); - range.setRangeId(0); - range.setStartOffset(0L); - range.setNodeId(2); - range.setStreamId(streamId); - range.setEpoch(1L); - range.setEndOffset(100L); - rangeMapper.create(range); - - session.commit(); - } - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - TrimStreamRequest request = TrimStreamRequest.newBuilder() - .setStreamId(streamId) - .setStreamEpoch(1) - .setNewStartOffset(newStartOffset) - .build(); - client.trimStream(String.format("localhost:%d", port), request).get(); - } - } - - try (SqlSession session = getSessionFactory().openSession()) { - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - StreamCriteria criteria = StreamCriteria.newBuilder() - .withTopicId(topicId) - .withQueueId(queueId) - .build(); - List streams = streamMapper.byCriteria(criteria); - Assertions.assertEquals(1, streams.size()); - Assertions.assertEquals(newStartOffset, streams.get(0).getStartOffset()); - - RangeMapper rangeMapper = 
session.getMapper(RangeMapper.class); - List ranges = rangeMapper.list(null, streamId, null); - Assertions.assertEquals(1, ranges.size()); - Assertions.assertEquals(newStartOffset, ranges.get(0).getStartOffset()); - Assertions.assertEquals(100, ranges.get(0).getEndOffset()); - } - - } - - @Test - public void testTrimStream_WithS3Stream() throws IOException, ExecutionException, InterruptedException { + public void testListOpenStreams() throws IOException, ExecutionException, InterruptedException { ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long topicId = 1; - int queueId = 2; - int srcNodeId = 1; - int dstNodeId = 2; - long streamId; - long objectId; - long newStartOffset = 40L; - try (SqlSession session = getSessionFactory().openSession()) { StreamMapper streamMapper = session.getMapper(StreamMapper.class); Stream stream = new Stream(); - stream.setStreamRole(StreamRole.STREAM_ROLE_DATA); - stream.setTopicId(topicId); - stream.setQueueId(queueId); stream.setEpoch(1L); + stream.setTopicId(2L); + stream.setQueueId(3); + stream.setDstNodeId(4); + stream.setSrcNodeId(5); stream.setState(StreamState.OPEN); - stream.setStartOffset(0L); - stream.setSrcNodeId(srcNodeId); - stream.setDstNodeId(dstNodeId); - streamMapper.create(stream); - streamId = stream.getId(); - - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - Range range = new Range(); - range.setRangeId(0); - range.setStartOffset(0L); - range.setStreamId(streamId); - range.setNodeId(2); - range.setEpoch(1L); - range.setEndOffset(100L); - rangeMapper.create(range); - - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3Object s3Object = new S3Object(); - s3Object.setId(nextS3ObjectId()); - s3Object.setStreamId(streamId); - s3Object.setState(S3ObjectState.BOS_COMMITTED); - s3Object.setObjectSize(1000L); - Calendar calendar = Calendar.getInstance(); - calendar.add(Calendar.MINUTE, 5); - s3Object.setExpiredTimestamp(calendar.getTime()); - s3ObjectMapper.prepare(s3Object); - objectId = s3Object.getId(); - - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - S3StreamObject s3StreamObject = new S3StreamObject(); - s3StreamObject.setBaseDataTimestamp(new Date()); - s3StreamObject.setStreamId(streamId); - s3StreamObject.setObjectId(objectId); - s3StreamObject.setStartOffset(0L); - s3StreamObject.setEndOffset(40L); - s3StreamObject.setObjectSize(1000L); - s3StreamObjectMapper.create(s3StreamObject); - - session.commit(); - } - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - TrimStreamRequest request = TrimStreamRequest.newBuilder() - .setStreamId(streamId) - .setStreamEpoch(1) - .setNewStartOffset(newStartOffset) - .build(); - client.trimStream(String.format("localhost:%d", port), request).get(); - } - } - - try (SqlSession session = getSessionFactory().openSession()) { - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - StreamCriteria criteria = StreamCriteria.newBuilder() - .withTopicId(topicId) - .withQueueId(queueId) - .build(); - List streams = 
streamMapper.byCriteria(criteria); - Assertions.assertEquals(1, streams.size()); - Assertions.assertEquals(newStartOffset, streams.get(0).getStartOffset()); - - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - List ranges = rangeMapper.list(null, streamId, null); - Assertions.assertEquals(1, ranges.size()); - Assertions.assertEquals(newStartOffset, ranges.get(0).getStartOffset()); - Assertions.assertEquals(100, ranges.get(0).getEndOffset()); - - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - List objects = s3StreamObjectMapper.listByStreamId(streamId); - Assertions.assertEquals(0, objects.size()); - } - } - - @Test - public void testTrimStream_WithS3WAL() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - - long topicId = 1; - int queueId = 2; - int srcNodeId = 1; - int dstNodeId = 2; - long streamId; - long objectId; - long newStartOffset = 40L; - - try (SqlSession session = getSessionFactory().openSession()) { - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - - Stream stream = new Stream(); + stream.setStartOffset(6L); + stream.setRangeId(7); stream.setStreamRole(StreamRole.STREAM_ROLE_DATA); - stream.setTopicId(topicId); - stream.setQueueId(queueId); - stream.setEpoch(1L); - stream.setState(StreamState.OPEN); - stream.setStartOffset(0L); - stream.setSrcNodeId(srcNodeId); - stream.setDstNodeId(dstNodeId); streamMapper.create(stream); - streamId = stream.getId(); - - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - Range range = new Range(); - range.setRangeId(0); - range.setStartOffset(0L); - range.setStreamId(streamId); - range.setNodeId(2); - range.setEpoch(1L); - range.setEndOffset(100L); - rangeMapper.create(range); - - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3Object s3Object = new S3Object(); - s3Object.setId(nextS3ObjectId()); - s3Object.setStreamId(streamId); - s3Object.setState(S3ObjectState.BOS_COMMITTED); - s3Object.setObjectSize(1000L); - Calendar calendar = Calendar.getInstance(); - calendar.add(Calendar.MINUTE, 5); - s3Object.setExpiredTimestamp(calendar.getTime()); - s3ObjectMapper.prepare(s3Object); - objectId = s3Object.getId(); - - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - buildS3WalObjs(objectId, 1).stream() - .peek(s3WalObject -> { - Map subStreams = buildWalSubStreams(1, 0, 10); - s3WalObject.setSubStreams(toJson(subStreams)); - }).forEach(s3WALObjectMapper::create); session.commit(); } @@ -1194,912 +953,15 @@ public void testTrimStream_WithS3WAL() throws IOException, ExecutionException, I ) { testServer.start(); int port = testServer.getPort(); - TrimStreamRequest request = TrimStreamRequest.newBuilder() - .setStreamId(streamId) - .setStreamEpoch(1) - .setNewStartOffset(newStartOffset) - .build(); - client.trimStream(String.format("localhost:%d", port), request).get(); - } - } - - try (SqlSession session = getSessionFactory().openSession()) { - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - StreamCriteria criteria = StreamCriteria.newBuilder() - .withTopicId(topicId) - .withQueueId(queueId) - .build(); - List streams = streamMapper.byCriteria(criteria); - Assertions.assertEquals(1, streams.size()); - Assertions.assertEquals(newStartOffset, streams.get(0).getStartOffset()); - - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - List ranges = rangeMapper.list(null, 
streamId, null); - Assertions.assertEquals(1, ranges.size()); - Assertions.assertEquals(newStartOffset, ranges.get(0).getStartOffset()); - Assertions.assertEquals(100, ranges.get(0).getEndOffset()); - - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - S3WalObject object = s3WALObjectMapper.getByObjectId(objectId); - Assertions.assertEquals(100, object.getObjectSize()); - Assertions.assertEquals(objectId, object.getSequenceId()); - } - } - - @Test - public void testTrimStream_WithRange() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - - long topicId = 1; - int queueId = 2; - int srcNodeId = 1; - int dstNodeId = 2; - long streamId; - long objectId; - long newStartOffset = 60; - - try (SqlSession session = getSessionFactory().openSession()) { - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - - Stream stream = new Stream(); - stream.setStreamRole(StreamRole.STREAM_ROLE_DATA); - stream.setTopicId(topicId); - stream.setQueueId(queueId); - stream.setEpoch(1L); - stream.setState(StreamState.OPEN); - stream.setStartOffset(0L); - stream.setSrcNodeId(srcNodeId); - stream.setDstNodeId(dstNodeId); - streamMapper.create(stream); - streamId = stream.getId(); - - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - Range range = new Range(); - range.setRangeId(0); - range.setStartOffset(0L); - range.setStreamId(streamId); - range.setNodeId(2); - range.setEpoch(1L); - range.setEndOffset(30L); - rangeMapper.create(range); - - Range range1 = new Range(); - range1.setRangeId(1); - range1.setStartOffset(50L); - range1.setStreamId(streamId); - range1.setNodeId(2); - range1.setEpoch(1L); - range1.setEndOffset(100L); - rangeMapper.create(range1); - - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3Object s3Object = new S3Object(); - s3Object.setId(nextS3ObjectId()); - s3Object.setStreamId(streamId); - s3Object.setState(S3ObjectState.BOS_COMMITTED); - s3Object.setObjectSize(1000L); - Calendar calendar = Calendar.getInstance(); - calendar.add(Calendar.MINUTE, 5); - s3Object.setExpiredTimestamp(calendar.getTime()); - s3ObjectMapper.prepare(s3Object); - objectId = s3Object.getId(); - - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - buildS3StreamObjs(objectId, 1, 0, 40).forEach(s3StreamObjectMapper::create); - session.commit(); - } - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - TrimStreamRequest request = TrimStreamRequest.newBuilder() - .setStreamId(streamId) - .setStreamEpoch(1) - .setNewStartOffset(newStartOffset) + ListOpenStreamsRequest request = ListOpenStreamsRequest.newBuilder() + .setBrokerId(1) + .setBrokerEpoch(2) .build(); - client.trimStream(String.format("localhost:%d", port), request).get(); + client.listOpenStreams(String.format("localhost:%d", port), request).get(); } } - - try (SqlSession session = getSessionFactory().openSession()) { - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - 
StreamCriteria criteria = StreamCriteria.newBuilder() - .withTopicId(topicId) - .withQueueId(queueId) - .build(); - List streams = streamMapper.byCriteria(criteria); - Assertions.assertEquals(1, streams.size()); - Assertions.assertEquals(newStartOffset, streams.get(0).getStartOffset()); - - RangeMapper rangeMapper = session.getMapper(RangeMapper.class); - List ranges = rangeMapper.list(null, streamId, null); - Assertions.assertEquals(1, ranges.size()); - Assertions.assertEquals(newStartOffset, ranges.get(0).getStartOffset()); - Assertions.assertEquals(100, ranges.get(0).getEndOffset()); - - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - List objects = s3StreamObjectMapper.listByStreamId(streamId); - Assertions.assertEquals(0, objects.size()); - } } - - @Test - public void testListOpenStreams() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - - try (SqlSession session = getSessionFactory().openSession()) { - StreamMapper streamMapper = session.getMapper(StreamMapper.class); - - Stream stream = new Stream(); - stream.setEpoch(1L); - stream.setTopicId(2L); - stream.setQueueId(3); - stream.setDstNodeId(4); - stream.setSrcNodeId(5); - stream.setState(StreamState.OPEN); - stream.setStartOffset(6L); - stream.setRangeId(7); - stream.setStreamRole(StreamRole.STREAM_ROLE_DATA); - streamMapper.create(stream); - session.commit(); - } - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - ListOpenStreamsRequest request = ListOpenStreamsRequest.newBuilder() - .setBrokerId(1) - .setBrokerEpoch(2) - .build(); - client.listOpenStreams(String.format("localhost:%d", port), request).get(); - } - } - } - - @Test - public void test3StreamObjects_2PC() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long objectId, streamId = 1; - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - PrepareS3ObjectsRequest request = PrepareS3ObjectsRequest.newBuilder() - .setPreparedCount(3) - .setTimeToLiveMinutes(5) - .build(); - - PrepareS3ObjectsReply reply = client.prepareS3Objects(String.format("localhost:%d", port), request).get(); - objectId = reply.getFirstObjectId(); - } - - try (SqlSession session = this.getSessionFactory().openSession()) { - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - buildS3StreamObjs(objectId, 2, 1234, 1234).forEach(s3StreamObjectMapper::create); - session.commit(); - } - - apache.rocketmq.controller.v1.S3StreamObject 
s3StreamObject = apache.rocketmq.controller.v1.S3StreamObject.newBuilder() - .setObjectId(objectId + 2) - .setStreamId(streamId) - .setObjectSize(111L) - .build(); - - List compactedObjects = new ArrayList<>(); - compactedObjects.add(objectId); - compactedObjects.add(objectId + 1); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - CommitStreamObjectRequest request1 = CommitStreamObjectRequest.newBuilder() - .setS3StreamObject(s3StreamObject) - .addAllCompactedObjectIds(compactedObjects) - .build(); - - client.commitStreamObject(String.format("localhost:%d", port), request1).get(); - - } - - try (SqlSession session = getSessionFactory().openSession()) { - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - for (long index = objectId; index < objectId + 2; index++) { - S3Object object = s3ObjectMapper.getById(index); - Assertions.assertEquals(S3ObjectState.BOS_WILL_DELETE, object.getState()); - } - - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - for (long index = objectId; index < objectId + 2; index++) { - S3StreamObject object = s3StreamObjectMapper.getByObjectId(index); - Assertions.assertNull(object); - } - - S3StreamObject object = s3StreamObjectMapper.getByObjectId(objectId + 2); - Assertions.assertTrue(object.getBaseDataTimestamp().getTime() > 0); - Assertions.assertTrue(object.getCommittedTimestamp().getTime() > 0); - } - } - - } - - @Test - public void test3StreamObjects_2PC_Expired() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long objectId, streamId = 1; - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (SqlSession session = this.getSessionFactory().openSession()) { - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3Object s3Object = new S3Object(); - s3Object.setId(nextS3ObjectId()); - s3Object.setStreamId(streamId); - s3Object.setState(S3ObjectState.BOS_COMMITTED); - s3Object.setExpiredTimestamp(new Date()); - s3ObjectMapper.prepare(s3Object); - objectId = s3Object.getId(); - - S3Object s3Object1 = new S3Object(); - s3Object1.setId(nextS3ObjectId()); - s3Object1.setStreamId(streamId); - s3Object1.setState(S3ObjectState.BOS_PREPARED); - s3Object1.setExpiredTimestamp(new Date()); - s3ObjectMapper.prepare(s3Object1); - - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - buildS3StreamObjs(objectId, 1, 1234, 1234).forEach(s3StreamObjectMapper::create); - session.commit(); - } - - apache.rocketmq.controller.v1.S3StreamObject s3StreamObject = apache.rocketmq.controller.v1.S3StreamObject.newBuilder() - .setObjectId(objectId + 1) - .setStreamId(streamId) - .setObjectSize(111L) - .build(); - - List compactedObjects = new ArrayList<>(); - compactedObjects.add(objectId); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - CommitStreamObjectRequest 
request = CommitStreamObjectRequest.newBuilder() - .setS3StreamObject(s3StreamObject) - .addAllCompactedObjectIds(compactedObjects) - .build(); - - CommitStreamObjectReply commitStreamObjectReply = client.commitStreamObject(String.format("localhost:%d", port), request).get(); - Assertions.assertEquals(Code.ILLEGAL_STATE, commitStreamObjectReply.getStatus().getCode()); - } - } - } - - @Test - public void test3StreamObjects_2PC_NoCompacted() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long objectId, streamId = 1; - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - PrepareS3ObjectsRequest request = PrepareS3ObjectsRequest.newBuilder() - .setPreparedCount(3) - .setTimeToLiveMinutes(5) - .build(); - - PrepareS3ObjectsReply reply = client.prepareS3Objects(String.format("localhost:%d", port), request).get(); - objectId = reply.getFirstObjectId(); - } - - apache.rocketmq.controller.v1.S3StreamObject s3StreamObject = apache.rocketmq.controller.v1.S3StreamObject.newBuilder() - .setObjectId(objectId + 2) - .setStreamId(streamId) - .setObjectSize(111L) - .build(); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - CommitStreamObjectRequest request1 = CommitStreamObjectRequest.newBuilder() - .setS3StreamObject(s3StreamObject) - .addAllCompactedObjectIds(Collections.emptyList()) - .build(); - - client.commitStreamObject(String.format("localhost:%d", port), request1).get(); - - } - - try (SqlSession session = getSessionFactory().openSession()) { - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - S3Object s3Object = s3ObjectMapper.getById(objectId + 2); - Assertions.assertEquals(S3ObjectState.BOS_COMMITTED, s3Object.getState()); - Assertions.assertEquals(111L, s3Object.getObjectSize()); - Assertions.assertEquals(streamId, s3Object.getStreamId()); - - S3StreamObject object = s3StreamObjectMapper.getByObjectId(objectId + 2); - Assertions.assertTrue(object.getBaseDataTimestamp().getTime() > 1); - Assertions.assertTrue(object.getBaseDataTimestamp().getTime() > 0); - Assertions.assertTrue(object.getCommittedTimestamp().getTime() > 0); - Assertions.assertTrue(object.getCommittedTimestamp().getTime() > 0); - } - } - - } - - @Test - public void test3StreamObjects_2PC_duplicate() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long objectId, streamId = 1; - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = 
new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - PrepareS3ObjectsRequest request = PrepareS3ObjectsRequest.newBuilder() - .setPreparedCount(3) - .setTimeToLiveMinutes(5) - .build(); - - PrepareS3ObjectsReply reply = client.prepareS3Objects(String.format("localhost:%d", port), request).get(); - objectId = reply.getFirstObjectId(); - } - - apache.rocketmq.controller.v1.S3StreamObject s3StreamObject = apache.rocketmq.controller.v1.S3StreamObject.newBuilder() - .setObjectId(objectId + 2) - .setStreamId(streamId) - .setObjectSize(111L) - .build(); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - CommitStreamObjectRequest request1 = CommitStreamObjectRequest.newBuilder() - .setS3StreamObject(s3StreamObject) - .addAllCompactedObjectIds(Collections.emptyList()) - .build(); - - client.commitStreamObject(String.format("localhost:%d", port), request1).get(); - - client.commitStreamObject(String.format("localhost:%d", port), request1).get(); - } - - try (SqlSession session = getSessionFactory().openSession()) { - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - S3Object s3Object = s3ObjectMapper.getById(objectId + 2); - Assertions.assertEquals(S3ObjectState.BOS_COMMITTED, s3Object.getState()); - - S3StreamObject object = s3StreamObjectMapper.getByObjectId(objectId + 2); - Assertions.assertTrue(object.getBaseDataTimestamp().getTime() > 0); - Assertions.assertTrue(object.getCommittedTimestamp().getTime() > 0); - } - } - - } - - @Test - public void test3WALObjects_2PC_NoS3Stream() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long objectId; - int nodeId = 2; - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - PrepareS3ObjectsRequest request = PrepareS3ObjectsRequest.newBuilder() - .setPreparedCount(5) - .setTimeToLiveMinutes(5) - .build(); - - PrepareS3ObjectsReply reply = client.prepareS3Objects(String.format("localhost:%d", port), request).get(); - objectId = reply.getFirstObjectId(); - } - - try (SqlSession session = this.getSessionFactory().openSession()) { - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - buildS3WalObjs(objectId + 2, 1).stream() - .peek(s3WalObject -> { - Map subStreams = buildWalSubStreams(1, 0, 10); - s3WalObject.setSubStreams(toJson(subStreams)); - }).forEach(s3WALObjectMapper::create); - - buildS3WalObjs(objectId + 3, 1).stream() - .peek(s3WalObject -> { - Map subStreams = buildWalSubStreams(1, 10, 10); - s3WalObject.setSubStreams(toJson(subStreams)); - }).forEach(s3WALObjectMapper::create); - session.commit(); - } - - 
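// (Editorial note in code form: the removed WAL-object tests below all exercise the same two-phase
// pattern -- prepareS3Objects reserves ids, the test seeds wal objects for the to-be-compacted set,
// and commitWALObject then marks that set BOS_WILL_DELETE while the new wal object inherits the
// smallest compacted sequenceId, as the assertions that follow check.)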
apache.rocketmq.controller.v1.S3WALObject walObject = apache.rocketmq.controller.v1.S3WALObject.newBuilder() - .setObjectId(objectId + 4) - .setObjectSize(222L) - .setBrokerId(nodeId) - .build(); - - List compactedObjects = new ArrayList<>(); - compactedObjects.add(objectId + 2); - compactedObjects.add(objectId + 3); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - CommitWALObjectRequest request = CommitWALObjectRequest.newBuilder() - .setS3WalObject(walObject) - .addAllS3StreamObjects(Collections.emptyList()) - .addAllCompactedObjectIds(compactedObjects) - .build(); - - client.commitWALObject(String.format("localhost:%d", port), request).get(); - } - - try (SqlSession session = getSessionFactory().openSession()) { - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - - for (long index = objectId + 2; index < objectId + 4; index++) { - S3Object object = s3ObjectMapper.getById(index); - Assertions.assertEquals(S3ObjectState.BOS_WILL_DELETE, object.getState()); - } - - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - S3WalObject object = s3WALObjectMapper.getByObjectId(objectId + 4); - Assertions.assertTrue(object.getBaseDataTimestamp().getTime() > 0); - Assertions.assertEquals(objectId + 2, object.getSequenceId()); - Assertions.assertTrue(object.getCommittedTimestamp().getTime() > 0); - } - } - - } - - @Test - public void test3WALObjects_2PC_NoCompacted() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long objectId; - int nodeId = 2; - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - PrepareS3ObjectsRequest request = PrepareS3ObjectsRequest.newBuilder() - .setPreparedCount(5) - .setTimeToLiveMinutes(5) - .build(); - - PrepareS3ObjectsReply reply = client.prepareS3Objects(String.format("localhost:%d", port), request).get(); - objectId = reply.getFirstObjectId(); - } - - apache.rocketmq.controller.v1.S3WALObject walObject = apache.rocketmq.controller.v1.S3WALObject.newBuilder() - .setObjectId(objectId + 4) - .setObjectSize(222L) - .setBrokerId(nodeId) - .build(); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - CommitWALObjectRequest request = CommitWALObjectRequest.newBuilder() - .setS3WalObject(walObject) - .addAllS3StreamObjects(Collections.emptyList()) - .addAllCompactedObjectIds(Collections.emptyList()) - .build(); - - client.commitWALObject(String.format("localhost:%d", port), request).get(); - } - - try (SqlSession session = getSessionFactory().openSession()) { - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - S3Object s3Object = s3ObjectMapper.getById(objectId + 4); - 
Assertions.assertEquals(S3ObjectState.BOS_COMMITTED, s3Object.getState()); - Assertions.assertEquals(222L, s3Object.getObjectSize()); - Assertions.assertEquals(StreamConstants.NOOP_STREAM_ID, s3Object.getStreamId()); - - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - S3WalObject object = s3WALObjectMapper.getByObjectId(objectId + 4); - Assertions.assertEquals(objectId + 4, object.getSequenceId()); - Assertions.assertTrue(object.getBaseDataTimestamp().getTime() > 1); - Assertions.assertTrue(object.getCommittedTimestamp().getTime() > 0); - } - } - - } - - @Test - public void test3WALObjects_2PC_Expired() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long objectId; - int nodeId = 2; - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - PrepareS3ObjectsRequest request = PrepareS3ObjectsRequest.newBuilder() - .setPreparedCount(5) - .setTimeToLiveMinutes(0) - .build(); - - PrepareS3ObjectsReply reply = client.prepareS3Objects(String.format("localhost:%d", port), request).get(); - objectId = reply.getFirstObjectId(); - } - - try (SqlSession session = this.getSessionFactory().openSession()) { - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - buildS3WalObjs(objectId + 2, 1).stream() - .peek(s3WalObject -> { - Map subStreams = buildWalSubStreams(1, 0, 10); - s3WalObject.setSubStreams(toJson(subStreams)); - }).forEach(s3WALObjectMapper::create); - - buildS3WalObjs(objectId + 3, 1).stream() - .peek(s3WalObject -> { - Map subStreams = buildWalSubStreams(1, 10, 10); - s3WalObject.setSubStreams(toJson(subStreams)); - }).forEach(s3WALObjectMapper::create); - - session.commit(); - } - - List s3StreamObjects = buildS3StreamObjs(objectId, 2, 20, 10) - .stream().map(s3StreamObject -> apache.rocketmq.controller.v1.S3StreamObject.newBuilder() - .setObjectId(s3StreamObject.getObjectId()) - .setStreamId(s3StreamObject.getStreamId()) - .setObjectSize(s3StreamObject.getObjectSize()) - .setBaseDataTimestamp(s3StreamObject.getBaseDataTimestamp().getTime()) - .setStartOffset(s3StreamObject.getStartOffset()) - .setEndOffset(s3StreamObject.getEndOffset()) - .build()) - .toList(); - - apache.rocketmq.controller.v1.S3WALObject walObject = apache.rocketmq.controller.v1.S3WALObject.newBuilder() - .setObjectId(objectId + 4) - .setSequenceId(11) - .setObjectSize(222L) - .setBrokerId(nodeId) - .build(); - - List compactedObjects = new ArrayList<>(); - compactedObjects.add(objectId + 2); - compactedObjects.add(objectId + 3); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - CommitWALObjectRequest request = CommitWALObjectRequest.newBuilder() - .setS3WalObject(walObject) - .addAllS3StreamObjects(s3StreamObjects) - .addAllCompactedObjectIds(compactedObjects) - .build(); - - CommitWALObjectReply commitWALObjectReply = 
client.commitWALObject(String.format("localhost:%d", port), request).get(); - Assertions.assertEquals(Code.ILLEGAL_STATE, commitWALObjectReply.getStatus().getCode()); - } - } - - } - - @Test - public void test3WALObjects_2PC() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long objectId; - int nodeId = 2; - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - PrepareS3ObjectsRequest request = PrepareS3ObjectsRequest.newBuilder() - .setPreparedCount(5) - .setTimeToLiveMinutes(5) - .build(); - - PrepareS3ObjectsReply reply = client.prepareS3Objects(String.format("localhost:%d", port), request).get(); - objectId = reply.getFirstObjectId(); - } - - try (SqlSession session = this.getSessionFactory().openSession()) { - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - buildS3WalObjs(objectId + 2, 1).stream() - .peek(s3WalObject -> { - Map subStreams = buildWalSubStreams(1, 0, 10); - s3WalObject.setSubStreams(toJson(subStreams)); - }).forEach(s3WALObjectMapper::create); - - buildS3WalObjs(objectId + 3, 1).stream() - .peek(s3WalObject -> { - Map subStreams = buildWalSubStreams(1, 10, 10); - s3WalObject.setSubStreams(toJson(subStreams)); - }).forEach(s3WALObjectMapper::create); - - session.commit(); - } - - List s3StreamObjects = buildS3StreamObjs(objectId, 2, 20, 10) - .stream().map(s3StreamObject -> apache.rocketmq.controller.v1.S3StreamObject.newBuilder() - .setObjectId(s3StreamObject.getObjectId()) - .setStreamId(s3StreamObject.getStreamId()) - .setObjectSize(s3StreamObject.getObjectSize()) - .setBaseDataTimestamp(s3StreamObject.getBaseDataTimestamp().getTime()) - .setStartOffset(s3StreamObject.getStartOffset()) - .setEndOffset(s3StreamObject.getEndOffset()) - .build()) - .toList(); - - apache.rocketmq.controller.v1.S3WALObject walObject = apache.rocketmq.controller.v1.S3WALObject.newBuilder() - .setObjectId(objectId + 4) - .setObjectSize(222L) - .setBrokerId(nodeId) - .build(); - - long time = System.currentTimeMillis(); - List compactedObjects = new ArrayList<>(); - compactedObjects.add(objectId + 2); - compactedObjects.add(objectId + 3); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - CommitWALObjectRequest request = CommitWALObjectRequest.newBuilder() - .setS3WalObject(walObject) - .addAllS3StreamObjects(s3StreamObjects) - .addAllCompactedObjectIds(compactedObjects) - .build(); - - client.commitWALObject(String.format("localhost:%d", port), request).get(); - } - - try (SqlSession session = getSessionFactory().openSession()) { - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - for (long index = objectId; index < objectId + 2; index++) { - S3Object object = s3ObjectMapper.getById(index); - Assertions.assertEquals(S3ObjectState.BOS_COMMITTED, object.getState()); - } - - for (long index = objectId + 2; 
index < objectId + 4; index++) { - S3Object object = s3ObjectMapper.getById(index); - Assertions.assertEquals(S3ObjectState.BOS_WILL_DELETE, object.getState()); - } - - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - for (long index = objectId; index < objectId + 2; index++) { - S3StreamObject object = s3StreamObjectMapper.getByObjectId(index); - if (object.getCommittedTimestamp().getTime() - time > 5 * 60) { - Assertions.fail(); - } - } - - S3Object s3Object = s3ObjectMapper.getById(objectId + 4); - Assertions.assertEquals(222L, s3Object.getObjectSize()); - Assertions.assertEquals(StreamConstants.NOOP_STREAM_ID, s3Object.getStreamId()); - - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - S3WalObject object = s3WALObjectMapper.getByObjectId(objectId + 4); - Assertions.assertTrue(object.getBaseDataTimestamp().getTime() > 0); - Assertions.assertEquals(objectId + 2, object.getSequenceId()); - if (object.getCommittedTimestamp().getTime() - time > 5 * 60) { - Assertions.fail(); - } - } - } - - } - - @Test - public void test3WALObjects_2PC_duplicate() throws IOException, ExecutionException, InterruptedException { - ControllerClient controllerClient = Mockito.mock(ControllerClient.class); - long objectId; - int nodeId = 2; - - try (MetadataStore metadataStore = new DefaultMetadataStore(controllerClient, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - PrepareS3ObjectsRequest request = PrepareS3ObjectsRequest.newBuilder() - .setPreparedCount(5) - .setTimeToLiveMinutes(5) - .build(); - - PrepareS3ObjectsReply reply = client.prepareS3Objects(String.format("localhost:%d", port), request).get(); - objectId = reply.getFirstObjectId(); - } - - try (SqlSession session = this.getSessionFactory().openSession()) { - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - buildS3WalObjs(objectId + 2, 1).stream() - .peek(s3WalObject -> { - Map subStreams = buildWalSubStreams(1, 0, 10); - s3WalObject.setSubStreams(toJson(subStreams)); - }).forEach(s3WALObjectMapper::create); - - buildS3WalObjs(objectId + 3, 1).stream() - .peek(s3WalObject -> { - Map subStreams = buildWalSubStreams(1, 10, 10); - s3WalObject.setSubStreams(toJson(subStreams)); - }).forEach(s3WALObjectMapper::create); - - session.commit(); - } - - List s3StreamObjects = buildS3StreamObjs(objectId, 2, 20, 10) - .stream().map(s3StreamObject -> apache.rocketmq.controller.v1.S3StreamObject.newBuilder() - .setObjectId(s3StreamObject.getObjectId()) - .setStreamId(s3StreamObject.getStreamId()) - .setObjectSize(s3StreamObject.getObjectSize()) - .setBaseDataTimestamp(s3StreamObject.getBaseDataTimestamp().getTime()) - .setStartOffset(s3StreamObject.getStartOffset()) - .setEndOffset(s3StreamObject.getEndOffset()) - .build()) - .toList(); - - apache.rocketmq.controller.v1.S3WALObject walObject = apache.rocketmq.controller.v1.S3WALObject.newBuilder() - .setObjectId(objectId + 4) - .setObjectSize(222L) - .setBrokerId(nodeId) - .build(); - - long time = System.currentTimeMillis(); - List compactedObjects = new ArrayList<>(); - compactedObjects.add(objectId + 2); - 
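// (The duplicate-commit variant below issues the same CommitWALObjectRequest twice and expects
// identical post-conditions both times, i.e. the second commit must behave as an idempotent no-op.)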
compactedObjects.add(objectId + 3); - - try (ControllerTestServer testServer = new ControllerTestServer(0, new ControllerServiceImpl(metadataStore)); - ControllerClient client = new GrpcControllerClient(config) - ) { - testServer.start(); - int port = testServer.getPort(); - - CommitWALObjectRequest request = CommitWALObjectRequest.newBuilder() - .setS3WalObject(walObject) - .addAllS3StreamObjects(s3StreamObjects) - .addAllCompactedObjectIds(compactedObjects) - .build(); - - client.commitWALObject(String.format("localhost:%d", port), request).get(); - client.commitWALObject(String.format("localhost:%d", port), request).get(); - } - - try (SqlSession session = getSessionFactory().openSession()) { - S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); - for (long index = objectId; index < objectId + 2; index++) { - S3Object object = s3ObjectMapper.getById(index); - Assertions.assertEquals(S3ObjectState.BOS_COMMITTED, object.getState()); - } - - for (long index = objectId + 2; index < objectId + 4; index++) { - S3Object object = s3ObjectMapper.getById(index); - Assertions.assertEquals(S3ObjectState.BOS_WILL_DELETE, object.getState()); - } - - S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); - for (long index = objectId; index < objectId + 2; index++) { - S3StreamObject object = s3StreamObjectMapper.getByObjectId(index); - if (object.getCommittedTimestamp().getTime() - time > 5 * 60) { - Assertions.fail(); - } - } - - S3Object s3Object = s3ObjectMapper.getById(objectId + 4); - Assertions.assertEquals(222L, s3Object.getObjectSize()); - Assertions.assertEquals(StreamConstants.NOOP_STREAM_ID, s3Object.getStreamId()); - - S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); - S3WalObject object = s3WALObjectMapper.getByObjectId(objectId + 4); - Assertions.assertEquals(objectId + 2, object.getSequenceId()); - Assertions.assertTrue(object.getBaseDataTimestamp().getTime() > 0); - if (object.getCommittedTimestamp().getTime() - time > 5 * 60) { - Assertions.fail(); - } - } - } - - } - @Test public void testCreateTopic_OpenStream_CloseStream() throws IOException, ExecutionException, InterruptedException, ControllerException { ControllerClient controllerClient = Mockito.mock(ControllerClient.class); diff --git a/metadata-jdbc/src/main/java/com/automq/rocketmq/metadata/HikariCPDataSourceFactory.java b/metadata-jdbc/src/main/java/com/automq/rocketmq/metadata/HikariCPDataSourceFactory.java index 4e27c2f53..44034cb7a 100644 --- a/metadata-jdbc/src/main/java/com/automq/rocketmq/metadata/HikariCPDataSourceFactory.java +++ b/metadata-jdbc/src/main/java/com/automq/rocketmq/metadata/HikariCPDataSourceFactory.java @@ -32,6 +32,7 @@ public void setProperties(Properties properties) { if (null == dataSource) { HikariConfig config = new HikariConfig(properties); config.setMaximumPoolSize(10); + // run with auto-commit off so MyBatis sessions group writes into explicit transactions + config.setAutoCommit(false); dataSource = new HikariDataSource(config); } } diff --git a/metadata/pom.xml b/metadata/pom.xml index 34e9f8b3b..0227eaf1c 100644 --- a/metadata/pom.xml +++ b/metadata/pom.xml @@ -39,5 +39,26 @@ <artifactId>metadata-jdbc</artifactId> <version>${project.version}</version> </dependency> + + <dependency> + <groupId>org.testcontainers</groupId> + <artifactId>testcontainers</artifactId> + <version>1.19.0</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>org.testcontainers</groupId> + <artifactId>mysql</artifactId> + <version>1.19.0</version> + <scope>test</scope> + </dependency> + + <dependency> + <groupId>org.testcontainers</groupId> + <artifactId>junit-jupiter</artifactId> + <version>1.19.0</version> + <scope>test</scope> + </dependency> </dependencies> </project> \ No newline at end of file diff --git a/metadata/src/main/java/com/automq/rocketmq/metadata/DefaultStoreMetadataService.java b/metadata/src/main/java/com/automq/rocketmq/metadata/DefaultStoreMetadataService.java index
7b58422a4..9592dc317 100644 --- a/metadata/src/main/java/com/automq/rocketmq/metadata/DefaultStoreMetadataService.java +++ b/metadata/src/main/java/com/automq/rocketmq/metadata/DefaultStoreMetadataService.java @@ -23,25 +23,23 @@ import apache.rocketmq.controller.v1.StreamMetadata; import apache.rocketmq.controller.v1.StreamRole; import com.automq.rocketmq.common.config.ControllerConfig; -import com.automq.rocketmq.controller.exception.ControllerException; import com.automq.rocketmq.controller.MetadataStore; -import com.automq.rocketmq.controller.server.store.DefaultMetadataStore; import com.automq.rocketmq.metadata.api.StoreMetadataService; +import com.automq.rocketmq.metadata.api.S3MetadataService; import java.util.List; import java.util.Optional; import java.util.concurrent.CompletableFuture; import org.apache.commons.lang3.tuple.Pair; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; public class DefaultStoreMetadataService implements StoreMetadataService { - private static final Logger LOGGER = LoggerFactory.getLogger(DefaultMetadataStore.class); - private final MetadataStore metadataStore; - public DefaultStoreMetadataService(MetadataStore metadataStore) { + private final S3MetadataService s3MetadataService; + + public DefaultStoreMetadataService(MetadataStore metadataStore, S3MetadataService s3MetadataService) { this.metadataStore = metadataStore; + this.s3MetadataService = s3MetadataService; } @Override @@ -71,7 +69,7 @@ public CompletableFuture maxDeliveryAttemptsOf(long consumerGroupId) { @Override public CompletableFuture trimStream(long streamId, long streamEpoch, long newStartOffset) { - return metadataStore.trimStream(streamId, streamEpoch, newStartOffset); + return s3MetadataService.trimStream(streamId, streamEpoch, newStartOffset); } @Override @@ -91,7 +89,7 @@ public CompletableFuture> listOpenStreams() { @Override public CompletableFuture prepareS3Objects(int count, int ttlInMinutes) { - return metadataStore.prepareS3Objects(count, ttlInMinutes); + return s3MetadataService.prepareS3Objects(count, ttlInMinutes); } @Override @@ -100,40 +98,35 @@ public CompletableFuture commitWalObject(S3WALObject walObject, List commitStreamObject(S3StreamObject streamObject, List compactedObjects) { - try { - return metadataStore.commitStreamObject(streamObject, compactedObjects); - } catch (ControllerException e) { - LOGGER.error("Exception raised while commit Stream Object for {}, {}", streamObject, compactedObjects, e); - return null; - } + return s3MetadataService.commitStreamObject(streamObject, compactedObjects); } @Override public CompletableFuture> listWALObjects() { - return metadataStore.listWALObjects(); + return s3MetadataService.listWALObjects(); } @Override public CompletableFuture> listWALObjects(long streamId, long startOffset, long endOffset, int limit) { - return metadataStore.listWALObjects(streamId, startOffset, endOffset, limit); + return s3MetadataService.listWALObjects(streamId, startOffset, endOffset, limit); } @Override public CompletableFuture> listStreamObjects(long streamId, long startOffset, long endOffset, int limit) { - return metadataStore.listStreamObjects(streamId, startOffset, endOffset, limit); + return s3MetadataService.listStreamObjects(streamId, startOffset, endOffset, limit); } @Override public CompletableFuture, List>> listObjects(long streamId, long startOffset, long endOffset, int limit) { - return metadataStore.listObjects(streamId, startOffset, endOffset, limit); + return s3MetadataService.listObjects(streamId, startOffset, endOffset, 
limit); } @Override diff --git a/metadata/src/main/java/com/automq/rocketmq/metadata/api/S3MetadataService.java b/metadata/src/main/java/com/automq/rocketmq/metadata/api/S3MetadataService.java new file mode 100644 index 000000000..042984131 --- /dev/null +++ b/metadata/src/main/java/com/automq/rocketmq/metadata/api/S3MetadataService.java @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.automq.rocketmq.metadata.api; + +import apache.rocketmq.controller.v1.S3StreamObject; +import apache.rocketmq.controller.v1.S3WALObject; +import java.io.Closeable; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import org.apache.commons.lang3.tuple.Pair; + +/** + * Localized accessor to S3 metadata. + */ +public interface S3MetadataService extends Closeable { + + CompletableFuture prepareS3Objects(int count, int ttlInMinutes); + + CompletableFuture commitWalObject(S3WALObject walObject, List streamObjects, + List compactedObjects); + + + CompletableFuture commitStreamObject(apache.rocketmq.controller.v1.S3StreamObject streamObject, + List compactedObjects); + + CompletableFuture> listWALObjects(); + + CompletableFuture> listWALObjects(long streamId, long startOffset, long endOffset, int limit); + + CompletableFuture> listStreamObjects(long streamId, long startOffset, long endOffset, int limit); + + CompletableFuture, List>> listObjects(long streamId, long startOffset, + long endOffset, int limit); + + CompletableFuture trimStream(long streamId, long streamEpoch, long newStartOffset); +} diff --git a/metadata/src/main/java/com/automq/rocketmq/metadata/s3/DefaultS3MetadataService.java b/metadata/src/main/java/com/automq/rocketmq/metadata/s3/DefaultS3MetadataService.java new file mode 100644 index 000000000..29253f1ca --- /dev/null +++ b/metadata/src/main/java/com/automq/rocketmq/metadata/s3/DefaultS3MetadataService.java @@ -0,0 +1,759 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.automq.rocketmq.metadata.s3; + +import apache.rocketmq.controller.v1.Code; +import apache.rocketmq.controller.v1.S3ObjectState; +import apache.rocketmq.controller.v1.S3StreamObject; +import apache.rocketmq.controller.v1.S3WALObject; +import apache.rocketmq.controller.v1.StreamState; +import apache.rocketmq.controller.v1.SubStream; +import apache.rocketmq.controller.v1.SubStreams; +import com.automq.rocketmq.common.config.ControllerConfig; +import com.automq.rocketmq.common.system.S3Constants; +import com.automq.rocketmq.common.system.StreamConstants; +import com.automq.rocketmq.controller.exception.ControllerException; +import com.automq.rocketmq.metadata.api.S3MetadataService; +import com.automq.rocketmq.metadata.dao.Range; +import com.automq.rocketmq.metadata.dao.S3Object; +import com.automq.rocketmq.metadata.dao.S3WalObject; +import com.automq.rocketmq.metadata.dao.Stream; +import com.automq.rocketmq.metadata.mapper.RangeMapper; +import com.automq.rocketmq.metadata.mapper.S3ObjectMapper; +import com.automq.rocketmq.metadata.mapper.S3StreamObjectMapper; +import com.automq.rocketmq.metadata.mapper.S3WalObjectMapper; +import com.automq.rocketmq.metadata.mapper.SequenceMapper; +import com.automq.rocketmq.metadata.mapper.StreamMapper; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.TextFormat; +import com.google.protobuf.util.JsonFormat; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Calendar; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.apache.commons.lang3.tuple.ImmutablePair; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.ibatis.session.SqlSession; +import org.apache.ibatis.session.SqlSessionFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class DefaultS3MetadataService implements S3MetadataService { + + private static final Logger LOGGER = LoggerFactory.getLogger(DefaultS3MetadataService.class); + + private final ControllerConfig nodeConfig; + + private final SqlSessionFactory sessionFactory; + + private final ExecutorService asyncExecutorService; + + private final S3StreamObjectCache s3StreamObjectCache; + + public DefaultS3MetadataService(ControllerConfig nodeConfig, SqlSessionFactory sessionFactory, + ExecutorService asyncExecutorService) { + this.nodeConfig = nodeConfig; + this.sessionFactory = sessionFactory; + this.asyncExecutorService = asyncExecutorService; + this.s3StreamObjectCache = new S3StreamObjectCache(); + } + + public CompletableFuture prepareS3Objects(int count, int ttlInMinutes) { + CompletableFuture future = new CompletableFuture<>(); + try (SqlSession session = sessionFactory.openSession()) { + // Get and update sequence + SequenceMapper sequenceMapper = session.getMapper(SequenceMapper.class); + long next = sequenceMapper.next(S3ObjectMapper.SEQUENCE_NAME); + sequenceMapper.update(S3ObjectMapper.SEQUENCE_NAME, next + count); + + S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); + Calendar calendar = Calendar.getInstance(); + calendar.add(Calendar.MINUTE, ttlInMinutes); + IntStream.range(0, count).forEach(i -> { + S3Object object = new S3Object(); + object.setId(next + i); + object.setState(S3ObjectState.BOS_PREPARED); + 
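// The sequence bump above reserves ids [next, next + count); each reserved id becomes a
// BOS_PREPARED placeholder row, and the expiry set below bounds how long the caller may take
// to commit it -- commits against expired prepared objects fail with ILLEGAL_STATE, which is
// exactly what the removed *_2PC_Expired tests asserted.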
object.setExpiredTimestamp(calendar.getTime()); + s3ObjectMapper.prepare(object); + }); + session.commit(); + future.complete(next); + } catch (Exception e) { + LOGGER.error("PrepareS3Objects failed", e); + ControllerException ex = new ControllerException(Code.INTERNAL_VALUE, "PrepareS3Objects failed: " + e.getMessage()); + future.completeExceptionally(ex); + } + return future; + } + + public CompletableFuture commitWalObject(S3WALObject walObject, + List streamObjects, List compactedObjects) { + if (Objects.isNull(walObject)) { + LOGGER.error("S3WALObject is unexpectedly null"); + ControllerException e = new ControllerException(Code.INTERNAL_VALUE, "S3WALObject is unexpectedly null"); + return CompletableFuture.failedFuture(e); + } + + LOGGER.info("commitWalObject with walObject: {}, streamObjects: {}, compactedObjects: {}", + TextFormat.shortDebugString(walObject), + streamObjects.stream() + .map(TextFormat::shortDebugString) + .collect(Collectors.joining(", ")), compactedObjects + ); + + CompletableFuture future = new CompletableFuture<>(); + try (SqlSession session = sessionFactory.openSession()) { + S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class); + S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class); + S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); + + int brokerId = walObject.getBrokerId(); + long objectId = walObject.getObjectId(); + + if (Objects.isNull(compactedObjects) || compactedObjects.isEmpty()) { + // verify stream continuity + List offsets = java.util.stream.Stream.concat( + streamObjects.stream() + .map(s3StreamObject -> new long[] {s3StreamObject.getStreamId(), s3StreamObject.getStartOffset(), s3StreamObject.getEndOffset()}), + walObject.getSubStreams().getSubStreamsMap().entrySet() + .stream() + .map(obj -> new long[] {obj.getKey(), obj.getValue().getStartOffset(), obj.getValue().getEndOffset()}) + ).toList(); + + if (!checkStreamAdvance(session, offsets)) { + LOGGER.error("S3WALObject[object-id={}]'s stream advance check failed", walObject.getObjectId()); + ControllerException e = new ControllerException(Code.NOT_FOUND_VALUE, String.format("S3WALObject[object-id=%d]'s stream advance check failed", walObject.getObjectId())); + future.completeExceptionally(e); + return future; + } + } + + // commit S3 object + if (objectId != S3Constants.NOOP_OBJECT_ID && !commitObject(objectId, StreamConstants.NOOP_STREAM_ID, walObject.getObjectSize(), session)) { + ControllerException e = new ControllerException(Code.ILLEGAL_STATE_VALUE, + String.format("S3WALObject[object-id=%d] is not ready for commit", walObject.getObjectId())); + future.completeExceptionally(e); + return future; + } + + long dataTs = System.currentTimeMillis(); + long sequenceId = objectId; + if (!Objects.isNull(compactedObjects) && !compactedObjects.isEmpty()) { + List s3WalObjects = compactedObjects.stream() + .map(id -> { + // mark destroy compacted object + S3Object object = s3ObjectMapper.getById(id); + object.setState(S3ObjectState.BOS_WILL_DELETE); + object.setMarkedForDeletionTimestamp(new Date()); + s3ObjectMapper.markToDelete(object.getId(), new Date()); + + return s3WALObjectMapper.getByObjectId(id); + }) + .toList(); + + if (!s3WalObjects.isEmpty()) { + // update dataTs to the min compacted object's dataTs + dataTs = s3WalObjects.stream() + .map(S3WalObject::getBaseDataTimestamp) + .map(Date::getTime) + .min(Long::compareTo).get(); + // update sequenceId to the min compacted object's sequenceId + sequenceId =
+            Map<Long, List<com.automq.rocketmq.metadata.dao.S3StreamObject>> toCache =
+                new HashMap<>();
+
+            // commit stream objects
+            if (!streamObjects.isEmpty()) {
+                for (apache.rocketmq.controller.v1.S3StreamObject s3StreamObject : streamObjects) {
+                    long oId = s3StreamObject.getObjectId();
+                    long objectSize = s3StreamObject.getObjectSize();
+                    long streamId = s3StreamObject.getStreamId();
+                    if (!commitObject(oId, streamId, objectSize, session)) {
+                        String msg = String.format("S3StreamObject[object-id=%d] is not ready to commit", oId);
+                        ControllerException e = new ControllerException(Code.ILLEGAL_STATE_VALUE, msg);
+                        future.completeExceptionally(e);
+                        return future;
+                    }
+                }
+                // create stream object records
+                streamObjects.forEach(s3StreamObject -> {
+                    com.automq.rocketmq.metadata.dao.S3StreamObject object =
+                        new com.automq.rocketmq.metadata.dao.S3StreamObject();
+                    object.setStreamId(s3StreamObject.getStreamId());
+                    object.setObjectId(s3StreamObject.getObjectId());
+                    object.setCommittedTimestamp(new Date());
+                    object.setStartOffset(s3StreamObject.getStartOffset());
+                    object.setBaseDataTimestamp(new Date());
+                    object.setEndOffset(s3StreamObject.getEndOffset());
+                    object.setObjectSize(s3StreamObject.getObjectSize());
+                    s3StreamObjectMapper.commit(object);
+                    // Use a mutable list per stream; an immutable List.of() would throw on the
+                    // second object of the same stream.
+                    toCache.computeIfAbsent(object.getStreamId(), k -> new ArrayList<>()).add(object);
+                });
+            }
+
+            // generate compacted objects' remove record ...
+            if (!Objects.isNull(compactedObjects) && !compactedObjects.isEmpty()) {
+                compactedObjects.forEach(id -> s3WALObjectMapper.delete(id, null, null));
+            }
+
+            // update broker's wal object
+            if (objectId != S3Constants.NOOP_OBJECT_ID) {
+                // generate broker's wal object record
+                S3WalObject s3WALObject = new S3WalObject();
+                s3WALObject.setObjectId(objectId);
+                s3WALObject.setObjectSize(walObject.getObjectSize());
+                s3WALObject.setBaseDataTimestamp(new Date(dataTs));
+                s3WALObject.setCommittedTimestamp(new Date());
+                s3WALObject.setNodeId(brokerId);
+                s3WALObject.setSequenceId(sequenceId);
+                String subStreams = JsonFormat.printer().print(walObject.getSubStreams());
+                s3WALObject.setSubStreams(subStreams);
+                s3WALObjectMapper.create(s3WALObject);
+            }
+            session.commit();
+
+            // Update Cache
+            for (Map.Entry<Long, List<com.automq.rocketmq.metadata.dao.S3StreamObject>> entry
+                : toCache.entrySet()) {
+                s3StreamObjectCache.cache(entry.getKey(), entry.getValue());
+            }
+            LOGGER.info("broker[broker-id={}] commit wal object[object-id={}] success, compacted objects[{}], stream objects[{}]",
+                brokerId, walObject.getObjectId(), compactedObjects, streamObjects);
+            future.complete(null);
+        } catch (Exception e) {
+            LOGGER.error("CommitWalObject failed", e);
+            ControllerException ex = new ControllerException(Code.INTERNAL_VALUE, "CommitWalObject failed: " + e.getMessage());
+            future.completeExceptionally(ex);
+        }
+        return future;
+    }
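+    /**
+     * Commit a stream object, optionally replacing a set of compacted stream objects. The
+     * compacted objects are marked for deletion and the replacement inherits the smallest
+     * base data timestamp among them.
+     */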
+    public CompletableFuture<Void> commitStreamObject(apache.rocketmq.controller.v1.S3StreamObject streamObject,
+        List<Long> compactedObjects) {
+        LOGGER.info("commitStreamObject with streamObject: {}, compactedObjects: {}", TextFormat.shortDebugString(streamObject),
+            compactedObjects);
+
+        CompletableFuture<Void> future = new CompletableFuture<>();
+        try (SqlSession session = sessionFactory.openSession()) {
+            if (streamObject.getObjectId() == S3Constants.NOOP_OBJECT_ID) {
+                LOGGER.error("S3StreamObject[object-id={}] is null or objectId is unavailable", streamObject.getObjectId());
+                String msg = String.format("S3StreamObject[object-id=%d] is null or objectId is unavailable",
+                    streamObject.getObjectId());
+                ControllerException e = new ControllerException(Code.NOT_FOUND_VALUE, msg);
+                future.completeExceptionally(e);
+                return future;
+            }
+
+            long committedTs = System.currentTimeMillis();
+            S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class);
+            S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class);
+
+            // commit object
+            if (!commitObject(streamObject.getObjectId(), streamObject.getStreamId(), streamObject.getObjectSize(), session)) {
+                String msg = String.format("S3StreamObject[object-id=%d] is not ready for commit",
+                    streamObject.getObjectId());
+                ControllerException e = new ControllerException(Code.ILLEGAL_STATE_VALUE, msg);
+                future.completeExceptionally(e);
+                return future;
+            }
+            long dataTs = committedTs;
+            if (!Objects.isNull(compactedObjects) && !compactedObjects.isEmpty()) {
+                dataTs = compactedObjects.stream()
+                    .map(id -> {
+                        // mark destroy compacted object
+                        S3Object object = s3ObjectMapper.getById(id);
+                        object.setState(S3ObjectState.BOS_WILL_DELETE);
+                        object.setMarkedForDeletionTimestamp(new Date());
+                        s3ObjectMapper.markToDelete(object.getId(), new Date());
+
+                        // update dataTs to the min compacted object's dataTs
+                        com.automq.rocketmq.metadata.dao.S3StreamObject s3StreamObject =
+                            s3StreamObjectMapper.getByObjectId(id);
+                        return s3StreamObject.getBaseDataTimestamp().getTime();
+                    })
+                    .min(Long::compareTo).get();
+            }
+
+            List<com.automq.rocketmq.metadata.dao.S3StreamObject> toCache = new ArrayList<>();
+
+            // create a new S3StreamObject to replace committed ones
+            if (streamObject.getObjectId() != S3Constants.NOOP_OBJECT_ID) {
+                com.automq.rocketmq.metadata.dao.S3StreamObject newS3StreamObj =
+                    new com.automq.rocketmq.metadata.dao.S3StreamObject();
+                newS3StreamObj.setStreamId(streamObject.getStreamId());
+                newS3StreamObj.setObjectId(streamObject.getObjectId());
+                newS3StreamObj.setObjectSize(streamObject.getObjectSize());
+                newS3StreamObj.setStartOffset(streamObject.getStartOffset());
+                newS3StreamObj.setEndOffset(streamObject.getEndOffset());
+                newS3StreamObj.setBaseDataTimestamp(new Date(dataTs));
+                newS3StreamObj.setCommittedTimestamp(new Date(committedTs));
+                s3StreamObjectMapper.create(newS3StreamObj);
+                toCache.add(newS3StreamObj);
+            }
+
+            // delete the compacted objects of the stream
+            if (!Objects.isNull(compactedObjects) && !compactedObjects.isEmpty()) {
+                compactedObjects.forEach(id -> s3StreamObjectMapper.delete(null, null, id));
+            }
+            session.commit();
+
+            // Update Cache
+            s3StreamObjectCache.cache(streamObject.getStreamId(), toCache);
+            s3StreamObjectCache.onCompact(streamObject.getStreamId(), compactedObjects);
+
+            LOGGER.info("S3StreamObject[object-id={}] commit success, compacted objects: {}",
+                streamObject.getObjectId(), compactedObjects);
+            future.complete(null);
+        } catch (Exception e) {
+            LOGGER.error("CommitStreamObject failed", e);
+            ControllerException ex = new ControllerException(Code.INTERNAL_VALUE, "CommitStreamObject failed: " + e.getMessage());
+            future.completeExceptionally(ex);
+        }
+        return future;
+    }
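+    /**
+     * List the WAL objects owned by this node. Sub-stream metadata is persisted as JSON and
+     * decoded back into {@link SubStreams} on read; records that fail to decode are skipped.
+     */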
LOGGER.error("Failed to deserialize SubStreams", e); + return null; + } + }) + .filter(Objects::nonNull) + .toList(); + future.complete(walObjects); + } + return future; + } + + public CompletableFuture> listWALObjects(long streamId, long startOffset, + long endOffset, int limit) { + CompletableFuture> future = new CompletableFuture<>(); + try (SqlSession session = sessionFactory.openSession()) { + RangeMapper rangeMapper = session.getMapper(RangeMapper.class); + + List nodes = rangeMapper.listByStreamId(streamId) + .stream() + .filter(range -> range.getEndOffset() > startOffset && range.getStartOffset() < endOffset) + .mapToInt(Range::getNodeId) + .distinct() + .boxed() + .toList(); + + S3WalObjectMapper s3WalObjectMapper = session.getMapper(S3WalObjectMapper.class); + List s3WALObjects = new ArrayList<>(); + for (int nodeId : nodes) { + List s3WalObjects = s3WalObjectMapper.list(nodeId, null); + s3WalObjects.stream() + .map(s3WalObject -> { + try { + Map subStreams = decode(s3WalObject.getSubStreams()).getSubStreamsMap(); + Map streamsRecords = new HashMap<>(); + if (subStreams.containsKey(streamId)) { + SubStream subStream = subStreams.get(streamId); + if (subStream.getStartOffset() <= endOffset && subStream.getEndOffset() > startOffset) { + streamsRecords.put(streamId, subStream); + } + } + if (!streamsRecords.isEmpty()) { + return buildS3WALObject(s3WalObject, SubStreams.newBuilder() + .putAllSubStreams(streamsRecords) + .build()); + } + } catch (InvalidProtocolBufferException e) { + LOGGER.error("Failed to deserialize SubStreams", e); + } + return null; + }) + .filter(Objects::nonNull) + .forEach(s3WALObjects::add); + } + + // Sort by start-offset of the given stream + s3WALObjects.sort((l, r) -> { + long lhs = l.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset(); + long rhs = r.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset(); + return Long.compare(lhs, rhs); + }); + + future.complete(s3WALObjects.stream().limit(limit).toList()); + } + return future; + } + + public CompletableFuture> listStreamObjects0( + long streamId, long startOffset, long endOffset, int limit) { + boolean skipCache = false; + // Serve with cache + if (s3StreamObjectCache.streamExclusive(streamId)) { + List list = + s3StreamObjectCache.listStreamObjects(streamId, startOffset, endOffset, limit); + if (!list.isEmpty()) { + return CompletableFuture.completedFuture(list.stream().toList()); + } + skipCache = true; + } + + CompletableFuture> future = + new CompletableFuture<>(); + try (SqlSession session = sessionFactory.openSession()) { + S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class); + S3WalObjectMapper s3WalObjectMapper = session.getMapper(S3WalObjectMapper.class); + if (!skipCache && s3WalObjectMapper.streamExclusive(nodeConfig.nodeId(), streamId)) { + s3StreamObjectCache.makeStreamExclusive(streamId); + List list = + s3StreamObjectMapper.listByStreamId(streamId); + s3StreamObjectCache.initStream(streamId, list); + return listStreamObjects0(streamId, startOffset, endOffset, limit); + } + List streamObjects = s3StreamObjectMapper + .list(null, streamId, startOffset, endOffset, limit); + future.complete(streamObjects); + } + return future; + } + + public CompletableFuture> listStreamObjects(long streamId, long startOffset, long endOffset, + int limit) { + return listStreamObjects0(streamId, startOffset, endOffset, limit) + .thenApply(list -> list.stream().map(this::buildS3StreamObject).toList()); + } + + private S3StreamObject 
+    private S3StreamObject buildS3StreamObject(
+        com.automq.rocketmq.metadata.dao.S3StreamObject originalObject) {
+        return S3StreamObject.newBuilder()
+            .setStreamId(originalObject.getStreamId())
+            .setObjectSize(originalObject.getObjectSize())
+            .setObjectId(originalObject.getObjectId())
+            .setStartOffset(originalObject.getStartOffset())
+            .setEndOffset(originalObject.getEndOffset())
+            .setBaseDataTimestamp(originalObject.getBaseDataTimestamp().getTime())
+            .setCommittedTimestamp(originalObject.getCommittedTimestamp() != null ?
+                originalObject.getCommittedTimestamp().getTime() : S3Constants.NOOP_OBJECT_COMMIT_TIMESTAMP)
+            .build();
+    }
+
+    private S3WALObject buildS3WALObject(
+        S3WalObject originalObject,
+        SubStreams subStreams) {
+        return S3WALObject.newBuilder()
+            .setObjectId(originalObject.getObjectId())
+            .setObjectSize(originalObject.getObjectSize())
+            .setBrokerId(originalObject.getNodeId())
+            .setSequenceId(originalObject.getSequenceId())
+            .setBaseDataTimestamp(originalObject.getBaseDataTimestamp().getTime())
+            .setCommittedTimestamp(originalObject.getCommittedTimestamp() != null ?
+                originalObject.getCommittedTimestamp().getTime() : S3Constants.NOOP_OBJECT_COMMIT_TIMESTAMP)
+            .setSubStreams(subStreams)
+            .build();
+    }
+
+    private SubStreams decode(String json) throws InvalidProtocolBufferException {
+        SubStreams.Builder builder = SubStreams.newBuilder();
+        JsonFormat.parser().ignoringUnknownFields().merge(json, builder);
+        return builder.build();
+    }
+
+    private boolean commitObject(Long objectId, long streamId, long objectSize, SqlSession session) {
+        S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class);
+        S3Object s3Object = s3ObjectMapper.getById(objectId);
+        if (Objects.isNull(s3Object)) {
+            LOGGER.error("object[object-id={}] does not exist", objectId);
+            return false;
+        }
+        // verify the state
+        if (s3Object.getState() == S3ObjectState.BOS_COMMITTED) {
+            LOGGER.warn("object[object-id={}] already committed", objectId);
+            return false;
+        }
+        if (s3Object.getState() != S3ObjectState.BOS_PREPARED) {
+            LOGGER.error("object[object-id={}] is not prepared but trying to commit", objectId);
+            return false;
+        }
+
+        Date commitDate = new Date();
+        if (s3Object.getExpiredTimestamp().getTime() < commitDate.getTime()) {
+            LOGGER.error("object[object-id={}] is expired", objectId);
+            return false;
+        }
+
+        s3Object.setCommittedTimestamp(commitDate);
+        s3Object.setStreamId(streamId);
+        s3Object.setObjectSize(objectSize);
+        s3Object.setState(S3ObjectState.BOS_COMMITTED);
+        s3ObjectMapper.commit(s3Object);
+        return true;
+    }
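+    /**
+     * Verify that every (streamId, startOffset, endOffset) triple starts exactly at the end
+     * offset of the stream's current range, then advance that range to the new end offset.
+     * Returns false if a stream is missing, not open, or the offsets would leave a gap.
+     */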
+    private boolean checkStreamAdvance(SqlSession session, List<long[]> offsets) {
+        if (offsets == null || offsets.isEmpty()) {
+            return true;
+        }
+        StreamMapper streamMapper = session.getMapper(StreamMapper.class);
+        RangeMapper rangeMapper = session.getMapper(RangeMapper.class);
+        for (long[] offset : offsets) {
+            long streamId = offset[0], startOffset = offset[1], endOffset = offset[2];
+            // verify that the stream exists and is open
+            Stream stream = streamMapper.getByStreamId(streamId);
+            if (Objects.isNull(stream) || stream.getState() != StreamState.OPEN) {
+                LOGGER.warn("Stream[stream-id={}] not found or not opened", streamId);
+                return false;
+            }
+
+            Range range = rangeMapper.get(stream.getRangeId(), streamId, null);
+            if (Objects.isNull(range)) {
+                // should not happen
+                LOGGER.error("Stream[stream-id={}]'s current range[range-id={}] does not exist although the stream has been created",
+                    streamId, stream.getRangeId());
+                return false;
+            }
+
+            if (range.getEndOffset() != startOffset) {
+                LOGGER.warn("Stream[stream-id={}]'s current range[range-id={}]'s end offset[{}] is not equal to request start offset[{}]",
+                    streamId, range.getRangeId(), range.getEndOffset(), startOffset);
+                return false;
+            }
+
+            range.setEndOffset(endOffset);
+            rangeMapper.update(range);
+        }
+        return true;
+    }
+
+    /**
+     * List both stream objects and WAL objects that overlap [startOffset, endOffset) of the
+     * given stream, applying {@code limit} across the combined result.
+     */
+    public CompletableFuture<Pair<List<S3StreamObject>, List<S3WALObject>>> listObjects(
+        long streamId, long startOffset, long endOffset, int limit) {
+        return CompletableFuture.supplyAsync(() -> {
+            try (SqlSession session = sessionFactory.openSession()) {
+                S3WalObjectMapper s3WalObjectMapper = session.getMapper(S3WalObjectMapper.class);
+
+                List<S3StreamObject> s3StreamObjects =
+                    listStreamObjects(streamId, startOffset, endOffset, limit).join();
+
+                List<S3WALObject> walObjects = new ArrayList<>();
+                s3WalObjectMapper.list(null, null)
+                    .stream()
+                    .map(s3WalObject -> {
+                        try {
+                            Map<Long, SubStream> subStreams = decode(s3WalObject.getSubStreams()).getSubStreamsMap();
+                            Map<Long, SubStream> streamsRecords = new HashMap<>();
+                            subStreams.entrySet().stream()
+                                .filter(entry -> !Objects.isNull(entry) && entry.getKey().equals(streamId))
+                                .filter(entry -> entry.getValue().getStartOffset() <= endOffset && entry.getValue().getEndOffset() > startOffset)
+                                .forEach(entry -> streamsRecords.put(entry.getKey(), entry.getValue()));
+                            return streamsRecords.isEmpty() ? null : buildS3WALObject(s3WalObject,
+                                SubStreams.newBuilder().putAllSubStreams(streamsRecords).build());
+                        } catch (InvalidProtocolBufferException e) {
+                            LOGGER.error("Failed to deserialize SubStreams", e);
+                            return null;
+                        }
+                    })
+                    .filter(Objects::nonNull)
+                    .limit(limit)
+                    .forEach(walObjects::add);
+
+                if (!walObjects.isEmpty()) {
+                    walObjects.sort((l, r) -> {
+                        long lhs = l.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset();
+                        long rhs = r.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset();
+                        return Long.compare(lhs, rhs);
+                    });
+                }
+
+                // apply limit in whole.
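+                // Merge stream objects and WAL objects ordered by start offset (object id
+                // breaks ties), keep the first `limit` entries, then filter both lists down to
+                // the surviving object ids.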
+                Set<Long> objectIds = java.util.stream.Stream.concat(
+                    s3StreamObjects.stream()
+                        .map(s3StreamObject -> new long[] {
+                            s3StreamObject.getObjectId(),
+                            s3StreamObject.getStartOffset(),
+                            s3StreamObject.getEndOffset()
+                        }),
+                    walObjects.stream()
+                        .map(s3WALObject -> new long[] {
+                            s3WALObject.getObjectId(),
+                            s3WALObject.getSubStreams().getSubStreamsMap().get(streamId).getStartOffset(),
+                            s3WALObject.getSubStreams().getSubStreamsMap().get(streamId).getEndOffset()
+                        })
+                ).sorted((l, r) -> {
+                    if (l[1] == r[1]) {
+                        return Long.compare(l[0], r[0]);
+                    }
+                    return Long.compare(l[1], r[1]);
+                }).limit(limit)
+                    .map(offset -> offset[0])
+                    .collect(Collectors.toSet());
+
+                List<S3StreamObject> limitedStreamObjects = s3StreamObjects.stream()
+                    .filter(s3StreamObject -> objectIds.contains(s3StreamObject.getObjectId()))
+                    .toList();
+
+                List<S3WALObject> limitedWalObjectList = walObjects.stream()
+                    .filter(s3WALObject -> objectIds.contains(s3WALObject.getObjectId()))
+                    .toList();
+
+                return new ImmutablePair<>(limitedStreamObjects, limitedWalObjectList);
+            }
+        }, asyncExecutorService);
+    }
+
+    /**
+     * Trim a stream so that data before {@code newStartOffset} becomes eligible for deletion:
+     * advance the stream's start offset, shrink or drop affected ranges, and mark fully
+     * trimmed S3 objects for destruction.
+     */
+    public CompletableFuture<Void> trimStream(long streamId, long streamEpoch, long newStartOffset) {
+        CompletableFuture<Void> future = new CompletableFuture<>();
+        try (SqlSession session = sessionFactory.openSession()) {
+            StreamMapper streamMapper = session.getMapper(StreamMapper.class);
+            RangeMapper rangeMapper = session.getMapper(RangeMapper.class);
+            S3StreamObjectMapper s3StreamObjectMapper = session.getMapper(S3StreamObjectMapper.class);
+            S3WalObjectMapper s3WALObjectMapper = session.getMapper(S3WalObjectMapper.class);
+            S3ObjectMapper s3ObjectMapper = session.getMapper(S3ObjectMapper.class);
+
+            Stream stream = streamMapper.getByStreamId(streamId);
+            if (null == stream) {
+                ControllerException e = new ControllerException(Code.NOT_FOUND_VALUE,
+                    String.format("Stream[stream-id=%d] is not found", streamId)
+                );
+                future.completeExceptionally(e);
+                return future;
+            }
+            if (stream.getState() == StreamState.CLOSED) {
+                // a CLOSED stream cannot be trimmed; fail the future instead of returning null
+                LOGGER.warn("Stream[{}]'s state is CLOSED, can't trim", streamId);
+                ControllerException e = new ControllerException(Code.ILLEGAL_STATE_VALUE,
+                    String.format("Stream[stream-id=%d] is CLOSED, can't trim", streamId));
+                future.completeExceptionally(e);
+                return future;
+            }
+            if (stream.getStartOffset() > newStartOffset) {
+                LOGGER.warn("Stream[{}]'s start offset {} is larger than request new start offset {}",
+                    streamId, stream.getStartOffset(), newStartOffset);
+                ControllerException e = new ControllerException(Code.ILLEGAL_STATE_VALUE,
+                    String.format("Stream[stream-id=%d]'s start offset %d is larger than request new start offset %d",
+                        streamId, stream.getStartOffset(), newStartOffset));
+                future.completeExceptionally(e);
+                return future;
+            }
+            if (stream.getStartOffset() == newStartOffset) {
+                // regard it as a redundant trim operation, just return success
+                future.complete(null);
+                return future;
+            }
+
+            // now the request is valid
+            // update the stream metadata start offset
+            stream.setEpoch(streamEpoch);
+            stream.setStartOffset(newStartOffset);
+            streamMapper.update(stream);
+
+            // remove ranges or update a range's start offset
+            rangeMapper.listByStreamId(streamId).forEach(range -> {
+                if (newStartOffset <= range.getStartOffset()) {
+                    return;
+                }
+                if (stream.getRangeId().equals(range.getRangeId())) {
+                    // current range, update start offset
+                    // if current range is [50, 100):
+                    // 1. try to trim to 40, then current range will be [50, 100)
+                    // 2. try to trim to 60, then current range will be [60, 100)
+                    // 3. try to trim to 100, then current range will be [100, 100)
+                    // 4. try to trim to 110, then current range will be [100, 100)
+                    long newRangeStartOffset = Math.min(newStartOffset, range.getEndOffset());
+                    range.setStartOffset(newRangeStartOffset);
+                    rangeMapper.update(range);
+                    return;
+                }
+                if (newStartOffset >= range.getEndOffset()) {
+                    // the whole range is trimmed away, remove it
+                    rangeMapper.delete(range.getRangeId(), streamId);
+                    return;
+                }
+                // update the range's start offset
+                range.setStartOffset(newStartOffset);
+                rangeMapper.update(range);
+            });
+            // remove stream objects that fall entirely before the new start offset
+            s3StreamObjectMapper.listByStreamId(streamId).forEach(streamObject -> {
+                long streamStartOffset = streamObject.getStartOffset();
+                long streamEndOffset = streamObject.getEndOffset();
+                if (newStartOffset <= streamStartOffset) {
+                    return;
+                }
+                if (newStartOffset >= streamEndOffset) {
+                    // remove the stream object record
+                    s3StreamObjectMapper.delete(null, streamId, streamObject.getObjectId());
+                    // mark the backing S3 object for destruction
+                    S3Object s3Object = s3ObjectMapper.getById(streamObject.getObjectId());
+                    s3Object.setMarkedForDeletionTimestamp(new Date());
+                    s3ObjectMapper.markToDelete(s3Object.getId(), new Date());
+                }
+            });
+
+            // remove wal objects or remove sub-stream ranges in wal objects
+            s3WALObjectMapper.list(stream.getDstNodeId(), null).stream()
+                .map(s3WALObject -> {
+                    try {
+                        return buildS3WALObject(s3WALObject, decode(s3WALObject.getSubStreams()));
+                    } catch (InvalidProtocolBufferException e) {
+                        LOGGER.error("Failed to deserialize SubStreams", e);
+                        return null;
+                    }
+                })
+                .filter(Objects::nonNull)
+                .filter(s3WALObject -> s3WALObject.getSubStreams().getSubStreamsMap().containsKey(streamId))
+                .filter(s3WALObject -> s3WALObject.getSubStreams().getSubStreamsMap().get(streamId).getEndOffset() <= newStartOffset)
+                .forEach(s3WALObject -> {
+                    if (s3WALObject.getSubStreams().getSubStreamsMap().size() == 1) {
+                        // only this sub-stream range is left and it is being removed, so the
+                        // whole wal object can be removed as well
+                        S3Object s3Object = s3ObjectMapper.getById(s3WALObject.getObjectId());
+                        s3Object.setMarkedForDeletionTimestamp(new Date());
+                        s3ObjectMapper.markToDelete(s3Object.getId(), new Date());
+                    }
+
+                    // remove offset range about sub-stream ...
+                });
+            session.commit();
+
+            // Update cache
+            s3StreamObjectCache.onTrim(streamId, newStartOffset);
+
+            LOGGER.info("Node[node-id={}] trim stream [stream-id={}] with epoch={} and newStartOffset={}",
+                nodeConfig.nodeId(), streamId, streamEpoch, newStartOffset);
+            future.complete(null);
+        } catch (Exception e) {
+            LOGGER.error("TrimStream failed", e);
+            ControllerException ex = new ControllerException(Code.INTERNAL_VALUE, "TrimStream failed: " + e.getMessage());
+            future.completeExceptionally(ex);
+        }
+        return future;
+    }
+
+    @Override
+    public void close() throws IOException {
+
+    }
+}
diff --git a/controller/src/main/java/com/automq/rocketmq/controller/server/store/impl/cache/S3StreamObjectCache.java b/metadata/src/main/java/com/automq/rocketmq/metadata/s3/S3StreamObjectCache.java
similarity index 98%
rename from controller/src/main/java/com/automq/rocketmq/controller/server/store/impl/cache/S3StreamObjectCache.java
rename to metadata/src/main/java/com/automq/rocketmq/metadata/s3/S3StreamObjectCache.java
index 007e55626..fabdba6cf 100644
--- a/controller/src/main/java/com/automq/rocketmq/controller/server/store/impl/cache/S3StreamObjectCache.java
+++ b/metadata/src/main/java/com/automq/rocketmq/metadata/s3/S3StreamObjectCache.java
@@ -15,7 +15,7 @@
  * limitations under the License.
*/ -package com.automq.rocketmq.controller.server.store.impl.cache; +package com.automq.rocketmq.metadata.s3; import com.automq.rocketmq.metadata.dao.S3StreamObject; import com.google.common.collect.Lists; diff --git a/metadata/src/test/java/com/automq/rocketmq/metadata/DefaultStoreMetadataServiceTest.java b/metadata/src/test/java/com/automq/rocketmq/metadata/DefaultStoreMetadataServiceTest.java index 1d7e7c761..20feeb880 100644 --- a/metadata/src/test/java/com/automq/rocketmq/metadata/DefaultStoreMetadataServiceTest.java +++ b/metadata/src/test/java/com/automq/rocketmq/metadata/DefaultStoreMetadataServiceTest.java @@ -24,6 +24,7 @@ import com.automq.rocketmq.common.config.ControllerConfig; import com.automq.rocketmq.controller.exception.ControllerException; import com.automq.rocketmq.controller.MetadataStore; +import com.automq.rocketmq.metadata.api.S3MetadataService; import java.util.ArrayList; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; @@ -42,12 +43,16 @@ class DefaultStoreMetadataServiceTest { @Mock private ControllerConfig config; + @Mock private MetadataStore metadataStore; + @Mock + private S3MetadataService s3MetadataService; + @Test public void testCommitWalObject() { - DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore); + DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore, s3MetadataService); S3WALObject walObject = S3WALObject.newBuilder().setObjectId(1L).setBrokerId(10).build(); int nodeId = 100; when(metadataStore.config()).thenReturn(config); @@ -56,7 +61,7 @@ public void testCommitWalObject() { service.commitWalObject(walObject, new ArrayList<>(), new ArrayList<>()); // Verify the arguments passed to metadataStore.commitWalObject(). 
S3WALObject newWal = S3WALObject.newBuilder(walObject).setBrokerId(nodeId).build(); - Mockito.verify(metadataStore).commitWalObject(ArgumentMatchers.eq(newWal), ArgumentMatchers.anyList(), ArgumentMatchers.anyList()); + Mockito.verify(s3MetadataService).commitWalObject(ArgumentMatchers.eq(newWal), ArgumentMatchers.anyList(), ArgumentMatchers.anyList()); } @Test @@ -69,7 +74,7 @@ public void testGetStreamId() { ArgumentMatchers.nullable(Long.class), ArgumentMatchers.eq(StreamRole.STREAM_ROLE_DATA))) .thenReturn(future); - DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore); + DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore, s3MetadataService); Assertions.assertEquals(1L, service.dataStreamOf(1L, 2).join().getStreamId()); } @@ -80,7 +85,7 @@ public void testGetStreamId_throws() { when(metadataStore.getStream(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.nullable(Long.class), ArgumentMatchers.eq(StreamRole.STREAM_ROLE_DATA))) .thenReturn(future); - DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore); + DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore, s3MetadataService); CompletableFuture streamCf = service.dataStreamOf(1L, 2); // Assert exception thrown ControllerException exception = (ControllerException) Assertions.assertThrows(ExecutionException.class, streamCf::get).getCause(); @@ -96,7 +101,7 @@ public void testGetOperationLogStreamId() { when(metadataStore.getStream(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.nullable(Long.class), ArgumentMatchers.eq(StreamRole.STREAM_ROLE_OPS))) .thenReturn(future); - DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore); + DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore, s3MetadataService); Assertions.assertEquals(1L, service.operationStreamOf(1L, 2).join().getStreamId()); } @@ -107,7 +112,7 @@ public void testGetOperationLogStreamId_throws() { when(metadataStore.getStream(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.nullable(Long.class), ArgumentMatchers.eq(StreamRole.STREAM_ROLE_OPS))) .thenReturn(future); - DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore); + DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore, s3MetadataService); CompletableFuture streamCf = service.operationStreamOf(1L, 2); // Assert exception thrown ControllerException exception = (ControllerException) Assertions.assertThrows(ExecutionException.class, streamCf::get).getCause(); @@ -123,7 +128,7 @@ public void testGetRetryStreamId() { when(metadataStore.getStream(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.nullable(Long.class), ArgumentMatchers.eq(StreamRole.STREAM_ROLE_RETRY))) .thenReturn(future); - DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore); + DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore, s3MetadataService); Assertions.assertEquals(1L, service.retryStreamOf(3L, 1L, 2).join().getStreamId()); } @@ -134,7 +139,7 @@ public void testGetRetryStreamId_throws() { when(metadataStore.getStream(ArgumentMatchers.anyLong(), ArgumentMatchers.anyInt(), ArgumentMatchers.nullable(Long.class), ArgumentMatchers.eq(StreamRole.STREAM_ROLE_RETRY))) .thenReturn(future); - DefaultStoreMetadataService service = new 
DefaultStoreMetadataService(metadataStore);
+        DefaultStoreMetadataService service = new DefaultStoreMetadataService(metadataStore, s3MetadataService);
         CompletableFuture streamCf = service.retryStreamOf(0L, 1L, 2);
         // Assert exception thrown
diff --git a/metadata/src/test/java/com/automq/rocketmq/metadata/s3/DatabaseTestBase.java b/metadata/src/test/java/com/automq/rocketmq/metadata/s3/DatabaseTestBase.java
new file mode 100644
index 000000000..97a7dbfa5
--- /dev/null
+++ b/metadata/src/test/java/com/automq/rocketmq/metadata/s3/DatabaseTestBase.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.automq.rocketmq.metadata.s3;
+
+import apache.rocketmq.controller.v1.SubStream;
+import apache.rocketmq.controller.v1.SubStreams;
+import com.automq.rocketmq.metadata.dao.Lease;
+import com.automq.rocketmq.metadata.dao.S3StreamObject;
+import com.automq.rocketmq.metadata.dao.S3WalObject;
+import com.automq.rocketmq.metadata.mapper.GroupMapper;
+import com.automq.rocketmq.metadata.mapper.GroupProgressMapper;
+import com.automq.rocketmq.metadata.mapper.LeaseMapper;
+import com.automq.rocketmq.metadata.mapper.NodeMapper;
+import com.automq.rocketmq.metadata.mapper.QueueAssignmentMapper;
+import com.automq.rocketmq.metadata.mapper.RangeMapper;
+import com.automq.rocketmq.metadata.mapper.S3ObjectMapper;
+import com.automq.rocketmq.metadata.mapper.S3StreamObjectMapper;
+import com.automq.rocketmq.metadata.mapper.S3WalObjectMapper;
+import com.automq.rocketmq.metadata.mapper.StreamMapper;
+import com.automq.rocketmq.metadata.mapper.TopicMapper;
+import com.google.protobuf.InvalidProtocolBufferException;
+import com.google.protobuf.util.JsonFormat;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Calendar;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ibatis.io.Resources;
+import org.apache.ibatis.session.SqlSession;
+import org.apache.ibatis.session.SqlSessionFactory;
+import org.apache.ibatis.session.SqlSessionFactoryBuilder;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.testcontainers.containers.MySQLContainer;
+import org.testcontainers.utility.DockerImageName;
+
+public class DatabaseTestBase {
+
+    AtomicLong s3ObjectIdSequence;
+
+    public DatabaseTestBase() {
+        this.s3ObjectIdSequence = new AtomicLong(1);
+    }
+
+    protected long nextS3ObjectId() {
+        return this.s3ObjectIdSequence.getAndIncrement();
+    }
+
+    static MySQLContainer<?> mySQLContainer = new MySQLContainer<>(DockerImageName.parse("mysql:8"))
+        .withDatabaseName("metadata")
+        .withInitScript("ddl.sql")
+        .withReuse(true);
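+    // A single reusable MySQL container backs the whole test class; cleanTables() below
+    // resets the database state before each test.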
+    @BeforeAll
+    public static void startMySQLContainer() {
+        mySQLContainer.start();
+    }
+
+    protected SqlSessionFactory getSessionFactory() throws IOException {
+        String resource = "database/mybatis-config.xml";
+        InputStream inputStream = Resources.getResourceAsStream(resource);
+
+        Properties properties = new Properties();
+        properties.put("password", "test");
+        properties.put("jdbcUrl", mySQLContainer.getJdbcUrl() + "?TC_REUSABLE=true");
+        return new SqlSessionFactoryBuilder().build(inputStream, properties);
+    }
+
+    @BeforeEach
+    protected void cleanTables() throws IOException {
+        try (SqlSession session = getSessionFactory().openSession(true)) {
+            session.getMapper(GroupMapper.class).delete(null);
+            session.getMapper(GroupProgressMapper.class).delete(null, null);
+            session.getMapper(NodeMapper.class).delete(null);
+            session.getMapper(QueueAssignmentMapper.class).delete(null);
+            session.getMapper(TopicMapper.class).delete(null);
+            session.getMapper(StreamMapper.class).delete(null);
+            session.getMapper(RangeMapper.class).delete(null, null);
+            session.getMapper(S3ObjectMapper.class).delete(null);
+            session.getMapper(S3StreamObjectMapper.class).delete(null, null, null);
+            session.getMapper(S3WalObjectMapper.class).delete(null, null, null);
+
+            LeaseMapper mapper = session.getMapper(LeaseMapper.class);
+            Lease lease = mapper.currentWithWriteLock();
+            lease.setNodeId(1);
+            lease.setEpoch(1);
+            Calendar calendar = Calendar.getInstance();
+            calendar.set(2023, Calendar.JANUARY, 1);
+            lease.setExpirationTime(calendar.getTime());
+            mapper.update(lease);
+        }
+    }
+
+    protected String toJson(Map<Long, SubStream> map) {
+        SubStreams subStreams = SubStreams.newBuilder().putAllSubStreams(map).build();
+        try {
+            return JsonFormat.printer().print(subStreams);
+        } catch (InvalidProtocolBufferException e) {
+            Assertions.fail(e);
+            throw new RuntimeException(e);
+        }
+    }
+
+    protected List<S3StreamObject> buildS3StreamObjs(long objectId,
+        int count, long startOffset, long interval) {
+        List<S3StreamObject> s3StreamObjects = new ArrayList<>();
+
+        for (long i = 0; i < count; i++) {
+            S3StreamObject s3StreamObject = new S3StreamObject();
+            s3StreamObject.setObjectId(objectId + i);
+            s3StreamObject.setObjectSize(100 + i);
+            s3StreamObject.setStreamId(i + 1);
+            s3StreamObject.setStartOffset(startOffset + i * interval);
+            s3StreamObject.setEndOffset(startOffset + (i + 1) * interval);
+            s3StreamObject.setBaseDataTimestamp(new Date());
+            s3StreamObjects.add(s3StreamObject);
+        }
+
+        return s3StreamObjects;
+    }
+
+    protected List<S3WalObject> buildS3WalObjs(long objectId, int count) {
+        List<S3WalObject> s3WalObjects = new ArrayList<>();
+
+        for (long i = 0; i < count; i++) {
+            S3WalObject s3WalObject = new S3WalObject();
+            s3WalObject.setObjectId(objectId + i);
+            s3WalObject.setObjectSize(100 + i);
+            s3WalObject.setSequenceId(objectId + i);
+            s3WalObject.setNodeId((int) i + 1);
+            s3WalObject.setBaseDataTimestamp(new Date());
+            s3WalObjects.add(s3WalObject);
+        }
+
+        return s3WalObjects;
+    }
+
+    protected Map<Long, SubStream> buildWalSubStreams(int count, long startOffset, long interval) {
+        Map<Long, SubStream> subStreams = new HashMap<>();
+        for (int i = 0; i < count; i++) {
+            SubStream subStream = SubStream.newBuilder()
+                .setStreamId(i + 1)
+                .setStartOffset(startOffset + i * interval)
+                .setEndOffset(startOffset + (i + 1) * interval)
+                .build();
+
+            subStreams.put((long) i + 1, subStream);
+        }
+        return subStreams;
+    }
+}
diff --git
a/controller/src/test/java/com/automq/rocketmq/controller/store/impl/S3MetadataManagerTest.java b/metadata/src/test/java/com/automq/rocketmq/metadata/s3/DefaultS3MetadataServiceTest.java similarity index 78% rename from controller/src/test/java/com/automq/rocketmq/controller/store/impl/S3MetadataManagerTest.java rename to metadata/src/test/java/com/automq/rocketmq/metadata/s3/DefaultS3MetadataServiceTest.java index 418d288a0..e4b58a6a7 100644 --- a/controller/src/test/java/com/automq/rocketmq/controller/store/impl/S3MetadataManagerTest.java +++ b/metadata/src/test/java/com/automq/rocketmq/metadata/s3/DefaultS3MetadataServiceTest.java @@ -6,17 +6,16 @@ * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * + * See the License for the specific language governing permissions and + * limitations under the License. */ -package com.automq.rocketmq.controller.store.impl; +package com.automq.rocketmq.metadata.s3; import apache.rocketmq.controller.v1.S3ObjectState; import apache.rocketmq.controller.v1.S3StreamObject; @@ -24,14 +23,10 @@ import apache.rocketmq.controller.v1.StreamRole; import apache.rocketmq.controller.v1.StreamState; import apache.rocketmq.controller.v1.SubStream; +import com.automq.rocketmq.common.config.ControllerConfig; import com.automq.rocketmq.common.system.StreamConstants; -import com.automq.rocketmq.controller.exception.ControllerException; import com.automq.rocketmq.controller.ControllerClient; -import com.automq.rocketmq.controller.store.DatabaseTestBase; -import com.automq.rocketmq.controller.MetadataStore; -import com.automq.rocketmq.controller.server.store.DefaultMetadataStore; -import com.automq.rocketmq.controller.server.store.Role; -import com.automq.rocketmq.metadata.dao.Lease; +import com.automq.rocketmq.metadata.api.S3MetadataService; import com.automq.rocketmq.metadata.dao.Range; import com.automq.rocketmq.metadata.dao.S3Object; import com.automq.rocketmq.metadata.dao.S3WalObject; @@ -41,9 +36,10 @@ import com.automq.rocketmq.metadata.mapper.S3StreamObjectMapper; import com.automq.rocketmq.metadata.mapper.S3WalObjectMapper; import com.automq.rocketmq.metadata.mapper.StreamMapper; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import org.apache.commons.lang3.tuple.Pair; import org.apache.ibatis.session.SqlSession; -import org.awaitility.Awaitility; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -55,14 +51,20 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -public class S3MetadataManagerTest extends DatabaseTestBase { +public class DefaultS3MetadataServiceTest extends DatabaseTestBase { ControllerClient client; - public S3MetadataManagerTest() { + ControllerConfig config; + + ExecutorService executorService; + + public DefaultS3MetadataServiceTest() { this.client = Mockito.mock(ControllerClient.class); + this.config = Mockito.mock(ControllerConfig.class); + 
Mockito.when(config.nodeId()).thenReturn(1); + this.executorService = Executors.newSingleThreadExecutor(); } @Test @@ -79,8 +81,8 @@ public void testListStreamObjects() throws IOException, ExecutionException, Inte session.commit(); } - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - List s3StreamObjects = metadataStore.listStreamObjects(streamId, startOffset, endOffset, limit).get(); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + List s3StreamObjects = service.listStreamObjects(streamId, startOffset, endOffset, limit).get(); S3StreamObject s3StreamObject = s3StreamObjects.get(0); Assertions.assertEquals(1, s3StreamObject.getObjectId()); Assertions.assertEquals(100, s3StreamObject.getObjectSize()); @@ -120,12 +122,8 @@ public void testListWALObjects_WithPrams() throws IOException, ExecutionExceptio Map expectedSubStream = buildWalSubStreams(1, 0, 10); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - List s3WALObjects = metadataStore.listWALObjects(streamId, startOffset, endOffset, limit).get(); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + List s3WALObjects = service.listWALObjects(streamId, startOffset, endOffset, limit).get(); Assertions.assertFalse(s3WALObjects.isEmpty()); S3WALObject s3WALObject = s3WALObjects.get(0); @@ -158,12 +156,8 @@ public void testListWALObjects_NotParams() throws IOException, ExecutionExceptio Map subStreams = buildWalSubStreams(4, 0, 10); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - List s3WALObjects = metadataStore.listWALObjects().get(); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + List s3WALObjects = service.listWALObjects().get(); Assertions.assertFalse(s3WALObjects.isEmpty()); S3WALObject s3WALObject = s3WALObjects.get(0); @@ -201,12 +195,8 @@ public void testListObjects_OnlyStream() throws IOException, ExecutionException, session.commit(); } - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - Pair, List> listPair = metadataStore.listObjects(1, startOffset, endOffset, limit).get(); + try (DefaultS3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + Pair, List> listPair = service.listObjects(1, startOffset, endOffset, limit).get(); Assertions.assertFalse(listPair.getLeft().isEmpty()); Assertions.assertTrue(listPair.getRight().isEmpty()); @@ -247,12 +237,8 @@ public void testListObjects_OnlyWAL() throws IOException, ExecutionException, In Map subStreams = buildWalSubStreams(1, 10, 10); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - 
metadataStore.setLease(lease); - Pair, List> listPair = metadataStore.listObjects(streamId, startOffset, endOffset, limit).get(); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + Pair, List> listPair = service.listObjects(streamId, startOffset, endOffset, limit).get(); Assertions.assertTrue(listPair.getLeft().isEmpty()); Assertions.assertFalse(listPair.getRight().isEmpty()); @@ -295,12 +281,8 @@ public void testListObjects_Both() throws IOException, ExecutionException, Inter Map subStreams = buildWalSubStreams(1, 10, 10); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - Pair, List> listPair = metadataStore.listObjects(streamId, startOffset, endOffset, limit).get(); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + Pair, List> listPair = service.listObjects(streamId, startOffset, endOffset, limit).get(); Assertions.assertFalse(listPair.getLeft().isEmpty()); Assertions.assertFalse(listPair.getRight().isEmpty()); @@ -359,12 +341,8 @@ public void testListObjects_Both_Interleaved() throws IOException, ExecutionExce Map subStreams1 = buildWalSubStreams(1, 0, 10); Map subStreams2 = buildWalSubStreams(1, 20, 20); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - Pair, List> listPair = metadataStore.listObjects(streamId, startOffset, endOffset, limit).get(); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + Pair, List> listPair = service.listObjects(streamId, startOffset, endOffset, limit).get(); Assertions.assertFalse(listPair.getLeft().isEmpty()); Assertions.assertFalse(listPair.getRight().isEmpty()); @@ -441,12 +419,8 @@ public void testTrimStream() throws IOException { session.commit(); } - try (MetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with().atMost(10, TimeUnit.SECONDS).pollInterval(100, TimeUnit.MILLISECONDS) - .until(metadataStore::isLeader); - - metadataStore.trimStream(streamId, streamEpoch, newStartOffset); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + service.trimStream(streamId, streamEpoch, newStartOffset); } try (SqlSession session = this.getSessionFactory().openSession()) { @@ -470,13 +444,8 @@ public void testTrimStream() throws IOException { public void testPrepareS3Objects() throws IOException { long objectId; - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - objectId = metadataStore.prepareS3Objects(3, 5).get(); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + objectId = service.prepareS3Objects(3, 5).get(); } catch (Exception e) { throw new RuntimeException(e); } @@ -491,16 +460,11 @@ public void 
testPrepareS3Objects() throws IOException { } @Test - public void testCommitStreamObject() throws IOException, ControllerException { + public void testCommitStreamObject() throws IOException { long objectId, streamId = 1; - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - objectId = metadataStore.prepareS3Objects(3, 5).get(); + try (S3MetadataService metadataService = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + objectId = metadataService.prepareS3Objects(3, 5).get(); } catch (Exception e) { throw new RuntimeException(e); } @@ -517,17 +481,11 @@ public void testCommitStreamObject() throws IOException, ControllerException { session.commit(); } - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { List compactedObjects = new ArrayList<>(); compactedObjects.add(objectId); compactedObjects.add(objectId + 1); - metadataStore.commitStreamObject(news3StreamObject, compactedObjects); + service.commitStreamObject(news3StreamObject, compactedObjects); } try (SqlSession session = getSessionFactory().openSession()) { @@ -553,16 +511,11 @@ public void testCommitStreamObject() throws IOException, ControllerException { } @Test - public void testCommitStreamObject_NoCompacted() throws IOException, ControllerException { + public void testCommitStreamObject_NoCompacted() throws IOException { long objectId, streamId = 1; - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - objectId = metadataStore.prepareS3Objects(3, 5).get(); + try (S3MetadataService metadataService = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + objectId = metadataService.prepareS3Objects(3, 5).get(); } catch (Exception e) { throw new RuntimeException(e); } @@ -573,15 +526,8 @@ public void testCommitStreamObject_NoCompacted() throws IOException, ControllerE .setObjectSize(111L) .build(); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - - metadataStore.commitStreamObject(news3StreamObject, Collections.emptyList()); + try (DefaultS3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + service.commitStreamObject(news3StreamObject, Collections.emptyList()); } try (SqlSession session = getSessionFactory().openSession()) { @@ -614,14 +560,8 @@ public void testCommitStreamObject_ObjectNotExist() throws IOException { } List compactedObjects = new ArrayList<>(); - try (DefaultMetadataStore metadataStore = new 
DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - - Assertions.assertThrows(ExecutionException.class, () -> metadataStore.commitStreamObject(s3StreamObject, compactedObjects).get()); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + Assertions.assertThrows(ExecutionException.class, () -> service.commitStreamObject(s3StreamObject, compactedObjects).get()); } } @@ -642,14 +582,8 @@ public void testCommitStreamObject_StreamNotExist() throws IOException { } List compactedObjects = new ArrayList<>(); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - - Assertions.assertThrows(ExecutionException.class, () -> metadataStore.commitStreamObject(s3StreamObject, compactedObjects).get()); + try (S3MetadataService service = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + Assertions.assertThrows(ExecutionException.class, () -> service.commitStreamObject(s3StreamObject, compactedObjects).get()); } } @@ -659,13 +593,8 @@ public void testCommitWALObject() throws IOException, ExecutionException, Interr long objectId; int nodeId = 1; - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - objectId = metadataStore.prepareS3Objects(5, 5).get(); + try (S3MetadataService s3MetadataService = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + objectId = s3MetadataService.prepareS3Objects(5, 5).get(); } catch (Exception e) { throw new RuntimeException(e); } @@ -706,14 +635,8 @@ public void testCommitWALObject() throws IOException, ExecutionException, Interr .setEndOffset(s3StreamObject2.getEndOffset()) .build()).toList(); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - - metadataStore.commitWalObject(walObject, s3StreamObjects, compactedObjects).get(); + try (S3MetadataService manager = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + manager.commitWalObject(walObject, s3StreamObjects, compactedObjects).get(); } try (SqlSession session = getSessionFactory().openSession()) { @@ -785,15 +708,9 @@ public void testCommitWalObject_ObjectNotPrepare() throws IOException, Execution } List compactedObjects = new ArrayList<>(); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - Assertions.assertNull(metadataStore.getLease()); - Lease lease = new Lease(); - lease.setNodeId(config.nodeId()); - metadataStore.setLease(lease); - metadataStore.setRole(Role.Leader); - - List s3StreamObjects = metadataStore.listStreamObjects(streamId, startOffset, endOffset, 2).get(); - 
Assertions.assertThrows(ExecutionException.class, () -> metadataStore.commitWalObject(walObject, s3StreamObjects, compactedObjects).get()); + try (S3MetadataService manager = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { + List s3StreamObjects = manager.listStreamObjects(streamId, startOffset, endOffset, 2).get(); + Assertions.assertThrows(ExecutionException.class, () -> manager.commitWalObject(walObject, s3StreamObjects, compactedObjects).get()); } } @@ -850,13 +767,7 @@ public void testCommitWalObject_WalNotExist() throws IOException, ExecutionExcep } List compactedObjects = new ArrayList<>(); - try (DefaultMetadataStore metadataStore = new DefaultMetadataStore(client, getSessionFactory(), config)) { - metadataStore.start(); - Awaitility.await().with() - .pollInterval(100, TimeUnit.MILLISECONDS) - .atMost(10, TimeUnit.SECONDS) - .until(metadataStore::isLeader); - + try (S3MetadataService manager = new DefaultS3MetadataService(config, getSessionFactory(), executorService)) { calendar.add(Calendar.HOUR, 2); S3StreamObject streamObject = S3StreamObject.newBuilder() .setObjectId(objectId) @@ -869,7 +780,7 @@ public void testCommitWalObject_WalNotExist() throws IOException, ExecutionExcep List s3StreamObjects = new ArrayList<>(); s3StreamObjects.add(streamObject); - metadataStore.commitWalObject(walObject, s3StreamObjects, compactedObjects).get(); + manager.commitWalObject(walObject, s3StreamObjects, compactedObjects).get(); } try (SqlSession session = getSessionFactory().openSession()) { diff --git a/proto/src/main/proto/controller.proto b/proto/src/main/proto/controller.proto index 810e18993..182b469d3 100644 --- a/proto/src/main/proto/controller.proto +++ b/proto/src/main/proto/controller.proto @@ -505,19 +505,7 @@ service ControllerService { // The associated queue can be reassigned after the stream is closed. rpc closeStream(CloseStreamRequest) returns (CloseStreamReply) {} - // Trim a stream to a new start offset. - rpc trimStream(TrimStreamRequest) returns (TrimStreamReply) {} - // List all streams opened on a specific broker. rpc listOpenStreams(ListOpenStreamsRequest) returns (ListOpenStreamsReply) {} - - // Prepare S3 objects before uploading. - rpc prepareS3Objects(PrepareS3ObjectsRequest) returns (PrepareS3ObjectsReply) {} - - // Commit a WAL object. - rpc commitWALObject(CommitWALObjectRequest) returns (CommitWALObjectReply) {} - - // Commit a stream object. - rpc commitStreamObject(CommitStreamObjectRequest) returns (CommitStreamObjectReply) {} }