KAFKA-17259: Support to override serverProperties and restart cluster in ClusterTestExtensions #20

Open · wants to merge 1 commit into base: trunk
@@ -58,11 +58,13 @@
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
@@ -314,8 +316,9 @@ private static void setupNodeDirectories(File baseDirectory,
private final Map<Integer, BrokerServer> brokers;
private final File baseDirectory;
private final SimpleFaultHandlerFactory faultHandlerFactory;
- private final PreboundSocketFactoryManager socketFactoryManager;
+ private PreboundSocketFactoryManager socketFactoryManager;
private final String controllerListenerName;
+ private Map<Integer, Set<String>> nodeIdToListeners = new HashMap<>();

private KafkaClusterTestKit(
TestKitNodes nodes,
@@ -437,6 +440,130 @@ public void startup() throws ExecutionException, InterruptedException {
}
}

public void shutdown() throws Exception {
    List<Entry<String, Future<?>>> futureEntries = new ArrayList<>();
    try {
        // Note the shutdown order here is chosen to be consistent with
        // `KafkaRaftServer`. See comments in that class for an explanation.
        for (Entry<Integer, BrokerServer> entry : brokers.entrySet()) {
            int brokerId = entry.getKey();
            BrokerServer broker = entry.getValue();
            nodeIdToListeners.computeIfAbsent(brokerId, __ -> new HashSet<>());
            Set<String> listeners = nodeIdToListeners.get(brokerId);
            broker.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
                listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
            });
            if (!broker.socketServer().controlPlaneAcceptorOpt().isEmpty()) {
                listeners.add(broker.socketServer().controlPlaneAcceptorOpt().get().endPoint().listenerName().value() + "://" +
                    broker.socketServer().controlPlaneAcceptorOpt().get().endPoint().host() + ":" +
                    broker.socketServer().controlPlaneAcceptorOpt().get().localPort());
            }
Comment on lines +451 to +460

medium

Consider extracting this logic for collecting listeners into a separate, well-named method to improve readability and maintainability. This would also reduce code duplication, as the same logic is repeated for brokers and controllers.

private Set<String> collectListeners(SharedServer server, int nodeId) {
    Set<String> listeners = nodeIdToListeners.computeIfAbsent(nodeId, __ -> new HashSet<>());
    server.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
        listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
    });
    if (!server.socketServer().controlPlaneAcceptorOpt().isEmpty()) {
        listeners.add(server.socketServer().controlPlaneAcceptorOpt().get().endPoint().listenerName().value() + "://" +
            server.socketServer().controlPlaneAcceptorOpt().get().endPoint().host() + ":" +
            server.socketServer().controlPlaneAcceptorOpt().get().localPort());
    }
    return listeners;
}

// Usage in shutdown method:
int brokerId = entry.getKey();
BrokerServer broker = entry.getValue();
Set<String> listeners = collectListeners(broker.sharedServer(), brokerId);
nodeIdToListeners.put(brokerId, listeners);

            nodeIdToListeners.put(brokerId, listeners);
            futureEntries.add(new SimpleImmutableEntry<>("broker" + brokerId,
                executorService.submit((Runnable) broker::shutdown)));
        }
        waitForAllFutures(futureEntries);
        futureEntries.clear();
        for (Entry<Integer, ControllerServer> entry : controllers.entrySet()) {
            int controllerId = entry.getKey();
            ControllerServer controller = entry.getValue();
            nodeIdToListeners.computeIfAbsent(controllerId, __ -> new HashSet<>());
            Set<String> listeners = nodeIdToListeners.get(controllerId);
            controller.socketServer().dataPlaneAcceptors().forEach((endpoint, acceptor) -> {
                listeners.add(endpoint.listenerName().value() + "://" + endpoint.host() + ":" + acceptor.localPort());
            });
            if (!controller.socketServer().controlPlaneAcceptorOpt().isEmpty()) {
                listeners.add(controller.socketServer().controlPlaneAcceptorOpt().get().endPoint().listenerName().value() + "://" +
                    controller.socketServer().controlPlaneAcceptorOpt().get().endPoint().host() + ":" +
                    controller.socketServer().controlPlaneAcceptorOpt().get().localPort());
            }
            nodeIdToListeners.put(controllerId, listeners);
            futureEntries.add(new SimpleImmutableEntry<>("controller" + controllerId,
                executorService.submit(controller::shutdown)));
        }
        waitForAllFutures(futureEntries);
        futureEntries.clear();
        socketFactoryManager.close();
Comment on lines +443 to +486


⚠️ Potential issue

shutdown() assumes acceptors are present – restart will break if the cluster was never started

broker.socketServer().dataPlaneAcceptors() and the control-plane accessor are empty until startup() has bound the ports.
When they are empty, the listener set cached for that node stays empty (or null if shutdown() never ran at all), so restart() builds the LISTENERS config from nothing, producing a blank listener list or an NPE.

Either bail out early when the servers were never started, or initialise nodeIdToListeners with the original listener strings from the config:

-                nodeIdToListeners.computeIfAbsent(brokerId, __ -> new HashSet<>());
+                nodeIdToListeners
+                        .computeIfAbsent(brokerId, __ ->
+                                new HashSet<>(List.of(broker.config()
+                                    .originals()
+                                    .getOrDefault(SocketServerConfigs.LISTENERS_CONFIG, "")
+                                    .toString().split(","))));

A similar fix is needed for the controller loop below.
Without this, any test that formats the cluster but calls restart() before startup() will consistently fail.
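
For the first option, a minimal sketch of an early exit is shown below; the started flag is hypothetical, since this PR does not track whether startup() has already run:

// Hypothetical guard at the top of shutdown(); assumes a 'started'
// AtomicBoolean flipped by startup(), which this PR does not add.
if (!started.get()) {
    log.info("Cluster was never started; skipping listener capture and shutdown.");
    return;
}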

    } catch (Exception e) {
        for (Entry<String, Future<?>> entry : futureEntries) {
            entry.getValue().cancel(true);
        }
        throw e;
    }
}

public void restart(Map<Integer, Map<String, Object>> perServerOverriddenConfig) throws Exception {


medium

The restart path depends on shutdown() completing cleanly, so shutdown() must be robust and handle exceptions gracefully. Consider adding more specific exception handling and logging so that any failure during shutdown is surfaced before the restart proceeds:

    public void restart(Map<Integer, Map<String, Object>> perServerOverriddenConfig) throws Exception {
        try {
            shutdown();
        } catch (Exception e) {
            log.error("Exception during shutdown: {}", e.getMessage(), e);
            throw e; // Re-throw the exception to prevent restart
        }

    shutdown();

    Map<Integer, SharedServer> jointServers = new HashMap<>();

    socketFactoryManager = new PreboundSocketFactoryManager();


high

Reinitializing socketFactoryManager here might lead to resource leaks if the old socket factories are not properly closed. Ensure that the old socket factories are closed before creating new ones.

        if (socketFactoryManager != null) {
            try {
                socketFactoryManager.close();
            } catch (Exception e) {
                log.warn("Exception while closing socketFactoryManager: {}", e.getMessage(), e);
            }
        }
        socketFactoryManager = new PreboundSocketFactoryManager();

    controllers.forEach((id, controller) -> {
        Map<String, Object> config = controller.config().originals();
        config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
        config.putAll(perServerOverriddenConfig.getOrDefault(id, Collections.emptyMap()));
        config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", nodeIdToListeners.get(id)));


medium

This line uses String.join to build a comma-separated listener string. Ensure that nodeIdToListeners holds the correct listener information for each node before joining, and consider guarding against a missing or empty set:

            Set<String> listenersForNode = nodeIdToListeners.get(id);
            if (listenersForNode == null || listenersForNode.isEmpty()) {
                log.warn("No listeners found for node {}", id);
                // Handle the case where there are no listeners, possibly by using a default listener
            }
            config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", listenersForNode != null ? listenersForNode : Collections.emptySet()));


Comment on lines +495 to +506


🛠️ Refactor suggestion

Port reservations are lost on restart – risk of “Address already in use”

restart() creates a fresh PreboundSocketFactoryManager, but the new manager is not told to re-use the previously
bound ports captured in nodeIdToListeners.
If another process grabs one of those ports between shutdown and restart, the cluster start-up will fail.

Consider feeding the cached listener URIs back into the new manager before constructing servers, e.g.:

socketFactoryManager = new PreboundSocketFactoryManager();
nodeIdToListeners.forEach((id, listeners) ->
        listeners.forEach(l -> socketFactoryManager.reserve(id, l)));

(or expose a helper in PreboundSocketFactoryManager).
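
For illustration only, a rough sketch of what holding on to a previously used port could look like; this is not the actual PreboundSocketFactoryManager API, and the NAME://host:port listener format is assumed from the strings captured in nodeIdToListeners:

import java.net.InetSocketAddress;
import java.net.ServerSocket;

/**
 * Hypothetical sketch: hold a placeholder ServerSocket on a previously used port
 * so that nothing else can grab it between shutdown() and the next startup().
 * Assumes listener strings of the form NAME://host:port; not part of the real
 * PreboundSocketFactoryManager API.
 */
final class PortReservation implements AutoCloseable {
    private final ServerSocket socket;

    PortReservation(String listener) throws Exception {
        String hostPort = listener.substring(listener.indexOf("://") + 3);
        int port = Integer.parseInt(hostPort.substring(hostPort.lastIndexOf(':') + 1));
        socket = new ServerSocket();
        socket.setReuseAddress(true);     // let the restarted server rebind promptly
        socket.bind(new InetSocketAddress(port));
    }

    @Override
    public void close() throws Exception {
        socket.close();                   // release just before the server rebinds the port
    }
}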

        TestKitNode node = nodes.controllerNodes().get(id);
        KafkaConfig nodeConfig = new KafkaConfig(config, false);
        SharedServer sharedServer = new SharedServer(
            nodeConfig,
            node.initialMetaPropertiesEnsemble(),
            Time.SYSTEM,
            new Metrics(),
            CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(nodeConfig.quorumConfig().voters())),
            Collections.emptyList(),
            faultHandlerFactory,
            socketFactoryManager.getOrCreateSocketFactory(node.id())
        );
        try {
            controller = new ControllerServer(
                sharedServer,
                KafkaRaftServer.configSchema(),
                nodes.bootstrapMetadata());
        } catch (Throwable e) {
            log.error("Error creating controller {}", node.id(), e);
            Utils.swallow(log, Level.WARN, "sharedServer.stopForController error", sharedServer::stopForController);
            throw e;
Comment on lines +524 to +527


medium

The cleanup here uses Utils.swallow, which suppresses any exception thrown by stopForController. That can hide useful information about why cleanup failed after controller creation threw. Consider logging the suppressed error in more detail, or wrapping the original failure in a more specific exception, to aid debugging.

            } catch (Throwable e) {
                log.error("Error creating controller {}", node.id(), e);
                try {
                    sharedServer.stopForController();
                } catch (Throwable e2) {
                    log.warn("sharedServer.stopForController error", e2);
                }
                throw new RuntimeException("Error creating controller " + node.id(), e);
            }

        }
        controllers.put(node.id(), controller);
        jointServers.put(node.id(), sharedServer);
    });

    brokers.forEach((id, broker) -> {
        Map<String, Object> config = broker.config().originals();
        config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
        config.putAll(perServerOverriddenConfig.getOrDefault(id, Collections.emptyMap()));
        config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", nodeIdToListeners.get(id)));

        TestKitNode node = nodes.brokerNodes().get(id);
        KafkaConfig nodeConfig = new KafkaConfig(config);
        SharedServer sharedServer = jointServers.computeIfAbsent(
            node.id(),
Comment on lines +533 to +542


⚠️ Potential issue

Mutating the live config map may have side-effects

controller.config().originals() and broker.config().originals() return the live backing map of the old
KafkaConfig. Mutating it after shutdown is risky (if another thread still holds a reference) and obscures intent.

Use a defensive copy before modifications:

-            Map<String, Object> config = broker.config().originals();
+            Map<String, Object> config = new HashMap<>(broker.config().originals());

Apply the same change to the controller block above.

📝 Committable suggestion


Suggested change
    brokers.forEach((id, broker) -> {
-       Map<String, Object> config = broker.config().originals();
+       Map<String, Object> config = new HashMap<>(broker.config().originals());
        config.putAll(perServerOverriddenConfig.getOrDefault(-1, Collections.emptyMap()));
        config.putAll(perServerOverriddenConfig.getOrDefault(id, Collections.emptyMap()));
        config.put(SocketServerConfigs.LISTENERS_CONFIG, String.join(",", nodeIdToListeners.get(id)));
        TestKitNode node = nodes.brokerNodes().get(id);
        KafkaConfig nodeConfig = new KafkaConfig(config);
        SharedServer sharedServer = jointServers.computeIfAbsent(
            node.id(),

            nodeId -> new SharedServer(
                nodeConfig,
                node.initialMetaPropertiesEnsemble(),
                Time.SYSTEM,
                new Metrics(),
                CompletableFuture.completedFuture(QuorumConfig.parseVoterConnections(nodeConfig.quorumConfig().voters())),
                Collections.emptyList(),
                faultHandlerFactory,
                socketFactoryManager.getOrCreateSocketFactory(node.id())
            )
        );
        try {
            broker = new BrokerServer(sharedServer);
        } catch (Throwable e) {
            log.error("Error creating broker {}", node.id(), e);
            Utils.swallow(log, Level.WARN, "sharedServer.stopForBroker error", sharedServer::stopForBroker);
            throw e;
Comment on lines +556 to +559


medium

Similar to the controller creation, the cleanup here uses Utils.swallow, which suppresses any exception from stopForBroker. Consider re-throwing a more specific exception or logging additional details to aid debugging.

            } catch (Throwable e) {
                log.error("Error creating broker {}", node.id(), e);
                try {
                    sharedServer.stopForBroker();
                } catch (Throwable e2) {
                    log.warn("sharedServer.stopForBroker error", e2);
                }
                throw new RuntimeException("Error creating broker " + node.id(), e);
            }

        }
        brokers.put(node.id(), broker);
    });

    startup();
}

/**
 * Wait for a controller to mark all the brokers as ready (registered and unfenced).
 * And also wait for the metadata cache up-to-date in each broker server.
@@ -155,6 +155,12 @@ default SocketServer anyControllerSocketServer() {
.orElseThrow(() -> new RuntimeException("No controller SocketServers found"));
}

default void restart() throws Exception {
    restart(Map.of());
}

void restart(Map<Integer, Map<String, Object>> perServerConfigOverrides) throws Exception;

String clusterId();

//---------------------------[producer/consumer/admin]---------------------------//
@@ -193,6 +193,11 @@ public void stop() {
}
}

@Override
public void restart(Map<Integer, Map<String, Object>> perServerConfigOverrides) throws Exception {
    clusterTestKit.restart(perServerConfigOverrides);
}
Comment on lines +196 to +199


⚠️ Potential issue

Guard against restarting an instance that has never been started

If a test calls clusterInstance.restart() before start(), the underlying KafkaClusterTestKit#shutdown tries to gather listener
information from SocketServer acceptors that are not yet created, resulting in NPEs and an empty LISTENERS config on restart.

Add a fast-fail guard:

@@
     @Override
     public void restart(Map<Integer, Map<String, Object>> perServerConfigOverrides) throws Exception {
-        clusterTestKit.restart(perServerConfigOverrides);
+        if (!started.get()) {
+            throw new IllegalStateException("Cannot restart a cluster that has not been started");
+        }
+        clusterTestKit.restart(perServerConfigOverrides);
     }
📝 Committable suggestion


Suggested change
    @Override
    public void restart(Map<Integer, Map<String, Object>> perServerConfigOverrides) throws Exception {
+       if (!started.get()) {
+           throw new IllegalStateException("Cannot restart a cluster that has not been started");
+       }
        clusterTestKit.restart(perServerConfigOverrides);
    }


@Override
public void shutdownBroker(int brokerId) {
findBrokerOrThrow(brokerId).shutdown();
@@ -331,4 +331,32 @@ public void testControllerListenerName(ClusterInstance cluster) throws Execution
assertEquals(1, admin.describeMetadataQuorum().quorumInfo().get().nodes().size());
}
}

@ClusterTest(types = {Type.CO_KRAFT, Type.KRAFT}, serverProperties = {
    @ClusterConfigProperty(key = "offset.storage.replication.factor", value = "1"),
})
public void testRestartWithOverriddenConfig(ClusterInstance clusterInstance) throws Exception {
    clusterInstance.restart(Collections.singletonMap(-1, Collections.singletonMap("default.replication.factor", 2)));
    clusterInstance.waitForReadyBrokers();
    clusterInstance.brokers().values().forEach(broker -> {
        Assertions.assertEquals(2, broker.config().getInt("default.replication.factor"));
    });
    clusterInstance.controllers().values().forEach(controller -> {
        Assertions.assertEquals(2, controller.config().getInt("default.replication.factor"));
    });
}

@ClusterTest(types = {Type.CO_KRAFT, Type.KRAFT}, serverProperties = {
    @ClusterConfigProperty(key = "offset.storage.replication.factor", value = "1"),
})
public void testRestartWithoutOverriddenConfig(ClusterInstance clusterInstance) throws Exception {
    clusterInstance.restart();
    clusterInstance.waitForReadyBrokers();
    clusterInstance.brokers().values().forEach(broker -> {
        Assertions.assertEquals(1, broker.config().getInt("default.replication.factor"));
    });
    clusterInstance.controllers().values().forEach(controller -> {
        Assertions.assertEquals(1, controller.config().getInt("default.replication.factor"));
    });
}
}
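
Both new tests exercise only the cluster-wide key (-1) and the no-argument overload. Going by the putAll order in the restart() implementation above (the -1 entry is applied first, then the entry keyed by the node's own id), a per-node override would look roughly like the sketch below; the broker id 0 and the chosen config key are illustrative only:

// Hypothetical usage sketch (not part of this PR): combine a cluster-wide
// override with a per-node override. The -1 entry applies to every node; an
// entry keyed by a node id applies only to that node and, because it is applied
// after the -1 entry, wins on conflicts.
Map<Integer, Map<String, Object>> overrides = new HashMap<>();
overrides.put(-1, Collections.singletonMap("default.replication.factor", 2)); // every broker and controller
overrides.put(0, Collections.singletonMap("num.network.threads", 4));         // broker id 0 only (illustrative)
clusterInstance.restart(overrides);
clusterInstance.waitForReadyBrokers();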