[KO-259] Changing ports for headless service. (#250)
* Changing ports for headless service when moving TLS to non-TLS and vice versa.

* Changing ports for the LB service and moving headless service creation out of rack-level reconciliation.
tanmayja authored Oct 2, 2023
1 parent 28c323b commit 8fcd986
Showing 8 changed files with 273 additions and 195 deletions.
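
To illustrate the intent of the change (this is a minimal sketch, not the operator's actual implementation): the headless Service is now created or updated in one cluster-level place, and its port list is rebuilt from the current TLS setting, so switching between TLS and non-TLS replaces stale ports instead of leaving them behind. The function name, port numbers, and port names below are assumptions for the sketch; only the generic controller-runtime CreateOrUpdate helper is real API.

```go
// Minimal sketch only; names and ports are assumed, not taken from the operator.
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// createOrUpdateHeadlessSvc reconciles a headless Service and rebuilds its port
// list from the current TLS setting, so moving TLS <-> non-TLS swaps the ports.
func createOrUpdateHeadlessSvc(ctx context.Context, c client.Client, ns, name string, tlsEnabled bool) error {
	svc := &corev1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: ns, Name: name}}

	_, err := controllerutil.CreateOrUpdate(ctx, c, svc, func() error {
		svc.Spec.ClusterIP = corev1.ClusterIPNone // headless
		// Replace the ports wholesale so a stale TLS or non-TLS port does not linger.
		if tlsEnabled {
			svc.Spec.Ports = []corev1.ServicePort{{Name: "tls-service", Port: 4333}} // assumed TLS port
		} else {
			svc.Spec.Ports = []corev1.ServicePort{{Name: "service", Port: 3000}} // assumed non-TLS port
		}

		return nil
	})

	return err
}
```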
35 changes: 10 additions & 25 deletions controllers/rack.go
@@ -59,7 +59,7 @@ func (r *SingleClusterReconciler) reconcileRacks() reconcileResult {
 		ignorablePodNames,
 	)
 
-	// handle failed racks
+	// Handle failed racks
 	for idx := range rackStateList {
 		var podList []*corev1.Pod
 
@@ -232,11 +232,6 @@ func (r *SingleClusterReconciler) createEmptyRack(rackState *RackState) (
 	// NoOp if already exist
 	r.Log.Info("AerospikeCluster", "Spec", r.aeroCluster.Spec)
 
-	if err := r.createSTSHeadlessSvc(); err != nil {
-		r.Log.Error(err, "Failed to create headless service")
-		return nil, reconcileError(err)
-	}
-
 	// Bad config should not come here. It should be validated in validation hook
 	cmName := getNamespacedNameForSTSConfigMap(r.aeroCluster, rackState.Rack.ID)
 	if err := r.buildSTSConfigMap(cmName, rackState.Rack); err != nil {
@@ -584,17 +579,8 @@ func (r *SingleClusterReconciler) scaleUpRack(found *appsv1.StatefulSet, rackSta
 	}
 
 	// Create pod service for the scaled up pod when node network is used in network policy
-	if podServiceNeeded(r.aeroCluster.Spec.PodSpec.MultiPodPerHost, &r.aeroCluster.Spec.AerospikeNetworkPolicy) {
-		// Create services for each pod
-		for _, podName := range newPodNames {
-			if err = r.createPodService(
-				podName, r.aeroCluster.Namespace,
-			); err != nil {
-				if !errors.IsAlreadyExists(err) {
-					return found, reconcileError(err)
-				}
-			}
-		}
-	}
+	if err = r.createOrUpdatePodServiceIfNeeded(newPodNames); err != nil {
+		return nil, reconcileError(err)
+	}
 
 	// update replicas here to avoid new replicas count comparison while cleaning up dangling pods of rack
@@ -701,12 +687,11 @@ func (r *SingleClusterReconciler) upgradeRack(statefulSet *appsv1.StatefulSet, r
 			"rollingUpdateBatchSize", r.aeroCluster.Spec.RackConfig.RollingUpdateBatchSize,
 		)
 
-		if err = r.createOrUpdatePodServiceIfNeeded(podsBatch); err != nil {
+		podNames := getPodNames(podsBatch)
+		if err = r.createOrUpdatePodServiceIfNeeded(podNames); err != nil {
 			return nil, reconcileError(err)
 		}
 
-		podNames := getPodNames(podsBatch)
-
 		r.Recorder.Eventf(
 			r.aeroCluster, corev1.EventTypeNormal, "PodImageUpdate",
 			"[rack-%d] Updating Containers on Pods %v", rackState.Rack.ID, podNames,
@@ -833,7 +818,7 @@ func (r *SingleClusterReconciler) scaleDownRack(
 	// If scale down leads to unavailable or dead partition then we should scale up the cluster,
 	// This can be left to the user but if we would do it here on our own then we can reuse
 	// objects like pvc and service. These objects would have been removed if scaleup is left for the user.
-	// In case of rolling restart, no pod cleanup happens, therefor rolling config back is left to the user.
+	// In case of rolling restart, no pod cleanup happens, therefore rolling config back is left to the user.
 	if err = r.validateSCClusterState(policy, ignorablePods); err != nil {
 		// reset cluster size
 		newSize := *found.Spec.Replicas + 1
@@ -968,7 +953,7 @@ func (r *SingleClusterReconciler) rollingRestartRack(found *appsv1.StatefulSet,
 	var podsBatchList [][]*corev1.Pod
 
 	if len(failedPods) != 0 {
-		// creating a single batch of all failed pods in a rack, irrespective of batch size
+		// Creating a single batch of all failed pods in a rack, irrespective of batch size
 		r.Log.Info("Skipping batchSize for failed pods")
 
 		podsBatchList = make([][]*corev1.Pod, 1)
@@ -991,12 +976,12 @@
 			"rollingUpdateBatchSize", r.aeroCluster.Spec.RackConfig.RollingUpdateBatchSize,
 		)
 
-		if err = r.createOrUpdatePodServiceIfNeeded(podsBatch); err != nil {
+		podNames := getPodNames(podsBatch)
+		if err = r.createOrUpdatePodServiceIfNeeded(podNames); err != nil {
 			return nil, reconcileError(err)
 		}
 
-		res := r.rollingRestartPods(rackState, podsBatch, ignorablePods, restartTypeMap)
-		if !res.isSuccess {
+		if res := r.rollingRestartPods(rackState, podsBatch, ignorablePods, restartTypeMap); !res.isSuccess {
			return found, res
 		}
 
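For context on the rack.go refactor above, where the inline per-pod service loop is replaced by a single createOrUpdatePodServiceIfNeeded(podNames) call: a per-pod Service typically targets exactly one StatefulSet pod through its pod-name label. The sketch below is illustrative only; the object shape, port numbers, and port names are assumptions, not the operator's code, and the real helper also handles updates and network-policy gating.

```go
// Illustrative sketch; ports and names here are assumptions.
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podServiceFor builds a Service that resolves to a single StatefulSet pod,
// with its port chosen from the cluster's current TLS setting.
func podServiceFor(namespace, podName string, tlsEnabled bool) *corev1.Service {
	port := corev1.ServicePort{Name: "service", Port: 3000} // assumed non-TLS port
	if tlsEnabled {
		port = corev1.ServicePort{Name: "tls-service", Port: 4333} // assumed TLS port
	}

	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: podName},
		Spec: corev1.ServiceSpec{
			// StatefulSet pods carry this label, so the selector matches exactly one pod.
			Selector: map[string]string{"statefulset.kubernetes.io/pod-name": podName},
			Ports:    []corev1.ServicePort{port},
		},
	}
}
```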
13 changes: 12 additions & 1 deletion controllers/reconciler.go
@@ -89,6 +89,17 @@ func (r *SingleClusterReconciler) Reconcile() (ctrl.Result, error) {
 		}
 	}
 
+	if err := r.createOrUpdateSTSHeadlessSvc(); err != nil {
+		r.Log.Error(err, "Failed to create headless service")
+		r.Recorder.Eventf(
+			r.aeroCluster, corev1.EventTypeWarning, "ServiceCreateFailed",
+			"Failed to create Service(Headless) %s/%s",
+			r.aeroCluster.Namespace, r.aeroCluster.Name,
+		)
+
+		return reconcile.Result{}, err
+	}
+
 	// Reconcile all racks
 	if res := r.reconcileRacks(); !res.isSuccess {
 		if res.err != nil {
@@ -102,7 +113,7 @@ func (r *SingleClusterReconciler) Reconcile() (ctrl.Result, error) {
 		return res.getResult()
 	}
 
-	if err := r.createSTSLoadBalancerSvc(); err != nil {
+	if err := r.createOrUpdateSTSLoadBalancerSvc(); err != nil {
 		r.Log.Error(err, "Failed to create LoadBalancer service")
 		r.Recorder.Eventf(
 			r.aeroCluster, corev1.EventTypeWarning, "ServiceCreateFailed",