diff --git a/api/v1/aerospikecluster_types.go b/api/v1/aerospikecluster_types.go
index 4f0ad56bc..028cb68ab 100644
--- a/api/v1/aerospikecluster_types.go
+++ b/api/v1/aerospikecluster_types.go
@@ -78,6 +78,9 @@ type AerospikeClusterSpec struct { //nolint:govet // for readability
 	// RosterNodeBlockList is a list of blocked nodeIDs from roster in a strong-consistency setup
 	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Roster Node BlockList"
 	RosterNodeBlockList []string `json:"rosterNodeBlockList,omitempty"`
+	// K8sNodeBlockList is a list of Kubernetes nodes which are not used for Aerospike pods.
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,displayName="Kubernetes Node BlockList"
+	K8sNodeBlockList []string `json:"k8sNodeBlockList,omitempty"`
 }
 
 type SeedsFinderServices struct {
@@ -568,9 +571,12 @@ type AerospikeStorageSpec struct { //nolint:govet // for readability
 	// BlockVolumePolicy contains default policies for block volumes.
 	BlockVolumePolicy AerospikePersistentVolumePolicySpec `json:"blockVolumePolicy,omitempty"`
 
-	// CleanupThreads contains maximum number of cleanup threads(dd or blkdiscard) per init container.
+	// CleanupThreads contains the maximum number of cleanup threads(dd or blkdiscard) per init container.
 	CleanupThreads int `json:"cleanupThreads,omitempty"`
 
+	// LocalStorageClasses contains a list of storage classes which provision local volumes.
+	LocalStorageClasses []string `json:"localStorageClasses,omitempty"`
+
 	// Volumes list to attach to created pods.
 	// +patchMergeKey=name
 	// +patchStrategy=merge
@@ -633,6 +639,8 @@ type AerospikeClusterStatusSpec struct { //nolint:govet // for readability
 	SeedsFinderServices SeedsFinderServices `json:"seedsFinderServices,omitempty"`
 	// RosterNodeBlockList is a list of blocked nodeIDs from roster in a strong-consistency setup
 	RosterNodeBlockList []string `json:"rosterNodeBlockList,omitempty"`
+	// K8sNodeBlockList is a list of Kubernetes nodes which are not used for Aerospike pods.
+	K8sNodeBlockList []string `json:"k8sNodeBlockList,omitempty"`
 }
 
 // AerospikeClusterStatus defines the observed state of AerospikeCluster
@@ -956,6 +964,16 @@ func CopySpecToStatus(spec *AerospikeClusterSpec) (*AerospikeClusterStatusSpec,
 		status.RosterNodeBlockList = rosterNodeBlockList
 	}
 
+	if len(spec.K8sNodeBlockList) != 0 {
+		var k8sNodeBlockList []string
+
+		lib.DeepCopy(
+			&k8sNodeBlockList, &spec.K8sNodeBlockList,
+		)
+
+		status.K8sNodeBlockList = k8sNodeBlockList
+	}
+
 	return &status, nil
 }
 
@@ -1047,5 +1065,15 @@ func CopyStatusToSpec(status *AerospikeClusterStatusSpec) (*AerospikeClusterSpec
 		spec.RosterNodeBlockList = rosterNodeBlockList
 	}
 
+	if len(status.K8sNodeBlockList) != 0 {
+		var k8sNodeBlockList []string
+
+		lib.DeepCopy(
+			&k8sNodeBlockList, &status.K8sNodeBlockList,
+		)
+
+		spec.K8sNodeBlockList = k8sNodeBlockList
+	}
+
 	return &spec, nil
 }
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index d28a8d239..489353971 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -188,6 +188,11 @@ func (in *AerospikeClusterSpec) DeepCopyInto(out *AerospikeClusterSpec) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.K8sNodeBlockList != nil {
+		in, out := &in.K8sNodeBlockList, &out.K8sNodeBlockList
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeClusterSpec.
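Aside: a minimal sketch (not part of this diff) of how a client of this API might populate the two new spec fields. The import path follows the repository layout above but is an assumption, as are the node and storage-class names; it also assumes the spec's storage field is named Storage, as used elsewhere in this API.

package main

import (
    "fmt"

    asdbv1 "github.com/aerospike/aerospike-kubernetes-operator/api/v1"
)

func main() {
    spec := asdbv1.AerospikeClusterSpec{
        // Hypothetical node names; Aerospike pods are kept off these nodes.
        K8sNodeBlockList: []string{"worker-1", "worker-2"},
    }

    // Hypothetical storage class assumed to provision local volumes.
    spec.Storage.LocalStorageClasses = []string{"local-ssd"}

    fmt.Println(spec.K8sNodeBlockList, spec.Storage.LocalStorageClasses)
}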
@@ -265,6 +270,11 @@ func (in *AerospikeClusterStatusSpec) DeepCopyInto(out *AerospikeClusterStatusSp
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.K8sNodeBlockList != nil {
+		in, out := &in.K8sNodeBlockList, &out.K8sNodeBlockList
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AerospikeClusterStatusSpec.
@@ -662,6 +672,11 @@ func (in *AerospikeStorageSpec) DeepCopyInto(out *AerospikeStorageSpec) {
 	*out = *in
 	in.FileSystemVolumePolicy.DeepCopyInto(&out.FileSystemVolumePolicy)
 	in.BlockVolumePolicy.DeepCopyInto(&out.BlockVolumePolicy)
+	if in.LocalStorageClasses != nil {
+		in, out := &in.LocalStorageClasses, &out.LocalStorageClasses
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	if in.Volumes != nil {
 		in, out := &in.Volumes, &out.Volumes
 		*out = make([]VolumeSpec, len(*in))
diff --git a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml
index b154f1768..fdd1e6820 100644
--- a/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml
+++ b/config/crd/bases/asdb.aerospike.com_aerospikeclusters.yaml
@@ -280,6 +280,12 @@ spec:
           image:
             description: Aerospike server image
             type: string
+          k8sNodeBlockList:
+            description: K8sNodeBlockList is a list of Kubernetes nodes which
+              are not used for Aerospike pods.
+            items:
+              type: string
+            type: array
           maxUnavailable:
             anyOf:
             - type: integer
@@ -5739,7 +5745,7 @@
                             type: string
                         type: object
                       cleanupThreads:
-                        description: CleanupThreads contains maximum number
+                        description: CleanupThreads contains the maximum number
                           of cleanup threads(dd or blkdiscard) per init container.
                         type: integer
                       filesystemVolumePolicy:
@@ -5794,6 +5800,12 @@
                             - deleteFiles
                             type: string
                         type: object
+                      localStorageClasses:
+                        description: LocalStorageClasses contains a list of
+                          storage classes which provision local volumes.
+                        items:
+                          type: string
+                        type: array
                       volumes:
                         description: Volumes list to attach to created pods.
                         items:
@@ -7427,7 +7439,7 @@
                             type: string
                         type: object
                       cleanupThreads:
-                        description: CleanupThreads contains maximum number
+                        description: CleanupThreads contains the maximum number
                           of cleanup threads(dd or blkdiscard) per init container.
                         type: integer
                       filesystemVolumePolicy:
@@ -7482,6 +7494,12 @@
                             - deleteFiles
                             type: string
                         type: object
+                      localStorageClasses:
+                        description: LocalStorageClasses contains a list of
+                          storage classes which provision local volumes.
+                        items:
+                          type: string
+                        type: array
                       volumes:
                         description: Volumes list to attach to created pods.
                         items:
@@ -8131,7 +8149,7 @@
                 type: string
             type: object
           cleanupThreads:
-            description: CleanupThreads contains maximum number of cleanup
+            description: CleanupThreads contains the maximum number of cleanup
               threads(dd or blkdiscard) per init container.
             type: integer
           filesystemVolumePolicy:
@@ -8186,6 +8204,12 @@
                 - deleteFiles
                 type: string
             type: object
+          localStorageClasses:
+            description: LocalStorageClasses contains a list of storage classes
+              which provision local volumes.
+            items:
+              type: string
+            type: array
           volumes:
             description: Volumes list to attach to created pods.
             items:
@@ -8915,6 +8939,12 @@
           image:
             description: Aerospike server image
             type: string
+          k8sNodeBlockList:
+            description: K8sNodeBlockList is a list of Kubernetes nodes which
+              are not used for Aerospike pods.
+            items:
+              type: string
+            type: array
           maxUnavailable:
             anyOf:
             - type: integer
@@ -14502,7 +14532,7 @@
                             type: string
                         type: object
                       cleanupThreads:
-                        description: CleanupThreads contains maximum number
+                        description: CleanupThreads contains the maximum number
                           of cleanup threads(dd or blkdiscard) per init container.
                         type: integer
                       filesystemVolumePolicy:
@@ -14557,6 +14587,12 @@
                             - deleteFiles
                             type: string
                         type: object
+                      localStorageClasses:
+                        description: LocalStorageClasses contains a list of
+                          storage classes which provision local volumes.
+                        items:
+                          type: string
+                        type: array
                       volumes:
                         description: Volumes list to attach to created pods.
                         items:
@@ -16190,7 +16226,7 @@
                             type: string
                         type: object
                       cleanupThreads:
-                        description: CleanupThreads contains maximum number
+                        description: CleanupThreads contains the maximum number
                           of cleanup threads(dd or blkdiscard) per init container.
                         type: integer
                       filesystemVolumePolicy:
@@ -16245,6 +16281,12 @@
                             - deleteFiles
                             type: string
                         type: object
+                      localStorageClasses:
+                        description: LocalStorageClasses contains a list of
+                          storage classes which provision local volumes.
+                        items:
+                          type: string
+                        type: array
                       volumes:
                         description: Volumes list to attach to created pods.
                         items:
@@ -16945,7 +16987,7 @@
                 type: string
             type: object
           cleanupThreads:
-            description: CleanupThreads contains maximum number of cleanup
+            description: CleanupThreads contains the maximum number of cleanup
               threads(dd or blkdiscard) per init container.
             type: integer
           filesystemVolumePolicy:
@@ -17000,6 +17042,12 @@
                 - deleteFiles
                 type: string
             type: object
+          localStorageClasses:
+            description: LocalStorageClasses contains a list of storage classes
+              which provision local volumes.
+            items:
+              type: string
+            type: array
           volumes:
             description: Volumes list to attach to created pods.
             items:
diff --git a/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml b/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml
index 7d00a72af..e9cec80a0 100644
--- a/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml
+++ b/config/manifests/bases/aerospike-kubernetes-operator.clusterserviceversion.yaml
@@ -50,6 +50,10 @@ spec:
       - description: Aerospike server image
         displayName: Server Image
         path: image
+      - description: K8sNodeBlockList is a list of Kubernetes nodes which are not
+          used for Aerospike pods.
+        displayName: Kubernetes Node BlockList
+        path: k8sNodeBlockList
       - description: MaxUnavailable is the percentage/number of pods that can be
           allowed to go down or unavailable before application disruption. This value
           is used to create PodDisruptionBudget. Defaults to 1.
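The controller changes that follow all pivot on a single membership test: build a string set from Spec.K8sNodeBlockList and check each pod's Spec.NodeName against it. A standalone sketch of that check, using the same apimachinery sets package the diff uses (node names hypothetical):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

func main() {
    blockedK8sNodes := sets.NewString("worker-1", "worker-2")

    for _, nodeName := range []string{"worker-1", "worker-3"} {
        // Pods scheduled on a blocked node are restarted so they move elsewhere.
        fmt.Printf("%s blocked: %v\n", nodeName, blockedK8sNodes.Has(nodeName))
    }
}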
diff --git a/controllers/aero_info_calls.go b/controllers/aero_info_calls.go
index 093da495b..4290a5d06 100644
--- a/controllers/aero_info_calls.go
+++ b/controllers/aero_info_calls.go
@@ -42,7 +42,7 @@ func (r *SingleClusterReconciler) waitForMultipleNodesSafeStopReady(
 		return reconcileSuccess()
 	}
 
-	// Remove a node only if cluster is stable
+	// Remove a node only if the cluster is stable
 	if err := r.waitForAllSTSToBeReady(ignorablePodNames); err != nil {
 		return reconcileError(fmt.Errorf("failed to wait for cluster to be ready: %v", err))
 	}
diff --git a/controllers/pod.go b/controllers/pod.go
index 302a3fe80..93ff94c55 100644
--- a/controllers/pod.go
+++ b/controllers/pod.go
@@ -63,6 +63,7 @@ func (r *SingleClusterReconciler) getRollingRestartTypeMap(
 		return nil, err
 	}
 
+	blockedK8sNodes := sets.NewString(r.aeroCluster.Spec.K8sNodeBlockList...)
 	requiredConfHash := confMap.Data[aerospikeConfHashFileName]
 
 	for idx := range pods {
@@ -70,6 +71,15 @@ func (r *SingleClusterReconciler) getRollingRestartTypeMap(
 			continue
 		}
 
+		if blockedK8sNodes.Has(pods[idx].Spec.NodeName) {
+			r.Log.Info("Pod found in blocked nodes list, will be migrated to a different node",
+				"podName", pods[idx].Name)
+
+			restartTypeMap[pods[idx].Name] = podRestart
+
+			continue
+		}
+
 		podStatus := r.aeroCluster.Status.Pods[pods[idx].Name]
 		if addedNSDevices == nil && podStatus.AerospikeConfigHash != requiredConfHash {
 			// Fetching all block devices that have been added in namespaces.
@@ -260,6 +270,7 @@ func (r *SingleClusterReconciler) restartPods(
 	}
 
 	restartedPods := make([]*corev1.Pod, 0, len(podsToRestart))
+	blockedK8sNodes := sets.NewString(r.aeroCluster.Spec.K8sNodeBlockList...)
 
 	for idx := range podsToRestart {
 		pod := podsToRestart[idx]
@@ -267,12 +278,21 @@ func (r *SingleClusterReconciler) restartPods(
 		restartType := restartTypeMap[pod.Name]
 		if restartType == quickRestart {
-			// If ASD restart fails then go ahead and restart the pod
+			// If ASD restart fails, then go ahead and restart the pod
 			if err := r.restartASDInPod(rackState, pod); err == nil {
 				continue
 			}
 		}
 
+		if blockedK8sNodes.Has(pod.Spec.NodeName) {
+			r.Log.Info("Pod found in blocked nodes list, deleting corresponding local PVCs if any",
+				"podName", pod.Name)
+
+			if err := r.deleteLocalPVCs(rackState, pod); err != nil {
+				return reconcileError(err)
+			}
+		}
+
 		if err := r.Client.Delete(context.TODO(), pod); err != nil {
 			r.Log.Error(err, "Failed to delete pod")
 			return reconcileError(err)
@@ -414,16 +434,27 @@ func (r *SingleClusterReconciler) deletePodAndEnsureImageUpdated(
 		return reconcileError(err)
 	}
 
+	blockedK8sNodes := sets.NewString(r.aeroCluster.Spec.K8sNodeBlockList...)
+
 	// Delete pods
-	for _, p := range podsToUpdate {
-		if err := r.Client.Delete(context.TODO(), p); err != nil {
+	for _, pod := range podsToUpdate {
+		if blockedK8sNodes.Has(pod.Spec.NodeName) {
+			r.Log.Info("Pod found in blocked nodes list, deleting corresponding local PVCs if any",
+				"podName", pod.Name)
+
+			if err := r.deleteLocalPVCs(rackState, pod); err != nil {
+				return reconcileError(err)
+			}
+		}
+
+		if err := r.Client.Delete(context.TODO(), pod); err != nil {
 			return reconcileError(err)
 		}
 
-		r.Log.V(1).Info("Pod deleted", "podName", p.Name)
+		r.Log.V(1).Info("Pod deleted", "podName", pod.Name)
 		r.Recorder.Eventf(
 			r.aeroCluster, corev1.EventTypeNormal, "PodWaitUpdate",
-			"[rack-%d] Waiting to update Pod %s", rackState.Rack.ID, p.Name,
+			"[rack-%d] Waiting to update Pod %s", rackState.Rack.ID, pod.Name,
 		)
 	}
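Illustration of the decision added to getRollingRestartTypeMap above: a pod on a blocked node always gets a full pod restart (delete and reschedule), regardless of any pending config change. A simplified, self-contained sketch; the restart-type constants here are stand-ins for the operator's internal ones:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/sets"
)

// Stand-ins for the operator's internal restart types.
const (
    noRestart  = "noRestart"
    podRestart = "podRestart"
)

// restartTypeFor mirrors the blocked-node branch added above.
func restartTypeFor(pod *corev1.Pod, blockedK8sNodes sets.String) string {
    if blockedK8sNodes.Has(pod.Spec.NodeName) {
        return podRestart // full delete so the scheduler places it on another node
    }

    return noRestart
}

func main() {
    pod := &corev1.Pod{Spec: corev1.PodSpec{NodeName: "worker-1"}}
    fmt.Println(restartTypeFor(pod, sets.NewString("worker-1")))
}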
diff --git a/controllers/pvc.go b/controllers/pvc.go
index f354ce77e..7777b0d2e 100644
--- a/controllers/pvc.go
+++ b/controllers/pvc.go
@@ -103,6 +103,32 @@ func (r *SingleClusterReconciler) removePVCsAsync(
 	return deletedPVCs, nil
 }
 
+func (r *SingleClusterReconciler) deleteLocalPVCs(rackState *RackState, pod *corev1.Pod) error {
+	pvcItems, err := r.getPodsPVCList([]string{pod.Name}, rackState.Rack.ID)
+	if err != nil {
+		return fmt.Errorf("could not find pvc for pod %v: %v", pod.Name, err)
+	}
+
+	for idx := range pvcItems {
+		pvcStorageClass := pvcItems[idx].Spec.StorageClassName
+		if pvcStorageClass == nil {
+			r.Log.Info("PVC does not have storageClass set, no need to delete PVC", "pvcName", pvcItems[idx].Name)
+
+			continue
+		}
+
+		if utils.ContainsString(rackState.Rack.Storage.LocalStorageClasses, *pvcStorageClass) {
+			if err := r.Client.Delete(context.TODO(), &pvcItems[idx]); err != nil {
+				return fmt.Errorf(
+					"could not delete pvc %s: %v", pvcItems[idx].Name, err,
+				)
+			}
+		}
+	}
+
+	return nil
+}
+
 func (r *SingleClusterReconciler) waitForPVCTermination(deletedPVCs []corev1.PersistentVolumeClaim) error {
 	if len(deletedPVCs) == 0 {
 		return nil
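Why deleteLocalPVCs matters: local PersistentVolumes are pinned to a single node via node affinity, so unless the claim is deleted, the replacement pod would be scheduled straight back onto the blocked node. A sketch of the predicate the function applies to each PVC (storage-class name hypothetical):

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

// isLocalPVC mirrors the check above: act only on claims whose storage class
// appears in the rack's Storage.LocalStorageClasses list.
func isLocalPVC(pvc *corev1.PersistentVolumeClaim, localStorageClasses []string) bool {
    if pvc.Spec.StorageClassName == nil {
        return false // no storage class set; leave the PVC alone
    }

    for _, sc := range localStorageClasses {
        if sc == *pvc.Spec.StorageClassName {
            return true
        }
    }

    return false
}

func main() {
    sc := "local-ssd" // hypothetical local storage class
    pvc := &corev1.PersistentVolumeClaim{
        Spec: corev1.PersistentVolumeClaimSpec{StorageClassName: &sc},
    }
    fmt.Println(isLocalPVC(pvc, []string{"local-ssd"}))
}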
diff --git a/controllers/rack.go b/controllers/rack.go
index 66e269349..2748c59f9 100644
--- a/controllers/rack.go
+++ b/controllers/rack.go
@@ -350,7 +350,7 @@ func (r *SingleClusterReconciler) upgradeOrRollingRestartRack(found *appsv1.Stat
 	// Always update configMap. We won't be able to find if a rack's config, and it's pod config is in sync or not
 	// Checking rack.spec, rack.status will not work.
 	// We may change config, let some pods restart with new config and then change config back to original value.
-	// Now rack.spec, rack.status will be same, but few pods will have changed config.
+	// Now rack.spec, rack.status will be the same, but a few pods will have changed config.
 	// So a check based on spec and status will skip configMap update.
 	// Hence, a rolling restart of pod will never bring pod to desired config
 	if err := r.updateSTSConfigMap(
@@ -420,7 +420,14 @@ func (r *SingleClusterReconciler) upgradeOrRollingRestartRack(found *appsv1.Stat
 	}
 
 	if r.aeroCluster.Spec.RackConfig.MaxIgnorablePods != nil {
-		if res := r.handleNSOrDeviceRemovalForIgnorablePods(rackState, ignorablePodNames); !res.isSuccess {
+		if res = r.handleNSOrDeviceRemovalForIgnorablePods(rackState, ignorablePodNames); !res.isSuccess {
+			return found, res
+		}
+	}
+
+	if r.aeroCluster.Spec.K8sNodeBlockList != nil {
+		found, res = r.handleK8sNodeBlockListPods(found, rackState, ignorablePodNames)
+		if !res.isSuccess {
 			return found, res
 		}
 	}
@@ -502,9 +509,9 @@ func (r *SingleClusterReconciler) reconcileRack(
 	}
 
 	if failedPods == nil {
-		// revert migrate-fill-delay to original value if it was set to 0 during scale down
-		// Reset will be done if there is Scale down or Rack redistribution
-		// This check won't cover scenario where scale down operation was done and then reverted to previous value
+		// Revert migrate-fill-delay to original value if it was set to 0 during scale down.
+		// Reset will be done if there is scale-down or Rack redistribution.
+		// This check won't cover a scenario where scale-down operation was done and then reverted to previous value
 		// before the scale down could complete.
 		if (r.aeroCluster.Status.Size > r.aeroCluster.Spec.Size) ||
 			(!r.IsStatusEmpty() && len(r.aeroCluster.Status.RackConfig.Racks) != len(r.aeroCluster.Spec.RackConfig.Racks)) {
@@ -1061,6 +1068,49 @@ func (r *SingleClusterReconciler) rollingRestartRack(found *appsv1.StatefulSet,
 	return found, reconcileSuccess()
 }
 
+func (r *SingleClusterReconciler) handleK8sNodeBlockListPods(statefulSet *appsv1.StatefulSet, rackState *RackState,
+	ignorablePodNames sets.Set[string],
+) (*appsv1.StatefulSet, reconcileResult) {
+	if err := r.updateSTS(statefulSet, rackState); err != nil {
+		return statefulSet, reconcileError(
+			fmt.Errorf("upgrade rack: %v", err),
+		)
+	}
+
+	podList, err := r.getOrderedRackPodList(rackState.Rack.ID)
+	if err != nil {
+		return statefulSet, reconcileError(fmt.Errorf("failed to list pods: %v", err))
+	}
+
+	blockedK8sNodes := sets.NewString(r.aeroCluster.Spec.K8sNodeBlockList...)
+
+	for idx := range podList {
+		pod := podList[idx]
+
+		if blockedK8sNodes.Has(pod.Spec.NodeName) {
+			r.Log.Info("Pod found in blocked nodes list, migrating to a different node",
+				"podName", pod.Name)
+
+			if res := r.waitForMultipleNodesSafeStopReady([]*corev1.Pod{pod}, ignorablePodNames); !res.isSuccess {
+				return statefulSet, res
+			}
+
+			restartTypeMap := map[string]RestartType{
+				pod.Name: podRestart,
+			}
+
+			if res := r.restartPods(rackState, []*corev1.Pod{pod}, restartTypeMap); !res.isSuccess {
+				return statefulSet, res
+			}
+
+			// Handle the next pod on a blocked node in a subsequent reconcile.
+			return statefulSet, reconcileRequeueAfter(1)
+		}
+	}
+
+	return statefulSet, reconcileSuccess()
+}
+
 func (r *SingleClusterReconciler) needRollingRestartRack(rackState *RackState, ignorablePodNames sets.Set[string]) (
 	needRestart bool, restartTypeMap map[string]RestartType, err error,
 ) {
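handleK8sNodeBlockListPods deliberately migrates at most one pod per reconcile: it drains the first blocked pod it finds, then requeues so the next one is handled after the cluster has settled. The shape of that pattern, reduced to plain strings (pod and node names hypothetical):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/sets"
)

// migrateNext acts on the first pod found on a blocked node and reports
// whether a requeue is needed to handle the remaining ones later.
func migrateNext(podNodes map[string]string, blocked sets.String) (requeue bool) {
    for pod, node := range podNodes {
        if blocked.Has(node) {
            fmt.Println("migrating", pod) // one pod per reconcile
            return true
        }
    }

    return false
}

func main() {
    pods := map[string]string{"aerocluster-0-0": "worker-1", "aerocluster-0-1": "worker-3"}
    fmt.Println("requeue:", migrateNext(pods, sets.NewString("worker-1")))
}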
diff --git a/controllers/reconciler.go b/controllers/reconciler.go
index f03c22a49..54e40f877 100644
--- a/controllers/reconciler.go
+++ b/controllers/reconciler.go
@@ -45,7 +45,7 @@ func (r *SingleClusterReconciler) Reconcile() (ctrl.Result, error) {
 		r.aeroCluster.Status,
 	)
 
-	// Check DeletionTimestamp to see if cluster is being deleted
+	// Check DeletionTimestamp to see if the cluster is being deleted
 	if !r.aeroCluster.ObjectMeta.DeletionTimestamp.IsZero() {
 		r.Log.V(1).Info("Deleting AerospikeCluster")
 		// The cluster is being deleted
diff --git a/controllers/statefulset.go b/controllers/statefulset.go
index e24bc6b3d..d075b5754 100644
--- a/controllers/statefulset.go
+++ b/controllers/statefulset.go
@@ -17,6 +17,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/util/retry"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
@@ -651,11 +652,16 @@ func (r *SingleClusterReconciler) updateSTS(
 	// TODO: Add validation. device, file, both should not exist in same storage class
 	r.updateSTSStorage(statefulSet, rackState)
 
-	// Save the updated stateful set.
-	// Can we optimize this? Update stateful set only if there is any change
-	// in it.
-	err := r.Client.Update(context.TODO(), statefulSet, updateOption)
-	if err != nil {
+	if err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		found, err := r.getSTS(rackState)
+		if err != nil {
+			return err
+		}
+
+		// Save the updated stateful set.
+		found.Spec = statefulSet.Spec
+		return r.Client.Update(context.TODO(), found, updateOption)
+	}); err != nil {
 		return fmt.Errorf(
 			"failed to update StatefulSet %s: %v", statefulSet.Name,
@@ -919,6 +925,16 @@ func (r *SingleClusterReconciler) updateSTSSchedulingPolicy(
 		)
 	}
 
+	if r.aeroCluster.Spec.K8sNodeBlockList != nil {
+		matchExpressions = append(
+			matchExpressions, corev1.NodeSelectorRequirement{
+				Key:      "kubernetes.io/hostname",
+				Operator: corev1.NodeSelectorOpNotIn,
+				Values:   r.aeroCluster.Spec.K8sNodeBlockList,
+			},
+		)
+	}
+
 	if len(matchExpressions) != 0 {
 		if affinity.NodeAffinity == nil {
 			affinity.NodeAffinity = &corev1.NodeAffinity{}
@@ -1526,8 +1542,18 @@ func getSTSContainerPort(
 	multiPodPerHost bool, aeroConf *asdbv1.AerospikeConfigSpec,
 ) []corev1.ContainerPort {
 	ports := make([]corev1.ContainerPort, 0, len(defaultContainerPorts))
+	portNames := make([]string, 0, len(defaultContainerPorts))
+
+	// Sort the defaultContainerPorts map keys so ports are appended in a
+	// deterministic order. Helps reduce unnecessary sts object updates.
+	for portName := range defaultContainerPorts {
+		portNames = append(portNames, portName)
+	}
+
+	sort.Strings(portNames)
 
-	for portName, portInfo := range defaultContainerPorts {
+	for _, portName := range portNames {
+		portInfo := defaultContainerPorts[portName]
 		configPort := asdbv1.GetPortFromConfig(
 			aeroConf, portInfo.connectionType, portInfo.configParam,
 		)
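The switch to retry.RetryOnConflict in updateSTS re-fetches the StatefulSet on every attempt, so an update no longer fails outright when another writer has bumped the resourceVersion. A self-contained demonstration of the retry helper with a simulated one-time conflict (the object name is hypothetical):

package main

import (
    "fmt"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/util/retry"
)

func main() {
    attempts := 0

    err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
        attempts++
        if attempts == 1 {
            // Simulate the API server rejecting a stale update once.
            return apierrors.NewConflict(
                schema.GroupResource{Group: "apps", Resource: "statefulsets"},
                "aerocluster-0-0", fmt.Errorf("object was modified"),
            )
        }
        return nil // second attempt, with a re-fetched object, succeeds
    })

    fmt.Println("attempts:", attempts, "err:", err)
}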
diff --git a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml
index b154f1768..fdd1e6820 100644
--- a/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml
+++ b/helm-charts/aerospike-kubernetes-operator/crds/customresourcedefinition_aerospikeclusters.asdb.aerospike.com.yaml
@@ -280,6 +280,12 @@ spec:
           image:
             description: Aerospike server image
             type: string
+          k8sNodeBlockList:
+            description: K8sNodeBlockList is a list of Kubernetes nodes which
+              are not used for Aerospike pods.
+            items:
+              type: string
+            type: array
           maxUnavailable:
             anyOf:
             - type: integer
@@ -5739,7 +5745,7 @@
                             type: string
                         type: object
                       cleanupThreads:
-                        description: CleanupThreads contains maximum number
+                        description: CleanupThreads contains the maximum number
                           of cleanup threads(dd or blkdiscard) per init container.
                         type: integer
                       filesystemVolumePolicy:
@@ -5794,6 +5800,12 @@
                             - deleteFiles
                             type: string
                         type: object
+                      localStorageClasses:
+                        description: LocalStorageClasses contains a list of
+                          storage classes which provision local volumes.
+                        items:
+                          type: string
+                        type: array
                       volumes:
                         description: Volumes list to attach to created pods.
                         items:
@@ -7427,7 +7439,7 @@
                             type: string
                         type: object
                       cleanupThreads:
-                        description: CleanupThreads contains maximum number
+                        description: CleanupThreads contains the maximum number
                           of cleanup threads(dd or blkdiscard) per init container.
                         type: integer
                       filesystemVolumePolicy:
@@ -7482,6 +7494,12 @@
                             - deleteFiles
                             type: string
                         type: object
+                      localStorageClasses:
+                        description: LocalStorageClasses contains a list of
+                          storage classes which provision local volumes.
+                        items:
+                          type: string
+                        type: array
                       volumes:
                         description: Volumes list to attach to created pods.
                         items:
@@ -8131,7 +8149,7 @@
                 type: string
             type: object
           cleanupThreads:
-            description: CleanupThreads contains maximum number of cleanup
+            description: CleanupThreads contains the maximum number of cleanup
               threads(dd or blkdiscard) per init container.
             type: integer
           filesystemVolumePolicy:
@@ -8186,6 +8204,12 @@
                 - deleteFiles
                 type: string
             type: object
+          localStorageClasses:
+            description: LocalStorageClasses contains a list of storage classes
+              which provision local volumes.
+            items:
+              type: string
+            type: array
           volumes:
             description: Volumes list to attach to created pods.
             items:
@@ -8915,6 +8939,12 @@
           image:
             description: Aerospike server image
             type: string
+          k8sNodeBlockList:
+            description: K8sNodeBlockList is a list of Kubernetes nodes which
+              are not used for Aerospike pods.
+            items:
+              type: string
+            type: array
           maxUnavailable:
             anyOf:
             - type: integer
@@ -14502,7 +14532,7 @@
                             type: string
                         type: object
                       cleanupThreads:
-                        description: CleanupThreads contains maximum number
+                        description: CleanupThreads contains the maximum number
                           of cleanup threads(dd or blkdiscard) per init container.
                         type: integer
                       filesystemVolumePolicy:
@@ -14557,6 +14587,12 @@
                             - deleteFiles
                             type: string
                         type: object
+                      localStorageClasses:
+                        description: LocalStorageClasses contains a list of
+                          storage classes which provision local volumes.
+                        items:
+                          type: string
+                        type: array
                       volumes:
                         description: Volumes list to attach to created pods.
                         items:
@@ -16190,7 +16226,7 @@
                             type: string
                         type: object
                       cleanupThreads:
-                        description: CleanupThreads contains maximum number
+                        description: CleanupThreads contains the maximum number
                           of cleanup threads(dd or blkdiscard) per init container.
                         type: integer
                       filesystemVolumePolicy:
@@ -16245,6 +16281,12 @@
                             - deleteFiles
                             type: string
                         type: object
+                      localStorageClasses:
+                        description: LocalStorageClasses contains a list of
+                          storage classes which provision local volumes.
+                        items:
+                          type: string
+                        type: array
                       volumes:
                         description: Volumes list to attach to created pods.
                         items:
@@ -16945,7 +16987,7 @@
                 type: string
             type: object
           cleanupThreads:
-            description: CleanupThreads contains maximum number of cleanup
+            description: CleanupThreads contains the maximum number of cleanup
               threads(dd or blkdiscard) per init container.
             type: integer
           filesystemVolumePolicy:
@@ -17000,6 +17042,12 @@
                 - deleteFiles
                 type: string
             type: object
+          localStorageClasses:
+            description: LocalStorageClasses contains a list of storage classes
+              which provision local volumes.
+            items:
+              type: string
+            type: array
           volumes:
             description: Volumes list to attach to created pods.
             items:
diff --git a/pkg/utils/pod.go b/pkg/utils/pod.go
index b7c687dd8..629f6c9b2 100644
--- a/pkg/utils/pod.go
+++ b/pkg/utils/pod.go
@@ -212,7 +212,8 @@ func isPodError(reason string) bool {
 
 func IsPodReasonUnschedulable(pod *corev1.Pod) bool {
 	for _, condition := range pod.Status.Conditions {
-		if condition.Type == corev1.PodScheduled && condition.Reason == corev1.PodReasonUnschedulable {
+		if condition.Type == corev1.PodScheduled && (condition.Reason == corev1.PodReasonUnschedulable ||
+			condition.Reason == corev1.PodReasonSchedulerError) {
 			return true
 		}
 	}
diff --git a/test/k8snode_block_list_test.go b/test/k8snode_block_list_test.go
new file mode 100644
index 000000000..56e540407
--- /dev/null
+++ b/test/k8snode_block_list_test.go
@@ -0,0 +1 @@
+package test
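For completeness: the broadened check in pkg/utils/pod.go now also treats a SchedulerError reason as unschedulable, which seems relevant here because a pod whose local PV is pinned to a blocked node can fail scheduling with that reason. The helper's logic, inlined so the sketch runs standalone:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

// isPodReasonUnschedulable inlines the updated helper above
// (the real function is utils.IsPodReasonUnschedulable).
func isPodReasonUnschedulable(pod *corev1.Pod) bool {
    for _, condition := range pod.Status.Conditions {
        if condition.Type == corev1.PodScheduled && (condition.Reason == corev1.PodReasonUnschedulable ||
            condition.Reason == corev1.PodReasonSchedulerError) {
            return true
        }
    }

    return false
}

func main() {
    pod := &corev1.Pod{Status: corev1.PodStatus{Conditions: []corev1.PodCondition{{
        Type:   corev1.PodScheduled,
        Reason: corev1.PodReasonSchedulerError,
    }}}}
    fmt.Println(isPodReasonUnschedulable(pod)) // true
}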