diff --git a/apis/apps/v1/component_types.go b/apis/apps/v1/component_types.go
index bdb8c4ea52e..3b6c6b4f518 100644
--- a/apis/apps/v1/component_types.go
+++ b/apis/apps/v1/component_types.go
@@ -70,6 +70,12 @@ func init() {
// ComponentSpec defines the desired state of Component
type ComponentSpec struct {
+ // Specifies the behavior when a Component is deleted.
+ //
+ // +kubebuilder:default=Delete
+ // +optional
+ TerminationPolicy TerminationPolicyType `json:"terminationPolicy"`
+
// Specifies the name of the referenced ComponentDefinition.
//
// +kubebuilder:validation:Required
diff --git a/apis/apps/v1/deprecated.go b/apis/apps/v1/deprecated.go
index 71e5cd4de35..341293d3938 100644
--- a/apis/apps/v1/deprecated.go
+++ b/apis/apps/v1/deprecated.go
@@ -19,12 +19,6 @@ package v1
import (
"fmt"
"strings"
-
- corev1 "k8s.io/api/core/v1"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
- "github.com/apecloud/kubeblocks/pkg/constant"
- viper "github.com/apecloud/kubeblocks/pkg/viperx"
)
const (
@@ -55,58 +49,6 @@ func (r *Cluster) GetComponentByName(componentName string) *ClusterComponentSpec
return nil
}
-// GetVolumeClaimNames gets all PVC names of component compName.
-//
-// r.Spec.GetComponentByName(compName).VolumeClaimTemplates[*].Name will be used if no claimNames provided
-//
-// nil return if:
-// 1. component compName not found or
-// 2. len(VolumeClaimTemplates)==0 or
-// 3. any claimNames not found
-func (r *Cluster) GetVolumeClaimNames(compName string, claimNames ...string) []string {
- if r == nil {
- return nil
- }
- comp := r.Spec.GetComponentByName(compName)
- if comp == nil {
- return nil
- }
- if len(comp.VolumeClaimTemplates) == 0 {
- return nil
- }
- if len(claimNames) == 0 {
- for _, template := range comp.VolumeClaimTemplates {
- claimNames = append(claimNames, template.Name)
- }
- }
- allExist := true
- for _, name := range claimNames {
- found := false
- for _, template := range comp.VolumeClaimTemplates {
- if template.Name == name {
- found = true
- break
- }
- }
- if !found {
- allExist = false
- break
- }
- }
- if !allExist {
- return nil
- }
-
- pvcNames := make([]string, 0)
- for _, claimName := range claimNames {
- for i := 0; i < int(comp.Replicas); i++ {
- pvcName := fmt.Sprintf("%s-%s-%s-%d", claimName, r.Name, compName, i)
- pvcNames = append(pvcNames, pvcName)
- }
- }
- return pvcNames
-}
-
func (r *ClusterSpec) GetComponentByName(componentName string) *ClusterComponentSpec {
for _, v := range r.ComponentSpecs {
if v.Name == componentName {
@@ -133,53 +75,11 @@ func (r *ClusterStatus) SetComponentStatus(name string, status ClusterComponentS
r.Components[name] = status
}
-func (r *ClusterComponentSpec) ToVolumeClaimTemplates() []corev1.PersistentVolumeClaimTemplate {
- if r == nil {
- return nil
- }
- var ts []corev1.PersistentVolumeClaimTemplate
- for _, t := range r.VolumeClaimTemplates {
- ts = append(ts, t.toVolumeClaimTemplate())
- }
- return ts
-}
-
func (r *ClusterComponentStatus) GetObjectMessage(objectKind, objectName string) string {
messageKey := fmt.Sprintf("%s/%s", objectKind, objectName)
return r.Message[messageKey]
}
-func (r *ClusterComponentVolumeClaimTemplate) toVolumeClaimTemplate() corev1.PersistentVolumeClaimTemplate {
- return corev1.PersistentVolumeClaimTemplate{
- ObjectMeta: metav1.ObjectMeta{
- Name: r.Name,
- },
- Spec: r.Spec.ToV1PersistentVolumeClaimSpec(),
- }
-}
-
-func (r *PersistentVolumeClaimSpec) ToV1PersistentVolumeClaimSpec() corev1.PersistentVolumeClaimSpec {
- return corev1.PersistentVolumeClaimSpec{
- AccessModes: r.AccessModes,
- Resources: r.Resources,
- StorageClassName: r.getStorageClassName(viper.GetString(constant.CfgKeyDefaultStorageClass)),
- VolumeMode: r.VolumeMode,
- VolumeAttributesClassName: r.VolumeAttributesClassName,
- }
-}
-
-// getStorageClassName returns PersistentVolumeClaimSpec.StorageClassName if a value is assigned; otherwise,
-// it returns the defaultStorageClass argument.
-func (r *PersistentVolumeClaimSpec) getStorageClassName(defaultStorageClass string) *string {
- if r.StorageClassName != nil && *r.StorageClassName != "" {
- return r.StorageClassName
- }
- if defaultStorageClass != "" {
- return &defaultStorageClass
- }
- return nil
-}
-
func GetClusterUpRunningPhases() []ClusterPhase {
return []ClusterPhase{
RunningClusterPhase,
diff --git a/config/crd/bases/apps.kubeblocks.io_components.yaml b/config/crd/bases/apps.kubeblocks.io_components.yaml
index 2cc1b07a038..52ea07969b6 100644
--- a/config/crd/bases/apps.kubeblocks.io_components.yaml
+++ b/config/crd/bases/apps.kubeblocks.io_components.yaml
@@ -5576,6 +5576,14 @@ spec:
- name
type: object
type: array
+ terminationPolicy:
+ default: Delete
+ description: Specifies the behavior when a Component is deleted.
+ enum:
+ - DoNotTerminate
+ - Delete
+ - WipeOut
+ type: string
tlsConfig:
description: "Specifies the TLS configuration for the Component, including:\n\n\n-
A boolean flag that indicates whether the Component should use Transport
diff --git a/controllers/apps/cluster/cluster_controller.go b/controllers/apps/cluster/cluster_controller.go
index d6edbccac60..1585397cca9 100644
--- a/controllers/apps/cluster/cluster_controller.go
+++ b/controllers/apps/cluster/cluster_controller.go
@@ -22,6 +22,7 @@ package cluster
import (
"context"
"math"
+ "time"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -34,6 +35,7 @@ import (
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/multicluster"
intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
@@ -108,7 +110,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
return intctrlutil.Requeue(reqCtx.Log, err.Error())
}
c := planBuilder.(*clusterPlanBuilder)
- sendWarningEventWithError(r.Recorder, c.transCtx.Cluster, corev1.EventTypeWarning, err)
+ appsutil.SendWarningEventWithError(r.Recorder, c.transCtx.Cluster, corev1.EventTypeWarning, err)
return intctrlutil.RequeueWithError(err, reqCtx.Log, "")
}
@@ -171,6 +173,10 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
// SetupWithManager sets up the controller with the Manager.
func (r *ClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ retryDurationMS := viper.GetInt(constant.CfgKeyCtrlrReconcileRetryDurationMS)
+ if retryDurationMS != 0 {
+ appsutil.RequeueDuration = time.Millisecond * time.Duration(retryDurationMS)
+ }
return intctrlutil.NewControllerManagedBy(mgr).
For(&appsv1.Cluster{}).
WithOptions(controller.Options{
diff --git a/controllers/apps/cluster/cluster_controller_test.go b/controllers/apps/cluster/cluster_controller_test.go
index 0f17c67cfa4..b18ba369dbb 100644
--- a/controllers/apps/cluster/cluster_controller_test.go
+++ b/controllers/apps/cluster/cluster_controller_test.go
@@ -37,7 +37,6 @@ import (
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
- workloadsv1 "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/builder"
"github.com/apecloud/kubeblocks/pkg/generics"
@@ -1187,153 +1186,150 @@ var _ = Describe("Cluster Controller", func() {
})
// TODO: refactor the case and should not depend on objects created by the component controller
- PContext("cluster component annotations and labels", func() {
- BeforeEach(func() {
- cleanEnv()
- createAllDefinitionObjects()
- })
-
- AfterEach(func() {
- cleanEnv()
- })
-
- addMetaMap := func(metaMap *map[string]string, key string, value string) {
- if *metaMap == nil {
- *metaMap = make(map[string]string)
- }
- (*metaMap)[key] = value
- }
-
- // TODO: remove it
- // checkRelatedObject := func(compName string, checkFunc func(g Gomega, obj client.Object)) {
- // // check related services of the component
- // defaultSvcName := constant.GenerateComponentServiceName(clusterObj.Name, compName, "")
- // Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: defaultSvcName,
- // Namespace: testCtx.DefaultNamespace}, func(g Gomega, svc *corev1.Service) {
- // checkFunc(g, svc)
- // })).Should(Succeed())
- //
- // // check related account secret of the component
- // rootAccountSecretName := constant.GenerateAccountSecretName(clusterObj.Name, compName, "root")
- // Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: rootAccountSecretName,
- // Namespace: testCtx.DefaultNamespace}, func(g Gomega, secret *corev1.Secret) {
- // checkFunc(g, secret)
- // })).Should(Succeed())
- // }
-
- testUpdateAnnoAndLabels := func(compName string,
- changeCluster func(cluster *appsv1.Cluster),
- checkWorkloadFunc func(g Gomega, labels, annotations map[string]string, isInstanceSet bool),
- checkRelatedObjFunc func(g Gomega, obj client.Object)) {
- Expect(testapps.ChangeObj(&testCtx, clusterObj, func(obj *appsv1.Cluster) {
- changeCluster(obj)
- })).Should(Succeed())
-
- By("check component has updated")
- workloadName := constant.GenerateWorkloadNamePattern(clusterObj.Name, defaultCompName)
- Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: workloadName,
- Namespace: testCtx.DefaultNamespace}, func(g Gomega, compObj *appsv1.Component) {
- checkWorkloadFunc(g, compObj.Spec.Labels, compObj.Spec.Annotations, false)
- })).Should(Succeed())
-
- // TODO: remove it
- // By("check related objects annotations and labels")
- // checkRelatedObject(defaultCompName, func(g Gomega, obj client.Object) {
- // checkRelatedObjFunc(g, obj)
- // })
-
- // TODO: remove it
- // By("InstanceSet.spec.template.annotations/labels need to be consistent with component")
- //// The labels and annotations of the Pod will be kept consistent with those of the InstanceSet
- // Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: workloadName, Namespace: testCtx.DefaultNamespace},
- // func(g Gomega, instanceSet *workloadsv1.InstanceSet) {
- // checkWorkloadFunc(g, instanceSet.Spec.Template.GetLabels(), instanceSet.Spec.Template.GetAnnotations(), true)
- // })).Should(Succeed())
- }
-
- It("test add/override annotations and labels", func() {
- By("creating a cluster")
- clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, "").
- WithRandomName().
- AddComponent(defaultCompName, compDefObj.Name).
- SetServiceVersion(defaultServiceVersion).
- SetReplicas(3).
- Create(&testCtx).
- GetObject()
-
- By("add annotations and labels")
- key1 := "key1"
- value1 := "value1"
- testUpdateAnnoAndLabels(defaultCompName,
- func(cluster *appsv1.Cluster) {
- addMetaMap(&cluster.Spec.ComponentSpecs[0].Annotations, key1, value1)
- addMetaMap(&cluster.Spec.ComponentSpecs[0].Labels, key1, value1)
- },
- func(g Gomega, labels, annotations map[string]string, isInstanceSet bool) {
- g.Expect(labels[key1]).Should(Equal(value1))
- g.Expect(annotations[key1]).Should(Equal(value1))
- },
- func(g Gomega, obj client.Object) {
- g.Expect(obj.GetLabels()[key1]).Should(Equal(value1))
- g.Expect(obj.GetAnnotations()[key1]).Should(Equal(value1))
- })
-
- By("merge instanceSet template annotations")
- workloadName := constant.GenerateWorkloadNamePattern(clusterObj.Name, defaultCompName)
- podTemplateKey := "pod-template-key"
- podTemplateValue := "pod-template-value"
- Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKey{Name: workloadName, Namespace: testCtx.DefaultNamespace}, func(instanceSet *workloadsv1.InstanceSet) {
- instanceSet.Spec.Template.Annotations[podTemplateKey] = podTemplateValue
- })()).Should(Succeed())
-
- By("override annotations and labels")
- value2 := "value2"
- testUpdateAnnoAndLabels(defaultCompName,
- func(cluster *appsv1.Cluster) {
- addMetaMap(&cluster.Spec.ComponentSpecs[0].Annotations, key1, value2)
- addMetaMap(&cluster.Spec.ComponentSpecs[0].Labels, key1, value2)
- },
- func(g Gomega, labels, annotations map[string]string, isInstanceSet bool) {
- g.Expect(labels[key1]).Should(Equal(value2))
- g.Expect(annotations[key1]).Should(Equal(value2))
- },
- func(g Gomega, obj client.Object) {
- g.Expect(obj.GetLabels()[key1]).Should(Equal(value2))
- g.Expect(obj.GetAnnotations()[key1]).Should(Equal(value2))
- })
-
- By("check InstanceSet template annotations should keep the custom annotations")
- Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: workloadName, Namespace: testCtx.DefaultNamespace},
- func(g Gomega, instanceSet *workloadsv1.InstanceSet) {
- g.Expect(instanceSet.Spec.Template.Annotations[podTemplateKey]).Should(Equal(podTemplateValue))
- })).Should(Succeed())
-
- By("delete the annotations and labels, but retain the deleted annotations and labels for related objects")
- key2 := "key2"
- testUpdateAnnoAndLabels(defaultCompName,
- func(cluster *appsv1.Cluster) {
- cluster.Spec.ComponentSpecs[0].Annotations = map[string]string{
- key2: value2,
- }
- cluster.Spec.ComponentSpecs[0].Labels = map[string]string{
- key2: value2,
- }
- },
- func(g Gomega, labels, annotations map[string]string, isInstanceSet bool) {
- g.Expect(labels).ShouldNot(HaveKey(key1))
- if !isInstanceSet {
- g.Expect(annotations).ShouldNot(HaveKey(key1))
- }
- g.Expect(labels[key2]).Should(Equal(value2))
- g.Expect(annotations[key2]).Should(Equal(value2))
- },
- func(g Gomega, obj client.Object) {
- g.Expect(obj.GetLabels()[key1]).Should(Equal(value2))
- g.Expect(obj.GetAnnotations()[key1]).Should(Equal(value2))
- g.Expect(obj.GetLabels()[key2]).Should(Equal(value2))
- g.Expect(obj.GetAnnotations()[key2]).Should(Equal(value2))
- })
- })
- })
+ // Context("cluster component annotations and labels", func() {
+ // BeforeEach(func() {
+ // cleanEnv()
+ // createAllDefinitionObjects()
+ // })
+ //
+ // AfterEach(func() {
+ // cleanEnv()
+ // })
+ //
+ // addMetaMap := func(metaMap *map[string]string, key string, value string) {
+ // if *metaMap == nil {
+ // *metaMap = make(map[string]string)
+ // }
+ // (*metaMap)[key] = value
+ // }
+ //
+ // checkRelatedObject := func(compName string, checkFunc func(g Gomega, obj client.Object)) {
+ // // check related services of the component
+ // defaultSvcName := constant.GenerateComponentServiceName(clusterObj.Name, compName, "")
+ // Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: defaultSvcName,
+ // Namespace: testCtx.DefaultNamespace}, func(g Gomega, svc *corev1.Service) {
+ // checkFunc(g, svc)
+ // })).Should(Succeed())
+ //
+ // // check related account secret of the component
+ // rootAccountSecretName := constant.GenerateAccountSecretName(clusterObj.Name, compName, "root")
+ // Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: rootAccountSecretName,
+ // Namespace: testCtx.DefaultNamespace}, func(g Gomega, secret *corev1.Secret) {
+ // checkFunc(g, secret)
+ // })).Should(Succeed())
+ // }
+ //
+ // testUpdateAnnoAndLabels := func(compName string,
+ // changeCluster func(cluster *appsv1.Cluster),
+ // checkWorkloadFunc func(g Gomega, labels, annotations map[string]string, isInstanceSet bool),
+ // checkRelatedObjFunc func(g Gomega, obj client.Object)) {
+ // Expect(testapps.ChangeObj(&testCtx, clusterObj, func(obj *appsv1.Cluster) {
+ // changeCluster(obj)
+ // })).Should(Succeed())
+ //
+ // By("check component has updated")
+ // workloadName := constant.GenerateWorkloadNamePattern(clusterObj.Name, defaultCompName)
+ // Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: workloadName,
+ // Namespace: testCtx.DefaultNamespace}, func(g Gomega, compObj *appsv1.Component) {
+ // checkWorkloadFunc(g, compObj.Spec.Labels, compObj.Spec.Annotations, false)
+ // })).Should(Succeed())
+ //
+ // By("check related objects annotations and labels")
+ // checkRelatedObject(defaultCompName, func(g Gomega, obj client.Object) {
+ // checkRelatedObjFunc(g, obj)
+ // })
+ //
+ // By("InstanceSet.spec.template.annotations/labels need to be consistent with component")
+ // // The labels and annotations of the Pod will be kept consistent with those of the InstanceSet
+ // Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: workloadName, Namespace: testCtx.DefaultNamespace},
+ // func(g Gomega, instanceSet *workloadsv1.InstanceSet) {
+ // checkWorkloadFunc(g, instanceSet.Spec.Template.GetLabels(), instanceSet.Spec.Template.GetAnnotations(), true)
+ // })).Should(Succeed())
+ // }
+ //
+ // It("test add/override annotations and labels", func() {
+ // By("creating a cluster")
+ // clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, "").
+ // WithRandomName().
+ // AddComponent(defaultCompName, compDefObj.Name).
+ // SetServiceVersion(defaultServiceVersion).
+ // SetReplicas(3).
+ // Create(&testCtx).
+ // GetObject()
+ //
+ // By("add annotations and labels")
+ // key1 := "key1"
+ // value1 := "value1"
+ // testUpdateAnnoAndLabels(defaultCompName,
+ // func(cluster *appsv1.Cluster) {
+ // addMetaMap(&cluster.Spec.ComponentSpecs[0].Annotations, key1, value1)
+ // addMetaMap(&cluster.Spec.ComponentSpecs[0].Labels, key1, value1)
+ // },
+ // func(g Gomega, labels, annotations map[string]string, isInstanceSet bool) {
+ // g.Expect(labels[key1]).Should(Equal(value1))
+ // g.Expect(annotations[key1]).Should(Equal(value1))
+ // },
+ // func(g Gomega, obj client.Object) {
+ // g.Expect(obj.GetLabels()[key1]).Should(Equal(value1))
+ // g.Expect(obj.GetAnnotations()[key1]).Should(Equal(value1))
+ // })
+ //
+ // By("merge instanceSet template annotations")
+ // workloadName := constant.GenerateWorkloadNamePattern(clusterObj.Name, defaultCompName)
+ // podTemplateKey := "pod-template-key"
+ // podTemplateValue := "pod-template-value"
+ // Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKey{Name: workloadName, Namespace: testCtx.DefaultNamespace}, func(instanceSet *workloadsv1.InstanceSet) {
+ // instanceSet.Spec.Template.Annotations[podTemplateKey] = podTemplateValue
+ // })()).Should(Succeed())
+ //
+ // By("override annotations and labels")
+ // value2 := "value2"
+ // testUpdateAnnoAndLabels(defaultCompName,
+ // func(cluster *appsv1.Cluster) {
+ // addMetaMap(&cluster.Spec.ComponentSpecs[0].Annotations, key1, value2)
+ // addMetaMap(&cluster.Spec.ComponentSpecs[0].Labels, key1, value2)
+ // },
+ // func(g Gomega, labels, annotations map[string]string, isInstanceSet bool) {
+ // g.Expect(labels[key1]).Should(Equal(value2))
+ // g.Expect(annotations[key1]).Should(Equal(value2))
+ // },
+ // func(g Gomega, obj client.Object) {
+ // g.Expect(obj.GetLabels()[key1]).Should(Equal(value2))
+ // g.Expect(obj.GetAnnotations()[key1]).Should(Equal(value2))
+ // })
+ //
+ // By("check InstanceSet template annotations should keep the custom annotations")
+ // Eventually(testapps.CheckObj(&testCtx, client.ObjectKey{Name: workloadName, Namespace: testCtx.DefaultNamespace},
+ // func(g Gomega, instanceSet *workloadsv1.InstanceSet) {
+ // g.Expect(instanceSet.Spec.Template.Annotations[podTemplateKey]).Should(Equal(podTemplateValue))
+ // })).Should(Succeed())
+ //
+ // By("delete the annotations and labels, but retain the deleted annotations and labels for related objects")
+ // key2 := "key2"
+ // testUpdateAnnoAndLabels(defaultCompName,
+ // func(cluster *appsv1.Cluster) {
+ // cluster.Spec.ComponentSpecs[0].Annotations = map[string]string{
+ // key2: value2,
+ // }
+ // cluster.Spec.ComponentSpecs[0].Labels = map[string]string{
+ // key2: value2,
+ // }
+ // },
+ // func(g Gomega, labels, annotations map[string]string, isInstanceSet bool) {
+ // g.Expect(labels).ShouldNot(HaveKey(key1))
+ // if !isInstanceSet {
+ // g.Expect(annotations).ShouldNot(HaveKey(key1))
+ // }
+ // g.Expect(labels[key2]).Should(Equal(value2))
+ // g.Expect(annotations[key2]).Should(Equal(value2))
+ // },
+ // func(g Gomega, obj client.Object) {
+ // g.Expect(obj.GetLabels()[key1]).Should(Equal(value2))
+ // g.Expect(obj.GetAnnotations()[key1]).Should(Equal(value2))
+ // g.Expect(obj.GetLabels()[key2]).Should(Equal(value2))
+ // g.Expect(obj.GetAnnotations()[key2]).Should(Equal(value2))
+ // })
+ // })
+ // })
})
})
diff --git a/controllers/apps/cluster/cluster_plan_builder.go b/controllers/apps/cluster/cluster_plan_builder.go
index fcf086161f2..64604b0d453 100644
--- a/controllers/apps/cluster/cluster_plan_builder.go
+++ b/controllers/apps/cluster/cluster_plan_builder.go
@@ -25,8 +25,6 @@ import (
"reflect"
"github.com/go-logr/logr"
- snapshotv1beta1 "github.com/kubernetes-csi/external-snapshotter/client/v3/apis/volumesnapshot/v1beta1"
- snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@@ -37,11 +35,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
- appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
- appsv1beta1 "github.com/apecloud/kubeblocks/apis/apps/v1beta1"
- dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
- extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1"
- workloadsv1 "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/graph"
"github.com/apecloud/kubeblocks/pkg/controller/model"
@@ -167,19 +161,6 @@ func (c *clusterTransformContext) traverse(f func(spec *appsv1.ClusterComponentS
}
}
-func init() {
- model.AddScheme(appsv1alpha1.AddToScheme)
- model.AddScheme(appsv1beta1.AddToScheme)
- model.AddScheme(appsv1.AddToScheme)
- model.AddScheme(dpv1alpha1.AddToScheme)
- model.AddScheme(snapshotv1.AddToScheme)
- model.AddScheme(snapshotv1beta1.AddToScheme)
- model.AddScheme(extensionsv1alpha1.AddToScheme)
- model.AddScheme(workloadsv1.AddToScheme)
-}
-
-// PlanBuilder implementation
-
func (c *clusterPlanBuilder) Init() error {
cluster := &appsv1.Cluster{}
if err := c.cli.Get(c.transCtx.Context, c.req.NamespacedName, cluster); err != nil {
@@ -210,11 +191,11 @@ func (c *clusterPlanBuilder) Build() (graph.Plan, error) {
}
// if pre-check failed, this is a fast return, no need to set apply resource condition
if preCheckCondition.Status != metav1.ConditionTrue {
- sendWarningEventWithError(c.transCtx.GetRecorder(), c.transCtx.Cluster, ReasonPreCheckFailed, err)
+ appsutil.SendWarningEventWithError(c.transCtx.GetRecorder(), c.transCtx.Cluster, ReasonPreCheckFailed, err)
return
}
setApplyResourceCondition(&c.transCtx.Cluster.Status.Conditions, c.transCtx.Cluster.Generation, err)
- sendWarningEventWithError(c.transCtx.GetRecorder(), c.transCtx.Cluster, ReasonApplyResourcesFailed, err)
+ appsutil.SendWarningEventWithError(c.transCtx.GetRecorder(), c.transCtx.Cluster, ReasonApplyResourcesFailed, err)
}()
// new a DAG and apply chain on it
@@ -339,7 +320,7 @@ func (c *clusterPlanBuilder) reconcileObject(node *model.ObjectVertex) error {
}
func (c *clusterPlanBuilder) reconcileCreateObject(ctx context.Context, node *model.ObjectVertex) error {
- err := c.cli.Create(ctx, node.Obj, clientOption(node))
+ err := c.cli.Create(ctx, node.Obj, appsutil.ClientOption(node))
if err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
@@ -347,7 +328,7 @@ func (c *clusterPlanBuilder) reconcileCreateObject(ctx context.Context, node *mo
}
func (c *clusterPlanBuilder) reconcileUpdateObject(ctx context.Context, node *model.ObjectVertex) error {
- err := c.cli.Update(ctx, node.Obj, clientOption(node))
+ err := c.cli.Update(ctx, node.Obj, appsutil.ClientOption(node))
if err != nil && !apierrors.IsNotFound(err) {
return err
}
@@ -356,7 +337,7 @@ func (c *clusterPlanBuilder) reconcileUpdateObject(ctx context.Context, node *mo
func (c *clusterPlanBuilder) reconcilePatchObject(ctx context.Context, node *model.ObjectVertex) error {
patch := client.MergeFrom(node.OriObj)
- err := c.cli.Patch(ctx, node.Obj, patch, clientOption(node))
+ err := c.cli.Patch(ctx, node.Obj, patch, appsutil.ClientOption(node))
if err != nil && !apierrors.IsNotFound(err) {
return err
}
@@ -365,7 +346,7 @@ func (c *clusterPlanBuilder) reconcilePatchObject(ctx context.Context, node *mod
func (c *clusterPlanBuilder) reconcileDeleteObject(ctx context.Context, node *model.ObjectVertex) error {
if controllerutil.RemoveFinalizer(node.Obj, constant.DBClusterFinalizerName) {
- err := c.cli.Update(ctx, node.Obj, clientOption(node))
+ err := c.cli.Update(ctx, node.Obj, appsutil.ClientOption(node))
if err != nil && !apierrors.IsNotFound(err) {
return err
}
@@ -375,7 +356,7 @@ func (c *clusterPlanBuilder) reconcileDeleteObject(ctx context.Context, node *mo
deleteOptions := &client.DeleteOptions{
PropagationPolicy: &deletePropagation,
}
- if err := c.cli.Delete(ctx, node.Obj, deleteOptions, clientOption(node)); err != nil {
+ if err := c.cli.Delete(ctx, node.Obj, deleteOptions, appsutil.ClientOption(node)); err != nil {
return client.IgnoreNotFound(err)
}
return nil
@@ -392,7 +373,7 @@ func (c *clusterPlanBuilder) reconcileDeleteObject(ctx context.Context, node *mo
func (c *clusterPlanBuilder) reconcileStatusObject(ctx context.Context, node *model.ObjectVertex) error {
patch := client.MergeFrom(node.OriObj)
- if err := c.cli.Status().Patch(ctx, node.Obj, patch, clientOption(node)); err != nil {
+ if err := c.cli.Status().Patch(ctx, node.Obj, patch, appsutil.ClientOption(node)); err != nil {
return err
}
// handle condition and phase changing triggered events
diff --git a/controllers/apps/cluster/types.go b/controllers/apps/cluster/scheme.go
similarity index 54%
rename from controllers/apps/cluster/types.go
rename to controllers/apps/cluster/scheme.go
index 1fb23fdf74e..9917679edc4 100644
--- a/controllers/apps/cluster/types.go
+++ b/controllers/apps/cluster/scheme.go
@@ -21,40 +21,24 @@ package cluster
import (
snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
- batchv1 "k8s.io/api/batch/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
- "sigs.k8s.io/controller-runtime/pkg/client"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
appsv1beta1 "github.com/apecloud/kubeblocks/apis/apps/v1beta1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
- extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1"
- workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
-)
-
-var (
- rscheme = runtime.NewScheme()
+ "github.com/apecloud/kubeblocks/pkg/controller/model"
)
func init() {
- utilruntime.Must(clientgoscheme.AddToScheme(rscheme))
- utilruntime.Must(appsv1alpha1.AddToScheme(rscheme))
- utilruntime.Must(appsv1beta1.AddToScheme(rscheme))
- utilruntime.Must(appsv1.AddToScheme(rscheme))
- utilruntime.Must(dpv1alpha1.AddToScheme(rscheme))
- utilruntime.Must(snapshotv1.AddToScheme(rscheme))
- utilruntime.Must(extensionsv1alpha1.AddToScheme(rscheme))
- utilruntime.Must(batchv1.AddToScheme(rscheme))
- utilruntime.Must(workloads.AddToScheme(rscheme))
+ model.AddScheme(clientgoscheme.AddToScheme)
+ model.AddScheme(appsv1alpha1.AddToScheme)
+ model.AddScheme(appsv1beta1.AddToScheme)
+ model.AddScheme(appsv1.AddToScheme)
+ model.AddScheme(dpv1alpha1.AddToScheme)
+ model.AddScheme(snapshotv1.AddToScheme)
+ // model.AddScheme(snapshotv1beta1.AddToScheme)
+ // model.AddScheme(extensionsv1alpha1.AddToScheme)
+ // model.AddScheme(workloadsv1.AddToScheme)
+ // model.AddScheme(batchv1.AddToScheme)
}
-
-type gvkNObjKey struct {
- schema.GroupVersionKind
- client.ObjectKey
-}
-
-type owningObjects map[gvkNObjKey]client.Object
diff --git a/controllers/apps/cluster/suite_test.go b/controllers/apps/cluster/suite_test.go
index f7aa2991958..daad8ccd54d 100644
--- a/controllers/apps/cluster/suite_test.go
+++ b/controllers/apps/cluster/suite_test.go
@@ -46,12 +46,8 @@ import (
appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
appsv1beta1 "github.com/apecloud/kubeblocks/apis/apps/v1beta1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
- opsv1alpha1 "github.com/apecloud/kubeblocks/apis/operations/v1alpha1"
- workloadsv1 "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/controllers/apps"
- "github.com/apecloud/kubeblocks/controllers/apps/configuration"
"github.com/apecloud/kubeblocks/controllers/dataprotection"
- "github.com/apecloud/kubeblocks/controllers/k8score"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/model"
intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
@@ -127,10 +123,6 @@ var _ = BeforeSuite(func() {
Expect(err).NotTo(HaveOccurred())
model.AddScheme(appsv1alpha1.AddToScheme)
- err = opsv1alpha1.AddToScheme(scheme.Scheme)
- Expect(err).NotTo(HaveOccurred())
- model.AddScheme(opsv1alpha1.AddToScheme)
-
err = appsv1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
model.AddScheme(appsv1beta1.AddToScheme)
@@ -147,10 +139,6 @@ var _ = BeforeSuite(func() {
Expect(err).NotTo(HaveOccurred())
model.AddScheme(snapshotv1.AddToScheme)
- err = workloadsv1.AddToScheme(scheme.Scheme)
- Expect(err).NotTo(HaveOccurred())
- model.AddScheme(workloadsv1.AddToScheme)
-
// +kubebuilder:scaffold:rscheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
@@ -215,14 +203,6 @@ var _ = BeforeSuite(func() {
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
- clusterRecorder = k8sManager.GetEventRecorderFor("cluster-controller")
- err = (&ClusterReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- Recorder: clusterRecorder,
- }).SetupWithManager(k8sManager)
- Expect(err).ToNot(HaveOccurred())
-
err = (&apps.ServiceDescriptorReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
@@ -230,31 +210,18 @@ var _ = BeforeSuite(func() {
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
- err = (&k8score.EventReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("event-controller"),
- }).SetupWithManager(k8sManager, nil)
- Expect(err).ToNot(HaveOccurred())
-
- err = (&configuration.ConfigConstraintReconciler{
+ err = (&dataprotection.BackupPolicyTemplateReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("configuration-template-controller"),
+ Recorder: k8sManager.GetEventRecorderFor("backup-policy-template-controller"),
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
- err = (&configuration.ConfigurationReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("configuration-controller"),
- }).SetupWithManager(k8sManager, nil)
- Expect(err).ToNot(HaveOccurred())
-
- err = (&dataprotection.BackupPolicyTemplateReconciler{
+ clusterRecorder = k8sManager.GetEventRecorderFor("cluster-controller")
+ err = (&ClusterReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("backup-policy-template-controller"),
+ Recorder: clusterRecorder,
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
diff --git a/controllers/apps/cluster/transformer_cluster_backup_policy.go b/controllers/apps/cluster/transformer_cluster_backup_policy.go
index 70d2ca64b69..9f24d4b59f4 100644
--- a/controllers/apps/cluster/transformer_cluster_backup_policy.go
+++ b/controllers/apps/cluster/transformer_cluster_backup_policy.go
@@ -32,7 +32,6 @@ import (
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
- workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/pkg/common"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/component"
@@ -348,7 +347,7 @@ func (r *backupPolicyBuilder) syncBackupPolicy(backupPolicy *dpv1alpha1.BackupPo
r.syncBackupPolicyTargetSpec(backupPolicy)
}
-func (r *backupPolicyBuilder) syncRoleLabelSelector(target *dpv1alpha1.BackupTarget, role, alternateRole, fullCompName string) {
+func (r *backupPolicyBuilder) syncRoleLabelSelector(target *dpv1alpha1.BackupTarget, role, alternateRole, compName string) {
if len(role) == 0 || target == nil {
return
}
@@ -356,7 +355,7 @@ func (r *backupPolicyBuilder) syncRoleLabelSelector(target *dpv1alpha1.BackupTar
if podSelector.LabelSelector == nil || podSelector.LabelSelector.MatchLabels == nil {
podSelector.LabelSelector = &metav1.LabelSelector{MatchLabels: map[string]string{}}
}
- if r.getCompReplicas(fullCompName) == 1 {
+ if r.getCompReplicas(compName) == 1 {
delete(podSelector.LabelSelector.MatchLabels, constant.RoleLabelKey)
if podSelector.FallbackLabelSelector != nil && podSelector.FallbackLabelSelector.MatchLabels != nil {
delete(podSelector.FallbackLabelSelector.MatchLabels, constant.RoleLabelKey)
@@ -372,13 +371,13 @@ func (r *backupPolicyBuilder) syncRoleLabelSelector(target *dpv1alpha1.BackupTar
}
}
-func (r *backupPolicyBuilder) getCompReplicas(fullCompName string) int32 {
- its := &workloads.InstanceSet{}
- name := fmt.Sprintf("%s-%s", r.Cluster.Name, fullCompName)
- if err := r.Client.Get(r.Context, client.ObjectKey{Name: name, Namespace: r.Cluster.Namespace}, its); err != nil {
+func (r *backupPolicyBuilder) getCompReplicas(compName string) int32 {
+ comp := &appsv1.Component{}
+ name := fmt.Sprintf("%s-%s", r.Cluster.Name, compName)
+ if err := r.Client.Get(r.Context, client.ObjectKey{Name: name, Namespace: r.Cluster.Namespace}, comp); err != nil {
return r.compSpec.Replicas
}
- return *its.Spec.Replicas
+ return comp.Spec.Replicas
}
// buildBackupPolicy builds a new backup policy by the backup policy template.
@@ -483,8 +482,8 @@ func (r *backupPolicyBuilder) buildBackupTargets(targets []dpv1alpha1.BackupTarg
}
var backupTargets []dpv1alpha1.BackupTarget
for _, v := range shardComponents {
- fullComponentName := v.Labels[constant.KBAppComponentLabelKey]
- target := r.buildBackupTarget(sourceTargetMap[fullComponentName], r.backupPolicyTPL.Spec.Target, fullComponentName)
+ compName := v.Labels[constant.KBAppComponentLabelKey]
+ target := r.buildBackupTarget(sourceTargetMap[compName], r.backupPolicyTPL.Spec.Target, compName)
if target != nil {
backupTargets = append(backupTargets, *target)
}
@@ -495,11 +494,11 @@ func (r *backupPolicyBuilder) buildBackupTargets(targets []dpv1alpha1.BackupTarg
func (r *backupPolicyBuilder) buildBackupTarget(
oldTarget *dpv1alpha1.BackupTarget,
targetTpl dpv1alpha1.TargetInstance,
- fullCompName string,
+ compName string,
) *dpv1alpha1.BackupTarget {
if oldTarget != nil {
// if the target already exists, only sync the role by component replicas automatically.
- r.syncRoleLabelSelector(oldTarget, targetTpl.Role, targetTpl.FallbackRole, fullCompName)
+ r.syncRoleLabelSelector(oldTarget, targetTpl.Role, targetTpl.FallbackRole, compName)
return oldTarget
}
clusterName := r.Cluster.Name
@@ -510,7 +509,7 @@ func (r *backupPolicyBuilder) buildBackupTarget(
PodSelector: &dpv1alpha1.PodSelector{
Strategy: targetTpl.Strategy,
LabelSelector: &metav1.LabelSelector{
- MatchLabels: r.buildTargetPodLabels(targetTpl.Role, fullCompName),
+ MatchLabels: r.buildTargetPodLabels(targetTpl.Role, compName),
},
},
// dataprotection will use its dedicated service account if this field is empty.
@@ -519,16 +518,16 @@ func (r *backupPolicyBuilder) buildBackupTarget(
}
if len(targetTpl.Role) != 0 && len(targetTpl.FallbackRole) != 0 {
target.PodSelector.FallbackLabelSelector = &metav1.LabelSelector{
- MatchLabels: r.buildTargetPodLabels(targetTpl.FallbackRole, fullCompName),
+ MatchLabels: r.buildTargetPodLabels(targetTpl.FallbackRole, compName),
}
}
if r.isSharding {
- target.Name = fullCompName
+ target.Name = compName
}
// build the target connection credential
if targetTpl.Account != "" {
target.ConnectionCredential = &dpv1alpha1.ConnectionCredential{
- SecretName: constant.GenerateAccountSecretName(clusterName, fullCompName, targetTpl.Account),
+ SecretName: constant.GenerateAccountSecretName(clusterName, compName, targetTpl.Account),
PasswordKey: constant.AccountPasswdForSecret,
UsernameKey: constant.AccountNameForSecret,
}
diff --git a/controllers/apps/cluster/transformer_cluster_component.go b/controllers/apps/cluster/transformer_cluster_component.go
index 9912135f803..e1291291cdb 100644
--- a/controllers/apps/cluster/transformer_cluster_component.go
+++ b/controllers/apps/cluster/transformer_cluster_component.go
@@ -186,6 +186,7 @@ func copyAndMergeComponent(oldCompObj, newCompObj *appsv1.Component) *appsv1.Com
ictrlutil.MergeMetadataMapInplace(compProto.Labels, &compObjCopy.Labels)
// merge spec
+ compObjCopy.Spec.TerminationPolicy = compProto.Spec.TerminationPolicy
compObjCopy.Spec.CompDef = compProto.Spec.CompDef
compObjCopy.Spec.ServiceVersion = compProto.Spec.ServiceVersion
compObjCopy.Spec.ServiceRefs = compProto.Spec.ServiceRefs
diff --git a/controllers/apps/cluster/transformer_cluster_component_status_test.go b/controllers/apps/cluster/transformer_cluster_component_status_test.go
index 0be5168d61a..e25f10ec24a 100644
--- a/controllers/apps/cluster/transformer_cluster_component_status_test.go
+++ b/controllers/apps/cluster/transformer_cluster_component_status_test.go
@@ -27,6 +27,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/graph"
"github.com/apecloud/kubeblocks/pkg/controller/model"
@@ -72,7 +73,7 @@ var _ = Describe("cluster component status transformer", func() {
transCtx = &clusterTransformContext{
Context: testCtx.Ctx,
- Client: model.NewGraphClient(&mockReader{objs: []client.Object{}}),
+ Client: model.NewGraphClient(&appsutil.MockReader{Objects: []client.Object{}}),
EventRecorder: clusterRecorder,
Logger: logger,
Cluster: cluster.DeepCopy(),
@@ -97,8 +98,8 @@ var _ = Describe("cluster component status transformer", func() {
transCtx.Cluster.Status.Components = nil
// only have comp1 object in the cluster
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -137,8 +138,8 @@ var _ = Describe("cluster component status transformer", func() {
},
}
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -194,8 +195,8 @@ var _ = Describe("cluster component status transformer", func() {
}
// comp2 object is deleted???
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -235,8 +236,8 @@ var _ = Describe("cluster component status transformer", func() {
}
// delete comp2 object
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -267,8 +268,8 @@ var _ = Describe("cluster component status transformer", func() {
})
It("ok", func() {
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -319,8 +320,8 @@ var _ = Describe("cluster component status transformer", func() {
},
}
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -376,8 +377,8 @@ var _ = Describe("cluster component status transformer", func() {
transCtx.Cluster.Status.Shardings = nil
// only have sharding1 object in the cluster
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -417,8 +418,8 @@ var _ = Describe("cluster component status transformer", func() {
},
}
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -476,8 +477,8 @@ var _ = Describe("cluster component status transformer", func() {
}
// sharding2 object is deleted???
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -518,8 +519,8 @@ var _ = Describe("cluster component status transformer", func() {
}
// delete sharding2 object
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -551,8 +552,8 @@ var _ = Describe("cluster component status transformer", func() {
})
It("ok", func() {
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -596,8 +597,8 @@ var _ = Describe("cluster component status transformer", func() {
})
It("compose phases", func() {
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
@@ -647,8 +648,8 @@ var _ = Describe("cluster component status transformer", func() {
},
}
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
Namespace: testCtx.DefaultNamespace,
diff --git a/controllers/apps/cluster/transformer_cluster_component_test.go b/controllers/apps/cluster/transformer_cluster_component_test.go
index 02ed3bffe44..8f710f3c87c 100644
--- a/controllers/apps/cluster/transformer_cluster_component_test.go
+++ b/controllers/apps/cluster/transformer_cluster_component_test.go
@@ -32,6 +32,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/component"
"github.com/apecloud/kubeblocks/pkg/controller/graph"
@@ -521,8 +522,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault)
// mock first two components status as running and creating
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -547,8 +548,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault)
// mock one of first two components status as running
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -573,8 +574,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault)
// mock first two components status as running
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -603,8 +604,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault)
// mock first two components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Spec.Replicas = 2 // to update
comp.Status.Phase = appsv1.RunningComponentPhase
@@ -633,8 +634,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault)
// mock components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.CreatingComponentPhase // not ready
}),
@@ -667,8 +668,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault)
// mock components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Spec.Replicas = 2 // to update
comp.Status.Phase = appsv1.RunningComponentPhase
@@ -705,8 +706,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyDefault)
// mock components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -740,8 +741,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyStop)
// mock to stop all components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -778,8 +779,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyStop)
// mock to stop all components and the first component has been stopped
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Spec.Stop = pointer.Bool(true)
comp.Status.Phase = appsv1.StoppedComponentPhase
@@ -817,8 +818,8 @@ var _ = Describe("cluster component transformer test", func() {
transformer, transCtx, dag := newTransformerNCtx(clusterTopologyProvisionNUpdateOOD)
// mock first two components status as running
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -845,7 +846,7 @@ var _ = Describe("cluster component transformer test", func() {
}
// mock last two components status as running
- reader.objs = append(reader.objs, []client.Object{
+ reader.Objects = append(reader.Objects, []client.Object{
mockCompObj(transCtx, comp2aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -869,8 +870,8 @@ var _ = Describe("cluster component transformer test", func() {
Expect(transCtx.components[2].Name).Should(Equal(comp3aName))
// mock first component status as running
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -908,8 +909,8 @@ var _ = Describe("cluster component transformer test", func() {
Expect(transCtx.components[4].Name).Should(Equal(comp3aName))
// mock first component status as running
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -985,8 +986,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock first two components status as running and creating
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -1016,8 +1017,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock one of first two components status as running
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -1047,8 +1048,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock first two components status as running
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -1082,8 +1083,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock first two components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Spec.Replicas = 2 // to update
comp.Status.Phase = appsv1.RunningComponentPhase
@@ -1117,8 +1118,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.CreatingComponentPhase // not ready
}),
@@ -1156,8 +1157,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Spec.Replicas = 2 // to update
comp.Status.Phase = appsv1.RunningComponentPhase
@@ -1199,8 +1200,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -1238,8 +1239,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock to stop all components
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -1283,8 +1284,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock to stop all components and the first component has been stopped
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Spec.Stop = pointer.Bool(true)
comp.Status.Phase = appsv1.StoppedComponentPhase
@@ -1330,8 +1331,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock first two components status as running
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockShardingCompObj(transCtx, sharding1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -1358,7 +1359,7 @@ var _ = Describe("cluster component transformer test", func() {
}
// mock last two components status as running
- reader.objs = append(reader.objs, []client.Object{
+ reader.Objects = append(reader.Objects, []client.Object{
mockShardingCompObj(transCtx, sharding2aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -1428,7 +1429,7 @@ var _ = Describe("cluster component transformer test", func() {
}
// mock first component status as running
- reader := &mockReader{objs: suit.mockObjects(transCtx)}
+ reader := &appsutil.MockReader{Objects: suit.mockObjects(transCtx)}
transCtx.Client = model.NewGraphClient(reader)
// try again and check the last component
@@ -1461,8 +1462,8 @@ var _ = Describe("cluster component transformer test", func() {
f.AddSharding(sharding1aName, "", "")
})
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Spec.Replicas = 2 // to update
comp.Status.Phase = appsv1.RunningComponentPhase
@@ -1506,8 +1507,8 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock to stop all components and shardings
- reader := &mockReader{
- objs: []client.Object{
+ reader := &appsutil.MockReader{
+ Objects: []client.Object{
mockCompObj(transCtx, comp1aName, func(comp *appsv1.Component) {
comp.Status.Phase = appsv1.RunningComponentPhase
}),
@@ -1594,7 +1595,7 @@ var _ = Describe("cluster component transformer test", func() {
})
// mock first component status as running
- reader := &mockReader{objs: suit.firstMockObjects(transCtx)}
+ reader := &appsutil.MockReader{Objects: suit.firstMockObjects(transCtx)}
transCtx.Client = model.NewGraphClient(reader)
// sharding1aName(comp1aName) is not ready (exist) when updating comp1aName(sharding1aName)
@@ -1613,7 +1614,7 @@ var _ = Describe("cluster component transformer test", func() {
}
// mock another component status as running
- reader.objs = append(reader.objs, suit.secondMockObjects(transCtx)...)
+ reader.Objects = append(reader.Objects, suit.secondMockObjects(transCtx)...)
// try again
err = transformer.Transform(transCtx, newDAG(graphCli, transCtx.Cluster))
diff --git a/controllers/apps/cluster/transformer_cluster_deletion.go b/controllers/apps/cluster/transformer_cluster_deletion.go
index fae53ff4745..1b0a727e83e 100644
--- a/controllers/apps/cluster/transformer_cluster_deletion.go
+++ b/controllers/apps/cluster/transformer_cluster_deletion.go
@@ -32,6 +32,7 @@ import (
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/graph"
"github.com/apecloud/kubeblocks/pkg/controller/model"
@@ -128,10 +129,10 @@ func (t *clusterDeletionTransformer) Transform(ctx graph.TransformContext, dag *
delKindMap := map[string]sets.Empty{}
for _, o := range delObjs {
// skip the objects owned by the component and InstanceSet controller
- if isOwnedByComp(o) || isOwnedByInstanceSet(o) {
+ if isOwnedByComp(o) || appsutil.IsOwnedByInstanceSet(o) {
continue
}
- graphCli.Delete(dag, o, inUniversalContext4G())
+ graphCli.Delete(dag, o, appsutil.InUniversalContext4G())
delKindMap[o.GetObjectKind().GroupVersionKind().Kind] = sets.Empty{}
}
@@ -143,7 +144,7 @@ func (t *clusterDeletionTransformer) Transform(ctx graph.TransformContext, dag *
transCtx.Logger.Info(fmt.Sprintf("deleting the sub-resource kinds: %v", maps.Keys(delKindMap)))
graphCli.Status(dag, cluster, transCtx.Cluster)
// requeue since pvc isn't owned by cluster, and deleting it won't trigger event
- return newRequeueError(time.Second*1, "not all sub-resources deleted")
+ return intctrlutil.NewRequeueError(time.Second*1, "not all sub-resources deleted")
}
// fast return, that is stopping the plan.Build() stage and jump to plan.Execute() directly
diff --git a/controllers/apps/cluster/transformer_cluster_deletion_test.go b/controllers/apps/cluster/transformer_cluster_deletion_test.go
index cc3fe852d0f..5e2344ad881 100644
--- a/controllers/apps/cluster/transformer_cluster_deletion_test.go
+++ b/controllers/apps/cluster/transformer_cluster_deletion_test.go
@@ -30,6 +30,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/graph"
"github.com/apecloud/kubeblocks/pkg/controller/model"
@@ -75,8 +76,8 @@ var _ = Describe("clusterDeletionTransformer", func() {
GetObject()
cluster.DeletionTimestamp = &metav1.Time{Time: time.Now()}
- reader = &mockReader{
- objs: []client.Object{
+ reader = &appsutil.MockReader{
+ Objects: []client.Object{
clusterDef,
&appsv1.Component{
ObjectMeta: metav1.ObjectMeta{
@@ -132,8 +133,8 @@ var _ = Describe("clusterDeletionTransformer", func() {
Expect(dag.Vertices()).Should(HaveLen(1 + 1))
// delete component 1
- mockReader := reader.(*mockReader)
- mockReader.objs = slices.DeleteFunc(mockReader.objs, func(obj client.Object) bool {
+ mockReader := reader.(*appsutil.MockReader)
+ mockReader.Objects = slices.DeleteFunc(mockReader.Objects, func(obj client.Object) bool {
return obj.GetName() == "test-cluster-comp1"
})
dag = newDag(transCtx.Client.(model.GraphClient))
@@ -143,7 +144,7 @@ var _ = Describe("clusterDeletionTransformer", func() {
Expect(dag.Vertices()).Should(HaveLen(1 + 1))
// delete component 2
- mockReader.objs = slices.DeleteFunc(mockReader.objs, func(obj client.Object) bool {
+ mockReader.Objects = slices.DeleteFunc(mockReader.Objects, func(obj client.Object) bool {
return obj.GetName() == "test-cluster-comp2"
})
dag = newDag(transCtx.Client.(model.GraphClient))
@@ -152,7 +153,7 @@ var _ = Describe("clusterDeletionTransformer", func() {
Expect(dag.Vertices()).Should(HaveLen(1 + 1))
// delete component 3
- mockReader.objs = slices.DeleteFunc(mockReader.objs, func(obj client.Object) bool {
+ mockReader.Objects = slices.DeleteFunc(mockReader.Objects, func(obj client.Object) bool {
return obj.GetName() == "test-cluster-comp3"
})
dag = newDag(transCtx.Client.(model.GraphClient))
diff --git a/controllers/apps/cluster/transformer_cluster_ownership.go b/controllers/apps/cluster/transformer_cluster_ownership.go
index 97f9adeb927..04afb4325af 100644
--- a/controllers/apps/cluster/transformer_cluster_ownership.go
+++ b/controllers/apps/cluster/transformer_cluster_ownership.go
@@ -43,7 +43,7 @@ func (f *clusterOwnershipTransformer) Transform(ctx graph.TransformContext, dag
controllerutil.AddFinalizer(cluster, constant.DBClusterFinalizerName)
for _, object := range objects {
- if err := intctrlutil.SetOwnership(cluster, object, rscheme, constant.DBClusterFinalizerName); err != nil {
+ if err := intctrlutil.SetOwnership(cluster, object, model.GetScheme(), constant.DBClusterFinalizerName); err != nil {
if _, ok := err.(*controllerutil.AlreadyOwnedError); ok {
continue
}
diff --git a/controllers/apps/cluster/transformer_cluster_placement.go b/controllers/apps/cluster/transformer_cluster_placement.go
index a06b9164078..a4533b57e7e 100644
--- a/controllers/apps/cluster/transformer_cluster_placement.go
+++ b/controllers/apps/cluster/transformer_cluster_placement.go
@@ -25,6 +25,7 @@ import (
"strings"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/graph"
"github.com/apecloud/kubeblocks/pkg/controller/model"
@@ -49,7 +50,7 @@ func (t *clusterPlacementTransformer) Transform(ctx graph.TransformContext, dag
}
if t.assigned(transCtx) {
- transCtx.Context = intoContext(transCtx.Context, placement(transCtx.OrigCluster))
+ transCtx.Context = appsutil.IntoContext(transCtx.Context, appsutil.Placement(transCtx.OrigCluster))
return nil
}
@@ -60,7 +61,7 @@ func (t *clusterPlacementTransformer) Transform(ctx graph.TransformContext, dag
cluster.Annotations = make(map[string]string)
}
cluster.Annotations[constant.KBAppMultiClusterPlacementKey] = strings.Join(p, ",")
- transCtx.Context = intoContext(transCtx.Context, placement(cluster))
+ transCtx.Context = appsutil.IntoContext(transCtx.Context, appsutil.Placement(cluster))
return nil
}
diff --git a/controllers/apps/cluster/transformer_cluster_service.go b/controllers/apps/cluster/transformer_cluster_service.go
index 4253a7ca0c0..e739ca1830b 100644
--- a/controllers/apps/cluster/transformer_cluster_service.go
+++ b/controllers/apps/cluster/transformer_cluster_service.go
@@ -28,6 +28,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/common"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/builder"
@@ -68,13 +69,13 @@ func (t *clusterServiceTransformer) Transform(ctx graph.TransformContext, dag *g
toCreateServices, toDeleteServices, toUpdateServices := mapDiff(services, protoServices)
for svc := range toCreateServices {
- graphCli.Create(dag, protoServices[svc], inDataContext4G())
+ graphCli.Create(dag, protoServices[svc], appsutil.InDataContext4G())
}
for svc := range toUpdateServices {
t.updateService(dag, graphCli, services[svc], protoServices[svc])
}
for svc := range toDeleteServices {
- graphCli.Delete(dag, services[svc], inDataContext4G())
+ graphCli.Delete(dag, services[svc], appsutil.InDataContext4G())
}
return nil
}
@@ -224,9 +225,9 @@ func (t *clusterServiceTransformer) updateService(dag *graph.DAG, graphCli model
newSvc.Spec = proto.Spec
ctrlutil.MergeMetadataMapInplace(proto.Labels, &newSvc.Labels)
ctrlutil.MergeMetadataMapInplace(proto.Annotations, &newSvc.Annotations)
- resolveServiceDefaultFields(&running.Spec, &newSvc.Spec)
+ appsutil.ResolveServiceDefaultFields(&running.Spec, &newSvc.Spec)
if !reflect.DeepEqual(running, newSvc) {
- graphCli.Update(dag, running, newSvc, inDataContext4G())
+ graphCli.Update(dag, running, newSvc, appsutil.InDataContext4G())
}
}
diff --git a/controllers/apps/cluster/transformer_cluster_service_test.go b/controllers/apps/cluster/transformer_cluster_service_test.go
index aa9df856ba2..fcb4a6ea947 100644
--- a/controllers/apps/cluster/transformer_cluster_service_test.go
+++ b/controllers/apps/cluster/transformer_cluster_service_test.go
@@ -32,6 +32,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/graph"
"github.com/apecloud/kubeblocks/pkg/controller/model"
@@ -46,7 +47,7 @@ var _ = Describe("cluster service transformer test", func() {
)
var (
- reader *mockReader
+ reader *appsutil.MockReader
dag *graph.DAG
transCtx *clusterTransformContext
)
@@ -58,7 +59,7 @@ var _ = Describe("cluster service transformer test", func() {
}
BeforeEach(func() {
- reader = &mockReader{}
+ reader = &appsutil.MockReader{}
graphCli := model.NewGraphClient(reader)
cluster := testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, clusterDefName).
SetReplicas(1).
@@ -105,7 +106,7 @@ var _ = Describe("cluster service transformer test", func() {
Context("cluster service", func() {
It("deletion", func() {
- reader.objs = append(reader.objs, clusterNodePortService())
+ reader.Objects = append(reader.Objects, clusterNodePortService())
// remove cluster services
transCtx.Cluster.Spec.Services = nil
transformer := &clusterServiceTransformer{}
@@ -115,11 +116,11 @@ var _ = Describe("cluster service transformer test", func() {
// check services to delete
graphCli := transCtx.Client.(model.GraphClient)
objs := graphCli.FindAll(dag, &corev1.Service{})
- Expect(len(objs)).Should(Equal(len(reader.objs)))
+ Expect(len(objs)).Should(Equal(len(reader.Objects)))
slices.SortFunc(objs, func(a, b client.Object) int {
return strings.Compare(a.GetName(), b.GetName())
})
- for i := 0; i < len(reader.objs); i++ {
+ for i := 0; i < len(reader.Objects); i++ {
svc := objs[i].(*corev1.Service)
Expect(svc.Name).Should(Equal(clusterServiceName(clusterName, testapps.ServiceNodePortName)))
Expect(graphCli.IsAction(dag, svc, model.ActionDeletePtr())).Should(BeTrue())
@@ -139,7 +140,7 @@ var _ = Describe("cluster service transformer test", func() {
}
svc := clusterNodePortService()
svc.Spec.Ports = []corev1.ServicePort{port}
- reader.objs = append(reader.objs, svc)
+ reader.Objects = append(reader.Objects, svc)
transCtx.Cluster.Spec.Services[0].Spec.Ports = newPorts
transformer := &clusterServiceTransformer{}
err := transformer.Transform(transCtx, dag)
diff --git a/controllers/apps/cluster/transformer_cluster_validation.go b/controllers/apps/cluster/transformer_cluster_validation.go
index 56450ca3d0e..7afd4e72fa1 100644
--- a/controllers/apps/cluster/transformer_cluster_validation.go
+++ b/controllers/apps/cluster/transformer_cluster_validation.go
@@ -27,8 +27,10 @@ import (
"k8s.io/apimachinery/pkg/types"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/controller/component"
"github.com/apecloud/kubeblocks/pkg/controller/graph"
+ intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
"github.com/apecloud/kubeblocks/pkg/generics"
)
@@ -49,22 +51,22 @@ func (t *clusterValidationTransformer) Transform(ctx graph.TransformContext, dag
}()
if err = t.apiValidation(cluster); err != nil {
- return newRequeueError(requeueDuration, err.Error())
+ return intctrlutil.NewRequeueError(appsutil.RequeueDuration, err.Error())
}
if err = loadNCheckClusterDefinition(transCtx, cluster); err != nil {
- return newRequeueError(requeueDuration, err.Error())
+ return intctrlutil.NewRequeueError(appsutil.RequeueDuration, err.Error())
}
if err = t.checkDefinitionNamePattern(cluster); err != nil {
- return newRequeueError(requeueDuration, err.Error())
+ return intctrlutil.NewRequeueError(appsutil.RequeueDuration, err.Error())
}
if withClusterTopology(cluster) {
// check again with cluster definition loaded,
// and update topology to cluster spec in case the default topology changed.
if err = t.checkNUpdateClusterTopology(transCtx, cluster); err != nil {
- return newRequeueError(requeueDuration, err.Error())
+ return intctrlutil.NewRequeueError(appsutil.RequeueDuration, err.Error())
}
}
return nil
diff --git a/controllers/apps/cluster/utils.go b/controllers/apps/cluster/utils.go
index ccce65f5291..94dd630fb53 100644
--- a/controllers/apps/cluster/utils.go
+++ b/controllers/apps/cluster/utils.go
@@ -21,33 +21,21 @@ package cluster
import (
"context"
- "fmt"
"reflect"
- "time"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
- "k8s.io/client-go/tools/record"
+ "k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
- workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/model"
- "github.com/apecloud/kubeblocks/pkg/controller/multicluster"
- intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
dptypes "github.com/apecloud/kubeblocks/pkg/dataprotection/types"
)
-// default reconcile requeue after duration
-var requeueDuration = time.Millisecond * 1000
-
-func newRequeueError(after time.Duration, reason string) error {
- return intctrlutil.NewRequeueError(after, reason)
-}
-
func boolValue(b *bool) bool {
if b == nil {
return false
@@ -61,101 +49,12 @@ func mergeMap(dst, src map[string]string) {
}
}
-func placement(obj client.Object) string {
- if obj == nil || obj.GetAnnotations() == nil {
- return ""
- }
- return obj.GetAnnotations()[constant.KBAppMultiClusterPlacementKey]
+type gvkNObjKey struct {
+ schema.GroupVersionKind
+ client.ObjectKey
}
-func intoContext(ctx context.Context, placement string) context.Context {
- return multicluster.IntoContext(ctx, placement)
-}
-
-func inUniversalContext4C() *multicluster.ClientOption {
- return multicluster.InUniversalContext()
-}
-
-func inDataContext4G() model.GraphOption {
- return model.WithClientOption(multicluster.InDataContext())
-}
-
-func inUniversalContext4G() model.GraphOption {
- return model.WithClientOption(multicluster.InUniversalContext())
-}
-
-func clientOption(v *model.ObjectVertex) *multicluster.ClientOption {
- if v.ClientOpt != nil {
- opt, ok := v.ClientOpt.(*multicluster.ClientOption)
- if ok {
- return opt
- }
- panic(fmt.Sprintf("unknown client option: %T", v.ClientOpt))
- }
- return multicluster.InControlContext()
-}
-
-func resolveServiceDefaultFields(oldSpec, newSpec *corev1.ServiceSpec) {
- servicePorts := make(map[int32]corev1.ServicePort)
- for i, port := range oldSpec.Ports {
- servicePorts[port.Port] = oldSpec.Ports[i]
- }
- for i, port := range newSpec.Ports {
- servicePort, ok := servicePorts[port.Port]
- if !ok {
- continue // new port added
- }
- // if the service type is NodePort or LoadBalancer, and the nodeport is not set, we should use the nodeport of the exist service
- if shouldAllocateNodePorts(newSpec) && port.NodePort == 0 && servicePort.NodePort != 0 {
- port.NodePort = servicePort.NodePort
- newSpec.Ports[i].NodePort = servicePort.NodePort
- }
- if port.TargetPort.IntVal != 0 {
- continue
- }
- port.TargetPort = servicePort.TargetPort
- if reflect.DeepEqual(port, servicePort) {
- newSpec.Ports[i].TargetPort = servicePort.TargetPort
- }
- }
- if len(newSpec.ClusterIP) == 0 {
- newSpec.ClusterIP = oldSpec.ClusterIP
- }
- if len(newSpec.ClusterIPs) == 0 {
- newSpec.ClusterIPs = oldSpec.ClusterIPs
- }
- if len(newSpec.Type) == 0 {
- newSpec.Type = oldSpec.Type
- }
- if len(newSpec.SessionAffinity) == 0 {
- newSpec.SessionAffinity = oldSpec.SessionAffinity
- }
- if len(newSpec.IPFamilies) == 0 || (len(newSpec.IPFamilies) == 1 && *newSpec.IPFamilyPolicy != corev1.IPFamilyPolicySingleStack) {
- newSpec.IPFamilies = oldSpec.IPFamilies
- }
- if newSpec.IPFamilyPolicy == nil {
- newSpec.IPFamilyPolicy = oldSpec.IPFamilyPolicy
- }
- if newSpec.InternalTrafficPolicy == nil {
- newSpec.InternalTrafficPolicy = oldSpec.InternalTrafficPolicy
- }
- if newSpec.ExternalTrafficPolicy == "" && oldSpec.ExternalTrafficPolicy != "" {
- newSpec.ExternalTrafficPolicy = oldSpec.ExternalTrafficPolicy
- }
-}
-
-func shouldAllocateNodePorts(svc *corev1.ServiceSpec) bool {
- if svc.Type == corev1.ServiceTypeNodePort {
- return true
- }
- if svc.Type == corev1.ServiceTypeLoadBalancer {
- if svc.AllocateLoadBalancerNodePorts != nil {
- return *svc.AllocateLoadBalancerNodePorts
- }
- return true
- }
- return false
-}
+type owningObjects map[gvkNObjKey]client.Object
func getGVKName(object client.Object, scheme *runtime.Scheme) (*gvkNObjKey, error) {
gvk, err := apiutil.GVKForObject(object, scheme)
@@ -193,7 +92,7 @@ func getFailedBackups(ctx context.Context,
continue
}
if backup.Labels[dptypes.BackupTypeLabelKey] != string(dpv1alpha1.BackupTypeContinuous) {
- gvr, err := getGVKName(backup, rscheme)
+ gvr, err := getGVKName(backup, model.GetScheme())
if err != nil {
return err
}
@@ -209,14 +108,14 @@ func getOwningNamespacedObjects(ctx context.Context,
labels client.MatchingLabels,
kinds []client.ObjectList) (owningObjects, error) {
inNS := client.InNamespace(namespace)
- return getOwningObjectsWithOptions(ctx, cli, kinds, inNS, labels, inUniversalContext4C())
+ return getOwningObjectsWithOptions(ctx, cli, kinds, inNS, labels, appsutil.InUniversalContext4C())
}
func getOwningNonNamespacedObjects(ctx context.Context,
cli client.Reader,
labels client.MatchingLabels,
kinds []client.ObjectList) (owningObjects, error) {
- return getOwningObjectsWithOptions(ctx, cli, kinds, labels, inUniversalContext4C())
+ return getOwningObjectsWithOptions(ctx, cli, kinds, labels, appsutil.InUniversalContext4C())
}
func getOwningObjectsWithOptions(ctx context.Context,
@@ -235,7 +134,7 @@ func getOwningObjectsWithOptions(ctx context.Context,
for i := 0; i < l; i++ {
// get the underlying object
object := items.Index(i).Addr().Interface().(client.Object)
- name, err := getGVKName(object, rscheme)
+ name, err := getGVKName(object, model.GetScheme())
if err != nil {
return nil, err
}
@@ -245,23 +144,6 @@ func getOwningObjectsWithOptions(ctx context.Context,
return objs, nil
}
-// sendWarningEventWithError sends a warning event when occurs error.
-func sendWarningEventWithError(
- recorder record.EventRecorder,
- obj client.Object,
- reason string,
- err error) {
- // ignore requeue error
- if err == nil || intctrlutil.IsRequeueError(err) {
- return
- }
- controllerErr := intctrlutil.UnwrapControllerError(err)
- if controllerErr != nil {
- reason = string(controllerErr.Type)
- }
- recorder.Event(obj, corev1.EventTypeWarning, reason, err.Error())
-}
-
// isOwnedByComp is used to judge if the obj is owned by Component.
func isOwnedByComp(obj client.Object) bool {
for _, ref := range obj.GetOwnerReferences() {
@@ -271,13 +153,3 @@ func isOwnedByComp(obj client.Object) bool {
}
return false
}
-
-// isOwnedByInstanceSet is used to judge if the obj is owned by the InstanceSet controller
-func isOwnedByInstanceSet(obj client.Object) bool {
- for _, ref := range obj.GetOwnerReferences() {
- if ref.Kind == workloads.InstanceSetKind && ref.Controller != nil && *ref.Controller {
- return true
- }
- }
- return false
-}
diff --git a/controllers/apps/component/component_controller.go b/controllers/apps/component/component_controller.go
index e51fd464402..5e597546072 100644
--- a/controllers/apps/component/component_controller.go
+++ b/controllers/apps/component/component_controller.go
@@ -40,6 +40,7 @@ import (
appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/multicluster"
intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
@@ -126,7 +127,7 @@ func (r *ComponentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return intctrlutil.Requeue(reqCtx.Log, err.Error())
}
c := planBuilder.(*componentPlanBuilder)
- sendWarningEventWithError(r.Recorder, c.transCtx.Component, corev1.EventTypeWarning, err)
+ appsutil.SendWarningEventWithError(r.Recorder, c.transCtx.Component, corev1.EventTypeWarning, err)
return intctrlutil.RequeueWithError(err, reqCtx.Log, "")
}
@@ -188,7 +189,7 @@ func (r *ComponentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
func (r *ComponentReconciler) SetupWithManager(mgr ctrl.Manager, multiClusterMgr multicluster.Manager) error {
retryDurationMS := viper.GetInt(constant.CfgKeyCtrlrReconcileRetryDurationMS)
if retryDurationMS != 0 {
- requeueDuration = time.Millisecond * time.Duration(retryDurationMS)
+ appsutil.RequeueDuration = time.Millisecond * time.Duration(retryDurationMS)
}
if multiClusterMgr == nil {
return r.setupWithManager(mgr)
diff --git a/controllers/apps/component/component_controller_test.go b/controllers/apps/component/component_controller_test.go
index c658e773b23..550642a52b4 100644
--- a/controllers/apps/component/component_controller_test.go
+++ b/controllers/apps/component/component_controller_test.go
@@ -38,14 +38,12 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
- "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/utils/pointer"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
kbappsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
- dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/builder"
@@ -55,7 +53,6 @@ import (
"github.com/apecloud/kubeblocks/pkg/generics"
kbacli "github.com/apecloud/kubeblocks/pkg/kbagent/client"
testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps"
- testdp "github.com/apecloud/kubeblocks/pkg/testutil/dataprotection"
testk8s "github.com/apecloud/kubeblocks/pkg/testutil/k8s"
viper "github.com/apecloud/kubeblocks/pkg/viperx"
)
@@ -68,29 +65,29 @@ var _ = Describe("Component Controller", func() {
const (
compDefName = "test-compdef"
compVerName = "test-compver"
- clusterName = "test-cluster" // this become cluster prefix name if used with testapps.NewClusterFactory().WithRandomName()
+ clusterName = "test-cluster"
leader = "leader"
follower = "follower"
defaultCompName = "default"
)
var (
- compDefObj *kbappsv1.ComponentDefinition
- compVerObj *kbappsv1.ComponentVersion
- clusterObj *kbappsv1.Cluster
- clusterKey types.NamespacedName
- compObj *kbappsv1.Component
- compKey types.NamespacedName
- allSettings map[string]interface{}
+ compDefObj *kbappsv1.ComponentDefinition
+ compVerObj *kbappsv1.ComponentVersion
+ clusterKey types.NamespacedName
+ clusterUID string
+ clusterGeneration int64
+ compObj *kbappsv1.Component
+ compKey types.NamespacedName
+ settings map[string]interface{}
)
resetTestContext := func() {
compDefObj = nil
compVerObj = nil
- clusterObj = nil
- if allSettings != nil {
- Expect(viper.MergeConfigMap(allSettings)).ShouldNot(HaveOccurred())
- allSettings = nil
+ if settings != nil {
+ Expect(viper.MergeConfigMap(settings)).ShouldNot(HaveOccurred())
+ settings = nil
}
}
@@ -102,31 +99,27 @@ var _ = Describe("Component Controller", func() {
// create the new objects.
By("clean resources")
- // delete cluster(and all dependent sub-resources), cluster definition
- testapps.ClearClusterResourcesWithRemoveFinalizerOption(&testCtx)
+ // delete components (and all dependent sub-resources), and component definitions & versions
+ testapps.ClearComponentResourcesWithRemoveFinalizerOption(&testCtx)
// delete rest mocked objects
inNS := client.InNamespace(testCtx.DefaultNamespace)
ml := client.HasLabels{testCtx.TestObjLabelKey}
// namespaced
- testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ComponentSignature, true, inNS)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.ServiceAccountSignature, true, inNS)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.RoleSignature, true, inNS)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.RoleBindingSignature, true, inNS)
- testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml)
testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml)
- testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupSignature, true, inNS, ml)
- testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.BackupPolicySignature, true, inNS, ml)
+ testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml)
// non-namespaced
- testapps.ClearResources(&testCtx, generics.BackupPolicyTemplateSignature, ml)
- testapps.ClearResources(&testCtx, generics.ActionSetSignature, ml)
testapps.ClearResources(&testCtx, generics.StorageClassSignature, ml)
+
resetTestContext()
}
BeforeEach(func() {
cleanEnv()
- allSettings = viper.AllSettings()
+ settings = viper.AllSettings()
})
AfterEach(func() {
@@ -138,145 +131,85 @@ var _ = Describe("Component Controller", func() {
return str
}
- // test function helpers
- createAllDefinitionObjects := func() {
- By("Create a componentDefinition obj")
+ createDefinitionObjects := func() {
+ By("create a componentDefinition obj")
compDefObj = testapps.NewComponentDefinitionFactory(compDefName).
AddAnnotations(constant.SkipImmutableCheckAnnotationKey, "true").
SetDefaultSpec().
Create(&testCtx).
GetObject()
- By("Create a componentVersion obj")
+ By("create a componentVersion obj")
compVerObj = testapps.NewComponentVersionFactory(compVerName).
SetDefaultSpec(compDefName).
Create(&testCtx).
GetObject()
- By("Mock kb-agent client for the default transformer of system accounts provision")
+ By("mock kb-agent client for the default transformer of system accounts provision")
testapps.MockKBAgentClientDefault()
}
- waitForCreatingResourceCompletely := func(clusterKey client.ObjectKey, compNames ...string) {
- Eventually(testapps.ClusterReconciled(&testCtx, clusterKey)).Should(BeTrue())
- cluster := &kbappsv1.Cluster{}
- Eventually(testapps.CheckObjExists(&testCtx, clusterKey, cluster, true)).Should(Succeed())
- for _, compName := range compNames {
- compPhase := kbappsv1.CreatingComponentPhase
- for _, spec := range cluster.Spec.ComponentSpecs {
- if spec.Name == compName && spec.Replicas == 0 {
- compPhase = kbappsv1.StoppedComponentPhase
- }
- }
- Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(compPhase))
- }
- }
-
- createClusterObjX := func(compName, compDefName string,
- processor func(*testapps.MockClusterFactory), phase *kbappsv1.ClusterPhase) {
- factory := testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, "").
- WithRandomName().
- AddComponent(compName, compDefName).
+ createCompObjX := func(compName, compDefName string, processor func(*testapps.MockComponentFactory), phase *kbappsv1.ComponentPhase) {
+ By("randomize a cluster name and UID")
+ clusterKey = types.NamespacedName{
+ Namespace: testCtx.DefaultNamespace,
+ Name: testapps.GetRandomizedKey("", clusterName).Name,
+ }
+ clusterUID = string(uuid.NewUUID())
+ clusterGeneration = 1
+
+ By("creating a component")
+ compObjName := constant.GenerateClusterComponentName(clusterKey.Name, compName)
+ factory := testapps.NewComponentFactory(testCtx.DefaultNamespace, compObjName, compDefName).
+ AddLabels().
+ AddAnnotations(constant.KubeBlocksGenerationKey, strconv.FormatInt(clusterGeneration, 10)).
+ AddAnnotations(constant.CRDAPIVersionAnnotationKey, kbappsv1.GroupVersion.String()).
+ AddAnnotations(constant.KBAppClusterUIDKey, clusterUID).
+ AddLabelsInMap(constant.GetCompLabelsWithDef(clusterKey.Name, compName, compDefName)).
SetReplicas(1)
if processor != nil {
processor(factory)
}
- clusterObj = factory.Create(&testCtx).GetObject()
- clusterKey = client.ObjectKeyFromObject(clusterObj)
+ compObj = factory.Create(&testCtx).GetObject()
+ compKey = client.ObjectKeyFromObject(compObj)
- By("Waiting for the cluster enter expected phase")
- Eventually(testapps.ClusterReconciled(&testCtx, clusterKey)).Should(BeTrue())
- if phase == nil || *phase == "" {
- Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(kbappsv1.CreatingClusterPhase))
+ By("waiting for the component enter expected phase")
+ Eventually(testapps.CheckObjExists(&testCtx, compKey, compObj, true)).Should(Succeed())
+ if phase == nil || *phase != "" {
+ Eventually(testapps.ComponentReconciled(&testCtx, compKey)).Should(BeTrue())
} else {
- Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(*phase))
- }
-
- By("Waiting for the component enter expected phase")
- compKey = types.NamespacedName{
- Namespace: clusterObj.Namespace,
- Name: component.FullName(clusterObj.Name, compName),
+ Consistently(testapps.ComponentReconciled(&testCtx, compKey)).Should(BeFalse())
}
- compObj = &kbappsv1.Component{}
- Eventually(testapps.CheckObjExists(&testCtx, compKey, compObj, true)).Should(Succeed())
if phase == nil {
- Eventually(testapps.ComponentReconciled(&testCtx, compKey)).Should(BeTrue())
Eventually(testapps.GetComponentPhase(&testCtx, compKey)).Should(Equal(kbappsv1.CreatingComponentPhase))
+ } else if *phase != "" {
+ Eventually(testapps.GetComponentPhase(&testCtx, compKey)).Should(Equal(*phase))
}
}
- createClusterObj := func(compName, compDefName string, processor func(*testapps.MockClusterFactory)) {
- By("Creating a cluster with new component definition")
- createClusterObjX(compName, compDefName, processor, nil)
+ createCompObj := func(compName, compDefName string, processor func(*testapps.MockComponentFactory)) {
+ createCompObjX(compName, compDefName, processor, nil)
}
- createClusterObjWithPhase := func(compName, compDefName string, processor func(*testapps.MockClusterFactory), phase kbappsv1.ClusterPhase) {
- By("Creating a cluster with new component definition")
- createClusterObjX(compName, compDefName, processor, &phase)
+ createCompObjWithPhase := func(compName, compDefName string, processor func(*testapps.MockComponentFactory), phase kbappsv1.ComponentPhase) {
+ createCompObjX(compName, compDefName, processor, &phase)
}
- mockCompRunning := func(compName string) {
- itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, client.ObjectKeyFromObject(clusterObj), compName)
+ mockCompRunning := func(compName string, comp *kbappsv1.Component) {
+ itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, compName)
Expect(itsList.Items).Should(HaveLen(1))
its := itsList.Items[0]
- pods := testapps.MockInstanceSetPods(&testCtx, &its, clusterObj, compName)
+ pods := testapps.MockInstanceSetPods2(&testCtx, &its, clusterKey.Name, compName, comp)
Expect(testapps.ChangeObjStatus(&testCtx, &its, func() {
testk8s.MockInstanceSetReady(&its, pods...)
})).ShouldNot(HaveOccurred())
Eventually(testapps.GetComponentPhase(&testCtx, types.NamespacedName{
- Namespace: clusterObj.Namespace,
- Name: component.FullName(clusterObj.Name, compName),
+ Namespace: clusterKey.Namespace,
+ Name: component.FullName(clusterKey.Name, compName),
})).Should(Equal(kbappsv1.RunningComponentPhase))
}
- // createCompObj := func(compName, compDefName, serviceVersion string, processor func(*testapps.MockComponentFactory)) {
- // By("Creating a component")
- // factory := testapps.NewComponentFactory(testCtx.DefaultNamespace, component.FullName(clusterObj.Name, compName), compDefName).
- // AddAnnotations(constant.KBAppClusterUIDKey, string(clusterObj.UID)),
- // AddLabels(constant.AppInstanceLabelKey, clusterObj.Name).
- // SetServiceVersion(serviceVersion).
- // SetReplicas(1)
- // if processor != nil {
- // processor(factory)
- // }
- // compObj = factory.Create(&testCtx).GetObject()
- // compKey = client.ObjectKeyFromObject(compObj)
- //
- // Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
- // g.Expect(comp.Status.ObservedGeneration).To(BeEquivalentTo(comp.Generation))
- // g.Expect(comp.Status.Phase).To(Equal(kbappsv1.CreatingComponentPhase))
- // })).Should(Succeed())
- // }
-
- changeCompReplicas := func(clusterName types.NamespacedName, replicas int32, comp *kbappsv1.ClusterComponentSpec) {
- Expect(testapps.GetAndChangeObj(&testCtx, clusterName, func(cluster *kbappsv1.Cluster) {
- for i, clusterComp := range cluster.Spec.ComponentSpecs {
- if clusterComp.Name == comp.Name {
- cluster.Spec.ComponentSpecs[i].Replicas = replicas
- }
- }
- })()).ShouldNot(HaveOccurred())
- }
-
- changeComponentReplicas := func(clusterName types.NamespacedName, replicas int32) {
- Expect(testapps.GetAndChangeObj(&testCtx, clusterName, func(cluster *kbappsv1.Cluster) {
- Expect(cluster.Spec.ComponentSpecs).Should(HaveLen(1))
- cluster.Spec.ComponentSpecs[0].Replicas = replicas
- })()).ShouldNot(HaveOccurred())
- }
-
- getStableClusterObservedGeneration := func(clusterKey types.NamespacedName, waitFor *time.Duration) (int64, *kbappsv1.Cluster) {
- sleepTime := 300 * time.Millisecond
- if waitFor != nil {
- sleepTime = *waitFor
- }
- time.Sleep(sleepTime)
- cluster := &kbappsv1.Cluster{}
- Expect(testCtx.Cli.Get(testCtx.Ctx, clusterKey, cluster)).Should(Succeed())
- return cluster.Status.ObservedGeneration, cluster
- }
-
- getStableComponentObservedGeneration := func(compKey types.NamespacedName, waitFor *time.Duration) (int64, *kbappsv1.Component) {
+ stableCompObservedGeneration := func(compKey types.NamespacedName, waitFor *time.Duration) (int64, *kbappsv1.Component) {
sleepTime := 300 * time.Millisecond
if waitFor != nil {
sleepTime = *waitFor
@@ -287,24 +220,29 @@ var _ = Describe("Component Controller", func() {
return comp.Status.ObservedGeneration, comp
}
+ changeCompReplicas := func(compKey types.NamespacedName, replicas int32) {
+ Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) {
+ comp.Spec.Replicas = replicas
+ })()).ShouldNot(HaveOccurred())
+ }
+
testChangeReplicas := func(compName, compDefName string) {
compDefKey := client.ObjectKeyFromObject(compDefObj)
Eventually(testapps.GetAndChangeObj(&testCtx, compDefKey, func(compDef *kbappsv1.ComponentDefinition) {
compDef.Spec.LifecycleActions.MemberLeave = nil
})).Should(Succeed())
- createClusterObj(compName, compDefName, nil)
- replicasSeq := []int32{5, 3, 1, 2, 4}
+ createCompObj(compName, compDefName, nil)
expectedOG := int64(1)
- for _, replicas := range replicasSeq {
- By(fmt.Sprintf("Change replicas to %d", replicas))
- changeComponentReplicas(clusterKey, replicas)
+ for _, replicas := range []int32{5, 3, 1, 2, 4} {
+ By(fmt.Sprintf("change replicas to %d", replicas))
+ changeCompReplicas(compKey, replicas)
expectedOG++
- By("Checking cluster status and the number of replicas changed")
- Eventually(testapps.CheckObj(&testCtx, clusterKey, func(g Gomega, fetched *kbappsv1.Cluster) {
- g.Expect(fetched.Status.ObservedGeneration).To(BeEquivalentTo(expectedOG))
- g.Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(BeElementOf(kbappsv1.CreatingClusterPhase, kbappsv1.UpdatingClusterPhase))
+ By("checking component status and the number of replicas changed")
+ Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
+ g.Expect(comp.Status.ObservedGeneration).To(BeEquivalentTo(expectedOG))
+ g.Eventually(testapps.GetComponentPhase(&testCtx, compKey)).Should(BeElementOf(kbappsv1.CreatingComponentPhase, kbappsv1.UpdatingComponentPhase))
})).Should(Succeed())
itsKey := compKey
@@ -320,12 +258,12 @@ var _ = Describe("Component Controller", func() {
target = int32(0)
)
- createClusterObj(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.SetReplicas(init)
})
By(fmt.Sprintf("change replicas to %d", target))
- changeComponentReplicas(clusterKey, target)
+ changeCompReplicas(compKey, target)
By("checking the number of replicas in component as expected")
Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
@@ -344,27 +282,31 @@ var _ = Describe("Component Controller", func() {
})).Should(Succeed())
}
+ changeReplicasLimit := func(compDefName string, minReplicas, maxReplicas int32) {
+ By(fmt.Sprintf("set replicas limit to [%d, %d]", minReplicas, maxReplicas))
+ compDefKey := types.NamespacedName{Name: compDefName}
+ Eventually(testapps.GetAndChangeObj(&testCtx, compDefKey, func(compDef *kbappsv1.ComponentDefinition) {
+ compDef.Spec.ReplicasLimit = &kbappsv1.ReplicasLimit{
+ MinReplicas: minReplicas,
+ MaxReplicas: maxReplicas,
+ }
+ })).Should(Succeed())
+ }
+
testChangeReplicasToZeroWithReplicasLimit := func(compName, compDefName string) {
var (
init = int32(3)
target = int32(0)
)
- By("set min replicas limit to 0")
- compDefKey := client.ObjectKeyFromObject(compDefObj)
- Eventually(testapps.GetAndChangeObj(&testCtx, compDefKey, func(compDef *kbappsv1.ComponentDefinition) {
- compDef.Spec.ReplicasLimit = &kbappsv1.ReplicasLimit{
- MinReplicas: 0,
- MaxReplicas: 5,
- }
- })).Should(Succeed())
+ changeReplicasLimit(compDefName, 0, 16384)
- createClusterObj(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.SetReplicas(init)
})
By(fmt.Sprintf("change replicas to %d", target))
- changeComponentReplicas(clusterKey, target)
+ changeCompReplicas(compKey, target)
By("checking the number of replicas in component as expected")
Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
@@ -399,15 +341,15 @@ var _ = Describe("Component Controller", func() {
CheckedCreate(&testCtx)
}
- mockComponentPVCsAndBound := func(comp *kbappsv1.ClusterComponentSpec, replicas int, create bool, storageClassName string) {
+ mockComponentPVCsAndBound := func(comp *kbappsv1.Component, compName string, replicas int, create bool, storageClassName string) {
for i := 0; i < replicas; i++ {
- for _, vct := range comp.VolumeClaimTemplates {
+ for _, vct := range comp.Spec.VolumeClaimTemplates {
pvcKey := types.NamespacedName{
Namespace: clusterKey.Namespace,
- Name: getPVCName(vct.Name, comp.Name, i),
+ Name: getPVCName(vct.Name, compName, i),
}
if create {
- createPVC(clusterKey.Name, pvcKey.Name, comp.Name, vct.Spec.Resources.Requests.Storage().String(), storageClassName)
+ createPVC(clusterKey.Name, pvcKey.Name, compName, vct.Spec.Resources.Requests.Storage().String(), storageClassName)
}
Eventually(testapps.CheckObjExists(&testCtx, pvcKey,
&corev1.PersistentVolumeClaim{}, true)).Should(Succeed())
@@ -422,9 +364,8 @@ var _ = Describe("Component Controller", func() {
}
}
- mockPodsForTest := func(cluster *kbappsv1.Cluster, componentName, compDefName string, number int) []*corev1.Pod {
- clusterName := cluster.Name
- itsName := cluster.Name + "-" + componentName
+ mockPodsForTest := func(clusterName, compName, compDefName string, number int) []*corev1.Pod {
+ itsName := clusterName + "-" + compName
pods := make([]*corev1.Pod, 0)
for i := 0; i < number; i++ {
pod := &corev1.Pod{
@@ -435,7 +376,7 @@ var _ = Describe("Component Controller", func() {
constant.AppManagedByLabelKey: constant.AppName,
constant.AppNameLabelKey: compDefName,
constant.AppInstanceLabelKey: clusterName,
- constant.KBAppComponentLabelKey: componentName,
+ constant.KBAppComponentLabelKey: compName,
appsv1.ControllerRevisionHashLabelKey: "mock-version",
},
Annotations: map[string]string{
@@ -457,16 +398,16 @@ var _ = Describe("Component Controller", func() {
return pods
}
- horizontalScaleComp := func(updatedReplicas int, comp *kbappsv1.ClusterComponentSpec, storageClassName string) {
+ horizontalScaleComp := func(updatedReplicas int, comp *kbappsv1.Component, compName, storageClassName string) {
By("Mocking component PVCs to bound")
- mockComponentPVCsAndBound(comp, int(comp.Replicas), true, storageClassName)
+ mockComponentPVCsAndBound(comp, compName, int(comp.Spec.Replicas), true, storageClassName)
By("Checking its replicas right")
- itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, comp.Name)
- Expect(int(*itsList.Items[0].Spec.Replicas)).To(BeEquivalentTo(comp.Replicas))
+ itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, compName)
+ Expect(int(*itsList.Items[0].Spec.Replicas)).To(BeEquivalentTo(comp.Spec.Replicas))
By("Creating mock pods in InstanceSet")
- pods := mockPodsForTest(clusterObj, comp.Name, comp.ComponentDef, int(comp.Replicas))
+ pods := mockPodsForTest(clusterKey.Name, compName, comp.Spec.CompDef, int(comp.Spec.Replicas))
for i := range pods {
if i == 0 {
pods[i].Labels[constant.RoleLabelKey] = leader
@@ -484,40 +425,36 @@ var _ = Describe("Component Controller", func() {
})).ShouldNot(HaveOccurred())
By("Waiting for the component enter Running phase")
- compKey := types.NamespacedName{
- Namespace: clusterKey.Namespace,
- Name: fmt.Sprintf("%s-%s", clusterKey.Name, comp.Name),
- }
Eventually(testapps.GetComponentPhase(&testCtx, compKey)).Should(Equal(kbappsv1.RunningComponentPhase))
By(fmt.Sprintf("Changing replicas to %d", updatedReplicas))
- changeCompReplicas(clusterKey, int32(updatedReplicas), comp)
+ changeCompReplicas(compKey, int32(updatedReplicas))
checkUpdatedItsReplicas := func() {
By("Checking updated its replicas")
Eventually(func() int32 {
- itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, comp.Name)
+ itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, compName)
return *itsList.Items[0].Spec.Replicas
}).Should(BeEquivalentTo(updatedReplicas))
}
scaleOutCheck := func() {
- if comp.Replicas == 0 {
+ if comp.Spec.Replicas == 0 {
return
}
By("Mock PVCs and set status to bound")
- mockComponentPVCsAndBound(comp, updatedReplicas, true, storageClassName)
+ mockComponentPVCsAndBound(comp, compName, updatedReplicas, true, storageClassName)
checkUpdatedItsReplicas()
By("Checking updated its replicas' PVC and size")
- for _, vct := range comp.VolumeClaimTemplates {
+ for _, vct := range comp.Spec.VolumeClaimTemplates {
var volumeQuantity resource.Quantity
for i := 0; i < updatedReplicas; i++ {
pvcKey := types.NamespacedName{
Namespace: clusterKey.Namespace,
- Name: getPVCName(vct.Name, comp.Name, i),
+ Name: getPVCName(vct.Name, compName, i),
}
Eventually(testapps.CheckObj(&testCtx, pvcKey, func(g Gomega, pvc *corev1.PersistentVolumeClaim) {
if volumeQuantity.IsZero() {
@@ -536,12 +473,12 @@ var _ = Describe("Component Controller", func() {
pvcList := corev1.PersistentVolumeClaimList{}
g.Expect(k8sClient.List(testCtx.Ctx, &pvcList, client.MatchingLabels{
constant.AppInstanceLabelKey: clusterKey.Name,
- constant.KBAppComponentLabelKey: comp.Name,
+ constant.KBAppComponentLabelKey: compName,
})).Should(Succeed())
for _, pvc := range pvcList.Items {
ss := strings.Split(pvc.Name, "-")
idx, _ := strconv.Atoi(ss[len(ss)-1])
- if idx >= updatedReplicas && idx < int(comp.Replicas) {
+ if idx >= updatedReplicas && idx < int(comp.Spec.Replicas) {
g.Expect(pvc.DeletionTimestamp).Should(BeNil())
}
}
@@ -556,12 +493,12 @@ var _ = Describe("Component Controller", func() {
pvcList := corev1.PersistentVolumeClaimList{}
g.Expect(k8sClient.List(testCtx.Ctx, &pvcList, client.MatchingLabels{
constant.AppInstanceLabelKey: clusterKey.Name,
- constant.KBAppComponentLabelKey: comp.Name,
+ constant.KBAppComponentLabelKey: compName,
})).Should(Succeed())
for _, pvc := range pvcList.Items {
ss := strings.Split(pvc.Name, "-")
idx, _ := strconv.Atoi(ss[len(ss)-1])
- if idx >= updatedReplicas && idx < int(comp.Replicas) {
+ if idx >= updatedReplicas && idx < int(comp.Spec.Replicas) {
g.Expect(pvc.DeletionTimestamp).ShouldNot(BeNil())
}
}
@@ -572,7 +509,7 @@ var _ = Describe("Component Controller", func() {
podList := corev1.PodList{}
g.Expect(k8sClient.List(testCtx.Ctx, &podList, client.MatchingLabels{
constant.AppInstanceLabelKey: clusterKey.Name,
- constant.KBAppComponentLabelKey: comp.Name,
+ constant.KBAppComponentLabelKey: compName,
})).Should(Succeed())
for _, pod := range podList.Items {
ss := strings.Split(pod.Name, "-")
@@ -586,49 +523,62 @@ var _ = Describe("Component Controller", func() {
}).Should(Succeed())
}
- if int(comp.Replicas) < updatedReplicas {
+ if int(comp.Spec.Replicas) < updatedReplicas {
scaleOutCheck()
}
- if int(comp.Replicas) > updatedReplicas {
+ if int(comp.Spec.Replicas) > updatedReplicas {
scaleInCheck()
}
}
- horizontalScale := func(updatedReplicas int, storageClassName string, compDefNames ...string) {
+ horizontalScale := func(updatedReplicas int, storageClassName, compName string, compDefNames ...string) {
defer kbacli.UnsetMockClient()
- initialGeneration, cluster := getStableClusterObservedGeneration(clusterKey, nil)
+ initialGeneration, comp := stableCompObservedGeneration(compKey, nil)
- By("Mocking all components' PVCs to bound")
- for _, comp := range cluster.Spec.ComponentSpecs {
- mockComponentPVCsAndBound(&comp, int(comp.Replicas), true, storageClassName)
- }
+ By("mock all component PVCs to bound")
+ mockComponentPVCsAndBound(comp, compName, int(comp.Spec.Replicas), true, storageClassName)
- for i, comp := range cluster.Spec.ComponentSpecs {
- testapps.MockKBAgentClient4HScale(&testCtx, clusterKey, comp.Name, podAnnotationKey4Test, updatedReplicas)
+ By("mock kb-agent for h-scale")
+ testapps.MockKBAgentClient4HScale(&testCtx, clusterKey, compName, podAnnotationKey4Test, updatedReplicas)
- By(fmt.Sprintf("H-scale component %s", comp.Name))
- horizontalScaleComp(updatedReplicas, &cluster.Spec.ComponentSpecs[i], storageClassName)
- }
+ By(fmt.Sprintf("h-scale component %s", compName))
+ horizontalScaleComp(updatedReplicas, comp, compName, storageClassName)
- By("Checking cluster status and the number of replicas changed")
- Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).
- Should(BeEquivalentTo(int(initialGeneration) + len(cluster.Spec.ComponentSpecs)))
+ By("check component status and the number of replicas changed")
+ Eventually(testapps.GetComponentObservedGeneration(&testCtx, compKey)).Should(BeEquivalentTo(int(initialGeneration) + 1))
}
testHorizontalScale := func(compName, compDefName string, initialReplicas, updatedReplicas int32) {
- By("Creating a single component cluster with VolumeClaimTemplate")
+ By("creating a component with VolumeClaimTemplate")
pvcSpec := testapps.NewPVCSpec("1Gi")
- createClusterObj(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.SetReplicas(initialReplicas).
AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec).
AddVolumeClaimTemplate(testapps.LogVolumeName, pvcSpec)
})
- horizontalScale(int(updatedReplicas), testk8s.DefaultStorageClassName, compDefName)
+ horizontalScale(int(updatedReplicas), testk8s.DefaultStorageClassName, compName, compDefName)
+ }
+
+ testHorizontalScaleWithDataActions := func(compName, compDefName string, initialReplicas, updatedReplicas int32) {
+ By("update cmpd to enable data actions")
+ Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(compDefObj), func(cmpd *kbappsv1.ComponentDefinition) {
+ cmpd.Spec.LifecycleActions.DataDump = testapps.NewLifecycleAction("data-dump")
+ cmpd.Spec.LifecycleActions.DataLoad = testapps.NewLifecycleAction("data-load")
+ })()).Should(Succeed())
+
+ By("creating a component with VolumeClaimTemplate")
+ pvcSpec := testapps.NewPVCSpec("1Gi")
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
+ f.SetReplicas(initialReplicas).
+ AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec)
+ })
+
+ horizontalScale(int(updatedReplicas), testk8s.DefaultStorageClassName, compName, compDefName)
}
- testVolumeExpansion := func(compDef *kbappsv1.ComponentDefinition, compName string, storageClass *storagev1.StorageClass) {
+ testVolumeExpansion := func(compName, compDefName string, storageClass *storagev1.StorageClass) {
var (
insTPLName = "foo"
replicas = 3
@@ -641,22 +591,21 @@ var _ = Describe("Component Controller", func() {
compAndTPLName = fmt.Sprintf("%s-%s", compName, insTPLName)
)
- By("Mock a StorageClass which allows resize")
+ By("mock a StorageClass which allows resize")
Expect(*storageClass.AllowVolumeExpansion).Should(BeTrue())
- By("Creating a cluster with VolumeClaimTemplate")
+ By("creating a component with VolumeClaimTemplate")
pvcSpec := testapps.NewPVCSpec(volumeSize)
pvcSpec.StorageClassName = &storageClass.Name
By("Create cluster and waiting for the cluster initialized")
- createClusterObj(compName, compDef.GetName(), func(f *testapps.MockClusterFactory) {
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.SetReplicas(int32(replicas)).
- SetServiceVersion(compDef.Spec.ServiceVersion).
AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec).
AddVolumeClaimTemplate(testapps.LogVolumeName, pvcSpec).
- AddInstances(compName, kbappsv1.InstanceTemplate{
+ AddInstances(kbappsv1.InstanceTemplate{
Name: insTPLName,
- Replicas: pointer.Int32(1),
+ Replicas: ptr.To(int32(1)),
VolumeClaimTemplates: []kbappsv1.ClusterComponentVolumeClaimTemplate{
{Name: testapps.DataVolumeName, Spec: pvcSpec},
{Name: testapps.LogVolumeName, Spec: pvcSpec},
@@ -664,8 +613,8 @@ var _ = Describe("Component Controller", func() {
})
})
- By("Checking the replicas")
- itsList := testk8s.ListAndCheckInstanceSet(&testCtx, clusterKey)
+ By("checking the replicas")
+ itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, compName)
its := &itsList.Items[0]
Expect(*its.Spec.Replicas).Should(BeEquivalentTo(replicas))
pvcName := func(vctName string, index int) string {
@@ -693,7 +642,9 @@ var _ = Describe("Component Controller", func() {
constant.AppInstanceLabelKey: clusterKey.Name,
constant.KBAppComponentLabelKey: compName,
}},
- Spec: pvcSpec.ToV1PersistentVolumeClaimSpec(),
+ Spec: func() corev1.PersistentVolumeClaimSpec {
+ return intctrlutil.ToCoreV1PVCs([]kbappsv1.ClusterComponentVolumeClaimTemplate{{Spec: pvcSpec}})[0].Spec
+ }(),
}
if i == replicas-1 {
pvc.Labels[constant.KBAppComponentInstanceTemplateLabelKey] = insTPLName
@@ -710,18 +661,16 @@ var _ = Describe("Component Controller", func() {
}
By("mock pods of component are available")
- mockPods := testapps.MockInstanceSetPods(&testCtx, its, clusterObj, compName)
+ mockPods := testapps.MockInstanceSetPods2(&testCtx, its, clusterKey.Name, compName, compObj)
Expect(testapps.ChangeObjStatus(&testCtx, its, func() {
testk8s.MockInstanceSetReady(its, mockPods...)
})).ShouldNot(HaveOccurred())
- initialGeneration, _ := getStableClusterObservedGeneration(clusterKey, nil)
- Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(kbappsv1.RunningComponentPhase))
- Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(kbappsv1.RunningClusterPhase))
+ initialGeneration, _ := stableCompObservedGeneration(compKey, nil)
+ Eventually(testapps.GetComponentPhase(&testCtx, compKey)).Should(Equal(kbappsv1.RunningComponentPhase))
- By("Updating data PVC storage size")
- Expect(testapps.GetAndChangeObj(&testCtx, clusterKey, func(cluster *kbappsv1.Cluster) {
- comp := &cluster.Spec.ComponentSpecs[0]
+ By("updating data PVC storage size")
+ Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) {
expandVolume := func(vcts []kbappsv1.ClusterComponentVolumeClaimTemplate, quantity resource.Quantity) {
for i, vct := range vcts {
if vct.Name == testapps.DataVolumeName {
@@ -729,19 +678,18 @@ var _ = Describe("Component Controller", func() {
}
}
}
- expandVolume(comp.VolumeClaimTemplates, newVolumeQuantity)
- for i, insTPL := range comp.Instances {
+ expandVolume(comp.Spec.VolumeClaimTemplates, newVolumeQuantity)
+ for i, insTPL := range comp.Spec.Instances {
if insTPL.Name == insTPLName {
- expandVolume(comp.Instances[i].VolumeClaimTemplates, newFooVolumeQuantity)
+ expandVolume(comp.Spec.Instances[i].VolumeClaimTemplates, newFooVolumeQuantity)
break
}
}
})()).ShouldNot(HaveOccurred())
- By("Checking the resize operation in progress for data volume")
- Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(initialGeneration + 1))
- Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(kbappsv1.UpdatingComponentPhase))
- Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(kbappsv1.UpdatingClusterPhase))
+ By("checking the resize operation in progress for data volume")
+ Eventually(testapps.GetComponentObservedGeneration(&testCtx, compKey)).Should(BeEquivalentTo(initialGeneration + 1))
+ Eventually(testapps.GetComponentPhase(&testCtx, compKey)).Should(Equal(kbappsv1.UpdatingComponentPhase))
for i := 0; i < replicas; i++ {
pvc := &corev1.PersistentVolumeClaim{}
pvcKey := types.NamespacedName{
@@ -755,7 +703,7 @@ var _ = Describe("Component Controller", func() {
}).Should(Succeed())
}
- By("Mock resizing of data volumes finished")
+ By("mock resizing of data volumes finished")
for i := 0; i < replicas; i++ {
pvcKey := types.NamespacedName{
Namespace: clusterKey.Namespace,
@@ -766,15 +714,14 @@ var _ = Describe("Component Controller", func() {
})()).ShouldNot(HaveOccurred())
}
- By("Checking the resize operation finished")
+ By("checking the resize operation finished")
Expect(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(its), func(its *workloads.InstanceSet) {
testk8s.MockInstanceSetReady(its, mockPods...)
})()).ShouldNot(HaveOccurred())
- Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(initialGeneration + 1))
- Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(kbappsv1.RunningComponentPhase))
- Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(kbappsv1.RunningClusterPhase))
+ Eventually(testapps.GetComponentObservedGeneration(&testCtx, compKey)).Should(BeEquivalentTo(initialGeneration + 1))
+ Eventually(testapps.GetComponentPhase(&testCtx, compKey)).Should(Equal(kbappsv1.RunningComponentPhase))
- By("Checking data volumes are resized")
+ By("checking data volumes are resized")
for i := 0; i < replicas; i++ {
pvcKey := types.NamespacedName{
Namespace: clusterKey.Namespace,
@@ -785,7 +732,7 @@ var _ = Describe("Component Controller", func() {
})).Should(Succeed())
}
- By("Checking log volumes stay unchanged")
+ By("checking log volumes stay unchanged")
for i := 0; i < replicas; i++ {
pvc := &corev1.PersistentVolumeClaim{}
pvcKey := types.NamespacedName{
@@ -799,25 +746,29 @@ var _ = Describe("Component Controller", func() {
}
testVolumeExpansionFailedAndRecover := func(compName, compDefName string) {
+ const (
+ storageClassName = "test-sc"
+ replicas = 3
+ )
- const storageClassName = "test-sc"
- const replicas = 3
-
- By("Mock a StorageClass which allows resize")
+ By("mock a StorageClass which allows resize")
sc := testapps.CreateStorageClass(&testCtx, storageClassName, true)
- By("Creating a cluster with VolumeClaimTemplate")
+ By("creating a component with VolumeClaimTemplate")
pvcSpec := testapps.NewPVCSpec("1Gi")
pvcSpec.StorageClassName = &sc.Name
- By("Create cluster and waiting for the cluster initialized")
- createClusterObj(compName, compDefName, func(f *testapps.MockClusterFactory) {
- f.SetReplicas(replicas).AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec)
+ By("create component and waiting for the component initialized")
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
+ f.SetReplicas(replicas).
+ AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec)
})
- By("Mock PVCs in Bound Status")
+ By("mock PVCs in Bound Status")
for i := 0; i < replicas; i++ {
- tmpSpec := pvcSpec.ToV1PersistentVolumeClaimSpec()
+ tmpSpec := func() corev1.PersistentVolumeClaimSpec {
+ return intctrlutil.ToCoreV1PVCs([]kbappsv1.ClusterComponentVolumeClaimTemplate{{Spec: pvcSpec}})[0].Spec
+ }()
tmpSpec.VolumeName = getPVCName(testapps.DataVolumeName, compName, i)
pvc := &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
@@ -866,9 +817,8 @@ var _ = Describe("Component Controller", func() {
}
changePVC := func(quantity resource.Quantity) {
- Expect(testapps.GetAndChangeObj(&testCtx, clusterKey, func(cluster *kbappsv1.Cluster) {
- comp := &cluster.Spec.ComponentSpecs[0]
- comp.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] = quantity
+ Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) {
+ comp.Spec.VolumeClaimTemplates[0].Spec.Resources.Requests[corev1.ResourceStorage] = quantity
})()).ShouldNot(HaveOccurred())
}
@@ -884,11 +834,9 @@ var _ = Describe("Component Controller", func() {
}
}
- initialClusterGeneration, _ := getStableClusterObservedGeneration(clusterKey, nil)
- initialComponentGeneration, _ := getStableComponentObservedGeneration(compKey, pointer.Duration(0) /* no need to sleep */)
+ initialComponentGeneration, _ := stableCompObservedGeneration(compKey, pointer.Duration(0) /* no need to sleep */)
checkResizeOperationFinished := func(diffGeneration int64) {
- Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(initialClusterGeneration + diffGeneration))
Eventually(testapps.GetComponentObservedGeneration(&testCtx, compKey)).Should(BeEquivalentTo(initialComponentGeneration + diffGeneration))
}
@@ -914,20 +862,19 @@ var _ = Describe("Component Controller", func() {
}
testCompFinalizerNLabel := func(compName, compDefName string) {
- createClusterObj(compName, compDefName, nil)
+ createCompObj(compName, compDefName, nil)
By("check component finalizers and labels")
Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
- // g.Expect(comp.Finalizers).Should(ContainElements(constant.DBComponentFinalizerName))
- g.Expect(comp.Finalizers).Should(ContainElements(constant.DBClusterFinalizerName))
+ g.Expect(comp.Finalizers).Should(ContainElements(constant.DBComponentFinalizerName))
g.Expect(comp.Labels).Should(HaveKeyWithValue(constant.AppManagedByLabelKey, constant.AppName))
- g.Expect(comp.Labels).Should(HaveKeyWithValue(constant.AppInstanceLabelKey, clusterObj.Name))
+ g.Expect(comp.Labels).Should(HaveKeyWithValue(constant.AppInstanceLabelKey, clusterKey.Name))
g.Expect(comp.Labels).Should(HaveKeyWithValue(constant.KBAppComponentLabelKey, compName))
})).Should(Succeed())
}
testCompService := func(compName, compDefName string) {
- createClusterObj(compName, compDefName, nil)
+ createCompObj(compName, compDefName, nil)
targetPort := corev1.ServicePort{
Protocol: corev1.ProtocolTCP,
@@ -941,12 +888,12 @@ var _ = Describe("Component Controller", func() {
By("check rw component services")
rwSvcKey := types.NamespacedName{
Namespace: compObj.Namespace,
- Name: constant.GenerateComponentServiceName(clusterObj.Name, compName, "rw"),
+ Name: constant.GenerateComponentServiceName(clusterKey.Name, compName, "rw"),
}
Eventually(testapps.CheckObj(&testCtx, rwSvcKey, func(g Gomega, svc *corev1.Service) {
g.Expect(svc.Spec.Ports).Should(ContainElements(targetPort))
g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.AppManagedByLabelKey, constant.AppName))
- g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.AppInstanceLabelKey, clusterObj.Name))
+ g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.AppInstanceLabelKey, clusterKey.Name))
g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.KBAppComponentLabelKey, compName))
g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.RoleLabelKey, "leader"))
@@ -955,24 +902,24 @@ var _ = Describe("Component Controller", func() {
By("check ro component services")
roSvcKey := types.NamespacedName{
Namespace: compObj.Namespace,
- Name: constant.GenerateComponentServiceName(clusterObj.Name, compName, "ro"),
+ Name: constant.GenerateComponentServiceName(clusterKey.Name, compName, "ro"),
}
Eventually(testapps.CheckObj(&testCtx, roSvcKey, func(g Gomega, svc *corev1.Service) {
g.Expect(svc.Spec.Ports).Should(ContainElements(targetPort))
g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.AppManagedByLabelKey, constant.AppName))
- g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.AppInstanceLabelKey, clusterObj.Name))
+ g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.AppInstanceLabelKey, clusterKey.Name))
g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.KBAppComponentLabelKey, compName))
g.Expect(svc.Spec.Selector).Should(HaveKeyWithValue(constant.RoleLabelKey, "follower"))
})).Should(Succeed())
}
testCompSystemAccount := func(compName, compDefName string) {
- createClusterObj(compName, compDefName, nil)
+ createCompObj(compName, compDefName, nil)
By("check root account")
rootSecretKey := types.NamespacedName{
Namespace: compObj.Namespace,
- Name: constant.GenerateAccountSecretName(clusterObj.Name, compName, "root"),
+ Name: constant.GenerateAccountSecretName(clusterKey.Name, compName, "root"),
}
Eventually(testapps.CheckObj(&testCtx, rootSecretKey, func(g Gomega, secret *corev1.Secret) {
g.Expect(secret.Data).Should(HaveKeyWithValue(constant.AccountNameForSecret, []byte("root")))
@@ -982,7 +929,7 @@ var _ = Describe("Component Controller", func() {
By("check admin account")
adminSecretKey := types.NamespacedName{
Namespace: compObj.Namespace,
- Name: constant.GenerateAccountSecretName(clusterObj.Name, compName, "admin"),
+ Name: constant.GenerateAccountSecretName(clusterKey.Name, compName, "admin"),
}
Eventually(testapps.CheckObj(&testCtx, adminSecretKey, func(g Gomega, secret *corev1.Secret) {
g.Expect(secret.Data).Should(HaveKeyWithValue(constant.AccountNameForSecret, []byte("admin")))
@@ -990,7 +937,7 @@ var _ = Describe("Component Controller", func() {
})).Should(Succeed())
By("mock component as Running")
- mockCompRunning(compName)
+ mockCompRunning(compName, compObj)
By("wait accounts to be provisioned")
Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
@@ -1030,7 +977,7 @@ var _ = Describe("Component Controller", func() {
}
}
- createClusterObj(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.AddSystemAccount("root", passwordConfig, nil).
AddSystemAccount("admin", nil, secretRef()).
AddSystemAccount("not-exist", nil, nil)
@@ -1039,7 +986,7 @@ var _ = Describe("Component Controller", func() {
By("check root account")
rootSecretKey := types.NamespacedName{
Namespace: compObj.Namespace,
- Name: constant.GenerateAccountSecretName(clusterObj.Name, compName, "root"),
+ Name: constant.GenerateAccountSecretName(clusterKey.Name, compName, "root"),
}
Eventually(testapps.CheckObj(&testCtx, rootSecretKey, func(g Gomega, secret *corev1.Secret) {
g.Expect(secret.Data).Should(HaveKeyWithValue(constant.AccountNameForSecret, []byte("root")))
@@ -1050,7 +997,7 @@ var _ = Describe("Component Controller", func() {
By("check admin account")
adminSecretKey := types.NamespacedName{
Namespace: compObj.Namespace,
- Name: constant.GenerateAccountSecretName(clusterObj.Name, compName, "admin"),
+ Name: constant.GenerateAccountSecretName(clusterKey.Name, compName, "admin"),
}
Eventually(testapps.CheckObj(&testCtx, adminSecretKey, func(g Gomega, secret *corev1.Secret) {
g.Expect(secret.Data).Should(HaveKeyWithValue(constant.AccountNameForSecret, []byte("admin")))
@@ -1116,13 +1063,13 @@ var _ = Describe("Component Controller", func() {
},
}
})).Should(Succeed())
- createClusterObj(compName, compDefName, nil)
+ createCompObj(compName, compDefName, nil)
By("check workload template env")
targetEnvVars := []corev1.EnvVar{
{
Name: "SERVICE_HOST",
- Value: constant.GenerateComponentServiceName(clusterObj.Name, compName, compDefObj.Spec.Services[0].ServiceName),
+ Value: constant.GenerateComponentServiceName(clusterKey.Name, compName, compDefObj.Spec.Services[0].ServiceName),
},
{
Name: "SERVICE_PORT",
@@ -1133,7 +1080,7 @@ var _ = Describe("Component Controller", func() {
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
- Name: constant.GenerateAccountSecretName(clusterObj.Name, compName, compDefObj.Spec.SystemAccounts[0].Name),
+ Name: constant.GenerateAccountSecretName(clusterKey.Name, compName, compDefObj.Spec.SystemAccounts[0].Name),
},
Key: constant.AccountNameForSecret,
},
@@ -1144,7 +1091,7 @@ var _ = Describe("Component Controller", func() {
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
- Name: constant.GenerateAccountSecretName(clusterObj.Name, compName, compDefObj.Spec.SystemAccounts[0].Name),
+ Name: constant.GenerateAccountSecretName(clusterKey.Name, compName, compDefObj.Spec.SystemAccounts[0].Name),
},
Key: constant.AccountPasswdForSecret,
},
@@ -1171,13 +1118,13 @@ var _ = Describe("Component Controller", func() {
}
g.Expect(envValueMapping).Should(BeEquivalentTo(targetEnvVarsMapping))
// check envData source
- g.Expect(c.EnvFrom).Should(ContainElement(envConfigMapSource(clusterObj.Name, compName)))
+ g.Expect(c.EnvFrom).Should(ContainElement(envConfigMapSource(clusterKey.Name, compName)))
}
}
})).Should(Succeed())
envCMKey := types.NamespacedName{
Namespace: compObj.Namespace,
- Name: constant.GenerateClusterComponentEnvPattern(clusterObj.Name, compName),
+ Name: constant.GenerateClusterComponentEnvPattern(clusterKey.Name, compName),
}
Eventually(testapps.CheckObj(&testCtx, envCMKey, func(g Gomega, cm *corev1.ConfigMap) {
_, envData := buildEnvVarsNData(targetEnvVars)
@@ -1193,7 +1140,7 @@ var _ = Describe("Component Controller", func() {
MaxReplicas: 16,
}
By("create component w/o replicas limit set")
- createClusterObj(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.SetReplicas(replicasLimit.MaxReplicas * 2)
})
itsKey := types.NamespacedName{
@@ -1212,7 +1159,7 @@ var _ = Describe("Component Controller", func() {
By("create component w/ replicas limit set - out-of-limit")
for _, replicas := range []int32{replicasLimit.MinReplicas / 2, replicasLimit.MaxReplicas * 2} {
- createClusterObjWithPhase(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObjWithPhase(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.SetReplicas(replicas)
}, "")
Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
@@ -1231,7 +1178,7 @@ var _ = Describe("Component Controller", func() {
By("create component w/ replicas limit set - ok")
for _, replicas := range []int32{replicasLimit.MinReplicas, (replicasLimit.MinReplicas + replicasLimit.MaxReplicas) / 2, replicasLimit.MaxReplicas} {
- createClusterObj(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.SetReplicas(replicas)
})
itsKey := types.NamespacedName{
@@ -1244,8 +1191,8 @@ var _ = Describe("Component Controller", func() {
}
}
- testCompRole := func(compName, compDefName string) {
- createClusterObj(compName, compDefName, nil)
+ testCompRoles := func(compName, compDefName string) {
+ createCompObj(compName, compDefName, nil)
By("check default component roles")
targetRoles := []workloads.ReplicaRole{
@@ -1289,17 +1236,17 @@ var _ = Describe("Component Controller", func() {
compDef.Spec.TLS = &tls
})()).Should(Succeed())
- createClusterObj(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
issuer := &kbappsv1.Issuer{
Name: kbappsv1.IssuerKubeBlocks,
}
- f.SetTLS(true).SetIssuer(issuer)
+ f.SetTLSConfig(true, issuer)
})
By("check TLS secret")
secretKey := types.NamespacedName{
Namespace: compObj.Namespace,
- Name: plan.GenerateTLSSecretName(clusterObj.Name, compName),
+ Name: plan.GenerateTLSSecretName(clusterKey.Name, compName),
}
Eventually(testapps.CheckObj(&testCtx, secretKey, func(g Gomega, secret *corev1.Secret) {
g.Expect(secret.Data).Should(HaveKey(*tls.CAFile))
@@ -1357,10 +1304,10 @@ var _ = Describe("Component Controller", func() {
testCompRBAC := func(compName, compDefName, saName string) {
By("creating a component with target service account name")
if len(saName) == 0 {
- createClusterObj(compName, compDefName, nil)
+ createCompObj(compName, compDefName, nil)
saName = constant.GenerateDefaultServiceAccountName(compDefName)
} else {
- createClusterObj(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObj(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.SetServiceAccountName(saName)
})
}
@@ -1380,9 +1327,8 @@ var _ = Describe("Component Controller", func() {
testCompWithRBAC := func(compName, compDefName string) {
testCompRBAC(compName, compDefName, "")
- By("delete the cluster(component)")
- testapps.DeleteObject(&testCtx, clusterKey, &kbappsv1.Cluster{})
- Eventually(testapps.CheckObjExists(&testCtx, clusterKey, &kbappsv1.Cluster{}, false)).Should(Succeed())
+ By("delete the component")
+ testapps.DeleteObject(&testCtx, compKey, &kbappsv1.Component{})
Eventually(testapps.CheckObjExists(&testCtx, compKey, &kbappsv1.Component{}, false)).Should(Succeed())
By("check the RBAC resources deleted")
@@ -1393,23 +1339,22 @@ var _ = Describe("Component Controller", func() {
testRecreateCompWithRBACCreateByKubeBlocks := func(compName, compDefName string) {
testCompRBAC(compName, compDefName, "")
- By("delete the cluster(component)")
- testapps.DeleteObject(&testCtx, clusterKey, &kbappsv1.Cluster{})
- Eventually(testapps.CheckObjExists(&testCtx, clusterKey, &kbappsv1.Cluster{}, false)).Should(Succeed())
+ By("delete the component")
+ testapps.DeleteObject(&testCtx, compKey, &kbappsv1.Component{})
+ Eventually(testapps.CheckObjExists(&testCtx, compKey, &kbappsv1.Component{}, false)).Should(Succeed())
By("check the RBAC resources deleted")
saName := constant.GenerateDefaultServiceAccountName(compDefName)
checkRBACResourcesExistence(saName, fmt.Sprintf("%v-pod", saName), false)
- By("re-create cluster(component) with same name")
+ By("re-create component with same name")
testCompRBAC(compName, compDefName, "")
}
- testSharedRBACResoucreDeletion := func() {
- By("create first cluster")
- createClusterObj(defaultCompName+"-comp1", compDefName, nil)
+ testSharedRBACResourceDeletion := func(compNamePrefix, compDefName string) {
+ By("create first component")
+ createCompObj(compNamePrefix+"-comp1", compDefName, nil)
comp1Key := compKey
- cluster1Key := clusterKey
By("check rbac resources owner")
saName := constant.GenerateDefaultServiceAccountName(compDefName)
@@ -1427,7 +1372,7 @@ var _ = Describe("Component Controller", func() {
checkRBACResourcesExistence(saName, fmt.Sprintf("%v-pod", saName), true)
By("create second cluster")
- createClusterObj(defaultCompName+"-comp2", compDefName, nil)
+ createCompObj(compNamePrefix+"-comp2", compDefName, nil)
comp2Key := compKey
By("check rbac resources owner not modified")
Consistently(testapps.CheckObj(&testCtx, saKey, func(g Gomega, sa *corev1.ServiceAccount) {
@@ -1437,9 +1382,9 @@ var _ = Describe("Component Controller", func() {
g.Expect(owner.Name).Should(Equal(comp1Key.Name))
})).Should(Succeed())
- By("delete first cluster")
- testapps.DeleteObject(&testCtx, cluster1Key, &kbappsv1.Cluster{})
- Eventually(testapps.CheckObjExists(&testCtx, cluster1Key, &kbappsv1.Cluster{}, false)).Should(Succeed())
+ By("delete first component")
+ testapps.DeleteObject(&testCtx, comp1Key, &kbappsv1.Component{})
+ Eventually(testapps.CheckObjExists(&testCtx, comp1Key, &kbappsv1.Component{}, false)).Should(Succeed())
By("check rbac resources owner transferred")
Eventually(testapps.CheckObj(&testCtx, saKey, func(g Gomega, sa *corev1.ServiceAccount) {
@@ -1454,9 +1399,9 @@ var _ = Describe("Component Controller", func() {
saName := "test-sa-non-exist" + randomStr()
// component controller won't complete reconciliation, so the phase will be empty
- createClusterObjWithPhase(compName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObjWithPhase(compName, compDefName, func(f *testapps.MockComponentFactory) {
f.SetServiceAccountName(saName)
- }, kbappsv1.ClusterPhase(""))
+ }, "")
Consistently(testapps.GetComponentPhase(&testCtx, compKey)).Should(Equal(kbappsv1.ComponentPhase("")))
}
@@ -1469,184 +1414,17 @@ var _ = Describe("Component Controller", func() {
testCompRBAC(compName, compDefName, saName)
- By("delete the cluster(component)")
- testapps.DeleteObject(&testCtx, clusterKey, &kbappsv1.Cluster{})
- Eventually(testapps.CheckObjExists(&testCtx, clusterKey, &kbappsv1.Cluster{}, true)).Should(Succeed())
+ By("delete the component")
+ testapps.DeleteObject(&testCtx, compKey, &kbappsv1.Component{})
+ Eventually(testapps.CheckObjExists(&testCtx, compKey, &kbappsv1.Component{}, true)).Should(Succeed())
By("check the serviceaccount not deleted")
Eventually(testapps.CheckObjExists(&testCtx, client.ObjectKeyFromObject(sa), &corev1.ServiceAccount{}, true)).Should(Succeed())
}
- testThreeReplicas := func(compName, compDefName string) {
- const replicas = 3
-
- By("Mock a cluster obj")
- pvcSpec := testapps.NewPVCSpec("1Gi")
- clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, "").
- WithRandomName().
- AddComponent(compName, compDefName).
- SetReplicas(replicas).
- AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec).
- Create(&testCtx).GetObject()
- clusterKey = client.ObjectKeyFromObject(clusterObj)
-
- By("Waiting for the cluster controller to create resources completely")
- waitForCreatingResourceCompletely(clusterKey, compName)
-
- var its *workloads.InstanceSet
- Eventually(func(g Gomega) {
- itsList := testk8s.ListAndCheckInstanceSet(&testCtx, clusterKey)
- g.Expect(itsList.Items).ShouldNot(BeEmpty())
- its = &itsList.Items[0]
- }).Should(Succeed())
-
- By("Creating mock pods in InstanceSet, and set controller reference")
- mockPods := mockPodsForTest(clusterObj, compName, compDefName, replicas)
- for i, pod := range mockPods {
- Expect(controllerutil.SetControllerReference(its, pod, scheme.Scheme)).Should(Succeed())
- Expect(testCtx.CreateObj(testCtx.Ctx, pod)).Should(Succeed())
- patch := client.MergeFrom(pod.DeepCopy())
- // mock the status to pass the isReady(pod) check in consensus_set
- pod.Status.Conditions = []corev1.PodCondition{{
- Type: corev1.PodReady,
- Status: corev1.ConditionTrue,
- }}
- Eventually(k8sClient.Status().Patch(ctx, pod, patch)).Should(Succeed())
- role := "follower"
- if i == 0 {
- role = "leader"
- }
- patch = client.MergeFrom(pod.DeepCopy())
- pod.Labels[constant.RoleLabelKey] = role
- Eventually(k8sClient.Patch(ctx, pod, patch)).Should(Succeed())
- }
-
- By("Checking pods' role are changed accordingly")
- Eventually(func(g Gomega) {
- pods, err := intctrlutil.GetPodListByInstanceSet(ctx, k8sClient, its)
- g.Expect(err).ShouldNot(HaveOccurred())
- // should have 3 pods
- g.Expect(pods).Should(HaveLen(3))
- // 1 leader
- // 2 followers
- leaderCount, followerCount := 0, 0
- for _, pod := range pods {
- switch pod.Labels[constant.RoleLabelKey] {
- case leader:
- leaderCount++
- case follower:
- followerCount++
- }
- }
- g.Expect(leaderCount).Should(Equal(1))
- g.Expect(followerCount).Should(Equal(2))
- }).Should(Succeed())
-
- // trigger its to reconcile as the underlying its is not created
- Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(its), func(its *workloads.InstanceSet) {
- its.Annotations["time"] = time.Now().Format(time.RFC3339)
- })()).Should(Succeed())
-
- By("Updating ITS status")
- itsPatch := client.MergeFrom(its.DeepCopy())
- its.Status.UpdateRevision = "mock-version"
- pods, err := intctrlutil.GetPodListByInstanceSet(ctx, k8sClient, its)
- Expect(err).Should(BeNil())
- var podList []*corev1.Pod
- for i := range pods {
- podList = append(podList, &pods[i])
- }
- testk8s.MockInstanceSetReady(its, podList...)
- Expect(k8sClient.Status().Patch(ctx, its, itsPatch)).Should(Succeed())
-
- By("Checking pods' role are updated in cluster status")
- Eventually(func(g Gomega) {
- fetched := &kbappsv1.Cluster{}
- g.Expect(k8sClient.Get(ctx, clusterKey, fetched)).To(Succeed())
- compName := fetched.Spec.ComponentSpecs[0].Name
- g.Expect(fetched.Status.Components != nil).To(BeTrue())
- g.Expect(fetched.Status.Components).To(HaveKey(compName))
- _, ok := fetched.Status.Components[compName]
- g.Expect(ok).Should(BeTrue())
- }).Should(Succeed())
-
- By("Waiting the component be running")
- Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(kbappsv1.RunningComponentPhase))
- }
-
- testRestoreClusterFromBackup := func(compName string, compDef *kbappsv1.ComponentDefinition) {
- By("mock backuptool object")
- backupPolicyName := "test-backup-policy"
- backupName := "test-backup"
- _ = testapps.CreateCustomizedObj(&testCtx, "backup/actionset.yaml", &dpv1alpha1.ActionSet{}, testapps.RandomizedObjName())
-
- By("creating backup")
- backup := testdp.NewBackupFactory(testCtx.DefaultNamespace, backupName).
- SetBackupPolicyName(backupPolicyName).
- SetBackupMethod(testdp.BackupMethodName).
- Create(&testCtx).GetObject()
-
- By("mocking backup status completed, we don't need backup reconcile here")
- Eventually(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(backup), func(backup *dpv1alpha1.Backup) {
- backup.Status.PersistentVolumeClaimName = "backup-pvc"
- backup.Status.Phase = dpv1alpha1.BackupPhaseCompleted
- testdp.MockBackupStatusMethod(backup, testdp.BackupMethodName, testapps.DataVolumeName, testdp.ActionSetName)
- })).Should(Succeed())
-
- By("creating cluster with backup")
- restoreFromBackup := fmt.Sprintf(`{"%s":{"name":"%s"}}`, compName, backupName)
- pvcSpec := testapps.NewPVCSpec("1Gi")
- replicas := 3
- clusterObj = testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, "").
- WithRandomName().
- AddComponent(compName, compDef.GetName()).
- SetServiceVersion(compDef.Spec.ServiceVersion).
- SetReplicas(int32(replicas)).
- AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec).
- AddAnnotations(constant.RestoreFromBackupAnnotationKey, restoreFromBackup).
- Create(&testCtx).
- GetObject()
- clusterKey = client.ObjectKeyFromObject(clusterObj)
-
- // mock pvcs have restored
- mockComponentPVCsAndBound(clusterObj.Spec.GetComponentByName(compName), replicas, true, testk8s.DefaultStorageClassName)
-
- By("wait for restore created")
- ml := client.MatchingLabels{
- constant.AppInstanceLabelKey: clusterKey.Name,
- constant.KBAppComponentLabelKey: compName,
- }
- Eventually(testapps.List(&testCtx, generics.RestoreSignature,
- ml, client.InNamespace(clusterKey.Namespace))).Should(HaveLen(1))
-
- By("Mocking restore phase to Completed")
- // mock prepareData restore completed
- testdp.MockRestoreCompleted(&testCtx, ml)
-
- By("Waiting for the cluster controller to create resources completely")
- waitForCreatingResourceCompletely(clusterKey, compName)
-
- itsList := testk8s.ListAndCheckInstanceSet(&testCtx, clusterKey)
- its := &itsList.Items[0]
- By("mock pod are available and wait for component enter running phase")
- mockPods := testapps.MockInstanceSetPods(&testCtx, its, clusterObj, compName)
- Expect(testapps.ChangeObjStatus(&testCtx, its, func() {
- testk8s.MockInstanceSetReady(its, mockPods...)
- })).ShouldNot(HaveOccurred())
- Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(kbappsv1.RunningComponentPhase))
-
- By("clean up annotations after cluster running")
- Eventually(testapps.CheckObj(&testCtx, clusterKey, func(g Gomega, tmpCluster *kbappsv1.Cluster) {
- g.Expect(tmpCluster.Status.Phase).Should(Equal(kbappsv1.RunningClusterPhase))
- // mock postReady restore completed
- testdp.MockRestoreCompleted(&testCtx, ml)
- g.Expect(tmpCluster.Annotations[constant.RestoreFromBackupAnnotationKey]).Should(BeEmpty())
- })).Should(Succeed())
- }
-
Context("provisioning", func() {
BeforeEach(func() {
- createAllDefinitionObjects()
+ createDefinitionObjects()
})
AfterEach(func() {
@@ -1654,76 +1432,70 @@ var _ = Describe("Component Controller", func() {
})
It("component finalizers and labels", func() {
- testCompFinalizerNLabel(defaultCompName, compDefName)
+ testCompFinalizerNLabel(defaultCompName, compDefObj.Name)
})
It("with component zero replicas", func() {
- createClusterObjWithPhase(defaultCompName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObjWithPhase(defaultCompName, compDefObj.Name, func(f *testapps.MockComponentFactory) {
f.SetReplicas(0)
}, "")
By("checking the component status can't be reconciled well")
- Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
- g.Expect(comp.Generation > comp.Status.ObservedGeneration).Should(BeTrue())
- })).Should(Succeed())
+ Consistently(testapps.ComponentReconciled(&testCtx, compKey)).Should(BeFalse())
})
It("with component services", func() {
- testCompService(defaultCompName, compDefName)
+ testCompService(defaultCompName, compDefObj.Name)
})
It("with component system accounts", func() {
- testCompSystemAccount(defaultCompName, compDefName)
+ testCompSystemAccount(defaultCompName, compDefObj.Name)
})
It("with component system accounts - override", func() {
- testCompSystemAccountOverride(defaultCompName, compDefName)
+ testCompSystemAccountOverride(defaultCompName, compDefObj.Name)
})
It("with component vars", func() {
- testCompVars(defaultCompName, compDefName)
+ testCompVars(defaultCompName, compDefObj.Name)
})
It("with component replicas limit", func() {
- testCompReplicasLimit(defaultCompName, compDefName)
+ testCompReplicasLimit(defaultCompName, compDefObj.Name)
})
It("with component roles", func() {
- testCompRole(defaultCompName, compDefName)
- })
-
- It("with component roles - should success with one leader pod and two follower pods", func() {
- testThreeReplicas(defaultCompName, compDefObj.Name)
+ testCompRoles(defaultCompName, compDefObj.Name)
})
It("with component TlS", func() {
- testCompTLSConfig(defaultCompName, compDefName)
+ testCompTLSConfig(defaultCompName, compDefObj.Name)
})
It("creates component RBAC resources", func() {
- testCompWithRBAC(defaultCompName, compDefName)
+ testCompWithRBAC(defaultCompName, compDefObj.Name)
})
It("re-creates component with custom RBAC which is not exist and auto created by KubeBlocks", func() {
- testRecreateCompWithRBACCreateByKubeBlocks(defaultCompName, compDefName)
+ testRecreateCompWithRBACCreateByKubeBlocks(defaultCompName, compDefObj.Name)
})
It("transfers rbac resources' ownership when multiple components share them", func() {
- testSharedRBACResoucreDeletion()
+ testSharedRBACResourceDeletion(defaultCompName, compDefObj.Name)
})
It("creates component with non-exist serviceaccount", func() {
- testCreateCompWithNonExistRBAC(defaultCompName, compDefName)
+ testCreateCompWithNonExistRBAC(defaultCompName, compDefObj.Name)
})
It("create component with custom RBAC which is already exist created by User", func() {
- testCreateCompWithRBACCreateByUser(defaultCompName, compDefName)
+ testCreateCompWithRBACCreateByUser(defaultCompName, compDefObj.Name)
})
})
Context("h-scaling", func() {
BeforeEach(func() {
- createAllDefinitionObjects()
+ createDefinitionObjects()
})
AfterEach(func() {
@@ -1751,60 +1523,13 @@ var _ = Describe("Component Controller", func() {
})
It("scale-in to 0 and PVCs should not been deleted", func() {
+ changeReplicasLimit(compDefObj.Name, 0, 16384)
+
testHorizontalScale(defaultCompName, compDefObj.Name, 3, 0)
})
- Context("scale-out multiple components", func() {
- createNWaitClusterObj := func(components map[string]string,
- processor func(compName string, factory *testapps.MockClusterFactory),
- withFixedName ...bool) {
- Expect(components).ShouldNot(BeEmpty())
-
- By("Creating a cluster")
- clusterBuilder := testapps.NewClusterFactory(testCtx.DefaultNamespace, clusterName, "")
-
- compNames := make([]string, 0, len(components))
- for compName, compDefName := range components {
- clusterBuilder = clusterBuilder.AddComponent(compName, compDefName)
- if processor != nil {
- processor(compName, clusterBuilder)
- }
- compNames = append(compNames, compName)
- }
- if len(withFixedName) == 0 || !withFixedName[0] {
- clusterBuilder.WithRandomName()
- }
- clusterObj = clusterBuilder.Create(&testCtx).GetObject()
- clusterKey = client.ObjectKeyFromObject(clusterObj)
-
- By("Waiting for the cluster controller to create resources completely")
- waitForCreatingResourceCompletely(clusterKey, compNames...)
- }
-
- It("h-scale with data actions", func() {
- By("update cmpd to enable data actions")
- Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(compDefObj), func(cmpd *kbappsv1.ComponentDefinition) {
- cmpd.Spec.LifecycleActions.DataDump = testapps.NewLifecycleAction("data-dump")
- cmpd.Spec.LifecycleActions.DataLoad = testapps.NewLifecycleAction("data-load")
- })()).Should(Succeed())
-
- compNameNDef := map[string]string{
- fmt.Sprintf("%s-0", defaultCompName): compDefObj.Name,
- fmt.Sprintf("%s-1", defaultCompName): compDefObj.Name,
- fmt.Sprintf("%s-2", defaultCompName): compDefObj.Name,
- }
- initialReplicas := int32(1)
- updatedReplicas := int32(2)
-
- By("Creating a multi components cluster with VolumeClaimTemplate")
- pvcSpec := testapps.NewPVCSpec("1Gi")
-
- createNWaitClusterObj(compNameNDef, func(compName string, factory *testapps.MockClusterFactory) {
- factory.AddVolumeClaimTemplate(testapps.DataVolumeName, pvcSpec).SetReplicas(initialReplicas)
- }, false)
-
- horizontalScale(int(updatedReplicas), testk8s.DefaultStorageClassName, compDefObj.Name)
- })
+ It("h-scale with data actions", func() {
+ testHorizontalScaleWithDataActions(defaultCompName, compDefObj.Name, 1, 2)
})
})
@@ -1814,57 +1539,43 @@ var _ = Describe("Component Controller", func() {
)
BeforeEach(func() {
- createAllDefinitionObjects()
+ createDefinitionObjects()
mockStorageClass = testk8s.CreateMockStorageClass(&testCtx, testk8s.DefaultStorageClassName)
})
It("should update PVC request storage size accordingly", func() {
- testVolumeExpansion(compDefObj, defaultCompName, mockStorageClass)
+ testVolumeExpansion(defaultCompName, compDefObj.Name, mockStorageClass)
})
It("should be able to recover if volume expansion fails", func() {
- testVolumeExpansionFailedAndRecover(defaultCompName, compDefName)
+ testVolumeExpansionFailedAndRecover(defaultCompName, compDefObj.Name)
})
It("scale-out", func() {
- testVolumeExpansion(compDefObj, defaultCompName, mockStorageClass)
- horizontalScale(5, mockStorageClass.Name, compDefObj.Name)
- })
- })
-
- Context("restore", func() {
- BeforeEach(func() {
- createAllDefinitionObjects()
- })
-
- AfterEach(func() {
- cleanEnv()
- })
-
- It("test restore cluster from backup", func() {
- testRestoreClusterFromBackup(defaultCompName, compDefObj)
+ testVolumeExpansion(defaultCompName, compDefObj.Name, mockStorageClass)
+ horizontalScale(5, mockStorageClass.Name, defaultCompName, compDefObj.Name)
})
})
Context("start & stop", func() {
BeforeEach(func() {
cleanEnv()
- createAllDefinitionObjects()
+ createDefinitionObjects()
})
startComp := func() {
- Expect(testapps.GetAndChangeObj(&testCtx, clusterKey, func(cluster *kbappsv1.Cluster) {
- cluster.Spec.ComponentSpecs[0].Stop = nil
+ Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) {
+ comp.Spec.Stop = nil
})()).Should(Succeed())
}
stopComp := func() {
- Expect(testapps.GetAndChangeObj(&testCtx, clusterKey, func(cluster *kbappsv1.Cluster) {
- cluster.Spec.ComponentSpecs[0].Stop = func() *bool { b := true; return &b }()
+ Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) {
+ comp.Spec.Stop = ptr.To(true)
})()).Should(Succeed())
}
- checkCompRunningAs := func(phase kbappsv1.ComponentPhase) {
+ checkCompRunningWithPhase := func(phase kbappsv1.ComponentPhase) {
Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
g.Expect(comp.Status.ObservedGeneration).To(BeEquivalentTo(comp.Generation))
if comp.Spec.Stop != nil {
@@ -1880,11 +1591,11 @@ var _ = Describe("Component Controller", func() {
}
checkCompCreating := func() {
- checkCompRunningAs(kbappsv1.CreatingComponentPhase)
+ checkCompRunningWithPhase(kbappsv1.CreatingComponentPhase)
}
checkCompRunning := func() {
- checkCompRunningAs(kbappsv1.StartingComponentPhase)
+ checkCompRunningWithPhase(kbappsv1.StartingComponentPhase)
}
checkCompStopped := func() {
@@ -1902,7 +1613,7 @@ var _ = Describe("Component Controller", func() {
}
It("stop a component", func() {
- createClusterObj(defaultCompName, compDefName, nil)
+ createCompObj(defaultCompName, compDefObj.Name, nil)
checkCompCreating()
By("stop it")
@@ -1915,7 +1626,7 @@ var _ = Describe("Component Controller", func() {
})
It("start a component", func() {
- createClusterObj(defaultCompName, compDefName, nil)
+ createCompObj(defaultCompName, compDefObj.Name, nil)
checkCompCreating()
By("start it")
@@ -1936,13 +1647,15 @@ var _ = Describe("Component Controller", func() {
})
It("h-scale a stopped component", func() {
- createClusterObjWithPhase(defaultCompName, compDefName, func(f *testapps.MockClusterFactory) {
- f.SetStop(func() *bool { b := true; return &b }())
- }, kbappsv1.StoppedClusterPhase)
+ createCompObjWithPhase(defaultCompName, compDefObj.Name, func(f *testapps.MockComponentFactory) {
+ f.SetStop(ptr.To(true))
+ }, kbappsv1.StoppedComponentPhase)
checkCompStopped()
By("scale-out")
- changeCompReplicas(clusterKey, 3, &clusterObj.Spec.ComponentSpecs[0])
+ Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) {
+ comp.Spec.Replicas = 3
+ })()).ShouldNot(HaveOccurred())
By("check comp & its")
Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
@@ -1980,13 +1693,15 @@ var _ = Describe("Component Controller", func() {
cmpd.Spec.LifecycleActions.DataDump = testapps.NewLifecycleAction("data-dump")
})()).Should(Succeed())
- createClusterObjWithPhase(defaultCompName, compDefName, func(f *testapps.MockClusterFactory) {
- f.SetStop(func() *bool { b := true; return &b }())
- }, kbappsv1.StoppedClusterPhase)
+ createCompObjWithPhase(defaultCompName, compDefObj.Name, func(f *testapps.MockComponentFactory) {
+ f.SetStop(ptr.To(true))
+ }, kbappsv1.StoppedComponentPhase)
checkCompStopped()
By("scale-out")
- changeCompReplicas(clusterKey, 3, &clusterObj.Spec.ComponentSpecs[0])
+ Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) {
+ comp.Spec.Replicas = 3
+ })()).ShouldNot(HaveOccurred())
By("check comp & its")
Eventually(testapps.CheckObj(&testCtx, compKey, func(g Gomega, comp *kbappsv1.Component) {
@@ -2019,7 +1734,7 @@ var _ = Describe("Component Controller", func() {
Context("reconcile with definition and version", func() {
BeforeEach(func() {
cleanEnv()
- createAllDefinitionObjects()
+ createDefinitionObjects()
})
testImageUnchangedAfterNewReleasePublished := func(release kbappsv1.ComponentVersionRelease) {
@@ -2030,8 +1745,7 @@ var _ = Describe("Component Controller", func() {
Expect(maps.Keys(prevRelease.Images)).Should(BeEquivalentTo(maps.Keys(release.Images)))
Expect(maps.Values(prevRelease.Images)).ShouldNot(BeEquivalentTo(maps.Values(release.Images)))
- // createCompObj(defaultCompName, compDefName, compVerObj.Spec.Releases[0].ServiceVersion, nil)
- createClusterObj(defaultCompName, compDefName, func(f *testapps.MockClusterFactory) {
+ createCompObj(defaultCompName, compDefObj.Name, func(f *testapps.MockComponentFactory) {
f.SetServiceVersion(prevRelease.ServiceVersion)
})
@@ -2095,7 +1809,7 @@ var _ = Describe("Component Controller", func() {
})
})
- Context("with registry replace enabled", func() {
+ Context("registry config", func() {
registry := "foo.bar"
setRegistryConfig := func() {
viper.Set(constant.CfgRegistries, map[string]any{
@@ -2105,7 +1819,7 @@ var _ = Describe("Component Controller", func() {
}
BeforeEach(func() {
- createAllDefinitionObjects()
+ createDefinitionObjects()
})
AfterEach(func() {
@@ -2116,7 +1830,7 @@ var _ = Describe("Component Controller", func() {
It("replaces image registry", func() {
setRegistryConfig()
- createClusterObj(defaultCompName, compDefName, nil)
+ createCompObj(defaultCompName, compDefObj.Name, nil)
itsKey := compKey
Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) {
@@ -2127,7 +1841,9 @@ var _ = Describe("Component Controller", func() {
})
It("handles running its and upgrade", func() {
- createClusterObj(defaultCompName, compDefName, nil)
+ createCompObj(defaultCompName, compDefObj.Name, func(f *testapps.MockComponentFactory) {
+ f.SetServiceVersion(compDefObj.Spec.ServiceVersion)
+ })
itsKey := compKey
Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) {
// check the image
@@ -2136,6 +1852,7 @@ var _ = Describe("Component Controller", func() {
})).Should(Succeed())
setRegistryConfig()
+
By("trigger component reconcile")
now := time.Now().Format(time.RFC3339)
Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) {
@@ -2164,15 +1881,9 @@ var _ = Describe("Component Controller", func() {
compVer.Spec.CompatibilityRules[0].Releases = append(compVer.Spec.CompatibilityRules[0].Releases, release.Name)
})()).Should(Succeed())
- By("update serviceversion in cluster")
- Expect(testapps.GetAndChangeObj(&testCtx, clusterKey, func(cluster *kbappsv1.Cluster) {
- cluster.Spec.ComponentSpecs[0].ServiceVersion = "8.0.31"
- })()).Should(Succeed())
-
- By("trigger component reconcile")
- now = time.Now().Format(time.RFC3339)
+ By("update service version in component")
Expect(testapps.GetAndChangeObj(&testCtx, compKey, func(comp *kbappsv1.Component) {
- comp.Annotations["now"] = now
+ comp.Spec.ServiceVersion = "8.0.31"
})()).Should(Succeed())
Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) {
diff --git a/controllers/apps/component/component_plan_builder.go b/controllers/apps/component/component_plan_builder.go
index a366dcfe909..67e65e29d1b 100644
--- a/controllers/apps/component/component_plan_builder.go
+++ b/controllers/apps/component/component_plan_builder.go
@@ -31,6 +31,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
+ appsutil "github.com/apecloud/kubeblocks/controllers/apps/util"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/component"
"github.com/apecloud/kubeblocks/pkg/controller/graph"
@@ -44,7 +45,6 @@ type componentTransformContext struct {
Client client.Reader
record.EventRecorder
logr.Logger
- Cluster *appsv1.Cluster
CompDef *appsv1.ComponentDefinition
Component *appsv1.Component
ComponentOrig *appsv1.Component
@@ -189,7 +189,7 @@ func (c *componentPlanBuilder) defaultWalkFunc(v graph.Vertex) error {
}
func (c *componentPlanBuilder) reconcileCreateObject(ctx context.Context, vertex *model.ObjectVertex) error {
- err := c.cli.Create(ctx, vertex.Obj, clientOption(vertex))
+ err := c.cli.Create(ctx, vertex.Obj, appsutil.ClientOption(vertex))
if err != nil && !apierrors.IsAlreadyExists(err) {
return err
}
@@ -197,7 +197,7 @@ func (c *componentPlanBuilder) reconcileCreateObject(ctx context.Context, vertex
}
func (c *componentPlanBuilder) reconcileUpdateObject(ctx context.Context, vertex *model.ObjectVertex) error {
- err := c.cli.Update(ctx, vertex.Obj, clientOption(vertex))
+ err := c.cli.Update(ctx, vertex.Obj, appsutil.ClientOption(vertex))
if err != nil && !apierrors.IsNotFound(err) {
return err
}
@@ -206,7 +206,7 @@ func (c *componentPlanBuilder) reconcileUpdateObject(ctx context.Context, vertex
func (c *componentPlanBuilder) reconcilePatchObject(ctx context.Context, vertex *model.ObjectVertex) error {
patch := client.MergeFrom(vertex.OriObj)
- err := c.cli.Patch(ctx, vertex.Obj, patch, clientOption(vertex))
+ err := c.cli.Patch(ctx, vertex.Obj, patch, appsutil.ClientOption(vertex))
if err != nil && !apierrors.IsNotFound(err) {
return err
}
@@ -220,7 +220,7 @@ func (c *componentPlanBuilder) reconcileDeleteObject(ctx context.Context, vertex
finalizers := []string{constant.DBComponentFinalizerName, constant.DBClusterFinalizerName}
for _, finalizer := range finalizers {
if controllerutil.RemoveFinalizer(vertex.Obj, finalizer) {
- err := c.cli.Update(ctx, vertex.Obj, clientOption(vertex))
+ err := c.cli.Update(ctx, vertex.Obj, appsutil.ClientOption(vertex))
if err != nil && !apierrors.IsNotFound(err) {
return err
}
@@ -229,7 +229,7 @@ func (c *componentPlanBuilder) reconcileDeleteObject(ctx context.Context, vertex
if !model.IsObjectDeleting(vertex.Obj) {
var opts []client.DeleteOption
- opts = append(opts, clientOption(vertex))
+ opts = append(opts, appsutil.ClientOption(vertex))
if len(vertex.PropagationPolicy) > 0 {
opts = append(opts, vertex.PropagationPolicy)
}
@@ -242,5 +242,5 @@ func (c *componentPlanBuilder) reconcileDeleteObject(ctx context.Context, vertex
}
func (c *componentPlanBuilder) reconcileStatusObject(ctx context.Context, vertex *model.ObjectVertex) error {
- return c.cli.Status().Update(ctx, vertex.Obj, clientOption(vertex))
+ return c.cli.Status().Update(ctx, vertex.Obj, appsutil.ClientOption(vertex))
}
diff --git a/controllers/apps/component/component_plan_builder_test.go b/controllers/apps/component/component_plan_builder_test.go
index 5b394d035b9..4221f0bcf3a 100644
--- a/controllers/apps/component/component_plan_builder_test.go
+++ b/controllers/apps/component/component_plan_builder_test.go
@@ -29,7 +29,6 @@ import (
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil"
- "github.com/apecloud/kubeblocks/pkg/generics"
testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps"
)
@@ -47,21 +46,8 @@ var _ = Describe("component plan builder test", func() {
// create the new objects.
By("clean resources")
- // delete cluster(and all dependent sub-resources), cluster definition
- testapps.ClearClusterResourcesWithRemoveFinalizerOption(&testCtx)
-
- // delete rest mocked objects
- inNS := client.InNamespace(testCtx.DefaultNamespace)
- ml := client.HasLabels{testCtx.TestObjLabelKey}
- // namespaced
- testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PersistentVolumeClaimSignature, true, inNS, ml)
- testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.PodSignature, true, inNS, ml)
- testapps.ClearResources(&testCtx, generics.BackupSignature, inNS, ml)
- testapps.ClearResources(&testCtx, generics.BackupPolicySignature, inNS, ml)
- testapps.ClearResourcesWithRemoveFinalizerOption(&testCtx, generics.VolumeSnapshotSignature, true, inNS)
- // non-namespaced
- testapps.ClearResources(&testCtx, generics.BackupPolicyTemplateSignature, ml)
- testapps.ClearResources(&testCtx, generics.StorageClassSignature, ml)
+ // delete components (and all dependent sub-resources), and component definitions & versions
+ testapps.ClearComponentResourcesWithRemoveFinalizerOption(&testCtx)
}
BeforeEach(func() {
diff --git a/controllers/apps/component/types.go b/controllers/apps/component/scheme.go
similarity index 63%
rename from controllers/apps/component/types.go
rename to controllers/apps/component/scheme.go
index ff6ecf6f3ad..f263bbf3732 100644
--- a/controllers/apps/component/types.go
+++ b/controllers/apps/component/scheme.go
@@ -21,31 +21,24 @@ package component
import (
snapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v6/apis/volumesnapshot/v1"
- batchv1 "k8s.io/api/batch/v1"
- "k8s.io/apimachinery/pkg/runtime"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
appsv1 "github.com/apecloud/kubeblocks/apis/apps/v1"
appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
appsv1beta1 "github.com/apecloud/kubeblocks/apis/apps/v1beta1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
- extensionsv1alpha1 "github.com/apecloud/kubeblocks/apis/extensions/v1alpha1"
workloads "github.com/apecloud/kubeblocks/apis/workloads/v1"
-)
-
-var (
- rscheme = runtime.NewScheme()
+ "github.com/apecloud/kubeblocks/pkg/controller/model"
)
func init() {
- utilruntime.Must(clientgoscheme.AddToScheme(rscheme))
- utilruntime.Must(appsv1alpha1.AddToScheme(rscheme))
- utilruntime.Must(appsv1beta1.AddToScheme(rscheme))
- utilruntime.Must(appsv1.AddToScheme(rscheme))
- utilruntime.Must(dpv1alpha1.AddToScheme(rscheme))
- utilruntime.Must(snapshotv1.AddToScheme(rscheme))
- utilruntime.Must(extensionsv1alpha1.AddToScheme(rscheme))
- utilruntime.Must(batchv1.AddToScheme(rscheme))
- utilruntime.Must(workloads.AddToScheme(rscheme))
+ model.AddScheme(clientgoscheme.AddToScheme)
+ model.AddScheme(appsv1alpha1.AddToScheme)
+ model.AddScheme(appsv1beta1.AddToScheme)
+ model.AddScheme(appsv1.AddToScheme)
+ model.AddScheme(dpv1alpha1.AddToScheme)
+ model.AddScheme(snapshotv1.AddToScheme)
+ model.AddScheme(workloads.AddToScheme)
+ // model.AddScheme(extensionsv1alpha1.AddToScheme)
+ // model.AddScheme(batchv1.AddToScheme)
}
diff --git a/controllers/apps/component/suite_test.go b/controllers/apps/component/suite_test.go
index d6d194373f6..3b465956d5d 100644
--- a/controllers/apps/component/suite_test.go
+++ b/controllers/apps/component/suite_test.go
@@ -46,12 +46,9 @@ import (
appsv1alpha1 "github.com/apecloud/kubeblocks/apis/apps/v1alpha1"
appsv1beta1 "github.com/apecloud/kubeblocks/apis/apps/v1beta1"
dpv1alpha1 "github.com/apecloud/kubeblocks/apis/dataprotection/v1alpha1"
- opsv1alpha1 "github.com/apecloud/kubeblocks/apis/operations/v1alpha1"
workloadsv1 "github.com/apecloud/kubeblocks/apis/workloads/v1"
"github.com/apecloud/kubeblocks/controllers/apps"
- "github.com/apecloud/kubeblocks/controllers/apps/cluster"
"github.com/apecloud/kubeblocks/controllers/apps/configuration"
- "github.com/apecloud/kubeblocks/controllers/dataprotection"
"github.com/apecloud/kubeblocks/controllers/k8score"
"github.com/apecloud/kubeblocks/pkg/constant"
"github.com/apecloud/kubeblocks/pkg/controller/model"
@@ -128,10 +125,6 @@ var _ = BeforeSuite(func() {
Expect(err).NotTo(HaveOccurred())
model.AddScheme(appsv1alpha1.AddToScheme)
- err = opsv1alpha1.AddToScheme(scheme.Scheme)
- Expect(err).NotTo(HaveOccurred())
- model.AddScheme(opsv1alpha1.AddToScheme)
-
err = appsv1beta1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
model.AddScheme(appsv1beta1.AddToScheme)
@@ -181,20 +174,6 @@ var _ = BeforeSuite(func() {
err = intctrlutil.InitHostPortManager(k8sClient)
Expect(err).ToNot(HaveOccurred())
- err = (&apps.ClusterDefinitionReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("cluster-definition-controller"),
- }).SetupWithManager(k8sManager)
- Expect(err).ToNot(HaveOccurred())
-
- err = (&apps.ShardingDefinitionReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("sharding-definition-controller"),
- }).SetupWithManager(k8sManager)
- Expect(err).ToNot(HaveOccurred())
-
err = (&apps.ComponentDefinitionReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
@@ -209,21 +188,6 @@ var _ = BeforeSuite(func() {
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
- err = (&apps.SidecarDefinitionReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("sidecar-definition-controller"),
- }).SetupWithManager(k8sManager)
- Expect(err).ToNot(HaveOccurred())
-
- clusterRecorder = k8sManager.GetEventRecorderFor("cluster-controller")
- err = (&cluster.ClusterReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- Recorder: clusterRecorder,
- }).SetupWithManager(k8sManager)
- Expect(err).ToNot(HaveOccurred())
-
err = (&ComponentReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
@@ -231,20 +195,6 @@ var _ = BeforeSuite(func() {
}).SetupWithManager(k8sManager, nil)
Expect(err).ToNot(HaveOccurred())
- err = (&apps.ServiceDescriptorReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("service-descriptor-controller"),
- }).SetupWithManager(k8sManager)
- Expect(err).ToNot(HaveOccurred())
-
- err = (&k8score.EventReconciler{
- Client: k8sManager.GetClient(),
- Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("event-controller"),
- }).SetupWithManager(k8sManager, nil)
- Expect(err).ToNot(HaveOccurred())
-
err = (&configuration.ConfigConstraintReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
@@ -259,11 +209,11 @@ var _ = BeforeSuite(func() {
}).SetupWithManager(k8sManager, nil)
Expect(err).ToNot(HaveOccurred())
- err = (&dataprotection.BackupPolicyTemplateReconciler{
+ err = (&k8score.EventReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
- Recorder: k8sManager.GetEventRecorderFor("backup-policy-template-controller"),
- }).SetupWithManager(k8sManager)
+ Recorder: k8sManager.GetEventRecorderFor("event-controller"),
+ }).SetupWithManager(k8sManager, nil)
Expect(err).ToNot(HaveOccurred())
testCtx = testutil.NewDefaultTestContext(ctx, k8sClient, testEnv)
diff --git a/controllers/apps/component/test_utils.go b/controllers/apps/component/test_utils.go
deleted file mode 100644
index a3d4284138a..00000000000
--- a/controllers/apps/component/test_utils.go
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
-Copyright (C) 2022-2024 ApeCloud Co., Ltd
-
-This file is part of KubeBlocks project
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program. If not, see
+terminationPolicy + + +TerminationPolicyType + + + |
+
+(Optional)
+ Specifies the behavior when a Component is deleted. + |
+||
compDef string @@ -5953,6 +5967,20 @@ Instead, you can enable the creation of this service by specifying it explicitly
+ |
+terminationPolicy + + +TerminationPolicyType + + +
+(Optional)
+ |
+Specifies the behavior when a Component is deleted. +
| compDef string @@ -11639,7 +11667,7 @@ VarOption TerminationPolicyType
(
|