From 5163a18ca1794100dd28de65e3a81cc8c209f163 Mon Sep 17 00:00:00 2001 From: free6om Date: Fri, 19 Apr 2024 00:27:54 +0800 Subject: [PATCH] chore: say goodbye to the RSM and hello to the InstanceSet (#7084) --- PROJECT | 2 +- apis/apps/v1alpha1/clusterdefinition_types.go | 2 +- ...emachine_types.go => instanceset_types.go} | 76 +- ...hine_webhook.go => instanceset_webhook.go} | 32 +- ...ok_test.go => instanceset_webhook_test.go} | 26 +- apis/workloads/v1alpha1/webhook_suite_test.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 358 ++++---- cmd/manager/main.go | 16 +- ...apps.kubeblocks.io_clusterdefinitions.yaml | 24 +- .../bases/apps.kubeblocks.io_clusters.yaml | 3 +- .../workloads.kubeblocks.io_instancesets.yaml | 103 ++- config/crd/kustomization.yaml | 6 +- ...ainjection_in_workloads_instancesets.yaml} | 2 +- ...=> webhook_in_workloads_instancesets.yaml} | 2 +- config/rbac/role.yaml | 58 +- ...=> workloads_instanceset_editor_role.yaml} | 8 +- ...=> workloads_instanceset_viewer_role.yaml} | 8 +- ...ml => workloads_v1alpha1_instanceset.yaml} | 8 +- config/webhook/manifests.yaml | 12 +- controllers/apps/cluster_controller_test.go | 12 +- .../apps/cluster_status_event_handler_test.go | 8 +- controllers/apps/component_controller.go | 13 +- controllers/apps/component_controller_test.go | 182 ++-- controllers/apps/component_utils.go | 4 +- controllers/apps/component_utils_test.go | 2 +- .../combine_upgrade_policy_test.go | 4 +- .../configuration/config_reconcile_wrapper.go | 12 +- .../configuration/config_related_helper.go | 2 +- .../configuration/parallel_upgrade_policy.go | 2 +- .../parallel_upgrade_policy_test.go | 16 +- controllers/apps/configuration/policy_util.go | 16 +- .../apps/configuration/policy_util_test.go | 20 +- .../configuration/reconfigure_controller.go | 4 +- .../apps/configuration/reconfigure_policy.go | 12 +- .../configuration/rolling_upgrade_policy.go | 2 +- .../rolling_upgrade_policy_test.go | 10 +- .../apps/configuration/simple_policy.go | 2 +- 
.../apps/configuration/simple_policy_test.go | 24 +- .../apps/configuration/sync_upgrade_policy.go | 2 +- .../configuration/sync_upgrade_policy_test.go | 10 +- controllers/apps/configuration/types.go | 3 +- .../apps/operations/ops_progress_util.go | 12 +- controllers/apps/operations/ops_util.go | 8 +- controllers/apps/operations/restart.go | 2 +- controllers/apps/opsrequest_controller.go | 10 +- .../apps/opsrequest_controller_test.go | 54 +- .../apps/systemaccount_controller_test.go | 12 +- .../apps/transformer_cluster_backup_policy.go | 8 +- .../apps/transformer_component_deletion.go | 2 +- .../apps/transformer_component_rbac.go | 6 +- .../apps/transformer_component_rbac_test.go | 16 +- .../apps/transformer_component_service.go | 10 +- .../apps/transformer_component_status.go | 90 +- .../apps/transformer_component_tls_test.go | 4 +- .../apps/transformer_component_vars.go | 10 +- .../apps/transformer_component_workload.go | 258 +++--- controllers/k8score/event_controller_test.go | 4 +- ...ontroller.go => instanceset_controller.go} | 85 +- ...test.go => instanceset_controller_test.go} | 16 +- controllers/workloads/suite_test.go | 2 +- deploy/helm/config/rbac/role.yaml | 58 +- ...apps.kubeblocks.io_clusterdefinitions.yaml | 24 +- .../crds/apps.kubeblocks.io_clusters.yaml | 3 +- .../workloads.kubeblocks.io_instancesets.yaml | 103 ++- .../workloads_instanceset_editor_role.yaml | 26 + .../workloads_instanceset_viewer_role.yaml | 22 + ...ds_replicatedstatemachine_editor_role.yaml | 9 +- ...ds_replicatedstatemachine_viewer_role.yaml | 9 +- docs/developer_docs/api-reference/cluster.md | 774 +++++++++--------- .../v1alpha1/fake/fake_instanceset.go | 141 ++++ .../fake/fake_replicatedstatemachine.go | 141 ---- .../v1alpha1/fake/fake_workloads_client.go | 4 +- .../workloads/v1alpha1/generated_expansion.go | 2 +- .../typed/workloads/v1alpha1/instanceset.go | 195 +++++ .../v1alpha1/replicatedstatemachine.go | 195 ----- .../workloads/v1alpha1/workloads_client.go | 6 +- 
.../informers/externalversions/generic.go | 4 +- ...plicatedstatemachine.go => instanceset.go} | 38 +- .../workloads/v1alpha1/interface.go | 10 +- .../workloads/v1alpha1/expansion_generated.go | 12 +- .../listers/workloads/v1alpha1/instanceset.go | 99 +++ .../v1alpha1/replicatedstatemachine.go | 99 --- pkg/constant/const.go | 2 +- pkg/constant/pattern.go | 8 +- pkg/controller/builder/builder_base_test.go | 4 +- ...ate_machine.go => builder_instance_set.go} | 58 +- ...e_test.go => builder_instance_set_test.go} | 84 +- .../component_definition_convertor.go | 4 +- .../component_definition_convertor_test.go | 12 +- pkg/controller/component/pod_utils.go | 22 +- pkg/controller/component/rsm_convertor.go | 180 ++-- .../component/rsm_convertor_test.go | 6 +- pkg/controller/component/vars.go | 8 +- pkg/controller/component/workload_utils.go | 12 +- .../configuration/tool_image_builder_test.go | 6 +- pkg/controller/factory/builder.go | 32 +- pkg/controller/factory/builder_test.go | 48 +- .../handler/handler_builder_test.go | 12 +- .../in_place_update_util.go | 24 +- .../in_place_update_util_test.go | 20 +- .../{rsm2 => instanceset}/instance_util.go | 60 +- .../instance_util_test.go | 86 +- .../reconciler_assistant_object.go | 26 +- .../reconciler_assistant_object_test.go | 11 +- .../reconciler_deletion.go | 2 +- .../reconciler_deletion_test.go | 10 +- .../reconciler_fix_meta.go | 2 +- .../reconciler_fix_meta_test.go | 6 +- .../reconciler_instance_alignment.go | 22 +- .../reconciler_instance_alignment_test.go | 24 +- .../reconciler_revision_update.go | 22 +- .../reconciler_revision_update_test.go | 22 +- .../reconciler_status.go | 38 +- .../reconciler_status_test.go | 60 +- .../reconciler_update.go | 46 +- .../reconciler_update_test.go | 24 +- .../{rsm2 => instanceset}/revision_util.go | 16 +- .../revision_util_test.go | 18 +- .../{rsm2 => instanceset}/suite_test.go | 14 +- .../{rsm2 => instanceset}/tree_loader.go | 4 +- .../{rsm2 => instanceset}/tree_loader_test.go | 8 +- 
pkg/controller/{rsm2 => instanceset}/types.go | 8 +- pkg/controller/{rsm2 => instanceset}/utils.go | 14 +- .../{rsm2 => instanceset}/utils_test.go | 2 +- .../kubebuilderx/controller_test.go | 2 +- pkg/controller/kubebuilderx/plan_builder.go | 4 +- .../kubebuilderx/plan_builder_test.go | 78 +- .../kubebuilderx/reconciler_test.go | 2 +- pkg/controller/plan/restore.go | 6 +- pkg/controller/rsm/plan_builder.go | 4 +- pkg/controller/rsm/plan_builder_test.go | 12 +- pkg/controller/rsm/pod_role_event_handler.go | 4 +- .../rsm/pod_role_event_handler_test.go | 4 +- pkg/controller/rsm/suite_test.go | 6 +- .../rsm/transformer_deletion_test.go | 10 +- .../rsm/transformer_fix_meta_test.go | 4 +- pkg/controller/rsm/transformer_init.go | 6 +- pkg/controller/rsm/transformer_init_test.go | 4 +- .../rsm/transformer_member_reconfiguration.go | 16 +- ...transformer_member_reconfiguration_test.go | 2 +- .../rsm/transformer_object_generation.go | 20 +- .../transformer_objection_generation_test.go | 4 +- pkg/controller/rsm/transformer_status_test.go | 6 +- .../rsm/transformer_update_strategy.go | 6 +- .../rsm/transformer_update_strategy_test.go | 2 +- pkg/controller/rsm/types.go | 8 +- pkg/controller/rsm/update_plan.go | 10 +- pkg/controller/rsm/update_plan_test.go | 2 +- pkg/controller/rsm/utils.go | 34 +- pkg/controller/rsm/utils_test.go | 4 +- .../{rsm_utils.go => instance_set_utils.go} | 8 +- pkg/generics/type.go | 2 +- pkg/lorry/engines/custom/manager.go | 10 +- .../apps/cluster_consensus_test_util.go | 10 +- .../{rsm_factoy.go => instance_set_factoy.go} | 22 +- .../k8s/{rsm_util.go => instance_set_util.go} | 120 +-- 156 files changed, 2633 insertions(+), 2579 deletions(-) rename apis/workloads/v1alpha1/{replicatedstatemachine_types.go => instanceset_types.go} (87%) rename apis/workloads/v1alpha1/{replicatedstatemachine_webhook.go => instanceset_webhook.go} (65%) rename apis/workloads/v1alpha1/{replicatedstatemachine_webhook_test.go => instanceset_webhook_test.go} (82%) rename 
deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml => config/crd/bases/workloads.kubeblocks.io_instancesets.yaml (99%) rename config/crd/patches/{cainjection_in_workloads_replicatedstatemachines.yaml => cainjection_in_workloads_instancesets.yaml} (81%) rename config/crd/patches/{webhook_in_workloads_replicatedstatemachines.yaml => webhook_in_workloads_instancesets.yaml} (86%) rename config/rbac/{workloads_replicatedstatemachine_editor_role.yaml => workloads_instanceset_editor_role.yaml} (76%) rename config/rbac/{workloads_replicatedstatemachine_viewer_role.yaml => workloads_instanceset_viewer_role.yaml} (74%) rename config/samples/{workloads_v1alpha1_replicatedstatemachine.yaml => workloads_v1alpha1_instanceset.yaml} (56%) rename controllers/workloads/{replicatedstatemachine_controller.go => instanceset_controller.go} (70%) rename controllers/workloads/{replicatedstatemachine_controller_test.go => instanceset_controller_test.go} (84%) rename config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml => deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml (99%) create mode 100644 deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml create mode 100644 deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml create mode 100644 pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_instanceset.go delete mode 100644 pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_replicatedstatemachine.go create mode 100644 pkg/client/clientset/versioned/typed/workloads/v1alpha1/instanceset.go delete mode 100644 pkg/client/clientset/versioned/typed/workloads/v1alpha1/replicatedstatemachine.go rename pkg/client/informers/externalversions/workloads/v1alpha1/{replicatedstatemachine.go => instanceset.go} (53%) create mode 100644 pkg/client/listers/workloads/v1alpha1/instanceset.go delete mode 100644 pkg/client/listers/workloads/v1alpha1/replicatedstatemachine.go rename 
pkg/controller/builder/{builder_replicated_state_machine.go => builder_instance_set.go} (51%) rename pkg/controller/builder/{builder_replicated_state_machine_test.go => builder_instance_set_test.go} (70%) rename pkg/controller/{rsm2 => instanceset}/in_place_update_util.go (94%) rename pkg/controller/{rsm2 => instanceset}/in_place_update_util_test.go (94%) rename pkg/controller/{rsm2 => instanceset}/instance_util.go (90%) rename pkg/controller/{rsm2 => instanceset}/instance_util_test.go (82%) rename pkg/controller/{rsm2 => instanceset}/reconciler_assistant_object.go (86%) rename pkg/controller/{rsm2 => instanceset}/reconciler_assistant_object_test.go (88%) rename pkg/controller/{rsm2 => instanceset}/reconciler_deletion.go (99%) rename pkg/controller/{rsm2 => instanceset}/reconciler_deletion_test.go (90%) rename pkg/controller/{rsm2 => instanceset}/reconciler_fix_meta.go (98%) rename pkg/controller/{rsm2 => instanceset}/reconciler_fix_meta_test.go (93%) rename pkg/controller/{rsm2 => instanceset}/reconciler_instance_alignment.go (90%) rename pkg/controller/{rsm2 => instanceset}/reconciler_instance_alignment_test.go (87%) rename pkg/controller/{rsm2 => instanceset}/reconciler_revision_update.go (83%) rename pkg/controller/{rsm2 => instanceset}/reconciler_revision_update_test.go (75%) rename pkg/controller/{rsm2 => instanceset}/reconciler_status.go (73%) rename pkg/controller/{rsm2 => instanceset}/reconciler_status_test.go (68%) rename pkg/controller/{rsm2 => instanceset}/reconciler_update.go (79%) rename pkg/controller/{rsm2 => instanceset}/reconciler_update_test.go (91%) rename pkg/controller/{rsm2 => instanceset}/revision_util.go (94%) rename pkg/controller/{rsm2 => instanceset}/revision_util_test.go (98%) rename pkg/controller/{rsm2 => instanceset}/suite_test.go (95%) rename pkg/controller/{rsm2 => instanceset}/tree_loader.go (95%) rename pkg/controller/{rsm2 => instanceset}/tree_loader_test.go (93%) rename pkg/controller/{rsm2 => instanceset}/types.go (91%) rename 
pkg/controller/{rsm2 => instanceset}/utils.go (88%) rename pkg/controller/{rsm2 => instanceset}/utils_test.go (99%) rename pkg/controllerutil/{rsm_utils.go => instance_set_utils.go} (80%) rename pkg/testutil/apps/{rsm_factoy.go => instance_set_factoy.go} (74%) rename pkg/testutil/k8s/{rsm_util.go => instance_set_util.go} (50%) diff --git a/PROJECT b/PROJECT index 66bb51ef783..021d51455a4 100644 --- a/PROJECT +++ b/PROJECT @@ -120,7 +120,7 @@ resources: controller: true domain: kubeblocks.io group: workloads - kind: ReplicatedStateMachine + kind: InstanceSet path: github.com/apecloud/kubeblocks/apis/workloads/v1alpha1 version: v1alpha1 webhooks: diff --git a/apis/apps/v1alpha1/clusterdefinition_types.go b/apis/apps/v1alpha1/clusterdefinition_types.go index 0c7e5baeb4b..77d9a52d399 100644 --- a/apis/apps/v1alpha1/clusterdefinition_types.go +++ b/apis/apps/v1alpha1/clusterdefinition_types.go @@ -550,7 +550,7 @@ type ClusterComponentDefinition struct { ReplicationSpec *ReplicationSetSpec `json:"replicationSpec,omitempty"` // Defines workload spec of this component. - // From KB 0.7.0, RSM(ReplicatedStateMachineSpec) will be the underlying CR which powers all kinds of workload in KB. + // From KB 0.7.0, RSM(InstanceSetSpec) will be the underlying CR which powers all kinds of workload in KB. // RSM is an enhanced stateful workload extension dedicated for heavy-state workloads like databases. // // +optional diff --git a/apis/workloads/v1alpha1/replicatedstatemachine_types.go b/apis/workloads/v1alpha1/instanceset_types.go similarity index 87% rename from apis/workloads/v1alpha1/replicatedstatemachine_types.go rename to apis/workloads/v1alpha1/instanceset_types.go index 956dcf4bd40..9bc00855e33 100644 --- a/apis/workloads/v1alpha1/replicatedstatemachine_types.go +++ b/apis/workloads/v1alpha1/instanceset_types.go @@ -27,8 +27,8 @@ import ( type InstanceTemplate struct { // Specifies the name of the template. 
- // Each instance of the template derives its name from the RSM's Name, the template's Name and the instance's ordinal. - // The constructed instance name follows the pattern $(rsm.name)-$(template.name)-$(ordinal). + // Each instance of the template derives its name from the InstanceSet Name, the template's Name and the instance's ordinal. + // The constructed instance name follows the pattern $(instance_set.name)-$(template.name)-$(ordinal). // The ordinal starts from 0 by default. // // +kubebuilder:validation:MaxLength=54 @@ -97,8 +97,8 @@ type InstanceTemplate struct { VolumeClaimTemplates []corev1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty"` } -// ReplicatedStateMachineSpec defines the desired state of ReplicatedStateMachine -type ReplicatedStateMachineSpec struct { +// InstanceSetSpec defines the desired state of InstanceSet +type InstanceSetSpec struct { // Specifies the desired number of replicas of the given Template. // These replicas are instantiations of the same Template, with each having a consistent identity. // Defaults to 1 if unspecified. @@ -143,17 +143,17 @@ type ReplicatedStateMachineSpec struct { // // Instance is the fundamental unit managed by KubeBlocks. // It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. - // A RSM manages instances with a total count of Replicas, + // An InstanceSet manages instances with a total count of Replicas, // and by default, all these instances are generated from the same template. // The InstanceTemplate provides a way to override values in the default template, - // allowing the RSM to manage instances from different templates. + // allowing the InstanceSet to manage instances from different templates. // - // The naming convention for instances (pods) based on the RSM Name, InstanceTemplate Name, and ordinal. - // The constructed instance name follows the pattern: $(rsm.name)-$(template.name)-$(ordinal). 
+ // The naming convention for instances (pods) based on the InstanceSet Name, InstanceTemplate Name, and ordinal. + // The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal). // By default, the ordinal starts from 0 for each InstanceTemplate. // It is important to ensure that the Name of each InstanceTemplate is unique. // - // The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the RSM. + // The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. // Any remaining replicas will be generated using the default template and will follow the default naming rules. // // +optional @@ -165,7 +165,7 @@ type ReplicatedStateMachineSpec struct { OfflineInstances []string `json:"offlineInstances,omitempty"` // Represents a list of claims that pods are allowed to reference. - // The ReplicatedStateMachine controller is responsible for mapping network identities to + // The InstanceSet controller is responsible for mapping network identities to // claims in a way that maintains the identity of a pod. Every claim in // this list must have at least one matching (by name) volumeMount in one // container in the template. A claim in this list takes precedence over @@ -186,7 +186,7 @@ type ReplicatedStateMachineSpec struct { PodManagementPolicy appsv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"` // Indicates the StatefulSetUpdateStrategy that will be - // employed to update Pods in the RSM when a revision is made to + // employed to update Pods in the InstanceSet when a revision is made to // Template. 
// UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil UpdateStrategy appsv1.StatefulSetUpdateStrategy `json:"updateStrategy,omitempty"` @@ -213,7 +213,7 @@ type ReplicatedStateMachineSpec struct { // +optional MemberUpdateStrategy *MemberUpdateStrategy `json:"memberUpdateStrategy,omitempty"` - // Indicates that the rsm is paused, meaning the reconciliation of this rsm object will be paused. + // Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused. // +optional Paused bool `json:"paused,omitempty"` @@ -222,8 +222,8 @@ type ReplicatedStateMachineSpec struct { Credential *Credential `json:"credential,omitempty"` } -// ReplicatedStateMachineStatus defines the observed state of ReplicatedStateMachine -type ReplicatedStateMachineStatus struct { +// InstanceSetStatus defines the observed state of InstanceSet +type InstanceSetStatus struct { appsv1.StatefulSetStatus `json:",inline"` // Defines the initial number of pods (members) when the cluster is first initialized. @@ -236,7 +236,7 @@ type ReplicatedStateMachineStatus struct { // +optional ReadyInitReplicas int32 `json:"readyInitReplicas,omitempty"` - // When not empty, indicates the version of the Replicated State Machine (RSM) used to generate the underlying workload. + // When not empty, indicates the version of the InstanceSet used to generate the underlying workload. // // +optional CurrentGeneration int64 `json:"currentGeneration,omitempty"` @@ -246,13 +246,13 @@ type ReplicatedStateMachineStatus struct { // +optional MembersStatus []MemberStatus `json:"membersStatus,omitempty"` - // currentRevisions, if not empty, indicates the old version of the RSM used to generate Pods. + // currentRevisions, if not empty, indicates the old version of the InstanceSet used to generate the underlying workload. // key is the pod name, value is the revision. 
// // +optional CurrentRevisions map[string]string `json:"currentRevisions,omitempty"` - // updateRevisions, if not empty, indicates the new version of the RSM used to generate Pods. + // updateRevisions, if not empty, indicates the new version of the InstanceSet used to generate the underlying workload. // key is the pod name, value is the revision. // // +optional @@ -262,14 +262,14 @@ type ReplicatedStateMachineStatus struct { // +genclient // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// +kubebuilder:resource:categories={kubeblocks,all},shortName=rsm -// +kubebuilder:printcolumn:name="LEADER",type="string",JSONPath=".status.membersStatus[?(@.role.isLeader==true)].podName",description="leader pod name." +// +kubebuilder:resource:categories={kubeblocks,all},shortName=its +// +kubebuilder:printcolumn:name="LEADER",type="string",JSONPath=".status.membersStatus[?(@.role.isLeader==true)].podName",description="leader instance name." // +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.readyReplicas",description="ready replicas." // +kubebuilder:printcolumn:name="REPLICAS",type="string",JSONPath=".status.replicas",description="total replicas." // +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" -// ReplicatedStateMachine is the Schema for the replicatedstatemachines API. -type ReplicatedStateMachine struct { +// InstanceSet is the Schema for the instancesets API. +type InstanceSet struct { // The metadata for the type, like API version and kind. metav1.TypeMeta `json:",inline"` @@ -278,20 +278,20 @@ type ReplicatedStateMachine struct { // Defines the desired state of the state machine. It includes the configuration details for the state machine. // - Spec ReplicatedStateMachineSpec `json:"spec,omitempty"` + Spec InstanceSetSpec `json:"spec,omitempty"` // Represents the current information about the state machine. This data may be out of date. 
// - Status ReplicatedStateMachineStatus `json:"status,omitempty"` + Status InstanceSetStatus `json:"status,omitempty"` } // +kubebuilder:object:root=true -// ReplicatedStateMachineList contains a list of ReplicatedStateMachine -type ReplicatedStateMachineList struct { +// InstanceSetList contains a list of InstanceSet +type InstanceSetList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []ReplicatedStateMachine `json:"items"` + Items []InstanceSet `json:"items"` } type ReplicaRole struct { @@ -366,9 +366,9 @@ type RoleProbe struct { // Upon completion of all actions, the final output should be a single string representing the role name defined in spec.Roles. // The latest [BusyBox](https://busybox.net/) image will be used if Image is not configured. // Environment variables can be used in Command: - // - v_KB_RSM_LAST_STDOUT: stdout from the last action, watch for 'v_' prefix - // - KB_RSM_USERNAME: username part of the credential - // - KB_RSM_PASSWORD: password part of the credential + // - v_KB_ITS_LAST_STDOUT: stdout from the last action, watch for 'v_' prefix + // - KB_ITS_USERNAME: username part of the credential + // - KB_ITS_PASSWORD: password part of the credential // // +optional CustomHandler []Action `json:"customHandler,omitempty"` @@ -418,13 +418,13 @@ type RoleProbe struct { type Credential struct { // Defines the user's name for the credential. - // The corresponding environment variable will be KB_RSM_USERNAME. + // The corresponding environment variable will be KB_ITS_USERNAME. // // +kubebuilder:validation:Required Username CredentialVar `json:"username"` // Represents the user's password for the credential. - // The corresponding environment variable will be KB_RSM_PASSWORD. + // The corresponding environment variable will be KB_ITS_PASSWORD. 
// // +kubebuilder:validation:Required Password CredentialVar `json:"password"` @@ -449,11 +449,11 @@ type CredentialVar struct { type MembershipReconfiguration struct { // Specifies the environment variables that can be used in all following Actions: - // - KB_RSM_USERNAME: Represents the username part of the credential - // - KB_RSM_PASSWORD: Represents the password part of the credential - // - KB_RSM_LEADER_HOST: Represents the leader host - // - KB_RSM_TARGET_HOST: Represents the target host - // - KB_RSM_SERVICE_PORT: Represents the service port + // - KB_ITS_USERNAME: Represents the username part of the credential + // - KB_ITS_PASSWORD: Represents the password part of the credential + // - KB_ITS_LEADER_HOST: Represents the leader host + // - KB_ITS_TARGET_HOST: Represents the target host + // - KB_ITS_SERVICE_PORT: Represents the service port // // Defines the action to perform a switchover. // If the Image is not configured, the latest [BusyBox](https://busybox.net/) image will be used. @@ -519,12 +519,12 @@ type MemberStatus struct { // +optional Ready bool `json:"ready,omitempty"` - // Indicates whether it is required for the replica set manager (rsm) to have at least one primary pod ready. + // Indicates whether it is required for the InstanceSet to have at least one primary instance ready. 
// // +optional ReadyWithoutPrimary bool `json:"readyWithoutPrimary"` } func init() { - SchemeBuilder.Register(&ReplicatedStateMachine{}, &ReplicatedStateMachineList{}) + SchemeBuilder.Register(&InstanceSet{}, &InstanceSetList{}) } diff --git a/apis/workloads/v1alpha1/replicatedstatemachine_webhook.go b/apis/workloads/v1alpha1/instanceset_webhook.go similarity index 65% rename from apis/workloads/v1alpha1/replicatedstatemachine_webhook.go rename to apis/workloads/v1alpha1/instanceset_webhook.go index 96249894367..5db0e484ed8 100644 --- a/apis/workloads/v1alpha1/replicatedstatemachine_webhook.go +++ b/apis/workloads/v1alpha1/instanceset_webhook.go @@ -28,9 +28,9 @@ import ( ) // log is for logging in this package. -var replicatedstatemachinelog = logf.Log.WithName("replicatedstatemachine-resource") +var instancesetlog = logf.Log.WithName("instanceset-resource") -func (r *ReplicatedStateMachine) SetupWebhookWithManager(mgr ctrl.Manager) error { +func (r *InstanceSet) SetupWebhookWithManager(mgr ctrl.Manager) error { return ctrl.NewWebhookManagedBy(mgr). For(r). Complete() @@ -38,44 +38,44 @@ func (r *ReplicatedStateMachine) SetupWebhookWithManager(mgr ctrl.Manager) error // TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
-//+kubebuilder:webhook:path=/mutate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine,mutating=true,failurePolicy=fail,sideEffects=None,groups=workloads.kubeblocks.io,resources=replicatedstatemachines,verbs=create;update,versions=v1alpha1,name=mreplicatedstatemachine.kb.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:path=/mutate-workloads-kubeblocks-io-v1alpha1-instanceset,mutating=true,failurePolicy=fail,sideEffects=None,groups=workloads.kubeblocks.io,resources=instancesets,verbs=create;update,versions=v1alpha1,name=minstanceset.kb.io,admissionReviewVersions=v1 -var _ webhook.Defaulter = &ReplicatedStateMachine{} +var _ webhook.Defaulter = &InstanceSet{} // Default implements webhook.Defaulter so a webhook will be registered for the type -func (r *ReplicatedStateMachine) Default() { - replicatedstatemachinelog.Info("default", "name", r.Name) +func (r *InstanceSet) Default() { + instancesetlog.Info("default", "name", r.Name) // TODO(user): fill in your defaulting logic. } // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
-//+kubebuilder:webhook:path=/validate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine,mutating=false,failurePolicy=fail,sideEffects=None,groups=workloads.kubeblocks.io,resources=replicatedstatemachines,verbs=create;update,versions=v1alpha1,name=vreplicatedstatemachine.kb.io,admissionReviewVersions=v1 +//+kubebuilder:webhook:path=/validate-workloads-kubeblocks-io-v1alpha1-instanceset,mutating=false,failurePolicy=fail,sideEffects=None,groups=workloads.kubeblocks.io,resources=instancesets,verbs=create;update,versions=v1alpha1,name=vinstanceset.kb.io,admissionReviewVersions=v1 -var _ webhook.Validator = &ReplicatedStateMachine{} +var _ webhook.Validator = &InstanceSet{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *ReplicatedStateMachine) ValidateCreate() (admission.Warnings, error) { - replicatedstatemachinelog.Info("validate create", "name", r.Name) +func (r *InstanceSet) ValidateCreate() (admission.Warnings, error) { + instancesetlog.Info("validate create", "name", r.Name) return nil, r.validate() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *ReplicatedStateMachine) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { - replicatedstatemachinelog.Info("validate update", "name", r.Name) +func (r *InstanceSet) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + instancesetlog.Info("validate update", "name", r.Name) return nil, r.validate() } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *ReplicatedStateMachine) ValidateDelete() (admission.Warnings, error) { - replicatedstatemachinelog.Info("validate delete", "name", r.Name) +func (r *InstanceSet) ValidateDelete() (admission.Warnings, error) { + instancesetlog.Info("validate delete", "name", r.Name) return nil, r.validate() } -func (r *ReplicatedStateMachine) validate() error { +func (r *InstanceSet) validate() error { 
var allErrs field.ErrorList if len(r.Spec.Roles) > 0 { @@ -105,7 +105,7 @@ func (r *ReplicatedStateMachine) validate() error { return apierrors.NewInvalid( schema.GroupKind{ Group: "workloads.kubeblocks.io/v1alpha1", - Kind: "ReplicatedStateMachine", + Kind: "InstanceSet", }, r.Name, allErrs) } diff --git a/apis/workloads/v1alpha1/replicatedstatemachine_webhook_test.go b/apis/workloads/v1alpha1/instanceset_webhook_test.go similarity index 82% rename from apis/workloads/v1alpha1/replicatedstatemachine_webhook_test.go rename to apis/workloads/v1alpha1/instanceset_webhook_test.go index aafe0b55689..a17f4ae5bdd 100644 --- a/apis/workloads/v1alpha1/replicatedstatemachine_webhook_test.go +++ b/apis/workloads/v1alpha1/instanceset_webhook_test.go @@ -26,10 +26,10 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" ) -var _ = Describe("ReplicatedStateMachine Webhook", func() { +var _ = Describe("InstanceSet Webhook", func() { Context("spec validation", func() { - const name = "test-replicated-state-machine" - var rsm *ReplicatedStateMachine + const name = "test-instance-set" + var its *InstanceSet BeforeEach(func() { commonLabels := map[string]string{ @@ -40,12 +40,12 @@ var _ = Describe("ReplicatedStateMachine Webhook", func() { constant.KBAppComponentLabelKey: "componentName", } replicas := int32(1) - rsm = &ReplicatedStateMachine{ + its = &InstanceSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: testCtx.DefaultNamespace, }, - Spec: ReplicatedStateMachineSpec{ + Spec: InstanceSetSpec{ Replicas: &replicas, Selector: &metav1.LabelSelector{ MatchLabels: commonLabels, @@ -78,48 +78,48 @@ var _ = Describe("ReplicatedStateMachine Webhook", func() { }) It("should return an error if no leader set", func() { - rsm.Spec.Roles = []ReplicaRole{ + its.Spec.Roles = []ReplicaRole{ { Name: "leader", IsLeader: false, AccessMode: ReadWriteMode, }, } - err := k8sClient.Create(ctx, rsm) + err := k8sClient.Create(ctx, its) Expect(err).ShouldNot(BeNil()) 
Expect(err.Error()).Should(ContainSubstring("leader is required")) }) It("should return an error if servicePort not provided", func() { - rsm.Spec.Roles = []ReplicaRole{ + its.Spec.Roles = []ReplicaRole{ { Name: "leader", IsLeader: true, AccessMode: ReadWriteMode, }, } - err := k8sClient.Create(ctx, rsm) + err := k8sClient.Create(ctx, its) Expect(err).ShouldNot(BeNil()) Expect(err.Error()).Should(ContainSubstring("servicePort must provide")) }) It("should succeed if spec is well defined", func() { - rsm.Spec.Roles = []ReplicaRole{ + its.Spec.Roles = []ReplicaRole{ { Name: "leader", IsLeader: true, AccessMode: ReadWriteMode, }, } - rsm.Spec.Service.Spec.Ports = []corev1.ServicePort{ + its.Spec.Service.Spec.Ports = []corev1.ServicePort{ { Name: "foo", Protocol: "tcp", Port: 12345, }, } - Expect(k8sClient.Create(ctx, rsm)).Should(Succeed()) - Expect(k8sClient.Delete(ctx, rsm)).Should(Succeed()) + Expect(k8sClient.Create(ctx, its)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, its)).Should(Succeed()) }) }) }) diff --git a/apis/workloads/v1alpha1/webhook_suite_test.go b/apis/workloads/v1alpha1/webhook_suite_test.go index 89ab4c9be66..e58c4632fec 100644 --- a/apis/workloads/v1alpha1/webhook_suite_test.go +++ b/apis/workloads/v1alpha1/webhook_suite_test.go @@ -107,7 +107,7 @@ var _ = BeforeSuite(func() { }) Expect(err).NotTo(HaveOccurred()) - err = (&ReplicatedStateMachine{}).SetupWebhookWithManager(mgr) + err = (&InstanceSet{}).SetupWebhookWithManager(mgr) Expect(err).NotTo(HaveOccurred()) testCtx = testutil.NewDefaultTestContext(ctx, k8sClient, testEnv) diff --git a/apis/workloads/v1alpha1/zz_generated.deepcopy.go b/apis/workloads/v1alpha1/zz_generated.deepcopy.go index d9eb4ddbbd9..5548e896c8b 100644 --- a/apis/workloads/v1alpha1/zz_generated.deepcopy.go +++ b/apis/workloads/v1alpha1/zz_generated.deepcopy.go @@ -92,6 +92,185 @@ func (in *CredentialVar) DeepCopy() *CredentialVar { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the 
receiver, writing into out. in must be non-nil. +func (in *InstanceSet) DeepCopyInto(out *InstanceSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSet. +func (in *InstanceSet) DeepCopy() *InstanceSet { + if in == nil { + return nil + } + out := new(InstanceSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSetList) DeepCopyInto(out *InstanceSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InstanceSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSetList. +func (in *InstanceSetList) DeepCopy() *InstanceSetList { + if in == nil { + return nil + } + out := new(InstanceSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InstanceSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InstanceSetSpec) DeepCopyInto(out *InstanceSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(v1.Service) + (*in).DeepCopyInto(*out) + } + if in.AlternativeServices != nil { + in, out := &in.AlternativeServices, &out.AlternativeServices + *out = make([]v1.Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Template.DeepCopyInto(&out.Template) + if in.Instances != nil { + in, out := &in.Instances, &out.Instances + *out = make([]InstanceTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.OfflineInstances != nil { + in, out := &in.OfflineInstances, &out.OfflineInstances + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.Roles != nil { + in, out := &in.Roles, &out.Roles + *out = make([]ReplicaRole, len(*in)) + copy(*out, *in) + } + if in.RoleProbe != nil { + in, out := &in.RoleProbe, &out.RoleProbe + *out = new(RoleProbe) + (*in).DeepCopyInto(*out) + } + if in.MembershipReconfiguration != nil { + in, out := &in.MembershipReconfiguration, &out.MembershipReconfiguration + *out = new(MembershipReconfiguration) + (*in).DeepCopyInto(*out) + } + if in.MemberUpdateStrategy != nil { + in, out := &in.MemberUpdateStrategy, &out.MemberUpdateStrategy + *out = new(MemberUpdateStrategy) + **out = **in + } + if in.Credential != nil { + in, out := &in.Credential, &out.Credential + *out = new(Credential) + 
(*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSetSpec. +func (in *InstanceSetSpec) DeepCopy() *InstanceSetSpec { + if in == nil { + return nil + } + out := new(InstanceSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstanceSetStatus) DeepCopyInto(out *InstanceSetStatus) { + *out = *in + in.StatefulSetStatus.DeepCopyInto(&out.StatefulSetStatus) + if in.MembersStatus != nil { + in, out := &in.MembersStatus, &out.MembersStatus + *out = make([]MemberStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CurrentRevisions != nil { + in, out := &in.CurrentRevisions, &out.CurrentRevisions + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.UpdateRevisions != nil { + in, out := &in.UpdateRevisions, &out.UpdateRevisions + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstanceSetStatus. +func (in *InstanceSetStatus) DeepCopy() *InstanceSetStatus { + if in == nil { + return nil + } + out := new(InstanceSetStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstanceTemplate) DeepCopyInto(out *InstanceTemplate) { *out = *in @@ -258,185 +437,6 @@ func (in *ReplicaRole) DeepCopy() *ReplicaRole { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReplicatedStateMachine) DeepCopyInto(out *ReplicatedStateMachine) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStateMachine. -func (in *ReplicatedStateMachine) DeepCopy() *ReplicatedStateMachine { - if in == nil { - return nil - } - out := new(ReplicatedStateMachine) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicatedStateMachine) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicatedStateMachineList) DeepCopyInto(out *ReplicatedStateMachineList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicatedStateMachine, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStateMachineList. -func (in *ReplicatedStateMachineList) DeepCopy() *ReplicatedStateMachineList { - if in == nil { - return nil - } - out := new(ReplicatedStateMachineList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicatedStateMachineList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReplicatedStateMachineSpec) DeepCopyInto(out *ReplicatedStateMachineSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(v1.Service) - (*in).DeepCopyInto(*out) - } - if in.AlternativeServices != nil { - in, out := &in.AlternativeServices, &out.AlternativeServices - *out = make([]v1.Service, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Template.DeepCopyInto(&out.Template) - if in.Instances != nil { - in, out := &in.Instances, &out.Instances - *out = make([]InstanceTemplate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.OfflineInstances != nil { - in, out := &in.OfflineInstances, &out.OfflineInstances - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]v1.PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) - if in.Roles != nil { - in, out := &in.Roles, &out.Roles - *out = make([]ReplicaRole, len(*in)) - copy(*out, *in) - } - if in.RoleProbe != nil { - in, out := &in.RoleProbe, &out.RoleProbe - *out = new(RoleProbe) - (*in).DeepCopyInto(*out) - } - if in.MembershipReconfiguration != nil { - in, out := &in.MembershipReconfiguration, &out.MembershipReconfiguration - *out = new(MembershipReconfiguration) - (*in).DeepCopyInto(*out) - } - if in.MemberUpdateStrategy != nil { - in, out := &in.MemberUpdateStrategy, &out.MemberUpdateStrategy - *out = new(MemberUpdateStrategy) - **out = **in - } - if in.Credential != nil { - in, out := &in.Credential, &out.Credential - *out = new(Credential) - 
(*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStateMachineSpec. -func (in *ReplicatedStateMachineSpec) DeepCopy() *ReplicatedStateMachineSpec { - if in == nil { - return nil - } - out := new(ReplicatedStateMachineSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicatedStateMachineStatus) DeepCopyInto(out *ReplicatedStateMachineStatus) { - *out = *in - in.StatefulSetStatus.DeepCopyInto(&out.StatefulSetStatus) - if in.MembersStatus != nil { - in, out := &in.MembersStatus, &out.MembersStatus - *out = make([]MemberStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CurrentRevisions != nil { - in, out := &in.CurrentRevisions, &out.CurrentRevisions - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.UpdateRevisions != nil { - in, out := &in.UpdateRevisions, &out.UpdateRevisions - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStateMachineStatus. -func (in *ReplicatedStateMachineStatus) DeepCopy() *ReplicatedStateMachineStatus { - if in == nil { - return nil - } - out := new(ReplicatedStateMachineStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RoleProbe) DeepCopyInto(out *RoleProbe) { *out = *in diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 26638787ecb..ffe8b881efa 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -60,9 +60,9 @@ import ( k8scorecontrollers "github.com/apecloud/kubeblocks/controllers/k8score" workloadscontrollers "github.com/apecloud/kubeblocks/controllers/workloads" "github.com/apecloud/kubeblocks/pkg/constant" + "github.com/apecloud/kubeblocks/pkg/controller/instanceset" "github.com/apecloud/kubeblocks/pkg/controller/multicluster" "github.com/apecloud/kubeblocks/pkg/controller/rsm" - "github.com/apecloud/kubeblocks/pkg/controller/rsm2" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" "github.com/apecloud/kubeblocks/pkg/metrics" viper "github.com/apecloud/kubeblocks/pkg/viperx" @@ -132,9 +132,9 @@ func init() { viper.SetDefault(constant.KBDataScriptClientsImage, "apecloud/kubeblocks-datascript:latest") viper.SetDefault(constant.KubernetesClusterDomainEnv, constant.DefaultDNSDomain) viper.SetDefault(rsm.FeatureGateRSMCompatibilityMode, true) - viper.SetDefault(rsm2.FeatureGateRSMReplicaProvider, string(rsm2.PodProvider)) - viper.SetDefault(rsm2.MaxPlainRevisionCount, 1024) - viper.SetDefault(rsm2.FeatureGateIgnorePodVerticalScaling, false) + viper.SetDefault(instanceset.FeatureGateRSMReplicaProvider, string(instanceset.PodProvider)) + viper.SetDefault(instanceset.MaxPlainRevisionCount, 1024) + viper.SetDefault(instanceset.FeatureGateIgnorePodVerticalScaling, false) viper.SetDefault(constant.FeatureGateEnableRuntimeMetrics, false) viper.SetDefault(constant.CfgKBReconcileWorkers, 8) } @@ -495,12 +495,12 @@ func main() { } if viper.GetBool(workloadsFlagKey.viperName()) { - if err = (&workloadscontrollers.ReplicatedStateMachineReconciler{ + if err = (&workloadscontrollers.InstanceSetReconciler{ Client: client, Scheme: mgr.GetScheme(), Recorder: mgr.GetEventRecorderFor("replicated-state-machine-controller"), }).SetupWithManager(mgr, 
multiClusterMgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ReplicatedStateMachine") + setupLog.Error(err, "unable to create controller", "controller", "InstanceSet") os.Exit(1) } } @@ -544,8 +544,8 @@ func main() { os.Exit(1) } - if err = (&workloadsv1alpha1.ReplicatedStateMachine{}).SetupWebhookWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create webhook", "webhook", "ReplicatedStateMachine") + if err = (&workloadsv1alpha1.InstanceSet{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "InstanceSet") os.Exit(1) } diff --git a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml index 382a4ce5266..c0f6d4ca809 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusterdefinitions.yaml @@ -8686,10 +8686,10 @@ spec: type: object rsmSpec: description: Defines workload spec of this component. From KB - 0.7.0, RSM(ReplicatedStateMachineSpec) will be the underlying - CR which powers all kinds of workload in KB. RSM is an enhanced - stateful workload extension dedicated for heavy-state workloads - like databases. + 0.7.0, RSM(InstanceSetSpec) will be the underlying CR which + powers all kinds of workload in KB. RSM is an enhanced stateful + workload extension dedicated for heavy-state workloads like + databases. 
properties: memberUpdateStrategy: description: "Describes the strategy for updating Members @@ -8814,11 +8814,11 @@ spec: type: object switchoverAction: description: "Specifies the environment variables that - can be used in all following Actions: - KB_RSM_USERNAME: - Represents the username part of the credential - KB_RSM_PASSWORD: - Represents the password part of the credential - KB_RSM_LEADER_HOST: - Represents the leader host - KB_RSM_TARGET_HOST: Represents - the target host - KB_RSM_SERVICE_PORT: Represents + can be used in all following Actions: - KB_ITS_USERNAME: + Represents the username part of the credential - KB_ITS_PASSWORD: + Represents the password part of the credential - KB_ITS_LEADER_HOST: + Represents the leader host - KB_ITS_TARGET_HOST: Represents + the target host - KB_ITS_SERVICE_PORT: Represents the service port \n Defines the action to perform a switchover. If the Image is not configured, the latest [BusyBox](https://busybox.net/) image will @@ -8865,10 +8865,10 @@ spec: be a single string representing the role name defined in spec.Roles. The latest [BusyBox](https://busybox.net/) image will be used if Image is not configured. 
Environment - variables can be used in Command: - v_KB_RSM_LAST_STDOUT: + variables can be used in Command: - v_KB_ITS_LAST_STDOUT: stdout from the last action, watch for ''v_'' prefix - - KB_RSM_USERNAME: username part of the credential - - KB_RSM_PASSWORD: password part of the credential' + - KB_ITS_USERNAME: username part of the credential + - KB_ITS_PASSWORD: password part of the credential' items: properties: args: diff --git a/config/crd/bases/apps.kubeblocks.io_clusters.yaml b/config/crd/bases/apps.kubeblocks.io_clusters.yaml index 25f92baf5cd..586d1a902e0 100644 --- a/config/crd/bases/apps.kubeblocks.io_clusters.yaml +++ b/config/crd/bases/apps.kubeblocks.io_clusters.yaml @@ -7595,8 +7595,7 @@ spec: type: boolean readyWithoutPrimary: description: Indicates whether it is required for the - replica set manager (rsm) to have at least one primary - pod ready. + InstanceSet to have at least one primary instance ready. type: boolean role: description: Defines the role of the replica in the cluster. 
diff --git a/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml b/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml similarity index 99% rename from deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml rename to config/crd/bases/workloads.kubeblocks.io_instancesets.yaml index d190ff4cd10..c4b3bdadb85 100644 --- a/deploy/helm/crds/workloads.kubeblocks.io_replicatedstatemachines.yaml +++ b/config/crd/bases/workloads.kubeblocks.io_instancesets.yaml @@ -5,23 +5,23 @@ metadata: controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks - name: replicatedstatemachines.workloads.kubeblocks.io + name: instancesets.workloads.kubeblocks.io spec: group: workloads.kubeblocks.io names: categories: - kubeblocks - all - kind: ReplicatedStateMachine - listKind: ReplicatedStateMachineList - plural: replicatedstatemachines + kind: InstanceSet + listKind: InstanceSetList + plural: instancesets shortNames: - - rsm - singular: replicatedstatemachine + - its + singular: instanceset scope: Namespaced versions: - additionalPrinterColumns: - - description: leader pod name. + - description: leader instance name. jsonPath: .status.membersStatus[?(@.role.isLeader==true)].podName name: LEADER type: string @@ -39,8 +39,7 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: ReplicatedStateMachine is the Schema for the replicatedstatemachines - API. + description: InstanceSet is the Schema for the instancesets API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -604,7 +603,7 @@ spec: properties: password: description: Represents the user's password for the credential. - The corresponding environment variable will be KB_RSM_PASSWORD. + The corresponding environment variable will be KB_ITS_PASSWORD. properties: value: description: "Specifies the value of the environment variable. 
@@ -710,7 +709,7 @@ spec: type: object username: description: Defines the user's name for the credential. The corresponding - environment variable will be KB_RSM_USERNAME. + environment variable will be KB_ITS_USERNAME. properties: value: description: "Specifies the value of the environment variable. @@ -822,19 +821,19 @@ spec: description: "Overrides values in default Template. \n Instance is the fundamental unit managed by KubeBlocks. It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. - A RSM manages instances with a total count of Replicas, and by default, - all these instances are generated from the same template. The InstanceTemplate - provides a way to override values in the default template, allowing - the RSM to manage instances from different templates. \n The naming - convention for instances (pods) based on the RSM Name, InstanceTemplate - Name, and ordinal. The constructed instance name follows the pattern: - $(rsm.name)-$(template.name)-$(ordinal). By default, the ordinal - starts from 0 for each InstanceTemplate. It is important to ensure - that the Name of each InstanceTemplate is unique. \n The sum of - replicas across all InstanceTemplates should not exceed the total - number of Replicas specified for the RSM. Any remaining replicas - will be generated using the default template and will follow the - default naming rules." + An InstanceSet manages instances with a total count of Replicas, + and by default, all these instances are generated from the same + template. The InstanceTemplate provides a way to override values + in the default template, allowing the InstanceSet to manage instances + from different templates. \n The naming convention for instances + (pods) based on the InstanceSet Name, InstanceTemplate Name, and + ordinal. The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal). + By default, the ordinal starts from 0 for each InstanceTemplate. 
+ It is important to ensure that the Name of each InstanceTemplate + is unique. \n The sum of replicas across all InstanceTemplates should + not exceed the total number of Replicas specified for the InstanceSet. + Any remaining replicas will be generated using the default template + and will follow the default naming rules." items: properties: annotations: @@ -974,9 +973,9 @@ spec: type: object name: description: Specifies the name of the template. Each instance - of the template derives its name from the RSM's Name, the - template's Name and the instance's ordinal. The constructed - instance name follows the pattern $(rsm.name)-$(template.name)-$(ordinal). + of the template derives its name from the InstanceSet Name, + the template's Name and the instance's ordinal. The constructed + instance name follows the pattern $(instance_set.name)-$(template.name)-$(ordinal). The ordinal starts from 0 by default. maxLength: 54 pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ @@ -3354,11 +3353,11 @@ spec: type: object switchoverAction: description: "Specifies the environment variables that can be - used in all following Actions: - KB_RSM_USERNAME: Represents - the username part of the credential - KB_RSM_PASSWORD: Represents - the password part of the credential - KB_RSM_LEADER_HOST: Represents - the leader host - KB_RSM_TARGET_HOST: Represents the target - host - KB_RSM_SERVICE_PORT: Represents the service port \n Defines + used in all following Actions: - KB_ITS_USERNAME: Represents + the username part of the credential - KB_ITS_PASSWORD: Represents + the password part of the credential - KB_ITS_LEADER_HOST: Represents + the leader host - KB_ITS_TARGET_HOST: Represents the target + host - KB_ITS_SERVICE_PORT: Represents the service port \n Defines the action to perform a switchover. If the Image is not configured, the latest [BusyBox](https://busybox.net/) image will be used." 
properties: @@ -3400,8 +3399,8 @@ spec: type: string type: array paused: - description: Indicates that the rsm is paused, meaning the reconciliation - of this rsm object will be paused. + description: Indicates that the InstanceSet is paused, meaning the + reconciliation of this InstanceSet object will be paused. type: boolean podManagementPolicy: description: "Controls how pods are created during initial scale up, @@ -3438,9 +3437,9 @@ spec: actions, the final output should be a single string representing the role name defined in spec.Roles. The latest [BusyBox](https://busybox.net/) image will be used if Image is not configured. Environment variables - can be used in Command: - v_KB_RSM_LAST_STDOUT: stdout from - the last action, watch for ''v_'' prefix - KB_RSM_USERNAME: - username part of the credential - KB_RSM_PASSWORD: password + can be used in Command: - v_KB_ITS_LAST_STDOUT: stdout from + the last action, watch for ''v_'' prefix - KB_ITS_USERNAME: + username part of the credential - KB_ITS_PASSWORD: password part of the credential' items: properties: @@ -11678,8 +11677,8 @@ spec: type: object updateStrategy: description: Indicates the StatefulSetUpdateStrategy that will be - employed to update Pods in the RSM when a revision is made to Template. - UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType + employed to update Pods in the InstanceSet when a revision is made + to Template. UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil properties: rollingUpdate: @@ -11717,11 +11716,11 @@ spec: type: object volumeClaimTemplates: description: Represents a list of claims that pods are allowed to - reference. The ReplicatedStateMachine controller is responsible - for mapping network identities to claims in a way that maintains - the identity of a pod. Every claim in this list must have at least - one matching (by name) volumeMount in one container in the template. 
- A claim in this list takes precedence over any volumes in the template, + reference. The InstanceSet controller is responsible for mapping + network identities to claims in a way that maintains the identity + of a pod. Every claim in this list must have at least one matching + (by name) volumeMount in one container in the template. A claim + in this list takes precedence over any volumes in the template, with the same name. items: description: PersistentVolumeClaim is a user's request for and claim @@ -12164,8 +12163,8 @@ spec: type: object type: array currentGeneration: - description: When not empty, indicates the version of the Replicated - State Machine (RSM) used to generate the underlying workload. + description: When not empty, indicates the version of the InstanceSet + used to generate the underlying workload. format: int64 type: integer currentReplicas: @@ -12182,8 +12181,8 @@ spec: additionalProperties: type: string description: currentRevisions, if not empty, indicates the old version - of the RSM used to generate Pods. key is the pod name, value is - the revision. + of the InstanceSet used to generate the underlying workload. key + is the pod name, value is the revision. type: object initReplicas: description: Defines the initial number of pods (members) when the @@ -12203,8 +12202,8 @@ spec: description: Whether the corresponding Pod is in ready condition. type: boolean readyWithoutPrimary: - description: Indicates whether it is required for the replica - set manager (rsm) to have at least one primary pod ready. + description: Indicates whether it is required for the InstanceSet + to have at least one primary instance ready. type: boolean role: description: Defines the role of the replica in the cluster. @@ -12268,8 +12267,8 @@ spec: additionalProperties: type: string description: updateRevisions, if not empty, indicates the new version - of the RSM used to generate Pods. key is the pod name, value is - the revision. 
+ of the InstanceSet used to generate the underlying workload. key + is the pod name, value is the revision. type: object updatedReplicas: description: updatedReplicas is the number of Pods created by the diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 1e2c8fa4e11..9c4eef3443e 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -13,7 +13,7 @@ resources: - bases/dataprotection.kubeblocks.io_backuppolicies.yaml - bases/dataprotection.kubeblocks.io_backups.yaml - bases/extensions.kubeblocks.io_addons.yaml -- bases/workloads.kubeblocks.io_replicatedstatemachines.yaml +- bases/workloads.kubeblocks.io_instancesets.yaml - bases/storage.kubeblocks.io_storageproviders.yaml - bases/dataprotection.kubeblocks.io_backuprepos.yaml - bases/dataprotection.kubeblocks.io_restores.yaml @@ -41,7 +41,7 @@ patchesStrategicMerge: #- patches/webhook_in_hostpreflights.yaml #- patches/webhook_in_preflights.yaml #- patches/webhook_in_addons.yaml -#- patches/webhook_in_replicatedstatemachines.yaml +#- patches/webhook_in_instancesets.yaml #- patches/webhook_in_storageproviders.yaml #- patches/webhook_in_backuprepos.yaml #- patches/webhook_in_restores.yaml @@ -68,7 +68,7 @@ patchesStrategicMerge: #- patches/cainjection_in_hostpreflights.yaml #- patches/cainjection_in_preflights.yaml #- patches/cainjection_in_addonspecs.yaml -#- patches/cainjection_in_replicatedstatemachines.yaml +#- patches/cainjection_in_instancesets.yaml #- patches/cainjection_in_storageproviders.yaml #- patches/cainjection_in_backuprepos.yaml #- patches/cainjection_in_restores.yaml diff --git a/config/crd/patches/cainjection_in_workloads_replicatedstatemachines.yaml b/config/crd/patches/cainjection_in_workloads_instancesets.yaml similarity index 81% rename from config/crd/patches/cainjection_in_workloads_replicatedstatemachines.yaml rename to config/crd/patches/cainjection_in_workloads_instancesets.yaml index 28731ab2563..1b022a1c56e 100644 --- 
a/config/crd/patches/cainjection_in_workloads_replicatedstatemachines.yaml +++ b/config/crd/patches/cainjection_in_workloads_instancesets.yaml @@ -4,4 +4,4 @@ kind: CustomResourceDefinition metadata: annotations: cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: replicatedstatemachines.workloads.kubeblocks.io + name: instancesets.workloads.kubeblocks.io diff --git a/config/crd/patches/webhook_in_workloads_replicatedstatemachines.yaml b/config/crd/patches/webhook_in_workloads_instancesets.yaml similarity index 86% rename from config/crd/patches/webhook_in_workloads_replicatedstatemachines.yaml rename to config/crd/patches/webhook_in_workloads_instancesets.yaml index e24f648c4ee..4f341bcbca9 100644 --- a/config/crd/patches/webhook_in_workloads_replicatedstatemachines.yaml +++ b/config/crd/patches/webhook_in_workloads_instancesets.yaml @@ -2,7 +2,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: replicatedstatemachines.workloads.kubeblocks.io + name: instancesets.workloads.kubeblocks.io spec: conversion: strategy: Webhook diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 8857d623348..0a2926e8b77 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -12,31 +12,6 @@ rules: - get - patch - update -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - deployments/finalizers - verbs: - - update -- apiGroups: - - apps - resources: - - deployments/status - verbs: - - get - apiGroups: - apps resources: @@ -524,6 +499,7 @@ rules: resources: - pods verbs: + - create - delete - deletecollection - get @@ -550,6 +526,12 @@ rules: verbs: - get - list +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get - apiGroups: - "" resources: @@ -929,6 +911,32 @@ rules: - get - patch - update +- apiGroups: + - workloads.kubeblocks.io + 
resources: + - instancesets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - workloads.kubeblocks.io + resources: + - instancesets/finalizers + verbs: + - update +- apiGroups: + - workloads.kubeblocks.io + resources: + - instancesets/status + verbs: + - get + - patch + - update - apiGroups: - workloads.kubeblocks.io resources: diff --git a/config/rbac/workloads_replicatedstatemachine_editor_role.yaml b/config/rbac/workloads_instanceset_editor_role.yaml similarity index 76% rename from config/rbac/workloads_replicatedstatemachine_editor_role.yaml rename to config/rbac/workloads_instanceset_editor_role.yaml index 69c5a5665e9..e0d02903e6c 100644 --- a/config/rbac/workloads_replicatedstatemachine_editor_role.yaml +++ b/config/rbac/workloads_instanceset_editor_role.yaml @@ -4,17 +4,17 @@ kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: replicatedstatemachine-editor-role + app.kubernetes.io/instance: instanceset-editor-role app.kubernetes.io/component: rbac app.kubernetes.io/created-by: kubeblocks app.kubernetes.io/part-of: kubeblocks app.kubernetes.io/managed-by: kustomize - name: replicatedstatemachine-editor-role + name: instanceset-editor-role rules: - apiGroups: - workloads.kubeblocks.io resources: - - replicatedstatemachines + - instancesets verbs: - create - delete @@ -26,6 +26,6 @@ rules: - apiGroups: - workloads.kubeblocks.io resources: - - replicatedstatemachines/status + - instancesets/status verbs: - get diff --git a/config/rbac/workloads_replicatedstatemachine_viewer_role.yaml b/config/rbac/workloads_instanceset_viewer_role.yaml similarity index 74% rename from config/rbac/workloads_replicatedstatemachine_viewer_role.yaml rename to config/rbac/workloads_instanceset_viewer_role.yaml index 087fa590ba2..8fc6e7149f5 100644 --- a/config/rbac/workloads_replicatedstatemachine_viewer_role.yaml +++ b/config/rbac/workloads_instanceset_viewer_role.yaml @@ -4,17 +4,17 @@ 
kind: ClusterRole metadata: labels: app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: replicatedstatemachine-viewer-role + app.kubernetes.io/instance: instanceset-viewer-role app.kubernetes.io/component: rbac app.kubernetes.io/created-by: kubeblocks app.kubernetes.io/part-of: kubeblocks app.kubernetes.io/managed-by: kustomize - name: replicatedstatemachines-viewer-role + name: instancesets-viewer-role rules: - apiGroups: - workloads.kubeblocks.io resources: - - replicatedstatemachines + - instancesets verbs: - get - list @@ -22,6 +22,6 @@ rules: - apiGroups: - workloads.kubeblocks.io resources: - - replicatedstatemachines/status + - instancesets/status verbs: - get diff --git a/config/samples/workloads_v1alpha1_replicatedstatemachine.yaml b/config/samples/workloads_v1alpha1_instanceset.yaml similarity index 56% rename from config/samples/workloads_v1alpha1_replicatedstatemachine.yaml rename to config/samples/workloads_v1alpha1_instanceset.yaml index b320af79628..5d199308c38 100644 --- a/config/samples/workloads_v1alpha1_replicatedstatemachine.yaml +++ b/config/samples/workloads_v1alpha1_instanceset.yaml @@ -1,12 +1,12 @@ apiVersion: workloads.kubeblocks.io/v1alpha1 -kind: ReplicatedStateMachine +kind: InstanceSet metadata: labels: - app.kubernetes.io/name: replicatedstatemachine - app.kubernetes.io/instance: replicatedstatemachine-sample + app.kubernetes.io/name: instanceset + app.kubernetes.io/instance: instanceset-sample app.kubernetes.io/part-of: kubeblocks app.kubernetes.io/managed-by: kustomize app.kubernetes.io/created-by: kubeblocks - name: replicatedstatemachine-sample + name: instanceset-sample spec: # TODO(user): Add fields here diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index e9cf83204e7..7ba74d5c988 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -10,9 +10,9 @@ webhooks: service: name: webhook-service namespace: system - path: 
/mutate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine + path: /mutate-workloads-kubeblocks-io-v1alpha1-instanceset failurePolicy: Fail - name: mreplicatedstatemachine.kb.io + name: minstanceset.kb.io rules: - apiGroups: - workloads.kubeblocks.io @@ -22,7 +22,7 @@ webhooks: - CREATE - UPDATE resources: - - replicatedstatemachines + - instancesets sideEffects: None - admissionReviewVersions: - v1 @@ -156,9 +156,9 @@ webhooks: service: name: webhook-service namespace: system - path: /validate-workloads-kubeblocks-io-v1alpha1-replicatedstatemachine + path: /validate-workloads-kubeblocks-io-v1alpha1-instanceset failurePolicy: Fail - name: vreplicatedstatemachine.kb.io + name: vinstanceset.kb.io rules: - apiGroups: - workloads.kubeblocks.io @@ -168,7 +168,7 @@ webhooks: - CREATE - UPDATE resources: - - replicatedstatemachines + - instancesets sideEffects: None - admissionReviewVersions: - v1 diff --git a/controllers/apps/cluster_controller_test.go b/controllers/apps/cluster_controller_test.go index f07dd3f5ce9..37b7a11d0c7 100644 --- a/controllers/apps/cluster_controller_test.go +++ b/controllers/apps/cluster_controller_test.go @@ -381,17 +381,17 @@ var _ = Describe("Cluster Controller", func() { } Eventually(testapps.CheckObjExists(&testCtx, compKey, &appsv1alpha1.Component{}, true)).Should(Succeed()) - By("Wait RSM created") - rsmkey := compKey - rsm := &workloads.ReplicatedStateMachine{} - Eventually(testapps.CheckObjExists(&testCtx, rsmkey, rsm, true)).Should(Succeed()) + By("Wait InstanceSet created") + itsKey := compKey + its := &workloads.InstanceSet{} + Eventually(testapps.CheckObjExists(&testCtx, itsKey, its, true)).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, clusterKey, func(g Gomega, cluster *appsv1alpha1.Cluster) { g.Expect(cluster.Spec.ComponentSpecs).Should(HaveLen(1)) clusterJSON, err := json.Marshal(cluster.Spec.ComponentSpecs[0].Instances) g.Expect(err).Should(BeNil()) - rsmJSON, err := json.Marshal(rsm.Spec.Instances) + itsJSON, 
err := json.Marshal(its.Spec.Instances) g.Expect(err).Should(BeNil()) - g.Expect(clusterJSON).Should(Equal(rsmJSON)) + g.Expect(clusterJSON).Should(Equal(itsJSON)) })).Should(Succeed()) } diff --git a/controllers/apps/cluster_status_event_handler_test.go b/controllers/apps/cluster_status_event_handler_test.go index 163ab5777d1..bca77342e67 100644 --- a/controllers/apps/cluster_status_event_handler_test.go +++ b/controllers/apps/cluster_status_event_handler_test.go @@ -132,15 +132,15 @@ var _ = Describe("test cluster Failed/Abnormal phase", func() { Expect(handleEventForClusterStatus(ctx, k8sClient, clusterRecorder, event)).Should(Succeed()) By("watch warning event from workload, but mismatch condition ") - rsmKey := types.NamespacedName{ + key := types.NamespacedName{ Namespace: clusterKey.Namespace, Name: clusterKey.Name + "-" + statefulMySQLCompName, } - Eventually(testapps.CheckObjExists(&testCtx, rsmKey, &workloads.ReplicatedStateMachine{}, true)).Should(Succeed()) + Eventually(testapps.CheckObjExists(&testCtx, key, &workloads.InstanceSet{}, true)).Should(Succeed()) involvedObject := corev1.ObjectReference{ - Name: rsmKey.Name, - Kind: constant.RSMKind, + Name: key.Name, + Kind: constant.InstanceSetKind, Namespace: testCtx.DefaultNamespace, } event.InvolvedObject = involvedObject diff --git a/controllers/apps/component_controller.go b/controllers/apps/component_controller.go index d3447a6b8ea..ff4f62589fd 100644 --- a/controllers/apps/component_controller.go +++ b/controllers/apps/component_controller.go @@ -57,6 +57,15 @@ type ComponentReconciler struct { // +kubebuilder:rbac:groups=apps.kubeblocks.io,resources=components/status,verbs=get;update;patch // +kubebuilder:rbac:groups=apps.kubeblocks.io,resources=components/finalizers,verbs=update +// owned workload API +// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=replicatedstatemachines,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=replicatedstatemachines/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=replicatedstatemachines/finalizers,verbs=update + +// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets/finalizers,verbs=update + // owned K8s core API resources controller-gen RBAC marker // full access on core API resources // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete;deletecollection @@ -199,7 +208,7 @@ func (r *ComponentReconciler) setupWithManager(mgr ctrl.Manager) error { WithOptions(controller.Options{ MaxConcurrentReconciles: viper.GetInt(constant.CfgKBReconcileWorkers), }). - Watches(&workloads.ReplicatedStateMachine{}, handler.EnqueueRequestsFromMapFunc(r.filterComponentResources)). + Watches(&workloads.InstanceSet{}, handler.EnqueueRequestsFromMapFunc(r.filterComponentResources)). Owns(&corev1.Service{}). Owns(&corev1.Secret{}). Owns(&corev1.ConfigMap{}). @@ -229,7 +238,7 @@ func (r *ComponentReconciler) setupWithMultiClusterManager(mgr ctrl.Manager, mul WithOptions(controller.Options{ MaxConcurrentReconciles: viper.GetInt(constant.CfgKBReconcileWorkers), }). - Watches(&workloads.ReplicatedStateMachine{}, handler.EnqueueRequestsFromMapFunc(r.filterComponentResources)). + Watches(&workloads.InstanceSet{}, handler.EnqueueRequestsFromMapFunc(r.filterComponentResources)). Owns(&dpv1alpha1.Backup{}). Owns(&dpv1alpha1.Restore{}). 
Watches(&appsv1alpha1.Configuration{}, handler.EnqueueRequestsFromMapFunc(r.configurationEventHandler)) diff --git a/controllers/apps/component_controller_test.go b/controllers/apps/component_controller_test.go index fc513cd1414..550d3730749 100644 --- a/controllers/apps/component_controller_test.go +++ b/controllers/apps/component_controller_test.go @@ -332,19 +332,19 @@ var _ = Describe("Component Controller", func() { } mockCompRunning := func(compName string) { - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, client.ObjectKeyFromObject(clusterObj), compName) - Expect(rsmList.Items).Should(HaveLen(1)) - rsm := rsmList.Items[0] - sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterObj.Name, compName). - SetReplicas(*rsm.Spec.Replicas). + itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, client.ObjectKeyFromObject(clusterObj), compName) + Expect(itsList.Items).Should(HaveLen(1)) + its := itsList.Items[0] + sts := testapps.NewStatefulSetFactory(its.Namespace, its.Name, clusterObj.Name, compName). + SetReplicas(*its.Spec.Replicas). Create(&testCtx). GetObject() pods := testapps.MockConsensusComponentPods(&testCtx, sts, clusterObj.Name, compName) Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { testk8s.MockStatefulSetReady(sts) })).ShouldNot(HaveOccurred()) - Expect(testapps.ChangeObjStatus(&testCtx, &rsm, func() { - testk8s.MockRSMReady(&rsm, pods...) + Expect(testapps.ChangeObjStatus(&testCtx, &its, func() { + testk8s.MockInstanceSetReady(&its, pods...) 
})).ShouldNot(HaveOccurred()) Eventually(testapps.GetComponentPhase(&testCtx, types.NamespacedName{ Namespace: clusterObj.Namespace, @@ -392,8 +392,8 @@ var _ = Describe("Component Controller", func() { checkSingleWorkload := func(compDefName string, expects func(g Gomega, sts *appsv1.StatefulSet, deploy *appsv1.Deployment)) { Eventually(func(g Gomega) { - l := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - sts := rsm.ConvertRSMToSTS(&l.Items[0]) + l := testk8s.ListAndCheckInstanceSet(&testCtx, clusterKey) + sts := rsm.ConvertInstanceSetToSTS(&l.Items[0]) expects(g, sts, nil) }).Should(Succeed()) } @@ -508,9 +508,9 @@ var _ = Describe("Component Controller", func() { By("Mocking component PVCs to bound") mockComponentPVCsAndBound(comp, int(comp.Replicas), true, storageClassName) - By("Checking rsm replicas right") - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, comp.Name) - Expect(int(*rsmList.Items[0].Spec.Replicas)).To(BeEquivalentTo(comp.Replicas)) + By("Checking its replicas right") + itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, comp.Name) + Expect(int(*itsList.Items[0].Spec.Replicas)).To(BeEquivalentTo(comp.Replicas)) By("Creating mock pods in StatefulSet") pods := mockPodsForTest(clusterObj, int(comp.Replicas)) @@ -535,8 +535,8 @@ var _ = Describe("Component Controller", func() { checkUpdatedStsReplicas := func() { By("Checking updated sts replicas") Eventually(func() int32 { - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, comp.Name) - return *rsmList.Items[0].Spec.Replicas + itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, comp.Name) + return *itsList.Items[0].Spec.Replicas }).Should(BeEquivalentTo(updatedReplicas)) } @@ -846,10 +846,10 @@ var _ = Describe("Component Controller", func() { }) By("Checking the replicas") - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - rsm := &rsmList.Items[0] - sts := 
testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterObj.Name, compName). - SetReplicas(*rsm.Spec.Replicas). + itsList := testk8s.ListAndCheckInstanceSet(&testCtx, clusterKey) + its := &itsList.Items[0] + sts := testapps.NewStatefulSetFactory(its.Namespace, its.Name, clusterObj.Name, compName). + SetReplicas(*its.Spec.Replicas). Create(&testCtx).GetObject() Expect(*sts.Spec.Replicas).Should(BeEquivalentTo(replicas)) @@ -887,8 +887,8 @@ var _ = Describe("Component Controller", func() { case statefulCompDefName, consensusCompDefName: mockPods = testapps.MockConsensusComponentPods(&testCtx, sts, clusterObj.Name, compName) } - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, mockPods...) + Expect(testapps.ChangeObjStatus(&testCtx, its, func() { + testk8s.MockInstanceSetReady(its, mockPods...) })).ShouldNot(HaveOccurred()) Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { testk8s.MockStatefulSetReady(sts) @@ -1358,17 +1358,17 @@ var _ = Describe("Component Controller", func() { }, }, } - rsmKey := types.NamespacedName{ + itsKey := types.NamespacedName{ Namespace: compObj.Namespace, Name: compObj.Name, } - Eventually(testapps.CheckObj(&testCtx, rsmKey, func(g Gomega, rsm *workloads.ReplicatedStateMachine) { + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { envVars, _ := buildEnvVarsNData(nil, targetEnvVars, false) targetEnvVarsMapping := map[string]corev1.EnvVar{} for i, v := range envVars { targetEnvVarsMapping[v.Name] = envVars[i] } - for _, cc := range [][]corev1.Container{rsm.Spec.Template.Spec.InitContainers, rsm.Spec.Template.Spec.Containers} { + for _, cc := range [][]corev1.Container{its.Spec.Template.Spec.InitContainers, its.Spec.Template.Spec.Containers} { for _, c := range cc { envValueMapping := map[string]corev1.EnvVar{} for i, env := range c.Env { @@ -1403,12 +1403,12 @@ var _ = Describe("Component Controller", func() { createClusterObjV2(compName, 
compDefObj.Name, func(f *testapps.MockClusterFactory) { f.SetReplicas(replicasLimit.MaxReplicas * 2) }) - rsmKey := types.NamespacedName{ + itsKey := types.NamespacedName{ Namespace: compObj.Namespace, Name: compObj.Name, } - Eventually(testapps.CheckObj(&testCtx, rsmKey, func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - g.Expect(*rsm.Spec.Replicas).Should(BeEquivalentTo(replicasLimit.MaxReplicas * 2)) + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(*its.Spec.Replicas).Should(BeEquivalentTo(replicasLimit.MaxReplicas * 2)) })).Should(Succeed()) By("set replicas limit") @@ -1429,11 +1429,11 @@ var _ = Describe("Component Controller", func() { g.Expect(comp.Status.Conditions[0].Status).Should(BeEquivalentTo(metav1.ConditionFalse)) g.Expect(comp.Status.Conditions[0].Message).Should(ContainSubstring(replicasOutOfLimitError(replicas, *replicasLimit).Error())) })).Should(Succeed()) - rsmKey := types.NamespacedName{ + itsKey := types.NamespacedName{ Namespace: compObj.Namespace, Name: compObj.Name, } - Consistently(testapps.CheckObjExists(&testCtx, rsmKey, &workloads.ReplicatedStateMachine{}, false)).Should(Succeed()) + Consistently(testapps.CheckObjExists(&testCtx, itsKey, &workloads.InstanceSet{}, false)).Should(Succeed()) } By("create component w/ replicas limit set - ok") @@ -1441,12 +1441,12 @@ var _ = Describe("Component Controller", func() { createClusterObjV2(compName, compDefObj.Name, func(f *testapps.MockClusterFactory) { f.SetReplicas(replicas) }) - rsmKey := types.NamespacedName{ + itsKey := types.NamespacedName{ Namespace: compObj.Namespace, Name: compObj.Name, } - Eventually(testapps.CheckObj(&testCtx, rsmKey, func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - g.Expect(*rsm.Spec.Replicas).Should(BeEquivalentTo(replicas)) + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(*its.Spec.Replicas).Should(BeEquivalentTo(replicas)) 
})).Should(Succeed()) } } @@ -1475,12 +1475,12 @@ var _ = Describe("Component Controller", func() { IsLeader: false, }, } - rsmKey := types.NamespacedName{ + itsKey := types.NamespacedName{ Namespace: compObj.Namespace, Name: compObj.Name, } - Eventually(testapps.CheckObj(&testCtx, rsmKey, func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - g.Expect(rsm.Spec.Roles).Should(HaveExactElements(targetRoles)) + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.Spec.Roles).Should(HaveExactElements(targetRoles)) })).Should(Succeed()) } @@ -1524,12 +1524,12 @@ var _ = Describe("Component Controller", func() { MountPath: constant.MountPath, ReadOnly: true, } - rsmKey := types.NamespacedName{ + itsKey := types.NamespacedName{ Namespace: compObj.Namespace, Name: compObj.Name, } - Eventually(testapps.CheckObj(&testCtx, rsmKey, func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - podSpec := rsm.Spec.Template.Spec + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + podSpec := its.Spec.Template.Spec g.Expect(podSpec.Volumes).Should(ContainElements(targetVolume)) for _, c := range podSpec.Containers { g.Expect(c.VolumeMounts).Should(ContainElements(targetVolumeMount)) @@ -1569,12 +1569,12 @@ var _ = Describe("Component Controller", func() { }) By("Checking the Affinity, the TopologySpreadConstraints and Tolerations") - rsmKey := types.NamespacedName{ + itsKey := types.NamespacedName{ Namespace: compObj.Namespace, Name: compObj.Name, } - Eventually(testapps.CheckObj(&testCtx, rsmKey, func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - podSpec := rsm.Spec.Template.Spec + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + podSpec := its.Spec.Template.Spec // node affinity g.Expect(podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key).To(Equal(labelKey)) // 
pod anti-affinity @@ -1628,12 +1628,12 @@ var _ = Describe("Component Controller", func() { }) By("check the service account used in Pod") - rsmKey := types.NamespacedName{ + itsKey := types.NamespacedName{ Namespace: compObj.Namespace, Name: compObj.Name, } - Eventually(testapps.CheckObj(&testCtx, rsmKey, func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - g.Expect(rsm.Spec.Template.Spec.ServiceAccountName).To(Equal(saName)) + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + g.Expect(its.Spec.Template.Spec.ServiceAccountName).To(Equal(saName)) })).Should(Succeed()) By("check the RBAC resources created") @@ -1699,16 +1699,16 @@ var _ = Describe("Component Controller", func() { waitForCreatingResourceCompletely(clusterKey, compDefName) By("Checking statefulSet number") - rsmList := testk8s.ListAndCheckRSMItemsCount(&testCtx, clusterKey, 1) - rsm := &rsmList.Items[0] - sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, compName). - SetReplicas(*rsm.Spec.Replicas).Create(&testCtx).GetObject() + itsList := testk8s.ListAndCheckInstanceSetItemsCount(&testCtx, clusterKey, 1) + its := &itsList.Items[0] + sts := testapps.NewStatefulSetFactory(its.Namespace, its.Name, clusterKey.Name, compName). + SetReplicas(*its.Spec.Replicas).Create(&testCtx).GetObject() mockPods := testapps.MockReplicationComponentPods(nil, testCtx, sts, clusterObj.Name, compDefName, nil) Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { testk8s.MockStatefulSetReady(sts) })).ShouldNot(HaveOccurred()) - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, mockPods...) + Expect(testapps.ChangeObjStatus(&testCtx, its, func() { + testk8s.MockInstanceSetReady(its, mockPods...) 
})).ShouldNot(HaveOccurred()) Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(appsv1alpha1.RunningClusterPhase)) } @@ -1728,16 +1728,16 @@ var _ = Describe("Component Controller", func() { By("Waiting for the cluster controller to create resources completely") waitForCreatingResourceCompletely(clusterKey, compName) - var rsm *workloads.ReplicatedStateMachine + var its *workloads.InstanceSet Eventually(func(g Gomega) { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - g.Expect(rsmList.Items).ShouldNot(BeEmpty()) - rsm = &rsmList.Items[0] + itsList := testk8s.ListAndCheckInstanceSet(&testCtx, clusterKey) + g.Expect(itsList.Items).ShouldNot(BeEmpty()) + its = &itsList.Items[0] }).Should(Succeed()) - sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, compName). - AddAppComponentLabel(rsm.Labels[constant.KBAppComponentLabelKey]). - AddAppInstanceLabel(rsm.Labels[constant.AppInstanceLabelKey]). - SetReplicas(*rsm.Spec.Replicas).Create(&testCtx).GetObject() + sts := testapps.NewStatefulSetFactory(its.Namespace, its.Name, clusterKey.Name, compName). + AddAppComponentLabel(its.Labels[constant.KBAppComponentLabelKey]). + AddAppInstanceLabel(its.Labels[constant.AppInstanceLabelKey]). 
+ SetReplicas(*its.Spec.Replicas).Create(&testCtx).GetObject() By("Creating mock pods in StatefulSet, and set controller reference") pods := mockPodsForTest(clusterObj, replicas) for i, pod := range pods { @@ -1780,9 +1780,9 @@ var _ = Describe("Component Controller", func() { g.Expect(followerCount).Should(Equal(2)) }).Should(Succeed()) - // trigger rsm to reconcile as the underlying sts is not created - Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(sts), func(rsm *workloads.ReplicatedStateMachine) { - rsm.Annotations["time"] = time.Now().Format(time.RFC3339) + // trigger its to reconcile as the underlying sts is not created + Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(sts), func(its *workloads.InstanceSet) { + its.Annotations["time"] = time.Now().Format(time.RFC3339) })()).Should(Succeed()) By("Checking pods' annotations") Eventually(func(g Gomega) { @@ -1794,17 +1794,17 @@ var _ = Describe("Component Controller", func() { g.Expect(pod.Annotations[constant.ComponentReplicasAnnotationKey]).Should(Equal(strconv.Itoa(int(*sts.Spec.Replicas)))) } }).Should(Succeed()) - rsmPatch := client.MergeFrom(rsm.DeepCopy()) - By("Updating RSM's status") - rsm.Status.UpdateRevision = "mock-version" + itsPatch := client.MergeFrom(its.DeepCopy()) + By("Updating ITS status") + its.Status.UpdateRevision = "mock-version" pods, err := intctrlutil.GetPodListByStatefulSet(ctx, k8sClient, sts) Expect(err).Should(BeNil()) var podList []*corev1.Pod for i := range pods { podList = append(podList, &pods[i]) } - testk8s.MockRSMReady(rsm, podList...) - Expect(k8sClient.Status().Patch(ctx, rsm, rsmPatch)).Should(Succeed()) + testk8s.MockInstanceSetReady(its, podList...) 
+ Expect(k8sClient.Status().Patch(ctx, its, itsPatch)).Should(Succeed()) stsPatch := client.MergeFrom(sts.DeepCopy()) By("Updating StatefulSet's status") @@ -1883,24 +1883,24 @@ var _ = Describe("Component Controller", func() { By("Waiting for the cluster controller to create resources completely") waitForCreatingResourceCompletely(clusterKey, compName) - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - rsm := rsmList.Items[0] - sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, compName). - SetReplicas(*rsm.Spec.Replicas). + itsList := testk8s.ListAndCheckInstanceSet(&testCtx, clusterKey) + its := itsList.Items[0] + sts := testapps.NewStatefulSetFactory(its.Namespace, its.Name, clusterKey.Name, compName). + SetReplicas(*its.Spec.Replicas). Create(&testCtx).GetObject() By("mock pod/sts are available and wait for component enter running phase") mockPods := testapps.MockConsensusComponentPods(&testCtx, sts, clusterObj.Name, compName) Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { testk8s.MockStatefulSetReady(sts) })).ShouldNot(HaveOccurred()) - Expect(testapps.ChangeObjStatus(&testCtx, &rsm, func() { - testk8s.MockRSMReady(&rsm, mockPods...) + Expect(testapps.ChangeObjStatus(&testCtx, &its, func() { + testk8s.MockInstanceSetReady(&its, mockPods...) 
})).ShouldNot(HaveOccurred()) Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, compName)).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) By("the restore container has been removed from init containers") - Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(&rsm), func(g Gomega, tmpRSM *workloads.ReplicatedStateMachine) { - g.Expect(tmpRSM.Spec.Template.Spec.InitContainers).Should(BeEmpty()) + Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(&its), func(g Gomega, tmpIts *workloads.InstanceSet) { + g.Expect(tmpIts.Spec.Template.Spec.InitContainers).Should(BeEmpty()) })).Should(Succeed()) By("clean up annotations after cluster running") @@ -2002,9 +2002,9 @@ var _ = Describe("Component Controller", func() { viper.Set(constant.KBToolsImage, oldToolsImage) }() - underlyingWorkload := func() *workloads.ReplicatedStateMachine { - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - return &rsmList.Items[0] + underlyingWorkload := func() *workloads.InstanceSet { + itsList := testk8s.ListAndCheckInstanceSet(&testCtx, clusterKey) + return &itsList.Items[0] } initWorkloadGeneration := underlyingWorkload().GetGeneration() @@ -2374,15 +2374,15 @@ var _ = Describe("Component Controller", func() { f.SetServiceVersion(prevRelease.ServiceVersion) }) - By("check the labels and image in rsm") - rsmKey := compKey - Eventually(testapps.CheckObj(&testCtx, rsmKey, func(g Gomega, rsm *workloads.ReplicatedStateMachine) { + By("check the labels and image in its") + itsKey := compKey + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { // check comp-def and service-version labels - g.Expect(rsm.Annotations).ShouldNot(BeEmpty()) - g.Expect(rsm.Annotations).Should(HaveKeyWithValue(constant.AppComponentLabelKey, compObj.Spec.CompDef)) - g.Expect(rsm.Annotations).Should(HaveKeyWithValue(constant.KBAppServiceVersionKey, compObj.Spec.ServiceVersion)) + g.Expect(its.Annotations).ShouldNot(BeEmpty()) 
+ g.Expect(its.Annotations).Should(HaveKeyWithValue(constant.AppComponentLabelKey, compObj.Spec.CompDef)) + g.Expect(its.Annotations).Should(HaveKeyWithValue(constant.KBAppServiceVersionKey, compObj.Spec.ServiceVersion)) // check the image - c := rsm.Spec.Template.Spec.Containers[0] + c := its.Spec.Template.Spec.Containers[0] g.Expect(c.Image).To(BeEquivalentTo(prevRelease.Images[c.Name])) })).Should(Succeed()) @@ -2399,16 +2399,16 @@ var _ = Describe("Component Controller", func() { comp.Annotations["now"] = now })()).Should(Succeed()) - By("wait rsm updated and check the labels and image in rsm not changed") - Eventually(testapps.CheckObj(&testCtx, rsmKey, func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - // check the rsm is updated - g.Expect(rsm.Annotations).ShouldNot(BeEmpty()) - g.Expect(rsm.Annotations).Should(HaveKeyWithValue("now", now)) + By("wait its updated and check the labels and image in its not changed") + Eventually(testapps.CheckObj(&testCtx, itsKey, func(g Gomega, its *workloads.InstanceSet) { + // check the its is updated + g.Expect(its.Annotations).ShouldNot(BeEmpty()) + g.Expect(its.Annotations).Should(HaveKeyWithValue("now", now)) // check comp-def and service-version labels unchanged - g.Expect(rsm.Annotations).Should(HaveKeyWithValue(constant.AppComponentLabelKey, compObj.Spec.CompDef)) - g.Expect(rsm.Annotations).Should(HaveKeyWithValue(constant.KBAppServiceVersionKey, compObj.Spec.ServiceVersion)) + g.Expect(its.Annotations).Should(HaveKeyWithValue(constant.AppComponentLabelKey, compObj.Spec.CompDef)) + g.Expect(its.Annotations).Should(HaveKeyWithValue(constant.KBAppServiceVersionKey, compObj.Spec.ServiceVersion)) // check the image unchanged - c := rsm.Spec.Template.Spec.Containers[0] + c := its.Spec.Template.Spec.Containers[0] g.Expect(c.Image).To(BeEquivalentTo(prevRelease.Images[c.Name])) })).Should(Succeed()) } diff --git a/controllers/apps/component_utils.go b/controllers/apps/component_utils.go index 
98e0e9a1fd6..ce2aa670fe6 100644 --- a/controllers/apps/component_utils.go +++ b/controllers/apps/component_utils.go @@ -80,7 +80,7 @@ func getObjectListByCustomLabels(ctx context.Context, cli client.Client, cluster return cli.List(ctx, objectList, opts...) } -func DelayUpdateRsmSystemFields(obj v1alpha1.ReplicatedStateMachineSpec, pobj *v1alpha1.ReplicatedStateMachineSpec) { +func DelayUpdateInstanceSetSystemFields(obj v1alpha1.InstanceSetSpec, pobj *v1alpha1.InstanceSetSpec) { DelayUpdatePodSpecSystemFields(obj.Template.Spec, &pobj.Template.Spec) if pobj.RoleProbe != nil && obj.RoleProbe != nil { @@ -100,7 +100,7 @@ func DelayUpdatePodSpecSystemFields(obj corev1.PodSpec, pobj *corev1.PodSpec) { updateLorryContainer(obj.Containers, pobj.Containers) } -func UpdateRsmSystemFields(obj v1alpha1.ReplicatedStateMachineSpec, pobj *v1alpha1.ReplicatedStateMachineSpec) { +func UpdateInstanceSetSystemFields(obj v1alpha1.InstanceSetSpec, pobj *v1alpha1.InstanceSetSpec) { UpdatePodSpecSystemFields(obj.Template.Spec, &pobj.Template.Spec) if pobj.RoleProbe != nil && obj.RoleProbe != nil { pobj.RoleProbe.FailureThreshold = obj.RoleProbe.FailureThreshold diff --git a/controllers/apps/component_utils_test.go b/controllers/apps/component_utils_test.go index b960ef43912..70a88e9602e 100644 --- a/controllers/apps/component_utils_test.go +++ b/controllers/apps/component_utils_test.go @@ -114,7 +114,7 @@ var _ = Describe("Component Utils", func() { Expect(len(stsList.Items) > 0).Should(BeTrue()) By("test GetComponentStsMinReadySeconds") - minReadySeconds, _ := component.GetComponentRSMMinReadySeconds(ctx, k8sClient, *cluster, consensusCompName) + minReadySeconds, _ := component.GetComponentMinReadySeconds(ctx, k8sClient, *cluster, consensusCompName) Expect(minReadySeconds).To(Equal(int32(0))) }) }) diff --git a/controllers/apps/configuration/combine_upgrade_policy_test.go b/controllers/apps/configuration/combine_upgrade_policy_test.go index 23449012a40..d72a8a7dccd 100644 --- 
a/controllers/apps/configuration/combine_upgrade_policy_test.go +++ b/controllers/apps/configuration/combine_upgrade_policy_test.go @@ -53,7 +53,7 @@ var _ = Describe("Reconfigure CombineSyncPolicy", func() { Expect(upgradePolicyMap[appsv1alpha1.DynamicReloadAndRestartPolicy]).ShouldNot(BeNil()) mockParam := newMockReconfigureParams("simplePolicy", k8sMockClient.Client(), - withMockRSM(2, nil), + withMockInstanceSet(2, nil), withConfigSpec("for_test", map[string]string{ "key": "value", }), @@ -77,7 +77,7 @@ var _ = Describe("Reconfigure CombineSyncPolicy", func() { } mockParam := newMockReconfigureParams("simplePolicy", k8sMockClient.Client(), - withMockRSM(2, nil), + withMockInstanceSet(2, nil), withConfigSpec("for_test", map[string]string{ "key": "value", }), diff --git a/controllers/apps/configuration/config_reconcile_wrapper.go b/controllers/apps/configuration/config_reconcile_wrapper.go index fef293fef06..94b3974756c 100644 --- a/controllers/apps/configuration/config_reconcile_wrapper.go +++ b/controllers/apps/configuration/config_reconcile_wrapper.go @@ -38,8 +38,8 @@ type configReconcileContext struct { ConfigMap *corev1.ConfigMap BuiltinComponent *component.SynthesizedComponent - Containers []string - RSMList []workloads.ReplicatedStateMachine + Containers []string + InstanceSetList []workloads.InstanceSet reqCtx intctrlutil.RequestCtx } @@ -62,18 +62,18 @@ func (c *configReconcileContext) GetRelatedObjects() error { return c.Cluster(). ComponentAndComponentDef(). ComponentSpec(). - RSM(). + Workload(). SynthesizedComponent(). 
Complete() } -func (c *configReconcileContext) RSM() *configReconcileContext { +func (c *configReconcileContext) Workload() *configReconcileContext { stsFn := func() (err error) { - c.RSMList, c.Containers, err = retrieveRelatedComponentsByConfigmap( + c.InstanceSetList, c.Containers, err = retrieveRelatedComponentsByConfigmap( c.Client, c.Context, c.Name, - generics.RSMSignature, + generics.InstanceSetSignature, client.ObjectKeyFromObject(c.ConfigMap), client.InNamespace(c.Namespace), c.MatchingLabels) diff --git a/controllers/apps/configuration/config_related_helper.go b/controllers/apps/configuration/config_related_helper.go index 0e0ac3614dc..1acdbad1310 100644 --- a/controllers/apps/configuration/config_related_helper.go +++ b/controllers/apps/configuration/config_related_helper.go @@ -83,7 +83,7 @@ func transformPodTemplate(obj client.Object) *corev1.PodTemplateSpec { return &v.Spec.Template case *appv1.Deployment: return &v.Spec.Template - case *workloads.ReplicatedStateMachine: + case *workloads.InstanceSet: return &v.Spec.Template } } diff --git a/controllers/apps/configuration/parallel_upgrade_policy.go b/controllers/apps/configuration/parallel_upgrade_policy.go index 835ac4cea05..1be527a1e97 100644 --- a/controllers/apps/configuration/parallel_upgrade_policy.go +++ b/controllers/apps/configuration/parallel_upgrade_policy.go @@ -34,7 +34,7 @@ func init() { } func (p *parallelUpgradePolicy) Upgrade(params reconfigureParams) (ReturnedStatus, error) { - funcs := GetRSMRollingUpgradeFuncs() + funcs := GetInstanceSetRollingUpgradeFuncs() pods, err := funcs.GetPodsFunc(params) if err != nil { return makeReturnedStatus(ESFailedAndRetry), err diff --git a/controllers/apps/configuration/parallel_upgrade_policy_test.go b/controllers/apps/configuration/parallel_upgrade_policy_test.go index 68bc4c402a9..8e1cca3b066 100644 --- a/controllers/apps/configuration/parallel_upgrade_policy_test.go +++ b/controllers/apps/configuration/parallel_upgrade_policy_test.go @@ -65,7 
+65,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { withGRPCClient(func(addr string) (cfgproto.ReconfigureClient, error) { return reconfigureClient, nil }), - withMockRSM(3, nil), + withMockInstanceSet(3, nil), withClusterComponent(3), withConfigSpec("for_test", map[string]string{ "a": "b", @@ -79,7 +79,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { k8sMockClient.MockListMethod(testutil.WithListReturned( testutil.WithConstructListReturnedResult(fromPodObjectList( - newMockPodsWithRSM(&mockParam.RSMUnits[0], 3), + newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 3), )))) status, err := parallelPolicy.Upgrade(mockParam) @@ -94,7 +94,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { withGRPCClient(func(addr string) (cfgproto.ReconfigureClient, error) { return reconfigureClient, nil }), - withMockRSM(3, nil), + withMockInstanceSet(3, nil), withClusterComponent(3), withConfigSpec("for_test", map[string]string{ "a": "b", @@ -134,7 +134,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { withGRPCClient(func(addr string) (cfgproto.ReconfigureClient, error) { return reconfigureClient, nil }), - withMockRSM(3, nil), + withMockInstanceSet(3, nil), withClusterComponent(3), withConfigSpec("for_test", map[string]string{ "a": "b", @@ -147,7 +147,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { k8sMockClient.MockListMethod(testutil.WithListReturned( testutil.WithConstructListReturnedResult( - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 3))), testutil.WithTimes(2), + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 3))), testutil.WithTimes(2), )) status, err := parallelPolicy.Upgrade(mockParam) @@ -176,7 +176,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { withGRPCClient(func(addr string) (cfgproto.ReconfigureClient, error) { return reconfigureClient, nil }), - withMockRSM(3, nil), + withMockInstanceSet(3, nil), withClusterComponent(3), 
withConfigSpec("for_test", map[string]string{ "a": "b", @@ -187,7 +187,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { VolumeName: "test_volume", }}})) - setPods := newMockPodsWithRSM(&mockParam.RSMUnits[0], 5) + setPods := newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 5) k8sMockClient.MockListMethod(testutil.WithListReturned( testutil.WithConstructListReturnedResult(fromPodObjectList(setPods)), testutil.WithAnyTimes(), )) @@ -204,7 +204,7 @@ var _ = Describe("Reconfigure ParallelPolicy", func() { It("Should failed", func() { // not support type mockParam := newMockReconfigureParams("parallelPolicy", k8sMockClient.Client(), - withMockRSM(2, nil), + withMockInstanceSet(2, nil), withConfigSpec("for_test", map[string]string{ "key": "value", }), diff --git a/controllers/apps/configuration/policy_util.go b/controllers/apps/configuration/policy_util.go index 8e5d69a6ade..28db8b264cb 100644 --- a/controllers/apps/configuration/policy_util.go +++ b/controllers/apps/configuration/policy_util.go @@ -45,8 +45,8 @@ import ( // GetComponentPods gets all pods of the component. 
func GetComponentPods(params reconfigureParams) ([]corev1.Pod, error) { componentPods := make([]corev1.Pod, 0) - for i := range params.RSMUnits { - pods, err := intctrlutil.GetPodListByRSM(params.Ctx.Ctx, params.Client, ¶ms.RSMUnits[i]) + for i := range params.InstanceSetUnits { + pods, err := intctrlutil.GetPodListByInstanceSet(params.Ctx.Ctx, params.Client, ¶ms.InstanceSetUnits[i]) if err != nil { return nil, err } @@ -72,11 +72,11 @@ func CheckReconfigureUpdateProgress(pods []corev1.Pod, configKey, version string } func getPodsForOnlineUpdate(params reconfigureParams) ([]corev1.Pod, error) { - if len(params.RSMUnits) > 1 { - return nil, core.MakeError("component require only one rsm, actual %d components", len(params.RSMUnits)) + if len(params.InstanceSetUnits) > 1 { + return nil, core.MakeError("component require only one InstanceSet, actual %d components", len(params.InstanceSetUnits)) } - if len(params.RSMUnits) == 0 { + if len(params.InstanceSetUnits) == 0 { return nil, nil } @@ -86,7 +86,7 @@ func getPodsForOnlineUpdate(params reconfigureParams) ([]corev1.Pod, error) { } if params.SynthesizedComponent != nil { - rsmcore.SortPods(pods, rsmcore.ComposeRolePriorityMap(component.ConvertSynthesizeCompRoleToRSMRole(params.SynthesizedComponent)), true) + rsmcore.SortPods(pods, rsmcore.ComposeRolePriorityMap(component.ConvertSynthesizeCompRoleToInstanceSetRole(params.SynthesizedComponent)), true) } return pods, nil } @@ -223,8 +223,8 @@ func restartComponent(cli client.Client, ctx intctrlutil.RequestCtx, configKey s err = restartWorkloadComponent(cli, ctx.Ctx, cfgAnnotationKey, newVersion, w, generics.StatefulSetSignature) case *appv1.Deployment: err = restartWorkloadComponent(cli, ctx.Ctx, cfgAnnotationKey, newVersion, w, generics.DeploymentSignature) - case *workloads.ReplicatedStateMachine: - err = restartWorkloadComponent(cli, ctx.Ctx, cfgAnnotationKey, newVersion, w, generics.RSMSignature) + case *workloads.InstanceSet: + err = restartWorkloadComponent(cli, 
ctx.Ctx, cfgAnnotationKey, newVersion, w, generics.InstanceSetSignature) default: // ignore other types workload } diff --git a/controllers/apps/configuration/policy_util_test.go b/controllers/apps/configuration/policy_util_test.go index 1c6fb2b5186..b11ff072ecf 100644 --- a/controllers/apps/configuration/policy_util_test.go +++ b/controllers/apps/configuration/policy_util_test.go @@ -47,12 +47,12 @@ var ( stsSchemaKind = appsv1.SchemeGroupVersion.WithKind("StatefulSet") ) -func newMockRSM(replicas int, name string, labels map[string]string) workloads.ReplicatedStateMachine { +func newMockInstanceSet(replicas int, name string, labels map[string]string) workloads.InstanceSet { uid, _ := password.Generate(12, 12, 0, true, false) serviceName, _ := password.Generate(12, 0, 0, true, false) - return workloads.ReplicatedStateMachine{ + return workloads.InstanceSet{ TypeMeta: metav1.TypeMeta{ - Kind: "ReplicatedStateMachine", + Kind: "InstanceSet", APIVersion: "workloads.kubeblocks.io/v1alpha1", }, ObjectMeta: metav1.ObjectMeta{ @@ -60,7 +60,7 @@ func newMockRSM(replicas int, name string, labels map[string]string) workloads.R Namespace: defaultNamespace, UID: types.UID(uid), }, - Spec: workloads.ReplicatedStateMachineSpec{ + Spec: workloads.InstanceSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: labels, }, @@ -87,12 +87,12 @@ func newMockRSM(replicas int, name string, labels map[string]string) workloads.R type ParamsOps func(params *reconfigureParams) -func withMockRSM(replicas int, labels map[string]string) ParamsOps { +func withMockInstanceSet(replicas int, labels map[string]string) ParamsOps { return func(params *reconfigureParams) { rand, _ := password.Generate(12, 8, 0, true, false) stsName := "test_" + rand - params.RSMUnits = []workloads.ReplicatedStateMachine{ - newMockRSM(replicas, stsName, labels), + params.InstanceSetUnits = []workloads.InstanceSet{ + newMockInstanceSet(replicas, stsName, labels), } } } @@ -220,11 +220,11 @@ func 
newMockReconfigureParams(testName string, cli client.Client, paramOps ...Pa return params } -func newMockPodsWithRSM(rsm *workloads.ReplicatedStateMachine, replicas int, options ...PodOptions) []corev1.Pod { +func newMockPodsWithInstanceSet(its *workloads.InstanceSet, replicas int, options ...PodOptions) []corev1.Pod { pods := make([]corev1.Pod, replicas) for i := 0; i < replicas; i++ { - pods[i] = newMockPod(rsm.Name+"-"+fmt.Sprint(i), &rsm.Spec.Template.Spec) - pods[i].OwnerReferences = []metav1.OwnerReference{newControllerRef(rsm, stsSchemaKind)} + pods[i] = newMockPod(its.Name+"-"+fmt.Sprint(i), &its.Spec.Template.Spec) + pods[i].OwnerReferences = []metav1.OwnerReference{newControllerRef(its, stsSchemaKind)} pods[i].Status.PodIP = "1.1.1.1" } for _, customFn := range options { diff --git a/controllers/apps/configuration/reconfigure_controller.go b/controllers/apps/configuration/reconfigure_controller.go index d076ae7be3a..29391f12dda 100644 --- a/controllers/apps/configuration/reconfigure_controller.go +++ b/controllers/apps/configuration/reconfigure_controller.go @@ -206,7 +206,7 @@ func (r *ReconfigureReconciler) sync(reqCtx intctrlutil.RequestCtx, configMap *c return intctrlutil.Reconciled() } - if len(reconcileContext.RSMList) == 0 { + if len(reconcileContext.InstanceSetList) == 0 { reqCtx.Recorder.Event(configMap, corev1.EventTypeWarning, appsv1alpha1.ReasonReconfigureFailed, "the configmap is not used by any container, skip reconfigure") return updateConfigPhase(r.Client, reqCtx, configMap, appsv1alpha1.CFinishedPhase, configurationNotUsingMessage) @@ -221,7 +221,7 @@ func (r *ReconfigureReconciler) sync(reqCtx intctrlutil.RequestCtx, configMap *c Ctx: reqCtx, Cluster: reconcileContext.ClusterObj, ContainerNames: reconcileContext.Containers, - RSMUnits: reconcileContext.RSMList, + InstanceSetUnits: reconcileContext.InstanceSetList, ClusterComponent: reconcileContext.ClusterComObj, SynthesizedComponent: reconcileContext.BuiltinComponent, Restart: 
forceRestart || !cfgcm.IsSupportReload(resources.configConstraintObj.Spec.DynamicReloadAction), diff --git a/controllers/apps/configuration/reconfigure_policy.go b/controllers/apps/configuration/reconfigure_policy.go index 9a3c02fda71..b22aa77b022 100644 --- a/controllers/apps/configuration/reconfigure_policy.go +++ b/controllers/apps/configuration/reconfigure_policy.go @@ -111,8 +111,8 @@ type reconfigureParams struct { // TODO(xingran): remove this field when test case is refactored. Component *appsv1alpha1.ClusterComponentDefinition - // List of ReplicatedStateMachine using this config template. - RSMUnits []workloads.ReplicatedStateMachine + // List of InstanceSet using this config template. + InstanceSetUnits []workloads.InstanceSet } var ( @@ -164,9 +164,9 @@ func (param *reconfigureParams) maxRollingReplicas() int32 { } var maxUnavailable *intstr.IntOrString - for _, rsm := range param.RSMUnits { - if rsm.Spec.UpdateStrategy.RollingUpdate != nil { - maxUnavailable = rsm.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable + for _, its := range param.InstanceSetUnits { + if its.Spec.UpdateStrategy.RollingUpdate != nil { + maxUnavailable = its.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable } if maxUnavailable != nil { break @@ -298,7 +298,7 @@ func makeReturnedStatus(status ExecStatus, ops ...func(status *ReturnedStatus)) func fromWorkloadObjects(params reconfigureParams) []client.Object { r := make([]client.Object, 0) - for _, unit := range params.RSMUnits { + for _, unit := range params.InstanceSetUnits { r = append(r, &unit) } return r diff --git a/controllers/apps/configuration/rolling_upgrade_policy.go b/controllers/apps/configuration/rolling_upgrade_policy.go index 5ada48b5f9f..aaf59c06d40 100644 --- a/controllers/apps/configuration/rolling_upgrade_policy.go +++ b/controllers/apps/configuration/rolling_upgrade_policy.go @@ -51,7 +51,7 @@ func init() { } func (r *rollingUpgradePolicy) Upgrade(params reconfigureParams) (ReturnedStatus, error) { - return 
performRollingUpgrade(params, GetRSMRollingUpgradeFuncs()) + return performRollingUpgrade(params, GetInstanceSetRollingUpgradeFuncs()) } func (r *rollingUpgradePolicy) GetPolicyName() string { diff --git a/controllers/apps/configuration/rolling_upgrade_policy_test.go b/controllers/apps/configuration/rolling_upgrade_policy_test.go index 8095412425c..adfa1161d55 100644 --- a/controllers/apps/configuration/rolling_upgrade_policy_test.go +++ b/controllers/apps/configuration/rolling_upgrade_policy_test.go @@ -61,7 +61,7 @@ var _ = Describe("Reconfigure RollingPolicy", func() { createReconfigureParam := func(compType appsv1alpha1.WorkloadType, replicas int) reconfigureParams { return newMockReconfigureParams("rollingPolicy", k8sMockClient.Client(), - withMockRSM(replicas, nil), + withMockInstanceSet(replicas, nil), withConfigSpec("for_test", map[string]string{ "key": "value", }), @@ -105,10 +105,10 @@ var _ = Describe("Reconfigure RollingPolicy", func() { acc := 0 mockPods := [][]corev1.Pod{ - newMockPodsWithRSM(&mockParam.RSMUnits[0], 2), - newMockPodsWithRSM(&mockParam.RSMUnits[0], 5, + newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 2), + newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 5, mockLeaderLabel), - newMockPodsWithRSM(&mockParam.RSMUnits[0], 3, + newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 3, withReadyPod(0, 0), withAvailablePod(0, 3), mockLeaderLabel), @@ -184,7 +184,7 @@ var _ = Describe("Reconfigure RollingPolicy", func() { }, }, } - pods = newMockPodsWithRSM(&mockParam.RSMUnits[0], defaultReplica) + pods = newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], defaultReplica) } k8sMockClient.MockListMethod(testutil.WithListReturned( diff --git a/controllers/apps/configuration/simple_policy.go b/controllers/apps/configuration/simple_policy.go index f971012c542..4459d2fbf51 100644 --- a/controllers/apps/configuration/simple_policy.go +++ b/controllers/apps/configuration/simple_policy.go @@ -37,7 +37,7 @@ func init() 
{ func (s *simplePolicy) Upgrade(params reconfigureParams) (ReturnedStatus, error) { params.Ctx.Log.V(1).Info("simple policy begin....") - return restartAndCheckComponent(params, GetRSMRollingUpgradeFuncs(), fromWorkloadObjects(params)) + return restartAndCheckComponent(params, GetInstanceSetRollingUpgradeFuncs(), fromWorkloadObjects(params)) } func (s *simplePolicy) GetPolicyName() string { diff --git a/controllers/apps/configuration/simple_policy_test.go b/controllers/apps/configuration/simple_policy_test.go index 8232dfc064b..c04087398e9 100644 --- a/controllers/apps/configuration/simple_policy_test.go +++ b/controllers/apps/configuration/simple_policy_test.go @@ -59,7 +59,7 @@ var _ = Describe("Reconfigure simplePolicy", func() { Expect(simplePolicy.GetPolicyName()).Should(BeEquivalentTo("simple")) mockParam := newMockReconfigureParams("simplePolicy", k8sMockClient.Client(), - withMockRSM(2, nil), + withMockInstanceSet(2, nil), withConfigSpec("for_test", map[string]string{ "key": "value", }), @@ -77,14 +77,14 @@ var _ = Describe("Reconfigure simplePolicy", func() { testutil.WithSucceed(testutil.WithAnyTimes())) k8sMockClient.MockListMethod(testutil.WithListReturned( testutil.WithConstructListSequenceResult([][]runtime.Object{ - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 2)), - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 2, withReadyPod(0, 2), func(pod *corev1.Pod, index int) { + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 2)), + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 2, withReadyPod(0, 2), func(pod *corev1.Pod, index int) { // mock pod-1 restart if index == 1 { updatePodCfgVersion(pod, mockParam.getConfigKey(), mockParam.getTargetVersionHash()) } })), - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 2, withReadyPod(0, 2), func(pod *corev1.Pod, index int) { + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 2, 
withReadyPod(0, 2), func(pod *corev1.Pod, index int) { // mock all pod restart updatePodCfgVersion(pod, mockParam.getConfigKey(), mockParam.getTargetVersionHash()) })), @@ -122,7 +122,7 @@ var _ = Describe("Reconfigure simplePolicy", func() { Context("simple reconfigure policy test with Replication", func() { It("Should success", func() { mockParam := newMockReconfigureParams("simplePolicy", k8sMockClient.Client(), - withMockRSM(2, nil), + withMockInstanceSet(2, nil), withConfigSpec("for_test", map[string]string{ "key": "value", }), @@ -137,8 +137,8 @@ var _ = Describe("Reconfigure simplePolicy", func() { k8sMockClient.MockPatchMethod(testutil.WithSucceed(testutil.WithAnyTimes())) k8sMockClient.MockListMethod(testutil.WithListReturned( testutil.WithConstructListSequenceResult([][]runtime.Object{ - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 2)), - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 2, + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 2)), + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 2, withReadyPod(0, 2), func(pod *corev1.Pod, _ int) { updatePodCfgVersion(pod, mockParam.getConfigKey(), mockParam.getTargetVersionHash()) })), @@ -161,7 +161,7 @@ var _ = Describe("Reconfigure simplePolicy", func() { It("Should failed", func() { // not support type mockParam := newMockReconfigureParams("simplePolicy", k8sMockClient.Client(), - withMockRSM(2, nil), + withMockInstanceSet(2, nil), withConfigSpec("for_test", map[string]string{ "key": "value", }), @@ -178,14 +178,14 @@ var _ = Describe("Reconfigure simplePolicy", func() { testutil.WithSucceed(testutil.WithAnyTimes())) k8sMockClient.MockListMethod(testutil.WithListReturned( testutil.WithConstructListSequenceResult([][]runtime.Object{ - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 2)), - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 2, withReadyPod(0, 2), func(pod *corev1.Pod, index int) { + 
fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 2)), + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 2, withReadyPod(0, 2), func(pod *corev1.Pod, index int) { // mock pod-1 restart if index == 1 { updatePodCfgVersion(pod, mockParam.getConfigKey(), mockParam.getTargetVersionHash()) } })), - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 2, withReadyPod(0, 2), func(pod *corev1.Pod, index int) { + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 2, withReadyPod(0, 2), func(pod *corev1.Pod, index int) { // mock all pod restart updatePodCfgVersion(pod, mockParam.getConfigKey(), mockParam.getTargetVersionHash()) })), @@ -225,7 +225,7 @@ var _ = Describe("Reconfigure simplePolicy", func() { // It("Should failed", func() { // // mock not cc // mockParam := newMockReconfigureParams("simplePolicy", nil, - // withMockRSM(2, nil), + // withMockInstanceSet(2, nil), // withConfigSpec("not_tpl_name", map[string]string{ // "key": "value", // }), diff --git a/controllers/apps/configuration/sync_upgrade_policy.go b/controllers/apps/configuration/sync_upgrade_policy.go index fa1a4c4ab18..b61ce32f77d 100644 --- a/controllers/apps/configuration/sync_upgrade_policy.go +++ b/controllers/apps/configuration/sync_upgrade_policy.go @@ -54,7 +54,7 @@ func (o *syncPolicy) Upgrade(params reconfigureParams) (ReturnedStatus, error) { return makeReturnedStatus(ESNone), nil } - funcs := GetRSMRollingUpgradeFuncs() + funcs := GetInstanceSetRollingUpgradeFuncs() pods, err := funcs.GetPodsFunc(params) if err != nil { return makeReturnedStatus(ESFailedAndRetry), err diff --git a/controllers/apps/configuration/sync_upgrade_policy_test.go b/controllers/apps/configuration/sync_upgrade_policy_test.go index b10ff534aed..8b2d28b330e 100644 --- a/controllers/apps/configuration/sync_upgrade_policy_test.go +++ b/controllers/apps/configuration/sync_upgrade_policy_test.go @@ -63,7 +63,7 @@ var _ = 
Describe("Reconfigure OperatorSyncPolicy", func() { withGRPCClient(func(addr string) (cfgproto.ReconfigureClient, error) { return reconfigureClient, nil }), - withMockRSM(3, nil), + withMockInstanceSet(3, nil), withConfigSpec("for_test", map[string]string{"a": "c b e f"}), withConfigConstraintSpec(&appsv1beta1.FormatterConfig{Format: appsv1beta1.RedisCfg}), withConfigPatch(map[string]string{ @@ -80,9 +80,9 @@ var _ = Describe("Reconfigure OperatorSyncPolicy", func() { By("mock client get pod caller") k8sMockClient.MockListMethod(testutil.WithListReturned( testutil.WithConstructListSequenceResult([][]runtime.Object{ - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 3, + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 3, withReadyPod(0, 1))), - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 3, + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 3, withReadyPod(0, 3))), }), testutil.WithAnyTimes())) @@ -120,7 +120,7 @@ var _ = Describe("Reconfigure OperatorSyncPolicy", func() { withGRPCClient(func(addr string) (cfgproto.ReconfigureClient, error) { return reconfigureClient, nil }), - withMockRSM(3, nil), + withMockInstanceSet(3, nil), withConfigSpec("for_test", map[string]string{"a": "c b e f"}), withConfigConstraintSpec(&appsv1beta1.FormatterConfig{Format: appsv1beta1.RedisCfg}), withConfigPatch(map[string]string{ @@ -144,7 +144,7 @@ var _ = Describe("Reconfigure OperatorSyncPolicy", func() { By("mock client get pod caller") k8sMockClient.MockListMethod(testutil.WithListReturned( testutil.WithConstructListReturnedResult( - fromPodObjectList(newMockPodsWithRSM(&mockParam.RSMUnits[0], 3, + fromPodObjectList(newMockPodsWithInstanceSet(&mockParam.InstanceSetUnits[0], 3, withReadyPod(0, 1), func(pod *corev1.Pod, index int) { if index == 0 { if pod.Labels == nil { diff --git a/controllers/apps/configuration/types.go b/controllers/apps/configuration/types.go index 5cc3bf22482..0b730db20d3 
100644 --- a/controllers/apps/configuration/types.go +++ b/controllers/apps/configuration/types.go @@ -38,7 +38,6 @@ type RestartContainerFunc func(pod *corev1.Pod, ctx context.Context, containerNa type OnlineUpdatePodFunc func(pod *corev1.Pod, ctx context.Context, createClient createReconfigureClient, configSpec string, updatedParams map[string]string) error // Node: Distinguish between implementation and interface. -// RollingUpgradeFuncs defines the interface, rsm is an implementation of Stateful, Replication and Consensus, not the only solution. type RollingUpgradeFuncs struct { GetPodsFunc GetPodsFunc @@ -47,7 +46,7 @@ type RollingUpgradeFuncs struct { RestartComponent RestartComponent } -func GetRSMRollingUpgradeFuncs() RollingUpgradeFuncs { +func GetInstanceSetRollingUpgradeFuncs() RollingUpgradeFuncs { return RollingUpgradeFuncs{ GetPodsFunc: getPodsForOnlineUpdate, RestartContainerFunc: commonStopContainerWithPod, diff --git a/controllers/apps/operations/ops_progress_util.go b/controllers/apps/operations/ops_progress_util.go index 710cbde4a1f..43e7a1de56e 100644 --- a/controllers/apps/operations/ops_progress_util.go +++ b/controllers/apps/operations/ops_progress_util.go @@ -171,7 +171,7 @@ func handleComponentStatusProgress( if podList, err = intctrlcomp.GetComponentPodList(reqCtx.Ctx, cli, *opsRes.Cluster, clusterComponent.Name); err != nil { return } - completedCount, err = handleRSMProgress(reqCtx, cli, opsRes, podList, pgRes, compStatus) + completedCount, err = handleInstanceSetProgress(reqCtx, cli, opsRes, podList, pgRes, compStatus) expectReplicas := clusterComponent.Replicas if opsRes.OpsRequest.Status.Phase == appsv1alpha1.OpsCancellingPhase { // only rollback the actual re-created pod during cancelling. @@ -180,14 +180,14 @@ func handleComponentStatusProgress( return expectReplicas, completedCount, err } -// handleRSMProgress handles the component progressDetails which using RSM workloads. 
-func handleRSMProgress(reqCtx intctrlutil.RequestCtx, +// handleInstanceSetProgress handles the component progressDetails which using InstanceSet workloads. +func handleInstanceSetProgress(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRes *OpsResource, podList *corev1.PodList, pgRes progressResource, compStatus *appsv1alpha1.OpsRequestComponentStatus) (int32, error) { - minReadySeconds, err := intctrlcomp.GetComponentRSMMinReadySeconds(reqCtx.Ctx, cli, *opsRes.Cluster, pgRes.clusterComponent.Name) + minReadySeconds, err := intctrlcomp.GetComponentMinReadySeconds(reqCtx.Ctx, cli, *opsRes.Cluster, pgRes.clusterComponent.Name) if err != nil { return 0, err } @@ -463,7 +463,7 @@ func handleScaleOutProgress(reqCtx intctrlutil.RequestCtx, podList *corev1.PodList, compStatus *appsv1alpha1.OpsRequestComponentStatus) (int32, error) { var componentName = pgRes.clusterComponent.Name - minReadySeconds, err := intctrlcomp.GetComponentRSMMinReadySeconds(reqCtx.Ctx, cli, *opsRes.Cluster, componentName) + minReadySeconds, err := intctrlcomp.GetComponentMinReadySeconds(reqCtx.Ctx, cli, *opsRes.Cluster, componentName) if err != nil { return 0, err } @@ -510,7 +510,7 @@ func handleScaleDownProgress( }) } var componentName = pgRes.clusterComponent.Name - minReadySeconds, err := intctrlcomp.GetComponentRSMMinReadySeconds(reqCtx.Ctx, cli, *opsRes.Cluster, componentName) + minReadySeconds, err := intctrlcomp.GetComponentMinReadySeconds(reqCtx.Ctx, cli, *opsRes.Cluster, componentName) if err != nil { return 0, err } diff --git a/controllers/apps/operations/ops_util.go b/controllers/apps/operations/ops_util.go index bbf32acc824..026dc54d07e 100644 --- a/controllers/apps/operations/ops_util.go +++ b/controllers/apps/operations/ops_util.go @@ -199,14 +199,14 @@ func opsRequestHasProcessed(reqCtx intctrlutil.RequestCtx, cli client.Client, op return false } // if all pods of all components are with latest revision, ops has processed - rsmList := &workloads.ReplicatedStateMachineList{} 
- if err := cli.List(reqCtx.Ctx, rsmList, + itsList := &workloads.InstanceSetList{} + if err := cli.List(reqCtx.Ctx, itsList, client.InNamespace(opsRes.Cluster.Namespace), client.MatchingLabels{constant.AppInstanceLabelKey: opsRes.Cluster.Name}); err != nil { return false } - for _, rsm := range rsmList.Items { - isLatestRevision, err := intctrlcomp.IsComponentPodsWithLatestRevision(reqCtx.Ctx, cli, opsRes.Cluster, &rsm) + for _, its := range itsList.Items { + isLatestRevision, err := intctrlcomp.IsComponentPodsWithLatestRevision(reqCtx.Ctx, cli, opsRes.Cluster, &its) if err != nil { return false } diff --git a/controllers/apps/operations/restart.go b/controllers/apps/operations/restart.go index 04d933b8eb9..a3585557feb 100644 --- a/controllers/apps/operations/restart.go +++ b/controllers/apps/operations/restart.go @@ -66,7 +66,7 @@ func (r restartOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Clie componentKindList := []client.ObjectList{ &appv1.DeploymentList{}, &appv1.StatefulSetList{}, - &workloads.ReplicatedStateMachineList{}, + &workloads.InstanceSetList{}, } for _, objectList := range componentKindList { if err := restartComponent(reqCtx, cli, opsRes, componentNameMap, objectList); err != nil { diff --git a/controllers/apps/opsrequest_controller.go b/controllers/apps/opsrequest_controller.go index 379edaae846..99c4d38e3b5 100644 --- a/controllers/apps/opsrequest_controller.go +++ b/controllers/apps/opsrequest_controller.go @@ -92,7 +92,7 @@ func (r *OpsRequestReconciler) SetupWithManager(mgr ctrl.Manager) error { MaxConcurrentReconciles: int(math.Ceil(viper.GetFloat64(constant.CfgKBReconcileWorkers) / 2)), }). Watches(&appsv1alpha1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.parseRunningOpsRequests)). - Watches(&workloadsv1alpha1.ReplicatedStateMachine{}, handler.EnqueueRequestsFromMapFunc(r.parseRunningOpsRequestsForRSM)). 
+ Watches(&workloadsv1alpha1.InstanceSet{}, handler.EnqueueRequestsFromMapFunc(r.parseRunningOpsRequestsForInstanceSet)). Watches(&dpv1alpha1.Backup{}, handler.EnqueueRequestsFromMapFunc(r.parseBackupOpsRequest)). Watches(&corev1.PersistentVolumeClaim{}, handler.EnqueueRequestsFromMapFunc(r.parseVolumeExpansionOpsRequest)). Watches(&corev1.Pod{}, handler.EnqueueRequestsFromMapFunc(r.parsePod)). @@ -379,14 +379,14 @@ func (r *OpsRequestReconciler) parseRunningOpsRequests(ctx context.Context, obje return r.getRunningOpsRequestsFromCluster(cluster) } -func (r *OpsRequestReconciler) parseRunningOpsRequestsForRSM(ctx context.Context, object client.Object) []reconcile.Request { - rsm := object.(*workloadsv1alpha1.ReplicatedStateMachine) - clusterName := rsm.Labels[constant.AppInstanceLabelKey] +func (r *OpsRequestReconciler) parseRunningOpsRequestsForInstanceSet(ctx context.Context, object client.Object) []reconcile.Request { + its := object.(*workloadsv1alpha1.InstanceSet) + clusterName := its.Labels[constant.AppInstanceLabelKey] if clusterName == "" { return nil } cluster := &appsv1alpha1.Cluster{} - if err := r.Client.Get(ctx, client.ObjectKey{Name: clusterName, Namespace: rsm.Namespace}, cluster); err != nil { + if err := r.Client.Get(ctx, client.ObjectKey{Name: clusterName, Namespace: its.Namespace}, cluster); err != nil { return nil } return r.getRunningOpsRequestsFromCluster(cluster) diff --git a/controllers/apps/opsrequest_controller_test.go b/controllers/apps/opsrequest_controller_test.go index 0c86b157032..29504813c59 100644 --- a/controllers/apps/opsrequest_controller_test.go +++ b/controllers/apps/opsrequest_controller_test.go @@ -164,16 +164,16 @@ var _ = Describe("OpsRequest Controller", func() { })).ShouldNot(HaveOccurred()) } var mysqlSts *appsv1.StatefulSet - var mysqlRSM *workloads.ReplicatedStateMachine - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, mysqlCompName) - mysqlRSM = &rsmList.Items[0] - mysqlSts = 
testapps.NewStatefulSetFactory(mysqlRSM.Namespace, mysqlRSM.Name, clusterKey.Name, mysqlCompName). - SetReplicas(*mysqlRSM.Spec.Replicas).Create(&testCtx).GetObject() + var mysqlIts *workloads.InstanceSet + itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, mysqlCompName) + mysqlIts = &itsList.Items[0] + mysqlSts = testapps.NewStatefulSetFactory(mysqlIts.Namespace, mysqlIts.Name, clusterKey.Name, mysqlCompName). + SetReplicas(*mysqlIts.Spec.Replicas).Create(&testCtx).GetObject() Expect(testapps.ChangeObjStatus(&testCtx, mysqlSts, func() { testk8s.MockStatefulSetReady(mysqlSts) })).ShouldNot(HaveOccurred()) - Expect(testapps.ChangeObjStatus(&testCtx, mysqlRSM, func() { - testk8s.MockRSMReady(mysqlRSM, pod) + Expect(testapps.ChangeObjStatus(&testCtx, mysqlIts, func() { + testk8s.MockInstanceSetReady(mysqlIts, pod) })).ShouldNot(HaveOccurred()) Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(appsv1alpha1.RunningClusterPhase)) @@ -203,8 +203,8 @@ var _ = Describe("OpsRequest Controller", func() { // })).Should(Succeed()) By("mock bring Cluster and changed component back to running status") - Expect(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(mysqlRSM), func(tmpRSM *workloads.ReplicatedStateMachine) { - testk8s.MockRSMReady(tmpRSM, pod) + Expect(testapps.GetAndChangeObjStatus(&testCtx, client.ObjectKeyFromObject(mysqlIts), func(tmpIts *workloads.InstanceSet) { + testk8s.MockInstanceSetReady(tmpIts, pod) })()).ShouldNot(HaveOccurred()) Eventually(testapps.GetClusterComponentPhase(&testCtx, clusterKey, mysqlCompName)).Should(Equal(appsv1alpha1.RunningClusterCompPhase)) Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(appsv1alpha1.RunningClusterPhase)) @@ -223,9 +223,9 @@ var _ = Describe("OpsRequest Controller", func() { By("check cluster resource requirements changed") targetRequests := scalingCtx.target.Requests - rsmList = testk8s.ListAndCheckRSMWithComponent(&testCtx, 
clusterKey, mysqlCompName) - mysqlRSM = &rsmList.Items[0] - Expect(reflect.DeepEqual(mysqlRSM.Spec.Template.Spec.Containers[0].Resources.Requests, targetRequests)).Should(BeTrue()) + itsList = testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, mysqlCompName) + mysqlIts = &itsList.Items[0] + Expect(reflect.DeepEqual(mysqlIts.Spec.Template.Spec.Containers[0].Resources.Requests, targetRequests)).Should(BeTrue()) By("check OpsRequest reclaimed after ttl") Expect(testapps.ChangeObj(&testCtx, verticalScalingOpsRequest, func(lopsReq *appsv1alpha1.OpsRequest) { @@ -281,8 +281,8 @@ var _ = Describe("OpsRequest Controller", func() { }) componentWorkload := func() client.Object { - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, clusterKey, mysqlCompName) - return &rsmList.Items[0] + itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, clusterKey, mysqlCompName) + return &itsList.Items[0] } mockCompRunning := func(replicas int32) { @@ -296,14 +296,14 @@ var _ = Describe("OpsRequest Controller", func() { })).Should(Succeed()) wl := componentWorkload() - rsm, _ := wl.(*workloads.ReplicatedStateMachine) - sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, clusterKey.Name, mysqlCompName). - SetReplicas(*rsm.Spec.Replicas).GetObject() + its, _ := wl.(*workloads.InstanceSet) + sts := testapps.NewStatefulSetFactory(its.Namespace, its.Name, clusterKey.Name, mysqlCompName). + SetReplicas(*its.Spec.Replicas).GetObject() testapps.CheckedCreateK8sResource(&testCtx, sts) mockPods := testapps.MockConsensusComponentPods(&testCtx, sts, clusterObj.Name, mysqlCompName) - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, mockPods...) + Expect(testapps.ChangeObjStatus(&testCtx, its, func() { + testk8s.MockInstanceSetReady(its, mockPods...) 
})).ShouldNot(HaveOccurred()) Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { testk8s.MockStatefulSetReady(sts) @@ -505,11 +505,11 @@ var _ = Describe("OpsRequest Controller", func() { By("check the underlying workload been updated") Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), - func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - g.Expect(*rsm.Spec.Replicas).Should(Equal(replicas)) + func(g Gomega, its *workloads.InstanceSet) { + g.Expect(*its.Spec.Replicas).Should(Equal(replicas)) })).Should(Succeed()) - rsm := componentWorkload() - Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(rsm), func(sts *appsv1.StatefulSet) { + its := componentWorkload() + Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(its), func(sts *appsv1.StatefulSet) { sts.Spec.Replicas = &replicas })).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), @@ -583,11 +583,11 @@ var _ = Describe("OpsRequest Controller", func() { By("check the underlying workload been updated") Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), - func(g Gomega, rsm *workloads.ReplicatedStateMachine) { - g.Expect(*rsm.Spec.Replicas).Should(Equal(replicas)) + func(g Gomega, its *workloads.InstanceSet) { + g.Expect(*its.Spec.Replicas).Should(Equal(replicas)) })).Should(Succeed()) - rsm := componentWorkload() - Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(rsm), func(sts *appsv1.StatefulSet) { + its := componentWorkload() + Eventually(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(its), func(sts *appsv1.StatefulSet) { sts.Spec.Replicas = &replicas })).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(componentWorkload()), diff --git a/controllers/apps/systemaccount_controller_test.go b/controllers/apps/systemaccount_controller_test.go index 
704e851ff20..5a52b85b83a 100644 --- a/controllers/apps/systemaccount_controller_test.go +++ b/controllers/apps/systemaccount_controller_test.go @@ -176,18 +176,18 @@ var _ = Describe("SystemAccount Controller", func() { _ = assureEndpoint(objectKey.Namespace, headlessServiceName, ips[0:clusterEndPointsSize]) By("Mock the underlying workloads to ready") - rsmList := testk8s.ListAndCheckRSMWithComponent(&testCtx, objectKey, compName) - rsm := &rsmList.Items[0] + itsList := testk8s.ListAndCheckInstanceSetWithComponent(&testCtx, objectKey, compName) + its := &itsList.Items[0] podName := fmt.Sprintf("%s-%s-0", objectKey.Name, compName) pod := testapps.MockConsensusComponentStsPod(&testCtx, nil, objectKey.Name, compName, podName, "leader", "ReadWrite") - sts := testapps.NewStatefulSetFactory(rsm.Namespace, rsm.Name, objectKey.Name, compName). - SetReplicas(*rsm.Spec.Replicas).Create(&testCtx).GetObject() + sts := testapps.NewStatefulSetFactory(its.Namespace, its.Name, objectKey.Name, compName). 
+ SetReplicas(*its.Spec.Replicas).Create(&testCtx).GetObject() Expect(testapps.ChangeObjStatus(&testCtx, sts, func() { testk8s.MockStatefulSetReady(sts) })).ShouldNot(HaveOccurred()) - Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - testk8s.MockRSMReady(rsm, pod) + Expect(testapps.ChangeObjStatus(&testCtx, its, func() { + testk8s.MockInstanceSetReady(its, pod) })).ShouldNot(HaveOccurred()) By("Wait cluster phase to be Running") diff --git a/controllers/apps/transformer_cluster_backup_policy.go b/controllers/apps/transformer_cluster_backup_policy.go index a02af3523ab..594f4956b35 100644 --- a/controllers/apps/transformer_cluster_backup_policy.go +++ b/controllers/apps/transformer_cluster_backup_policy.go @@ -344,13 +344,13 @@ func (r *clusterBackupPolicyTransformer) syncRoleLabelSelector(target *dpv1alpha } func (r *clusterBackupPolicyTransformer) getCompReplicas() int32 { - rsm := &workloads.ReplicatedStateMachine{} + its := &workloads.InstanceSet{} compSpec := r.getClusterComponentSpec() - rsmName := fmt.Sprintf("%s-%s", r.Cluster.Name, compSpec.Name) - if err := r.Client.Get(r.Context, client.ObjectKey{Name: rsmName, Namespace: r.Cluster.Namespace}, rsm); err != nil { + name := fmt.Sprintf("%s-%s", r.Cluster.Name, compSpec.Name) + if err := r.Client.Get(r.Context, client.ObjectKey{Name: name, Namespace: r.Cluster.Namespace}, its); err != nil { return compSpec.Replicas } - return *rsm.Spec.Replicas + return *its.Spec.Replicas } // buildBackupPolicy builds a new backup policy by the backup policy template. 
diff --git a/controllers/apps/transformer_component_deletion.go b/controllers/apps/transformer_component_deletion.go index ad1a42ba6c8..1caf552bb6a 100644 --- a/controllers/apps/transformer_component_deletion.go +++ b/controllers/apps/transformer_component_deletion.go @@ -170,7 +170,7 @@ func (t *componentDeletionTransformer) getCluster(transCtx *componentTransformCo func compOwnedKinds() []client.ObjectList { return []client.ObjectList{ - &workloads.ReplicatedStateMachineList{}, + &workloads.InstanceSetList{}, &policyv1.PodDisruptionBudgetList{}, &corev1.ServiceList{}, &corev1.ServiceAccountList{}, diff --git a/controllers/apps/transformer_component_rbac.go b/controllers/apps/transformer_component_rbac.go index b073485b973..cb7366edd8e 100644 --- a/controllers/apps/transformer_component_rbac.go +++ b/controllers/apps/transformer_component_rbac.go @@ -91,10 +91,10 @@ func (t *componentRBACTransformer) Transform(ctx graph.TransformContext, dag *gr } createServiceAccount(serviceAccount, graphCli, dag, parent) - rsmList := graphCli.FindAll(dag, &workloads.ReplicatedStateMachine{}) - for _, rsm := range rsmList { + itsList := graphCli.FindAll(dag, &workloads.InstanceSet{}) + for _, its := range itsList { // serviceAccount must be created before workload - graphCli.DependOn(dag, rsm, serviceAccount) + graphCli.DependOn(dag, its, serviceAccount) } return nil diff --git a/controllers/apps/transformer_component_rbac_test.go b/controllers/apps/transformer_component_rbac_test.go index b045f955eab..0b26d740b5c 100644 --- a/controllers/apps/transformer_component_rbac_test.go +++ b/controllers/apps/transformer_component_rbac_test.go @@ -146,9 +146,9 @@ var _ = Describe("object rbac transformer test.", func() { graphCli.Create(dagExpected, serviceAccount) graphCli.Create(dagExpected, roleBinding) graphCli.DependOn(dagExpected, roleBinding, serviceAccount) - rsmList := graphCli.FindAll(dagExpected, &workloads.ReplicatedStateMachine{}) - for i := range rsmList { - 
graphCli.DependOn(dagExpected, rsmList[i], serviceAccount) + itsList := graphCli.FindAll(dagExpected, &workloads.InstanceSet{}) + for i := range itsList { + graphCli.DependOn(dagExpected, itsList[i], serviceAccount) } Expect(dag.Equals(dagExpected, model.DefaultLess)).Should(BeTrue()) }) @@ -170,9 +170,9 @@ var _ = Describe("object rbac transformer test.", func() { graphCli.Create(dagExpected, clusterRoleBinding) graphCli.DependOn(dagExpected, roleBinding, clusterRoleBinding) graphCli.DependOn(dagExpected, clusterRoleBinding, serviceAccount) - rsmList := graphCli.FindAll(dagExpected, &workloads.ReplicatedStateMachine{}) - for i := range rsmList { - graphCli.DependOn(dagExpected, rsmList[i], serviceAccount) + itsList := graphCli.FindAll(dagExpected, &workloads.InstanceSet{}) + for i := range itsList { + graphCli.DependOn(dagExpected, itsList[i], serviceAccount) } Expect(dag.Equals(dagExpected, model.DefaultLess)).Should(BeTrue()) }) @@ -182,7 +182,7 @@ var _ = Describe("object rbac transformer test.", func() { func mockDAG(graphCli model.GraphClient, cluster *appsv1alpha1.Cluster) *graph.DAG { d := graph.NewDAG() graphCli.Root(d, cluster, cluster, model.ActionStatusPtr()) - rsm := &workloads.ReplicatedStateMachine{} - graphCli.Create(d, rsm) + its := &workloads.InstanceSet{} + graphCli.Create(d, its) return d } diff --git a/controllers/apps/transformer_component_service.go b/controllers/apps/transformer_component_service.go index 813162da625..f0132cd124c 100644 --- a/controllers/apps/transformer_component_service.go +++ b/controllers/apps/transformer_component_service.go @@ -35,8 +35,8 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/builder" "github.com/apecloud/kubeblocks/pkg/controller/component" "github.com/apecloud/kubeblocks/pkg/controller/graph" + "github.com/apecloud/kubeblocks/pkg/controller/instanceset" "github.com/apecloud/kubeblocks/pkg/controller/model" - "github.com/apecloud/kubeblocks/pkg/controller/rsm2" ) var ( @@ -62,7 +62,7 @@ func (t 
*componentServiceTransformer) Transform(ctx graph.TransformContext, dag synthesizeComp := transCtx.SynthesizeComponent graphCli, _ := transCtx.Client.(model.GraphClient) for _, service := range synthesizeComp.ComponentServices { - // component controller does not handle the default headless service; the default headless service is managed by the RSM. + // component controller does not handle the default headless service; the default headless service is managed by the InstanceSet. if t.skipDefaultHeadlessSvc(synthesizeComp, &service) { continue } @@ -225,13 +225,13 @@ func generatePodNames(synthesizeComp *component.SynthesizedComponent) []string { } podNames := make([]string, 0) - workloadName := constant.GenerateRSMNamePattern(synthesizeComp.ClusterName, synthesizeComp.Name) + workloadName := constant.GenerateWorkloadNamePattern(synthesizeComp.ClusterName, synthesizeComp.Name) for _, template := range synthesizeComp.Instances { - templateNames := rsm2.GenerateInstanceNamesFromTemplate(workloadName, template.Name, templateReplicas(template), synthesizeComp.OfflineInstances) + templateNames := instanceset.GenerateInstanceNamesFromTemplate(workloadName, template.Name, templateReplicas(template), synthesizeComp.OfflineInstances) podNames = append(podNames, templateNames...) } if templateReplicasCnt < synthesizeComp.Replicas { - names := rsm2.GenerateInstanceNamesFromTemplate(workloadName, "", synthesizeComp.Replicas-templateReplicasCnt, synthesizeComp.OfflineInstances) + names := instanceset.GenerateInstanceNamesFromTemplate(workloadName, "", synthesizeComp.Replicas-templateReplicasCnt, synthesizeComp.OfflineInstances) podNames = append(podNames, names...) 
} return podNames diff --git a/controllers/apps/transformer_component_status.go b/controllers/apps/transformer_component_status.go index 1407bce52d6..db49652d414 100644 --- a/controllers/apps/transformer_component_status.go +++ b/controllers/apps/transformer_component_status.go @@ -50,7 +50,7 @@ const ( componentPhaseTransition = "ComponentPhaseTransition" ) -// componentStatusTransformer computes the current status: read the underlying rsm status and update the component status +// componentStatusTransformer computes the current status: read the underlying workload status and update the component status type componentStatusTransformer struct { client.Client } @@ -64,10 +64,10 @@ type componentStatusHandler struct { synthesizeComp *component.SynthesizedComponent dag *graph.DAG - // runningRSM is a snapshot of the rsm that is already running - runningRSM *workloads.ReplicatedStateMachine - // protoRSM is the rsm object that is rebuilt from scratch during each reconcile process - protoRSM *workloads.ReplicatedStateMachine + // runningITS is a snapshot of the ITS that is already running + runningITS *workloads.InstanceSet + // protoITS is the ITS object that is rebuilt from scratch during each reconcile process + protoITS *workloads.InstanceSet // podsReady indicates if the component's underlying pods are ready podsReady bool } @@ -92,15 +92,15 @@ func (t *componentStatusTransformer) Transform(ctx graph.TransformContext, dag * } cluster := transCtx.Cluster synthesizeComp := transCtx.SynthesizeComponent - runningRSM, _ := transCtx.RunningWorkload.(*workloads.ReplicatedStateMachine) - protoRSM, _ := transCtx.ProtoWorkload.(*workloads.ReplicatedStateMachine) + runningITS, _ := transCtx.RunningWorkload.(*workloads.InstanceSet) + protoITS, _ := transCtx.ProtoWorkload.(*workloads.InstanceSet) switch { case model.IsObjectUpdating(transCtx.ComponentOrig): transCtx.Logger.Info(fmt.Sprintf("update component status after applying resources, generation: %d", comp.Generation)) 
comp.Status.ObservedGeneration = comp.Generation case model.IsObjectStatusUpdating(transCtx.ComponentOrig): // reconcile the component status and sync the component status to cluster status - csh := newComponentStatusHandler(reqCtx, t.Client, cluster, comp, synthesizeComp, runningRSM, protoRSM, dag) + csh := newComponentStatusHandler(reqCtx, t.Client, cluster, comp, synthesizeComp, runningITS, protoITS, dag) if err := csh.reconcileComponentStatus(); err != nil { return err } @@ -121,18 +121,18 @@ func (t *componentStatusTransformer) Transform(ctx graph.TransformContext, dag * // reconcileComponentStatus reconciles component status. func (r *componentStatusHandler) reconcileComponentStatus() error { - if r.runningRSM == nil { + if r.runningITS == nil { return nil } - // check if the rsm is deleting + // check if the ITS is deleting isDeleting := func() bool { - return !r.runningRSM.DeletionTimestamp.IsZero() + return !r.runningITS.DeletionTimestamp.IsZero() }() - // check if the rsm replicas is zero + // check if the ITS replicas is zero isZeroReplica := func() bool { - return (r.runningRSM.Spec.Replicas == nil || *r.runningRSM.Spec.Replicas == 0) && r.synthesizeComp.Replicas == 0 + return (r.runningITS.Spec.Replicas == nil || *r.runningITS.Spec.Replicas == 0) && r.synthesizeComp.Replicas == 0 }() // get the component's underlying pods @@ -145,8 +145,8 @@ func (r *componentStatusHandler) reconcileComponentStatus() error { return len(pods) > 0 }() - // check if the rsm is running - isRSMRunning, err := r.isRSMRunning() + // check if the ITS is running + isITSRunning, err := r.isInstanceSetRunning() if err != nil { return err } @@ -193,8 +193,8 @@ func (r *componentStatusHandler) reconcileComponentStatus() error { }() r.reqCtx.Log.Info( - fmt.Sprintf("component status conditions, isRSMRunning: %v, isAllConfigSynced: %v, hasRunningVolumeExpansion: %v, hasFailure: %v, isInCreatingPhase: %v, isComponentAvailable: %v", - isRSMRunning, isAllConfigSynced, 
hasRunningVolumeExpansion, hasFailure, isInCreatingPhase, isComponentAvailable)) + fmt.Sprintf("component status conditions, isInstanceSetRunning: %v, isAllConfigSynced: %v, hasRunningVolumeExpansion: %v, hasFailure: %v, isInCreatingPhase: %v, isComponentAvailable: %v", + isITSRunning, isAllConfigSynced, hasRunningVolumeExpansion, hasFailure, isInCreatingPhase, isComponentAvailable)) r.podsReady = false switch { @@ -206,7 +206,7 @@ func (r *componentStatusHandler) reconcileComponentStatus() error { case isZeroReplica: r.setComponentStatusPhase(appsv1alpha1.StoppedClusterCompPhase, nil, "component is Stopped") r.podsReady = true - case isRSMRunning && isAllConfigSynced && !hasRunningVolumeExpansion: + case isITSRunning && isAllConfigSynced && !hasRunningVolumeExpansion: r.setComponentStatusPhase(appsv1alpha1.RunningClusterCompPhase, nil, "component is Running") r.podsReady = true case !hasFailure && isInCreatingPhase: @@ -234,7 +234,7 @@ func (r *componentStatusHandler) reconcileComponentStatus() error { // 2. with latest revision // 3. 
and with leader role label set func (r *componentStatusHandler) isComponentAvailable(pods []*corev1.Pod) (bool, error) { - if isLatestRevision, err := component.IsComponentPodsWithLatestRevision(r.reqCtx.Ctx, r.cli, r.cluster, r.runningRSM); err != nil { + if isLatestRevision, err := component.IsComponentPodsWithLatestRevision(r.reqCtx.Ctx, r.cli, r.cluster, r.runningITS); err != nil { return false, err } else if !isLatestRevision { return false, nil @@ -247,7 +247,7 @@ func (r *componentStatusHandler) isComponentAvailable(pods []*corev1.Pod) (bool, if !ok { return false } - for _, replicaRole := range r.runningRSM.Spec.Roles { + for _, replicaRole := range r.runningITS.Spec.Roles { if roleName == replicaRole.Name && replicaRole.IsLeader { return true } @@ -257,7 +257,7 @@ func (r *componentStatusHandler) isComponentAvailable(pods []*corev1.Pod) (bool, hasPodAvailable := false for _, pod := range pods { - if !podutils.IsPodAvailable(pod, r.runningRSM.Spec.MinReadySeconds, metav1.Time{Time: time.Now()}) { + if !podutils.IsPodAvailable(pod, r.runningITS.Spec.MinReadySeconds, metav1.Time{Time: time.Now()}) { continue } if shouldCheckRole && hasLeaderRoleLabel(pod) { @@ -270,20 +270,20 @@ func (r *componentStatusHandler) isComponentAvailable(pods []*corev1.Pod) (bool, return hasPodAvailable, nil } -// isRunning checks if the component underlying rsm workload is running. -func (r *componentStatusHandler) isRSMRunning() (bool, error) { - if r.runningRSM == nil { +// isRunning checks if the component underlying workload is running. 
+func (r *componentStatusHandler) isInstanceSetRunning() (bool, error) { + if r.runningITS == nil { return false, nil } - if isLatestRevision, err := component.IsComponentPodsWithLatestRevision(r.reqCtx.Ctx, r.cli, r.cluster, r.runningRSM); err != nil { + if isLatestRevision, err := component.IsComponentPodsWithLatestRevision(r.reqCtx.Ctx, r.cli, r.cluster, r.runningITS); err != nil { return false, err } else if !isLatestRevision { - r.reqCtx.Log.Info("rsm underlying workload is not the latest revision") + r.reqCtx.Log.Info("underlying workload is not the latest revision") return false, nil } - // whether rsm is ready - return rsmcore.IsRSMReady(r.runningRSM), nil + // whether the ITS is ready + return rsmcore.IsInstanceSetReady(r.runningITS), nil } // isAllConfigSynced checks if all configTemplates are synced. @@ -328,19 +328,19 @@ func (r *componentStatusHandler) isAllConfigSynced() (bool, error) { // isScaleOutFailed checks if the component scale out failed. func (r *componentStatusHandler) isScaleOutFailed() (bool, error) { - if r.runningRSM == nil { + if r.runningITS == nil { return false, nil } - if r.runningRSM.Spec.Replicas == nil { + if r.runningITS.Spec.Replicas == nil { return false, nil } - if r.synthesizeComp.Replicas <= *r.runningRSM.Spec.Replicas { + if r.synthesizeComp.Replicas <= *r.runningITS.Spec.Replicas { return false, nil } - // stsObj is the underlying rsm workload which is already running in the component. - stsObj := rsmcore.ConvertRSMToSTS(r.runningRSM) - stsProto := rsmcore.ConvertRSMToSTS(r.protoRSM) + // stsObj is the underlying workload which is already running in the component. 
+ stsObj := rsmcore.ConvertInstanceSetToSTS(r.runningITS) + stsProto := rsmcore.ConvertInstanceSetToSTS(r.protoITS) backupKey := types.NamespacedName{ Namespace: stsObj.Namespace, Name: constant.GenerateResourceNameWithScalingSuffix(stsObj.Name), @@ -354,7 +354,7 @@ func (r *componentStatusHandler) isScaleOutFailed() (bool, error) { } else if status == backupStatusFailed { return true, nil } - for i := *r.runningRSM.Spec.Replicas; i < r.synthesizeComp.Replicas; i++ { + for i := *r.runningITS.Spec.Replicas; i < r.synthesizeComp.Replicas; i++ { if status, err := d.CheckRestoreStatus(i); err != nil { return false, err } else if status == dpv1alpha1.RestorePhaseFailed { @@ -370,8 +370,8 @@ func (r *componentStatusHandler) hasVolumeExpansionRunning() (bool, bool, error) running bool failed bool ) - for _, vct := range r.runningRSM.Spec.VolumeClaimTemplates { - volumes, err := r.getRunningVolumes(r.reqCtx, r.cli, vct.Name, r.runningRSM) + for _, vct := range r.runningITS.Spec.VolumeClaimTemplates { + volumes, err := r.getRunningVolumes(r.reqCtx, r.cli, vct.Name, r.runningITS) if err != nil { return false, false, err } @@ -386,9 +386,9 @@ func (r *componentStatusHandler) hasVolumeExpansionRunning() (bool, bool, error) return running, failed, nil } -// getRunningVolumes gets the running volumes of the rsm. +// getRunningVolumes gets the running volumes of the ITS. 
func (r *componentStatusHandler) getRunningVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, vctName string, - rsmObj *workloads.ReplicatedStateMachine) ([]*corev1.PersistentVolumeClaim, error) { + itsObj *workloads.InstanceSet) ([]*corev1.PersistentVolumeClaim, error) { labels := constant.GetComponentWellKnownLabels(r.cluster.Name, r.synthesizeComp.Name) pvcs, err := component.ListObjWithLabelsInNamespace(reqCtx.Ctx, cli, generics.PersistentVolumeClaimSignature, r.cluster.Namespace, labels, inDataContext4C()) @@ -399,7 +399,7 @@ func (r *componentStatusHandler) getRunningVolumes(reqCtx intctrlutil.RequestCtx return nil, err } matchedPVCs := make([]*corev1.PersistentVolumeClaim, 0) - prefix := fmt.Sprintf("%s-%s", vctName, rsmObj.Name) + prefix := fmt.Sprintf("%s-%s", vctName, itsObj.Name) for _, pvc := range pvcs { if strings.HasPrefix(pvc.Name, prefix) { matchedPVCs = append(matchedPVCs, pvc) @@ -411,7 +411,7 @@ func (r *componentStatusHandler) getRunningVolumes(reqCtx intctrlutil.RequestCtx // hasFailedPod checks if the component has failed pod. // TODO(xingran): remove the dependency of the component's workload type. 
func (r *componentStatusHandler) hasFailedPod(pods []*corev1.Pod) (bool, appsv1alpha1.ComponentMessageMap, error) { - if isLatestRevision, err := component.IsComponentPodsWithLatestRevision(r.reqCtx.Ctx, r.cli, r.cluster, r.runningRSM); err != nil { + if isLatestRevision, err := component.IsComponentPodsWithLatestRevision(r.reqCtx.Ctx, r.cli, r.cluster, r.runningITS); err != nil { return false, nil, err } else if !isLatestRevision { return false, nil, nil @@ -582,8 +582,8 @@ func newComponentStatusHandler(reqCtx intctrlutil.RequestCtx, cluster *appsv1alpha1.Cluster, comp *appsv1alpha1.Component, synthesizeComp *component.SynthesizedComponent, - runningRSM *workloads.ReplicatedStateMachine, - protoRSM *workloads.ReplicatedStateMachine, + runningITS *workloads.InstanceSet, + protoITS *workloads.InstanceSet, dag *graph.DAG) *componentStatusHandler { return &componentStatusHandler{ cli: cli, @@ -591,8 +591,8 @@ func newComponentStatusHandler(reqCtx intctrlutil.RequestCtx, cluster: cluster, comp: comp, synthesizeComp: synthesizeComp, - runningRSM: runningRSM, - protoRSM: protoRSM, + runningITS: runningITS, + protoITS: protoITS, dag: dag, podsReady: false, } diff --git a/controllers/apps/transformer_component_tls_test.go b/controllers/apps/transformer_component_tls_test.go index 38687e2c448..10d80b628a6 100644 --- a/controllers/apps/transformer_component_tls_test.go +++ b/controllers/apps/transformer_component_tls_test.go @@ -298,8 +298,8 @@ var _ = Describe("TLS self-signed cert function", func() { Eventually(testapps.GetClusterObservedGeneration(&testCtx, clusterKey)).Should(BeEquivalentTo(1)) Eventually(testapps.GetClusterPhase(&testCtx, clusterKey)).Should(Equal(appsv1alpha1.CreatingClusterPhase)) - rsmList := testk8s.ListAndCheckRSM(&testCtx, clusterKey) - sts := *rsm.ConvertRSMToSTS(&rsmList.Items[0]) + itsList := testk8s.ListAndCheckInstanceSet(&testCtx, clusterKey) + sts := *rsm.ConvertInstanceSetToSTS(&itsList.Items[0]) cd := &appsv1alpha1.ClusterDefinition{} 
Expect(k8sClient.Get(ctx, types.NamespacedName{Name: clusterDefName, Namespace: testCtx.DefaultNamespace}, cd)).Should(Succeed()) cmName := cfgcore.GetInstanceCMName(&sts, &cd.Spec.ComponentDefs[0].ConfigSpecs[0].ComponentTemplateSpec) diff --git a/controllers/apps/transformer_component_vars.go b/controllers/apps/transformer_component_vars.go index aeeecc85331..546cfcfdadd 100644 --- a/controllers/apps/transformer_component_vars.go +++ b/controllers/apps/transformer_component_vars.go @@ -93,16 +93,16 @@ func generatedComponent4LegacyCluster(transCtx *componentTransformContext) (bool } synthesizedComp := transCtx.SynthesizeComponent - rsmObj := &workloads.ReplicatedStateMachine{} - rsmKey := types.NamespacedName{ + itsObj := &workloads.InstanceSet{} + itsKey := types.NamespacedName{ Namespace: synthesizedComp.Namespace, - Name: constant.GenerateRSMNamePattern(synthesizedComp.ClusterName, synthesizedComp.Name), + Name: constant.GenerateWorkloadNamePattern(synthesizedComp.ClusterName, synthesizedComp.Name), } - if err := transCtx.Client.Get(transCtx.Context, rsmKey, rsmObj); err != nil { + if err := transCtx.Client.Get(transCtx.Context, itsKey, itsObj); err != nil { return false, client.IgnoreNotFound(err) } - return !model.IsOwnerOf(transCtx.ComponentOrig, rsmObj), nil + return !model.IsOwnerOf(transCtx.ComponentOrig, itsObj), nil } func buildEnvVarsNData(synthesizedComp *component.SynthesizedComponent, vars []corev1.EnvVar, legacy bool) ([]corev1.EnvVar, map[string]string) { diff --git a/controllers/apps/transformer_component_workload.go b/controllers/apps/transformer_component_workload.go index 60d97f90a41..0bce7cd84b0 100644 --- a/controllers/apps/transformer_component_workload.go +++ b/controllers/apps/transformer_component_workload.go @@ -51,12 +51,12 @@ import ( lorry "github.com/apecloud/kubeblocks/pkg/lorry/client" ) -// componentWorkloadTransformer handles component rsm workload generation +// componentWorkloadTransformer handles component workload 
generation type componentWorkloadTransformer struct { client.Client } -// componentWorkloadOps handles component rsm workload ops +// componentWorkloadOps handles component workload ops type componentWorkloadOps struct { cli client.Client reqCtx intctrlutil.RequestCtx @@ -64,10 +64,10 @@ type componentWorkloadOps struct { synthesizeComp *component.SynthesizedComponent dag *graph.DAG - // runningRSM is a snapshot of the rsm that is already running - runningRSM *workloads.ReplicatedStateMachine - // protoRSM is the rsm object that is rebuilt from scratch during each reconcile process - protoRSM *workloads.ReplicatedStateMachine + // runningITS is a snapshot of the InstanceSet that is already running + runningITS *workloads.InstanceSet + // protoITS is the InstanceSet object that is rebuilt from scratch during each reconcile process + protoITS *workloads.InstanceSet } var _ graph.Transformer = &componentWorkloadTransformer{} @@ -86,92 +86,92 @@ func (t *componentWorkloadTransformer) Transform(ctx graph.TransformContext, dag Recorder: transCtx.EventRecorder, } - runningRSM, err := t.runningRSMObject(ctx, synthesizeComp) + runningITS, err := t.runningInstanceSetObject(ctx, synthesizeComp) if err != nil { return err } - transCtx.RunningWorkload = runningRSM + transCtx.RunningWorkload = runningITS // build synthesizeComp podSpec volumeMounts buildPodSpecVolumeMounts(synthesizeComp) - // build rsm workload - protoRSM, err := factory.BuildRSM(synthesizeComp) + // build workload + protoITS, err := factory.BuildInstanceSet(synthesizeComp) if err != nil { return err } - if runningRSM != nil { - *protoRSM.Spec.Selector = *runningRSM.Spec.Selector - protoRSM.Spec.Template.Labels = runningRSM.Spec.Template.Labels + if runningITS != nil { + *protoITS.Spec.Selector = *runningITS.Spec.Selector + protoITS.Spec.Template.Labels = runningITS.Spec.Template.Labels } - transCtx.ProtoWorkload = protoRSM + transCtx.ProtoWorkload = protoITS - buildRSMPlacementAnnotation(transCtx.Component, 
protoRSM) + buildInstanceSetPlacementAnnotation(transCtx.Component, protoITS) - // build configuration template annotations to rsm workload - buildRSMConfigTplAnnotations(protoRSM, synthesizeComp) + // build configuration template annotations to workload + buildInstanceSetConfigTplAnnotations(protoITS, synthesizeComp) graphCli, _ := transCtx.Client.(model.GraphClient) - if runningRSM == nil { - if protoRSM != nil { - graphCli.Create(dag, protoRSM) + if runningITS == nil { + if protoITS != nil { + graphCli.Create(dag, protoITS) return nil } } else { - if protoRSM == nil { - graphCli.Delete(dag, runningRSM) + if protoITS == nil { + graphCli.Delete(dag, runningITS) } else { - err = t.handleUpdate(reqCtx, graphCli, dag, cluster, synthesizeComp, runningRSM, protoRSM) + err = t.handleUpdate(reqCtx, graphCli, dag, cluster, synthesizeComp, runningITS, protoITS) } } return err } -func (t *componentWorkloadTransformer) runningRSMObject(ctx graph.TransformContext, - synthesizeComp *component.SynthesizedComponent) (*workloads.ReplicatedStateMachine, error) { - rsmKey := types.NamespacedName{ +func (t *componentWorkloadTransformer) runningInstanceSetObject(ctx graph.TransformContext, + synthesizeComp *component.SynthesizedComponent) (*workloads.InstanceSet, error) { + itsKey := types.NamespacedName{ Namespace: synthesizeComp.Namespace, - Name: constant.GenerateRSMNamePattern(synthesizeComp.ClusterName, synthesizeComp.Name), + Name: constant.GenerateWorkloadNamePattern(synthesizeComp.ClusterName, synthesizeComp.Name), } - rsm := &workloads.ReplicatedStateMachine{} - if err := ctx.GetClient().Get(ctx.GetContext(), rsmKey, rsm); err != nil { + its := &workloads.InstanceSet{} + if err := ctx.GetClient().Get(ctx.GetContext(), itsKey, its); err != nil { if apierrors.IsNotFound(err) { return nil, nil } return nil, err } - return rsm, nil + return its, nil } func (t *componentWorkloadTransformer) handleUpdate(reqCtx intctrlutil.RequestCtx, cli model.GraphClient, dag *graph.DAG, - 
cluster *appsv1alpha1.Cluster, synthesizeComp *component.SynthesizedComponent, runningRSM, protoRSM *workloads.ReplicatedStateMachine) error { - // TODO(xingran): Some RSM workload operations should be moved down to Lorry implementation. Subsequent operations such as horizontal scaling will be removed from the component controller - if err := t.handleWorkloadUpdate(reqCtx, dag, cluster, synthesizeComp, runningRSM, protoRSM); err != nil { + cluster *appsv1alpha1.Cluster, synthesizeComp *component.SynthesizedComponent, runningITS, protoITS *workloads.InstanceSet) error { + // TODO(xingran): Some workload operations should be moved down to Lorry implementation. Subsequent operations such as horizontal scaling will be removed from the component controller + if err := t.handleWorkloadUpdate(reqCtx, dag, cluster, synthesizeComp, runningITS, protoITS); err != nil { return err } - objCopy := copyAndMergeRSM(runningRSM, protoRSM, synthesizeComp) + objCopy := copyAndMergeITS(runningITS, protoITS, synthesizeComp) if objCopy != nil && !cli.IsAction(dag, objCopy, model.ActionNoopPtr()) { cli.Update(dag, nil, objCopy, &model.ReplaceIfExistingOption{}) } // to work around that the scaled PVC will be deleted at object action. 
- if err := updateVolumes(reqCtx, t.Client, synthesizeComp, runningRSM, dag); err != nil { + if err := updateVolumes(reqCtx, t.Client, synthesizeComp, runningITS, dag); err != nil { return err } return nil } func (t *componentWorkloadTransformer) handleWorkloadUpdate(reqCtx intctrlutil.RequestCtx, dag *graph.DAG, - cluster *appsv1alpha1.Cluster, synthesizeComp *component.SynthesizedComponent, obj, rsm *workloads.ReplicatedStateMachine) error { - cwo := newComponentWorkloadOps(reqCtx, t.Client, cluster, synthesizeComp, obj, rsm, dag) + cluster *appsv1alpha1.Cluster, synthesizeComp *component.SynthesizedComponent, obj, its *workloads.InstanceSet) error { + cwo := newComponentWorkloadOps(reqCtx, t.Client, cluster, synthesizeComp, obj, its, dag) - // handle rsm expand volume + // handle expand volume if err := cwo.expandVolume(); err != nil { return err } - // handle rsm workload horizontal scale + // handle workload horizontal scale if err := cwo.horizontalScale(); err != nil { return err } @@ -217,10 +217,10 @@ func buildPodSpecVolumeMounts(synthesizeComp *component.SynthesizedComponent) { synthesizeComp.PodSpec = podSpec } -// copyAndMergeRSM merges two RSM objects for updating: +// copyAndMergeITS merges two ITS objects for updating: // 1. new an object targetObj by copying from oldObj // 2. merge all fields can be updated from newObj into targetObj -func copyAndMergeRSM(oldRsm, newRsm *workloads.ReplicatedStateMachine, synthesizeComp *component.SynthesizedComponent) *workloads.ReplicatedStateMachine { +func copyAndMergeITS(oldITS, newITS *workloads.InstanceSet, synthesizeComp *component.SynthesizedComponent) *workloads.InstanceSet { // mergeAnnotations keeps the original annotations. 
mergeMetadataMap := func(originalMap map[string]string, targetMap *map[string]string) { if targetMap == nil || originalMap == nil { @@ -237,101 +237,101 @@ func copyAndMergeRSM(oldRsm, newRsm *workloads.ReplicatedStateMachine, synthesiz } } - updateUpdateStrategy := func(rsmObj, rsmProto *workloads.ReplicatedStateMachine) { + updateUpdateStrategy := func(itsObj, itsProto *workloads.InstanceSet) { var objMaxUnavailable *intstr.IntOrString - if rsmObj.Spec.UpdateStrategy.RollingUpdate != nil { - objMaxUnavailable = rsmObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable + if itsObj.Spec.UpdateStrategy.RollingUpdate != nil { + objMaxUnavailable = itsObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable } - rsmObj.Spec.UpdateStrategy = rsmProto.Spec.UpdateStrategy - if objMaxUnavailable == nil && rsmObj.Spec.UpdateStrategy.RollingUpdate != nil { + itsObj.Spec.UpdateStrategy = itsProto.Spec.UpdateStrategy + if objMaxUnavailable == nil && itsObj.Spec.UpdateStrategy.RollingUpdate != nil { // HACK: This field is alpha-level (since v1.24) and is only honored by servers that enable the // MaxUnavailableStatefulSet feature. // When we get a nil MaxUnavailable from k8s, we consider that the field is not supported by the server, // and set the MaxUnavailable as nil explicitly to avoid the workload been updated unexpectedly. 
// Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#maximum-unavailable-pods - rsmObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = nil + itsObj.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = nil } } // be compatible with existed cluster - updateService := func(rsmObj, rsmProto *workloads.ReplicatedStateMachine) *corev1.Service { - if rsmProto.Spec.Service != nil { - return rsmProto.Spec.Service + updateService := func(itsObj, itsProto *workloads.InstanceSet) *corev1.Service { + if itsProto.Spec.Service != nil { + return itsProto.Spec.Service } - if rsmObj.Spec.Service == nil { + if itsObj.Spec.Service == nil { return nil } - defaultServiceName := rsmObj.Name + defaultServiceName := itsObj.Name for _, svc := range synthesizeComp.ComponentServices { if svc.PodService != nil && *svc.PodService || svc.DisableAutoProvision != nil && *svc.DisableAutoProvision { continue } serviceName := constant.GenerateComponentServiceName(synthesizeComp.ClusterName, synthesizeComp.Name, svc.ServiceName) if defaultServiceName == serviceName { - return rsmObj.Spec.Service + return itsObj.Spec.Service } } return nil } - rsmObjCopy := oldRsm.DeepCopy() - rsmProto := newRsm + itsObjCopy := oldITS.DeepCopy() + itsProto := newITS // If the service version and component definition are not updated, we should not update the images in workload. 
- checkNRollbackProtoImages(rsmObjCopy, rsmProto) + checkNRollbackProtoImages(itsObjCopy, itsProto) // remove original monitor annotations - if len(rsmObjCopy.Annotations) > 0 { - maps.DeleteFunc(rsmObjCopy.Annotations, func(k, v string) bool { + if len(itsObjCopy.Annotations) > 0 { + maps.DeleteFunc(itsObjCopy.Annotations, func(k, v string) bool { return strings.HasPrefix(k, "monitor.kubeblocks.io") }) } - mergeMetadataMap(rsmObjCopy.Annotations, &rsmProto.Annotations) - rsmObjCopy.Annotations = rsmProto.Annotations + mergeMetadataMap(itsObjCopy.Annotations, &itsProto.Annotations) + itsObjCopy.Annotations = itsProto.Annotations // keep the original template annotations. - // if annotations exist and are replaced, the rsm will be updated. - mergeMetadataMap(rsmObjCopy.Spec.Template.Annotations, &rsmProto.Spec.Template.Annotations) - rsmObjCopy.Spec.Template = *rsmProto.Spec.Template.DeepCopy() - rsmObjCopy.Spec.Replicas = rsmProto.Spec.Replicas - rsmObjCopy.Spec.Service = updateService(rsmObjCopy, rsmProto) - rsmObjCopy.Spec.AlternativeServices = rsmProto.Spec.AlternativeServices - rsmObjCopy.Spec.Roles = rsmProto.Spec.Roles - rsmObjCopy.Spec.RoleProbe = rsmProto.Spec.RoleProbe - rsmObjCopy.Spec.MembershipReconfiguration = rsmProto.Spec.MembershipReconfiguration - rsmObjCopy.Spec.MemberUpdateStrategy = rsmProto.Spec.MemberUpdateStrategy - rsmObjCopy.Spec.Credential = rsmProto.Spec.Credential - rsmObjCopy.Spec.Instances = rsmProto.Spec.Instances - rsmObjCopy.Spec.OfflineInstances = rsmProto.Spec.OfflineInstances - - if rsmProto.Spec.UpdateStrategy.Type != "" || rsmProto.Spec.UpdateStrategy.RollingUpdate != nil { - updateUpdateStrategy(rsmObjCopy, rsmProto) - } - - intctrlutil.ResolvePodSpecDefaultFields(oldRsm.Spec.Template.Spec, &rsmObjCopy.Spec.Template.Spec) - DelayUpdateRsmSystemFields(oldRsm.Spec, &rsmObjCopy.Spec) - - isSpecUpdated := !reflect.DeepEqual(&oldRsm.Spec, &rsmObjCopy.Spec) + // if annotations exist and are replaced, the ITS will be updated. 
+ mergeMetadataMap(itsObjCopy.Spec.Template.Annotations, &itsProto.Spec.Template.Annotations) + itsObjCopy.Spec.Template = *itsProto.Spec.Template.DeepCopy() + itsObjCopy.Spec.Replicas = itsProto.Spec.Replicas + itsObjCopy.Spec.Service = updateService(itsObjCopy, itsProto) + itsObjCopy.Spec.AlternativeServices = itsProto.Spec.AlternativeServices + itsObjCopy.Spec.Roles = itsProto.Spec.Roles + itsObjCopy.Spec.RoleProbe = itsProto.Spec.RoleProbe + itsObjCopy.Spec.MembershipReconfiguration = itsProto.Spec.MembershipReconfiguration + itsObjCopy.Spec.MemberUpdateStrategy = itsProto.Spec.MemberUpdateStrategy + itsObjCopy.Spec.Credential = itsProto.Spec.Credential + itsObjCopy.Spec.Instances = itsProto.Spec.Instances + itsObjCopy.Spec.OfflineInstances = itsProto.Spec.OfflineInstances + + if itsProto.Spec.UpdateStrategy.Type != "" || itsProto.Spec.UpdateStrategy.RollingUpdate != nil { + updateUpdateStrategy(itsObjCopy, itsProto) + } + + intctrlutil.ResolvePodSpecDefaultFields(oldITS.Spec.Template.Spec, &itsObjCopy.Spec.Template.Spec) + DelayUpdateInstanceSetSystemFields(oldITS.Spec, &itsObjCopy.Spec) + + isSpecUpdated := !reflect.DeepEqual(&oldITS.Spec, &itsObjCopy.Spec) if isSpecUpdated { - UpdateRsmSystemFields(rsmProto.Spec, &rsmObjCopy.Spec) + UpdateInstanceSetSystemFields(itsProto.Spec, &itsObjCopy.Spec) } - isLabelsUpdated := !reflect.DeepEqual(oldRsm.Labels, rsmObjCopy.Labels) - isAnnotationsUpdated := !reflect.DeepEqual(oldRsm.Annotations, rsmObjCopy.Annotations) + isLabelsUpdated := !reflect.DeepEqual(oldITS.Labels, itsObjCopy.Labels) + isAnnotationsUpdated := !reflect.DeepEqual(oldITS.Annotations, itsObjCopy.Annotations) if !isSpecUpdated && !isLabelsUpdated && !isAnnotationsUpdated { return nil } - return rsmObjCopy + return itsObjCopy } -func checkNRollbackProtoImages(rsmObj, rsmProto *workloads.ReplicatedStateMachine) { - if rsmObj.Annotations == nil || rsmProto.Annotations == nil { +func checkNRollbackProtoImages(itsObj, itsProto *workloads.InstanceSet) { + 
if itsObj.Annotations == nil || itsProto.Annotations == nil { return } annotationUpdated := func(key string) bool { - using, ok1 := rsmObj.Annotations[key] - proto, ok2 := rsmProto.Annotations[key] + using, ok1 := itsObj.Annotations[key] + proto, ok2 := itsProto.Annotations[key] if !ok1 || !ok2 { return true } @@ -355,7 +355,7 @@ func checkNRollbackProtoImages(rsmObj, rsmProto *workloads.ReplicatedStateMachin // otherwise, roll-back the images in proto images := make([]map[string]string, 2) - for i, cc := range [][]corev1.Container{rsmObj.Spec.Template.Spec.InitContainers, rsmObj.Spec.Template.Spec.Containers} { + for i, cc := range [][]corev1.Container{itsObj.Spec.Template.Spec.InitContainers, itsObj.Spec.Template.Spec.Containers} { images[i] = make(map[string]string) for _, c := range cc { images[i][c.Name] = c.Image @@ -366,17 +366,17 @@ func checkNRollbackProtoImages(rsmObj, rsmProto *workloads.ReplicatedStateMachin c.Image = image } } - for i := range rsmProto.Spec.Template.Spec.InitContainers { - rollback(0, &rsmProto.Spec.Template.Spec.InitContainers[i]) + for i := range itsProto.Spec.Template.Spec.InitContainers { + rollback(0, &itsProto.Spec.Template.Spec.InitContainers[i]) } - for i := range rsmProto.Spec.Template.Spec.Containers { - rollback(1, &rsmProto.Spec.Template.Spec.Containers[i]) + for i := range itsProto.Spec.Template.Spec.Containers { + rollback(1, &itsProto.Spec.Template.Spec.Containers[i]) } } -// expandVolume handles rsm workload expand volume +// expandVolume handles workload expand volume func (r *componentWorkloadOps) expandVolume() error { - for _, vct := range r.runningRSM.Spec.VolumeClaimTemplates { + for _, vct := range r.runningITS.Spec.VolumeClaimTemplates { var proto *corev1.PersistentVolumeClaimTemplate for i, v := range r.synthesizeComp.VolumeClaimTemplates { if v.Name == vct.Name { @@ -396,9 +396,9 @@ func (r *componentWorkloadOps) expandVolume() error { return nil } -// horizontalScale handles rsm workload horizontal scale +// 
horizontalScale handles workload horizontal scale func (r *componentWorkloadOps) horizontalScale() error { - sts := rsmcore.ConvertRSMToSTS(r.runningRSM) + sts := rsmcore.ConvertInstanceSetToSTS(r.runningITS) if sts.Status.ReadyReplicas == r.synthesizeComp.Replicas { return nil } @@ -500,8 +500,8 @@ func (r *componentWorkloadOps) scaleOut(stsObj *apps.StatefulSet) error { return nil } graphCli := model.NewGraphClient(r.cli) - graphCli.Noop(r.dag, r.protoRSM) - stsProto := rsmcore.ConvertRSMToSTS(r.protoRSM) + graphCli.Noop(r.dag, r.protoITS) + stsProto := rsmcore.ConvertInstanceSetToSTS(r.protoITS) d, err := newDataClone(r.reqCtx, r.cli, r.cluster, r.synthesizeComp, stsObj, stsProto, backupKey) if err != nil { return err @@ -516,11 +516,11 @@ func (r *componentWorkloadOps) scaleOut(stsObj *apps.StatefulSet) error { } } if succeed { - // pvcs are ready, rsm.replicas should be updated - graphCli.Update(r.dag, nil, r.protoRSM) + // pvcs are ready, ITS.replicas should be updated + graphCli.Update(r.dag, nil, r.protoITS) return r.postScaleOut(stsObj) } else { - graphCli.Noop(r.dag, r.protoRSM) + graphCli.Noop(r.dag, r.protoITS) // update objs will trigger reconcile, no need to requeue error objs1, objs2, err := d.CloneData(d) if err != nil { @@ -571,7 +571,7 @@ func (r *componentWorkloadOps) leaveMember4ScaleIn() error { return false } - for _, replicaRole := range r.runningRSM.Spec.Roles { + for _, replicaRole := range r.runningITS.Spec.Roles { if roleName == replicaRole.Name && replicaRole.IsLeader { return true } @@ -599,7 +599,7 @@ func (r *componentWorkloadOps) leaveMember4ScaleIn() error { return err } - // TODO: Move memberLeave to the RSM controller. Instead of performing a switchover, we can directly scale down the non-leader nodes. This is because the pod ordinal is not guaranteed to be continuous. + // TODO: Move memberLeave to the ITS controller. Instead of performing a switchover, we can directly scale down the non-leader nodes. 
This is because the pod ordinal is not guaranteed to be continuous. podsToMemberLeave := make([]*corev1.Pod, 0) genPodNamesByDefault := generatePodNames(r.synthesizeComp) for _, pod := range pods { @@ -665,11 +665,11 @@ func (r *componentWorkloadOps) deletePVCs4ScaleIn(stsObj *apps.StatefulSet) erro } func (r *componentWorkloadOps) expandVolumes(vctName string, proto *corev1.PersistentVolumeClaimTemplate) error { - for i := *r.runningRSM.Spec.Replicas - 1; i >= 0; i-- { + for i := *r.runningITS.Spec.Replicas - 1; i >= 0; i-- { pvc := &corev1.PersistentVolumeClaim{} pvcKey := types.NamespacedName{ Namespace: r.cluster.Namespace, - Name: fmt.Sprintf("%s-%s-%d", vctName, r.runningRSM.Name, i), + Name: fmt.Sprintf("%s-%s-%d", vctName, r.runningITS.Name, i), } pvcNotFound := false if err := r.cli.Get(r.reqCtx.Ctx, pvcKey, pvc, inDataContext4C()); err != nil { @@ -812,7 +812,7 @@ func (r *componentWorkloadOps) updatePVCSize(pvcKey types.NamespacedName, } updatePVCByRecreateFromStep := func(fromStep pvcRecreateStep) { - lastVertex := r.buildProtoRSMWorkloadVertex() + lastVertex := r.buildProtoITSWorkloadVertex() for step := pvRestorePolicyStep; step >= fromStep && step >= pvPolicyRetainStep; step-- { lastVertex = addStepMap[step](lastVertex, step) } @@ -850,11 +850,11 @@ func (r *componentWorkloadOps) updatePVCSize(pvcKey types.NamespacedName, return nil } -// buildProtoRSMWorkloadVertex builds protoRSM workload vertex -func (r *componentWorkloadOps) buildProtoRSMWorkloadVertex() *model.ObjectVertex { +// buildProtoITSWorkloadVertex builds protoITS workload vertex +func (r *componentWorkloadOps) buildProtoITSWorkloadVertex() *model.ObjectVertex { for _, vertex := range r.dag.Vertices() { v, _ := vertex.(*model.ObjectVertex) - if v.Obj == r.protoRSM { + if v.Obj == r.protoITS { return v } } @@ -862,12 +862,12 @@ func (r *componentWorkloadOps) buildProtoRSMWorkloadVertex() *model.ObjectVertex } func updateVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, 
synthesizeComp *component.SynthesizedComponent, - rsmObj *workloads.ReplicatedStateMachine, dag *graph.DAG) error { + itsObj *workloads.InstanceSet, dag *graph.DAG) error { graphCli := model.NewGraphClient(cli) getRunningVolumes := func(vctName string) ([]*corev1.PersistentVolumeClaim, error) { labels := constant.GetComponentWellKnownLabels(synthesizeComp.ClusterName, synthesizeComp.Name) pvcs, err := component.ListObjWithLabelsInNamespace(reqCtx.Ctx, cli, - generics.PersistentVolumeClaimSignature, rsmObj.Namespace, labels, inDataContext4C()) + generics.PersistentVolumeClaimSignature, itsObj.Namespace, labels, inDataContext4C()) if err != nil { if apierrors.IsNotFound(err) { return nil, nil @@ -875,7 +875,7 @@ func updateVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, synthesizeC return nil, err } matchedPVCs := make([]*corev1.PersistentVolumeClaim, 0) - prefix := fmt.Sprintf("%s-%s", vctName, rsmObj.Name) + prefix := fmt.Sprintf("%s-%s", vctName, itsObj.Name) for _, pvc := range pvcs { if strings.HasPrefix(pvc.Name, prefix) { matchedPVCs = append(matchedPVCs, pvc) @@ -905,15 +905,15 @@ func updateVolumes(reqCtx intctrlutil.RequestCtx, cli client.Client, synthesizeC return nil } -func buildRSMPlacementAnnotation(comp *appsv1alpha1.Component, rsm *workloads.ReplicatedStateMachine) { - if rsm.Annotations == nil { - rsm.Annotations = make(map[string]string) +func buildInstanceSetPlacementAnnotation(comp *appsv1alpha1.Component, its *workloads.InstanceSet) { + if its.Annotations == nil { + its.Annotations = make(map[string]string) } - rsm.Annotations[constant.KBAppMultiClusterPlacementKey] = placement(comp) + its.Annotations[constant.KBAppMultiClusterPlacementKey] = placement(comp) } -// buildRSMConfigTplAnnotations builds config tpl annotations for rsm -func buildRSMConfigTplAnnotations(rsm *workloads.ReplicatedStateMachine, synthesizedComp *component.SynthesizedComponent) { +// buildInstanceSetConfigTplAnnotations builds config tpl annotations for ITS +func 
buildInstanceSetConfigTplAnnotations(its *workloads.InstanceSet, synthesizedComp *component.SynthesizedComponent) { configTplAnnotations := make(map[string]string) for _, configTplSpec := range synthesizedComp.ConfigTemplates { configTplAnnotations[core.GenerateTPLUniqLabelKeyWithConfig(configTplSpec.Name)] = core.GetComponentCfgName(synthesizedComp.ClusterName, synthesizedComp.Name, configTplSpec.Name) @@ -921,13 +921,13 @@ func buildRSMConfigTplAnnotations(rsm *workloads.ReplicatedStateMachine, synthes for _, scriptTplSpec := range synthesizedComp.ScriptTemplates { configTplAnnotations[core.GenerateTPLUniqLabelKeyWithConfig(scriptTplSpec.Name)] = core.GetComponentCfgName(synthesizedComp.ClusterName, synthesizedComp.Name, scriptTplSpec.Name) } - updateRSMAnnotationsWithTemplate(rsm, configTplAnnotations) + updateInstanceSetAnnotationsWithTemplate(its, configTplAnnotations) } -func updateRSMAnnotationsWithTemplate(rsm *workloads.ReplicatedStateMachine, allTemplateAnnotations map[string]string) { +func updateInstanceSetAnnotationsWithTemplate(its *workloads.InstanceSet, allTemplateAnnotations map[string]string) { // full configmap upgrade existLabels := make(map[string]string) - annotations := rsm.GetAnnotations() + annotations := its.GetAnnotations() if annotations == nil { annotations = make(map[string]string) } @@ -946,23 +946,23 @@ func updateRSMAnnotationsWithTemplate(rsm *workloads.ReplicatedStateMachine, all for key, val := range allTemplateAnnotations { annotations[key] = val } - rsm.SetAnnotations(annotations) + its.SetAnnotations(annotations) } func newComponentWorkloadOps(reqCtx intctrlutil.RequestCtx, cli client.Client, cluster *appsv1alpha1.Cluster, synthesizeComp *component.SynthesizedComponent, - runningRSM *workloads.ReplicatedStateMachine, - protoRSM *workloads.ReplicatedStateMachine, + runningITS *workloads.InstanceSet, + protoITS *workloads.InstanceSet, dag *graph.DAG) *componentWorkloadOps { return &componentWorkloadOps{ cli: cli, reqCtx: reqCtx, 
cluster: cluster, synthesizeComp: synthesizeComp, - runningRSM: runningRSM, - protoRSM: protoRSM, + runningITS: runningITS, + protoITS: protoITS, dag: dag, } } diff --git a/controllers/k8score/event_controller_test.go b/controllers/k8score/event_controller_test.go index 3752d25d09b..277801383d7 100644 --- a/controllers/k8score/event_controller_test.go +++ b/controllers/k8score/event_controller_test.go @@ -125,11 +125,11 @@ var _ = Describe("Event Controller", func() { Eventually(testapps.CheckObjExists(&testCtx, client.ObjectKeyFromObject(clusterObj), &appsv1alpha1.Cluster{}, true)).Should(Succeed()) rsmName := fmt.Sprintf("%s-%s", clusterObj.Name, consensusCompName) - rsm := testapps.NewRSMFactory(clusterObj.Namespace, rsmName, clusterObj.Name, consensusCompName). + rsm := testapps.NewInstanceSetFactory(clusterObj.Namespace, rsmName, clusterObj.Name, consensusCompName). SetReplicas(int32(3)). AddContainer(corev1.Container{Name: testapps.DefaultMySQLContainerName, Image: testapps.ApeCloudMySQLImage}). 
Create(&testCtx).GetObject() - Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(rsm), func(tmpRSM *workloads.ReplicatedStateMachine) { + Expect(testapps.GetAndChangeObj(&testCtx, client.ObjectKeyFromObject(rsm), func(tmpRSM *workloads.InstanceSet) { tmpRSM.Spec.Roles = []workloads.ReplicaRole{ { Name: "leader", diff --git a/controllers/workloads/replicatedstatemachine_controller.go b/controllers/workloads/instanceset_controller.go similarity index 70% rename from controllers/workloads/replicatedstatemachine_controller.go rename to controllers/workloads/instanceset_controller.go index 49c609476f2..d73f1f5e60d 100644 --- a/controllers/workloads/replicatedstatemachine_controller.go +++ b/controllers/workloads/instanceset_controller.go @@ -36,60 +36,71 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/handler" + "github.com/apecloud/kubeblocks/pkg/controller/instanceset" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" "github.com/apecloud/kubeblocks/pkg/controller/model" "github.com/apecloud/kubeblocks/pkg/controller/multicluster" "github.com/apecloud/kubeblocks/pkg/controller/rsm" - "github.com/apecloud/kubeblocks/pkg/controller/rsm2" intctrlutil "github.com/apecloud/kubeblocks/pkg/controllerutil" viper "github.com/apecloud/kubeblocks/pkg/viperx" ) -// ReplicatedStateMachineReconciler reconciles a ReplicatedStateMachine object -type ReplicatedStateMachineReconciler struct { +// InstanceSetReconciler reconciles a InstanceSet object +type InstanceSetReconciler struct { client.Client Scheme *runtime.Scheme Recorder record.EventRecorder } -// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=replicatedstatemachines,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=replicatedstatemachines/status,verbs=get;update;patch -// 
+kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=replicatedstatemachines/finalizers,verbs=update - -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete;deletecollection -// +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get -// +kubebuilder:rbac:groups=apps,resources=deployments/finalizers,verbs=update +// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=workloads.kubeblocks.io,resources=instancesets/finalizers,verbs=update // +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete;deletecollection // +kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get // +kubebuilder:rbac:groups=apps,resources=statefulsets/finalizers,verbs=update +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups=core,resources=pods/status,verbs=get +// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=update + +// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims/status,verbs=get +// +kubebuilder:rbac:groups=core,resources=persistentvolumeclaims/finalizers,verbs=update + +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups=core,resources=configmaps/finalizers,verbs=update + +// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete;deletecollection +// +kubebuilder:rbac:groups=core,resources=services/status,verbs=get +// +kubebuilder:rbac:groups=core,resources=services/finalizers,verbs=update 
+ // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by -// the ReplicatedStateMachine object against the actual cluster state, and then +// the InstanceSet object against the actual cluster state, and then // perform operations to make the cluster state reflect the state specified by // the user. // // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *ReplicatedStateMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx).WithValues("ReplicatedStateMachine", req.NamespacedName) +func (r *InstanceSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx).WithValues("InstanceSet", req.NamespacedName) - provider, err := rsm2.CurrentReplicaProvider(ctx, r.Client, req.NamespacedName) + provider, err := instanceset.CurrentReplicaProvider(ctx, r.Client, req.NamespacedName) if err != nil { return ctrl.Result{}, err } - if provider == rsm2.PodProvider { + if provider == instanceset.PodProvider { err = kubebuilderx.NewController(ctx, r.Client, req, r.Recorder, logger). - Prepare(rsm2.NewTreeLoader()). - Do(rsm2.NewFixMetaReconciler()). - Do(rsm2.NewDeletionReconciler()). - Do(rsm2.NewStatusReconciler()). - Do(rsm2.NewRevisionUpdateReconciler()). - Do(rsm2.NewAssistantObjectReconciler()). - Do(rsm2.NewReplicasAlignmentReconciler()). - Do(rsm2.NewUpdateReconciler()). + Prepare(instanceset.NewTreeLoader()). + Do(instanceset.NewFixMetaReconciler()). + Do(instanceset.NewDeletionReconciler()). + Do(instanceset.NewStatusReconciler()). + Do(instanceset.NewRevisionUpdateReconciler()). + Do(instanceset.NewAssistantObjectReconciler()). + Do(instanceset.NewReplicasAlignmentReconciler()). 
+ Do(instanceset.NewUpdateReconciler()). Commit() return ctrl.Result{}, err } @@ -101,7 +112,7 @@ func (r *ReplicatedStateMachineReconciler) Reconcile(ctx context.Context, req ct Recorder: r.Recorder, } - reqCtx.Log.V(1).Info("reconcile", "ReplicatedStateMachine", req.NamespacedName) + reqCtx.Log.V(1).Info("reconcile", "InstanceSet", req.NamespacedName) requeueError := func(err error) (ctrl.Result, error) { if re, ok := err.(model.RequeueError); ok { @@ -113,7 +124,7 @@ func (r *ReplicatedStateMachineReconciler) Reconcile(ctx context.Context, req ct return intctrlutil.CheckedRequeueWithError(err, reqCtx.Log, "") } - // the RSM reconciliation loop is a two-phase model: plan Build and plan Execute + // the InstanceSet reconciliation loop is a two-phase model: plan Build and plan Execute // Init stage planBuilder := rsm.NewRSMPlanBuilder(reqCtx, r.Client, req) if err := planBuilder.Init(); err != nil { @@ -169,7 +180,7 @@ func (r *ReplicatedStateMachineReconciler) Reconcile(ctx context.Context, req ct } // SetupWithManager sets up the controller with the Manager. 
-func (r *ReplicatedStateMachineReconciler) SetupWithManager(mgr ctrl.Manager, multiClusterMgr multicluster.Manager) error { +func (r *InstanceSetReconciler) SetupWithManager(mgr ctrl.Manager, multiClusterMgr multicluster.Manager) error { ctx := &handler.FinderContext{ Context: context.Background(), Reader: r.Client, @@ -182,10 +193,10 @@ func (r *ReplicatedStateMachineReconciler) SetupWithManager(mgr ctrl.Manager, mu return r.setupWithMultiClusterManager(mgr, multiClusterMgr, ctx) } -func (r *ReplicatedStateMachineReconciler) setupWithManager(mgr ctrl.Manager, ctx *handler.FinderContext) error { +func (r *InstanceSetReconciler) setupWithManager(mgr ctrl.Manager, ctx *handler.FinderContext) error { if viper.GetBool(rsm.FeatureGateRSMCompatibilityMode) { nameLabels := []string{constant.AppInstanceLabelKey, constant.KBAppComponentLabelKey} - delegatorFinder := handler.NewDelegatorFinder(&workloads.ReplicatedStateMachine{}, nameLabels) + delegatorFinder := handler.NewDelegatorFinder(&workloads.InstanceSet{}, nameLabels) ownerFinder := handler.NewOwnerFinder(&appsv1.StatefulSet{}) stsHandler := handler.NewBuilder(ctx).AddFinder(delegatorFinder).Build() jobHandler := handler.NewBuilder(ctx).AddFinder(delegatorFinder).Build() @@ -193,7 +204,7 @@ func (r *ReplicatedStateMachineReconciler) setupWithManager(mgr ctrl.Manager, ct stsPodHandler := handler.NewBuilder(ctx).AddFinder(ownerFinder).AddFinder(delegatorFinder).Build() return intctrlutil.NewNamespacedControllerManagedBy(mgr). - For(&workloads.ReplicatedStateMachine{}). + For(&workloads.InstanceSet{}). WithOptions(controller.Options{ MaxConcurrentReconciles: viper.GetInt(constant.CfgKBReconcileWorkers), }). 
@@ -206,10 +217,10 @@ func (r *ReplicatedStateMachineReconciler) setupWithManager(mgr ctrl.Manager, ct } stsOwnerFinder := handler.NewOwnerFinder(&appsv1.StatefulSet{}) - rsmOwnerFinder := handler.NewOwnerFinder(&workloads.ReplicatedStateMachine{}) - podHandler := handler.NewBuilder(ctx).AddFinder(stsOwnerFinder).AddFinder(rsmOwnerFinder).Build() + itsOwnerFinder := handler.NewOwnerFinder(&workloads.InstanceSet{}) + podHandler := handler.NewBuilder(ctx).AddFinder(stsOwnerFinder).AddFinder(itsOwnerFinder).Build() return intctrlutil.NewNamespacedControllerManagedBy(mgr). - For(&workloads.ReplicatedStateMachine{}). + For(&workloads.InstanceSet{}). WithOptions(controller.Options{ MaxConcurrentReconciles: viper.GetInt(constant.CfgKBReconcileWorkers), }). @@ -221,10 +232,10 @@ func (r *ReplicatedStateMachineReconciler) setupWithManager(mgr ctrl.Manager, ct Complete(r) } -func (r *ReplicatedStateMachineReconciler) setupWithMultiClusterManager(mgr ctrl.Manager, +func (r *InstanceSetReconciler) setupWithMultiClusterManager(mgr ctrl.Manager, multiClusterMgr multicluster.Manager, ctx *handler.FinderContext) error { nameLabels := []string{constant.AppInstanceLabelKey, constant.KBAppComponentLabelKey} - delegatorFinder := handler.NewDelegatorFinder(&workloads.ReplicatedStateMachine{}, nameLabels) + delegatorFinder := handler.NewDelegatorFinder(&workloads.InstanceSet{}, nameLabels) ownerFinder := handler.NewOwnerFinder(&appsv1.StatefulSet{}) stsHandler := handler.NewBuilder(ctx).AddFinder(delegatorFinder).Build() // pod owned by legacy StatefulSet @@ -233,7 +244,7 @@ func (r *ReplicatedStateMachineReconciler) setupWithMultiClusterManager(mgr ctrl jobHandler := handler.NewBuilder(ctx).AddFinder(delegatorFinder).Build() b := intctrlutil.NewNamespacedControllerManagedBy(mgr). - For(&workloads.ReplicatedStateMachine{}). + For(&workloads.InstanceSet{}). 
WithOptions(controller.Options{ MaxConcurrentReconciles: viper.GetInt(constant.CfgKBReconcileWorkers), }) @@ -241,8 +252,8 @@ func (r *ReplicatedStateMachineReconciler) setupWithMultiClusterManager(mgr ctrl multiClusterMgr.Watch(b, &appsv1.StatefulSet{}, stsHandler). Watch(b, &corev1.Pod{}, stsPodHandler). Watch(b, &batchv1.Job{}, jobHandler). - Own(b, &corev1.Pod{}, &workloads.ReplicatedStateMachine{}). - Own(b, &corev1.PersistentVolumeClaim{}, &workloads.ReplicatedStateMachine{}) + Own(b, &corev1.Pod{}, &workloads.InstanceSet{}). + Own(b, &corev1.PersistentVolumeClaim{}, &workloads.InstanceSet{}) return b.Complete(r) } diff --git a/controllers/workloads/replicatedstatemachine_controller_test.go b/controllers/workloads/instanceset_controller_test.go similarity index 84% rename from controllers/workloads/replicatedstatemachine_controller_test.go rename to controllers/workloads/instanceset_controller_test.go index 2785c896009..70521bb6ab7 100644 --- a/controllers/workloads/replicatedstatemachine_controller_test.go +++ b/controllers/workloads/instanceset_controller_test.go @@ -29,20 +29,20 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/builder" - "github.com/apecloud/kubeblocks/pkg/controller/rsm2" + "github.com/apecloud/kubeblocks/pkg/controller/instanceset" testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" viper "github.com/apecloud/kubeblocks/pkg/viperx" ) -var _ = Describe("ReplicatedStateMachine Controller", func() { +var _ = Describe("InstanceSet Controller", func() { Context("reconciliation with ReplicaProvider=StatefulSet", func() { var replicaProvider string BeforeEach(func() { - replicaProvider = viper.GetString(rsm2.FeatureGateRSMReplicaProvider) - viper.Set(rsm2.FeatureGateRSMReplicaProvider, string(rsm2.StatefulSetProvider)) + replicaProvider = viper.GetString(instanceset.FeatureGateRSMReplicaProvider) + 
viper.Set(instanceset.FeatureGateRSMReplicaProvider, string(instanceset.StatefulSetProvider)) }) AfterEach(func() { - viper.Set(rsm2.FeatureGateRSMReplicaProvider, replicaProvider) + viper.Set(instanceset.FeatureGateRSMReplicaProvider, replicaProvider) }) It("should reconcile well", func() { @@ -87,7 +87,7 @@ var _ = Describe("ReplicatedStateMachine Controller", func() { Image: "foo", Command: []string{"bar"}, } - rsm := builder.NewReplicatedStateMachineBuilder(testCtx.DefaultNamespace, name). + rsm := builder.NewInstanceSetBuilder(testCtx.DefaultNamespace, name). AddMatchLabelsInMap(commonLabels). SetService(service). SetTemplate(template). @@ -95,12 +95,12 @@ var _ = Describe("ReplicatedStateMachine Controller", func() { GetObject() Expect(k8sClient.Create(ctx, rsm)).Should(Succeed()) Eventually(testapps.CheckObj(&testCtx, client.ObjectKeyFromObject(rsm), - func(g Gomega, set *workloads.ReplicatedStateMachine) { + func(g Gomega, set *workloads.InstanceSet) { g.Expect(set.Status.ObservedGeneration).Should(BeEquivalentTo(1)) }), ).Should(Succeed()) Expect(k8sClient.Delete(ctx, rsm)).Should(Succeed()) - Eventually(testapps.CheckObjExists(&testCtx, client.ObjectKeyFromObject(rsm), &workloads.ReplicatedStateMachine{}, false)). + Eventually(testapps.CheckObjExists(&testCtx, client.ObjectKeyFromObject(rsm), &workloads.InstanceSet{}, false)). 
Should(Succeed()) }) }) diff --git a/controllers/workloads/suite_test.go b/controllers/workloads/suite_test.go index a1783c0cb67..db196e32d8c 100644 --- a/controllers/workloads/suite_test.go +++ b/controllers/workloads/suite_test.go @@ -95,7 +95,7 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) recorder := k8sManager.GetEventRecorderFor("consensus-set-controller") - err = (&ReplicatedStateMachineReconciler{ + err = (&InstanceSetReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), Recorder: recorder, diff --git a/deploy/helm/config/rbac/role.yaml b/deploy/helm/config/rbac/role.yaml index 8857d623348..0a2926e8b77 100644 --- a/deploy/helm/config/rbac/role.yaml +++ b/deploy/helm/config/rbac/role.yaml @@ -12,31 +12,6 @@ rules: - get - patch - update -- apiGroups: - - apps - resources: - - deployments - verbs: - - create - - delete - - deletecollection - - get - - list - - patch - - update - - watch -- apiGroups: - - apps - resources: - - deployments/finalizers - verbs: - - update -- apiGroups: - - apps - resources: - - deployments/status - verbs: - - get - apiGroups: - apps resources: @@ -524,6 +499,7 @@ rules: resources: - pods verbs: + - create - delete - deletecollection - get @@ -550,6 +526,12 @@ rules: verbs: - get - list +- apiGroups: + - "" + resources: + - pods/status + verbs: + - get - apiGroups: - "" resources: @@ -929,6 +911,32 @@ rules: - get - patch - update +- apiGroups: + - workloads.kubeblocks.io + resources: + - instancesets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - workloads.kubeblocks.io + resources: + - instancesets/finalizers + verbs: + - update +- apiGroups: + - workloads.kubeblocks.io + resources: + - instancesets/status + verbs: + - get + - patch + - update - apiGroups: - workloads.kubeblocks.io resources: diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml index 
382a4ce5266..c0f6d4ca809 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusterdefinitions.yaml @@ -8686,10 +8686,10 @@ spec: type: object rsmSpec: description: Defines workload spec of this component. From KB - 0.7.0, RSM(ReplicatedStateMachineSpec) will be the underlying - CR which powers all kinds of workload in KB. RSM is an enhanced - stateful workload extension dedicated for heavy-state workloads - like databases. + 0.7.0, RSM(InstanceSetSpec) will be the underlying CR which + powers all kinds of workload in KB. RSM is an enhanced stateful + workload extension dedicated for heavy-state workloads like + databases. properties: memberUpdateStrategy: description: "Describes the strategy for updating Members @@ -8814,11 +8814,11 @@ spec: type: object switchoverAction: description: "Specifies the environment variables that - can be used in all following Actions: - KB_RSM_USERNAME: - Represents the username part of the credential - KB_RSM_PASSWORD: - Represents the password part of the credential - KB_RSM_LEADER_HOST: - Represents the leader host - KB_RSM_TARGET_HOST: Represents - the target host - KB_RSM_SERVICE_PORT: Represents + can be used in all following Actions: - KB_ITS_USERNAME: + Represents the username part of the credential - KB_ITS_PASSWORD: + Represents the password part of the credential - KB_ITS_LEADER_HOST: + Represents the leader host - KB_ITS_TARGET_HOST: Represents + the target host - KB_ITS_SERVICE_PORT: Represents the service port \n Defines the action to perform a switchover. If the Image is not configured, the latest [BusyBox](https://busybox.net/) image will @@ -8865,10 +8865,10 @@ spec: be a single string representing the role name defined in spec.Roles. The latest [BusyBox](https://busybox.net/) image will be used if Image is not configured. 
Environment - variables can be used in Command: - v_KB_RSM_LAST_STDOUT: + variables can be used in Command: - v_KB_ITS_LAST_STDOUT: stdout from the last action, watch for ''v_'' prefix - - KB_RSM_USERNAME: username part of the credential - - KB_RSM_PASSWORD: password part of the credential' + - KB_ITS_USERNAME: username part of the credential + - KB_ITS_PASSWORD: password part of the credential' items: properties: args: diff --git a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml index 25f92baf5cd..586d1a902e0 100644 --- a/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml +++ b/deploy/helm/crds/apps.kubeblocks.io_clusters.yaml @@ -7595,8 +7595,7 @@ spec: type: boolean readyWithoutPrimary: description: Indicates whether it is required for the - replica set manager (rsm) to have at least one primary - pod ready. + InstanceSet to have at least one primary instance ready. type: boolean role: description: Defines the role of the replica in the cluster. 
diff --git a/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml b/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml similarity index 99% rename from config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml rename to deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml index d190ff4cd10..c4b3bdadb85 100644 --- a/config/crd/bases/workloads.kubeblocks.io_replicatedstatemachines.yaml +++ b/deploy/helm/crds/workloads.kubeblocks.io_instancesets.yaml @@ -5,23 +5,23 @@ metadata: controller-gen.kubebuilder.io/version: v0.12.1 labels: app.kubernetes.io/name: kubeblocks - name: replicatedstatemachines.workloads.kubeblocks.io + name: instancesets.workloads.kubeblocks.io spec: group: workloads.kubeblocks.io names: categories: - kubeblocks - all - kind: ReplicatedStateMachine - listKind: ReplicatedStateMachineList - plural: replicatedstatemachines + kind: InstanceSet + listKind: InstanceSetList + plural: instancesets shortNames: - - rsm - singular: replicatedstatemachine + - its + singular: instanceset scope: Namespaced versions: - additionalPrinterColumns: - - description: leader pod name. + - description: leader instance name. jsonPath: .status.membersStatus[?(@.role.isLeader==true)].podName name: LEADER type: string @@ -39,8 +39,7 @@ spec: name: v1alpha1 schema: openAPIV3Schema: - description: ReplicatedStateMachine is the Schema for the replicatedstatemachines - API. + description: InstanceSet is the Schema for the instancesets API. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -604,7 +603,7 @@ spec: properties: password: description: Represents the user's password for the credential. - The corresponding environment variable will be KB_RSM_PASSWORD. + The corresponding environment variable will be KB_ITS_PASSWORD. properties: value: description: "Specifies the value of the environment variable. 
@@ -710,7 +709,7 @@ spec: type: object username: description: Defines the user's name for the credential. The corresponding - environment variable will be KB_RSM_USERNAME. + environment variable will be KB_ITS_USERNAME. properties: value: description: "Specifies the value of the environment variable. @@ -822,19 +821,19 @@ spec: description: "Overrides values in default Template. \n Instance is the fundamental unit managed by KubeBlocks. It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. - A RSM manages instances with a total count of Replicas, and by default, - all these instances are generated from the same template. The InstanceTemplate - provides a way to override values in the default template, allowing - the RSM to manage instances from different templates. \n The naming - convention for instances (pods) based on the RSM Name, InstanceTemplate - Name, and ordinal. The constructed instance name follows the pattern: - $(rsm.name)-$(template.name)-$(ordinal). By default, the ordinal - starts from 0 for each InstanceTemplate. It is important to ensure - that the Name of each InstanceTemplate is unique. \n The sum of - replicas across all InstanceTemplates should not exceed the total - number of Replicas specified for the RSM. Any remaining replicas - will be generated using the default template and will follow the - default naming rules." + An InstanceSet manages instances with a total count of Replicas, + and by default, all these instances are generated from the same + template. The InstanceTemplate provides a way to override values + in the default template, allowing the InstanceSet to manage instances + from different templates. \n The naming convention for instances + (pods) based on the InstanceSet Name, InstanceTemplate Name, and + ordinal. The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal). + By default, the ordinal starts from 0 for each InstanceTemplate. 
+ It is important to ensure that the Name of each InstanceTemplate + is unique. \n The sum of replicas across all InstanceTemplates should + not exceed the total number of Replicas specified for the InstanceSet. + Any remaining replicas will be generated using the default template + and will follow the default naming rules." items: properties: annotations: @@ -974,9 +973,9 @@ spec: type: object name: description: Specifies the name of the template. Each instance - of the template derives its name from the RSM's Name, the - template's Name and the instance's ordinal. The constructed - instance name follows the pattern $(rsm.name)-$(template.name)-$(ordinal). + of the template derives its name from the InstanceSet Name, + the template's Name and the instance's ordinal. The constructed + instance name follows the pattern $(instance_set.name)-$(template.name)-$(ordinal). The ordinal starts from 0 by default. maxLength: 54 pattern: ^[a-z0-9]([a-z0-9\.\-]*[a-z0-9])?$ @@ -3354,11 +3353,11 @@ spec: type: object switchoverAction: description: "Specifies the environment variables that can be - used in all following Actions: - KB_RSM_USERNAME: Represents - the username part of the credential - KB_RSM_PASSWORD: Represents - the password part of the credential - KB_RSM_LEADER_HOST: Represents - the leader host - KB_RSM_TARGET_HOST: Represents the target - host - KB_RSM_SERVICE_PORT: Represents the service port \n Defines + used in all following Actions: - KB_ITS_USERNAME: Represents + the username part of the credential - KB_ITS_PASSWORD: Represents + the password part of the credential - KB_ITS_LEADER_HOST: Represents + the leader host - KB_ITS_TARGET_HOST: Represents the target + host - KB_ITS_SERVICE_PORT: Represents the service port \n Defines the action to perform a switchover. If the Image is not configured, the latest [BusyBox](https://busybox.net/) image will be used." 
properties: @@ -3400,8 +3399,8 @@ spec: type: string type: array paused: - description: Indicates that the rsm is paused, meaning the reconciliation - of this rsm object will be paused. + description: Indicates that the InstanceSet is paused, meaning the + reconciliation of this InstanceSet object will be paused. type: boolean podManagementPolicy: description: "Controls how pods are created during initial scale up, @@ -3438,9 +3437,9 @@ spec: actions, the final output should be a single string representing the role name defined in spec.Roles. The latest [BusyBox](https://busybox.net/) image will be used if Image is not configured. Environment variables - can be used in Command: - v_KB_RSM_LAST_STDOUT: stdout from - the last action, watch for ''v_'' prefix - KB_RSM_USERNAME: - username part of the credential - KB_RSM_PASSWORD: password + can be used in Command: - v_KB_ITS_LAST_STDOUT: stdout from + the last action, watch for ''v_'' prefix - KB_ITS_USERNAME: + username part of the credential - KB_ITS_PASSWORD: password part of the credential' items: properties: @@ -11678,8 +11677,8 @@ spec: type: object updateStrategy: description: Indicates the StatefulSetUpdateStrategy that will be - employed to update Pods in the RSM when a revision is made to Template. - UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType + employed to update Pods in the InstanceSet when a revision is made + to Template. UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil properties: rollingUpdate: @@ -11717,11 +11716,11 @@ spec: type: object volumeClaimTemplates: description: Represents a list of claims that pods are allowed to - reference. The ReplicatedStateMachine controller is responsible - for mapping network identities to claims in a way that maintains - the identity of a pod. Every claim in this list must have at least - one matching (by name) volumeMount in one container in the template. 
- A claim in this list takes precedence over any volumes in the template, + reference. The InstanceSet controller is responsible for mapping + network identities to claims in a way that maintains the identity + of a pod. Every claim in this list must have at least one matching + (by name) volumeMount in one container in the template. A claim + in this list takes precedence over any volumes in the template, with the same name. items: description: PersistentVolumeClaim is a user's request for and claim @@ -12164,8 +12163,8 @@ spec: type: object type: array currentGeneration: - description: When not empty, indicates the version of the Replicated - State Machine (RSM) used to generate the underlying workload. + description: When not empty, indicates the version of the InstanceSet + used to generate the underlying workload. format: int64 type: integer currentReplicas: @@ -12182,8 +12181,8 @@ spec: additionalProperties: type: string description: currentRevisions, if not empty, indicates the old version - of the RSM used to generate Pods. key is the pod name, value is - the revision. + of the InstanceSet used to generate the underlying workload. key + is the pod name, value is the revision. type: object initReplicas: description: Defines the initial number of pods (members) when the @@ -12203,8 +12202,8 @@ spec: description: Whether the corresponding Pod is in ready condition. type: boolean readyWithoutPrimary: - description: Indicates whether it is required for the replica - set manager (rsm) to have at least one primary pod ready. + description: Indicates whether it is required for the InstanceSet + to have at least one primary instance ready. type: boolean role: description: Defines the role of the replica in the cluster. @@ -12268,8 +12267,8 @@ spec: additionalProperties: type: string description: updateRevisions, if not empty, indicates the new version - of the RSM used to generate Pods. key is the pod name, value is - the revision. 
+ of the InstanceSet used to generate the underlying workload. key + is the pod name, value is the revision. type: object updatedReplicas: description: updatedReplicas is the number of Pods created by the diff --git a/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml b/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml new file mode 100644 index 00000000000..de1b60c8b9f --- /dev/null +++ b/deploy/helm/templates/rbac/workloads_instanceset_editor_role.yaml @@ -0,0 +1,26 @@ +# permissions for end users to edit replicatedstatemachines. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kubeblocks.labels" . | nindent 4 }} + name: {{ include "kubeblocks.fullname" . }}-instanceset-editor-role +rules: +- apiGroups: + - workloads.kubeblocks.io + resources: + - instancesets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - workloads.kubeblocks.io + resources: + - instancesets/status + verbs: + - get diff --git a/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml b/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml new file mode 100644 index 00000000000..34fd0faed78 --- /dev/null +++ b/deploy/helm/templates/rbac/workloads_instanceset_viewer_role.yaml @@ -0,0 +1,22 @@ +# permissions for end users to view replicatedstatemachines. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kubeblocks.labels" . | nindent 4 }} + name: {{ include "kubeblocks.fullname" . 
}}-instanceset-viewer-role +rules: +- apiGroups: + - workloads.kubeblocks.io + resources: + - instancesets + verbs: + - get + - list + - watch +- apiGroups: + - workloads.kubeblocks.io + resources: + - instancesets/status + verbs: + - get diff --git a/deploy/helm/templates/rbac/workloads_replicatedstatemachine_editor_role.yaml b/deploy/helm/templates/rbac/workloads_replicatedstatemachine_editor_role.yaml index 69c5a5665e9..0c97a1026c9 100644 --- a/deploy/helm/templates/rbac/workloads_replicatedstatemachine_editor_role.yaml +++ b/deploy/helm/templates/rbac/workloads_replicatedstatemachine_editor_role.yaml @@ -3,13 +3,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: replicatedstatemachine-editor-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: kubeblocks - app.kubernetes.io/part-of: kubeblocks - app.kubernetes.io/managed-by: kustomize - name: replicatedstatemachine-editor-role + {{- include "kubeblocks.labels" . | nindent 4 }} + name: {{ include "kubeblocks.fullname" . 
}}-replicatedstatemachine-editor-role rules: - apiGroups: - workloads.kubeblocks.io diff --git a/deploy/helm/templates/rbac/workloads_replicatedstatemachine_viewer_role.yaml b/deploy/helm/templates/rbac/workloads_replicatedstatemachine_viewer_role.yaml index 087fa590ba2..79a5ab86203 100644 --- a/deploy/helm/templates/rbac/workloads_replicatedstatemachine_viewer_role.yaml +++ b/deploy/helm/templates/rbac/workloads_replicatedstatemachine_viewer_role.yaml @@ -3,13 +3,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: replicatedstatemachine-viewer-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: kubeblocks - app.kubernetes.io/part-of: kubeblocks - app.kubernetes.io/managed-by: kustomize - name: replicatedstatemachines-viewer-role + {{- include "kubeblocks.labels" . | nindent 4 }} + name: {{ include "kubeblocks.fullname" . }}-replicatedstatemachine-viewer-role rules: - apiGroups: - workloads.kubeblocks.io diff --git a/docs/developer_docs/api-reference/cluster.md b/docs/developer_docs/api-reference/cluster.md index 34cf091ad8c..783aaf348e4 100644 --- a/docs/developer_docs/api-reference/cluster.md +++ b/docs/developer_docs/api-reference/cluster.md @@ -3105,18 +3105,6 @@ Must comply with the IANA Service Naming rule.

-isSharding
- -bool - - - -(Optional) -

Specifies that this componentDef is a shading component definition.

- - - - componentDefs
[]string @@ -3782,7 +3770,7 @@ RSMSpec (Optional)

Defines workload spec of this component. -From KB 0.7.0, RSM(ReplicatedStateMachineSpec) will be the underlying CR which powers all kinds of workload in KB. +From KB 0.7.0, RSM(InstanceSetSpec) will be the underlying CR which powers all kinds of workload in KB. RSM is an enhanced stateful workload extension dedicated for heavy-state workloads like databases.

@@ -19999,12 +19987,12 @@ string Resource Types: -

ReplicatedStateMachine +

InstanceSet

-

ReplicatedStateMachine is the Schema for the replicatedstatemachines API.

+

InstanceSet is the Schema for the instancesets API.

@@ -20027,7 +20015,7 @@ stringkind
string - + @@ -20171,15 +20159,15 @@ Kubernetes core/v1.PodTemplateSpec

Overrides values in default Template.

Instance is the fundamental unit managed by KubeBlocks. It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. -A RSM manages instances with a total count of Replicas, +An InstanceSet manages instances with a total count of Replicas, and by default, all these instances are generated from the same template. The InstanceTemplate provides a way to override values in the default template, -allowing the RSM to manage instances from different templates.

-

The naming convention for instances (pods) based on the RSM Name, InstanceTemplate Name, and ordinal. -The constructed instance name follows the pattern: $(rsm.name)-$(template.name)-$(ordinal). +allowing the InstanceSet to manage instances from different templates.

+

The naming convention for instances (pods) based on the InstanceSet Name, InstanceTemplate Name, and ordinal. +The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal). By default, the ordinal starts from 0 for each InstanceTemplate. It is important to ensure that the Name of each InstanceTemplate is unique.

-

The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the RSM. +

The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. Any remaining replicas will be generated using the default template and will follow the default naming rules.

@@ -20207,7 +20195,7 @@ Any remaining replicas will be generated using the default template and will fol @@ -20320,7 +20308,7 @@ bool @@ -20344,8 +20332,8 @@ Credential @@ -20433,7 +20421,7 @@ string

Credential

-(Appears on:ReplicatedStateMachineSpec) +(Appears on:InstanceSetSpec)

@@ -20456,7 +20444,7 @@ CredentialVar @@ -20470,7 +20458,7 @@ CredentialVar @@ -20522,12 +20510,13 @@ Kubernetes core/v1.EnvVarSource
ReplicatedStateMachineInstanceSet
@@ -20048,8 +20036,8 @@ Refer to the Kubernetes API documentation for the fields of the spec
- -ReplicatedStateMachineSpec + +InstanceSetSpec
(Optional)

Represents a list of claims that pods are allowed to reference. -The ReplicatedStateMachine controller is responsible for mapping network identities to +The InstanceSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over @@ -20245,7 +20233,7 @@ Kubernetes apps/v1.StatefulSetUpdateStrategy

Indicates the StatefulSetUpdateStrategy that will be -employed to update Pods in the RSM when a revision is made to +employed to update Pods in the InstanceSet when a revision is made to Template. UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil

(Optional) -

Indicates that the rsm is paused, meaning the reconciliation of this rsm object will be paused.

+

Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused.

status
- -ReplicatedStateMachineStatus + +InstanceSetStatus

Defines the user’s name for the credential. -The corresponding environment variable will be KB_RSM_USERNAME.

+The corresponding environment variable will be KB_ITS_USERNAME.

Represents the user’s password for the credential. -The corresponding environment variable will be KB_RSM_PASSWORD.

+The corresponding environment variable will be KB_ITS_PASSWORD.

-

InstanceTemplate +

InstanceSetSpec

-(Appears on:ReplicatedStateMachineSpec) +(Appears on:InstanceSet)

+

InstanceSetSpec defines the desired state of InstanceSet

@@ -20539,281 +20528,292 @@ Kubernetes core/v1.EnvVarSource - -
-name
+replicas
-string +int32
-

Specifies the name of the template. -Each instance of the template derives its name from the RSM’s Name, the template’s Name and the instance’s ordinal. -The constructed instance name follows the pattern $(rsm.name)-$(template.name)-$(ordinal). -The ordinal starts from 0 by default.

+(Optional) +

Specifies the desired number of replicas of the given Template. +These replicas are instantiations of the same Template, with each having a consistent identity. +Defaults to 1 if unspecified.

-replicas
+minReadySeconds
int32
(Optional) -

Number of replicas of this template. -Default is 1.

+

Defines the minimum number of seconds a newly created pod should be ready +without any of its container crashing to be considered available. +Defaults to 0, meaning the pod will be considered available as soon as it is ready.

-annotations
+selector
-map[string]string + +Kubernetes meta/v1.LabelSelector +
-(Optional) -

Defines annotations to override. -Add new or override existing annotations.

+

Represents a label query over pods that should match the replica count. +It must match the pod template’s labels. +More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors

-labels
+serviceName
-map[string]string +string
-(Optional) -

Defines labels to override. -Add new or override existing labels.

+

Refers to the name of the service that governs this StatefulSet. +This service must exist before the StatefulSet and is responsible for +the network identity of the set. Pods get DNS/hostnames that follow a specific pattern.

-image
+service
-string + +Kubernetes core/v1.Service +
(Optional) -

Defines image to override. -Will override the first container’s image of the pod.

+

Defines the behavior of a service spec. +Provides read-write service. +https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

-nodeName
+alternativeServices
-string + +[]Kubernetes core/v1.Service +
(Optional) -

Defines NodeName to override.

+

Defines Alternative Services selector pattern specifier. +Can be used for creating Readonly service.

-nodeSelector
+template
-map[string]string + +Kubernetes core/v1.PodTemplateSpec +
-(Optional) -

Defines NodeSelector to override.

-tolerations
+instances
- -[]Kubernetes core/v1.Toleration + +[]InstanceTemplate
(Optional) -

Defines Tolerations to override. -Add new or override existing tolerations.

+

Overrides values in default Template.

+

Instance is the fundamental unit managed by KubeBlocks. +It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. +An InstanceSet manages instances with a total count of Replicas, +and by default, all these instances are generated from the same template. +The InstanceTemplate provides a way to override values in the default template, +allowing the InstanceSet to manage instances from different templates.

+

The naming convention for instances (pods) based on the InstanceSet Name, InstanceTemplate Name, and ordinal. +The constructed instance name follows the pattern: $(instance_set.name)-$(template.name)-$(ordinal). +By default, the ordinal starts from 0 for each InstanceTemplate. +It is important to ensure that the Name of each InstanceTemplate is unique.

+

The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the InstanceSet. +Any remaining replicas will be generated using the default template and will follow the default naming rules.

-resources
+offlineInstances
- -Kubernetes core/v1.ResourceRequirements - +[]string
(Optional) -

Defines Resources to override. -Will override the first container’s resources of the pod.

+

Specifies instances to be scaled in with dedicated names in the list.

-env
+volumeClaimTemplates
- -[]Kubernetes core/v1.EnvVar + +[]Kubernetes core/v1.PersistentVolumeClaim
(Optional) -

Defines Env to override. -Add new or override existing envs.

+

Represents a list of claims that pods are allowed to reference. +The InstanceSet controller is responsible for mapping network identities to +claims in a way that maintains the identity of a pod. Every claim in +this list must have at least one matching (by name) volumeMount in one +container in the template. A claim in this list takes precedence over +any volumes in the template, with the same name.

-volumes
+podManagementPolicy
- -[]Kubernetes core/v1.Volume + +Kubernetes apps/v1.PodManagementPolicyType
(Optional) -

Defines Volumes to override. -Add new or override existing volumes.

+

Controls how pods are created during initial scale up, +when replacing pods on nodes, or when scaling down.

+

The default policy is OrderedReady, where pods are created in increasing order and the controller waits until each pod is ready before +continuing. When scaling down, the pods are removed in the opposite order. +The alternative policy is Parallel which will create pods in parallel +to match the desired scale without waiting, and on scale down will delete +all pods at once.

-volumeMounts
+updateStrategy
- -[]Kubernetes core/v1.VolumeMount + +Kubernetes apps/v1.StatefulSetUpdateStrategy
-(Optional) -

Defines VolumeMounts to override. -Add new or override existing volume mounts of the first container in the pod.

+

Indicates the StatefulSetUpdateStrategy that will be +employed to update Pods in the InstanceSet when a revision is made to +Template. +UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil

-volumeClaimTemplates
+roles
- -[]Kubernetes core/v1.PersistentVolumeClaim + +[]ReplicaRole
(Optional) -

Defines VolumeClaimTemplates to override. -Add new or override existing volume claim templates.

+

A list of roles defined in the system.

-

MemberStatus -

-

-(Appears on:ClusterComponentStatus, ReplicatedStateMachineStatus) -

-
-
- - - - + + - -
FieldDescription +roleProbe
+ + +RoleProbe + + +
+(Optional) +

Provides method to probe role.

+
-podName
+membershipReconfiguration
-string + +MembershipReconfiguration +
-

Represents the name of the pod.

+(Optional) +

Provides actions to do membership dynamic reconfiguration.

-role
+memberUpdateStrategy
- -ReplicaRole + +MemberUpdateStrategy
(Optional) -

Defines the role of the replica in the cluster.

+

Members(Pods) update strategy.

+
    +
  • serial: update Members one by one that guarantee minimum component unavailable time.
  • +
  • bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time.
  • +
  • parallel: force parallel
  • +
-ready
+paused
bool
(Optional) -

Whether the corresponding Pod is in ready condition.

+

Indicates that the InstanceSet is paused, meaning the reconciliation of this InstanceSet object will be paused.

-readyWithoutPrimary
+credential
-bool + +Credential +
(Optional) -

Indicates whether it is required for the replica set manager (rsm) to have at least one primary pod ready.

+

Credential used to connect to DB engine

-

MemberUpdateStrategy -(string alias)

-

-(Appears on:RSMSpec, ReplicatedStateMachineSpec) -

-
-

MemberUpdateStrategy defines Cluster Component update strategy.

-
- - - - - - - - - - - - - - -
ValueDescription

"BestEffortParallel"

"Parallel"

"Serial"

-

MembershipReconfiguration +

InstanceSetStatus

-(Appears on:RSMSpec, ReplicatedStateMachineSpec) +(Appears on:InstanceSet)

+

InstanceSetStatus defines the observed state of InstanceSet

@@ -20825,159 +20825,104 @@ bool - - - - - - -
-switchoverAction
+StatefulSetStatus
- -Action + +Kubernetes apps/v1.StatefulSetStatus
-(Optional) -

Specifies the environment variables that can be used in all following Actions: -- KB_RSM_USERNAME: Represents the username part of the credential -- KB_RSM_PASSWORD: Represents the password part of the credential -- KB_RSM_LEADER_HOST: Represents the leader host -- KB_RSM_TARGET_HOST: Represents the target host -- KB_RSM_SERVICE_PORT: Represents the service port

-

Defines the action to perform a switchover. -If the Image is not configured, the latest BusyBox image will be used.

+

+(Members of StatefulSetStatus are embedded into this type.) +

-memberJoinAction
+initReplicas
- -Action - +int32
-(Optional) -

Defines the action to add a member. -If the Image is not configured, the Image from the previous non-nil action will be used.

-
-memberLeaveAction
- - -Action - - -
-(Optional) -

Defines the action to remove a member. -If the Image is not configured, the Image from the previous non-nil action will be used.

+

Defines the initial number of pods (members) when the cluster is first initialized. +This value is set to spec.Replicas at the time of object creation and remains constant thereafter.

-logSyncAction
+readyInitReplicas
- -Action - +int32
(Optional) -

Defines the action to trigger the new member to start log syncing. -If the Image is not configured, the Image from the previous non-nil action will be used.

+

Represents the number of pods (members) that have already reached the MembersStatus during the cluster initialization stage. +This value remains constant once it equals InitReplicas.

-promoteAction
+currentGeneration
- -Action - +int64
(Optional) -

Defines the action to inform the cluster that the new member can join voting now. -If the Image is not configured, the Image from the previous non-nil action will be used.

-
-

ReplicaRole -

-

-(Appears on:RSMSpec, MemberStatus, ReplicatedStateMachineSpec) -

-
-
- - - - - - - - - - -
FieldDescription
-name
- -string - -
-

Defines the role name of the replica.

+

When not empty, indicates the version of the InstanceSet used to generate the underlying workload.

-accessMode
+membersStatus
- -AccessMode + +[]MemberStatus
-

Specifies the service capabilities of this member.

+(Optional) +

Provides the status of each member in the cluster.

-canVote
+currentRevisions
-bool +map[string]string
(Optional) -

Indicates if this member has voting rights.

+

currentRevisions, if not empty, indicates the old version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision.

-isLeader
+updateRevisions
-bool +map[string]string
(Optional) -

Determines if this member is the leader.

+

updateRevisions, if not empty, indicates the new version of the InstanceSet used to generate the underlying workload. +key is the pod name, value is the revision.

-

ReplicatedStateMachineSpec +

InstanceTemplate

-(Appears on:ReplicatedStateMachine) +(Appears on:InstanceSetSpec)

-

ReplicatedStateMachineSpec defines the desired state of ReplicatedStateMachine

@@ -20989,292 +20934,281 @@ bool + +
-replicas
+name
-int32 +string
-(Optional) -

Specifies the desired number of replicas of the given Template. -These replicas are instantiations of the same Template, with each having a consistent identity. -Defaults to 1 if unspecified.

+

Specifies the name of the template. +Each instance of the template derives its name from the InstanceSet Name, the template’s Name and the instance’s ordinal. +The constructed instance name follows the pattern $(instance_set.name)-$(template.name)-$(ordinal). +The ordinal starts from 0 by default.

-minReadySeconds
+replicas
int32
(Optional) -

Defines the minimum number of seconds a newly created pod should be ready -without any of its container crashing to be considered available. -Defaults to 0, meaning the pod will be considered available as soon as it is ready.

+

Number of replicas of this template. +Default is 1.

-selector
+annotations
- -Kubernetes meta/v1.LabelSelector - +map[string]string
-

Represents a label query over pods that should match the replica count. -It must match the pod template’s labels. -More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors

+(Optional) +

Defines annotations to override. +Add new or override existing annotations.

-serviceName
+labels
-string +map[string]string
-

Refers to the name of the service that governs this StatefulSet. -This service must exist before the StatefulSet and is responsible for -the network identity of the set. Pods get DNS/hostnames that follow a specific pattern.

+(Optional) +

Defines labels to override. +Add new or override existing labels.

-service
+image
- -Kubernetes core/v1.Service - +string
(Optional) -

Defines the behavior of a service spec. -Provides read-write service. -https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

+

Defines image to override. +Will override the first container’s image of the pod.

-alternativeServices
+nodeName
- -[]Kubernetes core/v1.Service - +string
(Optional) -

Defines Alternative Services selector pattern specifier. -Can be used for creating Readonly service.

+

Defines NodeName to override.

-template
+nodeSelector
- -Kubernetes core/v1.PodTemplateSpec - +map[string]string
+(Optional) +

Defines NodeSelector to override.

-instances
+tolerations
- -[]InstanceTemplate + +[]Kubernetes core/v1.Toleration
(Optional) -

Overrides values in default Template.

-

Instance is the fundamental unit managed by KubeBlocks. -It represents a Pod with additional objects such as PVCs, Services, ConfigMaps, etc. -A RSM manages instances with a total count of Replicas, -and by default, all these instances are generated from the same template. -The InstanceTemplate provides a way to override values in the default template, -allowing the RSM to manage instances from different templates.

-

The naming convention for instances (pods) based on the RSM Name, InstanceTemplate Name, and ordinal. -The constructed instance name follows the pattern: $(rsm.name)-$(template.name)-$(ordinal). -By default, the ordinal starts from 0 for each InstanceTemplate. -It is important to ensure that the Name of each InstanceTemplate is unique.

-

The sum of replicas across all InstanceTemplates should not exceed the total number of Replicas specified for the RSM. -Any remaining replicas will be generated using the default template and will follow the default naming rules.

+

Defines Tolerations to override. +Add new or override existing tolerations.

-offlineInstances
+resources
-[]string + +Kubernetes core/v1.ResourceRequirements +
(Optional) -

Specifies instances to be scaled in with dedicated names in the list.

+

Defines Resources to override. +Will override the first container’s resources of the pod.

-volumeClaimTemplates
+env
- -[]Kubernetes core/v1.PersistentVolumeClaim + +[]Kubernetes core/v1.EnvVar
(Optional) -

Represents a list of claims that pods are allowed to reference. -The ReplicatedStateMachine controller is responsible for mapping network identities to -claims in a way that maintains the identity of a pod. Every claim in -this list must have at least one matching (by name) volumeMount in one -container in the template. A claim in this list takes precedence over -any volumes in the template, with the same name.

+

Defines Env to override. +Add new or override existing envs.

-podManagementPolicy
+volumes
- -Kubernetes apps/v1.PodManagementPolicyType + +[]Kubernetes core/v1.Volume
(Optional) -

Controls how pods are created during initial scale up, -when replacing pods on nodes, or when scaling down.

-

The default policy is OrderedReady, where pods are created in increasing order and the controller waits until each pod is ready before -continuing. When scaling down, the pods are removed in the opposite order. -The alternative policy is Parallel which will create pods in parallel -to match the desired scale without waiting, and on scale down will delete -all pods at once.

+

Defines Volumes to override. +Add new or override existing volumes.

-updateStrategy
+volumeMounts
- -Kubernetes apps/v1.StatefulSetUpdateStrategy + +[]Kubernetes core/v1.VolumeMount
-

Indicates the StatefulSetUpdateStrategy that will be -employed to update Pods in the RSM when a revision is made to -Template. -UpdateStrategy.Type will be set to appsv1.OnDeleteStatefulSetStrategyType if MemberUpdateStrategy is not nil

+(Optional) +

Defines VolumeMounts to override. +Add new or override existing volume mounts of the first container in the pod.

-roles
+volumeClaimTemplates
- -[]ReplicaRole + +[]Kubernetes core/v1.PersistentVolumeClaim
(Optional) -

A list of roles defined in the system.

+

Defines VolumeClaimTemplates to override. +Add new or override existing volume claim templates.

+

MemberStatus +

+

+(Appears on:ClusterComponentStatus, InstanceSetStatus) +

+
+
+ + - - + + + +
-roleProbe
- - -RoleProbe - - -
-(Optional) -

Provides method to probe role.

-
FieldDescription
-membershipReconfiguration
+podName
- -MembershipReconfiguration - +string
-(Optional) -

Provides actions to do membership dynamic reconfiguration.

+

Represents the name of the pod.

-memberUpdateStrategy
+role
- -MemberUpdateStrategy + +ReplicaRole
(Optional) -

Members(Pods) update strategy.

-
    -
  • serial: update Members one by one that guarantee minimum component unavailable time.
  • -
  • bestEffortParallel: update Members in parallel that guarantee minimum component un-writable time.
  • -
  • parallel: force parallel
  • -
+

Defines the role of the replica in the cluster.

-paused
+ready
bool
(Optional) -

Indicates that the rsm is paused, meaning the reconciliation of this rsm object will be paused.

+

Whether the corresponding Pod is in ready condition.

-credential
+readyWithoutPrimary
- -Credential - +bool
(Optional) -

Credential used to connect to DB engine

+

Indicates whether it is required for the InstanceSet to have at least one primary instance ready.

-

ReplicatedStateMachineStatus +

MemberUpdateStrategy +(string alias)

+

+(Appears on:RSMSpec, InstanceSetSpec) +

+
+

MemberUpdateStrategy defines Cluster Component update strategy.

+
+ + + + + + + + + + + + + + +
ValueDescription

"BestEffortParallel"

"Parallel"

"Serial"

+

MembershipReconfiguration

-(Appears on:ReplicatedStateMachine) +(Appears on:RSMSpec, InstanceSetSpec)

-

ReplicatedStateMachineStatus defines the observed state of ReplicatedStateMachine

@@ -21286,94 +21220,148 @@ Credential + + +
-StatefulSetStatus
+switchoverAction
- -Kubernetes apps/v1.StatefulSetStatus + +Action
-

-(Members of StatefulSetStatus are embedded into this type.) -

+(Optional) +

Specifies the environment variables that can be used in all following Actions: +- KB_ITS_USERNAME: Represents the username part of the credential +- KB_ITS_PASSWORD: Represents the password part of the credential +- KB_ITS_LEADER_HOST: Represents the leader host +- KB_ITS_TARGET_HOST: Represents the target host +- KB_ITS_SERVICE_PORT: Represents the service port

+

Defines the action to perform a switchover. +If the Image is not configured, the latest BusyBox image will be used.

-initReplicas
+memberJoinAction
-int32 + +Action +
-

Defines the initial number of pods (members) when the cluster is first initialized. -This value is set to spec.Replicas at the time of object creation and remains constant thereafter.

+(Optional) +

Defines the action to add a member. +If the Image is not configured, the Image from the previous non-nil action will be used.

-readyInitReplicas
+memberLeaveAction
-int32 + +Action +
(Optional) -

Represents the number of pods (members) that have already reached the MembersStatus during the cluster initialization stage. -This value remains constant once it equals InitReplicas.

+

Defines the action to remove a member. +If the Image is not configured, the Image from the previous non-nil action will be used.

-currentGeneration
+logSyncAction
-int64 + +Action +
(Optional) -

When not empty, indicates the version of the Replicated State Machine (RSM) used to generate the underlying workload.

+

Defines the action to trigger the new member to start log syncing. +If the Image is not configured, the Image from the previous non-nil action will be used.

-membersStatus
+promoteAction
- -[]MemberStatus + +Action
(Optional) -

Provides the status of each member in the cluster.

+

Defines the action to inform the cluster that the new member can join voting now. +If the Image is not configured, the Image from the previous non-nil action will be used.

+
+

ReplicaRole +

+

+(Appears on:RSMSpec, InstanceSetSpec, MemberStatus) +

+
+
+ + + + + + + + + + + + + + + @@ -21381,7 +21369,7 @@ key is the pod name, value is the revision.

RoleProbe

-(Appears on:RSMSpec, ReplicatedStateMachineSpec) +(Appears on:RSMSpec, InstanceSetSpec)

RoleProbe defines how to observe role

@@ -21425,9 +21413,9 @@ Actions defined here are executed in series. Upon completion of all actions, the final output should be a single string representing the role name defined in spec.Roles. The latest BusyBox image will be used if Image is not configured. Environment variables can be used in Command: -- v_KB_RSM_LASTSTDOUT: stdout from the last action, watch for ‘v’ prefix -- KB_RSM_USERNAME: username part of the credential -- KB_RSM_PASSWORD: password part of the credential

+- v_KB_ITS_LASTSTDOUT: stdout from the last action, watch for ‘v’ prefix +- KB_ITS_USERNAME: username part of the credential +- KB_ITS_PASSWORD: password part of the credential

diff --git a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_instanceset.go b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_instanceset.go new file mode 100644 index 00000000000..7c5e74e76d8 --- /dev/null +++ b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_instanceset.go @@ -0,0 +1,141 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeInstanceSets implements InstanceSetInterface +type FakeInstanceSets struct { + Fake *FakeWorkloadsV1alpha1 + ns string +} + +var instancesetsResource = v1alpha1.SchemeGroupVersion.WithResource("instancesets") + +var instancesetsKind = v1alpha1.SchemeGroupVersion.WithKind("InstanceSet") + +// Get takes name of the instanceSet, and returns the corresponding instanceSet object, and an error if there is any. +func (c *FakeInstanceSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.InstanceSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(instancesetsResource, c.ns, name), &v1alpha1.InstanceSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InstanceSet), err +} + +// List takes label and field selectors, and returns the list of InstanceSets that match those selectors. +func (c *FakeInstanceSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.InstanceSetList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(instancesetsResource, instancesetsKind, c.ns, opts), &v1alpha1.InstanceSetList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.InstanceSetList{ListMeta: obj.(*v1alpha1.InstanceSetList).ListMeta} + for _, item := range obj.(*v1alpha1.InstanceSetList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested instanceSets. +func (c *FakeInstanceSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(instancesetsResource, c.ns, opts)) + +} + +// Create takes the representation of a instanceSet and creates it. Returns the server's representation of the instanceSet, and an error, if there is any. +func (c *FakeInstanceSets) Create(ctx context.Context, instanceSet *v1alpha1.InstanceSet, opts v1.CreateOptions) (result *v1alpha1.InstanceSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(instancesetsResource, c.ns, instanceSet), &v1alpha1.InstanceSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InstanceSet), err +} + +// Update takes the representation of a instanceSet and updates it. Returns the server's representation of the instanceSet, and an error, if there is any. 
+func (c *FakeInstanceSets) Update(ctx context.Context, instanceSet *v1alpha1.InstanceSet, opts v1.UpdateOptions) (result *v1alpha1.InstanceSet, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(instancesetsResource, c.ns, instanceSet), &v1alpha1.InstanceSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InstanceSet), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeInstanceSets) UpdateStatus(ctx context.Context, instanceSet *v1alpha1.InstanceSet, opts v1.UpdateOptions) (*v1alpha1.InstanceSet, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(instancesetsResource, "status", c.ns, instanceSet), &v1alpha1.InstanceSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InstanceSet), err +} + +// Delete takes name of the instanceSet and deletes it. Returns an error if one occurs. +func (c *FakeInstanceSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteActionWithOptions(instancesetsResource, c.ns, name, opts), &v1alpha1.InstanceSet{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeInstanceSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(instancesetsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.InstanceSetList{}) + return err +} + +// Patch applies the patch and returns the patched instanceSet. +func (c *FakeInstanceSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InstanceSet, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(instancesetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.InstanceSet{}) + + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.InstanceSet), err +} diff --git a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_replicatedstatemachine.go b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_replicatedstatemachine.go deleted file mode 100644 index cb6d5df46fb..00000000000 --- a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_replicatedstatemachine.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright (C) 2022-2024 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - v1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeReplicatedStateMachines implements ReplicatedStateMachineInterface -type FakeReplicatedStateMachines struct { - Fake *FakeWorkloadsV1alpha1 - ns string -} - -var replicatedstatemachinesResource = v1alpha1.SchemeGroupVersion.WithResource("replicatedstatemachines") - -var replicatedstatemachinesKind = v1alpha1.SchemeGroupVersion.WithKind("ReplicatedStateMachine") - -// Get takes name of the replicatedStateMachine, and returns the corresponding replicatedStateMachine object, and an error if there is any. -func (c *FakeReplicatedStateMachines) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ReplicatedStateMachine, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(replicatedstatemachinesResource, c.ns, name), &v1alpha1.ReplicatedStateMachine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ReplicatedStateMachine), err -} - -// List takes label and field selectors, and returns the list of ReplicatedStateMachines that match those selectors. -func (c *FakeReplicatedStateMachines) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ReplicatedStateMachineList, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewListAction(replicatedstatemachinesResource, replicatedstatemachinesKind, c.ns, opts), &v1alpha1.ReplicatedStateMachineList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ReplicatedStateMachineList{ListMeta: obj.(*v1alpha1.ReplicatedStateMachineList).ListMeta} - for _, item := range obj.(*v1alpha1.ReplicatedStateMachineList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested replicatedStateMachines. -func (c *FakeReplicatedStateMachines) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(replicatedstatemachinesResource, c.ns, opts)) - -} - -// Create takes the representation of a replicatedStateMachine and creates it. Returns the server's representation of the replicatedStateMachine, and an error, if there is any. -func (c *FakeReplicatedStateMachines) Create(ctx context.Context, replicatedStateMachine *v1alpha1.ReplicatedStateMachine, opts v1.CreateOptions) (result *v1alpha1.ReplicatedStateMachine, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(replicatedstatemachinesResource, c.ns, replicatedStateMachine), &v1alpha1.ReplicatedStateMachine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ReplicatedStateMachine), err -} - -// Update takes the representation of a replicatedStateMachine and updates it. Returns the server's representation of the replicatedStateMachine, and an error, if there is any. -func (c *FakeReplicatedStateMachines) Update(ctx context.Context, replicatedStateMachine *v1alpha1.ReplicatedStateMachine, opts v1.UpdateOptions) (result *v1alpha1.ReplicatedStateMachine, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewUpdateAction(replicatedstatemachinesResource, c.ns, replicatedStateMachine), &v1alpha1.ReplicatedStateMachine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ReplicatedStateMachine), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeReplicatedStateMachines) UpdateStatus(ctx context.Context, replicatedStateMachine *v1alpha1.ReplicatedStateMachine, opts v1.UpdateOptions) (*v1alpha1.ReplicatedStateMachine, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(replicatedstatemachinesResource, "status", c.ns, replicatedStateMachine), &v1alpha1.ReplicatedStateMachine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ReplicatedStateMachine), err -} - -// Delete takes name of the replicatedStateMachine and deletes it. Returns an error if one occurs. -func (c *FakeReplicatedStateMachines) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(replicatedstatemachinesResource, c.ns, name, opts), &v1alpha1.ReplicatedStateMachine{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeReplicatedStateMachines) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(replicatedstatemachinesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ReplicatedStateMachineList{}) - return err -} - -// Patch applies the patch and returns the patched replicatedStateMachine. -func (c *FakeReplicatedStateMachines) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ReplicatedStateMachine, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(replicatedstatemachinesResource, c.ns, name, pt, data, subresources...), &v1alpha1.ReplicatedStateMachine{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ReplicatedStateMachine), err -} diff --git a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_workloads_client.go b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_workloads_client.go index 98b8b53c50a..22ecfe337a2 100644 --- a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_workloads_client.go +++ b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/fake/fake_workloads_client.go @@ -28,8 +28,8 @@ type FakeWorkloadsV1alpha1 struct { *testing.Fake } -func (c *FakeWorkloadsV1alpha1) ReplicatedStateMachines(namespace string) v1alpha1.ReplicatedStateMachineInterface { - return &FakeReplicatedStateMachines{c, namespace} +func (c *FakeWorkloadsV1alpha1) InstanceSets(namespace string) v1alpha1.InstanceSetInterface { + return &FakeInstanceSets{c, namespace} } // RESTClient returns a RESTClient that is used to communicate diff --git a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/generated_expansion.go b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/generated_expansion.go index 1b79a49eda4..0951d90426f 100644 --- a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/generated_expansion.go +++ b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/generated_expansion.go @@ -18,4 +18,4 @@ limitations under the License. 
package v1alpha1 -type ReplicatedStateMachineExpansion interface{} +type InstanceSetExpansion interface{} diff --git a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/instanceset.go b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/instanceset.go new file mode 100644 index 00000000000..3bf4a04a03a --- /dev/null +++ b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/instanceset.go @@ -0,0 +1,195 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + scheme "github.com/apecloud/kubeblocks/pkg/client/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// InstanceSetsGetter has a method to return a InstanceSetInterface. +// A group's client should implement this interface. +type InstanceSetsGetter interface { + InstanceSets(namespace string) InstanceSetInterface +} + +// InstanceSetInterface has methods to work with InstanceSet resources. 
+type InstanceSetInterface interface { + Create(ctx context.Context, instanceSet *v1alpha1.InstanceSet, opts v1.CreateOptions) (*v1alpha1.InstanceSet, error) + Update(ctx context.Context, instanceSet *v1alpha1.InstanceSet, opts v1.UpdateOptions) (*v1alpha1.InstanceSet, error) + UpdateStatus(ctx context.Context, instanceSet *v1alpha1.InstanceSet, opts v1.UpdateOptions) (*v1alpha1.InstanceSet, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.InstanceSet, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.InstanceSetList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InstanceSet, err error) + InstanceSetExpansion +} + +// instanceSets implements InstanceSetInterface +type instanceSets struct { + client rest.Interface + ns string +} + +// newInstanceSets returns a InstanceSets +func newInstanceSets(c *WorkloadsV1alpha1Client, namespace string) *instanceSets { + return &instanceSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the instanceSet, and returns the corresponding instanceSet object, and an error if there is any. +func (c *instanceSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.InstanceSet, err error) { + result = &v1alpha1.InstanceSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("instancesets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of InstanceSets that match those selectors. 
+func (c *instanceSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.InstanceSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.InstanceSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("instancesets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested instanceSets. +func (c *instanceSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("instancesets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a instanceSet and creates it. Returns the server's representation of the instanceSet, and an error, if there is any. +func (c *instanceSets) Create(ctx context.Context, instanceSet *v1alpha1.InstanceSet, opts v1.CreateOptions) (result *v1alpha1.InstanceSet, err error) { + result = &v1alpha1.InstanceSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("instancesets"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(instanceSet). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a instanceSet and updates it. Returns the server's representation of the instanceSet, and an error, if there is any. +func (c *instanceSets) Update(ctx context.Context, instanceSet *v1alpha1.InstanceSet, opts v1.UpdateOptions) (result *v1alpha1.InstanceSet, err error) { + result = &v1alpha1.InstanceSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("instancesets"). + Name(instanceSet.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(instanceSet). + Do(ctx). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *instanceSets) UpdateStatus(ctx context.Context, instanceSet *v1alpha1.InstanceSet, opts v1.UpdateOptions) (result *v1alpha1.InstanceSet, err error) { + result = &v1alpha1.InstanceSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("instancesets"). + Name(instanceSet.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(instanceSet). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the instanceSet and deletes it. Returns an error if one occurs. +func (c *instanceSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("instancesets"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *instanceSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("instancesets"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched instanceSet. +func (c *instanceSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InstanceSet, err error) { + result = &v1alpha1.InstanceSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("instancesets"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/replicatedstatemachine.go b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/replicatedstatemachine.go deleted file mode 100644 index 38f4dbd40ec..00000000000 --- a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/replicatedstatemachine.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright (C) 2022-2024 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - scheme "github.com/apecloud/kubeblocks/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ReplicatedStateMachinesGetter has a method to return a ReplicatedStateMachineInterface. -// A group's client should implement this interface. -type ReplicatedStateMachinesGetter interface { - ReplicatedStateMachines(namespace string) ReplicatedStateMachineInterface -} - -// ReplicatedStateMachineInterface has methods to work with ReplicatedStateMachine resources. 
-type ReplicatedStateMachineInterface interface { - Create(ctx context.Context, replicatedStateMachine *v1alpha1.ReplicatedStateMachine, opts v1.CreateOptions) (*v1alpha1.ReplicatedStateMachine, error) - Update(ctx context.Context, replicatedStateMachine *v1alpha1.ReplicatedStateMachine, opts v1.UpdateOptions) (*v1alpha1.ReplicatedStateMachine, error) - UpdateStatus(ctx context.Context, replicatedStateMachine *v1alpha1.ReplicatedStateMachine, opts v1.UpdateOptions) (*v1alpha1.ReplicatedStateMachine, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ReplicatedStateMachine, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ReplicatedStateMachineList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ReplicatedStateMachine, err error) - ReplicatedStateMachineExpansion -} - -// replicatedStateMachines implements ReplicatedStateMachineInterface -type replicatedStateMachines struct { - client rest.Interface - ns string -} - -// newReplicatedStateMachines returns a ReplicatedStateMachines -func newReplicatedStateMachines(c *WorkloadsV1alpha1Client, namespace string) *replicatedStateMachines { - return &replicatedStateMachines{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the replicatedStateMachine, and returns the corresponding replicatedStateMachine object, and an error if there is any. -func (c *replicatedStateMachines) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ReplicatedStateMachine, err error) { - result = &v1alpha1.ReplicatedStateMachine{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicatedstatemachines"). 
- Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicatedStateMachines that match those selectors. -func (c *replicatedStateMachines) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ReplicatedStateMachineList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ReplicatedStateMachineList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicatedstatemachines"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicatedStateMachines. -func (c *replicatedStateMachines) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicatedstatemachines"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a replicatedStateMachine and creates it. Returns the server's representation of the replicatedStateMachine, and an error, if there is any. -func (c *replicatedStateMachines) Create(ctx context.Context, replicatedStateMachine *v1alpha1.ReplicatedStateMachine, opts v1.CreateOptions) (result *v1alpha1.ReplicatedStateMachine, err error) { - result = &v1alpha1.ReplicatedStateMachine{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicatedstatemachines"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicatedStateMachine). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a replicatedStateMachine and updates it. 
Returns the server's representation of the replicatedStateMachine, and an error, if there is any. -func (c *replicatedStateMachines) Update(ctx context.Context, replicatedStateMachine *v1alpha1.ReplicatedStateMachine, opts v1.UpdateOptions) (result *v1alpha1.ReplicatedStateMachine, err error) { - result = &v1alpha1.ReplicatedStateMachine{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicatedstatemachines"). - Name(replicatedStateMachine.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicatedStateMachine). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *replicatedStateMachines) UpdateStatus(ctx context.Context, replicatedStateMachine *v1alpha1.ReplicatedStateMachine, opts v1.UpdateOptions) (result *v1alpha1.ReplicatedStateMachine, err error) { - result = &v1alpha1.ReplicatedStateMachine{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicatedstatemachines"). - Name(replicatedStateMachine.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(replicatedStateMachine). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the replicatedStateMachine and deletes it. Returns an error if one occurs. -func (c *replicatedStateMachines) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicatedstatemachines"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicatedStateMachines) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). 
- Resource("replicatedstatemachines"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched replicatedStateMachine. -func (c *replicatedStateMachines) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ReplicatedStateMachine, err error) { - result = &v1alpha1.ReplicatedStateMachine{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicatedstatemachines"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/workloads_client.go b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/workloads_client.go index d6a59b1e808..f53afd8e127 100644 --- a/pkg/client/clientset/versioned/typed/workloads/v1alpha1/workloads_client.go +++ b/pkg/client/clientset/versioned/typed/workloads/v1alpha1/workloads_client.go @@ -28,7 +28,7 @@ import ( type WorkloadsV1alpha1Interface interface { RESTClient() rest.Interface - ReplicatedStateMachinesGetter + InstanceSetsGetter } // WorkloadsV1alpha1Client is used to interact with features provided by the workloads.kubeblocks.io group. @@ -36,8 +36,8 @@ type WorkloadsV1alpha1Client struct { restClient rest.Interface } -func (c *WorkloadsV1alpha1Client) ReplicatedStateMachines(namespace string) ReplicatedStateMachineInterface { - return newReplicatedStateMachines(c, namespace) +func (c *WorkloadsV1alpha1Client) InstanceSets(namespace string) InstanceSetInterface { + return newInstanceSets(c, namespace) } // NewForConfig creates a new WorkloadsV1alpha1Client for the given config. 
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go index 6dcb08979f1..27636de12f3 100644 --- a/pkg/client/informers/externalversions/generic.go +++ b/pkg/client/informers/externalversions/generic.go @@ -108,8 +108,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().StorageProviders().Informer()}, nil // Group=workloads.kubeblocks.io, Version=v1alpha1 - case workloadsv1alpha1.SchemeGroupVersion.WithResource("replicatedstatemachines"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Workloads().V1alpha1().ReplicatedStateMachines().Informer()}, nil + case workloadsv1alpha1.SchemeGroupVersion.WithResource("instancesets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Workloads().V1alpha1().InstanceSets().Informer()}, nil } diff --git a/pkg/client/informers/externalversions/workloads/v1alpha1/replicatedstatemachine.go b/pkg/client/informers/externalversions/workloads/v1alpha1/instanceset.go similarity index 53% rename from pkg/client/informers/externalversions/workloads/v1alpha1/replicatedstatemachine.go rename to pkg/client/informers/externalversions/workloads/v1alpha1/instanceset.go index 226a3eaf41e..4092008e663 100644 --- a/pkg/client/informers/externalversions/workloads/v1alpha1/replicatedstatemachine.go +++ b/pkg/client/informers/externalversions/workloads/v1alpha1/instanceset.go @@ -32,59 +32,59 @@ import ( cache "k8s.io/client-go/tools/cache" ) -// ReplicatedStateMachineInformer provides access to a shared informer and lister for -// ReplicatedStateMachines. -type ReplicatedStateMachineInformer interface { +// InstanceSetInformer provides access to a shared informer and lister for +// InstanceSets. 
+type InstanceSetInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.ReplicatedStateMachineLister + Lister() v1alpha1.InstanceSetLister } -type replicatedStateMachineInformer struct { +type instanceSetInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc namespace string } -// NewReplicatedStateMachineInformer constructs a new informer for ReplicatedStateMachine type. +// NewInstanceSetInformer constructs a new informer for InstanceSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewReplicatedStateMachineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredReplicatedStateMachineInformer(client, namespace, resyncPeriod, indexers, nil) +func NewInstanceSetInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInstanceSetInformer(client, namespace, resyncPeriod, indexers, nil) } -// NewFilteredReplicatedStateMachineInformer constructs a new informer for ReplicatedStateMachine type. +// NewFilteredInstanceSetInformer constructs a new informer for InstanceSet type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredReplicatedStateMachineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredInstanceSetInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.WorkloadsV1alpha1().ReplicatedStateMachines(namespace).List(context.TODO(), options) + return client.WorkloadsV1alpha1().InstanceSets(namespace).List(context.TODO(), options) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.WorkloadsV1alpha1().ReplicatedStateMachines(namespace).Watch(context.TODO(), options) + return client.WorkloadsV1alpha1().InstanceSets(namespace).Watch(context.TODO(), options) }, }, - &workloadsv1alpha1.ReplicatedStateMachine{}, + &workloadsv1alpha1.InstanceSet{}, resyncPeriod, indexers, ) } -func (f *replicatedStateMachineInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredReplicatedStateMachineInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *instanceSetInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInstanceSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *replicatedStateMachineInformer) Informer() cache.SharedIndexInformer { - return 
f.factory.InformerFor(&workloadsv1alpha1.ReplicatedStateMachine{}, f.defaultInformer) +func (f *instanceSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&workloadsv1alpha1.InstanceSet{}, f.defaultInformer) } -func (f *replicatedStateMachineInformer) Lister() v1alpha1.ReplicatedStateMachineLister { - return v1alpha1.NewReplicatedStateMachineLister(f.Informer().GetIndexer()) +func (f *instanceSetInformer) Lister() v1alpha1.InstanceSetLister { + return v1alpha1.NewInstanceSetLister(f.Informer().GetIndexer()) } diff --git a/pkg/client/informers/externalversions/workloads/v1alpha1/interface.go b/pkg/client/informers/externalversions/workloads/v1alpha1/interface.go index e7e48d6ec2f..ff9b8134595 100644 --- a/pkg/client/informers/externalversions/workloads/v1alpha1/interface.go +++ b/pkg/client/informers/externalversions/workloads/v1alpha1/interface.go @@ -24,8 +24,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // ReplicatedStateMachines returns a ReplicatedStateMachineInformer. - ReplicatedStateMachines() ReplicatedStateMachineInformer + // InstanceSets returns a InstanceSetInformer. + InstanceSets() InstanceSetInformer } type version struct { @@ -39,7 +39,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// ReplicatedStateMachines returns a ReplicatedStateMachineInformer. -func (v *version) ReplicatedStateMachines() ReplicatedStateMachineInformer { - return &replicatedStateMachineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +// InstanceSets returns a InstanceSetInformer. 
+func (v *version) InstanceSets() InstanceSetInformer { + return &instanceSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } diff --git a/pkg/client/listers/workloads/v1alpha1/expansion_generated.go b/pkg/client/listers/workloads/v1alpha1/expansion_generated.go index 0e3fe2716bb..848f870e8e6 100644 --- a/pkg/client/listers/workloads/v1alpha1/expansion_generated.go +++ b/pkg/client/listers/workloads/v1alpha1/expansion_generated.go @@ -18,10 +18,10 @@ limitations under the License. package v1alpha1 -// ReplicatedStateMachineListerExpansion allows custom methods to be added to -// ReplicatedStateMachineLister. -type ReplicatedStateMachineListerExpansion interface{} +// InstanceSetListerExpansion allows custom methods to be added to +// InstanceSetLister. +type InstanceSetListerExpansion interface{} -// ReplicatedStateMachineNamespaceListerExpansion allows custom methods to be added to -// ReplicatedStateMachineNamespaceLister. -type ReplicatedStateMachineNamespaceListerExpansion interface{} +// InstanceSetNamespaceListerExpansion allows custom methods to be added to +// InstanceSetNamespaceLister. +type InstanceSetNamespaceListerExpansion interface{} diff --git a/pkg/client/listers/workloads/v1alpha1/instanceset.go b/pkg/client/listers/workloads/v1alpha1/instanceset.go new file mode 100644 index 00000000000..f5341343d4f --- /dev/null +++ b/pkg/client/listers/workloads/v1alpha1/instanceset.go @@ -0,0 +1,99 @@ +/* +Copyright (C) 2022-2024 ApeCloud Co., Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// InstanceSetLister helps list InstanceSets. +// All objects returned here must be treated as read-only. +type InstanceSetLister interface { + // List lists all InstanceSets in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.InstanceSet, err error) + // InstanceSets returns an object that can list and get InstanceSets. + InstanceSets(namespace string) InstanceSetNamespaceLister + InstanceSetListerExpansion +} + +// instanceSetLister implements the InstanceSetLister interface. +type instanceSetLister struct { + indexer cache.Indexer +} + +// NewInstanceSetLister returns a new InstanceSetLister. +func NewInstanceSetLister(indexer cache.Indexer) InstanceSetLister { + return &instanceSetLister{indexer: indexer} +} + +// List lists all InstanceSets in the indexer. +func (s *instanceSetLister) List(selector labels.Selector) (ret []*v1alpha1.InstanceSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.InstanceSet)) + }) + return ret, err +} + +// InstanceSets returns an object that can list and get InstanceSets. +func (s *instanceSetLister) InstanceSets(namespace string) InstanceSetNamespaceLister { + return instanceSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// InstanceSetNamespaceLister helps list and get InstanceSets. +// All objects returned here must be treated as read-only. +type InstanceSetNamespaceLister interface { + // List lists all InstanceSets in the indexer for a given namespace. + // Objects returned here must be treated as read-only. 
+ List(selector labels.Selector) (ret []*v1alpha1.InstanceSet, err error) + // Get retrieves the InstanceSet from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.InstanceSet, error) + InstanceSetNamespaceListerExpansion +} + +// instanceSetNamespaceLister implements the InstanceSetNamespaceLister +// interface. +type instanceSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all InstanceSets in the indexer for a given namespace. +func (s instanceSetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.InstanceSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.InstanceSet)) + }) + return ret, err +} + +// Get retrieves the InstanceSet from the indexer for a given namespace and name. +func (s instanceSetNamespaceLister) Get(name string) (*v1alpha1.InstanceSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("instanceset"), name) + } + return obj.(*v1alpha1.InstanceSet), nil +} diff --git a/pkg/client/listers/workloads/v1alpha1/replicatedstatemachine.go b/pkg/client/listers/workloads/v1alpha1/replicatedstatemachine.go deleted file mode 100644 index eee66bd2ed5..00000000000 --- a/pkg/client/listers/workloads/v1alpha1/replicatedstatemachine.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright (C) 2022-2024 ApeCloud Co., Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ReplicatedStateMachineLister helps list ReplicatedStateMachines. -// All objects returned here must be treated as read-only. -type ReplicatedStateMachineLister interface { - // List lists all ReplicatedStateMachines in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ReplicatedStateMachine, err error) - // ReplicatedStateMachines returns an object that can list and get ReplicatedStateMachines. - ReplicatedStateMachines(namespace string) ReplicatedStateMachineNamespaceLister - ReplicatedStateMachineListerExpansion -} - -// replicatedStateMachineLister implements the ReplicatedStateMachineLister interface. -type replicatedStateMachineLister struct { - indexer cache.Indexer -} - -// NewReplicatedStateMachineLister returns a new ReplicatedStateMachineLister. -func NewReplicatedStateMachineLister(indexer cache.Indexer) ReplicatedStateMachineLister { - return &replicatedStateMachineLister{indexer: indexer} -} - -// List lists all ReplicatedStateMachines in the indexer. 
-func (s *replicatedStateMachineLister) List(selector labels.Selector) (ret []*v1alpha1.ReplicatedStateMachine, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ReplicatedStateMachine)) - }) - return ret, err -} - -// ReplicatedStateMachines returns an object that can list and get ReplicatedStateMachines. -func (s *replicatedStateMachineLister) ReplicatedStateMachines(namespace string) ReplicatedStateMachineNamespaceLister { - return replicatedStateMachineNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// ReplicatedStateMachineNamespaceLister helps list and get ReplicatedStateMachines. -// All objects returned here must be treated as read-only. -type ReplicatedStateMachineNamespaceLister interface { - // List lists all ReplicatedStateMachines in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.ReplicatedStateMachine, err error) - // Get retrieves the ReplicatedStateMachine from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ReplicatedStateMachine, error) - ReplicatedStateMachineNamespaceListerExpansion -} - -// replicatedStateMachineNamespaceLister implements the ReplicatedStateMachineNamespaceLister -// interface. -type replicatedStateMachineNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all ReplicatedStateMachines in the indexer for a given namespace. -func (s replicatedStateMachineNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.ReplicatedStateMachine, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ReplicatedStateMachine)) - }) - return ret, err -} - -// Get retrieves the ReplicatedStateMachine from the indexer for a given namespace and name. 
-func (s replicatedStateMachineNamespaceLister) Get(name string) (*v1alpha1.ReplicatedStateMachine, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("replicatedstatemachine"), name) - } - return obj.(*v1alpha1.ReplicatedStateMachine), nil -} diff --git a/pkg/constant/const.go b/pkg/constant/const.go index cc8753ec5a4..e5782960638 100644 --- a/pkg/constant/const.go +++ b/pkg/constant/const.go @@ -209,7 +209,7 @@ const ( const ( DeploymentKind = "Deployment" StatefulSetKind = "StatefulSet" - RSMKind = "ReplicatedStateMachine" + InstanceSetKind = "InstanceSet" PodKind = "Pod" PersistentVolumeClaimKind = "PersistentVolumeClaim" CronJobKind = "CronJob" diff --git a/pkg/constant/pattern.go b/pkg/constant/pattern.go index f0864669480..992580378d4 100644 --- a/pkg/constant/pattern.go +++ b/pkg/constant/pattern.go @@ -83,13 +83,13 @@ func GenerateDefaultServiceAccountName(name string) string { return fmt.Sprintf("%s-%s", KBLowerPrefix, name) } -// GenerateRSMNamePattern generates rsm name pattern -func GenerateRSMNamePattern(clusterName, compName string) string { +// GenerateWorkloadNamePattern generates the workload name pattern +func GenerateWorkloadNamePattern(clusterName, compName string) string { return fmt.Sprintf("%s-%s", clusterName, compName) } -// GenerateRSMServiceNamePattern generates rsm name pattern -func GenerateRSMServiceNamePattern(rsmName string) string { +// GenerateServiceNamePattern generates the service name pattern +func GenerateServiceNamePattern(rsmName string) string { return fmt.Sprintf("%s-headless", rsmName) } diff --git a/pkg/controller/builder/builder_base_test.go b/pkg/controller/builder/builder_base_test.go index ed3bc8eb112..8dff9c8854a 100644 --- a/pkg/controller/builder/builder_base_test.go +++ b/pkg/controller/builder/builder_base_test.go @@ -44,10 +44,10 @@ var _ = Describe("base builder", func() { 
annotations := map[string]string{annotationKey3: annotationValue3} controllerRevision := "wer-23e23-sedfwe--34r23" finalizer := "foo-bar" - owner := NewReplicatedStateMachineBuilder(ns, name).GetObject() + owner := NewInstanceSetBuilder(ns, name).GetObject() owner.UID = "sdfwsedqw-swed-sdswe" ownerAPIVersion := "workloads.kubeblocks.io/v1alpha1" - ownerKind := "ReplicatedStateMachine" + ownerKind := "InstanceSet" obj := NewConfigMapBuilder(ns, name). SetUID(uid). AddLabels(labelKey1, labelValue1, labelKey2, labelValue2). diff --git a/pkg/controller/builder/builder_replicated_state_machine.go b/pkg/controller/builder/builder_instance_set.go similarity index 51% rename from pkg/controller/builder/builder_replicated_state_machine.go rename to pkg/controller/builder/builder_instance_set.go index 9df53954cc4..09d4c93d990 100644 --- a/pkg/controller/builder/builder_replicated_state_machine.go +++ b/pkg/controller/builder/builder_instance_set.go @@ -27,43 +27,43 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) -type ReplicatedStateMachineBuilder struct { - BaseBuilder[workloads.ReplicatedStateMachine, *workloads.ReplicatedStateMachine, ReplicatedStateMachineBuilder] +type InstanceSetBuilder struct { + BaseBuilder[workloads.InstanceSet, *workloads.InstanceSet, InstanceSetBuilder] } -func NewReplicatedStateMachineBuilder(namespace, name string) *ReplicatedStateMachineBuilder { - builder := &ReplicatedStateMachineBuilder{} +func NewInstanceSetBuilder(namespace, name string) *InstanceSetBuilder { + builder := &InstanceSetBuilder{} replicas := int32(1) builder.init(namespace, name, - &workloads.ReplicatedStateMachine{ - Spec: workloads.ReplicatedStateMachineSpec{ + &workloads.InstanceSet{ + Spec: workloads.InstanceSetSpec{ Replicas: &replicas, }, }, builder) return builder } -func (builder *ReplicatedStateMachineBuilder) SetReplicas(replicas int32) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetReplicas(replicas int32) 
*InstanceSetBuilder { builder.get().Spec.Replicas = &replicas return builder } -func (builder *ReplicatedStateMachineBuilder) SetMinReadySeconds(minReadySeconds int32) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetMinReadySeconds(minReadySeconds int32) *InstanceSetBuilder { builder.get().Spec.MinReadySeconds = minReadySeconds return builder } -func (builder *ReplicatedStateMachineBuilder) AddMatchLabel(key, value string) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) AddMatchLabel(key, value string) *InstanceSetBuilder { labels := make(map[string]string, 1) labels[key] = value return builder.AddMatchLabelsInMap(labels) } -func (builder *ReplicatedStateMachineBuilder) AddMatchLabels(keyValues ...string) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) AddMatchLabels(keyValues ...string) *InstanceSetBuilder { return builder.AddMatchLabelsInMap(WithMap(keyValues...)) } -func (builder *ReplicatedStateMachineBuilder) AddMatchLabelsInMap(labels map[string]string) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) AddMatchLabelsInMap(labels map[string]string) *InstanceSetBuilder { selector := builder.get().Spec.Selector if selector == nil { selector = &metav1.LabelSelector{} @@ -80,49 +80,49 @@ func (builder *ReplicatedStateMachineBuilder) AddMatchLabelsInMap(labels map[str return builder } -func (builder *ReplicatedStateMachineBuilder) SetServiceName(serviceName string) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetServiceName(serviceName string) *InstanceSetBuilder { builder.get().Spec.ServiceName = serviceName return builder } -func (builder *ReplicatedStateMachineBuilder) SetRoles(roles []workloads.ReplicaRole) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetRoles(roles []workloads.ReplicaRole) *InstanceSetBuilder { builder.get().Spec.Roles = roles return builder } -func (builder *ReplicatedStateMachineBuilder) SetTemplate(template 
corev1.PodTemplateSpec) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetTemplate(template corev1.PodTemplateSpec) *InstanceSetBuilder { builder.get().Spec.Template = template return builder } -func (builder *ReplicatedStateMachineBuilder) AddVolumeClaimTemplates(templates ...corev1.PersistentVolumeClaim) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) AddVolumeClaimTemplates(templates ...corev1.PersistentVolumeClaim) *InstanceSetBuilder { templateList := builder.get().Spec.VolumeClaimTemplates templateList = append(templateList, templates...) builder.get().Spec.VolumeClaimTemplates = templateList return builder } -func (builder *ReplicatedStateMachineBuilder) SetVolumeClaimTemplates(templates ...corev1.PersistentVolumeClaim) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetVolumeClaimTemplates(templates ...corev1.PersistentVolumeClaim) *InstanceSetBuilder { builder.get().Spec.VolumeClaimTemplates = templates return builder } -func (builder *ReplicatedStateMachineBuilder) SetPodManagementPolicy(policy apps.PodManagementPolicyType) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetPodManagementPolicy(policy apps.PodManagementPolicyType) *InstanceSetBuilder { builder.get().Spec.PodManagementPolicy = policy return builder } -func (builder *ReplicatedStateMachineBuilder) SetUpdateStrategy(strategy apps.StatefulSetUpdateStrategy) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetUpdateStrategy(strategy apps.StatefulSetUpdateStrategy) *InstanceSetBuilder { builder.get().Spec.UpdateStrategy = strategy return builder } -func (builder *ReplicatedStateMachineBuilder) SetUpdateStrategyType(strategyType apps.StatefulSetUpdateStrategyType) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetUpdateStrategyType(strategyType apps.StatefulSetUpdateStrategyType) *InstanceSetBuilder { builder.get().Spec.UpdateStrategy.Type = strategyType return builder } 
-func (builder *ReplicatedStateMachineBuilder) SetCustomHandler(handler []workloads.Action) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetCustomHandler(handler []workloads.Action) *InstanceSetBuilder { roleProbe := builder.get().Spec.RoleProbe if roleProbe == nil { roleProbe = &workloads.RoleProbe{} @@ -132,7 +132,7 @@ func (builder *ReplicatedStateMachineBuilder) SetCustomHandler(handler []workloa return builder } -func (builder *ReplicatedStateMachineBuilder) AddCustomHandler(handler workloads.Action) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) AddCustomHandler(handler workloads.Action) *InstanceSetBuilder { roleProbe := builder.get().Spec.RoleProbe if roleProbe == nil { roleProbe = &workloads.RoleProbe{} @@ -144,27 +144,27 @@ func (builder *ReplicatedStateMachineBuilder) AddCustomHandler(handler workloads return builder } -func (builder *ReplicatedStateMachineBuilder) SetRoleProbe(roleProbe *workloads.RoleProbe) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetRoleProbe(roleProbe *workloads.RoleProbe) *InstanceSetBuilder { builder.get().Spec.RoleProbe = roleProbe return builder } -func (builder *ReplicatedStateMachineBuilder) SetService(service *corev1.Service) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetService(service *corev1.Service) *InstanceSetBuilder { builder.get().Spec.Service = service return builder } -func (builder *ReplicatedStateMachineBuilder) SetAlternativeServices(services []corev1.Service) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetAlternativeServices(services []corev1.Service) *InstanceSetBuilder { builder.get().Spec.AlternativeServices = services return builder } -func (builder *ReplicatedStateMachineBuilder) SetMembershipReconfiguration(reconfiguration *workloads.MembershipReconfiguration) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetMembershipReconfiguration(reconfiguration 
*workloads.MembershipReconfiguration) *InstanceSetBuilder { builder.get().Spec.MembershipReconfiguration = reconfiguration return builder } -func (builder *ReplicatedStateMachineBuilder) SetMemberUpdateStrategy(strategy *workloads.MemberUpdateStrategy) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetMemberUpdateStrategy(strategy *workloads.MemberUpdateStrategy) *InstanceSetBuilder { builder.get().Spec.MemberUpdateStrategy = strategy if strategy != nil { builder.SetUpdateStrategyType(apps.OnDeleteStatefulSetStrategyType) @@ -172,17 +172,17 @@ func (builder *ReplicatedStateMachineBuilder) SetMemberUpdateStrategy(strategy * return builder } -func (builder *ReplicatedStateMachineBuilder) SetPaused(paused bool) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetPaused(paused bool) *InstanceSetBuilder { builder.get().Spec.Paused = paused return builder } -func (builder *ReplicatedStateMachineBuilder) SetCredential(credential workloads.Credential) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetCredential(credential workloads.Credential) *InstanceSetBuilder { builder.get().Spec.Credential = &credential return builder } -func (builder *ReplicatedStateMachineBuilder) SetInstances(instances []workloads.InstanceTemplate) *ReplicatedStateMachineBuilder { +func (builder *InstanceSetBuilder) SetInstances(instances []workloads.InstanceTemplate) *InstanceSetBuilder { builder.get().Spec.Instances = instances return builder } diff --git a/pkg/controller/builder/builder_replicated_state_machine_test.go b/pkg/controller/builder/builder_instance_set_test.go similarity index 70% rename from pkg/controller/builder/builder_replicated_state_machine_test.go rename to pkg/controller/builder/builder_instance_set_test.go index d274854f14b..a22052a1648 100644 --- a/pkg/controller/builder/builder_replicated_state_machine_test.go +++ b/pkg/controller/builder/builder_instance_set_test.go @@ -170,7 +170,7 @@ var _ = 
Describe("replicated_state_machine builder", func() { Replicas: func() *int32 { r := int32(1); return &r }(), }, } - rsm := NewReplicatedStateMachineBuilder(ns, name). + its := NewInstanceSetBuilder(ns, name). SetReplicas(replicas). SetMinReadySeconds(minReadySeconds). AddMatchLabel(selectorKey1, selectorValue1). @@ -196,46 +196,46 @@ var _ = Describe("replicated_state_machine builder", func() { SetInstances(instances). GetObject() - Expect(rsm.Name).Should(Equal(name)) - Expect(rsm.Namespace).Should(Equal(ns)) - Expect(rsm.Spec.Replicas).ShouldNot(BeNil()) - Expect(*rsm.Spec.Replicas).Should(Equal(replicas)) - Expect(rsm.Spec.Selector).ShouldNot(BeNil()) - Expect(rsm.Spec.Selector.MatchLabels).Should(HaveLen(4)) - Expect(rsm.Spec.Selector.MatchLabels[selectorKey1]).Should(Equal(selectorValue1)) - Expect(rsm.Spec.Selector.MatchLabels[selectorKey2]).Should(Equal(selectorValue2)) - Expect(rsm.Spec.Selector.MatchLabels[selectorKey3]).Should(Equal(selectorValue3)) - Expect(rsm.Spec.Selector.MatchLabels[selectorKey4]).Should(Equal(selectorValue4)) - Expect(rsm.Spec.ServiceName).Should(Equal(serviceName)) - Expect(rsm.Spec.Roles).Should(HaveLen(1)) - Expect(rsm.Spec.Roles[0]).Should(Equal(role)) - Expect(rsm.Spec.MembershipReconfiguration).ShouldNot(BeNil()) - Expect(*rsm.Spec.MembershipReconfiguration).Should(Equal(reconfiguration)) - Expect(rsm.Spec.Template).Should(Equal(template)) - Expect(rsm.Spec.VolumeClaimTemplates).Should(HaveLen(2)) - Expect(rsm.Spec.VolumeClaimTemplates[0]).Should(Equal(vcs[0])) - Expect(rsm.Spec.VolumeClaimTemplates[1]).Should(Equal(vc)) - Expect(rsm.Spec.PodManagementPolicy).Should(Equal(policy)) - Expect(rsm.Spec.UpdateStrategy.Type).Should(Equal(strategyType)) - Expect(rsm.Spec.UpdateStrategy.RollingUpdate).ShouldNot(BeNil()) - Expect(rsm.Spec.UpdateStrategy.RollingUpdate.Partition).ShouldNot(BeNil()) - Expect(*rsm.Spec.UpdateStrategy.RollingUpdate.Partition).Should(Equal(partition)) - 
Expect(rsm.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable).ShouldNot(BeNil()) - Expect(rsm.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable).ShouldNot(Equal(maxUnavailable)) - Expect(rsm.Spec.RoleProbe).ShouldNot(BeNil()) - Expect(rsm.Spec.RoleProbe.InitialDelaySeconds).Should(Equal(delay)) - Expect(rsm.Spec.RoleProbe.CustomHandler).Should(HaveLen(2)) - Expect(rsm.Spec.RoleProbe.CustomHandler[0]).Should(Equal(actions[0])) - Expect(rsm.Spec.RoleProbe.CustomHandler[1]).Should(Equal(action)) - Expect(rsm.Spec.MemberUpdateStrategy).ShouldNot(BeNil()) - Expect(*rsm.Spec.MemberUpdateStrategy).Should(Equal(memberUpdateStrategy)) - Expect(rsm.Spec.Service).ShouldNot(BeNil()) - Expect(rsm.Spec.Service).Should(BeEquivalentTo(service)) - Expect(rsm.Spec.AlternativeServices).ShouldNot(BeNil()) - Expect(rsm.Spec.AlternativeServices).Should(Equal(alternativeServices)) - Expect(rsm.Spec.Paused).Should(Equal(paused)) - Expect(rsm.Spec.Credential).ShouldNot(BeNil()) - Expect(*rsm.Spec.Credential).Should(Equal(credential)) - Expect(rsm.Spec.Instances).Should(Equal(instances)) + Expect(its.Name).Should(Equal(name)) + Expect(its.Namespace).Should(Equal(ns)) + Expect(its.Spec.Replicas).ShouldNot(BeNil()) + Expect(*its.Spec.Replicas).Should(Equal(replicas)) + Expect(its.Spec.Selector).ShouldNot(BeNil()) + Expect(its.Spec.Selector.MatchLabels).Should(HaveLen(4)) + Expect(its.Spec.Selector.MatchLabels[selectorKey1]).Should(Equal(selectorValue1)) + Expect(its.Spec.Selector.MatchLabels[selectorKey2]).Should(Equal(selectorValue2)) + Expect(its.Spec.Selector.MatchLabels[selectorKey3]).Should(Equal(selectorValue3)) + Expect(its.Spec.Selector.MatchLabels[selectorKey4]).Should(Equal(selectorValue4)) + Expect(its.Spec.ServiceName).Should(Equal(serviceName)) + Expect(its.Spec.Roles).Should(HaveLen(1)) + Expect(its.Spec.Roles[0]).Should(Equal(role)) + Expect(its.Spec.MembershipReconfiguration).ShouldNot(BeNil()) + Expect(*its.Spec.MembershipReconfiguration).Should(Equal(reconfiguration)) + 
Expect(its.Spec.Template).Should(Equal(template)) + Expect(its.Spec.VolumeClaimTemplates).Should(HaveLen(2)) + Expect(its.Spec.VolumeClaimTemplates[0]).Should(Equal(vcs[0])) + Expect(its.Spec.VolumeClaimTemplates[1]).Should(Equal(vc)) + Expect(its.Spec.PodManagementPolicy).Should(Equal(policy)) + Expect(its.Spec.UpdateStrategy.Type).Should(Equal(strategyType)) + Expect(its.Spec.UpdateStrategy.RollingUpdate).ShouldNot(BeNil()) + Expect(its.Spec.UpdateStrategy.RollingUpdate.Partition).ShouldNot(BeNil()) + Expect(*its.Spec.UpdateStrategy.RollingUpdate.Partition).Should(Equal(partition)) + Expect(its.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable).ShouldNot(BeNil()) + Expect(its.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable).ShouldNot(Equal(maxUnavailable)) + Expect(its.Spec.RoleProbe).ShouldNot(BeNil()) + Expect(its.Spec.RoleProbe.InitialDelaySeconds).Should(Equal(delay)) + Expect(its.Spec.RoleProbe.CustomHandler).Should(HaveLen(2)) + Expect(its.Spec.RoleProbe.CustomHandler[0]).Should(Equal(actions[0])) + Expect(its.Spec.RoleProbe.CustomHandler[1]).Should(Equal(action)) + Expect(its.Spec.MemberUpdateStrategy).ShouldNot(BeNil()) + Expect(*its.Spec.MemberUpdateStrategy).Should(Equal(memberUpdateStrategy)) + Expect(its.Spec.Service).ShouldNot(BeNil()) + Expect(its.Spec.Service).Should(BeEquivalentTo(service)) + Expect(its.Spec.AlternativeServices).ShouldNot(BeNil()) + Expect(its.Spec.AlternativeServices).Should(Equal(alternativeServices)) + Expect(its.Spec.Paused).Should(Equal(paused)) + Expect(its.Spec.Credential).ShouldNot(BeNil()) + Expect(*its.Spec.Credential).Should(Equal(credential)) + Expect(its.Spec.Instances).Should(Equal(instances)) }) }) diff --git a/pkg/controller/component/component_definition_convertor.go b/pkg/controller/component/component_definition_convertor.go index e405f559a74..0299eba2377 100644 --- a/pkg/controller/component/component_definition_convertor.go +++ b/pkg/controller/component/component_definition_convertor.go @@ -446,7 +446,7 @@ 
func (c *compDefRolesConvertor) convert(args ...any) (any, error) { // if rsm spec is not nil, convert rsm role first. if clusterCompDef.RSMSpec != nil { - return c.convertRsmRole(clusterCompDef) + return c.convertInstanceSetRole(clusterCompDef) } switch clusterCompDef.WorkloadType { @@ -477,7 +477,7 @@ func (c *compDefRolesConvertor) convert(args ...any) (any, error) { } } -func (c *compDefRolesConvertor) convertRsmRole(clusterCompDef *appsv1alpha1.ClusterComponentDefinition) (any, error) { +func (c *compDefRolesConvertor) convertInstanceSetRole(clusterCompDef *appsv1alpha1.ClusterComponentDefinition) (any, error) { if clusterCompDef.RSMSpec == nil { return nil, nil } diff --git a/pkg/controller/component/component_definition_convertor_test.go b/pkg/controller/component/component_definition_convertor_test.go index 8de1695f88a..abbfaaef157 100644 --- a/pkg/controller/component/component_definition_convertor_test.go +++ b/pkg/controller/component/component_definition_convertor_test.go @@ -769,7 +769,7 @@ var _ = Describe("Component Definition Convertor", func() { Expect(res).Should(BeEquivalentTo(expectedRoles)) }) - It("rsm spec roles convertor", func() { + It("InstanceSet spec roles convertor", func() { convertor := &compDefRolesConvertor{} clusterCompDef.RSMSpec = &appsv1alpha1.RSMSpec{ Roles: []workloads.ReplicaRole{ @@ -933,19 +933,19 @@ var _ = Describe("Component Definition Convertor", func() { Expect(*actions.RoleProbe).Should(BeEquivalentTo(*expectedRoleProbe)) }) - It("rsm spec role probe convertor", func() { + It("ITS spec role probe convertor", func() { convertor := &compDefLifecycleActionsConvertor{} mockCommand := []string{ - "mock-rsm-role-probe-command", + "mock-its-role-probe-command", } mockArgs := []string{ - "mock-rsm-role-probe-args", + "mock-its-role-probe-args", } clusterCompDef.RSMSpec = &appsv1alpha1.RSMSpec{ RoleProbe: &workloads.RoleProbe{ CustomHandler: []workloads.Action{ { - Image: "mock-rsm-role-probe-image", + Image: 
"mock-its-role-probe-image", Command: mockCommand, Args: mockArgs, }, @@ -959,7 +959,7 @@ var _ = Describe("Component Definition Convertor", func() { Expect(actions.RoleProbe).ShouldNot(BeNil()) Expect(*actions.RoleProbe.BuiltinHandler).Should(BeEquivalentTo(appsv1alpha1.WeSQLBuiltinActionHandler)) Expect(actions.RoleProbe.CustomHandler).ShouldNot(BeNil()) - Expect(actions.RoleProbe.CustomHandler.Image).Should(BeEquivalentTo("mock-rsm-role-probe-image")) + Expect(actions.RoleProbe.CustomHandler.Image).Should(BeEquivalentTo("mock-its-role-probe-image")) Expect(actions.RoleProbe.CustomHandler.Exec.Command).Should(BeEquivalentTo(mockCommand)) Expect(actions.RoleProbe.CustomHandler.Exec.Args).Should(BeEquivalentTo(mockArgs)) }) diff --git a/pkg/controller/component/pod_utils.go b/pkg/controller/component/pod_utils.go index dd2ac349936..114a518b7c9 100644 --- a/pkg/controller/component/pod_utils.go +++ b/pkg/controller/component/pod_utils.go @@ -62,28 +62,28 @@ func GetComponentPodListWithRole(ctx context.Context, cli client.Reader, cluster // IsComponentPodsWithLatestRevision checks whether the underlying pod spec matches the one declared in the Cluster/Component. 
func IsComponentPodsWithLatestRevision(ctx context.Context, cli client.Reader, - cluster *appsv1alpha1.Cluster, rsm *workloads.ReplicatedStateMachine) (bool, error) { - if cluster == nil || rsm == nil { + cluster *appsv1alpha1.Cluster, its *workloads.InstanceSet) (bool, error) { + if cluster == nil || its == nil { return false, nil } - // check whether component spec has been sent to rsm - rsmComponentGeneration := rsm.GetAnnotations()[constant.KubeBlocksGenerationKey] + // check whether component spec has been sent to the underlying workload + itsComponentGeneration := its.GetAnnotations()[constant.KubeBlocksGenerationKey] if cluster.Status.ObservedGeneration != cluster.Generation || - rsmComponentGeneration != strconv.FormatInt(cluster.Generation, 10) { + itsComponentGeneration != strconv.FormatInt(cluster.Generation, 10) { return false, nil } - // check whether rsm spec has been sent to the underlying workload(sts) - if rsm.Status.ObservedGeneration != rsm.Generation { + // check whether its spec has been sent to the underlying workload + if its.Status.ObservedGeneration != its.Generation { return false, nil } - if rsm.Status.CurrentGeneration != rsm.Generation { + if its.Status.CurrentGeneration != its.Generation { return false, nil } - // TODO: depends on the workload (RSM) + // TODO: depends on the workload (InstanceSet) // check whether the underlying workload(sts) has sent the latest template to pods sts := &appsv1.StatefulSet{} - if err := cli.Get(ctx, client.ObjectKeyFromObject(rsm), sts); err != nil { + if err := cli.Get(ctx, client.ObjectKeyFromObject(its), sts); err != nil { if apierrors.IsNotFound(err) { return true, nil } @@ -92,7 +92,7 @@ func IsComponentPodsWithLatestRevision(ctx context.Context, cli client.Reader, if sts.Status.ObservedGeneration != sts.Generation { return false, nil } - pods, err := ListPodOwnedByComponent(ctx, cli, rsm.Namespace, rsm.Spec.Selector.MatchLabels, inDataContext()) + pods, err := ListPodOwnedByComponent(ctx, cli, 
its.Namespace, its.Spec.Selector.MatchLabels, inDataContext()) if err != nil { return false, err } diff --git a/pkg/controller/component/rsm_convertor.go b/pkg/controller/component/rsm_convertor.go index 98540108ad9..389486d8369 100644 --- a/pkg/controller/component/rsm_convertor.go +++ b/pkg/controller/component/rsm_convertor.go @@ -30,67 +30,67 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" ) -// BuildRSMFrom builds a new Component object based on SynthesizedComponent. -func BuildRSMFrom(synthesizeComp *SynthesizedComponent, protoRSM *workloads.ReplicatedStateMachine) (*workloads.ReplicatedStateMachine, error) { +// BuildWorkloadFrom builds a new Component object based on SynthesizedComponent. +func BuildWorkloadFrom(synthesizeComp *SynthesizedComponent, protoITS *workloads.InstanceSet) (*workloads.InstanceSet, error) { if synthesizeComp == nil { return nil, nil } - if protoRSM == nil { - protoRSM = &workloads.ReplicatedStateMachine{} + if protoITS == nil { + protoITS = &workloads.InstanceSet{} } convertors := map[string]convertor{ - "service": &rsmServiceConvertor{}, - "alternativeservices": &rsmAlternativeServicesConvertor{}, - "roles": &rsmRolesConvertor{}, - "roleprobe": &rsmRoleProbeConvertor{}, - "credential": &rsmCredentialConvertor{}, - "membershipreconfiguration": &rsmMembershipReconfigurationConvertor{}, - "memberupdatestrategy": &rsmMemberUpdateStrategyConvertor{}, - "podmanagementpolicy": &rsmPodManagementPolicyConvertor{}, - "updatestrategy": &rsmUpdateStrategyConvertor{}, - "instances": &rsmInstancesConvertor{}, - "offlineinstances": &rsmOfflineInstancesConvertor{}, - } - if err := covertObject(convertors, &protoRSM.Spec, synthesizeComp); err != nil { + "service": &itsServiceConvertor{}, + "alternativeservices": &itsAlternativeServicesConvertor{}, + "roles": &itsRolesConvertor{}, + "roleprobe": &itsRoleProbeConvertor{}, + "credential": &itsCredentialConvertor{}, + "membershipreconfiguration": &itsMembershipReconfigurationConvertor{}, + 
"memberupdatestrategy": &itsMemberUpdateStrategyConvertor{}, + "podmanagementpolicy": &itsPodManagementPolicyConvertor{}, + "updatestrategy": &itsUpdateStrategyConvertor{}, + "instances": &itsInstancesConvertor{}, + "offlineinstances": &itsOfflineInstancesConvertor{}, + } + if err := covertObject(convertors, &protoITS.Spec, synthesizeComp); err != nil { return nil, err } - return protoRSM, nil + return protoITS, nil } -// rsmServiceConvertor is an implementation of the convertor interface, used to convert the given object into ReplicatedStateMachine.Spec.Service. -type rsmServiceConvertor struct{} +// itsServiceConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.Service. +type itsServiceConvertor struct{} -// rsmAlternativeServicesConvertor is an implementation of the convertor interface, used to convert the given object into ReplicatedStateMachine.Spec.AlternativeServices. -type rsmAlternativeServicesConvertor struct{} +// itsAlternativeServicesConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.AlternativeServices. +type itsAlternativeServicesConvertor struct{} -// rsmRolesConvertor is an implementation of the convertor interface, used to convert the given object into ReplicatedStateMachine.Spec.Roles. -type rsmRolesConvertor struct{} +// itsRolesConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.Roles. +type itsRolesConvertor struct{} -// rsmRoleProbeConvertor is an implementation of the convertor interface, used to convert the given object into ReplicatedStateMachine.Spec.RoleProbe. -type rsmRoleProbeConvertor struct{} +// itsRoleProbeConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.RoleProbe. 
+type itsRoleProbeConvertor struct{} -// rsmCredentialConvertor is an implementation of the convertor interface, used to convert the given object into ReplicatedStateMachine.Spec.Credential. -type rsmCredentialConvertor struct{} +// itsCredentialConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.Credential. +type itsCredentialConvertor struct{} -// rsmMembershipReconfigurationConvertor is an implementation of the convertor interface, used to convert the given object into ReplicatedStateMachine.Spec.MembershipReconfiguration. -type rsmMembershipReconfigurationConvertor struct{} +// itsMembershipReconfigurationConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.MembershipReconfiguration. +type itsMembershipReconfigurationConvertor struct{} -// rsmMemberUpdateStrategyConvertor is an implementation of the convertor interface, used to convert the given object into ReplicatedStateMachine.Spec.MemberUpdateStrategy. -type rsmMemberUpdateStrategyConvertor struct{} +// itsMemberUpdateStrategyConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.MemberUpdateStrategy. +type itsMemberUpdateStrategyConvertor struct{} -func (c *rsmMemberUpdateStrategyConvertor) convert(args ...any) (any, error) { - synthesizeComp, err := parseRSMConvertorArgs(args...) +func (c *itsMemberUpdateStrategyConvertor) convert(args ...any) (any, error) { + synthesizeComp, err := parseITSConvertorArgs(args...) if err != nil { return nil, err } return getMemberUpdateStrategy(synthesizeComp), nil } -// rsmPodManagementPolicyConvertor is an implementation of the convertor interface, used to convert the given object into ReplicatedStateMachine.Spec.PodManagementPolicy. 
-type rsmPodManagementPolicyConvertor struct{} +// itsPodManagementPolicyConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.PodManagementPolicy. +type itsPodManagementPolicyConvertor struct{} -func (c *rsmPodManagementPolicyConvertor) convert(args ...any) (any, error) { - synthesizedComp, err := parseRSMConvertorArgs(args...) +func (c *itsPodManagementPolicyConvertor) convert(args ...any) (any, error) { + synthesizedComp, err := parseITSConvertorArgs(args...) if err != nil { return nil, err } @@ -104,11 +104,11 @@ func (c *rsmPodManagementPolicyConvertor) convert(args ...any) (any, error) { return appsv1.ParallelPodManagement, nil } -// rsmUpdateStrategyConvertor is an implementation of the convertor interface, used to convert the given object into ReplicatedStateMachine.Spec.Instances. -type rsmUpdateStrategyConvertor struct{} +// itsUpdateStrategyConvertor is an implementation of the convertor interface, used to convert the given object into InstanceSet.Spec.Instances. +type itsUpdateStrategyConvertor struct{} -func (c *rsmUpdateStrategyConvertor) convert(args ...any) (any, error) { - synthesizedComp, err := parseRSMConvertorArgs(args...) +func (c *itsUpdateStrategyConvertor) convert(args ...any) (any, error) { + synthesizedComp, err := parseITSConvertorArgs(args...) if err != nil { return nil, err } @@ -119,11 +119,11 @@ func (c *rsmUpdateStrategyConvertor) convert(args ...any) (any, error) { return nil, nil } -// rsmInstancesConvertor converts component instanceTemplate to rsm instanceTemplate -type rsmInstancesConvertor struct{} +// itsInstancesConvertor converts component instanceTemplate to ITS instanceTemplate +type itsInstancesConvertor struct{} -func (c *rsmInstancesConvertor) convert(args ...any) (any, error) { - synthesizedComp, err := parseRSMConvertorArgs(args...) +func (c *itsInstancesConvertor) convert(args ...any) (any, error) { + synthesizedComp, err := parseITSConvertorArgs(args...) 
if err != nil { return nil, err } @@ -135,11 +135,11 @@ func (c *rsmInstancesConvertor) convert(args ...any) (any, error) { return instances, nil } -// rsmOfflineInstancesConvertor converts component offlineInstances to rsm offlineInstances -type rsmOfflineInstancesConvertor struct{} +// itsOfflineInstancesConvertor converts component offlineInstances to ITS offlineInstances +type itsOfflineInstancesConvertor struct{} -func (c *rsmOfflineInstancesConvertor) convert(args ...any) (any, error) { - synthesizedComp, err := parseRSMConvertorArgs(args...) +func (c *itsOfflineInstancesConvertor) convert(args ...any) (any, error) { + synthesizedComp, err := parseITSConvertorArgs(args...) if err != nil { return nil, err } @@ -170,8 +170,8 @@ func AppsInstanceToWorkloadInstance(instance *appsv1alpha1.InstanceTemplate) *wo } } -// parseRSMConvertorArgs parses the args of rsm convertor. -func parseRSMConvertorArgs(args ...any) (*SynthesizedComponent, error) { +// parseITSConvertorArgs parses the args of ITS convertor. +func parseITSConvertorArgs(args ...any) (*SynthesizedComponent, error) { synthesizeComp, ok := args[0].(*SynthesizedComponent) if !ok { return nil, errors.New("args[0] not a SynthesizedComponent object") @@ -200,13 +200,13 @@ func getMemberUpdateStrategy(synthesizedComp *SynthesizedComponent) *workloads.M } } -// rsmServiceConvertor converts the given object into ReplicatedStateMachine.Spec.Service. -// TODO(xingran): ComponentServices are not consistent with ReplicatedStateMachine.Spec.Service, If it is based on the new ComponentDefinition API, -// the services is temporarily handled in the component controller, and the corresponding ReplicatedStateMachine.Spec.Service is temporarily set nil. -func (c *rsmServiceConvertor) convert(args ...any) (any, error) { +// itsServiceConvertor converts the given object into InstanceSet.Spec.Service. 
+// TODO(xingran): ComponentServices are not consistent with InstanceSet.Spec.Service, If it is based on the new ComponentDefinition API, +// the services is temporarily handled in the component controller, and the corresponding InstanceSet.Spec.Service is temporarily set nil. +func (c *itsServiceConvertor) convert(args ...any) (any, error) { /* var compService appsv1alpha1.ComponentService - _, synthesizeComp, err := parseRSMConvertorArgs(args...) + _, synthesizeComp, err := parseITSConvertorArgs(args...) if err != nil { return nil, err } @@ -214,42 +214,42 @@ func (c *rsmServiceConvertor) convert(args ...any) (any, error) { if len(compServices) == 0 { return nil, nil } - // get the first component service as the rsm service + // get the first component service as the ITS service if len(compServices) > 0 { compService = compServices[0] } - // TODO(xingran): ComponentService.Name and ComponentService.RoleSelector are not used in ReplicatedStateMachine.Spec.Service - rsmService := &corev1.Service{ + // TODO(xingran): ComponentService.Name and ComponentService.RoleSelector are not used in InstanceSet.Spec.Service + itsService := &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: string(compService.ServiceName), }, Spec: compService.ServiceSpec, } - return rsmService, nil + return itsService, nil */ return nil, nil } -// rsmAlternativeServicesConvertor converts the given object into ReplicatedStateMachine.Spec.AlternativeServices. -// TODO: ComponentServices are not consistent with ReplicatedStateMachine.Spec.AlternativeServices, If it is based on the new ComponentDefinition API, -// the services is temporarily handled in the component controller, and the corresponding ReplicatedStateMachine.Spec.AlternativeServices is temporarily set nil. -func (c *rsmAlternativeServicesConvertor) convert(args ...any) (any, error) { +// itsAlternativeServicesConvertor converts the given object into InstanceSet.Spec.AlternativeServices. 
+// TODO: ComponentServices are not consistent with InstanceSet.Spec.AlternativeServices, If it is based on the new ComponentDefinition API, +// the services is temporarily handled in the component controller, and the corresponding InstanceSet.Spec.AlternativeServices is temporarily set nil. +func (c *itsAlternativeServicesConvertor) convert(args ...any) (any, error) { return nil, nil } -// rsmRolesConvertor converts the ComponentDefinition.Spec.Roles into ReplicatedStateMachine.Spec.Roles. -func (c *rsmRolesConvertor) convert(args ...any) (any, error) { - synthesizeComp, err := parseRSMConvertorArgs(args...) +// itsRolesConvertor converts the ComponentDefinition.Spec.Roles into InstanceSet.Spec.Roles. +func (c *itsRolesConvertor) convert(args ...any) (any, error) { + synthesizeComp, err := parseITSConvertorArgs(args...) if err != nil { return nil, err } - return ConvertSynthesizeCompRoleToRSMRole(synthesizeComp), nil + return ConvertSynthesizeCompRoleToInstanceSetRole(synthesizeComp), nil } -// rsmRoleProbeConvertor converts the ComponentDefinition.Spec.LifecycleActions.RoleProbe into ReplicatedStateMachine.Spec.RoleProbe. -func (c *rsmRoleProbeConvertor) convert(args ...any) (any, error) { - synthesizeComp, err := parseRSMConvertorArgs(args...) +// itsRoleProbeConvertor converts the ComponentDefinition.Spec.LifecycleActions.RoleProbe into InstanceSet.Spec.RoleProbe. +func (c *itsRoleProbeConvertor) convert(args ...any) (any, error) { + synthesizeComp, err := parseITSConvertorArgs(args...) 
if err != nil { return nil, err } @@ -258,7 +258,7 @@ func (c *rsmRoleProbeConvertor) convert(args ...any) (any, error) { return nil, nil } - rsmRoleProbe := &workloads.RoleProbe{ + itsRoleProbe := &workloads.RoleProbe{ TimeoutSeconds: synthesizeComp.LifecycleActions.RoleProbe.TimeoutSeconds, PeriodSeconds: synthesizeComp.LifecycleActions.RoleProbe.PeriodSeconds, SuccessThreshold: 1, @@ -268,25 +268,25 @@ func (c *rsmRoleProbeConvertor) convert(args ...any) (any, error) { if synthesizeComp.LifecycleActions.RoleProbe.BuiltinHandler != nil { builtinHandler := string(*synthesizeComp.LifecycleActions.RoleProbe.BuiltinHandler) - rsmRoleProbe.BuiltinHandler = &builtinHandler - return rsmRoleProbe, nil + itsRoleProbe.BuiltinHandler = &builtinHandler + return itsRoleProbe, nil } - // TODO(xingran): RSM Action does not support args[] yet + // TODO(xingran): ITS Action does not support args[] yet if synthesizeComp.LifecycleActions.RoleProbe.CustomHandler != nil { - rsmRoleProbeCmdAction := workloads.Action{ + itsRoleProbeCmdAction := workloads.Action{ Image: synthesizeComp.LifecycleActions.RoleProbe.CustomHandler.Image, Command: synthesizeComp.LifecycleActions.RoleProbe.CustomHandler.Exec.Command, Args: synthesizeComp.LifecycleActions.RoleProbe.CustomHandler.Exec.Args, } - rsmRoleProbe.CustomHandler = []workloads.Action{rsmRoleProbeCmdAction} + itsRoleProbe.CustomHandler = []workloads.Action{itsRoleProbeCmdAction} } - return rsmRoleProbe, nil + return itsRoleProbe, nil } -func (c *rsmCredentialConvertor) convert(args ...any) (any, error) { - synthesizeComp, err := parseRSMConvertorArgs(args...) +func (c *itsCredentialConvertor) convert(args ...any) (any, error) { + synthesizeComp, err := parseITSConvertorArgs(args...) 
if err != nil { return nil, err } @@ -334,13 +334,13 @@ func (c *rsmCredentialConvertor) convert(args ...any) (any, error) { return credential, nil } -func (c *rsmMembershipReconfigurationConvertor) convert(args ...any) (any, error) { - // synthesizeComp, err := parseRSMConvertorArgs(args...) +func (c *itsMembershipReconfigurationConvertor) convert(args ...any) (any, error) { + // synthesizeComp, err := parseITSConvertorArgs(args...) return "", nil // TODO } -// ConvertSynthesizeCompRoleToRSMRole converts the component.SynthesizedComponent.Roles to workloads.ReplicaRole. -func ConvertSynthesizeCompRoleToRSMRole(synthesizedComp *SynthesizedComponent) []workloads.ReplicaRole { +// ConvertSynthesizeCompRoleToInstanceSetRole converts the component.SynthesizedComponent.Roles to workloads.ReplicaRole. +func ConvertSynthesizeCompRoleToInstanceSetRole(synthesizedComp *SynthesizedComponent) []workloads.ReplicaRole { if synthesizedComp.Roles == nil { return nil } @@ -355,13 +355,13 @@ func ConvertSynthesizeCompRoleToRSMRole(synthesizedComp *SynthesizedComponent) [ return workloads.NoneMode } } - rsmReplicaRoles := make([]workloads.ReplicaRole, 0) + itsReplicaRoles := make([]workloads.ReplicaRole, 0) for _, role := range synthesizedComp.Roles { - rsmReplicaRole := workloads.ReplicaRole{ + itsReplicaRole := workloads.ReplicaRole{ Name: role.Name, AccessMode: accessMode(role), CanVote: role.Votable, - // HACK: Since the RSM relies on IsLeader field to determine whether a workload is available, we are using + // HACK: Since the InstanceSet relies on IsLeader field to determine whether a workload is available, we are using // such a workaround to combine these two fields to provide the information. // However, the condition will be broken if a service with multiple different roles that can be writable // at the same time, such as Zookeeper. 
@@ -369,7 +369,7 @@ func ConvertSynthesizeCompRoleToRSMRole(synthesizedComp *SynthesizedComponent) [ // where the KB controller does not provide HA functionality. IsLeader: role.Serviceable && role.Writable, } - rsmReplicaRoles = append(rsmReplicaRoles, rsmReplicaRole) + itsReplicaRoles = append(itsReplicaRoles, itsReplicaRole) } - return rsmReplicaRoles + return itsReplicaRoles } diff --git a/pkg/controller/component/rsm_convertor_test.go b/pkg/controller/component/rsm_convertor_test.go index 9e7ca075ffc..bba0d2271ea 100644 --- a/pkg/controller/component/rsm_convertor_test.go +++ b/pkg/controller/component/rsm_convertor_test.go @@ -27,8 +27,8 @@ import ( workloadsalpha1 "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) -var _ = Describe("Test RSM Convertor", func() { - Context("rsm convertors", func() { +var _ = Describe("Test InstanceSet Convertor", func() { + Context("InstanceSet convertors", func() { var ( synComp *SynthesizedComponent ) @@ -52,7 +52,7 @@ var _ = Describe("Test RSM Convertor", func() { } }) It("convert", func() { - convertor := &rsmRoleProbeConvertor{} + convertor := &itsRoleProbeConvertor{} res, err := convertor.convert(synComp) Expect(err).Should(Succeed()) probe := res.(*workloadsalpha1.RoleProbe) diff --git a/pkg/controller/component/vars.go b/pkg/controller/component/vars.go index dd8f5ded234..db9088cd500 100644 --- a/pkg/controller/component/vars.go +++ b/pkg/controller/component/vars.go @@ -797,14 +797,14 @@ func resolvePodVarRefLow(ctx context.Context, cli client.Reader, synthesizedComp } else { key := types.NamespacedName{ Namespace: synthesizedComp.Namespace, - Name: constant.GenerateRSMNamePattern(synthesizedComp.ClusterName, compName), + Name: constant.GenerateWorkloadNamePattern(synthesizedComp.ClusterName, compName), } - rsm := &workloads.ReplicatedStateMachine{} - err := cli.Get(ctx, key, rsm, inDataContext()) + its := &workloads.InstanceSet{} + err := cli.Get(ctx, key, its, inDataContext()) if err != nil { return nil, 
err } - return &rsm.Spec.Template.Spec, nil + return &its.Spec.Template.Spec, nil } } return resolveReferentObjects(synthesizedComp, selector.ClusterObjectReference, getter) diff --git a/pkg/controller/component/workload_utils.go b/pkg/controller/component/workload_utils.go index 7a8cfc7b4e1..8475852cb15 100644 --- a/pkg/controller/component/workload_utils.go +++ b/pkg/controller/component/workload_utils.go @@ -59,17 +59,17 @@ func GetObjectListByComponentName(ctx context.Context, cli client.Reader, cluste return cli.List(ctx, objectList, client.MatchingLabels(matchLabels), inNamespace) } -// GetComponentRSMMinReadySeconds gets the statefulSet minReadySeconds of the component. -func GetComponentRSMMinReadySeconds(ctx context.Context, +// GetComponentMinReadySeconds gets the underlying workload's minReadySeconds of the component. +func GetComponentMinReadySeconds(ctx context.Context, cli client.Client, cluster appsv1alpha1.Cluster, componentName string) (minReadySeconds int32, err error) { - rsmList := &workloads.ReplicatedStateMachineList{} - if err = GetObjectListByComponentName(ctx, cli, cluster, rsmList, componentName); err != nil { + itsList := &workloads.InstanceSetList{} + if err = GetObjectListByComponentName(ctx, cli, cluster, itsList, componentName); err != nil { return } - if len(rsmList.Items) > 0 { - minReadySeconds = rsmList.Items[0].Spec.MinReadySeconds + if len(itsList.Items) > 0 { + minReadySeconds = itsList.Items[0].Spec.MinReadySeconds return } return minReadySeconds, err diff --git a/pkg/controller/configuration/tool_image_builder_test.go b/pkg/controller/configuration/tool_image_builder_test.go index 1927fdfd9bb..d5350aee99e 100644 --- a/pkg/controller/configuration/tool_image_builder_test.go +++ b/pkg/controller/configuration/tool_image_builder_test.go @@ -57,7 +57,7 @@ var _ = Describe("ToolsImageBuilderTest", func() { Context("ToolsImageBuilderTest", func() { It("TestScriptSpec", func() { - rsm, err := factory.BuildRSM(clusterComponent) + 
its, err := factory.BuildInstanceSet(clusterComponent) Expect(err).Should(Succeed()) cfgManagerParams := &cfgcm.CfgManagerBuildParams{ @@ -105,10 +105,10 @@ var _ = Describe("ToolsImageBuilderTest", func() { Policy: appsv1alpha1.NoneMergePolicy, }, } - Expect(buildConfigToolsContainer(cfgManagerParams, &rsm.Spec.Template.Spec, clusterComponent)).Should(Succeed()) + Expect(buildConfigToolsContainer(cfgManagerParams, &its.Spec.Template.Spec, clusterComponent)).Should(Succeed()) Expect(4).Should(BeEquivalentTo(len(cfgManagerParams.ToolsContainers))) Expect("test_images").Should(BeEquivalentTo(cfgManagerParams.ToolsContainers[0].Image)) - Expect(rsm.Spec.Template.Spec.Containers[0].Image).Should(BeEquivalentTo(cfgManagerParams.ToolsContainers[1].Image)) + Expect(its.Spec.Template.Spec.Containers[0].Image).Should(BeEquivalentTo(cfgManagerParams.ToolsContainers[1].Image)) Expect(kbToolsImage).Should(BeEquivalentTo(cfgManagerParams.ToolsContainers[2].Image)) Expect(kbToolsImage).Should(BeEquivalentTo(cfgManagerParams.ToolsContainers[3].Image)) Expect(initSecRenderedToolContainerName).Should(BeEquivalentTo(cfgManagerParams.ToolsContainers[3].Name)) diff --git a/pkg/controller/factory/builder.go b/pkg/controller/factory/builder.go index 3a2af37b534..cd0f011b312 100644 --- a/pkg/controller/factory/builder.go +++ b/pkg/controller/factory/builder.go @@ -50,8 +50,8 @@ import ( viper "github.com/apecloud/kubeblocks/pkg/viperx" ) -// BuildRSM builds a ReplicatedStateMachine object from SynthesizedComponent. -func BuildRSM(synthesizedComp *component.SynthesizedComponent) (*workloads.ReplicatedStateMachine, error) { +// BuildInstanceSet builds a InstanceSet object from SynthesizedComponent. 
+func BuildInstanceSet(synthesizedComp *component.SynthesizedComponent) (*workloads.InstanceSet, error) { var ( clusterDefName = synthesizedComp.ClusterDefName clusterCompDefName = synthesizedComp.ClusterCompDefName @@ -91,12 +91,12 @@ func BuildRSM(synthesizedComp *component.SynthesizedComponent) (*workloads.Repli Spec: *synthesizedComp.PodSpec.DeepCopy(), } - rsmName := constant.GenerateRSMNamePattern(clusterName, compName) - rsmBuilder := builder.NewReplicatedStateMachineBuilder(namespace, rsmName). + itsName := constant.GenerateWorkloadNamePattern(clusterName, compName) + itsBuilder := builder.NewInstanceSetBuilder(namespace, itsName). AddLabelsInMap(mergeLabels). AddAnnotationsInMap(mergeAnnotations). AddMatchLabelsInMap(labels). - SetServiceName(constant.GenerateRSMServiceNamePattern(rsmName)). + SetServiceName(constant.GenerateServiceNamePattern(itsName)). SetReplicas(synthesizedComp.Replicas). SetMinReadySeconds(synthesizedComp.MinReadySeconds). SetTemplate(template) @@ -105,31 +105,31 @@ func BuildRSM(synthesizedComp *component.SynthesizedComponent) (*workloads.Repli for _, vct := range synthesizedComp.VolumeClaimTemplates { vcts = append(vcts, vctToPVC(vct)) } - rsmBuilder.SetVolumeClaimTemplates(vcts...) + itsBuilder.SetVolumeClaimTemplates(vcts...) if common.IsCompactMode(synthesizedComp.Annotations) { - rsmBuilder.AddAnnotations(constant.FeatureReconciliationInCompactModeAnnotationKey, + itsBuilder.AddAnnotations(constant.FeatureReconciliationInCompactModeAnnotationKey, synthesizedComp.Annotations[constant.FeatureReconciliationInCompactModeAnnotationKey]) } - // convert componentDef attributes to rsm attributes. including service, credential, roles, roleProbe, membershipReconfiguration, memberUpdateStrategy, etc. - rsmObj, err := component.BuildRSMFrom(synthesizedComp, rsmBuilder.GetObject()) + // convert componentDef attributes to workload attributes. 
including service, credential, roles, roleProbe, membershipReconfiguration, memberUpdateStrategy, etc. + itsObj, err := component.BuildWorkloadFrom(synthesizedComp, itsBuilder.GetObject()) if err != nil { return nil, err } // update sts.spec.volumeClaimTemplates[].metadata.labels // TODO(xingran): synthesizedComp.VolumeTypes has been removed, and the following code needs to be refactored. - if len(rsmObj.Spec.VolumeClaimTemplates) > 0 && len(rsmObj.GetLabels()) > 0 { - for index, vct := range rsmObj.Spec.VolumeClaimTemplates { + if len(itsObj.Spec.VolumeClaimTemplates) > 0 && len(itsObj.GetLabels()) > 0 { + for index, vct := range itsObj.Spec.VolumeClaimTemplates { BuildPersistentVolumeClaimLabels(synthesizedComp, &vct, vct.Name) - rsmObj.Spec.VolumeClaimTemplates[index] = vct + itsObj.Spec.VolumeClaimTemplates[index] = vct } } - setDefaultResourceLimits(rsmObj) + setDefaultResourceLimits(itsObj) - return rsmObj, nil + return itsObj, nil } func vctToPVC(vct corev1.PersistentVolumeClaimTemplate) corev1.PersistentVolumeClaim { @@ -161,8 +161,8 @@ func getMonitorAnnotations(synthesizedComp *component.SynthesizedComponent) map[ return rsm.AddAnnotationScope(rsm.HeadlessServiceScope, annotations) } -func setDefaultResourceLimits(rsm *workloads.ReplicatedStateMachine) { - for _, cc := range []*[]corev1.Container{&rsm.Spec.Template.Spec.Containers, &rsm.Spec.Template.Spec.InitContainers} { +func setDefaultResourceLimits(its *workloads.InstanceSet) { + for _, cc := range []*[]corev1.Container{&its.Spec.Template.Spec.Containers, &its.Spec.Template.Spec.InitContainers} { for i := range *cc { intctrlutil.InjectZeroResourcesLimitsIfEmpty(&(*cc)[i]) } diff --git a/pkg/controller/factory/builder_test.go b/pkg/controller/factory/builder_test.go index 092e1e18b71..fb17eaf9580 100644 --- a/pkg/controller/factory/builder_test.go +++ b/pkg/controller/factory/builder_test.go @@ -255,21 +255,21 @@ var _ = Describe("builder", func() { 
Expect(credential.StringData["RANDOM_PASSWD"]).Should(Equal(originalPassword)) }) - It("builds RSM correctly", func() { + It("builds InstanceSet correctly", func() { clusterDef, cluster, synthesizedComponent := newClusterObjs(nil) - rsm, err := BuildRSM(synthesizedComponent) + its, err := BuildInstanceSet(synthesizedComponent) Expect(err).Should(BeNil()) - Expect(rsm).ShouldNot(BeNil()) + Expect(its).ShouldNot(BeNil()) By("set replicas = 0") newComponent := *synthesizedComponent newComponent.Replicas = 0 - rsm, err = BuildRSM(&newComponent) + its, err = BuildInstanceSet(&newComponent) Expect(err).Should(BeNil()) - Expect(rsm).ShouldNot(BeNil()) - Expect(*rsm.Spec.Replicas).Should(Equal(int32(0))) - Expect(rsm.Spec.VolumeClaimTemplates[0].Labels[constant.VolumeTypeLabelKey]). + Expect(its).ShouldNot(BeNil()) + Expect(*its.Spec.Replicas).Should(Equal(int32(0))) + Expect(its.Spec.VolumeClaimTemplates[0].Labels[constant.VolumeTypeLabelKey]). Should(Equal(string(appsv1alpha1.VolumeTypeData))) By("set workload type to Replication") @@ -281,13 +281,13 @@ var _ = Describe("builder", func() { } cluster.Spec.ComponentSpecs[0].Replicas = 2 replComponent := newAllFieldsSynthesizedComponent(clusterDef, nil, cluster) - rsm, err = BuildRSM(replComponent) + its, err = BuildInstanceSet(replComponent) Expect(err).Should(BeNil()) - Expect(rsm).ShouldNot(BeNil()) - Expect(*rsm.Spec.Replicas).Should(BeEquivalentTo(2)) + Expect(its).ShouldNot(BeNil()) + Expect(*its.Spec.Replicas).Should(BeEquivalentTo(2)) // test extra envs - Expect(rsm.Spec.Template.Spec.Containers).ShouldNot(BeEmpty()) - for _, container := range rsm.Spec.Template.Spec.Containers { + Expect(its.Spec.Template.Spec.Containers).ShouldNot(BeEmpty()) + for _, container := range its.Spec.Template.Spec.Containers { isContainEnv := false for _, env := range container.Env { if env.Name == "mock-key" && env.Value == "mock-value" { @@ -299,19 +299,19 @@ var _ = Describe("builder", func() { } // test roles - 
Expect(rsm.Spec.Roles).Should(HaveLen(2)) + Expect(its.Spec.Roles).Should(HaveLen(2)) for _, roleName := range []string{constant.Primary, constant.Secondary} { - Expect(slices.IndexFunc(rsm.Spec.Roles, func(role workloads.ReplicaRole) bool { + Expect(slices.IndexFunc(its.Spec.Roles, func(role workloads.ReplicaRole) bool { return role.Name == roleName })).Should(BeNumerically(">", -1)) } // test role probe - Expect(rsm.Spec.RoleProbe).ShouldNot(BeNil()) + Expect(its.Spec.RoleProbe).ShouldNot(BeNil()) // test member update strategy - Expect(rsm.Spec.MemberUpdateStrategy).ShouldNot(BeNil()) - Expect(*rsm.Spec.MemberUpdateStrategy).Should(BeEquivalentTo(workloads.SerialUpdateStrategy)) + Expect(its.Spec.MemberUpdateStrategy).ShouldNot(BeNil()) + Expect(*its.Spec.MemberUpdateStrategy).Should(BeEquivalentTo(workloads.SerialUpdateStrategy)) By("set workload type to Consensus") clusterDef.Spec.ComponentDefs[0].WorkloadType = appsv1alpha1.Consensus @@ -320,20 +320,20 @@ var _ = Describe("builder", func() { clusterDef.Spec.ComponentDefs[0].ConsensusSpec.UpdateStrategy = appsv1alpha1.BestEffortParallelStrategy cluster.Spec.ComponentSpecs[0].Replicas = 3 csComponent := newAllFieldsSynthesizedComponent(clusterDef, nil, cluster) - rsm, err = BuildRSM(csComponent) + its, err = BuildInstanceSet(csComponent) Expect(err).Should(BeNil()) - Expect(rsm).ShouldNot(BeNil()) + Expect(its).ShouldNot(BeNil()) // test roles - Expect(rsm.Spec.Roles).Should(HaveLen(1)) - Expect(rsm.Spec.Roles[0].Name).Should(Equal(appsv1alpha1.DefaultLeader.Name)) + Expect(its.Spec.Roles).Should(HaveLen(1)) + Expect(its.Spec.Roles[0].Name).Should(Equal(appsv1alpha1.DefaultLeader.Name)) // test role probe - Expect(rsm.Spec.RoleProbe).ShouldNot(BeNil()) + Expect(its.Spec.RoleProbe).ShouldNot(BeNil()) // test member update strategy - Expect(rsm.Spec.MemberUpdateStrategy).ShouldNot(BeNil()) - Expect(*rsm.Spec.MemberUpdateStrategy).Should(BeEquivalentTo(workloads.BestEffortParallelUpdateStrategy)) + 
Expect(its.Spec.MemberUpdateStrategy).ShouldNot(BeNil()) + Expect(*its.Spec.MemberUpdateStrategy).Should(BeEquivalentTo(workloads.BestEffortParallelUpdateStrategy)) }) It("builds BackupJob correctly", func() { diff --git a/pkg/controller/handler/handler_builder_test.go b/pkg/controller/handler/handler_builder_test.go index 7f7a4f36ad3..298bc0bdb88 100644 --- a/pkg/controller/handler/handler_builder_test.go +++ b/pkg/controller/handler/handler_builder_test.go @@ -47,8 +47,8 @@ var _ = Describe("handler builder test.", func() { namespace := "foo" clusterName := "bar" componentName := "test" - rsmName := fmt.Sprintf("%s-%s", clusterName, componentName) - stsName := rsmName + name := fmt.Sprintf("%s-%s", clusterName, componentName) + stsName := name podName := stsName + "-0" eventName := podName + ".123456" labels := map[string]string{ @@ -58,7 +58,7 @@ var _ = Describe("handler builder test.", func() { constant.AppInstanceLabelKey: clusterName, constant.KBAppComponentLabelKey: componentName, } - rsm := builder.NewReplicatedStateMachineBuilder(namespace, rsmName). + its := builder.NewInstanceSetBuilder(namespace, name). AddLabelsInMap(labels). GetObject() sts := builder.NewStatefulSetBuilder(namespace, stsName). @@ -86,7 +86,7 @@ var _ = Describe("handler builder test.", func() { handler := NewBuilder(finderCtx). AddFinder(NewInvolvedObjectFinder(&corev1.Pod{})). AddFinder(NewOwnerFinder(&appsv1.StatefulSet{})). - AddFinder(NewDelegatorFinder(&workloads.ReplicatedStateMachine{}, + AddFinder(NewDelegatorFinder(&workloads.InstanceSet{}, []string{constant.AppInstanceLabelKey, constant.KBAppComponentLabelKey})). 
Build() @@ -142,8 +142,8 @@ var _ = Describe("handler builder test.", func() { Expect(shutdown).Should(BeFalse()) request, ok := item.(reconcile.Request) Expect(ok).Should(BeTrue()) - Expect(request.Namespace).Should(Equal(rsm.Namespace)) - Expect(request.Name).Should(Equal(rsm.Name)) + Expect(request.Namespace).Should(Equal(its.Namespace)) + Expect(request.Name).Should(Equal(its.Name)) queue.Done(item) queue.Forget(item) } diff --git a/pkg/controller/rsm2/in_place_update_util.go b/pkg/controller/instanceset/in_place_update_util.go similarity index 94% rename from pkg/controller/rsm2/in_place_update_util.go rename to pkg/controller/instanceset/in_place_update_util.go index b759a1c5bbd..0469f4fee3c 100644 --- a/pkg/controller/rsm2/in_place_update_util.go +++ b/pkg/controller/instanceset/in_place_update_util.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "fmt" @@ -242,7 +242,7 @@ func equalField(old, new any) bool { func equalBasicInPlaceFields(old, new *corev1.Pod) bool { // Only comparing annotations and labels that are relevant to the new spec. - // These two fields might be modified by other controllers without the RSM controller knowing. + // These two fields might be modified by other controllers without the InstanceSet controller knowing. // For instance, two new annotations have been added by Patroni. // There are two strategies to handle this situation: override or replace. 
// The recreation approach (recreating pod(s) when any field is updated in the pod template) used by StatefulSet/Deployment/DaemonSet @@ -291,8 +291,8 @@ func equalResourcesInPlaceFields(old, new *corev1.Pod) bool { return true } -func getPodUpdatePolicy(rsm *workloads.ReplicatedStateMachine, pod *corev1.Pod) (PodUpdatePolicy, error) { - updateRevisions, err := getUpdateRevisions(rsm.Status.UpdateRevisions) +func getPodUpdatePolicy(its *workloads.InstanceSet, pod *corev1.Pod) (PodUpdatePolicy, error) { + updateRevisions, err := getUpdateRevisions(its.Status.UpdateRevisions) if err != nil { return NoOpsPolicy, err } @@ -301,13 +301,13 @@ func getPodUpdatePolicy(rsm *workloads.ReplicatedStateMachine, pod *corev1.Pod) return RecreatePolicy, nil } - rsmExt, err := buildRSMExt(rsm, nil) + itsExt, err := buildInstanceSetExt(its, nil) if err != nil { return NoOpsPolicy, err } - templateList := buildInstanceTemplateExts(rsmExt) + templateList := buildInstanceTemplateExts(itsExt) parentName, _ := ParseParentNameAndOrdinal(pod.Name) - templateName, _ := strings.CutPrefix(parentName, rsm.Name) + templateName, _ := strings.CutPrefix(parentName, its.Name) if len(templateName) > 0 { templateName, _ = strings.CutPrefix(templateName, "-") } @@ -317,7 +317,7 @@ func getPodUpdatePolicy(rsm *workloads.ReplicatedStateMachine, pod *corev1.Pod) if index < 0 { return NoOpsPolicy, fmt.Errorf("no corresponding template found for instance %s", pod.Name) } - inst, err := buildInstanceByTemplate(pod.Name, templateList[index], rsm, getPodRevision(pod)) + inst, err := buildInstanceByTemplate(pod.Name, templateList[index], its, getPodRevision(pod)) if err != nil { return NoOpsPolicy, err } @@ -343,10 +343,10 @@ func getPodUpdatePolicy(rsm *workloads.ReplicatedStateMachine, pod *corev1.Pod) return NoOpsPolicy, nil } -// IsPodUpdated tells whether the pod's spec is as expected in the rsm. +// IsPodUpdated tells whether the pod's spec is as expected in the InstanceSet. 
// This function is meant to replace the old fashion `GetPodRevision(pod) == updateRevision`, -// as the pod template revision has been redefined in rsm2. -func IsPodUpdated(rsm *workloads.ReplicatedStateMachine, pod *corev1.Pod) (bool, error) { - policy, err := getPodUpdatePolicy(rsm, pod) +// as the pod template revision has been redefined in instanceset. +func IsPodUpdated(its *workloads.InstanceSet, pod *corev1.Pod) (bool, error) { + policy, err := getPodUpdatePolicy(its, pod) return policy == NoOpsPolicy, err } diff --git a/pkg/controller/rsm2/in_place_update_util_test.go b/pkg/controller/instanceset/in_place_update_util_test.go similarity index 94% rename from pkg/controller/rsm2/in_place_update_util_test.go rename to pkg/controller/instanceset/in_place_update_util_test.go index 011a87458cc..1ce785a03e8 100644 --- a/pkg/controller/rsm2/in_place_update_util_test.go +++ b/pkg/controller/instanceset/in_place_update_util_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "fmt" @@ -103,7 +103,7 @@ var _ = Describe("instance util test", func() { podTemplate := template.DeepCopy() mergeMap(&map[string]string{key: randStr}, &podTemplate.Annotations) mergeMap(&map[string]string{key: randStr}, &podTemplate.Labels) - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + its = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). AddAnnotations(randStr, randStr). AddLabels(randStr, randStr). @@ -116,7 +116,7 @@ var _ = Describe("instance util test", func() { SetPodManagementPolicy(appsv1.ParallelPodManagement). 
GetObject() tree := kubebuilderx.NewObjectTree() - tree.SetRoot(rsm) + tree.SetRoot(its) var reconciler kubebuilderx.Reconciler By("update revisions") reconciler = NewRevisionUpdateReconciler() @@ -136,7 +136,7 @@ var _ = Describe("instance util test", func() { Expect(objects).Should(HaveLen(3)) pod1, ok := objects[0].(*corev1.Pod) Expect(ok).Should(BeTrue()) - policy, err := getPodUpdatePolicy(rsm, pod1) + policy, err := getPodUpdatePolicy(its, pod1) Expect(err).Should(BeNil()) Expect(policy).Should(Equal(NoOpsPolicy)) @@ -154,8 +154,8 @@ var _ = Describe("instance util test", func() { }, }) pod2.Labels[appsv1.ControllerRevisionHashLabelKey] = "new-revision" - rsm.Status.UpdateRevisions[pod2.Name] = getPodRevision(pod2) - policy, err = getPodUpdatePolicy(rsm, pod2) + its.Status.UpdateRevisions[pod2.Name] = getPodRevision(pod2) + policy, err = getPodUpdatePolicy(its, pod2) Expect(err).Should(BeNil()) Expect(policy).Should(Equal(RecreatePolicy)) @@ -163,7 +163,7 @@ var _ = Describe("instance util test", func() { pod3 := pod1.DeepCopy() randStr = rand.String(16) mergeMap(&map[string]string{key: randStr}, &pod3.Annotations) - policy, err = getPodUpdatePolicy(rsm, pod3) + policy, err = getPodUpdatePolicy(its, pod3) Expect(err).Should(BeNil()) Expect(policy).Should(Equal(InPlaceUpdatePolicy)) @@ -177,7 +177,7 @@ var _ = Describe("instance util test", func() { corev1.ResourceCPU: resource.MustParse(fmt.Sprintf("%dm", randInt)), } pod4.Spec.Containers[0].Resources.Requests = requests - policy, err = getPodUpdatePolicy(rsm, pod4) + policy, err = getPodUpdatePolicy(its, pod4) Expect(err).Should(BeNil()) Expect(policy).Should(Equal(InPlaceUpdatePolicy)) @@ -188,7 +188,7 @@ var _ = Describe("instance util test", func() { corev1.ResourceCPU: resource.MustParse(fmt.Sprintf("%dm", randInt)), } pod5.Spec.Containers[0].Resources.Requests = requests - policy, err = getPodUpdatePolicy(rsm, pod5) + policy, err = getPodUpdatePolicy(its, pod5) Expect(err).Should(BeNil()) 
Expect(policy).Should(Equal(InPlaceUpdatePolicy)) @@ -196,7 +196,7 @@ var _ = Describe("instance util test", func() { ignorePodVerticalScaling := viper.GetBool(FeatureGateIgnorePodVerticalScaling) defer viper.Set(FeatureGateIgnorePodVerticalScaling, ignorePodVerticalScaling) viper.Set(FeatureGateIgnorePodVerticalScaling, true) - policy, err = getPodUpdatePolicy(rsm, pod5) + policy, err = getPodUpdatePolicy(its, pod5) Expect(err).Should(BeNil()) Expect(policy).Should(Equal(NoOpsPolicy)) }) diff --git a/pkg/controller/rsm2/instance_util.go b/pkg/controller/instanceset/instance_util.go similarity index 90% rename from pkg/controller/rsm2/instance_util.go rename to pkg/controller/instanceset/instance_util.go index 0cb96b9b697..52d658a39af 100644 --- a/pkg/controller/rsm2/instance_util.go +++ b/pkg/controller/instanceset/instance_util.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "encoding/json" @@ -43,7 +43,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/builder" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" "github.com/apecloud/kubeblocks/pkg/controller/model" - rsm1 "github.com/apecloud/kubeblocks/pkg/controller/rsm" + "github.com/apecloud/kubeblocks/pkg/controller/rsm" ) type instanceTemplateExt struct { @@ -53,8 +53,8 @@ type instanceTemplateExt struct { VolumeClaimTemplates []corev1.PersistentVolumeClaim } -type rsmExt struct { - rsm *workloads.ReplicatedStateMachine +type instanceSetExt struct { + its *workloads.InstanceSet instanceTemplates []*workloads.InstanceTemplate } @@ -189,12 +189,12 @@ func ValidateDupInstanceNames[T any](instances []T, getNameFunc func(item T) str return nil } -func buildInstanceName2TemplateMap(rsmExt *rsmExt) (map[string]*instanceTemplateExt, error) { - instanceTemplateList := buildInstanceTemplateExts(rsmExt) +func buildInstanceName2TemplateMap(itsExt *instanceSetExt) 
(map[string]*instanceTemplateExt, error) { + instanceTemplateList := buildInstanceTemplateExts(itsExt) allNameTemplateMap := make(map[string]*instanceTemplateExt) var instanceNameList []string for _, template := range instanceTemplateList { - instanceNames := GenerateInstanceNamesFromTemplate(rsmExt.rsm.Name, template.Name, template.Replicas, rsmExt.rsm.Spec.OfflineInstances) + instanceNames := GenerateInstanceNamesFromTemplate(itsExt.its.Name, template.Name, template.Replicas, itsExt.its.Spec.OfflineInstances) instanceNameList = append(instanceNameList, instanceNames...) for _, name := range instanceNames { allNameTemplateMap[name] = template @@ -241,7 +241,7 @@ func generateInstanceNames(parentName, templateName string, return instanceNameList, ordinal } -func buildInstanceByTemplate(name string, template *instanceTemplateExt, parent *workloads.ReplicatedStateMachine, revision string) (*instance, error) { +func buildInstanceByTemplate(name string, template *instanceTemplateExt, parent *workloads.InstanceSet, revision string) (*instance, error) { // 1. 
build a pod from template var err error if len(revision) == 0 { @@ -386,14 +386,14 @@ func copyAndMerge(oldObj, newObj client.Object) client.Object { } } -func validateSpec(rsm *workloads.ReplicatedStateMachine, tree *kubebuilderx.ObjectTree) error { +func validateSpec(its *workloads.InstanceSet, tree *kubebuilderx.ObjectTree) error { replicasInTemplates := int32(0) - rsmExt, err := buildRSMExt(rsm, tree) + itsExt, err := buildInstanceSetExt(its, tree) if err != nil { return err } templateNames := sets.New[string]() - for _, template := range rsmExt.instanceTemplates { + for _, template := range itsExt.instanceTemplates { replicas := int32(1) if template.Replicas != nil { replicas = *template.Replicas @@ -405,35 +405,35 @@ func validateSpec(rsm *workloads.ReplicatedStateMachine, tree *kubebuilderx.Obje templateNames.Insert(template.Name) } // sum of spec.templates[*].replicas should not greater than spec.replicas - if replicasInTemplates > *rsm.Spec.Replicas { - return fmt.Errorf("total replicas in instances(%d) should not greater than replicas in spec(%d)", replicasInTemplates, *rsm.Spec.Replicas) + if replicasInTemplates > *its.Spec.Replicas { + return fmt.Errorf("total replicas in instances(%d) should not greater than replicas in spec(%d)", replicasInTemplates, *its.Spec.Replicas) } return nil } -func buildInstanceTemplateRevision(template *instanceTemplateExt, parent *workloads.ReplicatedStateMachine) (string, error) { +func buildInstanceTemplateRevision(template *instanceTemplateExt, parent *workloads.InstanceSet) (string, error) { podTemplate := filterInPlaceFields(&template.PodTemplateSpec) - rsm := builder.NewReplicatedStateMachineBuilder(parent.Namespace, parent.Name). + its := builder.NewInstanceSetBuilder(parent.Namespace, parent.Name). SetUID(parent.UID). AddAnnotationsInMap(parent.Annotations). AddMatchLabelsInMap(parent.Labels). SetTemplate(*podTemplate). 
GetObject() - cr, err := NewRevision(rsm) + cr, err := NewRevision(its) if err != nil { return "", err } return cr.Labels[ControllerRevisionHashLabel], nil } -func buildInstanceTemplateExts(rsmExt *rsmExt) []*instanceTemplateExt { - envConfigName := rsm1.GetEnvConfigMapName(rsmExt.rsm.Name) - defaultTemplate := rsm1.BuildPodTemplate(rsmExt.rsm, envConfigName) +func buildInstanceTemplateExts(itsExt *instanceSetExt) []*instanceTemplateExt { + envConfigName := rsm.GetEnvConfigMapName(itsExt.its.Name) + defaultTemplate := rsm.BuildPodTemplate(itsExt.its, envConfigName) makeInstanceTemplateExt := func() *instanceTemplateExt { var claims []corev1.PersistentVolumeClaim - for _, template := range rsmExt.rsm.Spec.VolumeClaimTemplates { + for _, template := range itsExt.its.Spec.VolumeClaimTemplates { claims = append(claims, *template.DeepCopy()) } return &instanceTemplateExt{ @@ -443,7 +443,7 @@ func buildInstanceTemplateExts(rsmExt *rsmExt) []*instanceTemplateExt { } var instanceTemplateExtList []*instanceTemplateExt - for _, template := range rsmExt.instanceTemplates { + for _, template := range itsExt.instanceTemplates { templateExt := makeInstanceTemplateExt() buildInstanceTemplateExt(*template, templateExt) instanceTemplateExtList = append(instanceTemplateExtList, templateExt) @@ -515,18 +515,18 @@ func getInstanceTemplates(instances []workloads.InstanceTemplate, template *core return append(instances, extraTemplates...) 
} -func findTemplateObject(rsm *workloads.ReplicatedStateMachine, tree *kubebuilderx.ObjectTree) (*corev1.ConfigMap, error) { - templateMap, err := getInstanceTemplateMap(rsm.Annotations) +func findTemplateObject(its *workloads.InstanceSet, tree *kubebuilderx.ObjectTree) (*corev1.ConfigMap, error) { + templateMap, err := getInstanceTemplateMap(its.Annotations) // error has been checked in prepare stage, there should be no error occurs if err != nil { return nil, nil } for name, templateName := range templateMap { - if name != rsm.Name { + if name != its.Name { continue } // find the compressed instance templates, parse them - template := builder.NewConfigMapBuilder(rsm.Namespace, templateName).GetObject() + template := builder.NewConfigMapBuilder(its.Namespace, templateName).GetObject() templateObj, err := tree.Get(template) if err != nil { return nil, err @@ -592,16 +592,16 @@ func buildInstanceTemplateExt(template workloads.InstanceTemplate, templateExt * }) } -func buildRSMExt(rsm *workloads.ReplicatedStateMachine, tree *kubebuilderx.ObjectTree) (*rsmExt, error) { - instancesCompressed, err := findTemplateObject(rsm, tree) +func buildInstanceSetExt(its *workloads.InstanceSet, tree *kubebuilderx.ObjectTree) (*instanceSetExt, error) { + instancesCompressed, err := findTemplateObject(its, tree) if err != nil { return nil, err } - instanceTemplateList := buildInstanceTemplates(*rsm.Spec.Replicas, rsm.Spec.Instances, instancesCompressed) + instanceTemplateList := buildInstanceTemplates(*its.Spec.Replicas, its.Spec.Instances, instancesCompressed) - return &rsmExt{ - rsm: rsm, + return &instanceSetExt{ + its: its, instanceTemplates: instanceTemplateList, }, nil } diff --git a/pkg/controller/rsm2/instance_util_test.go b/pkg/controller/instanceset/instance_util_test.go similarity index 82% rename from pkg/controller/rsm2/instance_util_test.go rename to pkg/controller/instanceset/instance_util_test.go index 4e82c9e23a2..7984c619f60 100644 --- 
a/pkg/controller/rsm2/instance_util_test.go +++ b/pkg/controller/instanceset/instance_util_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "fmt" @@ -34,33 +34,33 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/builder" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" - rsm1 "github.com/apecloud/kubeblocks/pkg/controller/rsm" + "github.com/apecloud/kubeblocks/pkg/controller/rsm" ) var _ = Describe("instance util test", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + its = builder.NewInstanceSetBuilder(namespace, name). SetService(&corev1.Service{}). SetReplicas(3). SetTemplate(template). SetVolumeClaimTemplates(volumeClaimTemplates...). SetRoles(roles). GetObject() - priorityMap = rsm1.ComposeRolePriorityMap(rsm.Spec.Roles) + priorityMap = rsm.ComposeRolePriorityMap(its.Spec.Roles) }) Context("sortObjects function", func() { It("should work well", func() { pods := []client.Object{ - builder.NewPodBuilder(namespace, "pod-0").AddLabels(rsm1.RoleLabelKey, "follower").GetObject(), - builder.NewPodBuilder(namespace, "pod-1").AddLabels(rsm1.RoleLabelKey, "logger").GetObject(), + builder.NewPodBuilder(namespace, "pod-0").AddLabels(rsm.RoleLabelKey, "follower").GetObject(), + builder.NewPodBuilder(namespace, "pod-1").AddLabels(rsm.RoleLabelKey, "logger").GetObject(), builder.NewPodBuilder(namespace, "pod-2").GetObject(), - builder.NewPodBuilder(namespace, "pod-3").AddLabels(rsm1.RoleLabelKey, "learner").GetObject(), - builder.NewPodBuilder(namespace, "pod-4").AddLabels(rsm1.RoleLabelKey, "candidate").GetObject(), - builder.NewPodBuilder(namespace, "pod-5").AddLabels(rsm1.RoleLabelKey, "leader").GetObject(), - builder.NewPodBuilder(namespace, "pod-6").AddLabels(rsm1.RoleLabelKey, "learner").GetObject(), - 
builder.NewPodBuilder(namespace, "pod-10").AddLabels(rsm1.RoleLabelKey, "learner").GetObject(), - builder.NewPodBuilder(namespace, "foo-20").AddLabels(rsm1.RoleLabelKey, "learner").GetObject(), + builder.NewPodBuilder(namespace, "pod-3").AddLabels(rsm.RoleLabelKey, "learner").GetObject(), + builder.NewPodBuilder(namespace, "pod-4").AddLabels(rsm.RoleLabelKey, "candidate").GetObject(), + builder.NewPodBuilder(namespace, "pod-5").AddLabels(rsm.RoleLabelKey, "leader").GetObject(), + builder.NewPodBuilder(namespace, "pod-6").AddLabels(rsm.RoleLabelKey, "learner").GetObject(), + builder.NewPodBuilder(namespace, "pod-10").AddLabels(rsm.RoleLabelKey, "learner").GetObject(), + builder.NewPodBuilder(namespace, "foo-20").AddLabels(rsm.RoleLabelKey, "learner").GetObject(), } expectedOrder := []string{"pod-4", "pod-2", "foo-20", "pod-3", "pod-6", "pod-10", "pod-1", "pod-0", "pod-5"} @@ -116,25 +116,25 @@ var _ = Describe("instance util test", func() { }) Context("buildInstanceName2TemplateMap", func() { - It("build a rsm with default template only", func() { - rsmExt, err := buildRSMExt(rsm, nil) + It("build an its with default template only", func() { + itsExt, err := buildInstanceSetExt(its, nil) Expect(err).Should(BeNil()) - nameTemplate, err := buildInstanceName2TemplateMap(rsmExt) + nameTemplate, err := buildInstanceName2TemplateMap(itsExt) Expect(err).Should(BeNil()) Expect(nameTemplate).Should(HaveLen(3)) - name0 := rsm.Name + "-0" + name0 := its.Name + "-0" Expect(nameTemplate).Should(HaveKey(name0)) - Expect(nameTemplate).Should(HaveKey(rsm.Name + "-1")) - Expect(nameTemplate).Should(HaveKey(rsm.Name + "-2")) + Expect(nameTemplate).Should(HaveKey(its.Name + "-1")) + Expect(nameTemplate).Should(HaveKey(its.Name + "-2")) nameTemplate[name0].PodTemplateSpec.Spec.Volumes = nil - envConfigName := rsm1.GetEnvConfigMapName(rsm.Name) - defaultTemplate := rsm1.BuildPodTemplate(rsm, envConfigName) + envConfigName := rsm.GetEnvConfigMapName(its.Name) + defaultTemplate := 
rsm.BuildPodTemplate(its, envConfigName) Expect(nameTemplate[name0].PodTemplateSpec.Spec).Should(Equal(defaultTemplate.Spec)) }) - It("build a rsm with one instance template override", func() { + It("build an its with one instance template override", func() { nameOverride := "name-override" - nameOverride0 := rsm.Name + "-" + nameOverride + "-0" + nameOverride0 := its.Name + "-" + nameOverride + "-0" annotationOverride := map[string]string{ "foo": "bar", } @@ -148,19 +148,19 @@ var _ = Describe("instance util test", func() { Labels: labelOverride, Image: &imageOverride, } - rsm.Spec.Instances = append(rsm.Spec.Instances, instance) - rsmExt, err := buildRSMExt(rsm, nil) + its.Spec.Instances = append(its.Spec.Instances, instance) + itsExt, err := buildInstanceSetExt(its, nil) Expect(err).Should(BeNil()) - nameTemplate, err := buildInstanceName2TemplateMap(rsmExt) + nameTemplate, err := buildInstanceName2TemplateMap(itsExt) Expect(err).Should(BeNil()) Expect(nameTemplate).Should(HaveLen(3)) - name0 := rsm.Name + "-0" - name1 := rsm.Name + "-1" + name0 := its.Name + "-0" + name1 := its.Name + "-1" Expect(nameTemplate).Should(HaveKey(name0)) Expect(nameTemplate).Should(HaveKey(name1)) Expect(nameTemplate).Should(HaveKey(nameOverride0)) - envConfigName := rsm1.GetEnvConfigMapName(rsm.Name) - expectedTemplate := rsm1.BuildPodTemplate(rsm, envConfigName) + envConfigName := rsm.GetEnvConfigMapName(its.Name) + expectedTemplate := rsm.BuildPodTemplate(its, envConfigName) Expect(nameTemplate[name0].PodTemplateSpec.Spec).Should(Equal(expectedTemplate.Spec)) Expect(nameTemplate[name1].PodTemplateSpec.Spec).Should(Equal(expectedTemplate.Spec)) Expect(nameTemplate[nameOverride0].PodTemplateSpec.Spec).ShouldNot(Equal(expectedTemplate.Spec)) @@ -172,25 +172,25 @@ var _ = Describe("instance util test", func() { Context("buildInstanceByTemplate", func() { It("should work well", func() { - rsmExt, err := buildRSMExt(rsm, nil) + itsExt, err := buildInstanceSetExt(its, nil) 
Expect(err).Should(BeNil()) - nameTemplate, err := buildInstanceName2TemplateMap(rsmExt) + nameTemplate, err := buildInstanceName2TemplateMap(itsExt) Expect(err).Should(BeNil()) Expect(nameTemplate).Should(HaveLen(3)) name := name + "-0" Expect(nameTemplate).Should(HaveKey(name)) template := nameTemplate[name] - replica, err := buildInstanceByTemplate(name, template, rsm, "") + replica, err := buildInstanceByTemplate(name, template, its, "") Expect(err).Should(BeNil()) Expect(replica.pod).ShouldNot(BeNil()) Expect(replica.pvcs).ShouldNot(BeNil()) Expect(replica.pvcs).Should(HaveLen(1)) Expect(replica.pod.Name).Should(Equal(name)) - Expect(replica.pod.Namespace).Should(Equal(rsm.Namespace)) + Expect(replica.pod.Namespace).Should(Equal(its.Namespace)) Expect(replica.pod.Spec.Volumes).Should(HaveLen(1)) Expect(replica.pod.Spec.Volumes[0].Name).Should(Equal(volumeClaimTemplates[0].Name)) - envConfigName := rsm1.GetEnvConfigMapName(rsm.Name) - expectedTemplate := rsm1.BuildPodTemplate(rsm, envConfigName) + envConfigName := rsm.GetEnvConfigMapName(its.Name) + expectedTemplate := rsm.BuildPodTemplate(its, envConfigName) Expect(replica.pod.Spec).ShouldNot(Equal(expectedTemplate.Spec)) // reset pod.volumes, pod.hostname and pod.subdomain replica.pod.Spec.Volumes = nil @@ -206,18 +206,18 @@ var _ = Describe("instance util test", func() { Context("validateSpec", func() { It("should work well", func() { By("a valid spec") - Expect(validateSpec(rsm, nil)).Should(Succeed()) + Expect(validateSpec(its, nil)).Should(Succeed()) By("sum of replicas in instance exceeds spec.replicas") - rsm2 := rsm.DeepCopy() + its2 := its.DeepCopy() replicas := int32(4) name := "barrrrr" instance := workloads.InstanceTemplate{ Name: name, Replicas: &replicas, } - rsm2.Spec.Instances = append(rsm2.Spec.Instances, instance) - err := validateSpec(rsm2, nil) + its2.Spec.Instances = append(its2.Spec.Instances, instance) + err := validateSpec(its2, nil) Expect(err).Should(HaveOccurred()) 
Expect(err.Error()).Should(ContainSubstring("should not greater than replicas in spec")) }) @@ -353,18 +353,18 @@ var _ = Describe("instance util test", func() { Replicas: func() *int32 { r := int32(1); return &r }(), }, } - rsm := builder.NewReplicatedStateMachineBuilder(namespace, name). + its := builder.NewInstanceSetBuilder(namespace, name). AddAnnotations(templateRefAnnotationKey, annotation). SetInstances(instances). GetObject() tree := kubebuilderx.NewObjectTree() - tree.SetRoot(rsm) + tree.SetRoot(its) Expect(tree.Add(templateObj)).Should(Succeed()) By("parse instance templates") - template, err := findTemplateObject(rsm, tree) + template, err := findTemplateObject(its, tree) Expect(err).Should(BeNil()) - instanceTemplates := getInstanceTemplates(rsm.Spec.Instances, template) + instanceTemplates := getInstanceTemplates(its.Spec.Instances, template) // append templates from mock function instances = append(instances, []workloads.InstanceTemplate{ { diff --git a/pkg/controller/rsm2/reconciler_assistant_object.go b/pkg/controller/instanceset/reconciler_assistant_object.go similarity index 86% rename from pkg/controller/rsm2/reconciler_assistant_object.go rename to pkg/controller/instanceset/reconciler_assistant_object.go index fc164198fb3..b552e747402 100644 --- a/pkg/controller/rsm2/reconciler_assistant_object.go +++ b/pkg/controller/instanceset/reconciler_assistant_object.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
*/ -package rsm2 +package instanceset import ( corev1 "k8s.io/api/core/v1" @@ -27,7 +27,7 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" "github.com/apecloud/kubeblocks/pkg/controller/model" - rsm1 "github.com/apecloud/kubeblocks/pkg/controller/rsm" + "github.com/apecloud/kubeblocks/pkg/controller/rsm" ) // assistantObjectReconciler manages non-workload objects, such as Service, ConfigMap, etc. @@ -48,17 +48,17 @@ func (a *assistantObjectReconciler) PreCondition(tree *kubebuilderx.ObjectTree) } func (a *assistantObjectReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) { - rsm, _ := tree.GetRoot().(*workloads.ReplicatedStateMachine) + its, _ := tree.GetRoot().(*workloads.InstanceSet) // generate objects by current spec - labels := getMatchLabels(rsm.Name) - selectors := getSvcSelector(rsm, false) - headlessSelectors := getSvcSelector(rsm, true) - - svc := rsm1.BuildSvc(*rsm, labels, selectors) - altSvs := rsm1.BuildAlternativeSvs(*rsm, labels) - headLessSvc := rsm1.BuildHeadlessSvc(*rsm, labels, headlessSelectors) - envConfig := rsm1.BuildEnvConfigMap(*rsm, labels) + labels := getMatchLabels(its.Name) + selectors := getSvcSelector(its, false) + headlessSelectors := getSvcSelector(its, true) + + svc := rsm.BuildSvc(*its, labels, selectors) + altSvs := rsm.BuildAlternativeSvs(*its, labels) + headLessSvc := rsm.BuildHeadlessSvc(*its, labels, headlessSelectors) + envConfig := rsm.BuildEnvConfigMap(*its, labels) var objects []client.Object if svc != nil { objects = append(objects, svc) @@ -68,7 +68,7 @@ func (a *assistantObjectReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*k } objects = append(objects, headLessSvc, envConfig) for _, object := range objects { - if err := rsm1.SetOwnership(rsm, object, model.GetScheme(), finalizer); err != nil { + if err := rsm.SetOwnership(its, object, model.GetScheme(), finalizer); err != nil { return 
nil, err } } @@ -84,7 +84,7 @@ func (a *assistantObjectReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*k oldSnapshot := make(map[model.GVKNObjKey]client.Object) svcList := tree.List(&corev1.Service{}) cmList := tree.List(&corev1.ConfigMap{}) - cmListFiltered, err := filterTemplate(cmList, rsm.Annotations) + cmListFiltered, err := filterTemplate(cmList, its.Annotations) if err != nil { return nil, err } diff --git a/pkg/controller/rsm2/reconciler_assistant_object_test.go b/pkg/controller/instanceset/reconciler_assistant_object_test.go similarity index 88% rename from pkg/controller/rsm2/reconciler_assistant_object_test.go rename to pkg/controller/instanceset/reconciler_assistant_object_test.go index 32c6eafbd82..7885fba3605 100644 --- a/pkg/controller/rsm2/reconciler_assistant_object_test.go +++ b/pkg/controller/instanceset/reconciler_assistant_object_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( . "github.com/onsi/ginkgo/v2" @@ -28,11 +28,12 @@ import ( "github.com/apecloud/kubeblocks/pkg/controller/builder" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" "github.com/apecloud/kubeblocks/pkg/controller/model" + "github.com/apecloud/kubeblocks/pkg/controller/rsm" ) var _ = Describe("assistant object reconciler test", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + its = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). SetReplicas(3). AddMatchLabelsInMap(selectors). 
@@ -45,9 +46,9 @@ var _ = Describe("assistant object reconciler test", func() { Context("PreCondition & Reconcile", func() { It("should work well", func() { By("PreCondition") - rsm.Generation = 1 + its.Generation = 1 tree := kubebuilderx.NewObjectTree() - tree.SetRoot(rsm) + tree.SetRoot(its) reconciler = NewAssistantObjectReconciler() Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied)) @@ -58,7 +59,7 @@ var _ = Describe("assistant object reconciler test", func() { objects := tree.GetSecondaryObjects() Expect(objects).Should(HaveLen(2)) svc := builder.NewHeadlessServiceBuilder(namespace, name+"-headless").GetObject() - cm := builder.NewConfigMapBuilder(namespace, name+"-rsm-env").GetObject() + cm := builder.NewConfigMapBuilder(namespace, rsm.GetEnvConfigMapName(name)).GetObject() for _, object := range []client.Object{svc, cm} { name, err := model.GetGVKName(object) Expect(err).Should(BeNil()) diff --git a/pkg/controller/rsm2/reconciler_deletion.go b/pkg/controller/instanceset/reconciler_deletion.go similarity index 99% rename from pkg/controller/rsm2/reconciler_deletion.go rename to pkg/controller/instanceset/reconciler_deletion.go index 73fcda1d40e..fb075426ea3 100644 --- a/pkg/controller/rsm2/reconciler_deletion.go +++ b/pkg/controller/instanceset/reconciler_deletion.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
*/ -package rsm2 +package instanceset import ( corev1 "k8s.io/api/core/v1" diff --git a/pkg/controller/rsm2/reconciler_deletion_test.go b/pkg/controller/instanceset/reconciler_deletion_test.go similarity index 90% rename from pkg/controller/rsm2/reconciler_deletion_test.go rename to pkg/controller/instanceset/reconciler_deletion_test.go index 5cdf79ee69f..4928640b353 100644 --- a/pkg/controller/rsm2/reconciler_deletion_test.go +++ b/pkg/controller/instanceset/reconciler_deletion_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "time" @@ -35,13 +35,13 @@ var _ = Describe("deletion reconciler test", func() { Context("PreCondition & Reconcile", func() { It("should work well", func() { By("PreCondition") - rsm := builder.NewReplicatedStateMachineBuilder(namespace, name).GetObject() + its := builder.NewInstanceSetBuilder(namespace, name).GetObject() tree := kubebuilderx.NewObjectTree() - tree.SetRoot(rsm) + tree.SetRoot(its) reconciler := NewDeletionReconciler() Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultUnsatisfied)) t := metav1.NewTime(time.Now()) - rsm.SetDeletionTimestamp(&t) + its.SetDeletionTimestamp(&t) Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied)) By("Reconcile") @@ -49,7 +49,7 @@ var _ = Describe("deletion reconciler test", func() { Expect(tree.Add(pod)).Should(Succeed()) newTree, err := reconciler.Reconcile(tree) Expect(err).Should(BeNil()) - Expect(newTree.GetRoot()).Should(Equal(rsm)) + Expect(newTree.GetRoot()).Should(Equal(its)) newTree, err = reconciler.Reconcile(newTree) Expect(err).Should(BeNil()) Expect(newTree.GetRoot()).Should(BeNil()) diff --git a/pkg/controller/rsm2/reconciler_fix_meta.go b/pkg/controller/instanceset/reconciler_fix_meta.go similarity index 98% rename from pkg/controller/rsm2/reconciler_fix_meta.go rename to 
pkg/controller/instanceset/reconciler_fix_meta.go index 9807914910b..604f7ef53bc 100644 --- a/pkg/controller/rsm2/reconciler_fix_meta.go +++ b/pkg/controller/instanceset/reconciler_fix_meta.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" diff --git a/pkg/controller/rsm2/reconciler_fix_meta_test.go b/pkg/controller/instanceset/reconciler_fix_meta_test.go similarity index 93% rename from pkg/controller/rsm2/reconciler_fix_meta_test.go rename to pkg/controller/instanceset/reconciler_fix_meta_test.go index 7d8f2fc0fe5..91a08f442fb 100644 --- a/pkg/controller/rsm2/reconciler_fix_meta_test.go +++ b/pkg/controller/instanceset/reconciler_fix_meta_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( . 
"github.com/onsi/ginkgo/v2" @@ -31,9 +31,9 @@ var _ = Describe("fix meta reconciler test", func() { Context("PreCondition & Reconcile", func() { It("should work well", func() { By("PreCondition") - rsm := builder.NewReplicatedStateMachineBuilder(namespace, name).GetObject() + its := builder.NewInstanceSetBuilder(namespace, name).GetObject() tree := kubebuilderx.NewObjectTree() - tree.SetRoot(rsm) + tree.SetRoot(its) reconciler := NewFixMetaReconciler() Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied)) diff --git a/pkg/controller/rsm2/reconciler_instance_alignment.go b/pkg/controller/instanceset/reconciler_instance_alignment.go similarity index 90% rename from pkg/controller/rsm2/reconciler_instance_alignment.go rename to pkg/controller/instanceset/reconciler_instance_alignment.go index e8d2399dc55..e31b5c2a24c 100644 --- a/pkg/controller/rsm2/reconciler_instance_alignment.go +++ b/pkg/controller/instanceset/reconciler_instance_alignment.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
*/ -package rsm2 +package instanceset import ( appsv1 "k8s.io/api/apps/v1" @@ -47,22 +47,22 @@ func (r *instanceAlignmentReconciler) PreCondition(tree *kubebuilderx.ObjectTree if model.IsReconciliationPaused(tree.GetRoot()) { return kubebuilderx.ResultUnsatisfied } - rsm, _ := tree.GetRoot().(*workloads.ReplicatedStateMachine) - if err := validateSpec(rsm, tree); err != nil { + its, _ := tree.GetRoot().(*workloads.InstanceSet) + if err := validateSpec(its, tree); err != nil { return kubebuilderx.CheckResultWithError(err) } return kubebuilderx.ResultSatisfied } func (r *instanceAlignmentReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) { - rsm, _ := tree.GetRoot().(*workloads.ReplicatedStateMachine) - rsmExt, err := buildRSMExt(rsm, tree) + its, _ := tree.GetRoot().(*workloads.InstanceSet) + itsExt, err := buildInstanceSetExt(its, tree) if err != nil { return nil, err } // 1. build desired name to template map - nameToTemplateMap, err := buildInstanceName2TemplateMap(rsmExt) + nameToTemplateMap, err := buildInstanceName2TemplateMap(itsExt) if err != nil { return nil, err } @@ -86,7 +86,7 @@ func (r *instanceAlignmentReconciler) Reconcile(tree *kubebuilderx.ObjectTree) ( // default OrderedReady policy createCount, deleteCount := 1, 1 shouldReady := true - if rsm.Spec.PodManagementPolicy == appsv1.ParallelPodManagement { + if its.Spec.PodManagementPolicy == appsv1.ParallelPodManagement { createCount = len(createNameSet) deleteCount = len(deleteNameSet) shouldReady = false @@ -116,7 +116,7 @@ func (r *instanceAlignmentReconciler) Reconcile(tree *kubebuilderx.ObjectTree) ( if shouldReady && predecessor != nil && !isHealthy(predecessor) { break } - inst, err := buildInstanceByTemplate(name, nameToTemplateMap[name], rsm, "") + inst, err := buildInstanceByTemplate(name, nameToTemplateMap[name], its, "") if err != nil { return nil, err } @@ -155,9 +155,9 @@ func (r *instanceAlignmentReconciler) Reconcile(tree *kubebuilderx.ObjectTree) ( 
break } if shouldReady && !isRunningAndReady(pod) { - tree.EventRecorder.Eventf(rsm, corev1.EventTypeWarning, "RSM %s/%s is waiting for Pod %s to be Running and Ready", - rsm.Namespace, - rsm.Name, + tree.EventRecorder.Eventf(its, corev1.EventTypeWarning, "InstanceSet %s/%s is waiting for Pod %s to be Running and Ready", + its.Namespace, + its.Name, pod.Name) } if err := tree.Delete(pod); err != nil { diff --git a/pkg/controller/rsm2/reconciler_instance_alignment_test.go b/pkg/controller/instanceset/reconciler_instance_alignment_test.go similarity index 87% rename from pkg/controller/rsm2/reconciler_instance_alignment_test.go rename to pkg/controller/instanceset/reconciler_instance_alignment_test.go index b48b1038151..4d2b9b8858f 100644 --- a/pkg/controller/rsm2/reconciler_instance_alignment_test.go +++ b/pkg/controller/instanceset/reconciler_instance_alignment_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( . "github.com/onsi/ginkgo/v2" @@ -35,7 +35,7 @@ import ( var _ = Describe("replicas alignment reconciler test", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + its = builder.NewInstanceSetBuilder(namespace, name). SetService(&corev1.Service{}). SetReplicas(3). SetTemplate(template). 
@@ -47,9 +47,9 @@ var _ = Describe("replicas alignment reconciler test", func() { Context("PreCondition & Reconcile", func() { It("should work well", func() { By("PreCondition") - rsm.Generation = 1 + its.Generation = 1 tree := kubebuilderx.NewObjectTree() - tree.SetRoot(rsm) + tree.SetRoot(its) reconciler = NewReplicasAlignmentReconciler() Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied)) @@ -57,20 +57,20 @@ var _ = Describe("replicas alignment reconciler test", func() { // desired: bar-hello-0, bar-foo-0, bar-foo-1, bar-0, bar-1, bar-2, bar-3 // current: bar-foo-0, bar-1 replicas := int32(7) - rsm.Spec.Replicas = &replicas + its.Spec.Replicas = &replicas nameHello := "hello" instanceHello := workloads.InstanceTemplate{ Name: nameHello, } - rsm.Spec.Instances = append(rsm.Spec.Instances, instanceHello) + its.Spec.Instances = append(its.Spec.Instances, instanceHello) nameFoo := "foo" replicasFoo := int32(2) instanceFoo := workloads.InstanceTemplate{ Name: nameFoo, Replicas: &replicasFoo, } - rsm.Spec.Instances = append(rsm.Spec.Instances, instanceFoo) - podFoo0 := builder.NewPodBuilder(namespace, rsm.Name+"-foo-0").GetObject() + its.Spec.Instances = append(its.Spec.Instances, instanceFoo) + podFoo0 := builder.NewPodBuilder(namespace, its.Name+"-foo-0").GetObject() podBar1 := builder.NewPodBuilder(namespace, "bar-1").GetObject() Expect(tree.Add(podFoo0, podBar1)).Should(Succeed()) @@ -104,9 +104,9 @@ var _ = Describe("replicas alignment reconciler test", func() { By("do reconcile with Parallel policy") parallelTree, err := tree.DeepCopy() Expect(err).Should(BeNil()) - parallelRsm, ok := parallelTree.GetRoot().(*workloads.ReplicatedStateMachine) + parallelITS, ok := parallelTree.GetRoot().(*workloads.InstanceSet) Expect(ok).Should(BeTrue()) - parallelRsm.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + parallelITS.Spec.PodManagementPolicy = appsv1.ParallelPodManagement newTree, err = reconciler.Reconcile(parallelTree) 
Expect(err).Should(BeNil()) // desired: bar-hello-0, bar-foo-0, bar-foo-1, bar-0, bar-1, bar-2, bar-3 @@ -119,8 +119,8 @@ var _ = Describe("replicas alignment reconciler test", func() { currentPodSnapshot[*name] = object } - podHello := builder.NewPodBuilder(namespace, rsm.Name+"-hello-0").GetObject() - podFoo1 := builder.NewPodBuilder(namespace, rsm.Name+"-foo-1").GetObject() + podHello := builder.NewPodBuilder(namespace, its.Name+"-hello-0").GetObject() + podFoo1 := builder.NewPodBuilder(namespace, its.Name+"-foo-1").GetObject() podBar2 := builder.NewPodBuilder(namespace, "bar-2").GetObject() podBar3 := builder.NewPodBuilder(namespace, "bar-3").GetObject() for _, object := range []client.Object{podHello, podFoo0, podFoo1, podBar0, podBar1, podBar2, podBar3} { diff --git a/pkg/controller/rsm2/reconciler_revision_update.go b/pkg/controller/instanceset/reconciler_revision_update.go similarity index 83% rename from pkg/controller/rsm2/reconciler_revision_update.go rename to pkg/controller/instanceset/reconciler_revision_update.go index 15f1480f978..f01ea7a11bd 100644 --- a/pkg/controller/rsm2/reconciler_revision_update.go +++ b/pkg/controller/instanceset/reconciler_revision_update.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
*/ -package rsm2 +package instanceset import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" @@ -41,28 +41,28 @@ func (r *revisionUpdateReconciler) PreCondition(tree *kubebuilderx.ObjectTree) * if tree.GetRoot() == nil || !model.IsObjectUpdating(tree.GetRoot()) { return kubebuilderx.ResultUnsatisfied } - rsm, _ := tree.GetRoot().(*workloads.ReplicatedStateMachine) - if err := validateSpec(rsm, tree); err != nil { + its, _ := tree.GetRoot().(*workloads.InstanceSet) + if err := validateSpec(its, tree); err != nil { return kubebuilderx.CheckResultWithError(err) } return kubebuilderx.ResultSatisfied } func (r *revisionUpdateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) { - rsm, _ := tree.GetRoot().(*workloads.ReplicatedStateMachine) - rsmExt, err := buildRSMExt(rsm, tree) + its, _ := tree.GetRoot().(*workloads.InstanceSet) + itsExt, err := buildInstanceSetExt(its, tree) if err != nil { return nil, err } // 1. build all templates by applying instance template overrides to default pod template - instanceTemplateList := buildInstanceTemplateExts(rsmExt) + instanceTemplateList := buildInstanceTemplateExts(itsExt) // build instance revision list from instance templates var instanceRevisionList []instanceRevision for _, template := range instanceTemplateList { - instanceNames := GenerateInstanceNamesFromTemplate(rsm.Name, template.Name, template.Replicas, rsmExt.rsm.Spec.OfflineInstances) - revision, err := buildInstanceTemplateRevision(template, rsm) + instanceNames := GenerateInstanceNamesFromTemplate(its.Name, template.Name, template.Replicas, itsExt.its.Spec.OfflineInstances) + revision, err := buildInstanceTemplateRevision(template, its) if err != nil { return nil, err } @@ -88,16 +88,16 @@ func (r *revisionUpdateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*ku if err != nil { return nil, err } - rsm.Status.UpdateRevisions = revisions + its.Status.UpdateRevisions = revisions updateRevision := "" if 
len(instanceRevisionList) > 0 { updateRevision = instanceRevisionList[len(instanceRevisionList)-1].revision } - rsm.Status.UpdateRevision = updateRevision + its.Status.UpdateRevision = updateRevision // The 'ObservedGeneration' field is used to indicate whether the revisions have been updated. // Computing these revisions in each reconciliation loop can be time-consuming, so we optimize it by // performing the computation only when the 'spec' is updated. - rsm.Status.ObservedGeneration = rsm.Generation + its.Status.ObservedGeneration = its.Generation return tree, nil } diff --git a/pkg/controller/rsm2/reconciler_revision_update_test.go b/pkg/controller/instanceset/reconciler_revision_update_test.go similarity index 75% rename from pkg/controller/rsm2/reconciler_revision_update_test.go rename to pkg/controller/instanceset/reconciler_revision_update_test.go index 0ebd7386383..8d79ffaa0f9 100644 --- a/pkg/controller/rsm2/reconciler_revision_update_test.go +++ b/pkg/controller/instanceset/reconciler_revision_update_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( . "github.com/onsi/ginkgo/v2" @@ -32,7 +32,7 @@ import ( var _ = Describe("revision update reconciler test", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + its = builder.NewInstanceSetBuilder(namespace, name). SetService(&corev1.Service{}). SetReplicas(3). SetTemplate(template). 
@@ -44,25 +44,25 @@ var _ = Describe("revision update reconciler test", func() { Context("PreCondition & Reconcile", func() { It("should work well", func() { By("PreCondition") - rsm.Generation = 1 + its.Generation = 1 tree := kubebuilderx.NewObjectTree() - tree.SetRoot(rsm) + tree.SetRoot(its) reconciler := NewRevisionUpdateReconciler() Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied)) By("Reconcile") newTree, err := reconciler.Reconcile(tree) Expect(err).Should(BeNil()) - newRsm, ok := newTree.GetRoot().(*workloads.ReplicatedStateMachine) + newITS, ok := newTree.GetRoot().(*workloads.InstanceSet) Expect(ok).Should(BeTrue()) - Expect(newRsm.Status.ObservedGeneration).Should(Equal(rsm.Generation)) - updateRevisions, err := getUpdateRevisions(newRsm.Status.UpdateRevisions) + Expect(newITS.Status.ObservedGeneration).Should(Equal(its.Generation)) + updateRevisions, err := getUpdateRevisions(newITS.Status.UpdateRevisions) Expect(err).Should(BeNil()) Expect(updateRevisions).Should(HaveLen(3)) - Expect(updateRevisions).Should(HaveKey(rsm.Name + "-0")) - Expect(updateRevisions).Should(HaveKey(rsm.Name + "-1")) - Expect(updateRevisions).Should(HaveKey(rsm.Name + "-2")) - Expect(newRsm.Status.UpdateRevision).Should(Equal(updateRevisions[rsm.Name+"-2"])) + Expect(updateRevisions).Should(HaveKey(its.Name + "-0")) + Expect(updateRevisions).Should(HaveKey(its.Name + "-1")) + Expect(updateRevisions).Should(HaveKey(its.Name + "-2")) + Expect(newITS.Status.UpdateRevision).Should(Equal(updateRevisions[its.Name+"-2"])) }) }) }) diff --git a/pkg/controller/rsm2/reconciler_status.go b/pkg/controller/instanceset/reconciler_status.go similarity index 73% rename from pkg/controller/rsm2/reconciler_status.go rename to pkg/controller/instanceset/reconciler_status.go index 75c661e5638..bbf7be898fa 100644 --- a/pkg/controller/rsm2/reconciler_status.go +++ b/pkg/controller/instanceset/reconciler_status.go @@ -17,7 +17,7 @@ You should have received a copy of the 
GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( corev1 "k8s.io/api/core/v1" @@ -25,7 +25,7 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" "github.com/apecloud/kubeblocks/pkg/controller/model" - rsm1 "github.com/apecloud/kubeblocks/pkg/controller/rsm" + "github.com/apecloud/kubeblocks/pkg/controller/rsm" ) // statusReconciler computes the current status @@ -45,7 +45,7 @@ func (r *statusReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuil } func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) { - rsm, _ := tree.GetRoot().(*workloads.ReplicatedStateMachine) + its, _ := tree.GetRoot().(*workloads.InstanceSet) // 1. get all pods pods := tree.List(&corev1.Pod{}) var podList []corev1.Pod @@ -54,7 +54,7 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilde podList = append(podList, *pod) } // 2. 
calculate status summary - updateRevisions, err := getUpdateRevisions(rsm.Status.UpdateRevisions) + updateRevisions, err := getUpdateRevisions(its.Status.UpdateRevisions) if err != nil { return nil, err } @@ -68,12 +68,12 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilde } if isRunningAndReady(pod) { readyReplicas++ - if isRunningAndAvailable(pod, rsm.Spec.MinReadySeconds) { + if isRunningAndAvailable(pod, its.Spec.MinReadySeconds) { availableReplicas++ } } if isCreated(pod) && !isTerminating(pod) { - isPodUpdated, err := IsPodUpdated(rsm, pod) + isPodUpdated, err := IsPodUpdated(its, pod) if err != nil { return nil, err } @@ -85,25 +85,25 @@ func (r *statusReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilde } } } - rsm.Status.Replicas = replicas - rsm.Status.ReadyReplicas = readyReplicas - rsm.Status.AvailableReplicas = availableReplicas - rsm.Status.CurrentReplicas = currentReplicas - rsm.Status.UpdatedReplicas = updatedReplicas - rsm.Status.CurrentGeneration = rsm.Generation + its.Status.Replicas = replicas + its.Status.ReadyReplicas = readyReplicas + its.Status.AvailableReplicas = availableReplicas + its.Status.CurrentReplicas = currentReplicas + its.Status.UpdatedReplicas = updatedReplicas + its.Status.CurrentGeneration = its.Generation // all pods have been updated totalReplicas := int32(1) - if rsm.Spec.Replicas != nil { - totalReplicas = *rsm.Spec.Replicas + if its.Spec.Replicas != nil { + totalReplicas = *its.Spec.Replicas } - if rsm.Status.Replicas == totalReplicas && rsm.Status.UpdatedReplicas == totalReplicas { - rsm.Status.CurrentRevisions = rsm.Status.UpdateRevisions - rsm.Status.CurrentRevision = rsm.Status.UpdateRevision - rsm.Status.CurrentReplicas = totalReplicas + if its.Status.Replicas == totalReplicas && its.Status.UpdatedReplicas == totalReplicas { + its.Status.CurrentRevisions = its.Status.UpdateRevisions + its.Status.CurrentRevision = its.Status.UpdateRevision + its.Status.CurrentReplicas = 
totalReplicas } // 3. set members status - rsm1.SetMembersStatus(rsm, &podList) + rsm.SetMembersStatus(its, &podList) return tree, nil } diff --git a/pkg/controller/rsm2/reconciler_status_test.go b/pkg/controller/instanceset/reconciler_status_test.go similarity index 68% rename from pkg/controller/rsm2/reconciler_status_test.go rename to pkg/controller/instanceset/reconciler_status_test.go index d295bae5ec1..6cbc091e234 100644 --- a/pkg/controller/rsm2/reconciler_status_test.go +++ b/pkg/controller/instanceset/reconciler_status_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "time" @@ -36,7 +36,7 @@ import ( var _ = Describe("status reconciler test", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + its = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). SetReplicas(3). AddMatchLabelsInMap(selectors). 
@@ -50,26 +50,26 @@ var _ = Describe("status reconciler test", func() { Context("PreCondition & Reconcile", func() { It("should work well", func() { By("PreCondition") - rsm.Generation = 1 + its.Generation = 1 tree := kubebuilderx.NewObjectTree() - tree.SetRoot(rsm) + tree.SetRoot(its) By("prepare current tree") replicas := int32(7) - rsm.Spec.Replicas = &replicas - rsm.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + its.Spec.Replicas = &replicas + its.Spec.PodManagementPolicy = appsv1.ParallelPodManagement nameHello := "hello" instanceHello := workloads.InstanceTemplate{ Name: nameHello, } - rsm.Spec.Instances = append(rsm.Spec.Instances, instanceHello) + its.Spec.Instances = append(its.Spec.Instances, instanceHello) generateNameFoo := "foo" replicasFoo := int32(2) instanceFoo := workloads.InstanceTemplate{ Name: generateNameFoo, Replicas: &replicasFoo, } - rsm.Spec.Instances = append(rsm.Spec.Instances, instanceFoo) + its.Spec.Instances = append(its.Spec.Instances, instanceFoo) // prepare for update By("fix meta") @@ -97,13 +97,13 @@ var _ = Describe("status reconciler test", func() { Expect(reconciler.PreCondition(newTree)).Should(Equal(kubebuilderx.ResultSatisfied)) _, err = reconciler.Reconcile(newTree) Expect(err).Should(BeNil()) - Expect(rsm.Status.Replicas).Should(BeEquivalentTo(0)) - Expect(rsm.Status.ReadyReplicas).Should(BeEquivalentTo(0)) - Expect(rsm.Status.AvailableReplicas).Should(BeEquivalentTo(0)) - Expect(rsm.Status.UpdatedReplicas).Should(BeEquivalentTo(0)) - Expect(rsm.Status.CurrentReplicas).Should(BeEquivalentTo(0)) - Expect(rsm.Status.CurrentRevisions).Should(HaveLen(0)) - Expect(rsm.Status.CurrentGeneration).Should(BeEquivalentTo(rsm.Generation)) + Expect(its.Status.Replicas).Should(BeEquivalentTo(0)) + Expect(its.Status.ReadyReplicas).Should(BeEquivalentTo(0)) + Expect(its.Status.AvailableReplicas).Should(BeEquivalentTo(0)) + Expect(its.Status.UpdatedReplicas).Should(BeEquivalentTo(0)) + 
Expect(its.Status.CurrentReplicas).Should(BeEquivalentTo(0)) + Expect(its.Status.CurrentRevisions).Should(HaveLen(0)) + Expect(its.Status.CurrentGeneration).Should(BeEquivalentTo(its.Generation)) By("make all pods ready with old revision") condition := corev1.PodCondition{ @@ -124,16 +124,16 @@ var _ = Describe("status reconciler test", func() { } _, err = reconciler.Reconcile(newTree) Expect(err).Should(BeNil()) - Expect(rsm.Status.Replicas).Should(BeEquivalentTo(replicas)) - Expect(rsm.Status.ReadyReplicas).Should(BeEquivalentTo(replicas)) - Expect(rsm.Status.AvailableReplicas).Should(BeEquivalentTo(replicas)) - Expect(rsm.Status.UpdatedReplicas).Should(BeEquivalentTo(0)) - Expect(rsm.Status.CurrentReplicas).Should(BeEquivalentTo(replicas)) - Expect(rsm.Status.CurrentRevisions).Should(HaveLen(0)) - Expect(rsm.Status.CurrentGeneration).Should(BeEquivalentTo(rsm.Generation)) + Expect(its.Status.Replicas).Should(BeEquivalentTo(replicas)) + Expect(its.Status.ReadyReplicas).Should(BeEquivalentTo(replicas)) + Expect(its.Status.AvailableReplicas).Should(BeEquivalentTo(replicas)) + Expect(its.Status.UpdatedReplicas).Should(BeEquivalentTo(0)) + Expect(its.Status.CurrentReplicas).Should(BeEquivalentTo(replicas)) + Expect(its.Status.CurrentRevisions).Should(HaveLen(0)) + Expect(its.Status.CurrentGeneration).Should(BeEquivalentTo(its.Generation)) By("make all pods available with latest revision") - updateRevisions, err := getUpdateRevisions(rsm.Status.UpdateRevisions) + updateRevisions, err := getUpdateRevisions(its.Status.UpdateRevisions) Expect(err).Should(BeNil()) for _, object := range pods { pod, ok := object.(*corev1.Pod) @@ -142,13 +142,13 @@ var _ = Describe("status reconciler test", func() { } _, err = reconciler.Reconcile(newTree) Expect(err).Should(BeNil()) - Expect(rsm.Status.Replicas).Should(BeEquivalentTo(replicas)) - Expect(rsm.Status.ReadyReplicas).Should(BeEquivalentTo(replicas)) - Expect(rsm.Status.AvailableReplicas).Should(BeEquivalentTo(replicas)) - 
Expect(rsm.Status.UpdatedReplicas).Should(BeEquivalentTo(replicas)) - Expect(rsm.Status.CurrentReplicas).Should(BeEquivalentTo(replicas)) - Expect(rsm.Status.CurrentRevisions).Should(Equal(rsm.Status.UpdateRevisions)) - Expect(rsm.Status.CurrentGeneration).Should(BeEquivalentTo(rsm.Generation)) + Expect(its.Status.Replicas).Should(BeEquivalentTo(replicas)) + Expect(its.Status.ReadyReplicas).Should(BeEquivalentTo(replicas)) + Expect(its.Status.AvailableReplicas).Should(BeEquivalentTo(replicas)) + Expect(its.Status.UpdatedReplicas).Should(BeEquivalentTo(replicas)) + Expect(its.Status.CurrentReplicas).Should(BeEquivalentTo(replicas)) + Expect(its.Status.CurrentRevisions).Should(Equal(its.Status.UpdateRevisions)) + Expect(its.Status.CurrentGeneration).Should(BeEquivalentTo(its.Generation)) }) }) }) diff --git a/pkg/controller/rsm2/reconciler_update.go b/pkg/controller/instanceset/reconciler_update.go similarity index 79% rename from pkg/controller/rsm2/reconciler_update.go rename to pkg/controller/instanceset/reconciler_update.go index 0cad322ac35..f1e66f802f6 100644 --- a/pkg/controller/rsm2/reconciler_update.go +++ b/pkg/controller/instanceset/reconciler_update.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "fmt" @@ -30,7 +30,7 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" "github.com/apecloud/kubeblocks/pkg/controller/model" - rsm1 "github.com/apecloud/kubeblocks/pkg/controller/rsm" + "github.com/apecloud/kubeblocks/pkg/controller/rsm" ) // updateReconciler handles the updates of instances based on the UpdateStrategy. 
@@ -50,22 +50,22 @@ func (r *updateReconciler) PreCondition(tree *kubebuilderx.ObjectTree) *kubebuil if model.IsReconciliationPaused(tree.GetRoot()) { return kubebuilderx.ResultUnsatisfied } - rsm, _ := tree.GetRoot().(*workloads.ReplicatedStateMachine) - if err := validateSpec(rsm, tree); err != nil { + its, _ := tree.GetRoot().(*workloads.InstanceSet) + if err := validateSpec(its, tree); err != nil { return kubebuilderx.CheckResultWithError(err) } return kubebuilderx.ResultSatisfied } func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilderx.ObjectTree, error) { - rsm, _ := tree.GetRoot().(*workloads.ReplicatedStateMachine) - rsmExt, err := buildRSMExt(rsm, tree) + its, _ := tree.GetRoot().(*workloads.InstanceSet) + itsExt, err := buildInstanceSetExt(its, tree) if err != nil { return nil, err } // 1. build desired name to template map - nameToTemplateMap, err := buildInstanceName2TemplateMap(rsmExt) + nameToTemplateMap, err := buildInstanceName2TemplateMap(itsExt) if err != nil { return nil, err } @@ -86,18 +86,18 @@ func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilde } updateNameSet := oldNameSet.Intersection(newNameSet) if len(updateNameSet) != len(oldNameSet) || len(updateNameSet) != len(newNameSet) { - tree.Logger.Info(fmt.Sprintf("RSM %s/%s instances are not aligned", rsm.Namespace, rsm.Name)) + tree.Logger.Info(fmt.Sprintf("InstanceSet %s/%s instances are not aligned", its.Namespace, its.Name)) return tree, nil } // 3. 
do update // do nothing if UpdateStrategyType is 'OnDelete' - if rsm.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType { + if its.Spec.UpdateStrategy.Type == apps.OnDeleteStatefulSetStrategyType { return tree, nil } // handle 'RollingUpdate' - partition, maxUnavailable, err := parsePartitionNMaxUnavailable(rsm.Spec.UpdateStrategy.RollingUpdate, len(oldPodList)) + partition, maxUnavailable, err := parsePartitionNMaxUnavailable(its.Spec.UpdateStrategy.RollingUpdate, len(oldPodList)) if err != nil { return nil, err } @@ -111,8 +111,8 @@ func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilde // TODO(free6om): compute updateCount from PodManagementPolicy(Serial/OrderedReady, Parallel, BestEffortParallel). // align MemberUpdateStrategy with PodManagementPolicy if it has nil value. - rsmForPlan := getRSMForUpdatePlan(rsm) - plan := rsm1.NewUpdatePlan(*rsmForPlan, oldPodList, IsPodUpdated) + itsForPlan := getInstanceSetForUpdatePlan(its) + plan := rsm.NewUpdatePlan(*itsForPlan, oldPodList, IsPodUpdated) podsToBeUpdated, err := plan.Execute() if err != nil { return nil, err @@ -121,7 +121,7 @@ func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilde updatingPods := 0 updatedPods := 0 - priorities := rsm1.ComposeRolePriorityMap(rsm.Spec.Roles) + priorities := rsm.ComposeRolePriorityMap(its.Spec.Roles) sortObjects(oldPodList, priorities, true) for _, pod := range oldPodList { if updatingPods >= updateCount || updatingPods >= unavailable { @@ -132,19 +132,19 @@ func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilde } if !isHealthy(pod) { - tree.Logger.Info(fmt.Sprintf("RSM %s/%s blocks on scale-in as the pod %s is not healthy", rsm.Namespace, rsm.Name, pod.Name)) + tree.Logger.Info(fmt.Sprintf("InstanceSet %s/%s blocks on scale-in as the pod %s is not healthy", its.Namespace, its.Name, pod.Name)) break } if err != nil { return nil, err } - updatePolicy, err := getPodUpdatePolicy(rsm, 
pod) + updatePolicy, err := getPodUpdatePolicy(its, pod) if err != nil { return nil, err } if updatePolicy == InPlaceUpdatePolicy { - newInstance, err := buildInstanceByTemplate(pod.Name, nameToTemplateMap[pod.Name], rsm, getPodRevision(pod)) + newInstance, err := buildInstanceByTemplate(pod.Name, nameToTemplateMap[pod.Name], its, getPodRevision(pod)) if err != nil { return nil, err } @@ -166,17 +166,17 @@ func (r *updateReconciler) Reconcile(tree *kubebuilderx.ObjectTree) (*kubebuilde return tree, nil } -func getRSMForUpdatePlan(rsm *workloads.ReplicatedStateMachine) *workloads.ReplicatedStateMachine { - if rsm.Spec.MemberUpdateStrategy != nil { - return rsm +func getInstanceSetForUpdatePlan(its *workloads.InstanceSet) *workloads.InstanceSet { + if its.Spec.MemberUpdateStrategy != nil { + return its } - rsmForPlan := rsm.DeepCopy() + itsForPlan := its.DeepCopy() updateStrategy := workloads.SerialUpdateStrategy - if rsm.Spec.PodManagementPolicy == apps.ParallelPodManagement { + if its.Spec.PodManagementPolicy == apps.ParallelPodManagement { updateStrategy = workloads.ParallelUpdateStrategy } - rsmForPlan.Spec.MemberUpdateStrategy = &updateStrategy - return rsmForPlan + itsForPlan.Spec.MemberUpdateStrategy = &updateStrategy + return itsForPlan } func parsePartitionNMaxUnavailable(rollingUpdate *apps.RollingUpdateStatefulSetStrategy, replicas int) (int, int, error) { diff --git a/pkg/controller/rsm2/reconciler_update_test.go b/pkg/controller/instanceset/reconciler_update_test.go similarity index 91% rename from pkg/controller/rsm2/reconciler_update_test.go rename to pkg/controller/instanceset/reconciler_update_test.go index 0b515c8c893..05f114528cc 100644 --- a/pkg/controller/rsm2/reconciler_update_test.go +++ b/pkg/controller/instanceset/reconciler_update_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
*/ -package rsm2 +package instanceset import ( "time" @@ -39,7 +39,7 @@ import ( var _ = Describe("update reconciler test", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + its = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). SetReplicas(3). AddMatchLabelsInMap(selectors). @@ -53,29 +53,29 @@ var _ = Describe("update reconciler test", func() { Context("PreCondition & Reconcile", func() { It("should work well", func() { By("PreCondition") - rsm.Generation = 1 + its.Generation = 1 tree := kubebuilderx.NewObjectTree() - tree.SetRoot(rsm) + tree.SetRoot(its) reconciler = NewUpdateReconciler() Expect(reconciler.PreCondition(tree)).Should(Equal(kubebuilderx.ResultSatisfied)) By("prepare current tree") // desired: bar-0, bar-1, bar-2, bar-3, bar-foo-0, bar-foo-1, bar-hello-0 replicas := int32(7) - rsm.Spec.Replicas = &replicas - rsm.Spec.PodManagementPolicy = appsv1.ParallelPodManagement + its.Spec.Replicas = &replicas + its.Spec.PodManagementPolicy = appsv1.ParallelPodManagement nameHello := "hello" instanceHello := workloads.InstanceTemplate{ Name: nameHello, } - rsm.Spec.Instances = append(rsm.Spec.Instances, instanceHello) + its.Spec.Instances = append(its.Spec.Instances, instanceHello) generateNameFoo := "foo" replicasFoo := int32(2) instanceFoo := workloads.InstanceTemplate{ Name: generateNameFoo, Replicas: &replicasFoo, } - rsm.Spec.Instances = append(rsm.Spec.Instances, instanceFoo) + its.Spec.Instances = append(its.Spec.Instances, instanceFoo) // prepare for update By("fix meta") @@ -130,7 +130,7 @@ var _ = Describe("update reconciler test", func() { if labels == nil { labels = make(map[string]string) } - updateRevisions, err := getUpdateRevisions(rsm.Status.UpdateRevisions) + updateRevisions, err := getUpdateRevisions(its.Status.UpdateRevisions) Expect(err).Should(BeNil()) labels[appsv1.ControllerRevisionHashLabelKey] = updateRevisions[pod.Name] } @@ -148,7 +148,7 @@ var _ = Describe("update 
reconciler test", func() { By("reconcile with Partition=50% and MaxUnavailable=2") partitionTree, err := newTree.DeepCopy() Expect(err).Should(BeNil()) - root, ok := partitionTree.GetRoot().(*workloads.ReplicatedStateMachine) + root, ok := partitionTree.GetRoot().(*workloads.InstanceSet) Expect(ok).Should(BeTrue()) partition := int32(3) maxUnavailable := intstr.FromInt32(2) @@ -167,7 +167,7 @@ var _ = Describe("update reconciler test", func() { By("update 'bar-hello-0', 'bar-foo-1' revision to the updated value") partitionTree, err = newTree.DeepCopy() Expect(err).Should(BeNil()) - root, ok = partitionTree.GetRoot().(*workloads.ReplicatedStateMachine) + root, ok = partitionTree.GetRoot().(*workloads.InstanceSet) Expect(ok).Should(BeTrue()) root.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{ RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{ @@ -190,7 +190,7 @@ var _ = Describe("update reconciler test", func() { By("reconcile with UpdateStrategy='OnDelete'") onDeleteTree, err := newTree.DeepCopy() Expect(err).Should(BeNil()) - root, ok = onDeleteTree.GetRoot().(*workloads.ReplicatedStateMachine) + root, ok = onDeleteTree.GetRoot().(*workloads.InstanceSet) Expect(ok).Should(BeTrue()) root.Spec.UpdateStrategy.Type = appsv1.OnDeleteStatefulSetStrategyType _, err = reconciler.Reconcile(onDeleteTree) diff --git a/pkg/controller/rsm2/revision_util.go b/pkg/controller/instanceset/revision_util.go similarity index 94% rename from pkg/controller/rsm2/revision_util.go rename to pkg/controller/instanceset/revision_util.go index 552984c2ebb..7abd7bb71ae 100644 --- a/pkg/controller/rsm2/revision_util.go +++ b/pkg/controller/instanceset/revision_util.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . 
*/ -package rsm2 +package instanceset import ( "encoding/base64" @@ -47,15 +47,15 @@ var Codecs = serializer.NewCodecFactory(model.GetScheme()) var patchCodec = Codecs.LegacyCodec(workloads.SchemeGroupVersion) var controllerKind = apps.SchemeGroupVersion.WithKind("StatefulSet") -func NewRevision(rsm *workloads.ReplicatedStateMachine) (*apps.ControllerRevision, error) { - patch, err := getPatch(rsm) +func NewRevision(its *workloads.InstanceSet) (*apps.ControllerRevision, error) { + patch, err := getPatch(its) if err != nil { return nil, err } collision := int32(0) - cr, err := NewControllerRevision(rsm, + cr, err := NewControllerRevision(its, controllerKind, - rsm.Spec.Template.Labels, + its.Spec.Template.Labels, runtime.RawExtension{Raw: patch}, 1, &collision) @@ -65,7 +65,7 @@ func NewRevision(rsm *workloads.ReplicatedStateMachine) (*apps.ControllerRevisio if cr.ObjectMeta.Annotations == nil { cr.ObjectMeta.Annotations = make(map[string]string) } - for key, value := range rsm.Annotations { + for key, value := range its.Annotations { cr.ObjectMeta.Annotations[key] = value } return cr, nil @@ -75,8 +75,8 @@ func NewRevision(rsm *workloads.ReplicatedStateMachine) (*apps.ControllerRevisio // previous version. If the returned error is nil the patch is valid. The current state that we save is just the // PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously // recorded patches. 
-func getPatch(rsm *workloads.ReplicatedStateMachine) ([]byte, error) { - data, err := runtime.Encode(patchCodec, rsm) +func getPatch(its *workloads.InstanceSet) ([]byte, error) { + data, err := runtime.Encode(patchCodec, its) if err != nil { return nil, err } diff --git a/pkg/controller/rsm2/revision_util_test.go b/pkg/controller/instanceset/revision_util_test.go similarity index 98% rename from pkg/controller/rsm2/revision_util_test.go rename to pkg/controller/instanceset/revision_util_test.go index b087d29bf74..99615403a3a 100644 --- a/pkg/controller/rsm2/revision_util_test.go +++ b/pkg/controller/instanceset/revision_util_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "encoding/json" @@ -34,20 +34,17 @@ var _ = Describe("revision util test", func() { stsJSON := ` { "apiVersion": "workloads.kubeblocks.io/v1alpha1", - "kind": "ReplicatedStateMachine", + "kind": "InstanceSet", "metadata": { "annotations": { "config.kubeblocks.io/tpl-redis-metrics-config": "redis-test-redis-redis-metrics-config", "config.kubeblocks.io/tpl-redis-replication-config": "redis-test-redis-redis-replication-config", "config.kubeblocks.io/tpl-redis-scripts": "redis-test-redis-redis-scripts", - "kubeblocks.io/generation": "1", - "monitor.kubeblocks.io/agamotto.headless.rsm": "false", - "monitor.kubeblocks.io/scrape.headless.rsm": "false" + "kubeblocks.io/generation": "1" }, "creationTimestamp": "2024-01-31T11:27:08Z", "finalizers": [ - "cluster.kubeblocks.io/finalizer", - "rsm.workloads.kubeblocks.io/finalizer" + "cluster.kubeblocks.io/finalizer" ], "generation": 1, "labels": { @@ -117,7 +114,6 @@ var _ = Describe("revision util test", func() { "name": "secondary" } ], - "rsmTransformPolicy": "ToSts", "selector": { "matchLabels": { "app.kubernetes.io/instance": "redis-test", @@ -961,10 +957,10 @@ var _ = Describe("revision util test", func() { } } ` - rsm 
:= &workloads.ReplicatedStateMachine{} - err := json.Unmarshal([]byte(stsJSON), rsm) + its := &workloads.InstanceSet{} + err := json.Unmarshal([]byte(stsJSON), its) Expect(err).Should(Succeed()) - cr, err := NewRevision(rsm) + cr, err := NewRevision(its) Expect(err).Should(Succeed()) Expect(cr.Name).Should(Equal("redis-test-redis-7665b47874")) }) diff --git a/pkg/controller/rsm2/suite_test.go b/pkg/controller/instanceset/suite_test.go similarity index 95% rename from pkg/controller/rsm2/suite_test.go rename to pkg/controller/instanceset/suite_test.go index 7267c8fda36..10830f434ab 100644 --- a/pkg/controller/rsm2/suite_test.go +++ b/pkg/controller/instanceset/suite_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "encoding/json" @@ -37,7 +37,7 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/builder" "github.com/apecloud/kubeblocks/pkg/controller/kubebuilderx" - rsm1 "github.com/apecloud/kubeblocks/pkg/controller/rsm" + "github.com/apecloud/kubeblocks/pkg/controller/rsm" ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to @@ -50,15 +50,15 @@ const ( ) var ( - rsm *workloads.ReplicatedStateMachine + its *workloads.InstanceSet priorityMap map[string]int reconciler kubebuilderx.Reconciler - uid = types.UID("rsm-mock-uid") + uid = types.UID("its-mock-uid") selectors = map[string]string{ - constant.AppInstanceLabelKey: name, - rsm1.WorkloadsManagedByLabelKey: rsm1.KindReplicatedStateMachine, + constant.AppInstanceLabelKey: name, + rsm.WorkloadsManagedByLabelKey: rsm.KindReplicatedStateMachine, } roles = []workloads.ReplicaRole{ { @@ -198,7 +198,7 @@ func buildRandomPod() *corev1.Pod { func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "RSM2 Suite") + RunSpecs(t, "InstanceSet Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/controller/rsm2/tree_loader.go b/pkg/controller/instanceset/tree_loader.go similarity index 95% rename from pkg/controller/rsm2/tree_loader.go rename to pkg/controller/instanceset/tree_loader.go index dc6db1f03dd..322c7334c5e 100644 --- a/pkg/controller/rsm2/tree_loader.go +++ b/pkg/controller/instanceset/tree_loader.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "context" @@ -40,7 +40,7 @@ type treeLoader struct{} func (r *treeLoader) Load(ctx context.Context, reader client.Reader, req ctrl.Request, recorder record.EventRecorder, logger logr.Logger) (*kubebuilderx.ObjectTree, error) { ml := getMatchLabels(req.Name) kinds := ownedKinds() - tree, err := kubebuilderx.ReadObjectTree[*workloads.ReplicatedStateMachine](ctx, reader, req, ml, kinds...) + tree, err := kubebuilderx.ReadObjectTree[*workloads.InstanceSet](ctx, reader, req, ml, kinds...) 
if err != nil { return nil, err } diff --git a/pkg/controller/rsm2/tree_loader_test.go b/pkg/controller/instanceset/tree_loader_test.go similarity index 93% rename from pkg/controller/rsm2/tree_loader_test.go rename to pkg/controller/instanceset/tree_loader_test.go index 20ff705bcc3..87224e12196 100644 --- a/pkg/controller/rsm2/tree_loader_test.go +++ b/pkg/controller/instanceset/tree_loader_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "context" @@ -49,7 +49,7 @@ var _ = Describe("tree loader test", func() { templateObj, annotation, err := mockCompressedInstanceTemplates(namespace, name) Expect(err).Should(BeNil()) - root := builder.NewReplicatedStateMachineBuilder(namespace, name).AddAnnotations(templateRefAnnotationKey, annotation).GetObject() + root := builder.NewInstanceSetBuilder(namespace, name).AddAnnotations(templateRefAnnotationKey, annotation).GetObject() obj0 := builder.NewPodBuilder(namespace, name+"-0").GetObject() obj1 := builder.NewPodBuilder(namespace, name+"-1").GetObject() obj2 := builder.NewPodBuilder(namespace, name+"-2").GetObject() @@ -57,8 +57,8 @@ var _ = Describe("tree loader test", func() { Expect(controllerutil.SetControllerReference(root, pod, model.GetScheme())).Should(Succeed()) } k8sMock.EXPECT(). - Get(gomock.Any(), gomock.Any(), &workloads.ReplicatedStateMachine{}, gomock.Any()). - DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *workloads.ReplicatedStateMachine, _ ...client.GetOption) error { + Get(gomock.Any(), gomock.Any(), &workloads.InstanceSet{}, gomock.Any()). 
+ DoAndReturn(func(_ context.Context, objKey client.ObjectKey, obj *workloads.InstanceSet, _ ...client.GetOption) error { *obj = *root return nil }).Times(1) diff --git a/pkg/controller/rsm2/types.go b/pkg/controller/instanceset/types.go similarity index 91% rename from pkg/controller/rsm2/types.go rename to pkg/controller/instanceset/types.go index 7d809251cc5..0d9591192a3 100644 --- a/pkg/controller/rsm2/types.go +++ b/pkg/controller/instanceset/types.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset type ReplicaProvider string @@ -27,15 +27,15 @@ const ( ) const ( - // FeatureGateRSMReplicaProvider determines the instance provider for the RSM controller. - A instance provider is responsible for managing the underlying API resources required for the smooth operation of the RSM. + // FeatureGateRSMReplicaProvider determines the instance provider for the InstanceSet controller. + // An instance provider is responsible for managing the underlying API resources required for the smooth operation of the InstanceSet. // The currently supported instance providers are StatefulSet and Pod. // Planned supported instance providers include OpenKruise Advanced StatefulSet and KB Replica. FeatureGateRSMReplicaProvider = "RSM_REPLICA_PROVIDER" defaultReplicaProvider = PodProvider - // MaxPlainRevisionCount specified max number of plain revision stored in rsm.status.updateRevisions. + // MaxPlainRevisionCount specifies the max number of plain revisions stored in status.updateRevisions. // All revisions will be compressed if exceeding this value.
MaxPlainRevisionCount = "MAX_PLAIN_REVISION_COUNT" diff --git a/pkg/controller/rsm2/utils.go b/pkg/controller/instanceset/utils.go similarity index 88% rename from pkg/controller/rsm2/utils.go rename to pkg/controller/instanceset/utils.go index 8790ccc14f5..e5bf4e4a6e5 100644 --- a/pkg/controller/rsm2/utils.go +++ b/pkg/controller/instanceset/utils.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "context" @@ -29,7 +29,7 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" "github.com/apecloud/kubeblocks/pkg/constant" - rsm1 "github.com/apecloud/kubeblocks/pkg/controller/rsm" + "github.com/apecloud/kubeblocks/pkg/controller/rsm" viper "github.com/apecloud/kubeblocks/pkg/viperx" ) @@ -84,16 +84,16 @@ func CurrentReplicaProvider(ctx context.Context, cli client.Reader, objectKey cl func getMatchLabels(name string) map[string]string { return map[string]string{ - rsm1.WorkloadsManagedByLabelKey: managedBy, - rsm1.WorkloadsInstanceLabelKey: name, + rsm.WorkloadsManagedByLabelKey: managedBy, + rsm.WorkloadsInstanceLabelKey: name, } } -func getSvcSelector(rsm *workloads.ReplicatedStateMachine, headless bool) map[string]string { +func getSvcSelector(its *workloads.InstanceSet, headless bool) map[string]string { selectors := make(map[string]string) if !headless { - for _, role := range rsm.Spec.Roles { + for _, role := range its.Spec.Roles { if role.IsLeader && len(role.Name) > 0 { selectors[constant.RoleLabelKey] = role.Name break @@ -101,7 +101,7 @@ func getSvcSelector(rsm *workloads.ReplicatedStateMachine, headless bool) map[st } } - for k, v := range rsm.Spec.Selector.MatchLabels { + for k, v := range its.Spec.Selector.MatchLabels { selectors[k] = v } return selectors diff --git a/pkg/controller/rsm2/utils_test.go b/pkg/controller/instanceset/utils_test.go similarity index 99% rename from 
pkg/controller/rsm2/utils_test.go rename to pkg/controller/instanceset/utils_test.go index d8910238b06..7b23e4d5e11 100644 --- a/pkg/controller/rsm2/utils_test.go +++ b/pkg/controller/instanceset/utils_test.go @@ -17,7 +17,7 @@ You should have received a copy of the GNU Affero General Public License along with this program. If not, see . */ -package rsm2 +package instanceset import ( "context" diff --git a/pkg/controller/kubebuilderx/controller_test.go b/pkg/controller/kubebuilderx/controller_test.go index db93a225f4e..cc69b6b066a 100644 --- a/pkg/controller/kubebuilderx/controller_test.go +++ b/pkg/controller/kubebuilderx/controller_test.go @@ -45,7 +45,7 @@ var _ = Describe("controller test", func() { It("should work well", func() { ctx := context.Background() req := ctrl.Request{} - logger := log.FromContext(ctx).WithValues("ReplicatedStateMachine", "test") + logger := log.FromContext(ctx).WithValues("InstanceSet", "test") controller := NewController(context.Background(), nil, req, nil, logger) tree := NewObjectTree() diff --git a/pkg/controller/kubebuilderx/plan_builder.go b/pkg/controller/kubebuilderx/plan_builder.go index 857127cf6f3..9911ae474e4 100644 --- a/pkg/controller/kubebuilderx/plan_builder.go +++ b/pkg/controller/kubebuilderx/plan_builder.go @@ -98,7 +98,7 @@ func (b *PlanBuilder) AddParallelTransformer(_ ...graph.Transformer) graph.PlanB func (b *PlanBuilder) Build() (graph.Plan, error) { vertices := buildOrderedVertices(b.transCtx.GetContext(), b.currentTree, b.desiredTree) plan := &Plan{ - walkFunc: b.rsmWalkFunc, + walkFunc: b.defaultWalkFunc, vertices: vertices, } return plan, nil @@ -219,7 +219,7 @@ func (p *Plan) Execute() error { // Do the real works -func (b *PlanBuilder) rsmWalkFunc(v graph.Vertex) error { +func (b *PlanBuilder) defaultWalkFunc(v graph.Vertex) error { vertex, ok := v.(*model.ObjectVertex) if !ok { return fmt.Errorf("wrong vertex type %v", v) diff --git a/pkg/controller/kubebuilderx/plan_builder_test.go 
b/pkg/controller/kubebuilderx/plan_builder_test.go index 57359d51530..74b3bfd80c1 100644 --- a/pkg/controller/kubebuilderx/plan_builder_test.go +++ b/pkg/controller/kubebuilderx/plan_builder_test.go @@ -37,45 +37,45 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" "github.com/apecloud/kubeblocks/pkg/controller/builder" "github.com/apecloud/kubeblocks/pkg/controller/model" - rsm1 "github.com/apecloud/kubeblocks/pkg/controller/rsm" mockclient "github.com/apecloud/kubeblocks/pkg/testutil/k8s/mocks" ) var _ = Describe("plan builder test", func() { - Context("rsmWalkFunc function", func() { + Context("defaultWalkFunc function", func() { const ( namespace = "foo" name = "bar" + finalizer = "test" ) var ( planBuilder *PlanBuilder - rsm *workloads.ReplicatedStateMachine + its *workloads.InstanceSet ) BeforeEach(func() { bldr := NewPlanBuilder(ctx, k8sMock, nil, nil, nil, logger) planBuilder, _ = bldr.(*PlanBuilder) - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). - AddFinalizers([]string{rsm1.GetFinalizer(&workloads.ReplicatedStateMachine{})}). + its = builder.NewInstanceSetBuilder(namespace, name). + AddFinalizers([]string{finalizer}). GetObject() }) It("should create object", func() { v := &model.ObjectVertex{ - Obj: rsm, + Obj: its, Action: model.ActionCreatePtr(), } k8sMock.EXPECT(). Create(gomock.Any(), gomock.Any(), gomock.Any()). 
- DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.CreateOption) error { + DoAndReturn(func(_ context.Context, obj *workloads.InstanceSet, _ ...client.CreateOption) error { Expect(obj).ShouldNot(BeNil()) - Expect(obj.Namespace).Should(Equal(rsm.Namespace)) - Expect(obj.Name).Should(Equal(rsm.Name)) - Expect(obj.Finalizers).Should(Equal(rsm.Finalizers)) + Expect(obj.Namespace).Should(Equal(its.Namespace)) + Expect(obj.Name).Should(Equal(its.Name)) + Expect(obj.Finalizers).Should(Equal(its.Finalizers)) return nil }).Times(1) - Expect(planBuilder.rsmWalkFunc(v)).Should(Succeed()) + Expect(planBuilder.defaultWalkFunc(v)).Should(Succeed()) }) It("should update sts object", func() { @@ -99,7 +99,7 @@ var _ = Describe("plan builder test", func() { Expect(obj.Spec.UpdateStrategy).Should(Equal(sts.Spec.UpdateStrategy)) return nil }).Times(1) - Expect(planBuilder.rsmWalkFunc(v)).Should(Succeed()) + Expect(planBuilder.defaultWalkFunc(v)).Should(Succeed()) }) It("should update svc object", func() { @@ -120,7 +120,7 @@ var _ = Describe("plan builder test", func() { Expect(obj.Spec).Should(Equal(svc.Spec)) return nil }).Times(1) - Expect(planBuilder.rsmWalkFunc(v)).Should(Succeed()) + Expect(planBuilder.defaultWalkFunc(v)).Should(Succeed()) }) It("should update pvc object", func() { @@ -145,42 +145,42 @@ var _ = Describe("plan builder test", func() { Expect(obj.Spec.Resources).Should(Equal(pvc.Spec.Resources)) return nil }).Times(1) - Expect(planBuilder.rsmWalkFunc(v)).Should(Succeed()) + Expect(planBuilder.defaultWalkFunc(v)).Should(Succeed()) }) It("should delete object", func() { v := &model.ObjectVertex{ - Obj: rsm, + Obj: its, Action: model.ActionDeletePtr(), } k8sMock.EXPECT(). Update(gomock.Any(), gomock.Any(), gomock.Any()). 
- DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.UpdateOption) error { + DoAndReturn(func(_ context.Context, obj *workloads.InstanceSet, _ ...client.UpdateOption) error { Expect(obj).ShouldNot(BeNil()) Expect(obj.Finalizers).Should(HaveLen(0)) return nil }).Times(1) k8sMock.EXPECT(). Delete(gomock.Any(), gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.DeleteOption) error { + DoAndReturn(func(_ context.Context, obj *workloads.InstanceSet, _ ...client.DeleteOption) error { Expect(obj).ShouldNot(BeNil()) - Expect(obj.Namespace).Should(Equal(rsm.Namespace)) - Expect(obj.Name).Should(Equal(rsm.Name)) + Expect(obj.Namespace).Should(Equal(its.Namespace)) + Expect(obj.Name).Should(Equal(its.Name)) Expect(obj.Finalizers).Should(HaveLen(0)) return nil }).Times(1) - Expect(planBuilder.rsmWalkFunc(v)).Should(Succeed()) + Expect(planBuilder.defaultWalkFunc(v)).Should(Succeed()) }) It("should update object status", func() { - rsm.Generation = 2 - rsm.Status.ObservedGeneration = 2 - rsmOrig := rsm.DeepCopy() - rsmOrig.Status.ObservedGeneration = 1 + its.Generation = 2 + its.Status.ObservedGeneration = 2 + itsOrig := its.DeepCopy() + itsOrig.Status.ObservedGeneration = 1 v := &model.ObjectVertex{ - Obj: rsm, - OriObj: rsmOrig, + Obj: its, + OriObj: itsOrig, Action: model.ActionStatusPtr(), } ct := gomock.NewController(GinkgoT()) @@ -190,20 +190,20 @@ var _ = Describe("plan builder test", func() { k8sMock.EXPECT().Status().Return(statusWriter), statusWriter.EXPECT(). Update(gomock.Any(), gomock.Any(), gomock.Any()). 
- DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.UpdateOption) error { + DoAndReturn(func(_ context.Context, obj *workloads.InstanceSet, _ ...client.UpdateOption) error { Expect(obj).ShouldNot(BeNil()) - Expect(obj.Namespace).Should(Equal(rsm.Namespace)) - Expect(obj.Name).Should(Equal(rsm.Name)) - Expect(obj.Status.ObservedGeneration).Should(Equal(rsm.Status.ObservedGeneration)) + Expect(obj.Namespace).Should(Equal(its.Namespace)) + Expect(obj.Name).Should(Equal(its.Name)) + Expect(obj.Status.ObservedGeneration).Should(Equal(its.Status.ObservedGeneration)) return nil }).Times(1), ) - Expect(planBuilder.rsmWalkFunc(v)).Should(Succeed()) + Expect(planBuilder.defaultWalkFunc(v)).Should(Succeed()) }) It("should return error if no action set", func() { v := &model.ObjectVertex{} - err := planBuilder.rsmWalkFunc(v) + err := planBuilder.defaultWalkFunc(v) Expect(err).ShouldNot(BeNil()) Expect(err.Error()).Should(ContainSubstring("vertex action can't be nil")) }) @@ -212,7 +212,7 @@ var _ = Describe("plan builder test", func() { v := &model.ObjectVertex{ Action: model.ActionNoopPtr(), } - Expect(planBuilder.rsmWalkFunc(v)).Should(Succeed()) + Expect(planBuilder.defaultWalkFunc(v)).Should(Succeed()) }) }) @@ -223,13 +223,13 @@ var _ = Describe("plan builder test", func() { ) var ( - rsm *workloads.ReplicatedStateMachine + its *workloads.InstanceSet currentTree *ObjectTree desiredTree *ObjectTree ) BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + its = builder.NewInstanceSetBuilder(namespace, name). AddLabels(constant.AppComponentLabelKey, name). SetReplicas(3). 
GetObject() @@ -250,18 +250,18 @@ var _ = Describe("plan builder test", func() { pod := builder.NewPodBuilder(namespace, name).GetObject() headlessSvc := builder.NewHeadlessServiceBuilder(namespace, name+"-headless").GetObject() svc := builder.NewServiceBuilder(namespace, name).GetObject() - env := builder.NewConfigMapBuilder(namespace, name+"-rsm-env").GetObject() + env := builder.NewConfigMapBuilder(namespace, name+"-its-env").GetObject() var verticesExpected []*model.ObjectVertex - verticesExpected = append(verticesExpected, newVertex(rsm.DeepCopy(), rsm, model.ActionStatusPtr())) + verticesExpected = append(verticesExpected, newVertex(its.DeepCopy(), its, model.ActionStatusPtr())) verticesExpected = append(verticesExpected, newVertex(nil, pod, model.ActionCreatePtr())) verticesExpected = append(verticesExpected, newVertex(nil, headlessSvc, model.ActionCreatePtr())) verticesExpected = append(verticesExpected, newVertex(nil, svc, model.ActionCreatePtr())) verticesExpected = append(verticesExpected, newVertex(nil, env, model.ActionCreatePtr())) // build ordered vertices - currentTree.SetRoot(rsm) - desiredTree.SetRoot(rsm) + currentTree.SetRoot(its) + desiredTree.SetRoot(its) Expect(desiredTree.Add(pod, headlessSvc, svc, env)).Should(Succeed()) vertices := buildOrderedVertices(ctx, currentTree, desiredTree) diff --git a/pkg/controller/kubebuilderx/reconciler_test.go b/pkg/controller/kubebuilderx/reconciler_test.go index 91b8323e90e..c9aa8400ed5 100644 --- a/pkg/controller/kubebuilderx/reconciler_test.go +++ b/pkg/controller/kubebuilderx/reconciler_test.go @@ -43,7 +43,7 @@ var _ = Describe("reconciler test", func() { Expect(tree).Should(Equal(expectedTree)) By("SetRoot & GetRoot") - root := builder.NewReplicatedStateMachineBuilder(namespace, name).GetObject() + root := builder.NewInstanceSetBuilder(namespace, name).GetObject() tree.SetRoot(root) expectedTree.root = root Expect(tree).Should(Equal(expectedTree)) diff --git a/pkg/controller/plan/restore.go 
b/pkg/controller/plan/restore.go index bfeaa32d009..0d86b84d587 100644 --- a/pkg/controller/plan/restore.go +++ b/pkg/controller/plan/restore.go @@ -202,9 +202,9 @@ func (r *RestoreManager) DoPostReady(comp *component.SynthesizedComponent, } jobActionLabels := constant.GetComponentWellKnownLabels(r.Cluster.Name, comp.Name) if comp.WorkloadType == appsv1alpha1.Consensus || comp.WorkloadType == appsv1alpha1.Replication { - // TODO: use rsm constant - rsmAccessModeLabelKey := "rsm.workloads.kubeblocks.io/access-mode" - jobActionLabels[rsmAccessModeLabelKey] = string(appsv1alpha1.ReadWrite) + // TODO: use ITS constant + itsAccessModeLabelKey := "rsm.workloads.kubeblocks.io/access-mode" + jobActionLabels[itsAccessModeLabelKey] = string(appsv1alpha1.ReadWrite) } sourceTargetName := compObj.Annotations[constant.BackupSourceTargetAnnotationKey] restore := &dpv1alpha1.Restore{ diff --git a/pkg/controller/rsm/plan_builder.go b/pkg/controller/rsm/plan_builder.go index bdd6090ec0c..e3110428470 100644 --- a/pkg/controller/rsm/plan_builder.go +++ b/pkg/controller/rsm/plan_builder.go @@ -59,11 +59,11 @@ func init() { // PlanBuilder implementation func (b *PlanBuilder) Init() error { - rsm := &workloads.ReplicatedStateMachine{} + rsm := &workloads.InstanceSet{} if err := b.cli.Get(b.transCtx.Context, b.req.NamespacedName, rsm); err != nil { return err } - b.AddTransformer(&initTransformer{ReplicatedStateMachine: rsm}) + b.AddTransformer(&initTransformer{InstanceSet: rsm}) return nil } diff --git a/pkg/controller/rsm/plan_builder_test.go b/pkg/controller/rsm/plan_builder_test.go index 28a28531e3f..9bda9ebbea9 100644 --- a/pkg/controller/rsm/plan_builder_test.go +++ b/pkg/controller/rsm/plan_builder_test.go @@ -53,8 +53,8 @@ var _ = Describe("plan builder test", func() { planBuilder := NewRSMPlanBuilder(reqCtx, cli, req) rsmBuilder, _ = planBuilder.(*PlanBuilder) - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). 
- AddFinalizers([]string{GetFinalizer(&workloads.ReplicatedStateMachine{})}). + rsm = builder.NewInstanceSetBuilder(namespace, name). + AddFinalizers([]string{GetFinalizer(&workloads.InstanceSet{})}). GetObject() }) @@ -65,7 +65,7 @@ var _ = Describe("plan builder test", func() { } k8sMock.EXPECT(). Create(gomock.Any(), gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.CreateOption) error { + DoAndReturn(func(_ context.Context, obj *workloads.InstanceSet, _ ...client.CreateOption) error { Expect(obj).ShouldNot(BeNil()) Expect(obj.Namespace).Should(Equal(rsm.Namespace)) Expect(obj.Name).Should(Equal(rsm.Name)) @@ -152,14 +152,14 @@ var _ = Describe("plan builder test", func() { } k8sMock.EXPECT(). Update(gomock.Any(), gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.UpdateOption) error { + DoAndReturn(func(_ context.Context, obj *workloads.InstanceSet, _ ...client.UpdateOption) error { Expect(obj).ShouldNot(BeNil()) Expect(obj.Finalizers).Should(HaveLen(0)) return nil }).Times(1) k8sMock.EXPECT(). Delete(gomock.Any(), gomock.Any(), gomock.Any()). - DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.DeleteOption) error { + DoAndReturn(func(_ context.Context, obj *workloads.InstanceSet, _ ...client.DeleteOption) error { Expect(obj).ShouldNot(BeNil()) Expect(obj.Namespace).Should(Equal(rsm.Namespace)) Expect(obj.Name).Should(Equal(rsm.Name)) @@ -187,7 +187,7 @@ var _ = Describe("plan builder test", func() { k8sMock.EXPECT().Status().Return(statusWriter), statusWriter.EXPECT(). Update(gomock.Any(), gomock.Any(), gomock.Any()). 
- DoAndReturn(func(_ context.Context, obj *workloads.ReplicatedStateMachine, _ ...client.UpdateOption) error { + DoAndReturn(func(_ context.Context, obj *workloads.InstanceSet, _ ...client.UpdateOption) error { Expect(obj).ShouldNot(BeNil()) Expect(obj.Namespace).Should(Equal(rsm.Namespace)) Expect(obj.Name).Should(Equal(rsm.Name)) diff --git a/pkg/controller/rsm/pod_role_event_handler.go b/pkg/controller/rsm/pod_role_event_handler.go index e6729d5aadc..227bf9d1862 100644 --- a/pkg/controller/rsm/pod_role_event_handler.go +++ b/pkg/controller/rsm/pod_role_event_handler.go @@ -136,7 +136,7 @@ func handleRoleChangedEvent(cli client.Client, reqCtx intctrlutil.RequestCtx, re } name, _ := intctrlutil.GetParentNameAndOrdinal(pod) - rsm := &workloads.ReplicatedStateMachine{} + rsm := &workloads.InstanceSet{} if err := cli.Get(reqCtx.Ctx, types.NamespacedName{Namespace: pod.Namespace, Name: name}, rsm); err != nil { return "", err } @@ -197,7 +197,7 @@ func parseProbeEventMessage(reqCtx intctrlutil.RequestCtx, event *corev1.Event) // updatePodRoleLabel updates pod role label when internal container role changed func updatePodRoleLabel(cli client.Client, reqCtx intctrlutil.RequestCtx, - rsm workloads.ReplicatedStateMachine, pod *corev1.Pod, roleName string, version string) error { + rsm workloads.InstanceSet, pod *corev1.Pod, roleName string, version string) error { ctx := reqCtx.Ctx roleMap := composeRoleMap(rsm) // role not defined in CR, ignore it diff --git a/pkg/controller/rsm/pod_role_event_handler_test.go b/pkg/controller/rsm/pod_role_event_handler_test.go index 263b4409404..9d8700a38f0 100644 --- a/pkg/controller/rsm/pod_role_event_handler_test.go +++ b/pkg/controller/rsm/pod_role_event_handler_test.go @@ -79,8 +79,8 @@ var _ = Describe("pod role label event handler test", func() { return nil }).Times(1) k8sMock.EXPECT(). - Get(gomock.Any(), gomock.Any(), &workloads.ReplicatedStateMachine{}, gomock.Any()). 
- DoAndReturn(func(_ context.Context, objKey client.ObjectKey, rsm *workloads.ReplicatedStateMachine, _ ...client.GetOption) error { + Get(gomock.Any(), gomock.Any(), &workloads.InstanceSet{}, gomock.Any()). + DoAndReturn(func(_ context.Context, objKey client.ObjectKey, rsm *workloads.InstanceSet, _ ...client.GetOption) error { rsm.Namespace = objKey.Namespace rsm.Name = objKey.Name rsm.Spec.Roles = []workloads.ReplicaRole{role} diff --git a/pkg/controller/rsm/suite_test.go b/pkg/controller/rsm/suite_test.go index dfe9bac2488..867b05460b0 100644 --- a/pkg/controller/rsm/suite_test.go +++ b/pkg/controller/rsm/suite_test.go @@ -155,7 +155,7 @@ var ( observeActions = []workloads.Action{{Command: []string{"cmd"}}} - rsm *workloads.ReplicatedStateMachine + rsm *workloads.InstanceSet ) func less(v1, v2 graph.Vertex) bool { @@ -176,7 +176,7 @@ func makePodUpdateReady(newRevision string, roleful bool, pods ...*corev1.Pod) { } } -func mockUnderlyingSts(rsm workloads.ReplicatedStateMachine, generation int64) *apps.StatefulSet { +func mockUnderlyingSts(rsm workloads.InstanceSet, generation int64) *apps.StatefulSet { labels := getLabels(&rsm) headlessSelectors := getSvcSelector(&rsm, true) headLessSvc := BuildHeadlessSvc(rsm, labels, headlessSelectors) @@ -207,7 +207,7 @@ func init() { func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "ReplicatedStateMachine Suite") + RunSpecs(t, "InstanceSet Suite") } var _ = BeforeSuite(func() { diff --git a/pkg/controller/rsm/transformer_deletion_test.go b/pkg/controller/rsm/transformer_deletion_test.go index 4efd36d66d3..83fbbad88db 100644 --- a/pkg/controller/rsm/transformer_deletion_test.go +++ b/pkg/controller/rsm/transformer_deletion_test.go @@ -42,7 +42,7 @@ import ( var _ = Describe("object deletion transformer test.", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + rsm = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). AddMatchLabelsInMap(selectors). 
SetServiceName(headlessSvcName). @@ -81,7 +81,7 @@ var _ = Describe("object deletion transformer test.", func() { controller := true sts.OwnerReferences = []metav1.OwnerReference{ { - Kind: reflect.TypeOf(workloads.ReplicatedStateMachine{}).Name(), + Kind: reflect.TypeOf(workloads.InstanceSet{}).Name(), Controller: &controller, }, } @@ -90,14 +90,14 @@ var _ = Describe("object deletion transformer test.", func() { headLessSvc := BuildHeadlessSvc(*rsm, labels, headlessSelectors) headLessSvc.SetOwnerReferences([]metav1.OwnerReference{ { - Kind: reflect.TypeOf(workloads.ReplicatedStateMachine{}).Name(), + Kind: reflect.TypeOf(workloads.InstanceSet{}).Name(), Controller: &controller, }, }) envConfig := BuildEnvConfigMap(*rsm, labels) envConfig.SetOwnerReferences([]metav1.OwnerReference{ { - Kind: reflect.TypeOf(workloads.ReplicatedStateMachine{}).Name(), + Kind: reflect.TypeOf(workloads.InstanceSet{}).Name(), Controller: &controller, }, }) @@ -113,7 +113,7 @@ var _ = Describe("object deletion transformer test.", func() { action := buildAction(rsm, actionName, jobTypeSwitchover, jobScenarioMembership, "", "") action.SetOwnerReferences([]metav1.OwnerReference{ { - Kind: reflect.TypeOf(workloads.ReplicatedStateMachine{}).Name(), + Kind: reflect.TypeOf(workloads.InstanceSet{}).Name(), Controller: &controller, }, }) diff --git a/pkg/controller/rsm/transformer_fix_meta_test.go b/pkg/controller/rsm/transformer_fix_meta_test.go index c93a02001a4..630356ccb95 100644 --- a/pkg/controller/rsm/transformer_fix_meta_test.go +++ b/pkg/controller/rsm/transformer_fix_meta_test.go @@ -33,7 +33,7 @@ import ( var _ = Describe("fix meta transformer test.", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + rsm = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). SetReplicas(3). 
GetObject() @@ -64,7 +64,7 @@ var _ = Describe("fix meta transformer test.", func() { Expect(dag.Equals(dagExpected, less)).Should(BeTrue()) root, err := model.FindRootVertex(dag) Expect(err).Should(BeNil()) - rsmNew, ok := root.Obj.(*workloads.ReplicatedStateMachine) + rsmNew, ok := root.Obj.(*workloads.InstanceSet) Expect(ok).Should(BeTrue()) Expect(rsmNew.Finalizers).ShouldNot(BeNil()) Expect(slices.Contains(rsmNew.Finalizers, GetFinalizer(rsmNew))).Should(BeTrue()) diff --git a/pkg/controller/rsm/transformer_init.go b/pkg/controller/rsm/transformer_init.go index 5013aa7c486..54ea5c6f9ae 100644 --- a/pkg/controller/rsm/transformer_init.go +++ b/pkg/controller/rsm/transformer_init.go @@ -26,7 +26,7 @@ import ( ) type initTransformer struct { - *workloads.ReplicatedStateMachine + *workloads.InstanceSet } var _ graph.Transformer = &initTransformer{} @@ -34,11 +34,11 @@ var _ graph.Transformer = &initTransformer{} func (t *initTransformer) Transform(ctx graph.TransformContext, dag *graph.DAG) error { // init context transCtx, _ := ctx.(*rsmTransformContext) - transCtx.rsm, transCtx.rsmOrig = t.ReplicatedStateMachine, t.ReplicatedStateMachine.DeepCopy() + transCtx.rsm, transCtx.rsmOrig = t.InstanceSet, t.InstanceSet.DeepCopy() graphCli, _ := transCtx.Client.(model.GraphClient) // stop reconciliation if paused=true - if t.ReplicatedStateMachine.Spec.Paused { + if t.InstanceSet.Spec.Paused { graphCli.Root(dag, transCtx.rsmOrig, transCtx.rsm, model.ActionNoopPtr()) return graph.ErrPrematureStop } diff --git a/pkg/controller/rsm/transformer_init_test.go b/pkg/controller/rsm/transformer_init_test.go index df73c49294d..cb50551815c 100644 --- a/pkg/controller/rsm/transformer_init_test.go +++ b/pkg/controller/rsm/transformer_init_test.go @@ -30,7 +30,7 @@ import ( var _ = Describe("init transformer test.", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + rsm = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). 
SetReplicas(3). GetObject() @@ -43,7 +43,7 @@ var _ = Describe("init transformer test.", func() { } dag = graph.NewDAG() - transformer = &initTransformer{ReplicatedStateMachine: rsm} + transformer = &initTransformer{InstanceSet: rsm} }) Context("dag init", func() { diff --git a/pkg/controller/rsm/transformer_member_reconfiguration.go b/pkg/controller/rsm/transformer_member_reconfiguration.go index ec58f655758..3cb2b806808 100644 --- a/pkg/controller/rsm/transformer_member_reconfiguration.go +++ b/pkg/controller/rsm/transformer_member_reconfiguration.go @@ -78,7 +78,7 @@ func (t *MemberReconfigurationTransformer) Transform(ctx graph.TransformContext, // cluster initialization done, handle dynamic membership reconfiguration // rsm is ready - if IsRSMReady(rsm) { + if IsInstanceSetReady(rsm) { return cleanAction(transCtx, dag) } @@ -172,7 +172,7 @@ func cleanAction(transCtx *rsmTransformContext, dag *graph.DAG) error { return nil } -func isActionDone(rsm *workloads.ReplicatedStateMachine, action *batchv1.Job) bool { +func isActionDone(rsm *workloads.InstanceSet, action *batchv1.Job) bool { ordinal, _ := getActionOrdinal(action.Name) podName := getPodName(rsm.Name, ordinal) membersStatus := rsm.Status.MembersStatus @@ -199,7 +199,7 @@ func deleteAction(transCtx *rsmTransformContext, dag *graph.DAG, action *batchv1 doActionCleanup(dag, cli, action) } -func createNextAction(transCtx *rsmTransformContext, dag *graph.DAG, rsm *workloads.ReplicatedStateMachine, currentAction *batchv1.Job) error { +func createNextAction(transCtx *rsmTransformContext, dag *graph.DAG, rsm *workloads.InstanceSet, currentAction *batchv1.Job) error { actionInfoList := generateActionInfoList(rsm) if len(actionInfoList) == 0 { @@ -225,7 +225,7 @@ func createNextAction(transCtx *rsmTransformContext, dag *graph.DAG, rsm *worklo return createAction(dag, cli, rsm, nextAction) } -func generateActionInfoList(rsm *workloads.ReplicatedStateMachine) []*actionInfo { +func generateActionInfoList(rsm 
*workloads.InstanceSet) []*actionInfo { var actionInfoList []*actionInfo memberReadyReplicas := int32(len(rsm.Status.MembersStatus)) @@ -255,7 +255,7 @@ func isPreAction(actionType string) bool { return actionType == jobTypeSwitchover || actionType == jobTypeMemberLeaveNotifying } -func shouldHaveActions(rsm *workloads.ReplicatedStateMachine) bool { +func shouldHaveActions(rsm *workloads.InstanceSet) bool { currentReplicas := len(rsm.Status.MembersStatus) expectedReplicas := int(*rsm.Spec.Replicas) @@ -274,7 +274,7 @@ func shouldHaveActions(rsm *workloads.ReplicatedStateMachine) bool { return false } -func shouldCreateAction(rsm *workloads.ReplicatedStateMachine, actionType string, checker conditionChecker) bool { +func shouldCreateAction(rsm *workloads.InstanceSet, actionType string, checker conditionChecker) bool { if checker != nil && !checker() { return false } @@ -312,7 +312,7 @@ func getActionOrdinal(actionName string) (int, error) { // all members with ordinal less than action target pod should be in a good replication state: // 1. they should be in membersStatus // 2. 
they should have a leader -func abnormalAnalysis(rsm *workloads.ReplicatedStateMachine, action *batchv1.Job) error { +func abnormalAnalysis(rsm *workloads.InstanceSet, action *batchv1.Job) error { membersStatus := rsm.Status.MembersStatus statusMap := make(map[string]workloads.MemberStatus, len(membersStatus)) for _, status := range membersStatus { @@ -353,7 +353,7 @@ func abnormalAnalysis(rsm *workloads.ReplicatedStateMachine, action *batchv1.Job return nil } -func generateActionInfos(rsm *workloads.ReplicatedStateMachine, ordinal int, actionTypeList []string) []*actionInfo { +func generateActionInfos(rsm *workloads.InstanceSet, ordinal int, actionTypeList []string) []*actionInfo { var actionInfos []*actionInfo leaderPodName := getLeaderPodName(rsm.Status.MembersStatus) podName := getPodName(rsm.Name, ordinal) diff --git a/pkg/controller/rsm/transformer_member_reconfiguration_test.go b/pkg/controller/rsm/transformer_member_reconfiguration_test.go index e7baf011009..d0036b1d199 100644 --- a/pkg/controller/rsm/transformer_member_reconfiguration_test.go +++ b/pkg/controller/rsm/transformer_member_reconfiguration_test.go @@ -102,7 +102,7 @@ var _ = Describe("member reconfiguration transformer test.", func() { } BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + rsm = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). SetServiceName(headlessSvcName). AddMatchLabelsInMap(selectors). 
diff --git a/pkg/controller/rsm/transformer_object_generation.go b/pkg/controller/rsm/transformer_object_generation.go index ab2639e9cb4..3c9947873df 100644 --- a/pkg/controller/rsm/transformer_object_generation.go +++ b/pkg/controller/rsm/transformer_object_generation.go @@ -247,7 +247,7 @@ func copyAndMerge(oldObj, newObj client.Object) client.Object { } } -func BuildSvc(rsm workloads.ReplicatedStateMachine, labels, selectors map[string]string) *corev1.Service { +func BuildSvc(rsm workloads.InstanceSet, labels, selectors map[string]string) *corev1.Service { if rsm.Spec.Service == nil { return nil } @@ -262,7 +262,7 @@ func BuildSvc(rsm workloads.ReplicatedStateMachine, labels, selectors map[string GetObject() } -func BuildAlternativeSvs(rsm workloads.ReplicatedStateMachine, svcLabels map[string]string) []*corev1.Service { +func BuildAlternativeSvs(rsm workloads.InstanceSet, svcLabels map[string]string) []*corev1.Service { if rsm.Spec.Service == nil { return nil } @@ -292,7 +292,7 @@ func BuildAlternativeSvs(rsm workloads.ReplicatedStateMachine, svcLabels map[str return services } -func BuildHeadlessSvc(rsm workloads.ReplicatedStateMachine, labels, selectors map[string]string) *corev1.Service { +func BuildHeadlessSvc(rsm workloads.InstanceSet, labels, selectors map[string]string) *corev1.Service { annotations := ParseAnnotationsOfScope(HeadlessServiceScope, rsm.Annotations) hdlBuilder := builder.NewHeadlessServiceBuilder(rsm.Namespace, getHeadlessSvcName(rsm)). AddLabelsInMap(labels). 
@@ -320,7 +320,7 @@ func BuildHeadlessSvc(rsm workloads.ReplicatedStateMachine, labels, selectors ma return hdlBuilder.GetObject() } -func buildSts(rsm *workloads.ReplicatedStateMachine, headlessSvcName string, labels map[string]string) *apps.StatefulSet { +func buildSts(rsm *workloads.InstanceSet, headlessSvcName string, labels map[string]string) *apps.StatefulSet { envConfigName := GetEnvConfigMapName(rsm.Name) template := BuildPodTemplate(rsm, envConfigName) annotations := ParseAnnotationsOfScope(RootScope, rsm.Annotations) @@ -339,7 +339,7 @@ func buildSts(rsm *workloads.ReplicatedStateMachine, headlessSvcName string, lab GetObject() } -func BuildEnvConfigMap(rsm workloads.ReplicatedStateMachine, labels map[string]string) *corev1.ConfigMap { +func BuildEnvConfigMap(rsm workloads.InstanceSet, labels map[string]string) *corev1.ConfigMap { envData := buildEnvConfigData(rsm) annotations := ParseAnnotationsOfScope(ConfigMapScope, rsm.Annotations) return builder.NewConfigMapBuilder(rsm.Namespace, GetEnvConfigMapName(rsm.Name)). 
@@ -348,7 +348,7 @@ func BuildEnvConfigMap(rsm workloads.ReplicatedStateMachine, labels map[string]s SetData(envData).GetObject() } -func BuildPodTemplate(rsm *workloads.ReplicatedStateMachine, envConfigName string) *corev1.PodTemplateSpec { +func BuildPodTemplate(rsm *workloads.InstanceSet, envConfigName string) *corev1.PodTemplateSpec { template := rsm.Spec.Template.DeepCopy() // inject env ConfigMap into workload pods only for i := range template.Spec.Containers { @@ -367,7 +367,7 @@ func BuildPodTemplate(rsm *workloads.ReplicatedStateMachine, envConfigName strin return template } -func injectRoleProbeContainer(rsm *workloads.ReplicatedStateMachine, template *corev1.PodTemplateSpec) { +func injectRoleProbeContainer(rsm *workloads.InstanceSet, template *corev1.PodTemplateSpec) { roleProbe := rsm.Spec.RoleProbe if roleProbe == nil { return @@ -436,7 +436,7 @@ func buildActionSvcPorts(template *corev1.PodTemplateSpec, actions []workloads.A return actionSvcPorts } -func injectRoleProbeBaseContainer(rsm *workloads.ReplicatedStateMachine, template *corev1.PodTemplateSpec, actionSvcList string, credentialEnv []corev1.EnvVar) { +func injectRoleProbeBaseContainer(rsm *workloads.InstanceSet, template *corev1.PodTemplateSpec, actionSvcList string, credentialEnv []corev1.EnvVar) { // compute parameters for role probe base container roleProbe := rsm.Spec.RoleProbe if roleProbe == nil { @@ -646,7 +646,7 @@ func injectRoleProbeBaseContainer(rsm *workloads.ReplicatedStateMachine, templat template.Spec.Containers = append(template.Spec.Containers, *container) } -func injectCustomRoleProbeContainer(rsm *workloads.ReplicatedStateMachine, template *corev1.PodTemplateSpec, actionSvcPorts []int32, credentialEnv []corev1.EnvVar) { +func injectCustomRoleProbeContainer(rsm *workloads.InstanceSet, template *corev1.PodTemplateSpec, actionSvcPorts []int32, credentialEnv []corev1.EnvVar) { if rsm.Spec.RoleProbe == nil { return } @@ -705,7 +705,7 @@ func injectCustomRoleProbeContainer(rsm 
*workloads.ReplicatedStateMachine, templ } } -func buildEnvConfigData(set workloads.ReplicatedStateMachine) map[string]string { +func buildEnvConfigData(set workloads.InstanceSet) map[string]string { envData := map[string]string{} svcName := getHeadlessSvcName(set) uid := string(set.UID) diff --git a/pkg/controller/rsm/transformer_objection_generation_test.go b/pkg/controller/rsm/transformer_objection_generation_test.go index cc8b8ebf9fd..97d7ed396d5 100644 --- a/pkg/controller/rsm/transformer_objection_generation_test.go +++ b/pkg/controller/rsm/transformer_objection_generation_test.go @@ -36,7 +36,7 @@ import ( var _ = Describe("object generation transformer test.", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + rsm = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). AddLabels(constant.AppComponentLabelKey, name). SetReplicas(3). @@ -66,7 +66,7 @@ var _ = Describe("object generation transformer test.", func() { sts := builder.NewStatefulSetBuilder(namespace, name).GetObject() headlessSvc := builder.NewHeadlessServiceBuilder(name, getHeadlessSvcName(*rsm)).GetObject() svc := builder.NewServiceBuilder(name, name).GetObject() - env := builder.NewConfigMapBuilder(name, name+"-rsm-env").GetObject() + env := builder.NewConfigMapBuilder(name, GetEnvConfigMapName(name)).GetObject() k8sMock.EXPECT(). List(gomock.Any(), &apps.StatefulSetList{}, gomock.Any()). DoAndReturn(func(_ context.Context, list *apps.StatefulSetList, _ ...client.ListOption) error { diff --git a/pkg/controller/rsm/transformer_status_test.go b/pkg/controller/rsm/transformer_status_test.go index 33319ef58d0..2b25f1dd434 100644 --- a/pkg/controller/rsm/transformer_status_test.go +++ b/pkg/controller/rsm/transformer_status_test.go @@ -39,7 +39,7 @@ import ( var _ = Describe("object status transformer test.", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). 
+ rsm = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). AddMatchLabelsInMap(selectors). SetServiceName(headlessSvcName). @@ -87,7 +87,7 @@ var _ = Describe("object status transformer test.", func() { Expect(err).Should(BeNil()) Expect(root.Action).ShouldNot(BeNil()) Expect(*root.Action).Should(Equal(model.STATUS)) - rsmNew, ok := root.Obj.(*workloads.ReplicatedStateMachine) + rsmNew, ok := root.Obj.(*workloads.InstanceSet) Expect(ok).Should(BeTrue()) Expect(rsmNew.Generation).Should(Equal(generation)) Expect(rsmNew.Status.ObservedGeneration).Should(Equal(generation)) @@ -133,7 +133,7 @@ var _ = Describe("object status transformer test.", func() { Expect(err).Should(BeNil()) Expect(root.Action).ShouldNot(BeNil()) Expect(*root.Action).Should(Equal(model.STATUS)) - rsmNew, ok := root.Obj.(*workloads.ReplicatedStateMachine) + rsmNew, ok := root.Obj.(*workloads.InstanceSet) Expect(ok).Should(BeTrue()) Expect(rsmNew.Status.ObservedGeneration).Should(Equal(generation)) // the only difference between rsm.status.StatefulSetStatus and sts.status is ObservedGeneration diff --git a/pkg/controller/rsm/transformer_update_strategy.go b/pkg/controller/rsm/transformer_update_strategy.go index f099f35cd4a..a7cc3c71a26 100644 --- a/pkg/controller/rsm/transformer_update_strategy.go +++ b/pkg/controller/rsm/transformer_update_strategy.go @@ -142,7 +142,7 @@ func doSwitchoverIfNeeded(transCtx *rsmTransformContext, dag *graph.DAG, pods [] return false, nil } -func createSwitchoverAction(dag *graph.DAG, cli model.GraphClient, rsm *workloads.ReplicatedStateMachine, pods []corev1.Pod) error { +func createSwitchoverAction(dag *graph.DAG, cli model.GraphClient, rsm *workloads.InstanceSet, pods []corev1.Pod) error { leader := getLeaderPodName(rsm.Status.MembersStatus) targetOrdinal := selectSwitchoverTarget(rsm, pods) target := getPodName(rsm.Name, targetOrdinal) @@ -155,7 +155,7 @@ func createSwitchoverAction(dag *graph.DAG, cli model.GraphClient, rsm *workload return 
createAction(dag, cli, rsm, action) } -func selectSwitchoverTarget(rsm *workloads.ReplicatedStateMachine, pods []corev1.Pod) int { +func selectSwitchoverTarget(rsm *workloads.InstanceSet, pods []corev1.Pod) int { var podUpdated, podUpdatedWithLabel string for _, pod := range pods { if intctrlutil.GetPodRevision(&pod) != rsm.Status.UpdateRevision { @@ -185,7 +185,7 @@ func selectSwitchoverTarget(rsm *workloads.ReplicatedStateMachine, pods []corev1 return ordinal } -func shouldSwitchover(rsm *workloads.ReplicatedStateMachine, podsToBeUpdated []*corev1.Pod, allPods []corev1.Pod) bool { +func shouldSwitchover(rsm *workloads.InstanceSet, podsToBeUpdated []*corev1.Pod, allPods []corev1.Pod) bool { if len(allPods) < 2 { // replicas is less than 2, no need to switchover return false diff --git a/pkg/controller/rsm/transformer_update_strategy_test.go b/pkg/controller/rsm/transformer_update_strategy_test.go index 43565a08f80..aa7a9e90d5c 100644 --- a/pkg/controller/rsm/transformer_update_strategy_test.go +++ b/pkg/controller/rsm/transformer_update_strategy_test.go @@ -38,7 +38,7 @@ import ( var _ = Describe("update strategy transformer test.", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + rsm = builder.NewInstanceSetBuilder(namespace, name). SetUID(uid). SetReplicas(3). SetRoles(roles). 
diff --git a/pkg/controller/rsm/types.go b/pkg/controller/rsm/types.go index 08b00f70fbc..bc99775c2fa 100644 --- a/pkg/controller/rsm/types.go +++ b/pkg/controller/rsm/types.go @@ -46,7 +46,7 @@ const ( WorkloadsManagedByLabelKey = "workloads.kubeblocks.io/managed-by" WorkloadsInstanceLabelKey = "workloads.kubeblocks.io/instance" - KindReplicatedStateMachine = "ReplicatedStateMachine" + KindReplicatedStateMachine = "InstanceSet" RoleLabelKey = "kubeblocks.io/role" rsmAccessModeLabelKey = "rsm.workloads.kubeblocks.io/access-mode" @@ -108,8 +108,8 @@ type rsmTransformContext struct { Client client.Reader record.EventRecorder logr.Logger - rsm *workloads.ReplicatedStateMachine - rsmOrig *workloads.ReplicatedStateMachine + rsm *workloads.InstanceSet + rsmOrig *workloads.InstanceSet } func (c *rsmTransformContext) GetContext() context.Context { @@ -144,7 +144,7 @@ var _ graph.TransformContext = &rsmTransformContext{} // here is what we should do: // add annotation 'prometheus.io/scrape' with an HeadlessServiceScope suffix to the RSM object's annotations field. 
// -// kind: ReplicatedStateMachine +// kind: InstanceSet // metadata: // annotations: // prometheus.io/scrape.headless.rsm: true diff --git a/pkg/controller/rsm/update_plan.go b/pkg/controller/rsm/update_plan.go index 9f58cfa19ef..04d306c6650 100644 --- a/pkg/controller/rsm/update_plan.go +++ b/pkg/controller/rsm/update_plan.go @@ -39,11 +39,11 @@ type updatePlan interface { } type realUpdatePlan struct { - rsm workloads.ReplicatedStateMachine + rsm workloads.InstanceSet pods []corev1.Pod dag *graph.DAG podsToBeUpdated []*corev1.Pod - isPodUpdated func(*workloads.ReplicatedStateMachine, *corev1.Pod) (bool, error) + isPodUpdated func(*workloads.InstanceSet, *corev1.Pod) (bool, error) } var _ updatePlan = &realUpdatePlan{} @@ -100,7 +100,7 @@ func (p *realUpdatePlan) planWalkFunc(vertex graph.Vertex) error { return ErrStop } -func (p *realUpdatePlan) defaultIsPodUpdatedFunc(rsm *workloads.ReplicatedStateMachine, pod *corev1.Pod) (bool, error) { +func (p *realUpdatePlan) defaultIsPodUpdatedFunc(rsm *workloads.InstanceSet, pod *corev1.Pod) (bool, error) { return intctrlutil.GetPodRevision(pod) == rsm.Status.UpdateRevision, nil } @@ -211,7 +211,7 @@ func (p *realUpdatePlan) Execute() ([]*corev1.Pod, error) { return p.podsToBeUpdated, nil } -func newUpdatePlan(rsm workloads.ReplicatedStateMachine, pods []corev1.Pod) updatePlan { +func newUpdatePlan(rsm workloads.InstanceSet, pods []corev1.Pod) updatePlan { return &realUpdatePlan{ rsm: rsm, pods: pods, @@ -219,7 +219,7 @@ func newUpdatePlan(rsm workloads.ReplicatedStateMachine, pods []corev1.Pod) upda } } -func NewUpdatePlan(rsm workloads.ReplicatedStateMachine, pods []*corev1.Pod, isPodUpdated func(*workloads.ReplicatedStateMachine, *corev1.Pod) (bool, error)) updatePlan { +func NewUpdatePlan(rsm workloads.InstanceSet, pods []*corev1.Pod, isPodUpdated func(*workloads.InstanceSet, *corev1.Pod) (bool, error)) updatePlan { var podList []corev1.Pod for _, pod := range pods { podList = append(podList, *pod) diff --git 
a/pkg/controller/rsm/update_plan_test.go b/pkg/controller/rsm/update_plan_test.go index 525a0d8f7f8..824ffecdba5 100644 --- a/pkg/controller/rsm/update_plan_test.go +++ b/pkg/controller/rsm/update_plan_test.go @@ -33,7 +33,7 @@ import ( var _ = Describe("update plan test.", func() { BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name).SetRoles(roles).GetObject() + rsm = builder.NewInstanceSetBuilder(namespace, name).SetRoles(roles).GetObject() rsm.Status.UpdateRevision = newRevision }) diff --git a/pkg/controller/rsm/utils.go b/pkg/controller/rsm/utils.go index 3c7fc53dee0..c58042bcaad 100644 --- a/pkg/controller/rsm/utils.go +++ b/pkg/controller/rsm/utils.go @@ -135,7 +135,7 @@ func ComposeRolePriorityMap(roles []workloads.ReplicaRole) map[string]int { return rolePriorityMap } -func composeRoleMap(rsm workloads.ReplicatedStateMachine) map[string]workloads.ReplicaRole { +func composeRoleMap(rsm workloads.InstanceSet) map[string]workloads.ReplicaRole { roleMap := make(map[string]workloads.ReplicaRole, 0) for _, role := range rsm.Spec.Roles { roleMap[strings.ToLower(role.Name)] = role @@ -143,7 +143,7 @@ func composeRoleMap(rsm workloads.ReplicatedStateMachine) map[string]workloads.R return roleMap } -func SetMembersStatus(rsm *workloads.ReplicatedStateMachine, pods *[]corev1.Pod) { +func SetMembersStatus(rsm *workloads.InstanceSet, pods *[]corev1.Pod) { // no roles defined if rsm.Spec.Roles == nil { setMembersStatusWithoutRole(rsm, pods) @@ -180,7 +180,7 @@ func SetMembersStatus(rsm *workloads.ReplicatedStateMachine, pods *[]corev1.Pod) rsm.Status.MembersStatus = newMembersStatus } -func setMembersStatusWithoutRole(rsm *workloads.ReplicatedStateMachine, pods *[]corev1.Pod) { +func setMembersStatusWithoutRole(rsm *workloads.InstanceSet, pods *[]corev1.Pod) { var membersStatus []workloads.MemberStatus for _, pod := range *pods { memberStatus := workloads.MemberStatus{ @@ -238,11 +238,11 @@ func getPodsOfStatefulSet(ctx context.Context, 
cli client.Reader, stsObj *appsv1 return pods, nil } -func getHeadlessSvcName(rsm workloads.ReplicatedStateMachine) string { +func getHeadlessSvcName(rsm workloads.InstanceSet) string { return strings.Join([]string{rsm.Name, "headless"}, "-") } -func findSvcPort(rsm *workloads.ReplicatedStateMachine) int { +func findSvcPort(rsm *workloads.InstanceSet) int { if rsm.Spec.Service == nil || len(rsm.Spec.Service.Spec.Ports) == 0 { return 0 } @@ -311,7 +311,7 @@ func getPodOrdinal(podName string) (int, error) { } // ordinal is the ordinal of pod which this action applies to -func createAction(dag *graph.DAG, cli model.GraphClient, rsm *workloads.ReplicatedStateMachine, action *batchv1.Job) error { +func createAction(dag *graph.DAG, cli model.GraphClient, rsm *workloads.InstanceSet, action *batchv1.Job) error { if err := SetOwnership(rsm, action, model.GetScheme(), GetFinalizer(action)); err != nil { return err } @@ -319,7 +319,7 @@ func createAction(dag *graph.DAG, cli model.GraphClient, rsm *workloads.Replicat return nil } -func buildAction(rsm *workloads.ReplicatedStateMachine, actionName, actionType, actionScenario string, leader, target string) *batchv1.Job { +func buildAction(rsm *workloads.InstanceSet, actionName, actionType, actionScenario string, leader, target string) *batchv1.Job { env := buildActionEnv(rsm, leader, target) template := buildActionPodTemplate(rsm, env, actionType) labels := getLabels(rsm) @@ -333,7 +333,7 @@ func buildAction(rsm *workloads.ReplicatedStateMachine, actionName, actionType, GetObject() } -func buildActionPodTemplate(rsm *workloads.ReplicatedStateMachine, env []corev1.EnvVar, actionType string) *corev1.PodTemplateSpec { +func buildActionPodTemplate(rsm *workloads.InstanceSet, env []corev1.EnvVar, actionType string) *corev1.PodTemplateSpec { credential := rsm.Spec.Credential credentialEnv := make([]corev1.EnvVar, 0) if credential != nil { @@ -371,7 +371,7 @@ func buildActionPodTemplate(rsm *workloads.ReplicatedStateMachine, env 
[]corev1. return template } -func buildActionEnv(rsm *workloads.ReplicatedStateMachine, leader, target string) []corev1.EnvVar { +func buildActionEnv(rsm *workloads.InstanceSet, leader, target string) []corev1.EnvVar { svcName := getHeadlessSvcName(*rsm) leaderHost := fmt.Sprintf("%s.%s", leader, svcName) targetHost := fmt.Sprintf("%s.%s", target, svcName) @@ -520,7 +520,7 @@ func emitActionEvent(transCtx *rsmTransformContext, eventType, reason, message s } func GetFinalizer(obj client.Object) string { - if _, ok := obj.(*workloads.ReplicatedStateMachine); ok { + if _, ok := obj.(*workloads.InstanceSet); ok { return FinalizerName } if viper.GetBool(FeatureGateRSMCompatibilityMode) { @@ -529,7 +529,7 @@ func GetFinalizer(obj client.Object) string { return FinalizerName } -func getLabels(rsm *workloads.ReplicatedStateMachine) map[string]string { +func getLabels(rsm *workloads.InstanceSet) map[string]string { if viper.GetBool(FeatureGateRSMCompatibilityMode) { labels := make(map[string]string, 0) keys := []string{ @@ -552,7 +552,7 @@ func getLabels(rsm *workloads.ReplicatedStateMachine) map[string]string { } } -func getSvcSelector(rsm *workloads.ReplicatedStateMachine, headless bool) map[string]string { +func getSvcSelector(rsm *workloads.InstanceSet, headless bool) map[string]string { selectors := make(map[string]string, 0) if !headless { @@ -654,10 +654,10 @@ func CopyOwnership(owner, obj client.Object, scheme *runtime.Scheme, finalizer s return nil } -// IsRSMReady gives rsm level 'ready' state: +// IsInstanceSetReady gives rsm level 'ready' state: // 1. all replicas exist // 2. 
all members have role set -func IsRSMReady(rsm *workloads.ReplicatedStateMachine) bool { +func IsInstanceSetReady(rsm *workloads.InstanceSet) bool { if rsm == nil { return false } @@ -751,9 +751,9 @@ func ParseAnnotationsOfScope(scope AnnotationScope, scopedAnnotations map[string return annotations } -// ConvertRSMToSTS converts a rsm to sts +// ConvertInstanceSetToSTS converts a rsm to sts // TODO(free6om): refactor this func out -func ConvertRSMToSTS(rsm *workloads.ReplicatedStateMachine) *appsv1.StatefulSet { +func ConvertInstanceSetToSTS(rsm *workloads.InstanceSet) *appsv1.StatefulSet { if rsm == nil { return nil } @@ -776,7 +776,7 @@ func ConvertRSMToSTS(rsm *workloads.ReplicatedStateMachine) *appsv1.StatefulSet } func GetEnvConfigMapName(rsmName string) string { - return fmt.Sprintf("%s-rsm-env", rsmName) + return fmt.Sprintf("%s-its-env", rsmName) } // IsOwnedByRsm is used to judge if the obj is owned by rsm diff --git a/pkg/controller/rsm/utils_test.go b/pkg/controller/rsm/utils_test.go index f78ff338dc3..9aaf967d4ed 100644 --- a/pkg/controller/rsm/utils_test.go +++ b/pkg/controller/rsm/utils_test.go @@ -43,7 +43,7 @@ var _ = Describe("utils test", func() { var priorityMap map[string]int BeforeEach(func() { - rsm = builder.NewReplicatedStateMachineBuilder(namespace, name). + rsm = builder.NewInstanceSetBuilder(namespace, name). SetService(&corev1.Service{}). SetRoles(roles). 
GetObject() @@ -412,7 +412,7 @@ var _ = Describe("utils test", func() { Context("IsOwnedByRsm function", func() { It("should work well", func() { By("call without ownerReferences") - rsm := &workloads.ReplicatedStateMachine{} + rsm := &workloads.InstanceSet{} Expect(IsOwnedByRsm(rsm)).Should(BeFalse()) By("call with ownerReference's kind is rsm") diff --git a/pkg/controllerutil/rsm_utils.go b/pkg/controllerutil/instance_set_utils.go similarity index 80% rename from pkg/controllerutil/rsm_utils.go rename to pkg/controllerutil/instance_set_utils.go index c353c259834..f1fdb4b0196 100644 --- a/pkg/controllerutil/rsm_utils.go +++ b/pkg/controllerutil/instance_set_utils.go @@ -29,15 +29,15 @@ import ( workloads "github.com/apecloud/kubeblocks/apis/workloads/v1alpha1" ) -// GetPodListByRSM gets rsm pod list. -func GetPodListByRSM(ctx context.Context, cli client.Client, rsm *workloads.ReplicatedStateMachine) ([]corev1.Pod, error) { - selector, err := metav1.LabelSelectorAsMap(rsm.Spec.Selector) +// GetPodListByInstanceSet gets ITS pod list. 
+func GetPodListByInstanceSet(ctx context.Context, cli client.Client, its *workloads.InstanceSet) ([]corev1.Pod, error) { + selector, err := metav1.LabelSelectorAsMap(its.Spec.Selector) if err != nil { return nil, err } podList := &corev1.PodList{} if err := cli.List(ctx, podList, - &client.ListOptions{Namespace: rsm.Namespace}, + &client.ListOptions{Namespace: its.Namespace}, client.MatchingLabels(selector)); err != nil { return nil, err } diff --git a/pkg/generics/type.go b/pkg/generics/type.go index a7f9b4dfac4..e090b8b8ac0 100644 --- a/pkg/generics/type.go +++ b/pkg/generics/type.go @@ -72,7 +72,7 @@ var EventSignature = func(_ corev1.Event, _ *corev1.Event, _ corev1.EventList, _ var ConfigMapSignature = func(_ corev1.ConfigMap, _ *corev1.ConfigMap, _ corev1.ConfigMapList, _ *corev1.ConfigMapList) {} var EndpointsSignature = func(_ corev1.Endpoints, _ *corev1.Endpoints, _ corev1.EndpointsList, _ *corev1.EndpointsList) {} -var RSMSignature = func(_ workloads.ReplicatedStateMachine, _ *workloads.ReplicatedStateMachine, _ workloads.ReplicatedStateMachineList, _ *workloads.ReplicatedStateMachineList) { +var InstanceSetSignature = func(_ workloads.InstanceSet, _ *workloads.InstanceSet, _ workloads.InstanceSetList, _ *workloads.InstanceSetList) { } var StatefulSetSignature = func(_ appsv1.StatefulSet, _ *appsv1.StatefulSet, _ appsv1.StatefulSetList, _ *appsv1.StatefulSetList) { } diff --git a/pkg/lorry/engines/custom/manager.go b/pkg/lorry/engines/custom/manager.go index 58942379364..a0fe7c11062 100644 --- a/pkg/lorry/engines/custom/manager.go +++ b/pkg/lorry/engines/custom/manager.go @@ -62,12 +62,12 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) { DBManagerBase: *managerBase, } - err = mgr.InitASMActions() + err = mgr.InitInstanceSetActions() if err != nil { - mgr.Logger.Info("init RSM commands failed", "error", err.Error()) + mgr.Logger.Info("init InstanceSet commands failed", "error", err.Error()) return nil, err } - err = 
mgr.InitComponentDefintionActions() + err = mgr.InitComponentDefinitionActions() if err != nil { mgr.Logger.Info("init component definition commands failed", "error", err.Error()) return nil, err @@ -75,7 +75,7 @@ func NewManager(properties engines.Properties) (engines.DBManager, error) { return mgr, nil } -func (mgr *Manager) InitASMActions() error { +func (mgr *Manager) InitInstanceSetActions() error { actionSvcList := viper.GetString("KB_RSM_ACTION_SVC_LIST") if actionSvcList == "" || actionSvcList == "null" { return nil @@ -102,7 +102,7 @@ func (mgr *Manager) InitASMActions() error { return nil } -func (mgr *Manager) InitComponentDefintionActions() error { +func (mgr *Manager) InitComponentDefinitionActions() error { actionJSON := viper.GetString(constant.KBEnvActionCommands) if actionJSON != "" { err := json.Unmarshal([]byte(actionJSON), &mgr.actionCommands) diff --git a/pkg/testutil/apps/cluster_consensus_test_util.go b/pkg/testutil/apps/cluster_consensus_test_util.go index 6551e9ea422..1e001110ab5 100644 --- a/pkg/testutil/apps/cluster_consensus_test_util.go +++ b/pkg/testutil/apps/cluster_consensus_test_util.go @@ -99,13 +99,13 @@ func MockConsensusComponentStatefulSet( AddContainer(corev1.Container{Name: DefaultMySQLContainerName, Image: ApeCloudMySQLImage}).Create(testCtx).GetObject() } -// MockRSMComponent mocks the component rsm, just using in envTest -func MockRSMComponent( +// MockInstanceSetComponent mocks the ITS component, just using in envTest +func MockInstanceSetComponent( testCtx *testutil.TestContext, clusterName, - rsmCompName string) *workloads.ReplicatedStateMachine { - rsmName := clusterName + "-" + rsmCompName - return NewRSMFactory(testCtx.DefaultNamespace, rsmName, clusterName, rsmCompName).SetReplicas(ConsensusReplicas). + itsCompName string) *workloads.InstanceSet { + itsName := clusterName + "-" + itsCompName + return NewInstanceSetFactory(testCtx.DefaultNamespace, itsName, clusterName, itsCompName).SetReplicas(ConsensusReplicas). 
AddContainer(corev1.Container{Name: DefaultMySQLContainerName, Image: ApeCloudMySQLImage}).Create(testCtx).GetObject() } diff --git a/pkg/testutil/apps/rsm_factoy.go b/pkg/testutil/apps/instance_set_factoy.go similarity index 74% rename from pkg/testutil/apps/rsm_factoy.go rename to pkg/testutil/apps/instance_set_factoy.go index 74f51c24bdb..4e5e9a65617 100644 --- a/pkg/testutil/apps/rsm_factoy.go +++ b/pkg/testutil/apps/instance_set_factoy.go @@ -28,14 +28,14 @@ import ( "github.com/apecloud/kubeblocks/pkg/constant" ) -type MockRSMFactory struct { - BaseFactory[workloads.ReplicatedStateMachine, *workloads.ReplicatedStateMachine, MockRSMFactory] +type MockInstanceSetFactory struct { + BaseFactory[workloads.InstanceSet, *workloads.InstanceSet, MockInstanceSetFactory] } -func NewRSMFactory(namespace, name string, clusterName string, componentName string) *MockRSMFactory { - f := &MockRSMFactory{} +func NewInstanceSetFactory(namespace, name string, clusterName string, componentName string) *MockInstanceSetFactory { + f := &MockInstanceSetFactory{} f.Init(namespace, name, - &workloads.ReplicatedStateMachine{ + &workloads.InstanceSet{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ constant.AppInstanceLabelKey: clusterName, @@ -43,7 +43,7 @@ func NewRSMFactory(namespace, name string, clusterName string, componentName str constant.AppManagedByLabelKey: constant.AppName, }, }, - Spec: workloads.ReplicatedStateMachineSpec{ + Spec: workloads.InstanceSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ constant.AppInstanceLabelKey: clusterName, @@ -68,18 +68,18 @@ func NewRSMFactory(namespace, name string, clusterName string, componentName str return f } -func (factory *MockRSMFactory) SetReplicas(replicas int32) *MockRSMFactory { +func (factory *MockInstanceSetFactory) SetReplicas(replicas int32) *MockInstanceSetFactory { factory.Get().Spec.Replicas = &replicas return factory } -func (factory *MockRSMFactory) AddVolume(volume corev1.Volume) 
*MockRSMFactory { +func (factory *MockInstanceSetFactory) AddVolume(volume corev1.Volume) *MockInstanceSetFactory { volumes := &factory.Get().Spec.Template.Spec.Volumes *volumes = append(*volumes, volume) return factory } -func (factory *MockRSMFactory) AddConfigmapVolume(volumeName string, configmapName string) *MockRSMFactory { +func (factory *MockInstanceSetFactory) AddConfigmapVolume(volumeName string, configmapName string) *MockInstanceSetFactory { volume := corev1.Volume{ Name: volumeName, VolumeSource: corev1.VolumeSource{ @@ -92,13 +92,13 @@ func (factory *MockRSMFactory) AddConfigmapVolume(volumeName string, configmapNa return factory } -func (factory *MockRSMFactory) AddVolumeClaimTemplate(pvc corev1.PersistentVolumeClaim) *MockRSMFactory { +func (factory *MockInstanceSetFactory) AddVolumeClaimTemplate(pvc corev1.PersistentVolumeClaim) *MockInstanceSetFactory { volumeClaimTpls := &factory.Get().Spec.VolumeClaimTemplates *volumeClaimTpls = append(*volumeClaimTpls, pvc) return factory } -func (factory *MockRSMFactory) AddContainer(container corev1.Container) *MockRSMFactory { +func (factory *MockInstanceSetFactory) AddContainer(container corev1.Container) *MockInstanceSetFactory { containers := &factory.Get().Spec.Template.Spec.Containers *containers = append(*containers, container) return factory diff --git a/pkg/testutil/k8s/rsm_util.go b/pkg/testutil/k8s/instance_set_util.go similarity index 50% rename from pkg/testutil/k8s/rsm_util.go rename to pkg/testutil/k8s/instance_set_util.go index 73d16f9ff02..50587953ef3 100644 --- a/pkg/testutil/k8s/rsm_util.go +++ b/pkg/testutil/k8s/instance_set_util.go @@ -37,8 +37,8 @@ import ( testapps "github.com/apecloud/kubeblocks/pkg/testutil/apps" ) -// NewFakeRSM creates a fake RSM workload object for testing. -func NewFakeRSM(name string, replicas int) *workloads.ReplicatedStateMachine { +// NewFakeInstanceSet creates a fake ITS workload object for testing. 
+func NewFakeInstanceSet(name string, replicas int) *workloads.InstanceSet { template := corev1.PodTemplateSpec{ Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -51,28 +51,28 @@ func NewFakeRSM(name string, replicas int) *workloads.ReplicatedStateMachine { } template.Labels = map[string]string{"foo": "bar"} - rsmReplicas := int32(replicas) + itsReplicas := int32(replicas) Revision := name + "-d5df5b8d6" - return &workloads.ReplicatedStateMachine{ + return &workloads.InstanceSet{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: corev1.NamespaceDefault, }, - Spec: workloads.ReplicatedStateMachineSpec{ + Spec: workloads.InstanceSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"foo": "bar"}, }, - Replicas: &rsmReplicas, + Replicas: &itsReplicas, Template: template, ServiceName: "governingsvc", }, - Status: workloads.ReplicatedStateMachineStatus{ - InitReplicas: rsmReplicas, + Status: workloads.InstanceSetStatus{ + InitReplicas: itsReplicas, StatefulSetStatus: appsv1.StatefulSetStatus{ - AvailableReplicas: rsmReplicas, + AvailableReplicas: itsReplicas, ObservedGeneration: 0, - ReadyReplicas: rsmReplicas, - UpdatedReplicas: rsmReplicas, + ReadyReplicas: itsReplicas, + UpdatedReplicas: itsReplicas, CurrentRevision: Revision, UpdateRevision: Revision, }, @@ -80,34 +80,34 @@ func NewFakeRSM(name string, replicas int) *workloads.ReplicatedStateMachine { } } -// NewFakeRSMPod creates a fake pod of the RSM workload for testing. -func NewFakeRSMPod(rsm *workloads.ReplicatedStateMachine, ordinal int) *corev1.Pod { +// NewFakeInstanceSetPod creates a fake pod of the ITS workload for testing. +func NewFakeInstanceSetPod(its *workloads.InstanceSet, ordinal int) *corev1.Pod { pod := &corev1.Pod{} - pod.Name = fmt.Sprintf("%s-%d", rsm.Name, ordinal) + pod.Name = fmt.Sprintf("%s-%d", its.Name, ordinal) return pod } -// MockRSMReady mocks the RSM workload to ready state. 
-func MockRSMReady(rsm *workloads.ReplicatedStateMachine, pods ...*corev1.Pod) { - rsm.Status.InitReplicas = *rsm.Spec.Replicas - rsm.Status.ReadyInitReplicas = *rsm.Spec.Replicas - rsm.Status.AvailableReplicas = *rsm.Spec.Replicas - rsm.Status.ObservedGeneration = rsm.Generation - rsm.Status.CurrentGeneration = rsm.Generation - rsm.Status.Replicas = *rsm.Spec.Replicas - rsm.Status.ReadyReplicas = *rsm.Spec.Replicas - rsm.Status.CurrentRevision = rsm.Status.UpdateRevision - rsm.Status.UpdatedReplicas = rsm.Status.Replicas - - composeRoleMap := func(rsm workloads.ReplicatedStateMachine) map[string]workloads.ReplicaRole { +// MockInstanceSetReady mocks the ITS workload to ready state. +func MockInstanceSetReady(its *workloads.InstanceSet, pods ...*corev1.Pod) { + its.Status.InitReplicas = *its.Spec.Replicas + its.Status.ReadyInitReplicas = *its.Spec.Replicas + its.Status.AvailableReplicas = *its.Spec.Replicas + its.Status.ObservedGeneration = its.Generation + its.Status.CurrentGeneration = its.Generation + its.Status.Replicas = *its.Spec.Replicas + its.Status.ReadyReplicas = *its.Spec.Replicas + its.Status.CurrentRevision = its.Status.UpdateRevision + its.Status.UpdatedReplicas = its.Status.Replicas + + composeRoleMap := func(its workloads.InstanceSet) map[string]workloads.ReplicaRole { roleMap := make(map[string]workloads.ReplicaRole, 0) - for _, role := range rsm.Spec.Roles { + for _, role := range its.Spec.Roles { roleMap[strings.ToLower(role.Name)] = role } return roleMap } var membersStatus []workloads.MemberStatus - roleMap := composeRoleMap(*rsm) + roleMap := composeRoleMap(*its) for _, pod := range pods { roleName := strings.ToLower(pod.Labels[constant.RoleLabelKey]) role, ok := roleMap[roleName] @@ -120,62 +120,62 @@ func MockRSMReady(rsm *workloads.ReplicatedStateMachine, pods ...*corev1.Pod) { } membersStatus = append(membersStatus, memberStatus) } - rsm.Status.MembersStatus = membersStatus + its.Status.MembersStatus = membersStatus } -func 
ListAndCheckRSM(testCtx *testutil.TestContext, key types.NamespacedName) *workloads.ReplicatedStateMachineList { - rsmList := &workloads.ReplicatedStateMachineList{} +func ListAndCheckInstanceSet(testCtx *testutil.TestContext, key types.NamespacedName) *workloads.InstanceSetList { + itsList := &workloads.InstanceSetList{} gomega.Eventually(func(g gomega.Gomega) { - g.Expect(testCtx.Cli.List(testCtx.Ctx, rsmList, client.MatchingLabels{ + g.Expect(testCtx.Cli.List(testCtx.Ctx, itsList, client.MatchingLabels{ constant.AppInstanceLabelKey: key.Name, }, client.InNamespace(key.Namespace))).Should(gomega.Succeed()) - g.Expect(rsmList.Items).ShouldNot(gomega.BeNil()) - g.Expect(rsmList.Items).ShouldNot(gomega.BeEmpty()) + g.Expect(itsList.Items).ShouldNot(gomega.BeNil()) + g.Expect(itsList.Items).ShouldNot(gomega.BeEmpty()) }).Should(gomega.Succeed()) - return rsmList + return itsList } -func ListAndCheckRSMItemsCount(testCtx *testutil.TestContext, key types.NamespacedName, cnt int) *workloads.ReplicatedStateMachineList { - rsmList := &workloads.ReplicatedStateMachineList{} +func ListAndCheckInstanceSetItemsCount(testCtx *testutil.TestContext, key types.NamespacedName, cnt int) *workloads.InstanceSetList { + itsList := &workloads.InstanceSetList{} gomega.Eventually(func(g gomega.Gomega) { - g.Expect(testCtx.Cli.List(testCtx.Ctx, rsmList, client.MatchingLabels{ + g.Expect(testCtx.Cli.List(testCtx.Ctx, itsList, client.MatchingLabels{ constant.AppInstanceLabelKey: key.Name, }, client.InNamespace(key.Namespace))).Should(gomega.Succeed()) - g.Expect(len(rsmList.Items)).Should(gomega.Equal(cnt)) + g.Expect(len(itsList.Items)).Should(gomega.Equal(cnt)) }).Should(gomega.Succeed()) - return rsmList + return itsList } -func ListAndCheckRSMWithComponent(testCtx *testutil.TestContext, key types.NamespacedName, componentName string) *workloads.ReplicatedStateMachineList { - rsmList := &workloads.ReplicatedStateMachineList{} +func ListAndCheckInstanceSetWithComponent(testCtx 
*testutil.TestContext, key types.NamespacedName, componentName string) *workloads.InstanceSetList { + itsList := &workloads.InstanceSetList{} gomega.Eventually(func(g gomega.Gomega) { - g.Expect(testCtx.Cli.List(testCtx.Ctx, rsmList, client.MatchingLabels{ + g.Expect(testCtx.Cli.List(testCtx.Ctx, itsList, client.MatchingLabels{ constant.AppInstanceLabelKey: key.Name, constant.KBAppComponentLabelKey: componentName, }, client.InNamespace(key.Namespace))).Should(gomega.Succeed()) - g.Expect(rsmList.Items).ShouldNot(gomega.BeNil()) - g.Expect(rsmList.Items).ShouldNot(gomega.BeEmpty()) + g.Expect(itsList.Items).ShouldNot(gomega.BeNil()) + g.Expect(itsList.Items).ShouldNot(gomega.BeEmpty()) }).Should(gomega.Succeed()) - return rsmList + return itsList } -func PatchRSMStatus(testCtx *testutil.TestContext, stsName string, status workloads.ReplicatedStateMachineStatus) { +func PatchInstanceSetStatus(testCtx *testutil.TestContext, stsName string, status workloads.InstanceSetStatus) { objectKey := client.ObjectKey{Name: stsName, Namespace: testCtx.DefaultNamespace} - gomega.Expect(testapps.GetAndChangeObjStatus(testCtx, objectKey, func(newRSM *workloads.ReplicatedStateMachine) { - newRSM.Status = status + gomega.Expect(testapps.GetAndChangeObjStatus(testCtx, objectKey, func(newITS *workloads.InstanceSet) { + newITS.Status = status })()).Should(gomega.Succeed()) - gomega.Eventually(testapps.CheckObj(testCtx, objectKey, func(g gomega.Gomega, newRSM *workloads.ReplicatedStateMachine) { - g.Expect(reflect.DeepEqual(newRSM.Status, status)).Should(gomega.BeTrue()) + gomega.Eventually(testapps.CheckObj(testCtx, objectKey, func(g gomega.Gomega, newITS *workloads.InstanceSet) { + g.Expect(reflect.DeepEqual(newITS.Status, status)).Should(gomega.BeTrue()) })).Should(gomega.Succeed()) } -func InitRSMStatus(testCtx testutil.TestContext, rsm *workloads.ReplicatedStateMachine, controllerRevision string) { - gomega.Expect(testapps.ChangeObjStatus(&testCtx, rsm, func() { - 
rsm.Status.InitReplicas = *rsm.Spec.Replicas - rsm.Status.Replicas = *rsm.Spec.Replicas - rsm.Status.UpdateRevision = controllerRevision - rsm.Status.CurrentRevision = controllerRevision - rsm.Status.ObservedGeneration = rsm.Generation - rsm.Status.CurrentGeneration = rsm.Generation +func InitInstanceSetStatus(testCtx testutil.TestContext, its *workloads.InstanceSet, controllerRevision string) { + gomega.Expect(testapps.ChangeObjStatus(&testCtx, its, func() { + its.Status.InitReplicas = *its.Spec.Replicas + its.Status.Replicas = *its.Spec.Replicas + its.Status.UpdateRevision = controllerRevision + its.Status.CurrentRevision = controllerRevision + its.Status.ObservedGeneration = its.Generation + its.Status.CurrentGeneration = its.Generation })).Should(gomega.Succeed()) }
FieldDescription
+name
+ +string + +
+

Defines the role name of the replica.

-currentRevisions
+accessMode
-map[string]string + +AccessMode + + +
+

Specifies the service capabilities of this member.

+
+canVote
+ +bool
(Optional) -

currentRevisions, if not empty, indicates the old version of the RSM used to generate Pods. -key is the pod name, value is the revision.

+

Indicates if this member has voting rights.

-updateRevisions
+isLeader
-map[string]string +bool
(Optional) -

updateRevisions, if not empty, indicates the new version of the RSM used to generate Pods. -key is the pod name, value is the revision.

+

Determines if this member is the leader.