From 1b885a3060a0be915deea0a5da7f7cfcf868d440 Mon Sep 17 00:00:00 2001
From: wangyelei
Date: Wed, 24 Apr 2024 13:50:49 +0800
Subject: [PATCH] chore: replace 'shardingName' with 'isSharding' in
 opsRequest.componentOps (#7147)
(cherry picked from commit 14b691ebcd205b94db6a37b346c1ac7e7689e71e)
---
apis/apps/v1alpha1/opsrequest_types.go | 62 +++++--
apis/apps/v1alpha1/opsrequest_webhook.go | 38 +++--
apis/apps/v1alpha1/zz_generated.deepcopy.go | 10 +-
.../bases/apps.kubeblocks.io_opsrequests.yaml | 153 +++++++++---------
controllers/apps/operations/custom.go | 4 +-
.../apps/operations/custom/action_exec.go | 10 +-
.../apps/operations/custom/action_workload.go | 10 +-
controllers/apps/operations/custom/utils.go | 2 +-
controllers/apps/operations/custom_test.go | 2 +-
.../apps/operations/custom_workflow.go | 4 +-
.../apps/operations/horizontal_scaling.go | 13 +-
.../apps/operations/ops_comp_helper.go | 30 ++--
.../apps/operations/ops_progress_util.go | 6 +-
controllers/apps/operations/start.go | 16 +-
controllers/apps/operations/stop.go | 19 +--
.../apps/operations/volume_expansion.go | 27 ++--
.../crds/apps.kubeblocks.io_opsrequests.yaml | 153 +++++++++---------
docs/developer_docs/api-reference/cluster.md | 18 +--
18 files changed, 306 insertions(+), 271 deletions(-)
diff --git a/apis/apps/v1alpha1/opsrequest_types.go b/apis/apps/v1alpha1/opsrequest_types.go
index 4b3aeb53c5c..2b8ac0b3729 100644
--- a/apis/apps/v1alpha1/opsrequest_types.go
+++ b/apis/apps/v1alpha1/opsrequest_types.go
@@ -61,30 +61,50 @@ type OpsRequestSpec struct {
// Defines what component need to horizontal scale the specified replicas.
// +optional
+ // +patchMergeKey=componentName
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=componentName
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.horizontalScaling"
- HorizontalScalingList []HorizontalScaling `json:"horizontalScaling,omitempty"`
+ HorizontalScalingList []HorizontalScaling `json:"horizontalScaling,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"componentName"`
// Note: Quantity struct can not do immutable check by CEL.
// Defines what component and volumeClaimTemplate need to expand the specified storage.
// +optional
- VolumeExpansionList []VolumeExpansion `json:"volumeExpansion,omitempty"`
+ // +patchMergeKey=componentName
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=componentName
+ VolumeExpansionList []VolumeExpansion `json:"volumeExpansion,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"componentName"`
// Restarts the specified components.
// +optional
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.restart"
// +kubebuilder:validation:MaxItems=1024
- RestartList []ComponentOps `json:"restart,omitempty"`
+ // +patchMergeKey=componentName
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=componentName
+ RestartList []ComponentOps `json:"restart,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"componentName"`
// Switches over the specified components.
// +optional
+ // +patchMergeKey=componentName
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=componentName
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.switchover"
- SwitchoverList []Switchover `json:"switchover,omitempty"`
+ SwitchoverList []Switchover `json:"switchover,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"componentName"`
// Note: Quantity struct can not do immutable check by CEL.
// Defines what component need to vertical scale the specified compute resources.
// +kubebuilder:validation:MaxItems=1024
// +optional
- VerticalScalingList []VerticalScaling `json:"verticalScaling,omitempty"`
+ // +patchMergeKey=componentName
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=componentName
+ VerticalScalingList []VerticalScaling `json:"verticalScaling,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"componentName"`
// Deprecated: replace by reconfigures.
// Defines the variables that need to input when updating configuration.
@@ -94,7 +114,11 @@ type OpsRequestSpec struct {
// Defines the variables that need to input when updating configuration.
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.reconfigure"
// +optional
- Reconfigures []Reconfigure `json:"reconfigures,omitempty"`
+ // +patchMergeKey=componentName
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=componentName
+ Reconfigures []Reconfigure `json:"reconfigures,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"componentName"`
// Defines services the component needs to expose.
// +optional
@@ -126,8 +150,12 @@ type OpsRequestSpec struct {
// Specifies the instances that require re-creation.
// +optional
+ // +patchMergeKey=componentName
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=componentName
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="forbidden to update spec.rebuildFrom"
- RebuildFrom []RebuildInstance `json:"rebuildFrom,omitempty"`
+ RebuildFrom []RebuildInstance `json:"rebuildFrom,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"componentName"`
// Specifies a custom operation as defined by OpsDefinition.
// +optional
@@ -135,13 +163,13 @@ type OpsRequestSpec struct {
}
// ComponentOps represents the common variables required for operations within the scope of a normal component/shard component.
-// +kubebuilder:validation:XValidation:rule="(has(self.componentName) && !has(self.shardingName)) || (has(self.shardingName) && !has(self.componentName))",message="either componentName or shardingName"
type ComponentOps struct {
// Specifies the name of the cluster component.
- ComponentName string `json:"componentName,omitempty"`
+ // +kubebuilder:validation:Required
+ ComponentName string `json:"componentName"`
- // Specifies the name of the cluster sharding component.
- ShardingName string `json:"shardingName,omitempty"`
+ // Specifies that the componentName refers to the cluster's sharding component.
+ IsSharding bool `json:"isSharding,omitempty"`
}
type RebuildInstance struct {
@@ -355,10 +383,14 @@ type CustomOpsSpec struct {
// +kubebuilder:validation:Required
// +kubebuilder:validation:MinItems=1
// +kubebuilder:validation:MaxItems=1024
- CustomOpsItems []CustomOpsItem `json:"items"`
+ // +patchMergeKey=componentName
+ // +patchStrategy=merge,retainKeys
+ // +listType=map
+ // +listMapKey=componentName
+ CustomOpsItems []CustomOpsComponent `json:"components" patchStrategy:"merge,retainKeys" patchMergeKey:"componentName"`
}
-type CustomOpsItem struct {
+type CustomOpsComponent struct {
ComponentOps `json:",inline"`
// Represents the parameters for this operation as declared in the opsDefinition.spec.parametersSchema.
@@ -999,8 +1031,8 @@ func (c ComponentOps) GetComponentName() string {
return c.ComponentName
}
-func (c ComponentOps) GetShardingName() string {
- return c.ShardingName
+func (c ComponentOps) IsShardingComponent() bool {
+ return c.IsSharding
}
// ToExposeListToMap build expose map
diff --git a/apis/apps/v1alpha1/opsrequest_webhook.go b/apis/apps/v1alpha1/opsrequest_webhook.go
index af542208456..bc8a889247a 100644
--- a/apis/apps/v1alpha1/opsrequest_webhook.go
+++ b/apis/apps/v1alpha1/opsrequest_webhook.go
@@ -441,9 +441,9 @@ func (r *OpsRequest) checkInstanceTemplate(cluster *Cluster, componentOps Compon
instanceNameMap[instances[i].Name] = sets.Empty{}
}
}
- if componentOps.ShardingName != "" {
+ if componentOps.IsSharding {
for _, shardingSpec := range cluster.Spec.ShardingSpecs {
- if shardingSpec.Name != componentOps.ShardingName {
+ if shardingSpec.Name != componentOps.ComponentName {
continue
}
setInstanceMap(shardingSpec.Template.Instances)
@@ -483,19 +483,17 @@ func (r *OpsRequest) checkComponentExistence(cluster *Cluster, compOpsList []Com
notFoundShardingNames []string
)
for _, compOps := range compOpsList {
- if compOps.ComponentName != "" {
- if _, ok := compSpecNameMap[compOps.ComponentName]; !ok {
- notFoundCompNames = append(notFoundCompNames, compOps.ComponentName)
+ if compOps.IsSharding {
+ if _, ok := shardingMap[compOps.ComponentName]; !ok {
+ notFoundShardingNames = append(notFoundShardingNames, compOps.ComponentName)
}
continue
- }
- if compOps.ShardingName != "" {
- if _, ok := shardingMap[compOps.ShardingName]; !ok {
- notFoundShardingNames = append(notFoundShardingNames, compOps.ShardingName)
+ } else {
+ if _, ok := compSpecNameMap[compOps.ComponentName]; !ok {
+ notFoundCompNames = append(notFoundCompNames, compOps.ComponentName)
}
continue
}
- return fmt.Errorf("shardingName or componentName can not be empty")
}
if len(notFoundCompNames) > 0 {
@@ -517,19 +515,19 @@ func (r *OpsRequest) checkVolumesAllowExpansion(ctx context.Context, cli client.
vols := make(map[string]map[string]Entity)
// component name/ sharding name -> vct name -> entity
- getKey := func(compName, shardingName, templateName string) string {
+ getKey := func(compOps ComponentOps, templateName string) string {
templateKey := ""
if templateName != "" {
templateKey = "." + templateName
}
- if compName != "" {
- return fmt.Sprintf("component.%s%s", compName, templateKey)
+ if compOps.IsSharding {
+ return fmt.Sprintf("sharding.%s%s", compOps.ComponentName, templateKey)
}
- return fmt.Sprintf("sharding.%s%s", shardingName, templateKey)
+ return fmt.Sprintf("component.%s%s", compOps.ComponentName, templateKey)
}
setVols := func(vcts []OpsRequestVolumeClaimTemplate, compOps ComponentOps, templateName string) {
for _, vct := range vcts {
- key := getKey(compOps.ComponentName, compOps.ShardingName, templateName)
+ key := getKey(compOps, templateName)
if _, ok := vols[key]; !ok {
vols[key] = make(map[string]Entity)
}
@@ -552,8 +550,8 @@ func (r *OpsRequest) checkVolumesAllowExpansion(ctx context.Context, cli client.
e.storageClassName = vct.Spec.StorageClassName
vols[key][vct.Name] = e
}
- fillCompVols := func(compSpec ClusterComponentSpec, compName, shardingName string) {
- key := getKey(compName, shardingName, "")
+ fillCompVols := func(compSpec ClusterComponentSpec, compOps ComponentOps) {
+ key := getKey(compOps, "")
if _, ok := vols[key]; !ok {
return // ignore not-exist component
}
@@ -561,7 +559,7 @@ func (r *OpsRequest) checkVolumesAllowExpansion(ctx context.Context, cli client.
fillVol(vct, key)
}
for _, ins := range compSpec.Instances {
- key = getKey(compName, shardingName, ins.Name)
+ key = getKey(compOps, ins.Name)
for _, vct := range ins.VolumeClaimTemplates {
fillVol(vct, key)
}
@@ -569,10 +567,10 @@ func (r *OpsRequest) checkVolumesAllowExpansion(ctx context.Context, cli client.
}
// traverse the spec to update volumes
for _, comp := range cluster.Spec.ComponentSpecs {
- fillCompVols(comp, comp.Name, "")
+ fillCompVols(comp, ComponentOps{ComponentName: comp.Name})
}
for _, sharding := range cluster.Spec.ShardingSpecs {
- fillCompVols(sharding.Template, "", sharding.Name)
+ fillCompVols(sharding.Template, ComponentOps{ComponentName: sharding.Name, IsSharding: true})
}
// check all used storage classes
diff --git a/apis/apps/v1alpha1/zz_generated.deepcopy.go b/apis/apps/v1alpha1/zz_generated.deepcopy.go
index f64db857237..a93c55ab9e8 100644
--- a/apis/apps/v1alpha1/zz_generated.deepcopy.go
+++ b/apis/apps/v1alpha1/zz_generated.deepcopy.go
@@ -3273,7 +3273,7 @@ func (in *CustomLabelSpec) DeepCopy() *CustomLabelSpec {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CustomOpsItem) DeepCopyInto(out *CustomOpsItem) {
+func (in *CustomOpsComponent) DeepCopyInto(out *CustomOpsComponent) {
*out = *in
out.ComponentOps = in.ComponentOps
if in.Parameters != nil {
@@ -3283,12 +3283,12 @@ func (in *CustomOpsItem) DeepCopyInto(out *CustomOpsItem) {
}
}
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomOpsItem.
-func (in *CustomOpsItem) DeepCopy() *CustomOpsItem {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomOpsComponent.
+func (in *CustomOpsComponent) DeepCopy() *CustomOpsComponent {
if in == nil {
return nil
}
- out := new(CustomOpsItem)
+ out := new(CustomOpsComponent)
in.DeepCopyInto(out)
return out
}
@@ -3304,7 +3304,7 @@ func (in *CustomOpsSpec) DeepCopyInto(out *CustomOpsSpec) {
out.Parallelism = in.Parallelism
if in.CustomOpsItems != nil {
in, out := &in.CustomOpsItems, &out.CustomOpsItems
- *out = make([]CustomOpsItem, len(*in))
+ *out = make([]CustomOpsComponent, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
diff --git a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml
index 0e2fa1ba995..850c9d02a8a 100644
--- a/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml
+++ b/config/crd/bases/apps.kubeblocks.io_opsrequests.yaml
@@ -114,7 +114,7 @@ spec:
customSpec:
description: Specifies a custom operation as defined by OpsDefinition.
properties:
- items:
+ components:
description: Defines which components need to perform the actions
defined by this OpsDefinition. At least one component/shardComponent
is required. The components are identified by their name and
@@ -124,6 +124,10 @@ spec:
componentName:
description: Specifies the name of the cluster component.
type: string
+ isSharding:
+ description: Specifies that the componentName refers to
+ the cluster's sharding component.
+ type: boolean
parameters:
description: Represents the parameters for this operation
as declared in the opsDefinition.spec.parametersSchema.
@@ -146,18 +150,15 @@ spec:
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
- shardingName:
- description: Specifies the name of the cluster sharding
- component.
- type: string
+ required:
+ - componentName
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName))
- || (has(self.shardingName) && !has(self.componentName))
maxItems: 1024
minItems: 1
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
opsDefinitionRef:
description: Is a reference to an OpsDefinition.
type: string
@@ -176,7 +177,7 @@ spec:
serviceAccountName:
type: string
required:
- - items
+ - components
- opsDefinitionRef
type: object
expose:
@@ -2642,6 +2643,10 @@ spec:
- name
type: object
type: array
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
offlineInstances:
description: Specifies instances to be scaled in with dedicated
names in the list.
@@ -2653,17 +2658,14 @@ spec:
format: int32
minimum: 0
type: integer
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
required:
+ - componentName
- replicas
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.horizontalScaling
rule: self == oldSelf
@@ -2821,17 +2823,18 @@ spec:
- name
type: object
type: array
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
required:
+ - componentName
- instances
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.rebuildFrom
rule: self == oldSelf
@@ -2914,16 +2917,14 @@ spec:
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the cluster's
+ sharding component.
+ type: boolean
required:
+ - componentName
- configurations
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) || (has(self.shardingName)
- && !has(self.componentName))
reconfigures:
description: Defines the variables that need to input when updating
configuration.
@@ -3008,17 +3009,18 @@ spec:
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
required:
+ - componentName
- configurations
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.reconfigure
rule: self == oldSelf
@@ -3031,16 +3033,18 @@ spec:
componentName:
description: Specifies the name of the cluster component.
type: string
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
+ required:
+ - componentName
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
maxItems: 1024
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.restart
rule: self == oldSelf
@@ -3125,6 +3129,10 @@ spec:
description: Specifies the image to be used for the exec command.
By default, the image of kubeblocks-datascript is used.
type: string
+ isSharding:
+ description: Specifies that the componentName refers to the cluster's
+ sharding component.
+ type: boolean
script:
description: Defines the script to be executed.
items:
@@ -3264,14 +3272,9 @@ spec:
x-kubernetes-validations:
- message: forbidden to update spec.scriptSpec.script.selector
rule: self == oldSelf
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ required:
+ - componentName
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) || (has(self.shardingName)
- && !has(self.componentName))
switchover:
description: Switches over the specified components.
items:
@@ -3294,17 +3297,18 @@ spec:
will be executed, and it is mandatory that clusterDefinition.componentDefs[x].switchoverSpec.withCandidate
is not left blank."
type: string
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
required:
+ - componentName
- instanceName
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.switchover
rule: self == oldSelf
@@ -3486,6 +3490,10 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
type: array
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
limits:
additionalProperties:
anyOf:
@@ -3509,17 +3517,15 @@ spec:
to an implementation-defined value. Requests cannot exceed
Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ required:
+ - componentName
type: object
x-kubernetes-preserve-unknown-fields: true
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
maxItems: 1024
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
volumeExpansion:
description: 'Note: Quantity struct can not do immutable check by
CEL. Defines what component and volumeClaimTemplate need to expand
@@ -3616,9 +3622,10 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
type: array
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
volumeClaimTemplates:
description: volumeClaimTemplates specifies the storage size
and volumeClaimTemplate name.
@@ -3645,13 +3652,13 @@ spec:
- name
x-kubernetes-list-type: map
required:
+ - componentName
- volumeClaimTemplates
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
required:
- clusterRef
- type
diff --git a/controllers/apps/operations/custom.go b/controllers/apps/operations/custom.go
index 6d8369dd47e..a6edce65049 100644
--- a/controllers/apps/operations/custom.go
+++ b/controllers/apps/operations/custom.go
@@ -124,7 +124,7 @@ func (c CustomOpsHandler) checkExpression(reqCtx intctrlutil.RequestCtx,
cli client.Client,
opsRes *OpsResource,
rule *appsv1alpha1.Rule,
- compCustomItem appsv1alpha1.CustomOpsItem) error {
+ compCustomItem appsv1alpha1.CustomOpsComponent) error {
opsSpec := opsRes.OpsRequest.Spec
if opsSpec.Force {
return nil
@@ -174,7 +174,7 @@ func (c CustomOpsHandler) checkExpression(reqCtx intctrlutil.RequestCtx,
func (c CustomOpsHandler) initCompActionStatusAndPreCheck(reqCtx intctrlutil.RequestCtx,
cli client.Client,
opsRes *OpsResource,
- compCustomItem appsv1alpha1.CustomOpsItem) bool {
+ compCustomItem appsv1alpha1.CustomOpsComponent) bool {
if opsRes.OpsRequest.Status.Components == nil {
opsRes.OpsRequest.Status.Components = map[string]appsv1alpha1.OpsRequestComponentStatus{}
}
diff --git a/controllers/apps/operations/custom/action_exec.go b/controllers/apps/operations/custom/action_exec.go
index 3b36a4334d3..07eb5d551ba 100644
--- a/controllers/apps/operations/custom/action_exec.go
+++ b/controllers/apps/operations/custom/action_exec.go
@@ -33,7 +33,7 @@ type ExecAction struct {
OpsRequest *appsv1alpha1.OpsRequest
Cluster *appsv1alpha1.Cluster
OpsDef *appsv1alpha1.OpsDefinition
- CustomOpsItem *appsv1alpha1.CustomOpsItem
+ CustomCompOps *appsv1alpha1.CustomOpsComponent
Comp *appsv1alpha1.ClusterComponentSpec
progressDetail appsv1alpha1.ProgressStatusDetail
}
@@ -41,14 +41,14 @@ type ExecAction struct {
func NewExecAction(opsRequest *appsv1alpha1.OpsRequest,
cluster *appsv1alpha1.Cluster,
opsDef *appsv1alpha1.OpsDefinition,
- customOpsItem *appsv1alpha1.CustomOpsItem,
+ customCompOps *appsv1alpha1.CustomOpsComponent,
comp *appsv1alpha1.ClusterComponentSpec,
progressDetail appsv1alpha1.ProgressStatusDetail) *ExecAction {
return &ExecAction{
OpsRequest: opsRequest,
Cluster: cluster,
OpsDef: opsDef,
- CustomOpsItem: customOpsItem,
+ CustomCompOps: customCompOps,
Comp: comp,
progressDetail: progressDetail,
}
@@ -68,7 +68,7 @@ func (e *ExecAction) Execute(actionCtx ActionContext) (*ActionStatus, error) {
if targetPodTemplate == nil {
return nil, intctrlutil.NewFatalError("can not found the targetPodTemplate by " + podTemplateName)
}
- targetPods, err := getTargetPods(actionCtx.ReqCtx.Ctx, actionCtx.Client, e.Cluster, targetPodTemplate.PodSelector, e.CustomOpsItem.ComponentName)
+ targetPods, err := getTargetPods(actionCtx.ReqCtx.Ctx, actionCtx.Client, e.Cluster, targetPodTemplate.PodSelector, e.CustomCompOps.ComponentName)
if err != nil {
return nil, err
}
@@ -142,7 +142,7 @@ func (e *ExecAction) buildExecPodSpec(actionCtx ActionContext,
targetPod *corev1.Pod) (*corev1.PodSpec, error) {
// inject component and componentDef envs
env, err := buildActionPodEnv(actionCtx.ReqCtx, actionCtx.Client, e.Cluster, e.OpsDef,
- e.OpsRequest, e.Comp, e.CustomOpsItem, targetPodTemplate, targetPod)
+ e.OpsRequest, e.Comp, e.CustomCompOps, targetPodTemplate, targetPod)
if err != nil {
return nil, err
}
diff --git a/controllers/apps/operations/custom/action_workload.go b/controllers/apps/operations/custom/action_workload.go
index a32b9070860..39750e80e64 100644
--- a/controllers/apps/operations/custom/action_workload.go
+++ b/controllers/apps/operations/custom/action_workload.go
@@ -33,7 +33,7 @@ type WorkloadAction struct {
OpsRequest *appsv1alpha1.OpsRequest
Cluster *appsv1alpha1.Cluster
OpsDef *appsv1alpha1.OpsDefinition
- CompCustomItem *appsv1alpha1.CustomOpsItem
+ CustomCompOps *appsv1alpha1.CustomOpsComponent
Comp *appsv1alpha1.ClusterComponentSpec
progressDetail appsv1alpha1.ProgressStatusDetail
}
@@ -41,14 +41,14 @@ type WorkloadAction struct {
func NewWorkloadAction(opsRequest *appsv1alpha1.OpsRequest,
cluster *appsv1alpha1.Cluster,
opsDef *appsv1alpha1.OpsDefinition,
- compCustomItem *appsv1alpha1.CustomOpsItem,
+ customCompOps *appsv1alpha1.CustomOpsComponent,
comp *appsv1alpha1.ClusterComponentSpec,
progressDetail appsv1alpha1.ProgressStatusDetail) *WorkloadAction {
return &WorkloadAction{
OpsRequest: opsRequest,
Cluster: cluster,
OpsDef: opsDef,
- CompCustomItem: compCustomItem,
+ CustomCompOps: customCompOps,
Comp: comp,
progressDetail: progressDetail,
}
@@ -71,7 +71,7 @@ func (w *WorkloadAction) Execute(actionCtx ActionContext) (*ActionStatus, error)
if targetPodTemplate == nil {
return nil, intctrlutil.NewFatalError("can not found the targetPodTemplate by " + podTemplateName)
}
- targetPods, err = getTargetPods(actionCtx.ReqCtx.Ctx, actionCtx.Client, w.Cluster, targetPodTemplate.PodSelector, w.CompCustomItem.ComponentName)
+ targetPods, err = getTargetPods(actionCtx.ReqCtx.Ctx, actionCtx.Client, w.Cluster, targetPodTemplate.PodSelector, w.CustomCompOps.ComponentName)
if err != nil {
return nil, err
}
@@ -144,7 +144,7 @@ func (w *WorkloadAction) buildPodSpec(actionCtx ActionContext,
)
env, err := buildActionPodEnv(actionCtx.ReqCtx, actionCtx.Client, w.Cluster, w.OpsDef, w.OpsRequest,
- w.Comp, w.CompCustomItem, targetPodTemplate, targetPod)
+ w.Comp, w.CustomCompOps, targetPodTemplate, targetPod)
if err != nil {
return nil, err
}
diff --git a/controllers/apps/operations/custom/utils.go b/controllers/apps/operations/custom/utils.go
index b87c41defc2..61576b10c02 100644
--- a/controllers/apps/operations/custom/utils.go
+++ b/controllers/apps/operations/custom/utils.go
@@ -236,7 +236,7 @@ func buildActionPodEnv(reqCtx intctrlutil.RequestCtx,
opsDef *appsv1alpha1.OpsDefinition,
ops *appsv1alpha1.OpsRequest,
comp *appsv1alpha1.ClusterComponentSpec,
- compCustomItem *appsv1alpha1.CustomOpsItem,
+ compCustomItem *appsv1alpha1.CustomOpsComponent,
targetPodTemplate *appsv1alpha1.TargetPodTemplate,
targetPod *corev1.Pod) ([]corev1.EnvVar, error) {
var env = []corev1.EnvVar{
diff --git a/controllers/apps/operations/custom_test.go b/controllers/apps/operations/custom_test.go
index 3d889c85788..6b30ac6fd37 100644
--- a/controllers/apps/operations/custom_test.go
+++ b/controllers/apps/operations/custom_test.go
@@ -81,7 +81,7 @@ var _ = Describe("CustomOps", func() {
cluster.Name, appsv1alpha1.CustomType)
ops.Spec.CustomSpec = &appsv1alpha1.CustomOpsSpec{
OpsDefinitionRef: opsDef.Name,
- CustomOpsItems: []appsv1alpha1.CustomOpsItem{
+ CustomOpsItems: []appsv1alpha1.CustomOpsComponent{
{
ComponentOps: appsv1alpha1.ComponentOps{
ComponentName: comp,
diff --git a/controllers/apps/operations/custom_workflow.go b/controllers/apps/operations/custom_workflow.go
index 8d206601f50..932b05a9de3 100644
--- a/controllers/apps/operations/custom_workflow.go
+++ b/controllers/apps/operations/custom_workflow.go
@@ -53,7 +53,7 @@ func NewWorkflowContext(
}
// Run actions execution layer.
-func (w *WorkflowContext) Run(compCustomSpec *appsv1alpha1.CustomOpsItem) (*WorkflowStatus, error) {
+func (w *WorkflowContext) Run(compCustomSpec *appsv1alpha1.CustomOpsComponent) (*WorkflowStatus, error) {
var (
err error
actionStatus *custom.ActionStatus
@@ -145,7 +145,7 @@ steps:
}
func (w *WorkflowContext) getAction(action appsv1alpha1.OpsAction,
- compCustomItem *appsv1alpha1.CustomOpsItem,
+ compCustomItem *appsv1alpha1.CustomOpsComponent,
comp *appsv1alpha1.ClusterComponentSpec,
progressDetail appsv1alpha1.ProgressStatusDetail) custom.OpsAction {
switch {
diff --git a/controllers/apps/operations/horizontal_scaling.go b/controllers/apps/operations/horizontal_scaling.go
index 93dcba99e42..6978e968095 100644
--- a/controllers/apps/operations/horizontal_scaling.go
+++ b/controllers/apps/operations/horizontal_scaling.go
@@ -134,22 +134,15 @@ func (hs horizontalScalingOpsHandler) SaveLastConfiguration(reqCtx intctrlutil.R
return nil
}
-func (hs horizontalScalingOpsHandler) getExpectReplicas(opsRequest *appsv1alpha1.OpsRequest, shardName, componentName string) *int32 {
- compStatus := opsRequest.Status.Components[componentName]
+func (hs horizontalScalingOpsHandler) getExpectReplicas(opsRequest *appsv1alpha1.OpsRequest, compOps ComponentOpsInteface) *int32 {
+ compStatus := opsRequest.Status.Components[compOps.GetComponentName()]
if compStatus.OverrideBy != nil {
return compStatus.OverrideBy.Replicas
}
for _, v := range opsRequest.Spec.HorizontalScalingList {
- if shardName != "" {
- if v.ShardingName == shardName {
- return &v.Replicas
- }
- continue
- }
- if v.ComponentName == componentName {
+ if v.ComponentName == compOps.GetComponentName() {
return &v.Replicas
}
-
}
return nil
}
diff --git a/controllers/apps/operations/ops_comp_helper.go b/controllers/apps/operations/ops_comp_helper.go
index 680f7d4e0e8..69332befcb2 100644
--- a/controllers/apps/operations/ops_comp_helper.go
+++ b/controllers/apps/operations/ops_comp_helper.go
@@ -35,7 +35,7 @@ import (
type ComponentOpsInteface interface {
GetComponentName() string
- GetShardingName() string
+ IsShardingComponent() bool
}
type componentOpsHelper struct {
@@ -48,25 +48,21 @@ func newComponentOpsHelper[T ComponentOpsInteface](compOpsList []T) componentOps
}
for i := range compOpsList {
compOps := compOpsList[i]
- compOpsKey := getCompOpsKey(compOps.GetShardingName(), compOps.GetComponentName())
+ compOpsKey := getCompOpsKey(compOps.GetComponentName(), compOps.IsShardingComponent())
compOpsHelper.componentOpsSet[compOpsKey] = compOps
}
return compOpsHelper
}
-func (c componentOpsHelper) isSharding(compOps ComponentOpsInteface) bool {
- return compOps.GetShardingName() != ""
-}
-
func (c componentOpsHelper) getOpsComponentAndShardStatus(opsRequest *appsv1alpha1.OpsRequest, comOps ComponentOpsInteface) appsv1alpha1.OpsRequestComponentStatus {
- compKey := getCompOpsKey(comOps.GetShardingName(), comOps.GetComponentName())
+ compKey := getCompOpsKey(comOps.GetComponentName(), comOps.IsShardingComponent())
return opsRequest.Status.Components[compKey]
}
func (c componentOpsHelper) setOpsComponentAndShardStatus(opsRequest *appsv1alpha1.OpsRequest,
opsComStatus appsv1alpha1.OpsRequestComponentStatus,
comOps ComponentOpsInteface) {
- compKey := getCompOpsKey(comOps.GetShardingName(), comOps.GetComponentName())
+ compKey := getCompOpsKey(comOps.GetComponentName(), comOps.IsShardingComponent())
opsRequest.Status.Components[compKey] = opsComStatus
}
@@ -200,17 +196,17 @@ func (c componentOpsHelper) reconcileActionWithComponentOps(reqCtx intctrlutil.R
})
return nil
}
- getCompOps := func(shardingName, componentName string) (ComponentOpsInteface, bool) {
+ getCompOps := func(componentName string, isSharding bool) (ComponentOpsInteface, bool) {
if len(c.componentOpsSet) == 0 {
- return appsv1alpha1.ComponentOps{ComponentName: componentName, ShardingName: shardingName}, true
+ return appsv1alpha1.ComponentOps{ComponentName: componentName, IsSharding: isSharding}, true
}
- compOps, ok := c.componentOpsSet[getCompOpsKey(shardingName, componentName)]
+ compOps, ok := c.componentOpsSet[getCompOpsKey(componentName, isSharding)]
return compOps, ok
}
// 1. handle the component status
for i := range opsRes.Cluster.Spec.ComponentSpecs {
compSpec := &opsRes.Cluster.Spec.ComponentSpecs[i]
- compOps, ok := getCompOps("", compSpec.Name)
+ compOps, ok := getCompOps(compSpec.Name, false)
if !ok {
continue
}
@@ -222,7 +218,7 @@ func (c componentOpsHelper) reconcileActionWithComponentOps(reqCtx intctrlutil.R
// 2. handle the sharding status.
for i := range opsRes.Cluster.Spec.ShardingSpecs {
shardingSpec := opsRes.Cluster.Spec.ShardingSpecs[i]
- compOps, ok := getCompOps(shardingSpec.Name, "")
+ compOps, ok := getCompOps(shardingSpec.Name, true)
if !ok {
continue
}
@@ -250,7 +246,7 @@ func (c componentOpsHelper) reconcileActionWithComponentOps(reqCtx intctrlutil.R
}
expectProgressCount += expectCount
completedProgressCount += completedCount
- if !c.isSharding(pgResource.compOps) {
+ if !pgResource.compOps.IsShardingComponent() {
lastFailedTime := opsCompStatus.LastFailedTime
componentPhase := opsRes.Cluster.Status.Components[pgResource.compOps.GetComponentName()].Phase
if isFailedOrAbnormal(componentPhase) {
@@ -296,9 +292,9 @@ func (c componentOpsHelper) reconcileActionWithComponentOps(reqCtx intctrlutil.R
return appsv1alpha1.OpsSucceedPhase, 0, nil
}
-func getCompOpsKey(shardingName, componentName string) string {
- if shardingName != "" {
- return getShardingKey(shardingName)
+func getCompOpsKey(componentName string, isSharding bool) string {
+ if isSharding {
+ return getShardingKey(componentName)
}
return componentName
}
diff --git a/controllers/apps/operations/ops_progress_util.go b/controllers/apps/operations/ops_progress_util.go
index 9144268d0d5..549b0e0c016 100644
--- a/controllers/apps/operations/ops_progress_util.go
+++ b/controllers/apps/operations/ops_progress_util.go
@@ -395,7 +395,7 @@ func handleComponentProgressForScalingReplicas(reqCtx intctrlutil.RequestCtx,
opsRes *OpsResource,
pgRes progressResource,
compStatus *appsv1alpha1.OpsRequestComponentStatus,
- getExpectReplicas func(opsRequest *appsv1alpha1.OpsRequest, shardingName, componentName string) *int32) (int32, int32, error) {
+ getExpectReplicas func(opsRequest *appsv1alpha1.OpsRequest, compOps ComponentOpsInteface) *int32) (int32, int32, error) {
var (
podList *corev1.PodList
clusterComponent = pgRes.clusterComponent
@@ -405,11 +405,11 @@ func handleComponentProgressForScalingReplicas(reqCtx intctrlutil.RequestCtx,
if clusterComponent == nil {
return 0, 0, nil
}
- expectReplicas := getExpectReplicas(opsRequest, pgRes.compOps.GetShardingName(), pgRes.fullComponentName)
+ expectReplicas := getExpectReplicas(opsRequest, pgRes.compOps)
if expectReplicas == nil {
return 0, 0, nil
}
- compOpsKey := getCompOpsKey(pgRes.compOps.GetShardingName(), pgRes.compOps.GetComponentName())
+ compOpsKey := getCompOpsKey(pgRes.compOps.GetComponentName(), pgRes.compOps.IsShardingComponent())
lastComponentReplicas := opsRequest.Status.LastConfiguration.Components[compOpsKey].Replicas
if lastComponentReplicas == nil {
return 0, 0, nil
diff --git a/controllers/apps/operations/start.go b/controllers/apps/operations/start.go
index e9ea10d1950..a1c444264e6 100644
--- a/controllers/apps/operations/start.go
+++ b/controllers/apps/operations/start.go
@@ -59,8 +59,8 @@ func (start StartOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Cl
if err != nil {
return intctrlutil.NewFatalError(err.Error())
}
- applyReplicas := func(compSpec *appsv1alpha1.ClusterComponentSpec, shardingName string) {
- componentKey := getComponentKeyForStartSnapshot(shardingName, compSpec.Name, "")
+ applyReplicas := func(compSpec *appsv1alpha1.ClusterComponentSpec, componentName string, isSharding bool) {
+ componentKey := getComponentKeyForStartSnapshot(componentName, "", isSharding)
replicasOfSnapshot := componentReplicasMap[componentKey]
if replicasOfSnapshot == 0 {
return
@@ -69,7 +69,7 @@ func (start StartOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Cl
if compSpec.Replicas == 0 {
compSpec.Replicas = replicasOfSnapshot
for i := range compSpec.Instances {
- componentKey = getComponentKeyForStartSnapshot(shardingName, compSpec.Name, compSpec.Instances[i].Name)
+ componentKey = getComponentKeyForStartSnapshot(componentName, compSpec.Instances[i].Name, isSharding)
replicasOfSnapshot = componentReplicasMap[componentKey]
if replicasOfSnapshot == 0 {
continue
@@ -80,11 +80,11 @@ func (start StartOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Cl
}
for i := range cluster.Spec.ComponentSpecs {
compSpec := &cluster.Spec.ComponentSpecs[i]
- applyReplicas(compSpec, "")
+ applyReplicas(compSpec, compSpec.Name, false)
}
for i := range cluster.Spec.ShardingSpecs {
shardingSpec := &cluster.Spec.ShardingSpecs[i]
- applyReplicas(&shardingSpec.Template, shardingSpec.Name)
+ applyReplicas(&shardingSpec.Template, shardingSpec.Name, true)
}
// delete the replicas snapshot of components from the cluster.
delete(cluster.Annotations, constant.SnapShotForStartAnnotationKey)
@@ -94,13 +94,13 @@ func (start StartOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Cl
// ReconcileAction will be performed when action is done and loops till OpsRequest.status.phase is Succeed/Failed.
// the Reconcile function for start opsRequest.
func (start StartOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRes *OpsResource) (appsv1alpha1.OpsPhase, time.Duration, error) {
- getExpectReplicas := func(opsRequest *appsv1alpha1.OpsRequest, shardingName, componentName string) *int32 {
- compStatus := opsRequest.Status.Components[componentName]
+ getExpectReplicas := func(opsRequest *appsv1alpha1.OpsRequest, compOps ComponentOpsInteface) *int32 {
+ compStatus := opsRequest.Status.Components[compOps.GetComponentName()]
if compStatus.OverrideBy != nil {
return compStatus.OverrideBy.Replicas
}
componentReplicasMap, _ := getComponentReplicasSnapshot(opsRequest.Annotations)
- componentKey := getComponentKeyForStartSnapshot(shardingName, componentName, "")
+ componentKey := getComponentKeyForStartSnapshot(compOps.GetComponentName(), "", compOps.IsShardingComponent())
replicas, ok := componentReplicasMap[componentKey]
if !ok {
return nil
diff --git a/controllers/apps/operations/stop.go b/controllers/apps/operations/stop.go
index 7afe934345b..6cd1a74f2ca 100644
--- a/controllers/apps/operations/stop.go
+++ b/controllers/apps/operations/stop.go
@@ -62,22 +62,23 @@ func (stop StopOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Clie
if _, ok := cluster.Annotations[constant.SnapShotForStartAnnotationKey]; ok {
return nil
}
- setReplicas := func(compSpec *appsv1alpha1.ClusterComponentSpec, shardingName string) {
- compKey := getComponentKeyForStartSnapshot(shardingName, compSpec.Name, "")
+ setReplicas := func(compSpec *appsv1alpha1.ClusterComponentSpec, componentName string, isSharding bool) {
+ compKey := getComponentKeyForStartSnapshot(componentName, "", isSharding)
componentReplicasMap[compKey] = compSpec.Replicas
expectReplicas := int32(0)
compSpec.Replicas = expectReplicas
for i := range compSpec.Instances {
- compKey = getComponentKeyForStartSnapshot(shardingName, compSpec.Name, compSpec.Instances[i].Name)
+ compKey = getComponentKeyForStartSnapshot(componentName, compSpec.Instances[i].Name, isSharding)
componentReplicasMap[compKey] = intctrlutil.TemplateReplicas(compSpec.Instances[i])
compSpec.Instances[i].Replicas = &expectReplicas
}
}
for i := range cluster.Spec.ComponentSpecs {
- setReplicas(&cluster.Spec.ComponentSpecs[i], "")
+ compSpec := &cluster.Spec.ComponentSpecs[i]
+ setReplicas(compSpec, compSpec.Name, false)
}
for i, v := range cluster.Spec.ShardingSpecs {
- setReplicas(&cluster.Spec.ShardingSpecs[i].Template, v.Name)
+ setReplicas(&cluster.Spec.ShardingSpecs[i].Template, v.Name, true)
}
componentReplicasSnapshot, err := json.Marshal(componentReplicasMap)
if err != nil {
@@ -94,8 +95,8 @@ func (stop StopOpsHandler) Action(reqCtx intctrlutil.RequestCtx, cli client.Clie
// ReconcileAction will be performed when action is done and loops till OpsRequest.status.phase is Succeed/Failed.
// the Reconcile function for stop opsRequest.
func (stop StopOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRes *OpsResource) (appsv1alpha1.OpsPhase, time.Duration, error) {
- getExpectReplicas := func(opsRequest *appsv1alpha1.OpsRequest, shardingName, componentName string) *int32 {
- compStatus := opsRequest.Status.Components[componentName]
+ getExpectReplicas := func(opsRequest *appsv1alpha1.OpsRequest, compOps ComponentOpsInteface) *int32 {
+ compStatus := opsRequest.Status.Components[compOps.GetComponentName()]
if compStatus.OverrideBy != nil {
return compStatus.OverrideBy.Replicas
}
@@ -122,8 +123,8 @@ func (stop StopOpsHandler) SaveLastConfiguration(reqCtx intctrlutil.RequestCtx,
return nil
}
-func getComponentKeyForStartSnapshot(shardingName, compName, templateName string) string {
- key := getCompOpsKey(shardingName, compName)
+func getComponentKeyForStartSnapshot(compName, templateName string, isSharding bool) string {
+ key := getCompOpsKey(compName, isSharding)
if templateName != "" {
key += "." + templateName
}
diff --git a/controllers/apps/operations/volume_expansion.go b/controllers/apps/operations/volume_expansion.go
index 03fa2d2216a..c89cb97cfc1 100644
--- a/controllers/apps/operations/volume_expansion.go
+++ b/controllers/apps/operations/volume_expansion.go
@@ -128,7 +128,7 @@ func (ve volumeExpansionOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCt
ve.initComponentStatus(opsRequest)
}
compOpsHelper := newComponentOpsHelper(opsRes.OpsRequest.Spec.VolumeExpansionList)
- storageMap := ve.getRequestStorageMap(opsRequest, compOpsHelper)
+ storageMap := ve.getRequestStorageMap(opsRequest)
var veHelpers []volumeExpansionHelper
setVeHelpers := func(compSpec appsv1alpha1.ClusterComponentSpec, compOps ComponentOpsInteface, fullComponentName string) {
volumeExpansion := compOps.(appsv1alpha1.VolumeExpansion)
@@ -180,7 +180,7 @@ func (ve volumeExpansionOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCt
// sync the volumeClaimTemplate status and component phase On the OpsRequest and Cluster.
for _, veHelper := range veHelpers {
opsCompStatus := compOpsHelper.getOpsComponentAndShardStatus(opsRequest, veHelper.compOps)
- key := getComponentVCTKey(veHelper.compOps.GetShardingName(), veHelper.compOps.GetComponentName(), veHelper.templateName, veHelper.vctName)
+ key := getComponentVCTKey(veHelper.compOps.GetComponentName(), veHelper.templateName, veHelper.vctName, veHelper.compOps.IsShardingComponent())
requestStorage, ok := storageMap[key]
if !ok {
continue
@@ -228,12 +228,12 @@ func (ve volumeExpansionOpsHandler) ReconcileAction(reqCtx intctrlutil.RequestCt
func (ve volumeExpansionOpsHandler) SaveLastConfiguration(reqCtx intctrlutil.RequestCtx, cli client.Client, opsRes *OpsResource) error {
opsRequest := opsRes.OpsRequest
compOpsHelper := newComponentOpsHelper(opsRequest.Spec.VolumeExpansionList)
- storageMap := ve.getRequestStorageMap(opsRequest, compOpsHelper)
+ storageMap := ve.getRequestStorageMap(opsRequest)
compOpsHelper.saveLastConfigurations(opsRes, func(compSpec appsv1alpha1.ClusterComponentSpec, comOps ComponentOpsInteface) appsv1alpha1.LastComponentConfiguration {
getLastVCTs := func(vcts []appsv1alpha1.ClusterComponentVolumeClaimTemplate, templateName string) []appsv1alpha1.ClusterComponentVolumeClaimTemplate {
lastVCTs := make([]appsv1alpha1.ClusterComponentVolumeClaimTemplate, 0)
for _, vct := range vcts {
- key := getComponentVCTKey(comOps.GetShardingName(), comOps.GetComponentName(), templateName, vct.Name)
+ key := getComponentVCTKey(comOps.GetComponentName(), templateName, vct.Name, comOps.IsShardingComponent())
if _, ok := storageMap[key]; !ok {
continue
}
@@ -283,10 +283,10 @@ func (ve volumeExpansionOpsHandler) pvcIsResizing(pvc *corev1.PersistentVolumeCl
return isResizing
}
-func (ve volumeExpansionOpsHandler) getRequestStorageMap(opsRequest *appsv1alpha1.OpsRequest, compOpsHelper componentOpsHelper) map[string]resource.Quantity {
+func (ve volumeExpansionOpsHandler) getRequestStorageMap(opsRequest *appsv1alpha1.OpsRequest) map[string]resource.Quantity {
storageMap := map[string]resource.Quantity{}
setStorageMap := func(vct appsv1alpha1.OpsRequestVolumeClaimTemplate, compOps appsv1alpha1.ComponentOps, templateName string) {
- key := getComponentVCTKey(compOps.ShardingName, compOps.ComponentName, templateName, vct.Name)
+ key := getComponentVCTKey(compOps.GetComponentName(), templateName, vct.Name, compOps.IsShardingComponent())
storageMap[key] = vct.Storage
}
for _, v := range opsRequest.Spec.VolumeExpansionList {
@@ -306,7 +306,7 @@ func (ve volumeExpansionOpsHandler) getRequestStorageMap(opsRequest *appsv1alpha
func (ve volumeExpansionOpsHandler) initComponentStatus(opsRequest *appsv1alpha1.OpsRequest) {
opsRequest.Status.Components = map[string]appsv1alpha1.OpsRequestComponentStatus{}
for _, v := range opsRequest.Spec.VolumeExpansionList {
- opsRequest.Status.Components[getCompOpsKey(v.ShardingName, v.ComponentName)] = appsv1alpha1.OpsRequestComponentStatus{}
+ opsRequest.Status.Components[getCompOpsKey(v.ComponentName, v.IsSharding)] = appsv1alpha1.OpsRequestComponentStatus{}
}
}
@@ -323,8 +323,8 @@ func (ve volumeExpansionOpsHandler) handleVCTExpansionProgress(reqCtx intctrluti
err error
)
messageKey := fmt.Sprintf("component: %s", veHelper.compOps.GetComponentName())
- if veHelper.compOps.GetShardingName() != "" {
- messageKey = fmt.Sprintf("sharding: %s", veHelper.compOps.GetShardingName())
+ if veHelper.compOps.IsShardingComponent() {
+ messageKey = fmt.Sprintf("sharding: %s", veHelper.compOps.GetComponentName())
}
matchingLabels := client.MatchingLabels{
constant.AppInstanceLabelKey: opsRes.Cluster.Name,
@@ -385,15 +385,16 @@ func (ve volumeExpansionOpsHandler) handleVCTExpansionProgress(reqCtx intctrluti
return succeedCount, completedCount, nil
}
-func getComponentVCTKey(shardingName, cName, insTemplateName, vctName string) string {
+func getComponentVCTKey(compoName, insTemplateName, vctName string, isSharding bool) string {
var instanceNameKey string
if insTemplateName != "" {
instanceNameKey = "." + insTemplateName
}
- if shardingName != "" {
- return fmt.Sprintf("sharding/%s%s.%s", shardingName, instanceNameKey, vctName)
+ compVCTKey := fmt.Sprintf("%s%s.%s", compoName, instanceNameKey, vctName)
+ if isSharding {
+ return fmt.Sprintf("sharding/%s", compVCTKey)
}
- return fmt.Sprintf("%s%s.%s", cName, insTemplateName, vctName)
+ return compVCTKey
}
func getPVCProgressObjectKey(pvcName string) string {
diff --git a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml
index 0e2fa1ba995..850c9d02a8a 100644
--- a/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml
+++ b/deploy/helm/crds/apps.kubeblocks.io_opsrequests.yaml
@@ -114,7 +114,7 @@ spec:
customSpec:
description: Specifies a custom operation as defined by OpsDefinition.
properties:
- items:
+ components:
description: Defines which components need to perform the actions
defined by this OpsDefinition. At least one component/shardComponent
is required. The components are identified by their name and
@@ -124,6 +124,10 @@ spec:
componentName:
description: Specifies the name of the cluster component.
type: string
+ isSharding:
+ description: Specifies that the componentName refers to
+ the cluster's sharding component.
+ type: boolean
parameters:
description: Represents the parameters for this operation
as declared in the opsDefinition.spec.parametersSchema.
@@ -146,18 +150,15 @@ spec:
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
- shardingName:
- description: Specifies the name of the cluster sharding
- component.
- type: string
+ required:
+ - componentName
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName))
- || (has(self.shardingName) && !has(self.componentName))
maxItems: 1024
minItems: 1
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
opsDefinitionRef:
description: Is a reference to an OpsDefinition.
type: string
@@ -176,7 +177,7 @@ spec:
serviceAccountName:
type: string
required:
- - items
+ - components
- opsDefinitionRef
type: object
expose:
@@ -2642,6 +2643,10 @@ spec:
- name
type: object
type: array
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
offlineInstances:
description: Specifies instances to be scaled in with dedicated
names in the list.
@@ -2653,17 +2658,14 @@ spec:
format: int32
minimum: 0
type: integer
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
required:
+ - componentName
- replicas
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.horizontalScaling
rule: self == oldSelf
@@ -2821,17 +2823,18 @@ spec:
- name
type: object
type: array
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
required:
+ - componentName
- instances
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.rebuildFrom
rule: self == oldSelf
@@ -2914,16 +2917,14 @@ spec:
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the cluster's
+ sharding component.
+ type: boolean
required:
+ - componentName
- configurations
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) || (has(self.shardingName)
- && !has(self.componentName))
reconfigures:
description: Defines the variables that need to input when updating
configuration.
@@ -3008,17 +3009,18 @@ spec:
x-kubernetes-list-map-keys:
- name
x-kubernetes-list-type: map
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
required:
+ - componentName
- configurations
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.reconfigure
rule: self == oldSelf
@@ -3031,16 +3033,18 @@ spec:
componentName:
description: Specifies the name of the cluster component.
type: string
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
+ required:
+ - componentName
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
maxItems: 1024
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.restart
rule: self == oldSelf
@@ -3125,6 +3129,10 @@ spec:
description: Specifies the image to be used for the exec command.
By default, the image of kubeblocks-datascript is used.
type: string
+ isSharding:
+ description: Specifies that the componentName refers to the cluster's
+ sharding component.
+ type: boolean
script:
description: Defines the script to be executed.
items:
@@ -3264,14 +3272,9 @@ spec:
x-kubernetes-validations:
- message: forbidden to update spec.scriptSpec.script.selector
rule: self == oldSelf
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ required:
+ - componentName
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) || (has(self.shardingName)
- && !has(self.componentName))
switchover:
description: Switches over the specified components.
items:
@@ -3294,17 +3297,18 @@ spec:
will be executed, and it is mandatory that clusterDefinition.componentDefs[x].switchoverSpec.withCandidate
is not left blank."
type: string
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
required:
+ - componentName
- instanceName
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
x-kubernetes-validations:
- message: forbidden to update spec.switchover
rule: self == oldSelf
@@ -3486,6 +3490,10 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
type: array
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
limits:
additionalProperties:
anyOf:
@@ -3509,17 +3517,15 @@ spec:
to an implementation-defined value. Requests cannot exceed
Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/'
type: object
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ required:
+ - componentName
type: object
x-kubernetes-preserve-unknown-fields: true
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
maxItems: 1024
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
volumeExpansion:
description: 'Note: Quantity struct can not do immutable check by
CEL. Defines what component and volumeClaimTemplate need to expand
@@ -3616,9 +3622,10 @@ spec:
type: object
x-kubernetes-preserve-unknown-fields: true
type: array
- shardingName:
- description: Specifies the name of the cluster sharding component.
- type: string
+ isSharding:
+ description: Specifies that the componentName refers to the
+ cluster's sharding component.
+ type: boolean
volumeClaimTemplates:
description: volumeClaimTemplates specifies the storage size
and volumeClaimTemplate name.
@@ -3645,13 +3652,13 @@ spec:
- name
x-kubernetes-list-type: map
required:
+ - componentName
- volumeClaimTemplates
type: object
- x-kubernetes-validations:
- - message: either componentName or shardingName
- rule: (has(self.componentName) && !has(self.shardingName)) ||
- (has(self.shardingName) && !has(self.componentName))
type: array
+ x-kubernetes-list-map-keys:
+ - componentName
+ x-kubernetes-list-type: map
required:
- clusterRef
- type
diff --git a/docs/developer_docs/api-reference/cluster.md b/docs/developer_docs/api-reference/cluster.md
index e04381145f6..e030768c0df 100644
--- a/docs/developer_docs/api-reference/cluster.md
+++ b/docs/developer_docs/api-reference/cluster.md
@@ -8527,7 +8527,7 @@ and other administrative tasks.
ComponentOps
-(Appears on:CustomOpsItem, HorizontalScaling, OpsRequestSpec, RebuildInstance, Reconfigure, ScriptSpec, Switchover, VerticalScaling, VolumeExpansion)
+(Appears on:CustomOpsComponent, HorizontalScaling, OpsRequestSpec, RebuildInstance, Reconfigure, ScriptSpec, Switchover, VerticalScaling, VolumeExpansion)
ComponentOps represents the common variables required for operations within the scope of a normal component/shard component.
@@ -8553,13 +8553,13 @@ string
-shardingName
+isSharding
-string
+bool
|
- Specifies the name of the cluster sharding component.
+Specifies that the componentName refers to the cluster’s sharding component.
|
@@ -11712,7 +11712,7 @@ string
-
CustomOpsItem
+CustomOpsComponent
(Appears on:CustomOpsSpec)
@@ -11814,10 +11814,10 @@ the calculated number will be rounded up to 1.
-items
+components
-
-[]CustomOpsItem
+
+[]CustomOpsComponent
|
@@ -15306,7 +15306,7 @@ LastComponentConfiguration
Parameter
-(Appears on:CustomOpsItem)
+(Appears on:CustomOpsComponent)