Make additional worker node pool fields mandatory
KsaweryZietara committed Dec 27, 2024
1 parent 02c7833 commit 068f2c9
Showing 6 changed files with 190 additions and 217 deletions.
13 changes: 10 additions & 3 deletions common/runtime/model.go
@@ -389,8 +389,15 @@ type ModuleDTO struct {
}

type AdditionalWorkerNodePool struct {
- AutoScalerParameters `json:",inline"`
+ Name          string `json:"name"`
+ MachineType   string `json:"machineType"`
+ AutoScalerMin int    `json:"autoScalerMin"`
+ AutoScalerMax int    `json:"autoScalerMax"`
+ }

- Name string `json:"name"`
- MachineType *string `json:"machineType,omitempty"`
+ func (a AdditionalWorkerNodePool) Validate() error {
+ 	if a.AutoScalerMin > a.AutoScalerMax {
+ 		return fmt.Errorf("AutoScalerMax %v should be larger than AutoScalerMin %v for %s worker node pool", a.AutoScalerMax, a.AutoScalerMin, a.Name)
+ 	}
+ 	return nil
}
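
For context, here is a minimal, self-contained sketch (not part of the commit) of how the reworked struct behaves once the fields are plain value types: a payload that omits machineType simply decodes to an empty string, and Validate itself only enforces the min/max relationship. The package layout and main function below are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
)

// Struct and Validate copied from common/runtime/model.go as of this commit.
type AdditionalWorkerNodePool struct {
	Name          string `json:"name"`
	MachineType   string `json:"machineType"`
	AutoScalerMin int    `json:"autoScalerMin"`
	AutoScalerMax int    `json:"autoScalerMax"`
}

func (a AdditionalWorkerNodePool) Validate() error {
	if a.AutoScalerMin > a.AutoScalerMax {
		return fmt.Errorf("AutoScalerMax %v should be larger than AutoScalerMin %v for %s worker node pool", a.AutoScalerMax, a.AutoScalerMin, a.Name)
	}
	return nil
}

func main() {
	// Same shape as the "AutoScalerMin bigger than autoScalerMax" test case below.
	raw := `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20, "autoScalerMax": 3}]`

	var pools []AdditionalWorkerNodePool
	if err := json.Unmarshal([]byte(raw), &pools); err != nil {
		panic(err)
	}
	for _, p := range pools {
		if err := p.Validate(); err != nil {
			// Prints: AutoScalerMax 3 should be larger than AutoScalerMin 20 for name-1 worker node pool
			fmt.Println(err)
		}
	}
}

Requiredness of name and machineType themselves is presumably enforced by the plan's JSON schema rather than by Validate (the schema is not part of this excerpt), which is why the test cases below exercise missing fields through the full provisioning endpoint.
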
2 changes: 1 addition & 1 deletion internal/broker/instance_create.go
@@ -298,7 +298,7 @@ func (b *ProvisionEndpoint) validateAndExtract(details domain.ProvisionDetails,

if IsPreviewPlan(details.PlanID) {
for _, workerNodePool := range parameters.AdditionalWorkerNodePools {
- if err := workerNodePool.AutoScalerParameters.Validate(autoscalerMin, autoscalerMax); err != nil {
+ if err := workerNodePool.Validate(); err != nil {
return ersContext, parameters, apiresponses.NewFailureResponse(err, http.StatusUnprocessableEntity, err.Error())
}
}
248 changes: 96 additions & 152 deletions internal/broker/instance_create_test.go
@@ -1491,165 +1491,109 @@ func TestProvision_Provision(t *testing.T) {
// then
require.EqualError(t, err, "while validating input parameters: region: region must be one of the following: \"me-central2\"")
})
+ }

t.Run("Should pass with additional worker node pools", func(t *testing.T) {
// given
memoryStorage := storage.NewMemoryStorage()

queue := &automock.Queue{}
queue.On("Add", mock.AnythingOfType("string"))

factoryBuilder := &automock.PlanValidator{}
factoryBuilder.On("IsPlanSupport", broker.PreviewPlanID).Return(true)

planDefaults := func(planID string, platformProvider pkg.CloudProvider, provider *pkg.CloudProvider) (*gqlschema.ClusterConfigInput, error) {
return &gqlschema.ClusterConfigInput{}, nil
}
kcBuilder := &kcMock.KcBuilder{}
kcBuilder.On("GetServerURL", "").Return("", fmt.Errorf("error"))
// #create provisioner endpoint
provisionEndpoint := broker.NewProvision(
broker.Config{
EnablePlans: []string{"preview"},
URL: brokerURL,
OnlySingleTrialPerGA: true,
EnableKubeconfigURLLabel: true,
},
gardener.Config{Project: "test", ShootDomain: "example.com", DNSProviders: fixDNSProviders()},
memoryStorage.Operations(),
memoryStorage.Instances(),
memoryStorage.InstancesArchived(),
queue,
factoryBuilder,
broker.PlansConfig{},
planDefaults,
log,
dashboardConfig,
kcBuilder,
whitelist.Set{},
&broker.OneForAllConvergedCloudRegionsProvider{},
)

additionalWorkerNodePools := `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`

// when
_, err := provisionEndpoint.Provision(fixRequestContext(t, "cf-sa30"), instanceID, domain.ProvisionDetails{
ServiceID: serviceID,
PlanID: broker.PreviewPlanID,
RawParameters: json.RawMessage(fmt.Sprintf(`{"name": "%s", "region": "%s","additionalWorkerNodePools": %s }`, clusterName, "eu-central-1", additionalWorkerNodePools)),
RawContext: json.RawMessage(fmt.Sprintf(`{"globalaccount_id": "%s", "subaccount_id": "%s", "user_id": "%s"}`, "any-global-account-id", subAccountID, "[email protected]")),
}, true)
t.Logf("%+v\n", *provisionEndpoint)

// then
require.NoError(t, err)
})

t.Run("Should pass with empty additional worker node pools", func(t *testing.T) {
// given
memoryStorage := storage.NewMemoryStorage()

queue := &automock.Queue{}
queue.On("Add", mock.AnythingOfType("string"))

factoryBuilder := &automock.PlanValidator{}
factoryBuilder.On("IsPlanSupport", broker.PreviewPlanID).Return(true)

planDefaults := func(planID string, platformProvider pkg.CloudProvider, provider *pkg.CloudProvider) (*gqlschema.ClusterConfigInput, error) {
return &gqlschema.ClusterConfigInput{}, nil
}
kcBuilder := &kcMock.KcBuilder{}
kcBuilder.On("GetServerURL", "").Return("", fmt.Errorf("error"))
// #create provisioner endpoint
provisionEndpoint := broker.NewProvision(
broker.Config{
EnablePlans: []string{"preview"},
URL: brokerURL,
OnlySingleTrialPerGA: true,
EnableKubeconfigURLLabel: true,
},
gardener.Config{Project: "test", ShootDomain: "example.com", DNSProviders: fixDNSProviders()},
memoryStorage.Operations(),
memoryStorage.Instances(),
memoryStorage.InstancesArchived(),
queue,
factoryBuilder,
broker.PlansConfig{},
planDefaults,
log,
dashboardConfig,
kcBuilder,
whitelist.Set{},
&broker.OneForAllConvergedCloudRegionsProvider{},
)

additionalWorkerNodePools := "[]"

// when
_, err := provisionEndpoint.Provision(fixRequestContext(t, "cf-sa30"), instanceID, domain.ProvisionDetails{
ServiceID: serviceID,
PlanID: broker.PreviewPlanID,
RawParameters: json.RawMessage(fmt.Sprintf(`{"name": "%s", "region": "%s","additionalWorkerNodePools": %s }`, clusterName, "eu-central-1", additionalWorkerNodePools)),
RawContext: json.RawMessage(fmt.Sprintf(`{"globalaccount_id": "%s", "subaccount_id": "%s", "user_id": "%s"}`, "any-global-account-id", subAccountID, "[email protected]")),
}, true)
t.Logf("%+v\n", *provisionEndpoint)

// then
require.NoError(t, err)
})

t.Run("Should fail for autoScalerMin bigger than autoScalerMax", func(t *testing.T) {
// given
memoryStorage := storage.NewMemoryStorage()
+ func TestAdditionalWorkerNodePools(t *testing.T) {
+ for tn, tc := range map[string]struct {
+ additionalWorkerNodePools string
+ expectedError bool
+ }{
+ "Valid additional worker node pools": {
+ additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}, {"name": "name-2", "machineType": "m6i.large", "autoScalerMin": 0, "autoScalerMax": 0}]`,
+ expectedError: false,
+ },
+ "Empty additional worker node pools": {
+ additionalWorkerNodePools: "[]",
+ expectedError: false,
+ },
+ "Empty name": {
+ additionalWorkerNodePools: `[{"name": "", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
+ expectedError: true,
+ },
+ "Missing name": {
+ additionalWorkerNodePools: `[{"machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`,
+ expectedError: true,
+ },
+ "Empty machine type": {
+ additionalWorkerNodePools: `[{"name": "name-1", "machineType": "", "autoScalerMin": 3, "autoScalerMax": 20}]`,
+ expectedError: true,
+ },
+ "Missing machine type": {
+ additionalWorkerNodePools: `[{"name": "name-1", "autoScalerMin": 3, "autoScalerMax": 20}]`,
+ expectedError: true,
+ },
+ "Missing autoScalerMin": {
+ additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMax": 3}]`,
+ expectedError: true,
+ },
+ "Missing autoScalerMax": {
+ additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20}]`,
+ expectedError: true,
+ },
+ "AutoScalerMax bigger than 300": {
+ additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 301}]`,
+ expectedError: true,
+ },
+ "AutoScalerMin bigger than autoScalerMax": {
+ additionalWorkerNodePools: `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20, "autoScalerMax": 3}]`,
+ expectedError: true,
+ },
+ } {
+ t.Run(tn, func(t *testing.T) {
+ // given
+ log := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
+ Level: slog.LevelInfo,
+ }))

queue := &automock.Queue{}
queue.On("Add", mock.AnythingOfType("string"))
+ memoryStorage := storage.NewMemoryStorage()

factoryBuilder := &automock.PlanValidator{}
factoryBuilder.On("IsPlanSupport", broker.PreviewPlanID).Return(true)
queue := &automock.Queue{}
queue.On("Add", mock.AnythingOfType("string"))

- planDefaults := func(planID string, platformProvider pkg.CloudProvider, provider *pkg.CloudProvider) (*gqlschema.ClusterConfigInput, error) {
- return &gqlschema.ClusterConfigInput{}, nil
- }
- kcBuilder := &kcMock.KcBuilder{}
- kcBuilder.On("GetServerURL", "").Return("", fmt.Errorf("error"))
- // #create provisioner endpoint
- provisionEndpoint := broker.NewProvision(
- broker.Config{
- EnablePlans: []string{"preview"},
- URL: brokerURL,
- OnlySingleTrialPerGA: true,
- EnableKubeconfigURLLabel: true,
- },
- gardener.Config{Project: "test", ShootDomain: "example.com", DNSProviders: fixDNSProviders()},
- memoryStorage.Operations(),
- memoryStorage.Instances(),
- memoryStorage.InstancesArchived(),
- queue,
- factoryBuilder,
- broker.PlansConfig{},
- planDefaults,
- log,
- dashboardConfig,
- kcBuilder,
- whitelist.Set{},
- &broker.OneForAllConvergedCloudRegionsProvider{},
- )
factoryBuilder := &automock.PlanValidator{}
factoryBuilder.On("IsPlanSupport", broker.PreviewPlanID).Return(true)

additionalWorkerNodePools := `[{"name": "name-1", "machineType": "m6i.large", "autoScalerMin": 20, "autoScalerMax": 3}, {"name": "name-2", "machineType": "m6i.large", "autoScalerMin": 3, "autoScalerMax": 20}]`
+ planDefaults := func(planID string, platformProvider pkg.CloudProvider, provider *pkg.CloudProvider) (*gqlschema.ClusterConfigInput, error) {
+ return &gqlschema.ClusterConfigInput{}, nil
+ }
+ kcBuilder := &kcMock.KcBuilder{}
+ kcBuilder.On("GetServerURL", "").Return("", fmt.Errorf("error"))
+ // #create provisioner endpoint
+ provisionEndpoint := broker.NewProvision(
+ broker.Config{
+ EnablePlans: []string{"preview"},
+ URL: brokerURL,
+ OnlySingleTrialPerGA: true,
+ EnableKubeconfigURLLabel: true,
+ },
+ gardener.Config{Project: "test", ShootDomain: "example.com", DNSProviders: fixDNSProviders()},
+ memoryStorage.Operations(),
+ memoryStorage.Instances(),
+ memoryStorage.InstancesArchived(),
+ queue,
+ factoryBuilder,
+ broker.PlansConfig{},
+ planDefaults,
+ log,
+ dashboardConfig,
+ kcBuilder,
+ whitelist.Set{},
+ &broker.OneForAllConvergedCloudRegionsProvider{},
+ )

- // when
- _, err := provisionEndpoint.Provision(fixRequestContext(t, "cf-sa30"), instanceID, domain.ProvisionDetails{
- ServiceID: serviceID,
- PlanID: broker.PreviewPlanID,
- RawParameters: json.RawMessage(fmt.Sprintf(`{"name": "%s", "region": "%s","additionalWorkerNodePools": %s }`, clusterName, "eu-central-1", additionalWorkerNodePools)),
- RawContext: json.RawMessage(fmt.Sprintf(`{"globalaccount_id": "%s", "subaccount_id": "%s", "user_id": "%s"}`, "any-global-account-id", subAccountID, "[email protected]")),
- }, true)
- t.Logf("%+v\n", *provisionEndpoint)
+ // when
+ _, err := provisionEndpoint.Provision(fixRequestContext(t, "cf-sa30"), instanceID, domain.ProvisionDetails{
+ ServiceID: serviceID,
+ PlanID: broker.PreviewPlanID,
+ RawParameters: json.RawMessage(fmt.Sprintf(`{"name": "%s", "region": "%s","additionalWorkerNodePools": %s }`, clusterName, "eu-central-1", tc.additionalWorkerNodePools)),
+ RawContext: json.RawMessage(fmt.Sprintf(`{"globalaccount_id": "%s", "subaccount_id": "%s", "user_id": "%s"}`, "any-global-account-id", subAccountID, "[email protected]")),
+ }, true)
+ t.Logf("%+v\n", *provisionEndpoint)

- // then
- require.EqualError(t, err, "AutoScalerMax 3 should be larger than AutoScalerMin 20. User provided values min:20, max:3; plan defaults min:0, max:0")
- })
+ // then
+ assert.Equal(t, tc.expectedError, err != nil)
+ })
+ }
}

func TestNetworkingValidation(t *testing.T) {
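With this restructuring, the additional worker node pool cases can be exercised in isolation; assuming standard Go tooling and the repository root as the working directory, something like go test ./internal/broker/ -run TestAdditionalWorkerNodePools should run only the new table-driven test.
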
2 changes: 1 addition & 1 deletion internal/broker/instance_update.go
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,7 @@ func (b *UpdateEndpoint) processUpdateParameters(instance *internal.Instance, de

if IsPreviewPlan(details.PlanID) {
for _, workerNodePool := range params.AdditionalWorkerNodePools {
- if err := workerNodePool.AutoScalerParameters.Validate(autoscalerMin, autoscalerMax); err != nil {
+ if err := workerNodePool.Validate(); err != nil {
return domain.UpdateServiceSpec{}, apiresponses.NewFailureResponse(err, http.StatusBadRequest, err.Error())
}
}
