diff --git a/Makefile b/Makefile
index 797ab51a..ef3f1ec4 100644
--- a/Makefile
+++ b/Makefile
@@ -63,7 +63,7 @@ test:
.PHONY: testacc
testacc:
@echo "==> Running acceptance tests"
- TF_ACC=1 go test ./castai/... '-run=^TestAcc' -v -timeout 30m
+ TF_ACC=1 go test ./castai/... '-run=^TestAcc' -v -timeout 40m
.PHONY: validate-terraform-examples
validate-terraform-examples:
diff --git a/castai/data_source_resource_rebalancing_schedule.go b/castai/data_source_resource_rebalancing_schedule.go
new file mode 100644
index 00000000..00138d88
--- /dev/null
+++ b/castai/data_source_resource_rebalancing_schedule.go
@@ -0,0 +1,45 @@
+package castai
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+func dataSourceRebalancingSchedule() *schema.Resource {
+ dataSourceRebalancingSchedule := &schema.Resource{
+ Description: "Retrieve Rebalancing Schedule",
+ ReadContext: dataSourceRebalancingScheduleRead,
+ Schema: map[string]*schema.Schema{},
+ }
+
+ resourceRebalancingSchedule := resourceRebalancingSchedule()
+ for key, value := range resourceRebalancingSchedule.Schema {
+ dataSourceRebalancingSchedule.Schema[key] = value
+ if key != "name" {
+ // only name is provided in terraform configuration by user
+ // other parameters are "computed" from existing rebalancing schedule
+ dataSourceRebalancingSchedule.Schema[key].Computed = true
+ dataSourceRebalancingSchedule.Schema[key].Required = false
+ // MaxItems is for configurable attributes, there's nothing to configure on computed-only field
+ dataSourceRebalancingSchedule.Schema[key].MaxItems = 0
+ }
+ }
+ return dataSourceRebalancingSchedule
+}
+
+func dataSourceRebalancingScheduleRead(ctx context.Context, data *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ rebalancingScheduleName := data.Get("name").(string)
+ client := meta.(*ProviderConfig).api
+ schedule, err := getRebalancingScheduleByName(ctx, client, rebalancingScheduleName)
+ if err != nil {
+ return diag.FromErr(fmt.Errorf("error retrieving rebalancing schedule: %w", err))
+ }
+
+ if err := scheduleToState(schedule, data); err != nil {
+ return diag.FromErr(fmt.Errorf("error converting schedule to terraform state: %w", err))
+ }
+ return nil
+}
diff --git a/castai/data_source_resource_rebalancing_schedule_test.go b/castai/data_source_resource_rebalancing_schedule_test.go
new file mode 100644
index 00000000..84d7d543
--- /dev/null
+++ b/castai/data_source_resource_rebalancing_schedule_test.go
@@ -0,0 +1,147 @@
+package castai
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "net/http"
+ "testing"
+
+ "github.com/golang/mock/gomock"
+ "github.com/hashicorp/go-cty/cty"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+ "github.com/stretchr/testify/require"
+
+ "github.com/castai/terraform-provider-castai/castai/sdk"
+ mock_sdk "github.com/castai/terraform-provider-castai/castai/sdk/mock"
+)
+
+func TestRebalancingScheduleDataSourceRead(t *testing.T) {
+ t.Parallel()
+
+ r := require.New(t)
+ mockClient := mock_sdk.NewMockClientInterface(gomock.NewController(t))
+
+ ctx := context.Background()
+ provider := &ProviderConfig{
+ api: &sdk.ClientWithResponses{
+ ClientInterface: mockClient,
+ },
+ }
+
+ body := io.NopCloser(bytes.NewReader([]byte(`{
+ "schedules": [
+ {
+ "id": "9302fdac-4922-4b09-afa6-0a12be99f112",
+ "schedule": {
+ "cron": "5 * * * * *"
+ },
+ "launchConfiguration": {
+ "selector": {
+ "nodeSelectorTerms": []
+ },
+ "rebalancingOptions": {
+ "minNodes": 2,
+ "executionConditions": {
+ "enabled": true,
+ "achievedSavingsPercentage": 15
+ },
+ "keepDrainTimeoutNodes": false,
+ "evictGracefully": false,
+ "aggressiveMode": false
+ },
+ "numTargetedNodes": 20,
+ "nodeTtlSeconds": 350,
+ "targetNodeSelectionAlgorithm": "TargetNodeSelectionAlgorithmNormalizedPrice"
+ },
+ "triggerConditions": {
+ "savingsPercentage": 15,
+ "ignoreSavings": false
+ },
+ "nextTriggerAt": "2024-10-31T10:46:05Z",
+ "name": "rebalancing schedule 1",
+ "jobs": [],
+ "lastTriggerAt": "2024-10-31T10:45:08.915021Z"
+ },
+ {
+ "id": "d1954729-6fc0-4741-aeb0-c497e16f59f7",
+ "schedule": {
+ "cron": "5 * * * * *"
+ },
+ "launchConfiguration": {
+ "selector": {
+ "nodeSelectorTerms": []
+ },
+ "rebalancingOptions": {
+ "minNodes": 2,
+ "executionConditions": {
+ "enabled": true,
+ "achievedSavingsPercentage": 15
+ },
+ "keepDrainTimeoutNodes": false,
+ "evictGracefully": false,
+ "aggressiveMode": false
+ },
+ "numTargetedNodes": 20,
+ "nodeTtlSeconds": 350,
+ "targetNodeSelectionAlgorithm": "TargetNodeSelectionAlgorithmNormalizedPrice"
+ },
+ "triggerConditions": {
+ "savingsPercentage": 15,
+ "ignoreSavings": false
+ },
+ "nextTriggerAt": "2024-10-31T10:46:05Z",
+ "name": "rebalancing schedule 2",
+ "jobs": [
+ {
+ "id": "2ac90b71-8adc-468a-8680-ea4f99e4df27",
+ "clusterId": "d8bdd6d1-6b9a-4dbb-a276-5e44ff512322",
+ "rebalancingScheduleId": "d1954729-6fc0-4741-aeb0-c497e16f59f7",
+ "rebalancingPlanId": "",
+ "enabled": true,
+ "lastTriggerAt": "2024-10-31T10:38:06.594638Z",
+ "nextTriggerAt": "2024-10-31T10:46:05Z",
+ "status": "JobStatusSkipped"
+ }
+ ],
+ "lastTriggerAt": "2024-10-31T10:45:08.922097Z"
+ }
+ ]
+}`)))
+ mockClient.EXPECT().
+ ScheduledRebalancingAPIListRebalancingSchedules(gomock.Any()).
+ Return(&http.Response{StatusCode: 200, Body: body, Header: map[string][]string{"Content-Type": {"json"}}}, nil)
+
+ state := terraform.NewInstanceStateShimmedFromValue(cty.ObjectVal(map[string]cty.Value{}), 0)
+
+ resource := dataSourceRebalancingSchedule()
+ data := resource.Data(state)
+
+ r.NoError(data.Set("name", "rebalancing schedule 1"))
+
+ result := resource.ReadContext(ctx, data, provider)
+ r.Nil(result)
+ r.False(result.HasError())
+
+ expectedState := `ID = 9302fdac-4922-4b09-afa6-0a12be99f112
+launch_configuration.# = 1
+launch_configuration.0.aggressive_mode = false
+launch_configuration.0.execution_conditions.# = 1
+launch_configuration.0.execution_conditions.0.achieved_savings_percentage = 15
+launch_configuration.0.execution_conditions.0.enabled = true
+launch_configuration.0.keep_drain_timeout_nodes = false
+launch_configuration.0.node_ttl_seconds = 350
+launch_configuration.0.num_targeted_nodes = 20
+launch_configuration.0.rebalancing_min_nodes = 2
+launch_configuration.0.selector =
+launch_configuration.0.target_node_selection_algorithm = TargetNodeSelectionAlgorithmNormalizedPrice
+name = rebalancing schedule 1
+schedule.# = 1
+schedule.0.cron = 5 * * * * *
+trigger_conditions.# = 1
+trigger_conditions.0.ignore_savings = false
+trigger_conditions.0.savings_percentage = 15
+Tainted = false
+`
+ r.Equal(expectedState, data.State().String())
+}
diff --git a/castai/provider.go b/castai/provider.go
index 8113882b..a227f8b8 100644
--- a/castai/provider.go
+++ b/castai/provider.go
@@ -56,9 +56,10 @@ func Provider(version string) *schema.Provider {
},
DataSourcesMap: map[string]*schema.Resource{
- "castai_eks_settings": dataSourceEKSSettings(),
- "castai_gke_user_policies": dataSourceGKEPolicies(),
- "castai_organization": dataSourceOrganization(),
+ "castai_eks_settings": dataSourceEKSSettings(),
+ "castai_gke_user_policies": dataSourceGKEPolicies(),
+ "castai_organization": dataSourceOrganization(),
+ "castai_rebalancing_schedule": dataSourceRebalancingSchedule(),
// TODO: remove in next major release
"castai_eks_user_arn": dataSourceEKSClusterUserARN(),
diff --git a/castai/resource_rebalancing_job_eks_test.go b/castai/resource_rebalancing_job_eks_test.go
index eb8bee81..8e6f3b84 100644
--- a/castai/resource_rebalancing_job_eks_test.go
+++ b/castai/resource_rebalancing_job_eks_test.go
@@ -2,10 +2,11 @@ package castai
import (
"fmt"
+ "testing"
+
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
- "testing"
)
func TestAccResourceRebalancingJob_eks(t *testing.T) {
@@ -81,3 +82,88 @@ func makeInitialRebalancingJobConfig(rName, clusterName string) string {
func makeUpdatedRebalancingJobConfig(rName, clusterName string) string {
return ConfigCompose(testAccEKSClusterConfig(rName, clusterName), makeRebalancingJobConfig(rName, "enabled=false"))
}
+
+func TestAccResourceRebalancingJobWithDataSource_eks(t *testing.T) {
+ rName := fmt.Sprintf("%v-rebalancing-job-with-data-source-%v", ResourcePrefix, acctest.RandString(8))
+ clusterName := "core-tf-acc"
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { testAccPreCheck(t) },
+
+ ProviderFactories: providerFactories,
+ Steps: []resource.TestStep{
+ {
+ Config: makeRebalancingScheduleConfig(rName),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("castai_rebalancing_schedule.test-with-data-source", "name", rName),
+ ),
+ },
+ {
+ Config: makeRebalancingJobWithDataSource(rName, clusterName),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("data.castai_rebalancing_schedule.data-source-for-rebalancing-schedule", "name", rName),
+ ),
+ },
+ },
+ ExternalProviders: map[string]resource.ExternalProvider{
+ "aws": {
+ Source: "hashicorp/aws",
+ VersionConstraint: "~> 4.0",
+ },
+ },
+ })
+}
+
+func makeRebalancingScheduleConfig(rName string) string {
+ template := `
+resource "castai_rebalancing_schedule" "test-with-data-source" {
+ name = %[1]q
+ schedule {
+ cron = "5 4 * * *"
+ }
+ trigger_conditions {
+ savings_percentage = 15.25
+ }
+ launch_configuration {
+ execution_conditions {
+ enabled = false
+ achieved_savings_percentage = 0
+ }
+ }
+}
+`
+ return fmt.Sprintf(template, rName)
+}
+
+func makeRebalancingJobWithDataSourceConfig(rName string) string {
+ template := `
+resource "castai_rebalancing_schedule" "test-with-data-source" {
+ name = %[1]q
+ schedule {
+ cron = "5 4 * * *"
+ }
+ trigger_conditions {
+ savings_percentage = 15.25
+ }
+ launch_configuration {
+ execution_conditions {
+ enabled = false
+ achieved_savings_percentage = 0
+ }
+ }
+}
+
+data "castai_rebalancing_schedule" "data-source-for-rebalancing-schedule" {
+ name = %[1]q
+}
+
+resource "castai_rebalancing_job" "test-with-data-source" {
+ cluster_id = castai_eks_cluster.test.id
+ rebalancing_schedule_id = data.castai_rebalancing_schedule.data-source-for-rebalancing-schedule.id
+}
+`
+ return fmt.Sprintf(template, rName)
+}
+
+func makeRebalancingJobWithDataSource(rName, clusterName string) string {
+ return ConfigCompose(testAccEKSClusterConfig(rName, clusterName), makeRebalancingJobWithDataSourceConfig(rName))
+}
diff --git a/castai/sdk/api.gen.go b/castai/sdk/api.gen.go
index 5aa0c8ab..24587f06 100644
--- a/castai/sdk/api.gen.go
+++ b/castai/sdk/api.gen.go
@@ -283,6 +283,7 @@ const (
// Defines values for WorkloadoptimizationV1EventType.
const (
EVENTTYPECONFIGURATIONCHANGEDV2 WorkloadoptimizationV1EventType = "EVENT_TYPE_CONFIGURATION_CHANGEDV2"
+ EVENTTYPEFAILEDHELMTESTHOOK WorkloadoptimizationV1EventType = "EVENT_TYPE_FAILED_HELM_TEST_HOOK"
EVENTTYPEINVALID WorkloadoptimizationV1EventType = "EVENT_TYPE_INVALID"
EVENTTYPEOOMKILL WorkloadoptimizationV1EventType = "EVENT_TYPE_OOM_KILL"
EVENTTYPERECOMMENDEDPODCOUNTCHANGED WorkloadoptimizationV1EventType = "EVENT_TYPE_RECOMMENDED_POD_COUNT_CHANGED"
@@ -359,6 +360,10 @@ type CastaiAuthtokenV1beta1AuthToken struct {
// created_by is used to link this token to a user who created it.
CreatedBy *string `json:"createdBy"`
+ // (read only) Time when the token will expire (unix timestamp in nanoseconds).
+ // A null value means that the key will never expire.
+ ExpiresAt *time.Time `json:"expiresAt,omitempty"`
+
// (read only) ID of the token.
Id *string `json:"id,omitempty"`
@@ -1537,6 +1542,7 @@ type ExternalclusterV1AKSClusterParams struct {
// Node resource group of the cluster.
NodeResourceGroup *string `json:"nodeResourceGroup,omitempty"`
+ PodCidr *string `json:"podCidr"`
// Region of the cluster.
Region *string `json:"region,omitempty"`
@@ -1544,6 +1550,9 @@ type ExternalclusterV1AKSClusterParams struct {
// Azure subscription ID where cluster runs.
SubscriptionId *string `json:"subscriptionId,omitempty"`
+ // Azure tenant id.
+ TenantId *string `json:"tenantId,omitempty"`
+
// Zone name pattern in the cluster.
ZoneNamePattern *string `json:"zoneNamePattern,omitempty"`
}
@@ -3386,6 +3395,7 @@ type WorkloadoptimizationV1DownscalingSettings struct {
// WorkloadoptimizationV1Event defines model for workloadoptimization.v1.Event.
type WorkloadoptimizationV1Event struct {
ConfigurationChangedV2 *WorkloadoptimizationV1ConfigurationChangedEventV2 `json:"configurationChangedV2,omitempty"`
+ FailedHook *WorkloadoptimizationV1FailedHookEvent `json:"failedHook,omitempty"`
OomKill *WorkloadoptimizationV1OOMKillEvent `json:"oomKill,omitempty"`
RecommendedPodCountChanged *WorkloadoptimizationV1RecommendedPodCountChangedEvent `json:"recommendedPodCountChanged,omitempty"`
RecommendedRequestsChanged *WorkloadoptimizationV1RecommendedRequestsChangedEvent `json:"recommendedRequestsChanged,omitempty"`
@@ -3405,6 +3415,12 @@ type WorkloadoptimizationV1EventContainer struct {
// EventType defines possible types for workload events.
type WorkloadoptimizationV1EventType string
+// WorkloadoptimizationV1FailedHookEvent defines model for workloadoptimization.v1.FailedHookEvent.
+type WorkloadoptimizationV1FailedHookEvent struct {
+ Message string `json:"message"`
+ Time time.Time `json:"time"`
+}
+
// WorkloadoptimizationV1GetAgentStatusResponse defines model for workloadoptimization.v1.GetAgentStatusResponse.
type WorkloadoptimizationV1GetAgentStatusResponse struct {
CastAgentCurrentVersion *string `json:"castAgentCurrentVersion"`
@@ -4103,6 +4119,9 @@ type ExternalClusterAPIGetCredentialsScriptParams struct {
// Whether GCP SA Impersonate feature should be enabled.
GcpSaImpersonate *bool `form:"gcpSaImpersonate,omitempty" json:"gcpSaImpersonate,omitempty"`
+
+ // Whether Netflow network exporter should be installed.
+ InstallNetflowExporter *bool `form:"installNetflowExporter,omitempty" json:"installNetflowExporter,omitempty"`
}
// ExternalClusterAPIDisconnectClusterJSONBody defines parameters for ExternalClusterAPIDisconnectCluster.
@@ -4157,6 +4176,12 @@ type ExternalClusterAPIDeleteNodeParams struct {
// ExternalClusterAPIDrainNodeJSONBody defines parameters for ExternalClusterAPIDrainNode.
type ExternalClusterAPIDrainNodeJSONBody = ExternalclusterV1DrainConfig
+// ExternalClusterAPIReconcileClusterParams defines parameters for ExternalClusterAPIReconcileCluster.
+type ExternalClusterAPIReconcileClusterParams struct {
+ // Whether to skip AKS refresh of instance-template.
+ SkipAksInitData *bool `form:"skipAksInitData,omitempty" json:"skipAksInitData,omitempty"`
+}
+
// ExternalClusterAPITriggerResumeClusterJSONBody defines parameters for ExternalClusterAPITriggerResumeCluster.
type ExternalClusterAPITriggerResumeClusterJSONBody = ExternalclusterV1NodeConfig
diff --git a/castai/sdk/client.gen.go b/castai/sdk/client.gen.go
index 087d9790..ea5433a1 100644
--- a/castai/sdk/client.gen.go
+++ b/castai/sdk/client.gen.go
@@ -304,7 +304,7 @@ type ClientInterface interface {
ExternalClusterAPIDrainNode(ctx context.Context, clusterId string, nodeId string, body ExternalClusterAPIDrainNodeJSONRequestBody, reqEditors ...RequestEditorFn) (*http.Response, error)
// ExternalClusterAPIReconcileCluster request
- ExternalClusterAPIReconcileCluster(ctx context.Context, clusterId string, reqEditors ...RequestEditorFn) (*http.Response, error)
+ ExternalClusterAPIReconcileCluster(ctx context.Context, clusterId string, params *ExternalClusterAPIReconcileClusterParams, reqEditors ...RequestEditorFn) (*http.Response, error)
// ExternalClusterAPITriggerResumeCluster request with any body
ExternalClusterAPITriggerResumeClusterWithBody(ctx context.Context, clusterId string, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error)
@@ -1491,8 +1491,8 @@ func (c *Client) ExternalClusterAPIDrainNode(ctx context.Context, clusterId stri
return c.Client.Do(req)
}
-func (c *Client) ExternalClusterAPIReconcileCluster(ctx context.Context, clusterId string, reqEditors ...RequestEditorFn) (*http.Response, error) {
- req, err := NewExternalClusterAPIReconcileClusterRequest(c.Server, clusterId)
+func (c *Client) ExternalClusterAPIReconcileCluster(ctx context.Context, clusterId string, params *ExternalClusterAPIReconcileClusterParams, reqEditors ...RequestEditorFn) (*http.Response, error) {
+ req, err := NewExternalClusterAPIReconcileClusterRequest(c.Server, clusterId, params)
if err != nil {
return nil, err
}
@@ -4650,6 +4650,22 @@ func NewExternalClusterAPIGetCredentialsScriptRequest(server string, clusterId s
}
+ if params.InstallNetflowExporter != nil {
+
+ if queryFrag, err := runtime.StyleParamWithLocation("form", true, "installNetflowExporter", runtime.ParamLocationQuery, *params.InstallNetflowExporter); err != nil {
+ return nil, err
+ } else if parsed, err := url.ParseQuery(queryFrag); err != nil {
+ return nil, err
+ } else {
+ for k, v := range parsed {
+ for _, v2 := range v {
+ queryValues.Add(k, v2)
+ }
+ }
+ }
+
+ }
+
queryURL.RawQuery = queryValues.Encode()
req, err := http.NewRequest("GET", queryURL.String(), nil)
@@ -5367,7 +5383,7 @@ func NewExternalClusterAPIDrainNodeRequestWithBody(server string, clusterId stri
}
// NewExternalClusterAPIReconcileClusterRequest generates requests for ExternalClusterAPIReconcileCluster
-func NewExternalClusterAPIReconcileClusterRequest(server string, clusterId string) (*http.Request, error) {
+func NewExternalClusterAPIReconcileClusterRequest(server string, clusterId string, params *ExternalClusterAPIReconcileClusterParams) (*http.Request, error) {
var err error
var pathParam0 string
@@ -5392,6 +5408,26 @@ func NewExternalClusterAPIReconcileClusterRequest(server string, clusterId strin
return nil, err
}
+ queryValues := queryURL.Query()
+
+ if params.SkipAksInitData != nil {
+
+ if queryFrag, err := runtime.StyleParamWithLocation("form", true, "skipAksInitData", runtime.ParamLocationQuery, *params.SkipAksInitData); err != nil {
+ return nil, err
+ } else if parsed, err := url.ParseQuery(queryFrag); err != nil {
+ return nil, err
+ } else {
+ for k, v := range parsed {
+ for _, v2 := range v {
+ queryValues.Add(k, v2)
+ }
+ }
+ }
+
+ }
+
+ queryURL.RawQuery = queryValues.Encode()
+
req, err := http.NewRequest("POST", queryURL.String(), nil)
if err != nil {
return nil, err
@@ -8699,7 +8735,7 @@ type ClientWithResponsesInterface interface {
ExternalClusterAPIDrainNodeWithResponse(ctx context.Context, clusterId string, nodeId string, body ExternalClusterAPIDrainNodeJSONRequestBody) (*ExternalClusterAPIDrainNodeResponse, error)
// ExternalClusterAPIReconcileCluster request
- ExternalClusterAPIReconcileClusterWithResponse(ctx context.Context, clusterId string) (*ExternalClusterAPIReconcileClusterResponse, error)
+ ExternalClusterAPIReconcileClusterWithResponse(ctx context.Context, clusterId string, params *ExternalClusterAPIReconcileClusterParams) (*ExternalClusterAPIReconcileClusterResponse, error)
// ExternalClusterAPITriggerResumeCluster request with any body
ExternalClusterAPITriggerResumeClusterWithBodyWithResponse(ctx context.Context, clusterId string, contentType string, body io.Reader) (*ExternalClusterAPITriggerResumeClusterResponse, error)
@@ -13416,8 +13452,8 @@ func (c *ClientWithResponses) ExternalClusterAPIDrainNodeWithResponse(ctx contex
}
// ExternalClusterAPIReconcileClusterWithResponse request returning *ExternalClusterAPIReconcileClusterResponse
-func (c *ClientWithResponses) ExternalClusterAPIReconcileClusterWithResponse(ctx context.Context, clusterId string) (*ExternalClusterAPIReconcileClusterResponse, error) {
- rsp, err := c.ExternalClusterAPIReconcileCluster(ctx, clusterId)
+func (c *ClientWithResponses) ExternalClusterAPIReconcileClusterWithResponse(ctx context.Context, clusterId string, params *ExternalClusterAPIReconcileClusterParams) (*ExternalClusterAPIReconcileClusterResponse, error) {
+ rsp, err := c.ExternalClusterAPIReconcileCluster(ctx, clusterId, params)
if err != nil {
return nil, err
}
diff --git a/castai/sdk/mock/client.go b/castai/sdk/mock/client.go
index 587857a7..3b95d76f 100644
--- a/castai/sdk/mock/client.go
+++ b/castai/sdk/mock/client.go
@@ -1156,9 +1156,9 @@ func (mr *MockClientInterfaceMockRecorder) ExternalClusterAPIListNodes(ctx, clus
}
// ExternalClusterAPIReconcileCluster mocks base method.
-func (m *MockClientInterface) ExternalClusterAPIReconcileCluster(ctx context.Context, clusterId string, reqEditors ...sdk.RequestEditorFn) (*http.Response, error) {
+func (m *MockClientInterface) ExternalClusterAPIReconcileCluster(ctx context.Context, clusterId string, params *sdk.ExternalClusterAPIReconcileClusterParams, reqEditors ...sdk.RequestEditorFn) (*http.Response, error) {
m.ctrl.T.Helper()
- varargs := []interface{}{ctx, clusterId}
+ varargs := []interface{}{ctx, clusterId, params}
for _, a := range reqEditors {
varargs = append(varargs, a)
}
@@ -1169,9 +1169,9 @@ func (m *MockClientInterface) ExternalClusterAPIReconcileCluster(ctx context.Con
}
// ExternalClusterAPIReconcileCluster indicates an expected call of ExternalClusterAPIReconcileCluster.
-func (mr *MockClientInterfaceMockRecorder) ExternalClusterAPIReconcileCluster(ctx, clusterId interface{}, reqEditors ...interface{}) *gomock.Call {
+func (mr *MockClientInterfaceMockRecorder) ExternalClusterAPIReconcileCluster(ctx, clusterId, params interface{}, reqEditors ...interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- varargs := append([]interface{}{ctx, clusterId}, reqEditors...)
+ varargs := append([]interface{}{ctx, clusterId, params}, reqEditors...)
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExternalClusterAPIReconcileCluster", reflect.TypeOf((*MockClientInterface)(nil).ExternalClusterAPIReconcileCluster), varargs...)
}
@@ -4289,18 +4289,18 @@ func (mr *MockClientWithResponsesInterfaceMockRecorder) ExternalClusterAPIListNo
}
// ExternalClusterAPIReconcileClusterWithResponse mocks base method.
-func (m *MockClientWithResponsesInterface) ExternalClusterAPIReconcileClusterWithResponse(ctx context.Context, clusterId string) (*sdk.ExternalClusterAPIReconcileClusterResponse, error) {
+func (m *MockClientWithResponsesInterface) ExternalClusterAPIReconcileClusterWithResponse(ctx context.Context, clusterId string, params *sdk.ExternalClusterAPIReconcileClusterParams) (*sdk.ExternalClusterAPIReconcileClusterResponse, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "ExternalClusterAPIReconcileClusterWithResponse", ctx, clusterId)
+ ret := m.ctrl.Call(m, "ExternalClusterAPIReconcileClusterWithResponse", ctx, clusterId, params)
ret0, _ := ret[0].(*sdk.ExternalClusterAPIReconcileClusterResponse)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// ExternalClusterAPIReconcileClusterWithResponse indicates an expected call of ExternalClusterAPIReconcileClusterWithResponse.
-func (mr *MockClientWithResponsesInterfaceMockRecorder) ExternalClusterAPIReconcileClusterWithResponse(ctx, clusterId interface{}) *gomock.Call {
+func (mr *MockClientWithResponsesInterfaceMockRecorder) ExternalClusterAPIReconcileClusterWithResponse(ctx, clusterId, params interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExternalClusterAPIReconcileClusterWithResponse", reflect.TypeOf((*MockClientWithResponsesInterface)(nil).ExternalClusterAPIReconcileClusterWithResponse), ctx, clusterId)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExternalClusterAPIReconcileClusterWithResponse", reflect.TypeOf((*MockClientWithResponsesInterface)(nil).ExternalClusterAPIReconcileClusterWithResponse), ctx, clusterId, params)
}
// ExternalClusterAPIRegisterClusterWithBodyWithResponse mocks base method.
diff --git a/docs/data-sources/rebalancing_schedule.md b/docs/data-sources/rebalancing_schedule.md
new file mode 100644
index 00000000..73f417e3
--- /dev/null
+++ b/docs/data-sources/rebalancing_schedule.md
@@ -0,0 +1,69 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "castai_rebalancing_schedule Data Source - terraform-provider-castai"
+subcategory: ""
+description: |-
+ Retrieve Rebalancing Schedule
+---
+
+# castai_rebalancing_schedule (Data Source)
+
+Retrieve Rebalancing Schedule
+
+
+
+
+## Schema
+
+### Required
+
+- `name` (String) Name of the schedule.
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
+- `launch_configuration` (List of Object) (see [below for nested schema](#nestedatt--launch_configuration))
+- `schedule` (List of Object) (see [below for nested schema](#nestedatt--schedule))
+- `trigger_conditions` (List of Object) (see [below for nested schema](#nestedatt--trigger_conditions))
+
+
+### Nested Schema for `launch_configuration`
+
+Read-Only:
+
+- `aggressive_mode` (Boolean)
+- `execution_conditions` (List of Object) (see [below for nested schema](#nestedobjatt--launch_configuration--execution_conditions))
+- `keep_drain_timeout_nodes` (Boolean)
+- `node_ttl_seconds` (Number)
+- `num_targeted_nodes` (Number)
+- `rebalancing_min_nodes` (Number)
+- `selector` (String)
+- `target_node_selection_algorithm` (String)
+
+
+### Nested Schema for `launch_configuration.execution_conditions`
+
+Read-Only:
+
+- `achieved_savings_percentage` (Number)
+- `enabled` (Boolean)
+
+
+
+
+### Nested Schema for `schedule`
+
+Read-Only:
+
+- `cron` (String)
+
+
+
+### Nested Schema for `trigger_conditions`
+
+Read-Only:
+
+- `ignore_savings` (Boolean)
+- `savings_percentage` (Number)
+
+
diff --git a/examples/data-sources/castai_rebalancing_schedule/data_source.tf b/examples/data-sources/castai_rebalancing_schedule/data_source.tf
new file mode 100644
index 00000000..d5633f41
--- /dev/null
+++ b/examples/data-sources/castai_rebalancing_schedule/data_source.tf
@@ -0,0 +1,3 @@
+data "castai_rebalancing_schedule" "rebalancing_schedule" {
+ name = var.castai_rebalancing_schedule_name
+}
diff --git a/examples/gke/gke_clusters_with_workspaces/README.md b/examples/gke/gke_clusters_with_workspaces/README.md
new file mode 100644
index 00000000..2681d5cd
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/README.md
@@ -0,0 +1,68 @@
+# GKE clusters in Terraform workspaces using a rebalancing schedule from the organization workspace
+
+This example onboards existing GKE clusters created in different Terraform workspaces to CAST AI,
+using a rebalancing schedule that is created in another Terraform workspace.
+
+## Usage
+
+1. Rename:
+ - `tf.vars.example` to `tf.vars`
+ - `tf_clusterA.vars.example` to `tf_clusterA.vars`
+ - `tf_clusterB.vars.example` to `tf_clusterB.vars`
+
+ e.g.
+
+ ```
+ cp tf.vars.example tf.vars
+ cp tf_clusterA.vars.example tf_clusterA.vars
+ cp tf_clusterB.vars.example tf_clusterB.vars
+ ```
+
+2. Update `tf.vars`, `tf_clusterA.vars`, `tf_clusterB.vars`
+
+**Note:** please see [this][gcp-iam-doc] instruction to configure `service_accounts_unique_ids`
+
+[gcp-iam-doc]: https://github.com/castai/terraform-provider-castai/tree/master/examples/gke/gke_castai_iam#steps-to-take-to-successfully-create-gcp-iam-resources-with-iamserviceaccountuser-role-and-custom-condition
+
+3. Initialize Terraform. Under example root folder run:
+```
+terraform init
+```
+
+4. Create the organization workspace and create the rebalancing schedule resource in it
+
+```
+terraform workspace new org-workspace
+terraform plan -var-file=tf.vars
+terraform apply -var-file=tf.vars
+```
+
+5. Create workspace for the first cluster and create resources in this workspace
+
+```
+terraform workspace new clusterA
+terraform plan -var-file=tf_clusterA.vars
+terraform apply -var-file=tf_clusterA.vars
+```
+
+6. Create workspace for the second cluster and create resources in this workspace
+
+```
+terraform workspace new clusterB
+terraform plan -var-file=tf_clusterB.vars
+terraform apply -var-file=tf_clusterB.vars
+```
+
+7. Open CAST AI console and check that clusters are using the same configuration for Rebalancing Schedule
+
+8. To destroy resources created by this example:
+```
+terraform workspace select org-workspace
+terraform destroy -var-file=tf.vars
+
+terraform workspace select clusterA
+terraform destroy -var-file=tf_clusterA.vars
+
+terraform workspace select clusterB
+terraform destroy -var-file=tf_clusterB.vars
+```
diff --git a/examples/gke/gke_clusters_with_workspaces/cluster_workspace.tf b/examples/gke/gke_clusters_with_workspaces/cluster_workspace.tf
new file mode 100644
index 00000000..a3a47dcb
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/cluster_workspace.tf
@@ -0,0 +1,145 @@
+data "castai_rebalancing_schedule" "data_rs" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ name = "org rebalancing schedule"
+}
+
+module "castai-gke-iam" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ source = "castai/gke-iam/castai"
+ project_id = var.gke_project_id
+ gke_cluster_name = var.gke_cluster_name
+ service_accounts_unique_ids = length(var.service_accounts_unique_ids) == 0 ? [] : var.service_accounts_unique_ids
+}
+
+resource "castai_gke_cluster" "castai_cluster" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ project_id = var.gke_project_id
+ location = var.gke_cluster_location
+ name = var.gke_cluster_name
+ delete_nodes_on_disconnect = var.delete_nodes_on_disconnect
+ credentials_json = terraform.workspace == var.org_workspace ? "" : module.castai-gke-iam[0].private_key
+}
+
+
+resource "castai_rebalancing_job" "foo-job" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ cluster_id = castai_gke_cluster.castai_cluster[0].id
+ rebalancing_schedule_id = data.castai_rebalancing_schedule.data_rs[0].id
+ enabled = true
+}
+
+resource "castai_autoscaler" "castai_autoscaler_policy" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ cluster_id = castai_gke_cluster.castai_cluster[0].id
+
+ autoscaler_settings {
+ enabled = true
+ is_scoped_mode = false
+ node_templates_partial_matching_enabled = false
+
+ unschedulable_pods {
+ enabled = true
+ }
+
+ cluster_limits {
+ enabled = false
+
+ cpu {
+ min_cores = 1
+ max_cores = 200
+ }
+ }
+
+ node_downscaler {
+ enabled = true
+
+ empty_nodes {
+ enabled = true
+ }
+
+ evictor {
+ aggressive_mode = false
+ cycle_interval = "60s"
+ dry_run = false
+ enabled = true
+ node_grace_period_minutes = 10
+ scoped_mode = false
+ }
+ }
+ }
+}
+
+resource "castai_node_configuration" "default" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ cluster_id = castai_gke_cluster.castai_cluster[0].id
+ name = "default"
+ disk_cpu_ratio = 0
+ min_disk_size = 100
+ subnets = var.gke_subnets
+}
+
+resource "castai_node_configuration_default" "this" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ cluster_id = castai_gke_cluster.castai_cluster[0].id
+ configuration_id = castai_node_configuration.default[0].id
+}
+
+resource "castai_node_template" "default_by_castai" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ cluster_id = castai_gke_cluster.castai_cluster[0].id
+
+ name = "default-by-castai"
+ is_default = true
+ is_enabled = true
+ configuration_id = castai_node_configuration.default[0].id
+ should_taint = false
+
+ constraints {
+ on_demand = true
+ }
+}
+
+resource "castai_node_template" "example_spot_template" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ cluster_id = castai_gke_cluster.castai_cluster[0].id
+
+ name = "example_spot_template"
+ is_default = false
+ is_enabled = true
+ configuration_id = castai_node_configuration.default[0].id
+ should_taint = true
+ custom_instances_enabled = true # gke specific
+
+ custom_labels = {
+ type = "spot"
+ }
+
+ custom_taints {
+ key = "dedicated"
+ value = "backend"
+ effect = "NoSchedule"
+ }
+
+ constraints {
+ spot = true
+ use_spot_fallbacks = true
+ fallback_restore_rate_seconds = 1800
+ enable_spot_diversity = true
+ spot_diversity_price_increase_limit_percent = 20
+ min_cpu = 2
+ max_cpu = 8
+ min_memory = 4096
+ max_memory = 16384
+ architectures = ["amd64"]
+ burstable_instances = "disabled"
+ customer_specific = "enabled"
+
+ instance_families {
+ exclude = ["e2"]
+ }
+ custom_priority {
+ instance_families = ["c4"]
+ spot = true
+ }
+ }
+}
diff --git a/examples/gke/gke_clusters_with_workspaces/cluster_workspace_helm.tf b/examples/gke/gke_clusters_with_workspaces/cluster_workspace_helm.tf
new file mode 100644
index 00000000..0044ac02
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/cluster_workspace_helm.tf
@@ -0,0 +1,140 @@
+resource "helm_release" "castai_agent" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+ name = "castai-agent"
+ repository = "https://castai.github.io/helm-charts"
+ chart = "castai-agent"
+ namespace = "castai-agent"
+ create_namespace = true
+ cleanup_on_fail = true
+ wait = true
+
+ version = var.agent_version
+ values = var.agent_values
+
+ set {
+ name = "replicaCount"
+ value = "2"
+ }
+
+ set {
+ name = "provider"
+ value = "gke"
+ }
+
+ set {
+ name = "additionalEnv.STATIC_CLUSTER_ID"
+ value = castai_gke_cluster.castai_cluster[0].id
+ }
+
+ set {
+ name = "createNamespace"
+ value = "false"
+ }
+
+ dynamic "set" {
+ for_each = var.castai_api_url != "" ? [var.castai_api_url] : []
+ content {
+ name = "apiURL"
+ value = var.castai_api_url
+ }
+ }
+
+ dynamic "set" {
+ for_each = var.castai_components_labels
+ content {
+ name = "podLabels.${set.key}"
+ value = set.value
+ }
+ }
+
+ set_sensitive {
+ name = "apiKey"
+ value = castai_gke_cluster.castai_cluster[0].cluster_token
+ }
+}
+
+resource "helm_release" "castai_cluster_controller" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+
+ name = "cluster-controller"
+ repository = "https://castai.github.io/helm-charts"
+ chart = "castai-cluster-controller"
+ namespace = "castai-agent"
+ create_namespace = true
+ cleanup_on_fail = true
+ wait = true
+
+ version = var.cluster_controller_version
+ values = var.cluster_controller_values
+
+ set {
+ name = "castai.clusterID"
+ value = castai_gke_cluster.castai_cluster[0].id
+ }
+
+ dynamic "set" {
+ for_each = var.castai_api_url != "" ? [var.castai_api_url] : []
+ content {
+ name = "castai.apiURL"
+ value = var.castai_api_url
+ }
+ }
+
+ set_sensitive {
+ name = "castai.apiKey"
+ value = castai_gke_cluster.castai_cluster[0].cluster_token
+ }
+
+ dynamic "set" {
+ for_each = var.castai_components_labels
+ content {
+ name = "podLabels.${set.key}"
+ value = set.value
+ }
+ }
+
+ depends_on = [helm_release.castai_agent]
+
+ lifecycle {
+ ignore_changes = [version]
+ }
+}
+
+resource "helm_release" "castai_evictor" {
+ count = terraform.workspace == var.org_workspace ? 0 : 1 # Create only in the cluster workspace
+
+ name = "castai-evictor"
+ repository = "https://castai.github.io/helm-charts"
+ chart = "castai-evictor"
+ namespace = "castai-agent"
+ create_namespace = true
+ cleanup_on_fail = true
+ wait = true
+
+ version = var.evictor_version
+ values = var.evictor_values
+
+ set {
+ name = "replicaCount"
+ value = "0"
+ }
+
+ set {
+ name = "castai-evictor-ext.enabled"
+ value = "false"
+ }
+
+ dynamic "set" {
+ for_each = var.castai_components_labels
+ content {
+ name = "podLabels.${set.key}"
+ value = set.value
+ }
+ }
+
+ depends_on = [helm_release.castai_agent]
+
+ lifecycle {
+ ignore_changes = [set, version]
+ }
+}
diff --git a/examples/gke/gke_clusters_with_workspaces/org_workspace.tf b/examples/gke/gke_clusters_with_workspaces/org_workspace.tf
new file mode 100644
index 00000000..f976caae
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/org_workspace.tf
@@ -0,0 +1,19 @@
+resource "castai_rebalancing_schedule" "org_rebalancing_schedule" {
+ count = terraform.workspace == var.org_workspace ? 1 : 0 # Create only in the organization workspace
+ name = "org rebalancing schedule"
+ schedule {
+ cron = "5 * * * * *"
+ }
+ trigger_conditions {
+ savings_percentage = 15
+ }
+ launch_configuration {
+ node_ttl_seconds = 350
+ num_targeted_nodes = 20
+ rebalancing_min_nodes = 2
+ execution_conditions {
+ achieved_savings_percentage = 15
+ enabled = true
+ }
+ }
+}
diff --git a/examples/gke/gke_clusters_with_workspaces/providers.tf b/examples/gke/gke_clusters_with_workspaces/providers.tf
new file mode 100644
index 00000000..e17b8339
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/providers.tf
@@ -0,0 +1,11 @@
+provider "castai" {
+ api_url = var.castai_api_url
+ api_token = var.castai_api_token
+}
+
+provider "helm" {
+ kubernetes {
+ config_path = "~/.kube/config"
+ config_context = var.kube_config_context
+ }
+}
diff --git a/examples/gke/gke_clusters_with_workspaces/tf.vars.example b/examples/gke/gke_clusters_with_workspaces/tf.vars.example
new file mode 100644
index 00000000..9f376b80
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/tf.vars.example
@@ -0,0 +1,4 @@
+castai_api_token = ""
+# castai_api_url = ""
+
+org_workspace = ""
diff --git a/examples/gke/gke_clusters_with_workspaces/tf_clusterA.vars.example b/examples/gke/gke_clusters_with_workspaces/tf_clusterA.vars.example
new file mode 100644
index 00000000..4289f3d3
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/tf_clusterA.vars.example
@@ -0,0 +1,12 @@
+gke_cluster_location = ""
+gke_cluster_name = ""
+gke_subnets = []
+gke_project_id = ""
+# service_accounts_unique_ids = []
+
+castai_api_token = ""
+# castai_api_url = ""
+
+kube_config_context = ""
+
+org_workspace = ""
diff --git a/examples/gke/gke_clusters_with_workspaces/tf_clusterB.vars.example b/examples/gke/gke_clusters_with_workspaces/tf_clusterB.vars.example
new file mode 100644
index 00000000..4289f3d3
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/tf_clusterB.vars.example
@@ -0,0 +1,12 @@
+gke_cluster_location = ""
+gke_cluster_name = ""
+gke_subnets = []
+gke_project_id = ""
+# service_accounts_unique_ids = []
+
+castai_api_token = ""
+# castai_api_url = ""
+
+kube_config_context = ""
+
+org_workspace = ""
diff --git a/examples/gke/gke_clusters_with_workspaces/variables.tf b/examples/gke/gke_clusters_with_workspaces/variables.tf
new file mode 100644
index 00000000..f50838ad
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/variables.tf
@@ -0,0 +1,102 @@
+variable "gke_project_id" {
+ type = string
+ description = "The project id from GCP"
+ default = ""
+}
+
+variable "gke_cluster_name" {
+ type = string
+ description = "Name of the cluster to be connected to CAST AI."
+ default = ""
+}
+
+variable "gke_cluster_location" {
+ type = string
+ description = "Location of the cluster to be connected to CAST AI. Can be region or zone for zonal clusters"
+ default = ""
+}
+
+variable "gke_subnets" {
+ type = list(string)
+ description = "Subnet IDs used by CAST AI to provision nodes."
+ default = []
+}
+
+variable "service_accounts_unique_ids" {
+ type = list(string)
+ description = "Service Accounts' unique IDs used by node pools in the cluster."
+ default = []
+}
+
+variable "castai_api_url" {
+  type        = string
+  description = "CAST AI API URL. Leave the default unless testing against a non-production CAST AI environment."
+  default     = "https://api.cast.ai"
+}
+
+variable "castai_api_token" {
+  type        = string
+  description = "CAST AI API token created in the console.cast.ai API Access keys section. Required by the castai provider."
+  sensitive   = true
+  default     = ""
+}
+
+variable "delete_nodes_on_disconnect" {
+  type        = bool
+  description = "Optionally delete CAST AI created nodes when the cluster is destroyed"
+  default     = false
+}
+
+variable "castai_components_labels" {
+ type = map(any)
+ description = "Optional additional Kubernetes labels for CAST AI pods"
+ default = {}
+}
+
+variable "agent_version" {
+ description = "Version of castai-agent helm chart. Default latest"
+ type = string
+ default = null
+}
+
+variable "cluster_controller_version" {
+ description = "Version of castai-cluster-controller helm chart. Default latest"
+ type = string
+ default = null
+}
+
+variable "evictor_version" {
+ description = "Version of castai-evictor chart. Default latest"
+ type = string
+ default = null
+}
+
+variable "agent_values" {
+ description = "List of YAML formatted string values for agent helm chart"
+ type = list(string)
+ default = []
+}
+
+variable "cluster_controller_values" {
+ description = "List of YAML formatted string values for cluster-controller helm chart"
+ type = list(string)
+ default = []
+}
+
+variable "evictor_values" {
+ description = "List of YAML formatted string values for evictor helm chart"
+ type = list(string)
+ default = []
+}
+
+variable "kube_config_context" {
+  type        = string
+  description = "Kubeconfig context used by the helm provider to select the target cluster."
+  default     = ""
+}
+
+variable "org_workspace" {
+  type        = string
+  description = "Name of the Terraform workspace holding organization-level (shared) resources; all other workspaces are treated as cluster workspaces."
+  default     = "org-workspace"
+}
diff --git a/examples/gke/gke_clusters_with_workspaces/versions.tf b/examples/gke/gke_clusters_with_workspaces/versions.tf
new file mode 100644
index 00000000..3cc24f3f
--- /dev/null
+++ b/examples/gke/gke_clusters_with_workspaces/versions.tf
@@ -0,0 +1,19 @@
+terraform {
+ required_version = ">= 0.13"
+
+ required_providers {
+ google = {
+ source = "hashicorp/google"
+ version = ">= 2.49"
+ }
+ castai = {
+ source = "castai/castai"
+ version = "~> 7.17"
+ }
+ helm = {
+ source = "hashicorp/helm"
+ version = ">= 2.0.0"
+ }
+ }
+}
+