diff --git a/castai/resource_eviction_config.go b/castai/resource_eviction_config.go
index 95af9c26..37465267 100644
--- a/castai/resource_eviction_config.go
+++ b/castai/resource_eviction_config.go
@@ -498,7 +498,7 @@ func flattenPodSelector(ps *sdk.CastaiEvictorV1PodSelector) []map[string]any {
 	if ps.Namespace != nil {
 		out[FieldPodSelectorNamespace] = *ps.Namespace
 	}
-	if ps.LabelSelector.MatchLabels != nil {
+	if ps.LabelSelector != nil && ps.LabelSelector.MatchLabels != nil {
 		out[FieldMatchLabels] = ps.LabelSelector.MatchLabels.AdditionalProperties
 	}
 	if ps.LabelSelector.MatchExpressions != nil {
diff --git a/castai/resource_node_configuration.go b/castai/resource_node_configuration.go
index 2cf247d1..ea60a7fb 100644
--- a/castai/resource_node_configuration.go
+++ b/castai/resource_node_configuration.go
@@ -25,6 +25,7 @@ const (
 	FieldNodeConfigurationName         = "name"
 	FieldNodeConfigurationDiskCpuRatio = "disk_cpu_ratio"
 	FieldNodeConfigurationMinDiskSize  = "min_disk_size"
+	FieldNodeConfigurationDrainTimeoutSec = "drain_timeout_sec"
 	FieldNodeConfigurationSubnets      = "subnets"
 	FieldNodeConfigurationSSHPublicKey = "ssh_public_key"
 	FieldNodeConfigurationImage        = "image"
@@ -79,6 +80,13 @@ func resourceNodeConfiguration() *schema.Resource {
 				ValidateDiagFunc: validation.ToDiagFunc(validation.IntAtLeast(0)),
 				Description:      "Disk to CPU ratio. Sets the number of GiBs to be added for every CPU on the node. Defaults to 0",
 			},
+			FieldNodeConfigurationDrainTimeoutSec: {
+				Type:             schema.TypeInt,
+				Optional:         true,
+				Default:          0,
+				ValidateDiagFunc: validation.ToDiagFunc(validation.IntBetween(0, 3600)),
+				Description:      "Timeout in seconds for draining the node. Defaults to 0",
+			},
 			FieldNodeConfigurationMinDiskSize: {
 				Type:     schema.TypeInt,
 				Optional: true,
@@ -317,9 +325,10 @@ func resourceNodeConfigurationCreate(ctx context.Context, d *schema.ResourceData
 	client := meta.(*ProviderConfig).api
 	clusterID := d.Get(FieldClusterID).(string)
 	req := sdk.NodeConfigurationAPICreateConfigurationJSONRequestBody{
-		Name:         d.Get(FieldNodeConfigurationName).(string),
-		DiskCpuRatio: toPtr(int32(d.Get(FieldNodeConfigurationDiskCpuRatio).(int))),
-		MinDiskSize:  toPtr(int32(d.Get(FieldNodeConfigurationMinDiskSize).(int))),
+		Name:            d.Get(FieldNodeConfigurationName).(string),
+		DiskCpuRatio:    toPtr(int32(d.Get(FieldNodeConfigurationDiskCpuRatio).(int))),
+		DrainTimeoutSec: toPtr(int32(d.Get(FieldNodeConfigurationDrainTimeoutSec).(int))),
+		MinDiskSize:     toPtr(int32(d.Get(FieldNodeConfigurationMinDiskSize).(int))),
 	}
 
 	if v, ok := d.GetOk(FieldNodeConfigurationSubnets); ok {
@@ -406,6 +415,9 @@ func resourceNodeConfigurationRead(ctx context.Context, d *schema.ResourceData,
 	if err := d.Set(FieldNodeConfigurationDiskCpuRatio, nodeConfig.DiskCpuRatio); err != nil {
 		return diag.FromErr(fmt.Errorf("setting disk cpu ratio: %w", err))
 	}
+	if err := d.Set(FieldNodeConfigurationDrainTimeoutSec, nodeConfig.DrainTimeoutSec); err != nil {
+		return diag.FromErr(fmt.Errorf("setting drain timeout: %w", err))
+	}
 	if err := d.Set(FieldNodeConfigurationMinDiskSize, nodeConfig.MinDiskSize); err != nil {
 		return diag.FromErr(fmt.Errorf("setting min disk size: %w", err))
 	}
@@ -466,6 +478,7 @@ func resourceNodeConfigurationRead(ctx context.Context, d *schema.ResourceData,
 func resourceNodeConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
 	if !d.HasChanges(
 		FieldNodeConfigurationDiskCpuRatio,
+		FieldNodeConfigurationDrainTimeoutSec,
 		FieldNodeConfigurationMinDiskSize,
 		FieldNodeConfigurationSubnets,
 		FieldNodeConfigurationSSHPublicKey,
@@ -487,8 +500,9 @@ func resourceNodeConfigurationUpdate(ctx context.Context, d *schema.ResourceData
 	client := meta.(*ProviderConfig).api
 	clusterID := d.Get(FieldClusterID).(string)
 	req := sdk.NodeConfigurationAPIUpdateConfigurationJSONRequestBody{
-		DiskCpuRatio: toPtr(int32(d.Get(FieldNodeConfigurationDiskCpuRatio).(int))),
-		MinDiskSize:  toPtr(int32(d.Get(FieldNodeConfigurationMinDiskSize).(int))),
+		DiskCpuRatio:    toPtr(int32(d.Get(FieldNodeConfigurationDiskCpuRatio).(int))),
+		DrainTimeoutSec: toPtr(int32(d.Get(FieldNodeConfigurationDrainTimeoutSec).(int))),
+		MinDiskSize:     toPtr(int32(d.Get(FieldNodeConfigurationMinDiskSize).(int))),
 	}
 
 	if v, ok := d.GetOk(FieldNodeConfigurationSubnets); ok {
diff --git a/castai/resource_node_configuration_eks_test.go b/castai/resource_node_configuration_eks_test.go
index 3b5a2d63..e079afcc 100644
--- a/castai/resource_node_configuration_eks_test.go
+++ b/castai/resource_node_configuration_eks_test.go
@@ -67,6 +67,7 @@ func TestAccResourceNodeConfiguration_eks(t *testing.T) {
 				Config: testAccEKSNodeConfigurationUpdated(rName, clusterName),
 				Check: resource.ComposeTestCheckFunc(
 					resource.TestCheckResourceAttr(resourceName, "disk_cpu_ratio", "0"),
+					resource.TestCheckResourceAttr(resourceName, "drain_timeout_sec", "120"),
 					resource.TestCheckResourceAttr(resourceName, "min_disk_size", "100"),
 					resource.TestCheckResourceAttr(resourceName, "image", "amazon-eks-node-1.23-v20220824"),
 					resource.TestCheckResourceAttr(resourceName, "init_script", ""),
@@ -149,6 +150,7 @@ func testAccEKSNodeConfigurationUpdated(rName, clusterName string) string {
 resource "castai_node_configuration" "test" {
   name              = %[1]q
   cluster_id        = castai_eks_cluster.test.id
+  drain_timeout_sec = 120
   subnets           = data.aws_subnets.core.ids
   image             = "amazon-eks-node-1.23-v20220824"
   container_runtime = "containerd"
diff --git a/castai/resource_node_configuration_gke_test.go b/castai/resource_node_configuration_gke_test.go
index 46533fee..47ccdd93 100644
--- a/castai/resource_node_configuration_gke_test.go
+++ b/castai/resource_node_configuration_gke_test.go
@@ -25,6 +25,7 @@ func TestAccResourceNodeConfiguration_gke(t *testing.T) {
 				Check: resource.ComposeTestCheckFunc(
 					resource.TestCheckResourceAttr(resourceName, "name", rName),
 					resource.TestCheckResourceAttr(resourceName, "disk_cpu_ratio", "35"),
+					resource.TestCheckResourceAttr(resourceName, "drain_timeout_sec", "10"),
 					resource.TestCheckResourceAttr(resourceName, "min_disk_size", "122"),
 					resource.TestCheckResourceAttr(resourceName, "aks.#", "0"),
 					resource.TestCheckResourceAttr(resourceName, "eks.#", "0"),
@@ -69,6 +70,7 @@ resource "castai_node_configuration" "test" {
   name              = %[1]q
   cluster_id        = castai_gke_cluster.test.id
   disk_cpu_ratio    = 35
+  drain_timeout_sec = 10
   min_disk_size     = 122
   subnets           = [local.subnet_id]
   tags = {
diff --git a/castai/resource_node_configuration_test.go b/castai/resource_node_configuration_test.go
index f72853fc..125aa0b6 100644
--- a/castai/resource_node_configuration_test.go
+++ b/castai/resource_node_configuration_test.go
@@ -50,8 +50,9 @@ func Test_resourceNodeConfigurationCreate(t *testing.T) {
 					ImdsHopLimit: toPtr(int32(0)),
 					ImdsV1:       toPtr(false),
 				},
-				DiskCpuRatio: toPtr(int32(0)),
-				MinDiskSize:  toPtr(int32(0)),
+				DiskCpuRatio:    toPtr(int32(0)),
+				DrainTimeoutSec: toPtr(int32(0)),
+				MinDiskSize:     toPtr(int32(0)),
 			}).
 			Return(
 				&http.Response{
@@ -179,8 +180,9 @@ func Test_NodeConfiguration_UpdateContext(t *testing.T) {
 					ImdsHopLimit: toPtr(int32(0)),
 					ImdsV1:       toPtr(false),
 				},
-				DiskCpuRatio: toPtr(int32(0)),
-				MinDiskSize:  toPtr(int32(100)),
+				DiskCpuRatio:    toPtr(int32(0)),
+				DrainTimeoutSec: toPtr(int32(0)),
+				MinDiskSize:     toPtr(int32(100)),
 			}).
 			Return(
 				&http.Response{
diff --git a/docs/resources/node_configuration.md b/docs/resources/node_configuration.md
index 30cf4f38..00795e05 100644
--- a/docs/resources/node_configuration.md
+++ b/docs/resources/node_configuration.md
@@ -66,6 +66,7 @@ resource "castai_node_configuration" "default" {
 - `container_runtime` (String) Optional container runtime to be used by kubelet. Applicable for EKS only. Supported values include: `dockerd`, `containerd`
 - `disk_cpu_ratio` (Number) Disk to CPU ratio. Sets the number of GiBs to be added for every CPU on the node. Defaults to 0
 - `docker_config` (String) Optional docker daemon configuration properties in JSON format. Provide only properties that you want to override. Applicable for EKS only. [Available values](https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file)
+- `drain_timeout_sec` (Number) Timeout in seconds for draining the node. Defaults to 0
 - `eks` (Block List, Max: 1) (see [below for nested schema](#nestedblock--eks))
 - `gke` (Block List, Max: 1) (see [below for nested schema](#nestedblock--gke))
 - `image` (String) Image to be used while provisioning the node. If nothing is provided will be resolved to latest available image based on Kubernetes version if possible
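For reference, a minimal HCL sketch of the new attribute in use, mirroring what the schema change above allows. The resource name, cluster reference, and subnet ID are placeholders; the 0-3600 bound and the default of 0 come from the `ValidateDiagFunc` and `Default` entries in the schema:

```hcl
resource "castai_node_configuration" "example" {
  name       = "default"
  cluster_id = castai_eks_cluster.example.id # placeholder cluster reference
  subnets    = ["subnet-0example"]           # placeholder subnet ID

  # Added by this change: timeout in seconds for draining the node.
  # The schema validates values between 0 and 3600; omitting the
  # attribute keeps the default of 0.
  drain_timeout_sec = 180
}
```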