Update max_unavailable and max_surge validate functions (hashicorp#2653)
arybolovlev authored Dec 18, 2024
1 parent 32ccfe5 commit 7bba357
Showing 3 changed files with 35 additions and 4 deletions.
3 changes: 3 additions & 0 deletions .changelog/2653.txt
@@ -0,0 +1,3 @@
```release-note:bug
`kubernetes_daemon_set_v1`: fix issue where fields `spec.strategy.rolling_update.max_surge` and `spec.strategy.rolling_update.max_unavailable` were not being validated correctly.
```
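The fix is the added `?` quantifier in the percentage branch: the old pattern `[1-9][0-9]%` only matched two-digit percentages, so valid single-digit values such as `5%` were rejected. Below is a minimal standalone Go sketch (not part of the commit) contrasting the two patterns:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Old pattern: the percentage branch [1-9][0-9]% requires exactly
	// two digits before "%", so "5%" never matches.
	oldRe := regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]%|100%)$`)
	// New pattern: the trailing "?" makes the second digit optional,
	// accepting 1%-99% as well as absolute numbers and the literal 100%.
	newRe := regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]?%|100%)$`)

	for _, v := range []string{"0", "5", "10", "100", "5%", "10%", "100%", "0%", "101%"} {
		fmt.Printf("%-5s old=%-5v new=%v\n", v, oldRe.MatchString(v), newRe.MatchString(v))
	}
}
```

Running this shows `5%` matching only the new pattern, while out-of-range values such as `0%` and `101%` are still rejected by both.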
4 changes: 2 additions & 2 deletions kubernetes/resource_kubernetes_daemon_set_v1.go
@@ -109,14 +109,14 @@ func resourceKubernetesDaemonSetSchemaV1() map[string]*schema.Schema {
Description: "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictionsduring disruption.",
Optional: true,
Default: 0,
- ValidateFunc: validation.StringMatch(regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]%|100%)$`), ""),
+ ValidateFunc: validation.StringMatch(regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]?%|100%)$`), ""),
},
"max_unavailable": {
Type: schema.TypeString,
Description: "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
Optional: true,
Default: 1,
- ValidateFunc: validation.StringMatch(regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]%|100%)$`), ""),
+ ValidateFunc: validation.StringMatch(regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]?%|100%)$`), ""),
},
},
},
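For context, `validation.StringMatch` from the terraform-plugin-sdk wraps a regular expression in a `schema.SchemaValidateFunc`; with the empty message string passed above, the SDK falls back to a generic error naming the pattern. The sketch below paraphrases that behavior from memory, with a local stand-in for the SDK type, and is not a verbatim copy of the SDK source:

```go
package main

import (
	"fmt"
	"regexp"
)

// validateFunc mirrors the shape of the SDK's schema.SchemaValidateFunc:
// it receives the raw value and the attribute name, and returns
// warnings and errors.
type validateFunc func(i interface{}, k string) ([]string, []error)

// stringMatch approximates validation.StringMatch from helper/validation
// (an assumption, paraphrased from memory).
func stringMatch(r *regexp.Regexp, message string) validateFunc {
	return func(i interface{}, k string) ([]string, []error) {
		v, ok := i.(string)
		if !ok {
			return nil, []error{fmt.Errorf("expected type of %q to be string", k)}
		}
		if !r.MatchString(v) {
			if message != "" {
				return nil, []error{fmt.Errorf("invalid value for %q (%s)", k, message)}
			}
			// Empty message: fall back to a generic error naming the pattern.
			return nil, []error{fmt.Errorf("expected value of %q to match regular expression %q, got %q", k, r, v)}
		}
		return nil, nil
	}
}

func main() {
	validate := stringMatch(regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]?%|100%)$`), "")
	if _, errs := validate("5%", "max_surge"); len(errs) == 0 {
		fmt.Println("5% accepted") // passes with the fixed pattern
	}
}
```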
32 changes: 30 additions & 2 deletions kubernetes/resource_kubernetes_daemon_set_v1_test.go
@@ -453,10 +453,31 @@ func TestAccKubernetesDaemonSetV1_MaxSurge(t *testing.T) {
),
},
{
Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "2"),
Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "5"),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
- resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "2"),
+ resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "5"),
),
},
{
Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "10"),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "10"),
),
},
{
Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "100"),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "100"),
),
},
{
Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "5%"),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "5%"),
),
},
{
@@ -466,6 +487,13 @@ func TestAccKubernetesDaemonSetV1_MaxSurge(t *testing.T) {
resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "10%"),
),
},
{
Config: testAccKubernetesDaemonSetV1ConfigWithMaxSurge(name, imageName, "100%"),
Check: resource.ComposeAggregateTestCheckFunc(
testAccCheckKubernetesDaemonSetV1Exists(resourceName, &conf),
resource.TestCheckResourceAttr(resourceName, "spec.0.strategy.0.rolling_update.0.max_surge", "100%"),
),
},
},
})
}
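The new acceptance steps exercise the full provider against a live cluster; the same value matrix can also be mirrored in a fast, hypothetical unit test against the new pattern (again, not part of the commit):

```go
package kubernetes

import (
	"regexp"
	"testing"
)

// TestMaxSurgePattern is a hypothetical unit-level companion to the
// acceptance steps above: it checks the fixed pattern against the same
// accepted values ("5", "10", "100", "5%", "10%", "100%") plus a few
// values that must still be rejected.
func TestMaxSurgePattern(t *testing.T) {
	re := regexp.MustCompile(`^(0|[1-9][0-9]*|[1-9][0-9]?%|100%)$`)
	cases := map[string]bool{
		"0": true, "5": true, "10": true, "100": true,
		"5%": true, "10%": true, "100%": true,
		"0%": false, "101%": false, "": false,
	}
	for in, want := range cases {
		if got := re.MatchString(in); got != want {
			t.Errorf("MatchString(%q) = %v, want %v", in, got, want)
		}
	}
}
```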
