New Feature - Downscaling PodDisruptionBudgets (#43)
* added feature to downscale pod disruption budgets

* also added tests inside test_scaler.py
samuel-esp authored May 23, 2024
1 parent 336d48a commit 00be0b4
Showing 6 changed files with 466 additions and 3 deletions.
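In practice, the new behavior records a PodDisruptionBudget's current minAvailable (or maxUnavailable) value, lowers it to the configured downtime target, and restores the original value when uptime begins. A minimal sketch of that effect, using plain dicts shaped like the test fixtures further down; the annotation key shown is a placeholder, since the diff only refers to it through the ORIGINAL_REPLICAS_ANNOTATION constant:

    # Sketch of the intended effect of this commit on a PDB spec; not the actual
    # scaler code (see kube_downscaler/scaler.py below).
    before_downtime = {"metadata": {"annotations": {}}, "spec": {"minAvailable": 4}}

    # During downtime the scaler stores the original value in an annotation and
    # lowers minAvailable to the downtime target (here: 1).
    after_downscale = {
        "metadata": {"annotations": {"downscaler/original-replicas": "4"}},  # placeholder key
        "spec": {"minAvailable": 1},
    }

    # When uptime resumes, minAvailable is restored from the annotation and the
    # annotation is cleared again.
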
10 changes: 10 additions & 0 deletions chart/templates/rbac.yaml
@@ -89,6 +89,16 @@ rules:
- list
- update
- patch
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- watch
- list
- update
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
1 change: 1 addition & 0 deletions kube_downscaler/cmd.py
@@ -10,6 +10,7 @@
"horizontalpodautoscalers",
"rollouts",
"scaledobjects",
"poddisruptionbudgets"
]
)

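The set of values accepted by the --include-resources argument now contains poddisruptionbudgets (the updated error message in tests/test_cmd.py at the bottom of this diff lists the full set). A quick illustrative check, assuming the helper is importable as kube_downscaler.cmd.check_include_resources to match the changed file above; the exact import path is an assumption:

    # Hypothetical usage sketch of the validation helper exercised in tests/test_cmd.py.
    from kube_downscaler.cmd import check_include_resources

    # Accepted after this commit, so this call no longer raises:
    check_include_resources("deployments,poddisruptionbudgets")

    # Still rejected, with an error message that now lists poddisruptionbudgets
    # among the valid resource names:
    # check_include_resources("deployments,foo")

Operators who want the new behavior would list poddisruptionbudgets in --include-resources when starting the downscaler, if it is not already part of their configured value.
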
41 changes: 40 additions & 1 deletion kube_downscaler/scaler.py
@@ -12,7 +12,7 @@
from pykube import HorizontalPodAutoscaler
from pykube import Namespace
from pykube import StatefulSet
from pykube.objects import NamespacedAPIObject
from pykube.objects import NamespacedAPIObject, PodDisruptionBudget

from kube_downscaler import helper
from kube_downscaler.helper import matches_time_spec
@@ -39,6 +39,7 @@
HorizontalPodAutoscaler,
ArgoRollout,
ScaledObject,
PodDisruptionBudget
]

TIMESTAMP_FORMATS = [
@@ -166,6 +167,22 @@ def get_replicas(
logger.debug(
f"{resource.kind} {resource.namespace}/{resource.name} is {state} (original: {original_state}, uptime: {uptime})"
)
elif resource.kind == "PodDisruptionBudget":
if "minAvailable" in resource.obj["spec"]:
replicas = resource.obj["spec"]["minAvailable"]
logger.debug(
f"{resource.kind} {resource.namespace}/{resource.name} has {replicas} minAvailable (original: {original_replicas}, uptime: {uptime})"
)
elif "maxUnavailable" in resource.obj["spec"]:
replicas = resource.obj["spec"]["maxUnavailable"]
logger.debug(
f"{resource.kind} {resource.namespace}/{resource.name} has {replicas} maxUnavailable (original: {original_replicas}, uptime: {uptime})"
)
else:
replicas = 0
logger.debug(
f"{resource.kind} {resource.namespace}/{resource.name} has neither minAvailable nor maxUnavailable (original: {original_replicas}, uptime: {uptime})"
)
elif resource.kind == "HorizontalPodAutoscaler":
replicas = resource.obj["spec"]["minReplicas"]
logger.debug(
@@ -195,6 +212,17 @@ def scale_up(
f"Unsuspending {resource.kind} {resource.namespace}/{resource.name} (uptime: {uptime}, downtime: {downtime})"
)
event_message = "Unsuspending CronJob"
elif resource.kind == "PodDisruptionBudget":
if "minAvailable" in resource.obj["spec"]:
resource.obj["spec"]["minAvailable"] = original_replicas
logger.info(
f"Scaling up {resource.kind} {resource.namespace}/{resource.name} from {replicas} to {original_replicas} minAvailable (uptime: {uptime}, downtime: {downtime})"
)
elif "maxUnavailable" in resource.obj["spec"]:
resource.obj["spec"]["maxUnavailable"] = original_replicas
logger.info(
f"Scaling up {resource.kind} {resource.namespace}/{resource.name} from {replicas} to {original_replicas} maxUnavailable (uptime: {uptime}, downtime: {downtime})"
)
elif resource.kind == "HorizontalPodAutoscaler":
resource.obj["spec"]["minReplicas"] = original_replicas
logger.info(
@@ -247,6 +275,17 @@ def scale_down(
f"Suspending {resource.kind} {resource.namespace}/{resource.name} (uptime: {uptime}, downtime: {downtime})"
)
event_message = "Suspending CronJob"
elif resource.kind == "PodDisruptionBudget":
if "minAvailable" in resource.obj["spec"]:
resource.obj["spec"]["minAvailable"] = target_replicas
logger.info(
f"Scaling down {resource.kind} {resource.namespace}/{resource.name} from {replicas} to {target_replicas} minAvailable (uptime: {uptime}, downtime: {downtime})"
)
elif "maxUnavailable" in resource.obj["spec"]:
resource.obj["spec"]["maxUnavailable"] = target_replicas
logger.info(
f"Scaling down {resource.kind} {resource.namespace}/{resource.name} from {replicas} to {target_replicas} maxUnavailable (uptime: {uptime}, downtime: {downtime})"
)
elif resource.kind == "HorizontalPodAutoscaler":
resource.obj["spec"]["minReplicas"] = target_replicas
logger.info(
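Condensing the branch added to get_replicas above: minAvailable takes precedence, maxUnavailable is used as a fallback, and a PDB that defines neither field is treated as 0. A short sketch of that selection logic (not the implementation itself), operating on the same plain-dict shape the tests use:

    # Sketch of the field-selection logic added to get_replicas; the real code also
    # logs the chosen value and feeds it into the generic scale up/down path.
    def pdb_current_value(pdb_obj: dict) -> int:
        spec = pdb_obj.get("spec", {})
        if "minAvailable" in spec:        # minAvailable wins when present
            return spec["minAvailable"]
        if "maxUnavailable" in spec:      # otherwise fall back to maxUnavailable
            return spec["maxUnavailable"]
        return 0                          # neither field set

    assert pdb_current_value({"spec": {"minAvailable": 4}}) == 4
    assert pdb_current_value({"spec": {"maxUnavailable": 2}}) == 2
    assert pdb_current_value({"spec": {}}) == 0

Note that scale_up and scale_down write back to whichever of the two fields is already present, so a minAvailable-based PDB stays minAvailable-based across downtime and a maxUnavailable-based one keeps using maxUnavailable.
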
130 changes: 129 additions & 1 deletion tests/test_autoscale_resource.py
@@ -6,7 +6,7 @@

import pykube
import pytest
from pykube import Deployment
from pykube import Deployment, PodDisruptionBudget
from pykube import HorizontalPodAutoscaler

from kube_downscaler.resources.stack import Stack
@@ -865,3 +865,131 @@ def test_upscale_hpa_with_autoscaling():
)
assert hpa.obj["spec"]["minReplicas"] == 4
assert hpa.obj["metadata"]["annotations"][ORIGINAL_REPLICAS_ANNOTATION] is None

def test_downscale_pdb_minavailable_with_autoscaling():
pdb = PodDisruptionBudget(
None,
{
"metadata": {
"name": "my-pdb",
"namespace": "my-ns",
"creationTimestamp": "2018-10-23T21:55:00Z",
"annotations": {DOWNTIME_REPLICAS_ANNOTATION: str(1)},
},
"spec": {"minAvailable": 4},
},
)
now = datetime.strptime("2018-10-23T21:56:00Z", "%Y-%m-%dT%H:%M:%SZ").replace(
tzinfo=timezone.utc
)
autoscale_resource(
pdb,
upscale_period="never",
downscale_period="never",
default_uptime="never",
default_downtime="always",
forced_uptime=False,
forced_downtime=False,
dry_run=True,
now=now,
)
assert pdb.obj["spec"]["minAvailable"] == 1
assert pdb.obj["metadata"]["annotations"][ORIGINAL_REPLICAS_ANNOTATION] == str(4)


def test_upscale_pdb_minavailable_with_autoscaling():
pdb = PodDisruptionBudget(
None,
{
"metadata": {
"name": "my-pdb",
"namespace": "my-ns",
"creationTimestamp": "2018-10-23T21:55:00Z",
"annotations": {
DOWNTIME_REPLICAS_ANNOTATION: str(1),
ORIGINAL_REPLICAS_ANNOTATION: str(4),
},
},
"spec": {"minAvailable": 1},
},
)
now = datetime.strptime("2018-10-23T22:15:00Z", "%Y-%m-%dT%H:%M:%SZ").replace(
tzinfo=timezone.utc
)
autoscale_resource(
pdb,
upscale_period="never",
downscale_period="never",
default_uptime="always",
default_downtime="never",
forced_uptime=False,
forced_downtime=False,
dry_run=True,
now=now,
)
assert pdb.obj["spec"]["minAvailable"] == 4
assert pdb.obj["metadata"]["annotations"][ORIGINAL_REPLICAS_ANNOTATION] is None

def test_downscale_pdb_maxunavailable_with_autoscaling():
pdb = PodDisruptionBudget(
None,
{
"metadata": {
"name": "my-pdb",
"namespace": "my-ns",
"creationTimestamp": "2018-10-23T21:55:00Z",
"annotations": {DOWNTIME_REPLICAS_ANNOTATION: str(1)},
},
"spec": {"maxUnavailable": 4},
},
)
now = datetime.strptime("2018-10-23T21:56:00Z", "%Y-%m-%dT%H:%M:%SZ").replace(
tzinfo=timezone.utc
)
autoscale_resource(
pdb,
upscale_period="never",
downscale_period="never",
default_uptime="never",
default_downtime="always",
forced_uptime=False,
forced_downtime=False,
dry_run=True,
now=now,
)
assert pdb.obj["spec"]["maxUnavailable"] == 1
assert pdb.obj["metadata"]["annotations"][ORIGINAL_REPLICAS_ANNOTATION] == str(4)


def test_upscale_pdb_maxunavailable_with_autoscaling():
pdb = PodDisruptionBudget(
None,
{
"metadata": {
"name": "my-pdb",
"namespace": "my-ns",
"creationTimestamp": "2018-10-23T21:55:00Z",
"annotations": {
DOWNTIME_REPLICAS_ANNOTATION: str(1),
ORIGINAL_REPLICAS_ANNOTATION: str(4),
},
},
"spec": {"maxUnavailable": 1},
},
)
now = datetime.strptime("2018-10-23T22:15:00Z", "%Y-%m-%dT%H:%M:%SZ").replace(
tzinfo=timezone.utc
)
autoscale_resource(
pdb,
upscale_period="never",
downscale_period="never",
default_uptime="always",
default_downtime="never",
forced_uptime=False,
forced_downtime=False,
dry_run=True,
now=now,
)
assert pdb.obj["spec"]["maxUnavailable"] == 4
assert pdb.obj["metadata"]["annotations"][ORIGINAL_REPLICAS_ANNOTATION] is None
2 changes: 1 addition & 1 deletion tests/test_cmd.py
@@ -19,6 +19,6 @@ def test_check_include_resources_invalid():
with pytest.raises(Exception) as excinfo:
check_include_resources("deployments,foo")
assert (
"--include-resources argument should contain a subset of [cronjobs, deployments, horizontalpodautoscalers, rollouts, scaledobjects, stacks, statefulsets]"
"--include-resources argument should contain a subset of [cronjobs, deployments, horizontalpodautoscalers, poddisruptionbudgets, rollouts, scaledobjects, stacks, statefulsets]"
in str(excinfo.value)
)
