Skip to content

Commit

Permalink
fixed pytest and pre-commit issues for rebase
Browse files Browse the repository at this point in the history
  • Loading branch information
JTaeuber committed Nov 29, 2024
1 parent 6fb3187 commit 7765574
Show file tree
Hide file tree
Showing 4 changed files with 59 additions and 43 deletions.
2 changes: 1 addition & 1 deletion kube_downscaler/cmd.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ def get_parser():
"--max-retries-on-conflict",
type=int,
help="Maximum number of retries for handling concurrent update conflicts (default: 0)",
default=os.getenv("MAX_RETRIES_ON_CONFLICT", 0)
default=os.getenv("MAX_RETRIES_ON_CONFLICT", 0),
)
upscale_group.add_argument(
"--upscale-period",
Expand Down
42 changes: 29 additions & 13 deletions kube_downscaler/scaler.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,19 +11,19 @@
from typing import Tuple

import pykube
from pykube import HTTPClient
import requests
from pykube import CronJob
from pykube import CustomResourceDefinition
from pykube import DaemonSet
from pykube import Deployment
from pykube import HorizontalPodAutoscaler
from pykube import HTTPClient
from pykube import Job
from pykube import Namespace
from pykube import StatefulSet
from pykube.exceptions import HTTPError
from pykube.objects import NamespacedAPIObject
from pykube.objects import APIObject
from pykube.objects import NamespacedAPIObject
from pykube.objects import PodDisruptionBudget

from kube_downscaler import helper
Expand Down Expand Up @@ -311,9 +311,12 @@ def get_resources(kind, api, namespaces: FrozenSet[str], excluded_namespaces):


def get_resource(kind, api, namespace, resource_name: str):

try:
resource = kind.objects(api).filter(namespace=namespace).get_or_none(name=resource_name)
resource = (
kind.objects(api)
.filter(namespace=namespace)
.get_or_none(name=resource_name)
)
if resource is None:
logger.debug(f"{kind.endpoint} {namespace}/{resource_name} not found")
except requests.HTTPError as e:
Expand All @@ -332,7 +335,9 @@ def get_resource(kind, api, namespace, resource_name: str):
return resource


def scale_jobs_without_admission_controller(plural, admission_controller, constrainted_downscaler):
def scale_jobs_without_admission_controller(
    plural, admission_controller, constrainted_downscaler
):
    """Decide whether jobs must be scaled directly by the downscaler itself.

    Direct scaling applies when the targeted resources are jobs and no
    admission controller is configured to intercept them, or whenever the
    downscaler is running in constrained mode (which always bypasses the
    admission-controller path).
    """
    # Jobs with no admission controller configured are always handled directly.
    if plural == "jobs" and admission_controller == "":
        return True
    # Otherwise the constrained-mode flag alone determines the outcome.
    return constrainted_downscaler


Expand Down Expand Up @@ -1075,15 +1080,21 @@ def autoscale_resource(
else:
resource.update()
except Exception as e:
if isinstance(e, HTTPError) and "the object has been modified" in str(e).lower():
if (
isinstance(e, HTTPError)
and "the object has been modified" in str(e).lower()
):
logger.warning(
f"Unable to process {resource.kind} {resource.namespace}/{resource.name} because it was recently modified"
)
if max_retries_on_conflict > 0:
logger.info(
f"Retrying processing {resource.kind} {resource.namespace}/{resource.name} (Remaining Retries: {max_retries_on_conflict})")
f"Retrying processing {resource.kind} {resource.namespace}/{resource.name} (Remaining Retries: {max_retries_on_conflict})"
)
max_retries_on_conflict = max_retries_on_conflict - 1
refreshed_resource = get_resource(kind, api, resource.namespace, resource.name)
refreshed_resource = get_resource(
kind, api, resource.namespace, resource.name
)
if refreshed_resource is not None:
autoscale_resource(
refreshed_resource,
Expand All @@ -1108,13 +1119,16 @@ def autoscale_resource(
)
else:
logger.warning(
f"Retry process failed for {resource.kind} {resource.namespace}/{resource.name} because the resource cannot be found, it may have been deleted from the cluster")
f"Retry process failed for {resource.kind} {resource.namespace}/{resource.name} because the resource cannot be found, it may have been deleted from the cluster"
)
else:
logger.warning(
f"Will retry processing {resource.kind} {resource.namespace}/{resource.name} in the next iteration, unless the --once argument is specified"
)
elif isinstance(e, HTTPError) and "not found" in str(e).lower():
logger.info(f"While waiting to process {resource.kind} {resource.namespace}/{resource.name}, the resource was removed from the cluster")
logger.info(
f"While waiting to process {resource.kind} {resource.namespace}/{resource.name}, the resource was removed from the cluster"
)
else:
logger.exception(
f"Failed to process {resource.kind} {resource.namespace}/{resource.name}: {e}"
Expand Down Expand Up @@ -1406,8 +1420,10 @@ def autoscale_jobs(
enable_events: bool = False,
):
if admission_controller != "" and admission_controller in ADMISSION_CONTROLLERS:

if admission_controller == "gatekeeper" and gatekeeper_constraint_template_crd_exist(api):
if (
admission_controller == "gatekeeper"
and gatekeeper_constraint_template_crd_exist(api)
):
apply_kubedownscalerjobsconstraint_crd(exclude_names, matching_labels, api)
if admission_controller == "gatekeeper" and not gatekeeper_healthy(api):
logging.error(
Expand All @@ -1417,7 +1433,7 @@ def autoscale_jobs(
return
elif (
admission_controller == "gatekeeper"
and not gatekeeper_constraint_template_crd_exist()
and not gatekeeper_constraint_template_crd_exist(api)
):
logging.warning(
"unable to scale jobs with gatekeeper until you install constrainttemplates.templates.gatekeeper.sh "
Expand Down
52 changes: 26 additions & 26 deletions tests/test_autoscale_resource.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
import re
from datetime import datetime
from datetime import timezone
from unittest.mock import MagicMock, patch
from unittest.mock import MagicMock

import pykube
import pytest
Expand All @@ -12,7 +12,6 @@
from pykube import HorizontalPodAutoscaler
from pykube import PodDisruptionBudget
from pykube.exceptions import HTTPError
from pykube import PodDisruptionBudget

from kube_downscaler.resources.keda import ScaledObject
from kube_downscaler.resources.stack import Stack
Expand All @@ -34,6 +33,7 @@ def resource():
res.annotations = {}
return res


def test_swallow_exception(monkeypatch, resource, caplog):
api = MagicMock()
monkeypatch.setattr(
Expand Down Expand Up @@ -1616,9 +1616,12 @@ def test_downscale_resource_concurrently_modified(monkeypatch):
)

# Mock HTTPError to simulate conflict
http_error = HTTPError(409, "Operation cannot be fulfilled on daemonsets.apps "
"\"daemonset-1\": the object has been modified; "
"please apply your changes to the latest version and try again")
http_error = HTTPError(
409,
"Operation cannot be fulfilled on daemonsets.apps "
'"daemonset-1": the object has been modified; '
"please apply your changes to the latest version and try again",
)

# Simulate update behavior: conflict on first call, success on second
api.patch.side_effect = [http_error, None] # First attempt raises, second succeeds
Expand All @@ -1632,16 +1635,14 @@ def test_downscale_resource_concurrently_modified(monkeypatch):
"namespace": "default",
"creationTimestamp": "2018-10-23T21:55:00Z",
},
"spec": {
"template": {
"spec": {}
}
}
}
"spec": {"template": {"spec": {}}},
},
)

# Replace update method to track calls
ds.update = MagicMock(side_effect=[http_error, None]) # Simulate conflict and success
ds.update = MagicMock(
side_effect=[http_error, None]
) # Simulate conflict and success

# Mock get_resource with MagicMock
mock_get_resource = MagicMock(return_value=ds)
Expand All @@ -1662,14 +1663,14 @@ def test_downscale_resource_concurrently_modified(monkeypatch):
forced_uptime=False,
forced_downtime=False,
dry_run=False,
max_retries_on_conflict=1, #1 Retry Allowed
max_retries_on_conflict=1, # 1 Retry Allowed
api=api,
kind=DaemonSet,
now=now,
matching_labels=frozenset([re.compile("")]),
)

#Assert the kube_downscaler.scaler.get_resource method was called at least once to retrieve the refreshed resource
# Assert the kube_downscaler.scaler.get_resource method was called at least once to retrieve the refreshed resource
assert mock_get_resource.call_count == 1


Expand All @@ -1680,9 +1681,12 @@ def test_downscale_resource_concurrently_modified_without_retries_allowed(monkey
)

# Mock HTTPError to simulate conflict
http_error = HTTPError(409, "Operation cannot be fulfilled on daemonsets.apps "
"\"daemonset-1\": the object has been modified; "
"please apply your changes to the latest version and try again")
http_error = HTTPError(
409,
"Operation cannot be fulfilled on daemonsets.apps "
'"daemonset-1": the object has been modified; '
"please apply your changes to the latest version and try again",
)

# Simulate update behavior: conflict on first call, success on second
api.patch.side_effect = [http_error, None] # First attempt raises, second succeeds
Expand All @@ -1695,12 +1699,8 @@ def test_downscale_resource_concurrently_modified_without_retries_allowed(monkey
"namespace": "default",
"creationTimestamp": "2018-10-23T21:55:00Z",
},
"spec": {
"template": {
"spec": {}
}
}
}
"spec": {"template": {"spec": {}}},
},
)

# Mock get_resource with MagicMock
Expand All @@ -1721,12 +1721,12 @@ def test_downscale_resource_concurrently_modified_without_retries_allowed(monkey
forced_uptime=False,
forced_downtime=False,
dry_run=False,
max_retries_on_conflict=0, #No Retries Allowed
max_retries_on_conflict=0, # No Retries Allowed
api=api,
kind=DaemonSet,
now=now,
matching_labels=frozenset([re.compile("")]),
)

#Assert the kube_downscaler.scaler.get_resource method was not called at all (meaning no retry was performed)
assert mock_get_resource.call_count == 0
# Assert the kube_downscaler.scaler.get_resource method was not called at all (meaning no retry was performed)
assert mock_get_resource.call_count == 0
6 changes: 3 additions & 3 deletions tests/test_scaler.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,15 +13,14 @@
from kube_downscaler.scaler import scale_down_jobs
from kube_downscaler.scaler import scale_up_jobs


def test_scale_custom_timeout(monkeypatch):
api_server_timeout = 15 # Defined by the user
api = MagicMock()
api.timeout = 15 # Expected timeout

mock_get_kube_api = MagicMock(return_value=api)
monkeypatch.setattr(
"kube_downscaler.scaler.helper.get_kube_api", mock_get_kube_api
)
monkeypatch.setattr("kube_downscaler.scaler.helper.get_kube_api", mock_get_kube_api)

scale(
namespaces=frozenset({"default"}),
Expand Down Expand Up @@ -50,6 +49,7 @@ def test_scale_custom_timeout(monkeypatch):
# ensure timeout value is correctly set on the returned object
assert api.timeout == api_server_timeout


def test_scaler_always_up(monkeypatch):
api = MagicMock()
monkeypatch.setattr(
Expand Down

0 comments on commit 7765574

Please sign in to comment.