Merge pull request #493 from jsmolar/limitador_metrics
Add test for limitador metrics
jsmolar authored Sep 3, 2024
2 parents 06046af + 0a2ee47 commit b831e9f
Showing 12 changed files with 208 additions and 55 deletions.
24 changes: 22 additions & 2 deletions testsuite/kuadrant/__init__.py
@@ -28,6 +28,10 @@ def deployment(self):
with self.context:
return selector("deployment", labels={"app": self.spec_name}).object(cls=Deployment)

def name(self):
"""Overrides `name` method from `apiobject` so it returns name of Kuadrant section"""
return self.spec_name

def __getitem__(self, name):
return self.kuadrant_cr.model.spec[self.spec_name][name]

@@ -67,6 +71,22 @@ def metrics_service(self):
return selector(f"service/{self.spec_name}-controller-metrics").object()


class LimitadorSection(KuadrantSection):
"""Limitador `spec.limitador` from KuadrantCR object"""

@property
def deployment(self) -> Deployment:
"""Returns Deployment object for this Limitador"""
with self.context:
return selector(f"deployment/{self.name()}").object(cls=Deployment)

@property
def pod(self):
"""Returns Pod object for this Limitadaor"""
with self.context:
return selector("pod", labels={"app": self.name()}).object()


class KuadrantCR(CustomResource):
"""Represents Kuadrant CR objects"""

@@ -77,7 +97,7 @@ def authorino(self) -> AuthorinoSection:
return AuthorinoSection(self, "authorino")

@property
- def limitador(self) -> KuadrantSection:
+ def limitador(self) -> LimitadorSection:
"""Returns spec.limitador from Kuadrant object"""
self.model.spec.setdefault("limitador", {})
- return KuadrantSection(self, "limitador")
+ return LimitadorSection(self, "limitador")
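
Note: with this change `KuadrantCR.limitador` resolves to the richer `LimitadorSection`. A minimal usage sketch; the `kuadrant` object stands in for a `KuadrantCR` instance from the testsuite fixtures:

# Hypothetical usage; assumes `kuadrant` is a KuadrantCR instance.
limitador = kuadrant.limitador       # LimitadorSection; name() returns "limitador"
deployment = limitador.deployment    # resolved via selector "deployment/limitador"
pod = limitador.pod                  # first Pod matching labels {"app": "limitador"}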
6 changes: 6 additions & 0 deletions testsuite/kuadrant/limitador.py
@@ -14,3 +14,9 @@ def deployment(self) -> Deployment:
"""Returns Deployment object for this Limitador"""
with self.context:
return selector(f"deployment/{self.name()}").object(cls=Deployment)

@property
def pod(self):
"""Returns Pod object for this Limitadaor"""
with self.context:
return selector("pod", labels={"app": "limitador"}).object()
13 changes: 13 additions & 0 deletions testsuite/kubernetes/monitoring/__init__.py
@@ -0,0 +1,13 @@
"""Kubernetes monitoring common objects"""

from dataclasses import dataclass


@dataclass
class MetricsEndpoint:
"""Dataclass for endpoint definition in ServiceMonitor Kubernetes object.
It contains endpoint path and port to the exported metrics."""

path: str = "/metrics"
port: str = "http"
interval: str = "30s"
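
A quick sketch of how `MetricsEndpoint` serializes into the endpoint entries the Prometheus Operator CRDs expect (shown with the stdlib `dataclasses.asdict`; the testsuite uses its own `asdict` helper from `testsuite.utils`):

from dataclasses import asdict

default = MetricsEndpoint()  # path="/metrics", port="http", interval="30s"
assert asdict(default) == {"path": "/metrics", "port": "http", "interval": "30s"}
# Dicts of this shape become `spec.endpoints` items (ServiceMonitor)
# or `spec.podMetricsEndpoints` items (PodMonitor).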
37 changes: 37 additions & 0 deletions testsuite/kubernetes/monitoring/pod_monitor.py
@@ -0,0 +1,37 @@
"""Module implements Pod Monitor CR"""

from testsuite.kubernetes import KubernetesObject
from testsuite.kubernetes.client import KubernetesClient
from testsuite.kubernetes.monitoring import MetricsEndpoint
from testsuite.utils import asdict


class PodMonitor(KubernetesObject):
"""Represents Pod Monitor object for OpenShift"""

@classmethod
def create_instance(
cls,
cluster: KubernetesClient,
name: str,
endpoints: list[MetricsEndpoint],
match_labels: dict[str, str],
labels: dict[str, str] = None,
):
"""Creates new instance of ServiceMonitor"""
model = {
"apiVersion": "monitoring.coreos.com/v1",
"kind": "PodMonitor",
"metadata": {
"name": name,
"labels": labels,
},
"spec": {
"podMetricsEndpoints": [asdict(e) for e in endpoints],
"selector": {
"matchLabels": match_labels,
},
},
}

return cls(model, context=cluster.context)
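
For reference, a sketch of what `create_instance` assembles; the name and labels here are illustrative, and `cluster` stands in for any `KubernetesClient`:

monitor = PodMonitor.create_instance(
    cluster,
    name="limitador-monitor",  # hypothetical name
    endpoints=[MetricsEndpoint(path="/metrics", port="http")],
    match_labels={"app": "limitador"},
)
# monitor.model now corresponds to a monitoring.coreos.com/v1 PodMonitor whose
# spec.podMetricsEndpoints come from `endpoints` and whose
# spec.selector.matchLabels come from `match_labels`.
monitor.commit()  # creates the object on the cluster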
testsuite/kubernetes/service_monitor.py → testsuite/kubernetes/monitoring/service_monitor.py
@@ -1,22 +1,11 @@
"""Module implements Service Monitor CR """

from dataclasses import dataclass
"""Module implements Service Monitor CR"""

from testsuite.kubernetes.monitoring import MetricsEndpoint
from testsuite.utils import asdict
from testsuite.kubernetes.client import KubernetesClient
from testsuite.kubernetes import KubernetesObject


@dataclass
class MetricsEndpoint:
"""Dataclass for endpoint definition in ServiceMonitor Kubernetes object.
It contains endpoint path and port to the exported metrics."""

path: str = "/metrics"
port: str = "http"
interval: str = "30s"


class ServiceMonitor(KubernetesObject):
"""Kubernetes ServiceMonitor object"""

…
20 changes: 13 additions & 7 deletions testsuite/prometheus.py
@@ -7,7 +7,8 @@
from apyproxy import ApyProxy
from httpx import Client

- from testsuite.kubernetes.service_monitor import ServiceMonitor
+ from testsuite.kubernetes.monitoring.pod_monitor import PodMonitor
+ from testsuite.kubernetes.monitoring.service_monitor import ServiceMonitor


def _params(key: str = "", labels: dict[str, str] = None) -> dict[str, str]:
@@ -62,17 +63,22 @@ def get_metrics(self, key: str = "", labels: dict[str, str] = None) -> Metrics:
return Metrics(response.json()["data"]["result"])

@backoff.on_predicate(backoff.constant, interval=10, jitter=None, max_tries=35)
- def is_reconciled(self, service_monitor: ServiceMonitor):
+ def is_reconciled(self, monitor: ServiceMonitor | PodMonitor):
"""True, if all endpoints in the monitor are active targets"""
- scrape_pools = set(target["scrapePool"] for target in self.get_active_targets())
- endpoints = len(service_monitor.model.spec.endpoints)
+ scrape_pools = set(target["scrapePool"].lower() for target in self.get_active_targets())
+
+ if isinstance(monitor, ServiceMonitor):
+     endpoints = len(monitor.model.spec["endpoints"])
+ else:
+     endpoints = len(monitor.model.spec["podMetricsEndpoints"])
+
for i in range(endpoints):
- if f"serviceMonitor/{service_monitor.namespace()}/{service_monitor.name()}/{i}" not in scrape_pools:
+ if f"{monitor.kind()}/{monitor.namespace()}/{monitor.name()}/{i}".lower() not in scrape_pools:
return False

return True

- def wait_for_scrape(self, service_monitor: ServiceMonitor, metrics_path: str):
+ def wait_for_scrape(self, monitor: ServiceMonitor | PodMonitor, metrics_path: str):
"""Waits until the next metrics scrape of the monitored target finishes"""
call_time = datetime.now(timezone.utc)

@@ -81,7 +87,7 @@ def _wait_for_scrape():
"""Wait for new scrape after the function call time"""
for target in self.get_active_targets():
if (
f"serviceMonitor/{service_monitor.namespace()}/{service_monitor.name()}" in target["scrapePool"]
f"{monitor.kind()}/{monitor.namespace()}/{monitor.name()}".lower() in target["scrapePool"].lower()
and metrics_path in target["scrapeUrl"]
):
return call_time < datetime.fromisoformat(target["lastScrape"][:26]).replace(tzinfo=timezone.utc)
…
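
For context: the Prometheus Operator names each scrape pool `<kind>/<namespace>/<monitor-name>/<endpoint-index>`, so lowercasing both sides keeps the membership check independent of the casing `kind()` reports. A small sketch of the identifiers `is_reconciled` looks for (values are illustrative):

# Hypothetical PodMonitor "limitador-pd" with one endpoint in "kuadrant-system":
kind, namespace, name, endpoints = "PodMonitor", "kuadrant-system", "limitador-pd", 1
expected = [f"{kind}/{namespace}/{name}/{i}".lower() for i in range(endpoints)]
assert expected == ["podmonitor/kuadrant-system/limitador-pd/0"]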
35 changes: 2 additions & 33 deletions testsuite/tests/singlecluster/authorino/metrics/conftest.py
@@ -1,40 +1,9 @@
"""Conftest for the Authorino metrics tests"""

import pytest
- import yaml
- from openshift_client import selector
-
- from testsuite.httpx import KuadrantClient
- from testsuite.kubernetes.config_map import ConfigMap
- from testsuite.kubernetes.service_monitor import ServiceMonitor, MetricsEndpoint
- from testsuite.prometheus import Prometheus
+ from testsuite.kubernetes.monitoring import MetricsEndpoint
+ from testsuite.kubernetes.monitoring.service_monitor import ServiceMonitor
-
-
- @pytest.fixture(scope="package")
- def prometheus(cluster):
-     """
-     Return an instance of Thanos metrics client
-     Skip tests if query route is not properly configured
-     """
-     openshift_monitoring = cluster.change_project("openshift-monitoring")
-     # Check if metrics are enabled
-     try:
-         with openshift_monitoring.context:
-             cm = selector("cm/cluster-monitoring-config").object(cls=ConfigMap)
-             assert yaml.safe_load(cm["config.yaml"])["enableUserWorkload"]
-     except Exception:  # pylint: disable=broad-exception-caught
-         pytest.skip("User workload monitoring is disabled")
-
-     # find thanos-querier route in the openshift-monitoring project
-     # this route allows to query metrics
-
-     routes = openshift_monitoring.get_routes_for_service("thanos-querier")
-     if len(routes) == 0:
-         pytest.skip("Skipping metrics tests as query route is not properly configured")
-
-     url = ("https://" if "tls" in routes[0].model.spec else "http://") + routes[0].model.spec.host
-     with KuadrantClient(headers={"Authorization": f"Bearer {cluster.token}"}, base_url=url, verify=False) as client:
-         yield Prometheus(client)


@pytest.fixture(scope="package")
…
31 changes: 31 additions & 0 deletions testsuite/tests/singlecluster/conftest.py
@@ -2,6 +2,7 @@
all methods are placeholders for now since we do not work with Kuadrant"""

import pytest
import yaml
from openshift_client import selector

from testsuite.backend.httpbin import Httpbin
@@ -10,9 +11,12 @@
from testsuite.gateway.envoy.route import EnvoyVirtualRoute
from testsuite.gateway.gateway_api.gateway import KuadrantGateway
from testsuite.gateway.gateway_api.route import HTTPRoute
from testsuite.httpx import KuadrantClient
from testsuite.kuadrant import KuadrantCR
from testsuite.kuadrant.policy.authorization.auth_policy import AuthPolicy
from testsuite.kuadrant.policy.rate_limit import RateLimitPolicy
from testsuite.kubernetes.config_map import ConfigMap
from testsuite.prometheus import Prometheus


@pytest.fixture(scope="session")
@@ -84,6 +88,33 @@ def kuadrant(request, testconfig):
return kuadrant


@pytest.fixture(scope="package")
def prometheus(cluster):
"""
Return an instance of Thanos metrics client
Skip tests if query route is not properly configured
"""
openshift_monitoring = cluster.change_project("openshift-monitoring")
# Check if metrics are enabled
try:
with openshift_monitoring.context:
cm = selector("cm/cluster-monitoring-config").object(cls=ConfigMap)
assert yaml.safe_load(cm["config.yaml"])["enableUserWorkload"]
except Exception: # pylint: disable=broad-exception-caught
pytest.skip("User workload monitoring is disabled")

# find the thanos-querier route in the openshift-monitoring project;
# this route allows querying metrics

routes = openshift_monitoring.get_routes_for_service("thanos-querier")
if len(routes) == 0:
pytest.skip("Skipping metrics tests as query route is not properly configured")

url = ("https://" if "tls" in routes[0].model.spec else "http://") + routes[0].model.spec.host
with KuadrantClient(headers={"Authorization": f"Bearer {cluster.token}"}, base_url=url, verify=False) as client:
yield Prometheus(client)


@pytest.fixture(scope="session")
def backend(request, cluster, blame, label, testconfig):
"""Deploys Httpbin backend"""
…
7 changes: 7 additions & 0 deletions testsuite/tests/singlecluster/limitador/conftest.py
@@ -3,6 +3,13 @@
import pytest


@pytest.fixture(scope="session")
def limitador(kuadrant):
"""Returns Limitador CR"""

return kuadrant.limitador


@pytest.fixture(scope="module", autouse=True)
def commit(request, rate_limit):
"""Commits all important stuff before tests"""
…
testsuite/tests/singlecluster/limitador/metrics/__init__.py (empty file)
24 changes: 24 additions & 0 deletions testsuite/tests/singlecluster/limitador/metrics/conftest.py
@@ -0,0 +1,24 @@
"""Conftest for limitador metrics tests"""

import pytest

from testsuite.kubernetes.monitoring import MetricsEndpoint
from testsuite.kubernetes.monitoring.pod_monitor import PodMonitor


@pytest.fixture(scope="module")
def pod_monitor(cluster, testconfig, request, blame, limitador):
"""Creates Pod Monitor object to watch over '/metrics' endpoint of limitador pod"""
project = cluster.change_project(testconfig["service_protection"]["system_project"])

endpoints = [MetricsEndpoint("/metrics", "http")]
monitor = PodMonitor.create_instance(project, blame("pd"), endpoints, match_labels={"app": limitador.name()})
request.addfinalizer(monitor.delete)
monitor.commit()
return monitor


@pytest.fixture(scope="module", autouse=True)
def wait_for_active_targets(prometheus, pod_monitor):
"""Waits for all endpoints in Pod Monitor to become active targets"""
assert prometheus.is_reconciled(pod_monitor)
51 changes: 51 additions & 0 deletions testsuite/tests/singlecluster/limitador/metrics/test_metrics.py
@@ -0,0 +1,51 @@
"""Tests for Limitador metrics"""

import pytest

from testsuite.kuadrant.policy.rate_limit import Limit


@pytest.fixture(scope="module")
def rate_limit(rate_limit):
"""Add limit to the policy"""
rate_limit.add_limit("multiple", [Limit(3, 10)])
return rate_limit


@pytest.fixture(scope="module", autouse=True)
def scrape_metrics_created_by_requests(prometheus, pod_monitor, client):
"""
Sends 5 requests, of which 3 are authorized and 2 are rate limited.
Waits until Prometheus scrapes the '/metrics' endpoint.
"""
client.get_many("/get", 5)
prometheus.wait_for_scrape(pod_monitor, "/metrics")


@pytest.mark.parametrize("metric, expected_value", [("authorized_calls", 3), ("limited_calls", 2)])
def test_calls_metric(prometheus, limitador, rate_limit, metric, expected_value, pod_monitor):
"""Tests that `authorized_calls` and `limited_calls` are emitted and correctly incremented"""
metrics = prometheus.get_metrics(
labels={
"pod": limitador.pod.name(),
"limitador_namespace": f"{rate_limit.namespace()}/{rate_limit.name()}",
"job": f"{pod_monitor.namespace()}/{pod_monitor.name()}",
}
)

authorized = metrics.filter(lambda x: x["metric"]["__name__"] == metric)
assert len(authorized.metrics) == 1
assert authorized.values[0] == expected_value


def test_limitador_status_metric(prometheus, limitador, pod_monitor):
"""Tests that `limitador_up` metric is emitted"""
# We have to use `PodMonitor` here. If `ServiceMonitor` were used, the `job` label would contain the limitador
# service name, so it would not be possible to tell that the metric was scraped by this monitor
metrics = prometheus.get_metrics(
labels={"pod": limitador.pod.name(), "job": f"{pod_monitor.namespace()}/{pod_monitor.name()}"}
)

limitador_up = metrics.filter(lambda x: x["metric"]["__name__"] == "limitador_up")
assert len(limitador_up.metrics) == 1
assert limitador_up.values[0] == 1
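
The `get_metrics` helper wraps Prometheus's instant-query HTTP API; a roughly equivalent raw query, sketched with plain `httpx` (the URL, token, and label values are placeholders, and `query_limitador_metric` is a hypothetical helper, not part of the testsuite):

import httpx

def query_limitador_metric(base_url: str, token: str, metric: str, labels: dict[str, str]) -> list:
    """Instant query against the standard Prometheus HTTP API."""
    selector = ",".join(f'{k}="{v}"' for k, v in labels.items())
    response = httpx.get(
        f"{base_url}/api/v1/query",
        params={"query": f"{metric}{{{selector}}}"},
        headers={"Authorization": f"Bearer {token}"},
        verify=False,  # thanos-querier routes often use self-signed certificates
    )
    response.raise_for_status()
    return response.json()["data"]["result"]

# e.g. query_limitador_metric(url, token, "limitador_up", {"job": "kuadrant-system/limitador-pd"})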
