diff --git a/testsuite/kuadrant/__init__.py b/testsuite/kuadrant/__init__.py
index 9e6687a4..43c61fcd 100644
--- a/testsuite/kuadrant/__init__.py
+++ b/testsuite/kuadrant/__init__.py
@@ -28,6 +28,10 @@ def deployment(self):
         with self.context:
             return selector("deployment", labels={"app": self.spec_name}).object(cls=Deployment)
 
+    def name(self):
+        """Overrides the `name` method from `APIObject` so that it returns the name of the Kuadrant section"""
+        return self.spec_name
+
     def __getitem__(self, name):
         return self.kuadrant_cr.model.spec[self.spec_name][name]
 
@@ -67,6 +71,22 @@ def metrics_service(self):
         return selector(f"service/{self.spec_name}-controller-metrics").object()
 
 
+class LimitadorSection(KuadrantSection):
+    """Limitador `spec.limitador` section from the KuadrantCR object"""
+
+    @property
+    def deployment(self) -> Deployment:
+        """Returns Deployment object for this Limitador"""
+        with self.context:
+            return selector(f"deployment/{self.name()}").object(cls=Deployment)
+
+    @property
+    def pod(self):
+        """Returns Pod object for this Limitador"""
+        with self.context:
+            return selector("pod", labels={"app": self.name()}).object()
+
+
 class KuadrantCR(CustomResource):
     """Represents Kuadrant CR objects"""
 
@@ -77,7 +97,7 @@ def authorino(self) -> AuthorinoSection:
         return AuthorinoSection(self, "authorino")
 
     @property
-    def limitador(self) -> KuadrantSection:
+    def limitador(self) -> LimitadorSection:
         """Returns spec.limitador from Kuadrant object"""
         self.model.spec.setdefault("limitador", {})
-        return KuadrantSection(self, "limitador")
+        return LimitadorSection(self, "limitador")
diff --git a/testsuite/kuadrant/limitador.py b/testsuite/kuadrant/limitador.py
index 68ad1efd..07194245 100644
--- a/testsuite/kuadrant/limitador.py
+++ b/testsuite/kuadrant/limitador.py
@@ -14,3 +14,9 @@ def deployment(self) -> Deployment:
         """Returns Deployment object for this Limitador"""
         with self.context:
             return selector(f"deployment/{self.name()}").object(cls=Deployment)
+
+    @property
+    def pod(self):
+        """Returns Pod object for this Limitador"""
+        with self.context:
+            return selector("pod", labels={"app": "limitador"}).object()
diff --git a/testsuite/kubernetes/monitoring/__init__.py b/testsuite/kubernetes/monitoring/__init__.py
new file mode 100644
index 00000000..e6d47b04
--- /dev/null
+++ b/testsuite/kubernetes/monitoring/__init__.py
@@ -0,0 +1,13 @@
+"""Kubernetes monitoring common objects"""
+
+from dataclasses import dataclass
+
+
+@dataclass
+class MetricsEndpoint:
+    """Dataclass for an endpoint definition in the ServiceMonitor and PodMonitor Kubernetes objects.
+    It contains the path, port, and scrape interval for the exported metrics."""
+
+    path: str = "/metrics"
+    port: str = "http"
+    interval: str = "30s"
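For illustration, this is how a `MetricsEndpoint` serializes into the monitor specs built below. A minimal sketch, assuming the testsuite's `testsuite.utils.asdict` helper behaves like the standard `dataclasses.asdict` for this flat dataclass:

```python
# Sketch only: dataclasses.asdict stands in for testsuite.utils.asdict here.
from dataclasses import asdict

from testsuite.kubernetes.monitoring import MetricsEndpoint

endpoint = MetricsEndpoint(path="/metrics", port="http")
print(asdict(endpoint))  # {'path': '/metrics', 'port': 'http', 'interval': '30s'}
```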
diff --git a/testsuite/kubernetes/monitoring/pod_monitor.py b/testsuite/kubernetes/monitoring/pod_monitor.py
new file mode 100644
index 00000000..771fc0e6
--- /dev/null
+++ b/testsuite/kubernetes/monitoring/pod_monitor.py
@@ -0,0 +1,37 @@
+"""Module implements Pod Monitor CR"""
+
+from testsuite.kubernetes import KubernetesObject
+from testsuite.kubernetes.client import KubernetesClient
+from testsuite.kubernetes.monitoring import MetricsEndpoint
+from testsuite.utils import asdict
+
+
+class PodMonitor(KubernetesObject):
+    """Represents a PodMonitor object for OpenShift"""
+
+    @classmethod
+    def create_instance(
+        cls,
+        cluster: KubernetesClient,
+        name: str,
+        endpoints: list[MetricsEndpoint],
+        match_labels: dict[str, str],
+        labels: dict[str, str] | None = None,
+    ):
+        """Creates a new instance of PodMonitor"""
+        model = {
+            "apiVersion": "monitoring.coreos.com/v1",
+            "kind": "PodMonitor",
+            "metadata": {
+                "name": name,
+                "labels": labels,
+            },
+            "spec": {
+                "podMetricsEndpoints": [asdict(e) for e in endpoints],
+                "selector": {
+                    "matchLabels": match_labels,
+                },
+            },
+        }
+
+        return cls(model, context=cluster.context)
diff --git a/testsuite/kubernetes/service_monitor.py b/testsuite/kubernetes/monitoring/service_monitor.py
similarity index 74%
rename from testsuite/kubernetes/service_monitor.py
rename to testsuite/kubernetes/monitoring/service_monitor.py
index 43fac224..51d3fd72 100644
--- a/testsuite/kubernetes/service_monitor.py
+++ b/testsuite/kubernetes/monitoring/service_monitor.py
@@ -1,22 +1,11 @@
-"""Module implements Service Monitor CR """
-
-from dataclasses import dataclass
+"""Module implements Service Monitor CR"""
 
+from testsuite.kubernetes.monitoring import MetricsEndpoint
 from testsuite.utils import asdict
 from testsuite.kubernetes.client import KubernetesClient
 from testsuite.kubernetes import KubernetesObject
 
 
-@dataclass
-class MetricsEndpoint:
-    """Dataclass for endpoint definition in ServiceMonitor Kubernetes object.
-    It contains endpoint path and port to the exported metrics."""
-
-    path: str = "/metrics"
-    port: str = "http"
-    interval: str = "30s"
-
-
 class ServiceMonitor(KubernetesObject):
     """Kubernetes ServiceMonitor object"""
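A quick usage sketch of the new `PodMonitor` class, mirroring the `pod_monitor` fixture added later in this diff (`cluster` is a `KubernetesClient` from the testsuite; the monitor name and labels are illustrative):

```python
from testsuite.kubernetes.monitoring import MetricsEndpoint
from testsuite.kubernetes.monitoring.pod_monitor import PodMonitor

endpoints = [MetricsEndpoint("/metrics", "http")]
monitor = PodMonitor.create_instance(
    cluster,                            # KubernetesClient of the target project
    "limitador-pm",                     # hypothetical monitor name
    endpoints,
    match_labels={"app": "limitador"},  # selects the Limitador pod
)
monitor.commit()                        # create the CR on the cluster
```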
diff --git a/testsuite/prometheus.py b/testsuite/prometheus.py
index eb7d8371..e4ce52ea 100644
--- a/testsuite/prometheus.py
+++ b/testsuite/prometheus.py
@@ -7,7 +7,8 @@
 from apyproxy import ApyProxy
 from httpx import Client
 
-from testsuite.kubernetes.service_monitor import ServiceMonitor
+from testsuite.kubernetes.monitoring.pod_monitor import PodMonitor
+from testsuite.kubernetes.monitoring.service_monitor import ServiceMonitor
 
 
 def _params(key: str = "", labels: dict[str, str] = None) -> dict[str, str]:
@@ -62,17 +63,22 @@ def get_metrics(self, key: str = "", labels: dict[str, str] = None) -> Metrics:
         return Metrics(response.json()["data"]["result"])
 
     @backoff.on_predicate(backoff.constant, interval=10, jitter=None, max_tries=35)
-    def is_reconciled(self, service_monitor: ServiceMonitor):
-        """True, if all endpoints in ServiceMonitor are active targets"""
-        scrape_pools = set(target["scrapePool"] for target in self.get_active_targets())
-        endpoints = len(service_monitor.model.spec.endpoints)
+    def is_reconciled(self, monitor: ServiceMonitor | PodMonitor):
+        """True if all endpoints in the ServiceMonitor or PodMonitor are active targets"""
+        scrape_pools = set(target["scrapePool"].lower() for target in self.get_active_targets())
+
+        if isinstance(monitor, ServiceMonitor):
+            endpoints = len(monitor.model.spec["endpoints"])
+        else:
+            endpoints = len(monitor.model.spec["podMetricsEndpoints"])
+
         for i in range(endpoints):
-            if f"serviceMonitor/{service_monitor.namespace()}/{service_monitor.name()}/{i}" not in scrape_pools:
+            if f"{monitor.kind()}/{monitor.namespace()}/{monitor.name()}/{i}".lower() not in scrape_pools:
                 return False
         return True
 
-    def wait_for_scrape(self, service_monitor: ServiceMonitor, metrics_path: str):
-        """Wait before next metrics scrape on service is finished"""
+    def wait_for_scrape(self, monitor: ServiceMonitor | PodMonitor, metrics_path: str):
+        """Waits until the next metrics scrape of the monitored endpoints has finished"""
         call_time = datetime.now(timezone.utc)
@@ -81,7 +87,7 @@ def _wait_for_scrape():
             """Wait for new scrape after the function call time"""
             for target in self.get_active_targets():
                 if (
-                    f"serviceMonitor/{service_monitor.namespace()}/{service_monitor.name()}" in target["scrapePool"]
+                    f"{monitor.kind()}/{monitor.namespace()}/{monitor.name()}".lower() in target["scrapePool"].lower()
                     and metrics_path in target["scrapeUrl"]
                 ):
                     return call_time < datetime.fromisoformat(target["lastScrape"][:26]).replace(tzinfo=timezone.utc)
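The `.lower()` normalization deserves a note: Prometheus Operator names scrape pools as `<kind>/<namespace>/<name>/<endpoint-index>` with a lower-camel-cased kind (e.g. `podMonitor/...`), while `monitor.kind()` returns the CR kind `PodMonitor`, so the comparison must be case-insensitive. A small sketch with illustrative values:

```python
# Illustrative values only; the real ones come from the cluster.
monitor_kind = "PodMonitor"                         # monitor.kind()
scrape_pool = "podMonitor/kuadrant/limitador-pm/0"  # target["scrapePool"]

key = f"{monitor_kind}/kuadrant/limitador-pm/0"
assert key != scrape_pool                  # a direct comparison fails on case
assert key.lower() == scrape_pool.lower()  # the normalized comparison matches
```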
diff --git a/testsuite/tests/singlecluster/authorino/metrics/conftest.py b/testsuite/tests/singlecluster/authorino/metrics/conftest.py
index 395960c4..5debeefb 100644
--- a/testsuite/tests/singlecluster/authorino/metrics/conftest.py
+++ b/testsuite/tests/singlecluster/authorino/metrics/conftest.py
@@ -1,40 +1,9 @@
 """Conftest for the Authorino metrics tests"""
 
 import pytest
-import yaml
-from openshift_client import selector
 
-from testsuite.httpx import KuadrantClient
-from testsuite.kubernetes.config_map import ConfigMap
-from testsuite.kubernetes.service_monitor import ServiceMonitor, MetricsEndpoint
-from testsuite.prometheus import Prometheus
-
-
-@pytest.fixture(scope="package")
-def prometheus(cluster):
-    """
-    Return an instance of Thanos metrics client
-    Skip tests if query route is not properly configured
-    """
-    openshift_monitoring = cluster.change_project("openshift-monitoring")
-    # Check if metrics are enabled
-    try:
-        with openshift_monitoring.context:
-            cm = selector("cm/cluster-monitoring-config").object(cls=ConfigMap)
-            assert yaml.safe_load(cm["config.yaml"])["enableUserWorkload"]
-    except Exception:  # pylint: disable=broad-exception-caught
-        pytest.skip("User workload monitoring is disabled")
-
-    # find thanos-querier route in the openshift-monitoring project
-    # this route allows to query metrics
-
-    routes = openshift_monitoring.get_routes_for_service("thanos-querier")
-    if len(routes) == 0:
-        pytest.skip("Skipping metrics tests as query route is not properly configured")
-
-    url = ("https://" if "tls" in routes[0].model.spec else "http://") + routes[0].model.spec.host
-    with KuadrantClient(headers={"Authorization": f"Bearer {cluster.token}"}, base_url=url, verify=False) as client:
-        yield Prometheus(client)
+from testsuite.kubernetes.monitoring import MetricsEndpoint
+from testsuite.kubernetes.monitoring.service_monitor import ServiceMonitor
 
 
 @pytest.fixture(scope="package")
diff --git a/testsuite/tests/singlecluster/conftest.py b/testsuite/tests/singlecluster/conftest.py
index 06730f5d..ab456f01 100644
--- a/testsuite/tests/singlecluster/conftest.py
+++ b/testsuite/tests/singlecluster/conftest.py
@@ -2,6 +2,7 @@
 all methods are placeholders for now since we do not work with Kuadrant"""
 
 import pytest
+import yaml
 from openshift_client import selector
 
 from testsuite.backend.httpbin import Httpbin
@@ -10,9 +11,12 @@
 from testsuite.gateway.envoy.route import EnvoyVirtualRoute
 from testsuite.gateway.gateway_api.gateway import KuadrantGateway
 from testsuite.gateway.gateway_api.route import HTTPRoute
+from testsuite.httpx import KuadrantClient
 from testsuite.kuadrant import KuadrantCR
 from testsuite.kuadrant.policy.authorization.auth_policy import AuthPolicy
 from testsuite.kuadrant.policy.rate_limit import RateLimitPolicy
+from testsuite.kubernetes.config_map import ConfigMap
+from testsuite.prometheus import Prometheus
 
 
 @pytest.fixture(scope="session")
@@ -84,6 +88,33 @@ def kuadrant(request, testconfig):
     return kuadrant
 
 
+@pytest.fixture(scope="package")
+def prometheus(cluster):
+    """
+    Returns an instance of the Thanos metrics client.
+    Skips the tests if the query route is not properly configured.
+    """
+    openshift_monitoring = cluster.change_project("openshift-monitoring")
+    # Check if metrics are enabled
+    try:
+        with openshift_monitoring.context:
+            cm = selector("cm/cluster-monitoring-config").object(cls=ConfigMap)
+            assert yaml.safe_load(cm["config.yaml"])["enableUserWorkload"]
+    except Exception:  # pylint: disable=broad-exception-caught
+        pytest.skip("User workload monitoring is disabled")
+
+    # Find the thanos-querier route in the openshift-monitoring project;
+    # this route allows querying metrics
+    routes = openshift_monitoring.get_routes_for_service("thanos-querier")
+    if len(routes) == 0:
+        pytest.skip("Skipping metrics tests as query route is not properly configured")
+
+    url = ("https://" if "tls" in routes[0].model.spec else "http://") + routes[0].model.spec.host
+    with KuadrantClient(headers={"Authorization": f"Bearer {cluster.token}"}, base_url=url, verify=False) as client:
+        yield Prometheus(client)
+
+
 @pytest.fixture(scope="session")
 def backend(request, cluster, blame, label, testconfig):
     """Deploys Httpbin backend"""
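The moved `prometheus` fixture gates the whole package on user-workload monitoring being enabled: it asserts `enableUserWorkload` inside the `config.yaml` key of the `cluster-monitoring-config` ConfigMap. A minimal sketch of that check, with a hand-written payload standing in for the real ConfigMap data:

```python
import yaml

# Hand-written stand-in for cm["config.yaml"] from cm/cluster-monitoring-config.
config_yaml = "enableUserWorkload: true"

assert yaml.safe_load(config_yaml)["enableUserWorkload"]  # passes; tests may run
```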
a/testsuite/tests/singlecluster/limitador/conftest.py
+++ b/testsuite/tests/singlecluster/limitador/conftest.py
@@ -3,6 +3,13 @@
 import pytest
 
 
+@pytest.fixture(scope="session")
+def limitador(kuadrant):
+    """Returns the Limitador section of the Kuadrant CR"""
+    return kuadrant.limitador
+
+
 @pytest.fixture(scope="module", autouse=True)
 def commit(request, rate_limit):
     """Commits all important stuff before tests"""
diff --git a/testsuite/tests/singlecluster/limitador/metrics/__init__.py b/testsuite/tests/singlecluster/limitador/metrics/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/testsuite/tests/singlecluster/limitador/metrics/conftest.py b/testsuite/tests/singlecluster/limitador/metrics/conftest.py
new file mode 100644
index 00000000..1e04c177
--- /dev/null
+++ b/testsuite/tests/singlecluster/limitador/metrics/conftest.py
@@ -0,0 +1,24 @@
+"""Conftest for Limitador metrics tests"""
+
+import pytest
+
+from testsuite.kubernetes.monitoring import MetricsEndpoint
+from testsuite.kubernetes.monitoring.pod_monitor import PodMonitor
+
+
+@pytest.fixture(scope="module")
+def pod_monitor(cluster, testconfig, request, blame, limitador):
+    """Creates a PodMonitor object watching the '/metrics' endpoint of the Limitador pod"""
+    project = cluster.change_project(testconfig["service_protection"]["system_project"])
+
+    endpoints = [MetricsEndpoint("/metrics", "http")]
+    monitor = PodMonitor.create_instance(project, blame("pd"), endpoints, match_labels={"app": limitador.name()})
+    request.addfinalizer(monitor.delete)
+    monitor.commit()
+    return monitor
+
+
+@pytest.fixture(scope="module", autouse=True)
+def wait_for_active_targets(prometheus, pod_monitor):
+    """Waits for all endpoints in the PodMonitor to become active targets"""
+    assert prometheus.is_reconciled(pod_monitor)
diff --git a/testsuite/tests/singlecluster/limitador/metrics/test_metrics.py b/testsuite/tests/singlecluster/limitador/metrics/test_metrics.py
new file mode 100644
index 00000000..c1c550a7
--- /dev/null
+++ b/testsuite/tests/singlecluster/limitador/metrics/test_metrics.py
@@ -0,0 +1,51 @@
+"""Tests for Limitador metrics"""
+
+import pytest
+
+from testsuite.kuadrant.policy.rate_limit import Limit
+
+
+@pytest.fixture(scope="module")
+def rate_limit(rate_limit):
+    """Adds a limit to the policy"""
+    rate_limit.add_limit("multiple", [Limit(3, 10)])
+    return rate_limit
+
+
+@pytest.fixture(scope="module", autouse=True)
+def scrape_metrics_created_by_requests(prometheus, pod_monitor, client):
+    """
+    Sends 5 requests, of which 3 are authorized and 2 are rate limited.
+    Waits until Prometheus scrapes the '/metrics' endpoint.
+    """
+    client.get_many("/get", 5)
+    prometheus.wait_for_scrape(pod_monitor, "/metrics")
+
+
+@pytest.mark.parametrize("metric, expected_value", [("authorized_calls", 3), ("limited_calls", 2)])
+def test_calls_metric(prometheus, limitador, rate_limit, metric, expected_value, pod_monitor):
+    """Tests that `authorized_calls` and `limited_calls` are emitted and correctly incremented"""
+    metrics = prometheus.get_metrics(
+        labels={
+            "pod": limitador.pod.name(),
+            "limitador_namespace": f"{rate_limit.namespace()}/{rate_limit.name()}",
+            "job": f"{pod_monitor.namespace()}/{pod_monitor.name()}",
+        }
+    )
+
+    authorized = metrics.filter(lambda x: x["metric"]["__name__"] == metric)
+    assert len(authorized.metrics) == 1
+    assert authorized.values[0] == expected_value
+
+
+def test_limitador_status_metric(prometheus, limitador, pod_monitor):
+    """Tests that the `limitador_up` metric is emitted"""
+    # We have to use a `PodMonitor` here. With a `ServiceMonitor`, the `job` label would contain
+    # the Limitador service name, so it would be impossible to tell whether the metric was
+    # created by this test (by this monitor).
+    metrics = prometheus.get_metrics(
+        labels={"pod": limitador.pod.name(), "job": f"{pod_monitor.namespace()}/{pod_monitor.name()}"}
+    )
+
+    limitador_up = metrics.filter(lambda x: x["metric"]["__name__"] == "limitador_up")
+    assert len(limitador_up.metrics) == 1
+    assert limitador_up.values[0] == 1
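For context on what `metrics.filter(...)` and `.values` operate on in these tests: `get_metrics` wraps the result array of a Prometheus instant query. A hypothetical sample for `authorized_calls` — the label values are illustrative, but the shape follows the Prometheus HTTP API:

```python
# Hypothetical instant-query result; only the shape matters here.
result = [
    {
        "metric": {
            "__name__": "authorized_calls",
            "pod": "limitador-limitador-abc12",      # hypothetical pod name
            "job": "kuadrant-system/limitador-pm",   # <PodMonitor namespace>/<name>
        },
        "value": [1700000000.0, "3"],                # [timestamp, value-as-string]
    }
]

# Equivalent of metrics.filter(lambda x: x["metric"]["__name__"] == "authorized_calls")
samples = [s for s in result if s["metric"]["__name__"] == "authorized_calls"]
assert len(samples) == 1
assert float(samples[0]["value"][1]) == 3
```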