diff --git a/testsuite/openshift/client.py b/testsuite/openshift/client.py
index dfebaca5..3ef18780 100644
--- a/testsuite/openshift/client.py
+++ b/testsuite/openshift/client.py
@@ -98,6 +98,11 @@ def get_route(self, name):
         with self.context:
             return oc.selector(f"route/{name}").object(cls=OpenshiftRoute)
 
+    def get_routes_for_service(self, service_name: str) -> list[OpenshiftRoute]:
+        """Returns list of routes for given service"""
+        with self.context:
+            return oc.selector("route", field_selectors={"spec.to.name": service_name}).objects(cls=OpenshiftRoute)
+
     def do_action(self, verb: str, *args, auto_raise: bool = True, parse_output: bool = False):
         """Run an oc command."""
         with self.context:
diff --git a/testsuite/tests/kuadrant/authorino/metrics/conftest.py b/testsuite/tests/kuadrant/authorino/metrics/conftest.py
index c19ff5cd..ab2faddb 100644
--- a/testsuite/tests/kuadrant/authorino/metrics/conftest.py
+++ b/testsuite/tests/kuadrant/authorino/metrics/conftest.py
@@ -19,9 +19,9 @@ def prometheus(request, openshift):
     # find thanos-querier route in the openshift-monitoring project
     # this route allows to query metrics
     openshift_monitoring = openshift.change_project("openshift-monitoring")
-    routes = openshift_monitoring.routes.for_service("thanos-querier")
+    routes = openshift_monitoring.get_routes_for_service("thanos-querier")
     if len(routes) > 0:
-        url = ("https://" if "tls" in routes[0]["spec"] else "http://") + routes[0]["spec"]["host"]
+        url = ("https://" if "tls" in routes[0].model.spec else "http://") + routes[0].model.spec.host
         prometheus = Prometheus(url, openshift.token, openshift.project)
         request.addfinalizer(prometheus.close)
         return prometheus
diff --git a/testsuite/tests/kuadrant/authorino/metrics/test_deep_metrics.py b/testsuite/tests/kuadrant/authorino/metrics/test_deep_metrics.py
index 7a2b34e0..1ba4576a 100644
--- a/testsuite/tests/kuadrant/authorino/metrics/test_deep_metrics.py
+++ b/testsuite/tests/kuadrant/authorino/metrics/test_deep_metrics.py
@@ -1,6 +1,8 @@
 """Tests for the functionality of the deep-evaluator metric samples"""
 import pytest
 
+from testsuite.objects import Property, Value
+
 
 @pytest.fixture(scope="module")
 def mockserver_expectation(request, mockserver, module_label):
@@ -20,20 +22,10 @@ def authorization(authorization, mockserver_expectation):
     - http metadata from the mockserver
     - non-empty response
     """
-    authorization.identity.anonymous("anonymous", metrics=True)
-    authorization.authorization.opa_policy("opa", "allow { true }", metrics=True)
-    authorization.metadata.http_metadata("http", mockserver_expectation, "GET", metrics=True)
-    authorization.responses.add(
-        {
-            "name": "json",
-            "json": {
-                "properties": [
-                    {"name": "auth", "value": "response"},
-                ]
-            },
-        },
-        metrics=True,
-    )
+    authorization.identity.add_anonymous("anonymous", metrics=True)
+    authorization.authorization.add_opa_policy("opa", "allow { true }", metrics=True)
+    authorization.metadata.add_http("http", mockserver_expectation, "GET", metrics=True)
+    authorization.responses.add_json("json", [Property("auth", Value("response"))], metrics=True)
     return authorization
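
Reviewer note: a minimal usage sketch of the new get_routes_for_service helper, mirroring what the prometheus fixture above does. Only change_project, get_routes_for_service, and the .model.spec access come from this diff; the thanos_querier_url wrapper is hypothetical and the OpenshiftClient class name is assumed from the module path — illustration only, not part of this PR.

    # Illustrative sketch, not part of this PR.
    # Assumes the client class in testsuite/openshift/client.py is named
    # OpenshiftClient; all method calls below are taken from this diff.
    from typing import Optional

    from testsuite.openshift.client import OpenshiftClient

    def thanos_querier_url(openshift: OpenshiftClient) -> Optional[str]:
        """Hypothetical helper: build the Thanos querier URL the way the prometheus fixture does."""
        monitoring = openshift.change_project("openshift-monitoring")
        routes = monitoring.get_routes_for_service("thanos-querier")
        if not routes:
            return None
        # OpenshiftRoute exposes the raw manifest via `.model`, as used in conftest.py above
        spec = routes[0].model.spec
        return ("https://" if "tls" in spec else "http://") + spec.host

Using a field selector on spec.to.name keeps the route lookup server-side, so the fixture no longer needs a dedicated routes helper object to filter routes by service.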