diff --git a/Makefile b/Makefile
index a21b9db5..eb773082 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: commit-acceptance pylint mypy black reformat test performance authorino poetry poetry-no-dev mgc container-image polish-junit reportportal authorino-standalone limitador kuadrant kuadrant-only disruptive
+.PHONY: commit-acceptance pylint mypy black reformat test authorino poetry poetry-no-dev mgc container-image polish-junit reportportal authorino-standalone limitador kuadrant kuadrant-only disruptive
 
 TB ?= short
 LOGLEVEL ?= INFO
@@ -38,7 +38,7 @@ all-is-package:
 
 # pattern to run individual testfile or all testfiles in directory
 testsuite/%: FORCE poetry-no-dev
-	$(PYTEST) --performance -v $(flags) $@
+	$(PYTEST) -v $(flags) $@
 
 test: ## Run all non mgc tests
 test pytest tests: kuadrant
@@ -67,10 +67,6 @@ dnstls: ## Run DNS and TLS tests
 dnstls: poetry-no-dev
 	$(PYTEST) -n4 -m 'dnspolicy or tlspolicy' --dist loadfile --enforce $(flags) testsuite
 
-performance: ## Run performance tests
-performance: poetry-no-dev
-	$(PYTEST) --performance $(flags) testsuite/tests/kuadrant/authorino/performance
-
 disruptive: ## Run disruptive tests
 disruptive: poetry-no-dev
 	$(PYTEST) -m 'disruptive' $(flags) testsuite
diff --git a/config/settings.local.yaml.tpl b/config/settings.local.yaml.tpl
index 54d6ef4b..413e16c2 100644
--- a/config/settings.local.yaml.tpl
+++ b/config/settings.local.yaml.tpl
@@ -25,10 +25,6 @@
 #    collector_url: "rpc://jaeger-collector.com:4317"  # Tracing collector URL (may be internal)
 #    query_url: "http://jaeger-query.com"  # Tracing query URL
 #  cfssl: "cfssl"  # Path to the CFSSL library for TLS tests
-#  hyperfoil:
-#    url: "HYPERFOIL_URL"
-#    generate_reports: True  # True, if each test should generate a report
-#    report_dir: "reports"  # Directory, to which the reports should be saved
 #  service_protection:
 #    system_project: "kuadrant-system"  # Namespace where Kuadrant resource resides
 #    project: "kuadrant"  # Namespace where tests will run
diff --git a/config/settings.yaml b/config/settings.yaml
index 09e3eb19..c6426e31 100644
--- a/config/settings.yaml
+++ b/config/settings.yaml
@@ -21,9 +21,6 @@ default:
   authorino:
     deploy: true
     log_level: "debug"
-  hyperfoil:
-    generate_reports: True
-    reports_dir: "reports"
   control_plane:
     managedzone: "aws-mz"
     issuer:
diff --git a/pyproject.toml b/pyproject.toml
index 90d185f6..70a82894 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -41,7 +41,6 @@ line-length = 120
 [tool.pytest.ini_options]
 markers = [
     "issue: Reference to covered issue",
-    "performance: Performance tests have unique needs",
     "authorino: Test is using Authorino features",
     "standalone_only: Test is using features available only in standalone mode, without Kuadrant",
     "kuadrant_only: Test is using features available only in Kuadrant mode",
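For context: the performance marker dropped from pyproject.toml above was applied as a module-level mark in the deleted test_perf_basic.py at the end of this diff, and enforced at test setup by the --performance flag that the conftest.py hunk below removes. A minimal sketch of that opt-in pattern, exactly as the deleted code used it:

import pytest

# Module-level mark from the deleted test module; pytest_runtest_setup
# (see the conftest.py hunk below) skipped such tests unless the suite
# was invoked with --performance.
pytestmark = [pytest.mark.performance]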
diff --git a/testsuite/hyperfoil/__init__.py b/testsuite/hyperfoil/__init__.py
deleted file mode 100644
index 23fca4d2..00000000
--- a/testsuite/hyperfoil/__init__.py
+++ /dev/null
@@ -1,95 +0,0 @@
-"""Module containing facade for hyperfoil manipulation"""
-
-import shutil
-import typing
-from io import StringIO
-from pathlib import Path
-
-import backoff
-import yaml
-from apyproxy import ApyProxy
-
-
-class StartedRun:
-    """Hyperfoil run that was already started"""
-
-    def __init__(self, client, run_id) -> None:
-        super().__init__()
-        self.client = client
-        self.run_id = run_id
-
-    def wait(self, timeout: int) -> dict:
-        """Waits until the run completes"""
-
-        @backoff.on_predicate(backoff.constant, lambda x: not x["completed"], interval=5, max_time=timeout)
-        def _wait():
-            response = self.client.run._(self.run_id).get()
-            return response.json()
-
-        return _wait()
-
-    def stats(self):
-        """Returns Stats for the run, needs to be finished"""
-        return self.client.run._(self.run_id).stats.all.json.get().json()
-
-    def report(self, name, directory):
-        """Returns Stats for the run, needs to be finished"""
-        with self.client.run._(self.run_id).report.get(stream=True) as response:
-            response.raise_for_status()
-            with Path(directory).joinpath(name).open("wb") as file:
-                shutil.copyfileobj(response.raw, file)
-
-
-class Benchmark:
-    """Hyperfoil Benchmark object"""
-
-    def __init__(self, client, name) -> None:
-        super().__init__()
-        self.client = client
-        self.name = name
-
-    def start(self, desc: str = "", **params) -> StartedRun:
-        """Starts the Benchmark and returns a specific Run that was started"""
-        run_id = (
-            self.client.benchmark._(self.name).start.get(params={"templateParam": params, "desc": desc}).json()["id"]
-        )
-        return StartedRun(self.client, run_id)
-
-
-class Hyperfoil:
-    """Facade for Hyperfoil client"""
-
-    def __init__(self, url) -> None:
-        super().__init__()
-        self.client = ApyProxy(url)
-
-    def create_benchmark(
-        self,
-        name,
-        agents: dict,
-        http: dict,
-        benchmark: dict,
-        additional_files: dict[str, typing.IO],
-    ) -> Benchmark:
-        """
-        Creates or overrides benchmark
-
-        :param name: Name of the new benchmark, if already defined in the definition, it will be overriden
-        :param agents: Dict representation for agents section, will be used only if missing in the definition
-                       https://hyperfoil.io/userguide/benchmark/agents.html
-        :param http: Dict representation for http section, will be used only if missing in the definition
-                     https://hyperfoil.io/userguide/benchmark/http.html
-        :param benchmark: Definition of the benchmark in the dict form, may contain template parameters
-        :param additional_files: All files handles (already opened) that will be included in the request,
-                                 can be closed afterwards
-        :return: Benchmark
-        """
-        additional_files = additional_files or {}
-        if "agents" not in benchmark:
-            benchmark["agents"] = agents["agents"]
-        if "http" not in benchmark:
-            benchmark["http"] = http["http"]
-        benchmark["name"] = name
-        files = {"benchmark": StringIO(yaml.dump(benchmark)), **additional_files}  # type: ignore
-        self.client.benchmark.post(files=files)
-        return Benchmark(self.client, name)
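Taken together, the facade deleted above exposed a small driver API: create a benchmark, start a run, wait, then collect stats and a report. A minimal usage sketch, with placeholder URL/host values (every method name and signature mirrors the deleted code):

from testsuite.hyperfoil import Hyperfoil

agents = {"agents": [{"agent-1": {"host": "localhost", "port": 22}}]}
http = {"http": [{"host": "http://gateway.example.com:80", "sharedConnections": 10}]}
definition = {"phases": []}  # benchmark dict; name/agents/http are filled in if missing

hf = Hyperfoil("http://hyperfoil.example.com")  # placeholder controller URL
benchmark = hf.create_benchmark("smoke", agents, http, definition, additional_files={})
run = benchmark.start(desc="smoke run")
result = run.wait(timeout=600)        # polls every 5 s until the run completes
assert result["completed"]
stats = run.stats()                   # aggregated stats; the run must be finished
run.report("smoke.html", "reports")   # streams the HTML report to reports/smoke.html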
diff --git a/testsuite/tests/conftest.py b/testsuite/tests/conftest.py
index 023fa74e..d202c081 100644
--- a/testsuite/tests/conftest.py
+++ b/testsuite/tests/conftest.py
@@ -29,9 +29,6 @@
 
 def pytest_addoption(parser):
     """Add options to include various kinds of tests in testrun"""
-    parser.addoption(
-        "--performance", action="store_true", default=False, help="Run also performance tests (default: False)"
-    )
     parser.addoption(
         "--enforce", action="store_true", default=False, help="Fails tests instead of skip, if capabilities are missing"
     )
@@ -46,8 +43,6 @@ def pytest_runtest_setup(item):
     In this function we skip or fail the tests that were selected but their capabilities are not available
     """
     marks = [i.name for i in item.iter_markers()]
-    if "performance" in marks and not item.config.getoption("--performance"):
-        pytest.skip("Excluding performance tests")
     skip_or_fail = pytest.fail if item.config.getoption("--enforce") else pytest.skip
     standalone = item.config.getoption("--standalone")
     if standalone:
diff --git a/testsuite/tests/kuadrant/authorino/performance/__init__.py b/testsuite/tests/kuadrant/authorino/performance/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/testsuite/tests/kuadrant/authorino/performance/conftest.py b/testsuite/tests/kuadrant/authorino/performance/conftest.py
deleted file mode 100644
index ba7bff75..00000000
--- a/testsuite/tests/kuadrant/authorino/performance/conftest.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""
-Conftest for performance tests
-"""
-
-import os
-import warnings
-from datetime import datetime
-from pathlib import Path
-
-import pytest
-from dynaconf import ValidationError
-from weakget import weakget
-
-from testsuite.httpx.auth import HttpxOidcClientAuth
-from testsuite.hyperfoil import Hyperfoil, StartedRun
-
-
-@pytest.fixture(scope="session")
-def hyperfoil(testconfig):
-    """Hyperfoil client"""
-    try:
-        return Hyperfoil(testconfig["hyperfoil"]["url"])
-    except (KeyError, ValidationError) as exc:
-        return pytest.skip(f"Hyperfoil configuration item is missing: {exc}")
-
-
-@pytest.fixture(scope="module")
-def agents():
-    """Agent configuration for benchmark"""
-    return {"agents": [{"agent-1": {"host": "localhost", "port": 22}}]}
-
-
-@pytest.fixture(scope="module")
-def http():
-    """HTTP configured of the benchmark, contains hosts"""
-    return {}
-
-
-@pytest.fixture(scope="module")
-def files():
-    """All files required by the benchmark"""
-    return {}
-
-
-@pytest.fixture(scope="module")
-def name():
-    """Name of the benchmark"""
-    return None
-
-
-@pytest.fixture(scope="module")
-def benchmark(hyperfoil, name, template, agents, http, files):
-    """Create new benchmark"""
-    return hyperfoil.create_benchmark(name, agents, http, template, files)
-
-
-@pytest.fixture(scope="module")
-def keycloak_auth(keycloak):
-    """Returns Keycloak authentication object for HTTPX"""
-    return HttpxOidcClientAuth(keycloak.get_token)
-
-
-@pytest.fixture(scope="function")
-def generate_report(request, testconfig):
-    """Generates HTML report for the performance test"""
-    generate = weakget(testconfig)["hyperfoil"]["generate_reports"] % True
-    if not generate:
-        return lambda _: None
-
-    directory = weakget(testconfig)["hyperfoil"]["report_dir"] % None
-    if not directory:
-        warnings.warn("Unable to save report, report_dir is missing in configuration")
-        return lambda _: None
-
-    directory = Path(directory)
-    if not os.path.exists(directory):
-        os.makedirs(directory)
-
-    def _gen(run: StartedRun):
-        name = f"{request.node.name}-{datetime.now().strftime('%d%m%Y-%H%M')}.html"
-        run.report(name, directory)
-        warnings.warn(f"Report for test {request.node.name} is saved at {directory}/{name}")
-
-    return _gen
diff --git a/testsuite/tests/kuadrant/authorino/performance/templates/__init__.py b/testsuite/tests/kuadrant/authorino/performance/templates/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/testsuite/tests/kuadrant/authorino/performance/templates/template_perf_basic_query_rhsso.hf.yaml b/testsuite/tests/kuadrant/authorino/performance/templates/template_perf_basic_query_rhsso.hf.yaml
deleted file mode 100644
index 18a8a010..00000000
--- a/testsuite/tests/kuadrant/authorino/performance/templates/template_perf_basic_query_rhsso.hf.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# name, http and agents sections will be added via test
-phases:
-- rampUp:
-    increasingRate:
-      duration: 1m
-      maxDuration: 3m
-      initialUsersPerSec: 8
-      targetUsersPerSec: 20
-      scenario:
-      - loadCsv: &loadCsv
-        - randomCsvRow:
-            file: 'keycloak_auth.csv'
-            skipComments: true
-            removeQuotes: true
-            columns:
-              0: 0 #hostname
-              1: 1 #rhsso_url
-              2: 2 #path
-              3: 3 #data
-      - createToken: &createToken
-        - httpRequest:
-            authority:
-              fromVar: 1
-            POST:
-              fromVar: 2
-            headers:
-              Content-Type: application/x-www-form-urlencoded
-            body:
-              fromVar: 3
-            handler:
-              body:
-                json:
-                  query: .access_token
-                  toVar: access_token
-      - postLargeData: &postLargeData
-        - template:
-            pattern: Bearer ${access_token}
-            toVar: authorization
-        - httpRequest:
-            authority:
-              fromVar: 0
-            POST: /post
-            sync: true
-            headers:
-              authorization:
-                fromVar: authorization
-            body:
-              fromFile: message_1kb.txt
-- steadyLoad:
-    constantRate:
-      duration: 2m
-      maxDuration: 4m
-      usersPerSec: 12
-      startAfter:
-        phase: rampUp
-      scenario:
-      - loadCsv: *loadCsv
-      - createToken: *createToken
-      - postLargeData: *postLargeData
\ No newline at end of file
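The comment on the first deleted line of the template notes that the name, http and agents sections are injected when the template is submitted. Per the create_benchmark logic in the deleted facade, the effect is roughly the following (using dict.setdefault as a stand-in for the facade's membership checks; template_text, agents and http stand in for the fixtures shown elsewhere in this diff):

import yaml

# Condensed sketch of what Hyperfoil.create_benchmark (deleted above) does
# with this template before POSTing it to the controller:
benchmark = yaml.safe_load(template_text)         # the YAML above lacks name/http/agents
benchmark.setdefault("agents", agents["agents"])  # only applied when absent
benchmark.setdefault("http", http["http"])
benchmark["name"] = "test_perf_basic"             # name set by the test below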
diff --git a/testsuite/tests/kuadrant/authorino/performance/test_perf_basic.py b/testsuite/tests/kuadrant/authorino/performance/test_perf_basic.py
deleted file mode 100644
index df70c724..00000000
--- a/testsuite/tests/kuadrant/authorino/performance/test_perf_basic.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""
-    Test that will set up authorino and prepares objects for performance testing.
-    Fill necessary data to benchmark template.
-    Run the test and assert results.
-"""
-
-from importlib import resources
-
-import pytest
-import yaml
-
-from testsuite.utils import add_port, create_csv_file, MESSAGE_1KB
-
-# Maximal runtime of test (need to cover all performance stages)
-MAX_RUN_TIME = 10 * 60
-
-pytestmark = [pytest.mark.performance]
-
-
-@pytest.fixture(scope="module")
-def name():
-    """Name of the benchmark"""
-    return "test_perf_basic"
-
-
-@pytest.fixture(scope="module")
-def template():
-    """Template path"""
-    path = resources.files("testsuite.tests.kuadrant.authorino.performance.templates").joinpath(
-        "template_perf_basic_query_rhsso.hf.yaml"
-    )
-    with path.open("r", encoding="UTF-8") as stream:
-        return yaml.safe_load(stream)
-
-
-@pytest.fixture(scope="module")
-def http(keycloak, client):
-    """Configures host for the gateway and Keycloak"""
-    return {
-        "http": [
-            {"host": add_port(keycloak.server_url), "sharedConnections": 100},
-            {"host": add_port(str(client.base_url)), "sharedConnections": 20},
-        ]
-    }
-
-
-@pytest.fixture(scope="module")
-def files(keycloak, client):
-    """Adds Message and Keycloak CSV to the files"""
-    token_url_obj = add_port(keycloak.well_known["token_endpoint"], return_netloc=False)
-    client_url = add_port(str(client.base_url))
-    with MESSAGE_1KB.open("r", encoding="UTF-8") as file:
-        yield {
-            "message_1kb.txt": file,
-            "keycloak_auth.csv": create_csv_file(
-                [[client_url, token_url_obj.netloc, token_url_obj.path, keycloak.token_params()]]
-            ),
-        }
-
-
-def test_basic_perf_rhsso(generate_report, client, benchmark, keycloak_auth, blame):
-    """
-    Test checks that authorino is set up correctly.
-    Runs the created benchmark.
-    Asserts it was successful.
-    """
-    assert client.get("/get", auth=keycloak_auth).status_code == 200
-    run = benchmark.start(blame("run"))
-
-    obj = run.wait(MAX_RUN_TIME)
-    assert obj["completed"], "Ran out of time"
-
-    generate_report(run)
-    stats = run.stats()
-
-    assert stats
-    info = stats.get("info", {})
-    assert len(info.get("errors")) == 0, f"Errors occured: {info.get('errors')}"
-    assert stats.get("failures") == []
-    assert stats.get("stats", []) != []
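One behavior worth noting in the assertions above: StartedRun.wait in the deleted facade returns the last polled run object even when max_time expires before completion (backoff.on_predicate gives up and returns the final value rather than raising), which is why the test checks the completed flag itself. A condensed, backoff-free sketch of the equivalent polling loop, with fetch_run standing in for the facade's HTTP call:

import time

def wait_for_run(fetch_run, timeout, interval=5):
    """Polls until the run reports completed or timeout elapses;
    returns the last payload either way - the caller checks 'completed'."""
    deadline = time.monotonic() + timeout
    run = fetch_run()
    while not run["completed"] and time.monotonic() < deadline:
        time.sleep(interval)
        run = fetch_run()
    return run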