From 74fab13e5f75735d673f4d8c1c44ed6a74fd0768 Mon Sep 17 00:00:00 2001
From: John Chilton
Date: Wed, 9 Oct 2024 10:47:27 -0400
Subject: [PATCH] pytest-y API test fixtures

---
 lib/galaxy_test/api/conftest.py     | 161 ++++++++++++++++
 lib/galaxy_test/base/api_asserts.py |  15 +-
 lib/galaxy_test/base/decorators.py  |   8 +
 lib/galaxy_test/base/interactor.py  |   5 +-
 lib/galaxy_test/base/populators.py  | 293 ++++++++++++++++++++++++++--
 pytest.ini                          |   1 +
 6 files changed, 465 insertions(+), 18 deletions(-)
 create mode 100644 lib/galaxy_test/api/conftest.py

diff --git a/lib/galaxy_test/api/conftest.py b/lib/galaxy_test/api/conftest.py
new file mode 100644
index 000000000000..fbad6e0285a0
--- /dev/null
+++ b/lib/galaxy_test/api/conftest.py
@@ -0,0 +1,161 @@
+"""Fixtures for a version of API testing that relies more heavily on pytest injection."""
+
+import os
+from dataclasses import dataclass
+from typing import (
+    Any,
+    Iterator,
+    List,
+    Optional,
+)
+
+import pytest
+
+from galaxy.tool_util.verify.test_data import TestDataResolver
+from galaxy_test.base.api import (
+    AnonymousGalaxyInteractor,
+    ApiTestInteractor,
+)
+from galaxy_test.base.api_util import (
+    get_admin_api_key,
+    get_user_api_key,
+)
+from galaxy_test.base.env import setup_keep_outdir
+from galaxy_test.base.populators import (
+    _raise_skip_if,
+    DatasetCollectionPopulator,
+    DatasetPopulator,
+    get_tool_ids,
+    RequiredTool,
+    TargetHistory,
+)
+from galaxy_test.base.testcase import host_port_and_url
+
+
+@dataclass
+class ApiConfigObject:
+    host: str
+    port: Optional[str]
+    url: str
+    user_api_key: Optional[str]
+    admin_api_key: Optional[str]
+    test_data_resolver: Any
+    keepOutdir: Any
+
+
+@pytest.fixture(scope="session")
+def api_test_config_object(real_driver) -> ApiConfigObject:
+    host, port, url = host_port_and_url(real_driver)
+    user_api_key = get_user_api_key()
+    admin_api_key = get_admin_api_key()
+    test_data_resolver = TestDataResolver()
+    keepOutdir = setup_keep_outdir()
+    return ApiConfigObject(
+        host,
+        port,
+        url,
+        user_api_key,
+        admin_api_key,
+        test_data_resolver,
+        keepOutdir,
+    )
+
+
+@pytest.fixture(scope="session")
+def galaxy_interactor(api_test_config_object: ApiConfigObject) -> ApiTestInteractor:
+    return ApiTestInteractor(api_test_config_object)
+
+
+@pytest.fixture(scope="session")
+def dataset_populator(galaxy_interactor: ApiTestInteractor) -> DatasetPopulator:
+    return DatasetPopulator(galaxy_interactor)
+
+
+@pytest.fixture(scope="session")
+def dataset_collection_populator(galaxy_interactor: ApiTestInteractor) -> DatasetCollectionPopulator:
+    return DatasetCollectionPopulator(galaxy_interactor)
+
+
+@pytest.fixture(scope="session")
+def anonymous_galaxy_interactor(api_test_config_object: ApiConfigObject) -> AnonymousGalaxyInteractor:
+    return AnonymousGalaxyInteractor(api_test_config_object)
+
+
+_celery_app = None
+_celery_worker = None
+
+
+@pytest.fixture(autouse=True, scope="session")
+def request_celery_app(celery_session_app, celery_config):
+    try:
+        global _celery_app
+        _celery_app = celery_session_app
+        yield
+    finally:
+        if os.environ.get("GALAXY_TEST_EXTERNAL") is None:
+            from galaxy.celery import celery_app
+
+            celery_app.fork_pool.stop()
+            celery_app.fork_pool.join(timeout=5)
+
+
+@pytest.fixture(autouse=True, scope="session")
+def request_celery_worker(celery_session_worker, celery_config, celery_worker_parameters):
+    global _celery_worker
+    _celery_worker = celery_session_worker
+
+
+@pytest.fixture(scope="session", autouse=True)
+def celery_worker_parameters():
+    return {
+        "queues": ("galaxy.internal", "galaxy.external"),
+    }
+
+
+@pytest.fixture(scope="session")
+def celery_parameters():
+    return {
+        "task_create_missing_queues": True,
+        "task_default_queue": "galaxy.internal",
+    }
+
+
+@pytest.fixture
+def history_id(dataset_populator: DatasetPopulator, request) -> Iterator[str]:
+    history_name = f"API Test History for {request.node.nodeid}"
+    with dataset_populator.test_history(name=history_name) as history_id:
+        yield history_id
+
+
+@pytest.fixture
+def target_history(
+    dataset_populator: DatasetPopulator, dataset_collection_populator: DatasetCollectionPopulator, history_id: str
+) -> TargetHistory:
+    return TargetHistory(dataset_populator, dataset_collection_populator, history_id)
+
+
+@pytest.fixture
+def required_tool(dataset_populator: DatasetPopulator, history_id: str, required_tool_ids: List[str]) -> RequiredTool:
+    if len(required_tool_ids) != 1:
+        raise AssertionError("required_tool fixture must only be used on methods that require a single tool")
+    tool_id = required_tool_ids[0]
+    tool = RequiredTool(dataset_populator, tool_id, history_id)
+    return tool
+
+
+@pytest.fixture(autouse=True)
+def check_required_tools(anonymous_galaxy_interactor, request):
+    for marker in request.node.iter_markers():
+        if marker.name == "requires_tool_id":
+            tool_id = marker.args[0]
+            _raise_skip_if(tool_id not in get_tool_ids(anonymous_galaxy_interactor))
+
+
+@pytest.fixture
+def required_tool_ids(request) -> List[str]:
+    tool_ids = []
+    for marker in request.node.iter_markers():
+        if marker.name == "requires_tool_id":
+            tool_id = marker.args[0]
+            tool_ids.append(tool_id)
+    return tool_ids
diff --git a/lib/galaxy_test/base/api_asserts.py b/lib/galaxy_test/base/api_asserts.py
index 8e5796791e98..fd423ee2735b 100644
--- a/lib/galaxy_test/base/api_asserts.py
+++ b/lib/galaxy_test/base/api_asserts.py
@@ -24,8 +24,8 @@ def assert_status_code_is(response: Response, expected_status_code: int, failure
 def assert_status_code_is_ok(response: Response, failure_message: Optional[str] = None):
     """Assert that the supplied response is okay.
 
-    The easier alternative ``response.raise_for_status()`` might be
-    preferable generally.
+    This is an alternative to ``response.raise_for_status()`` with a more detailed
+    error message.
 
     .. seealso:: :py:meth:`requests.Response.raise_for_status()`
     """
@@ -35,6 +35,17 @@ def assert_status_code_is_ok(response: Response, failure_message: Optional[str]
     _report_status_code_error(response, "2XX", failure_message)
 
 
+def assert_status_code_is_not_ok(response: Response, failure_message: Optional[str] = None):
+    """Assert that the supplied response is not okay.
+
+    .. seealso:: :py:meth:`assert_status_code_is_ok`
+    """
+    response_status_code = response.status_code
+    is_two_hundred_status_code = response_status_code >= 200 and response_status_code < 300
+    if is_two_hundred_status_code:
+        _report_status_code_error(response, "not 2XX", failure_message)
+
+
 def _report_status_code_error(
     response: Response, expected_status_code: Union[str, int], failure_message: Optional[str]
 ):
diff --git a/lib/galaxy_test/base/decorators.py b/lib/galaxy_test/base/decorators.py
index 324d6e9ea884..521fa1ae0963 100644
--- a/lib/galaxy_test/base/decorators.py
+++ b/lib/galaxy_test/base/decorators.py
@@ -67,6 +67,14 @@ def wrapped_method(*args, **kwargs):
 
     return wrapped_method
 
 
+def requires_tool_id(tool_id: str):
+
+    def method_wrapper(method):
+        return pytest.mark.requires_tool_id(tool_id)(method)
+
+    return method_wrapper
+
+
 def requires_new_history(method):
     return _wrap_method_with_galaxy_requirement(method, "new_history")
diff --git a/lib/galaxy_test/base/interactor.py b/lib/galaxy_test/base/interactor.py
index 45c055ce817b..4dabb5963b07 100644
--- a/lib/galaxy_test/base/interactor.py
+++ b/lib/galaxy_test/base/interactor.py
@@ -4,9 +4,12 @@ class TestCaseGalaxyInteractor(GalaxyInteractorApi):
     def __init__(self, functional_test_case, test_user=None, api_key=None):
         self.functional_test_case = functional_test_case
+        admin_api_key = getattr(functional_test_case, "master_api_key", None) or getattr(
+            functional_test_case, "admin_api_key", None
+        )
         super().__init__(
             galaxy_url=functional_test_case.url,
-            master_api_key=getattr(functional_test_case, "master_api_key", None),
+            master_api_key=admin_api_key,
             api_key=api_key or getattr(functional_test_case, "user_api_key", None),
             test_user=test_user,
             keep_outputs_dir=getattr(functional_test_case, "keepOutdir", None),
         )
diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py
index a31c5fe73956..26683c42d066 100644
--- a/lib/galaxy_test/base/populators.py
+++ b/lib/galaxy_test/base/populators.py
@@ -57,11 +57,12 @@
 from typing import (
     Any,
     Callable,
+    cast,
     Dict,
     Generator,
     List,
     NamedTuple,
     Optional,
     Set,
     Tuple,
     Union,
@@ -80,7 +81,11 @@
 from pydantic import UUID4
 from requests import Response
 from rocrate.rocrate import ROCrate
-from typing_extensions import Literal
+from typing_extensions import (
+    Literal,
+    Self,
+    TypedDict,
+)
 
 from galaxy.schema.schema import (
     CreateToolLandingRequestPayload,
@@ -118,6 +123,7 @@ from galaxy_test.base.json_schema_utils import JsonSchemaValidator
 
 from . import api_asserts
 from .api import (
+    AnonymousGalaxyInteractor,
     ApiTestInteractor,
     HasAnonymousGalaxyInteractor,
 )
@@ -155,6 +161,15 @@ def wrapped_method(*args, **kwargs):
 
     return wrapped_method
 
 
+def get_tool_ids(interactor: AnonymousGalaxyInteractor):
+    index = interactor.get("tools", data=dict(in_panel=False))
+    api_asserts.assert_status_code_is_ok(index, "Failed to fetch toolbox for target Galaxy.")
+    tools = index.json()
+    # The tool index is grouped into panel sections by default; in_panel=False requests a flat list.
+    tool_ids = [itemgetter("id")(_) for _ in tools]
+    return tool_ids
+
+
 def skip_without_tool(tool_id: str):
     """Decorate an API test method as requiring a specific tool.
@@ -162,18 +177,10 @@ def skip_without_tool(tool_id: str):
     """
 
     def method_wrapper(method):
-        def get_tool_ids(api_test_case: HasAnonymousGalaxyInteractor):
-            interactor = api_test_case.anonymous_galaxy_interactor
-            index = interactor.get("tools", data=dict(in_panel=False))
-            api_asserts.assert_status_code_is_ok(index, "Failed to fetch toolbox for target Galaxy.")
-            tools = index.json()
-            # In panels by default, so flatten out sections...
-            tool_ids = [itemgetter("id")(_) for _ in tools]
-            return tool_ids
 
         @wraps(method)
         def wrapped_method(api_test_case, *args, **kwargs):
-            _raise_skip_if(tool_id not in get_tool_ids(api_test_case))
+            _raise_skip_if(tool_id not in get_tool_ids(api_test_case.anonymous_galaxy_interactor))
             return method(api_test_case, *args, **kwargs)
 
         return wrapped_method
@@ -684,6 +691,11 @@ def wait_for_job(
     def get_job_details(self, job_id: str, full: bool = False) -> Response:
         return self._get(f"jobs/{job_id}", {"full": full})
 
+    def job_outputs(self, job_id: str) -> List[Dict[str, Any]]:
+        outputs = self._get(f"jobs/{job_id}/outputs")
+        outputs.raise_for_status()
+        return outputs.json()
+
     def compute_hash(
         self,
         dataset_id: str,
@@ -851,19 +863,29 @@ def _cleanup_history(self, history_id: str) -> None:
     @contextlib.contextmanager
     def test_history_for(self, method) -> Generator[str, None, None]:
         require_new_history = has_requirement(method, "new_history")
-        with self.test_history(require_new=require_new_history) as history_id:
+        name = f"API Test History for {method.__name__}"
+        with self.test_history(require_new=require_new_history, name=name) as history_id:
             yield history_id
 
     @contextlib.contextmanager
-    def test_history(self, require_new: bool = True) -> Generator[str, None, None]:
-        with self._test_history(require_new=require_new, cleanup_callback=self._cleanup_history) as history_id:
+    def test_history(self, require_new: bool = True, name: Optional[str] = None) -> Generator[str, None, None]:
+        with self._test_history(
+            require_new=require_new, cleanup_callback=self._cleanup_history, name=name
+        ) as history_id:
             yield history_id
 
     @contextlib.contextmanager
     def _test_history(
-        self, require_new: bool = True, cleanup_callback: Optional[Callable[[str], None]] = None
+        self,
+        require_new: bool = True,
+        cleanup_callback: Optional[Callable[[str], None]] = None,
+        name: Optional[str] = None,
     ) -> Generator[str, None, None]:
-        history_id = self.new_history()
+        if name is not None:
+            kwds = {"name": name}
+        else:
+            kwds = {}
+        history_id = self.new_history(**kwds)
         try:
             yield history_id
         except Exception:
@@ -983,6 +1005,9 @@ def tools_post(self, payload: dict, url="tools") -> Response:
         tool_response = self._post(url, data=payload)
         return tool_response
 
+    def describe_tool_execution(self, tool_id: str) -> "DescribeToolExecution":
+        return DescribeToolExecution(self, tool_id)
+
     def materialize_dataset_instance(
         self, history_id: str, id: str, source: str = "hda", validate_hashes: bool = False
     ):
@@ -3350,6 +3375,184 @@ def _store_payload(
         return payload
 
 
+class DescribeToolExecutionOutput:
+
+    def __init__(self, dataset_populator: DatasetPopulator, history_id: str, hda_id: str):
+        self._dataset_populator = dataset_populator
+        self._history_id = history_id
+        self._hda_id = hda_id
+
+    @property
+    def contents(self):
+        return self._dataset_populator.get_history_dataset_content(history_id=self._history_id, dataset_id=self._hda_id)
+
+    def with_contents(self, expected_contents: str) -> Self:
+        contents = self.contents
+        if contents != expected_contents:
+            raise AssertionError(f"Output dataset had contents {contents} but expected {expected_contents}")
+        return self
+
+    def with_contents_stripped(self, expected_contents: str) -> Self:
+        contents = self.contents
+        if contents.strip() != expected_contents:
+            raise AssertionError(f"Output dataset had contents {contents} but expected {expected_contents}")
+        return self
+
+    def containing(self, expected_contents: str) -> Self:
+        contents = self.contents
+        if expected_contents not in contents:
+            raise AssertionError(
+                f"Output dataset had contents {contents} which does not contain the expected text {expected_contents}"
+            )
+        return self
+
+    assert_contains = containing
+    assert_has_contents = with_contents
+
+
+class DescribeJob:
+
+    def __init__(self, dataset_populator: DatasetPopulator, history_id: str, job_id: str):
+        self._dataset_populator = dataset_populator
+        self._history_id = history_id
+        self._job_id = job_id
+        self._final_details = None
+
+    def _wait_for(self):
+        if self._final_details is None:
+            self._dataset_populator.wait_for_job(self._job_id, assert_ok=False)
+            self._final_details = self._dataset_populator.get_job_details(self._job_id).json()
+
+    def with_final_state(self, expected_state: str) -> Self:
+        self._wait_for()
+        final_state = self._final_details["state"]
+        if final_state != expected_state:
+            raise AssertionError(
+                f"Expected job {self._job_id} to end with state {expected_state} but it ended with state {final_state}"
+            )
+        return self
+
+    @property
+    def with_single_output(self):
+        return self.with_output(0)
+
+    def with_output(self, output: Union[str, int]):
+        self.with_final_state("ok")
+        outputs = self._dataset_populator.job_outputs(self._job_id)
+        by_name = isinstance(output, str)
+        dataset_id: Optional[str] = None
+        if by_name:
+            for output_assoc in outputs:
+                if output_assoc["name"] == output:
+                    dataset_id = output_assoc["dataset"]["id"]
+        else:
+            dataset_id = outputs[output]["dataset"]["id"]
+        if dataset_id is None:
+            raise AssertionError(f"Could not find job output identified by {output}")
+        return DescribeToolExecutionOutput(self._dataset_populator, self._history_id, dataset_id)
+
+    assert_has_output = with_output
+    assert_has_single_output = with_single_output
+
+
+class DescribeFailure:
+    def __init__(self, response: Response):
+        self._response = response
+
+    def with_status_code(self, code: int) -> Self:
+        api_asserts.assert_status_code_is(self._response, code)
+        return self
+
+    def with_error_containing(self, message: str) -> Self:
+        assert message in self._response.text
+        return self
+
+
+class RequiredTool:
+
+    def __init__(self, dataset_populator: DatasetPopulator, tool_id: str, default_history_id: Optional[str]):
+        self._dataset_populator = dataset_populator
+        self._tool_id = tool_id
+        self._default_history_id = default_history_id
+
+    @property
+    def execute(self) -> "DescribeToolExecution":
+        execution = DescribeToolExecution(self._dataset_populator, self._tool_id)
+        if self._default_history_id:
+            execution.in_history(self._default_history_id)
+        return execution
+
+
+class DescribeToolExecution:
+    _history_id: Optional[str] = None
+    _execute_response: Optional[Response] = None
+    _input_format: Optional[str] = None
+
+    def __init__(self, dataset_populator: DatasetPopulator, tool_id: str):
+        self._dataset_populator = dataset_populator
+        self._tool_id = tool_id
+        self._inputs = {}
+
+    def in_history(self, has_history_id: Union[str, "TargetHistory"]) -> Self:
+        if isinstance(has_history_id, str):
+            self._history_id = has_history_id
+        else:
+            self._history_id = has_history_id._history_id
+        return self
+
+    def with_inputs(self, inputs: Dict[str, Any]) -> Self:
+        self._inputs = inputs
+        return self
+
+    def with_nested_inputs(self, inputs: Dict[str, Any]) -> Self:
+        self._inputs = inputs
+        self._input_format = "21.01"
+        return self
+
+    def _execute(self):
+        kwds = {}
+        if self._input_format is not None:
+            kwds["input_format"] = self._input_format
+        self._execute_response = self._dataset_populator.run_tool_raw(
+            self._tool_id, self._inputs, self._history_id, assert_ok=False, **kwds
+        )
+
+    def _ensure_executed(self) -> None:
+        if self._execute_response is None:
+            self._execute()
+
+    def _assert_executed_ok(self) -> Dict[str, Any]:
+        self._ensure_executed()
+        execute_response = self._execute_response
+        assert execute_response is not None
+        api_asserts.assert_status_code_is_ok(execute_response)
+        return execute_response.json()
+
+    def assert_has_n_jobs(self, n: int) -> Self:
+        response = self._assert_executed_ok()
+        jobs = response["jobs"]
+        if len(jobs) != n:
+            raise AssertionError(f"Expected tool execution to produce {n} jobs but it produced {len(jobs)}")
+        return self
+
+    @property
+    def assert_has_single_job(self) -> DescribeJob:
+        return self.assert_has_n_jobs(1).assert_has_job(0)
+
+    def assert_has_job(self, job_index: int = 0) -> DescribeJob:
+        response = self._assert_executed_ok()
+        job = response["jobs"][job_index]
+        return DescribeJob(self._dataset_populator, self._history_id, job["id"])
+
+    @property
+    def assert_fails(self) -> DescribeFailure:
+        self._ensure_executed()
+        execute_response = self._execute_response
+        assert execute_response is not None
+        api_asserts.assert_status_code_is_not_ok(execute_response)
+        return DescribeFailure(execute_response)
+
+
 class GiHttpMixin:
     """Mixin for adapting Galaxy testing populators helpers to bioblend."""
 
@@ -3430,6 +3633,66 @@ def __init__(self, gi):
         self.dataset_populator = GiDatasetPopulator(gi)
 
 
+class TargetHistory:
+
+    def __init__(
+        self,
+        dataset_populator: DatasetPopulator,
+        dataset_collection_populator: DatasetCollectionPopulator,
+        history_id: str,
+    ):
+        self._dataset_populator = dataset_populator
+        self._dataset_collection_populator = dataset_collection_populator
+        self._history_id = history_id
+
+    def with_dataset(
+        self,
+        content: str,
+        named: Optional[str] = None,
+    ) -> "HasSrcDict":
+        kwd = {}
+        if named is not None:
+            kwd["name"] = named
+        new_dataset = self._dataset_populator.new_dataset(
+            history_id=self._history_id,
+            content=content,
+            assert_ok=True,
+            wait=True,
+            **kwd,
+        )
+        return HasSrcDict("hda", new_dataset)
+
+    def with_pair(self, contents: List[str]) -> "HasSrcDict":
+        create_pair_response = self._dataset_collection_populator.create_pair_in_history(
+            self._history_id, contents=contents, direct_upload=True, wait=True
+        )
+        api_asserts.assert_status_code_is_ok(create_pair_response)
+        hdca = create_pair_response.json()["output_collections"][0]
+        return HasSrcDict("hdca", hdca)
+
+    def execute(self, tool_id: str) -> "DescribeToolExecution":
+        return self._dataset_populator.describe_tool_execution(tool_id).in_history(self)
+
+
+class SrcDict(TypedDict):
+    src: str
+    id: str
+
+
+class HasSrcDict:
+    def __init__(self, src_type: str, api_object: Dict[str, Any]):
+        self.src_type = src_type
+        self.api_object = api_object
+
+    @property
+    def src_dict(self) -> SrcDict:
+        return SrcDict({"src": self.src_type, "id": cast(str, self.api_object["id"])})
+
+    @property
+    def to_dict(self):
+        return self.api_object
+
+
 def wait_on(function: Callable, desc: str, timeout: timeout_type = DEFAULT_TIMEOUT):
     return tool_util_wait_on(function, desc, timeout)
 
diff --git a/pytest.ini b/pytest.ini
index 7c1d40306310..31665649c393 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -10,6 +10,7 @@ markers =
     external_dependency_management: slow tests which resolves dependencies with e.g. conda
     require_new_history: test that needs to be given a new history
     tool: marks test as a tool test
+    requires_tool_id: marks API test as requiring the specified tool id
     gtn_screenshot: marks test as a screenshot producer for galaxy training network
     local: mark indicates, that it is sufficient to run test locally to get relevant artifacts (e.g. screenshots)
     external: mark indicates, that test has to be run against external production server to get relevant artifacts (e.g. screenshots)
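--
Usage sketch (not part of the diff above): with this patch applied, an API test
can declare its tool dependency via the new marker and receive a ready-to-use
history and tool handle through injection instead of populator boilerplate. The
tool id ("cat1") and its input name ("input1") below are assumptions about the
target Galaxy's tool panel, chosen only for illustration:

    import pytest

    from galaxy_test.base.populators import (
        RequiredTool,
        TargetHistory,
    )


    @pytest.mark.requires_tool_id("cat1")  # assumed tool id; the test skips if it is absent
    def test_cat_round_trip(target_history: TargetHistory, required_tool: RequiredTool):
        # target_history wraps the per-test history created by the history_id fixture.
        hda = target_history.with_dataset("1\t2\t3", named="input")
        # required_tool is bound to the single id declared by the marker and to the same
        # history, so .execute yields a DescribeToolExecution targeting that history.
        execution = required_tool.execute.with_inputs({"input1": hda.src_dict})
        execution.assert_has_single_job.assert_has_single_output.with_contents_stripped("1\t2\t3")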
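The failure side reads the same way: assert_fails inspects the HTTP response when
submission itself is rejected, while with_final_state watches the job after a
successful submission. The tool id and parameter name here are again illustrative
assumptions, not something the patch guarantees:

    @pytest.mark.requires_tool_id("job_properties")  # assumed test tool; skipped if missing
    def test_job_failure_description(required_tool: RequiredTool):
        # Assumes the tool exposes a boolean "failbool" parameter that drives it into its
        # failure branch; submission still returns 2XX, so the job itself is described.
        job = required_tool.execute.with_inputs({"failbool": True}).assert_has_single_job
        job.with_final_state("error")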