From 81efddafeb0dbfd0697b89a2c4d63d500b1544a0 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Wed, 9 Oct 2024 10:26:56 -0400 Subject: [PATCH 1/5] Refactor test_driver -> target for reuse in fixtures. --- lib/galaxy_test/base/env.py | 4 +++- lib/galaxy_test/base/testcase.py | 20 ++++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/lib/galaxy_test/base/env.py b/lib/galaxy_test/base/env.py index 0caee6da17f3..27a149b8f270 100644 --- a/lib/galaxy_test/base/env.py +++ b/lib/galaxy_test/base/env.py @@ -12,6 +12,8 @@ DEFAULT_WEB_HOST = socket.gethostbyname("localhost") +GalaxyTarget = Tuple[str, Optional[str], str] + def setup_keep_outdir() -> str: keep_outdir = os.environ.get("GALAXY_TEST_SAVE", "") @@ -23,7 +25,7 @@ def setup_keep_outdir() -> str: return keep_outdir -def target_url_parts() -> Tuple[str, Optional[str], str]: +def target_url_parts() -> GalaxyTarget: host = socket.gethostbyname(os.environ.get("GALAXY_TEST_HOST", DEFAULT_WEB_HOST)) port = os.environ.get("GALAXY_TEST_PORT") if port: diff --git a/lib/galaxy_test/base/testcase.py b/lib/galaxy_test/base/testcase.py index 6f148d19840b..58acf00ea408 100644 --- a/lib/galaxy_test/base/testcase.py +++ b/lib/galaxy_test/base/testcase.py @@ -9,6 +9,7 @@ from galaxy.tool_util.verify.test_data import TestDataResolver from galaxy.util.unittest import TestCase from galaxy_test.base.env import ( + GalaxyTarget, setup_keep_outdir, target_url_parts, ) @@ -16,6 +17,16 @@ log = logging.getLogger(__name__) +def host_port_and_url(test_driver: Optional[Any]) -> GalaxyTarget: + host, port, url = target_url_parts() + server_wrapper = test_driver and test_driver.server_wrappers and test_driver.server_wrappers[0] + if server_wrapper: + host = server_wrapper.host + port = server_wrapper.port + url = f"http://{host}:{port}{server_wrapper.prefix.rstrip('/')}/" + return host, port, url + + @pytest.mark.usefixtures("embedded_driver") class FunctionalTestCase(TestCase): """Base class for tests targetting 
actual Galaxy servers. @@ -34,14 +45,7 @@ class FunctionalTestCase(TestCase): _test_driver: Optional[Any] def setUp(self) -> None: - self.host, self.port, self.url = target_url_parts() - server_wrapper = ( - self._test_driver and self._test_driver.server_wrappers and self._test_driver.server_wrappers[0] - ) - if server_wrapper: - self.host = server_wrapper.host - self.port = server_wrapper.port - self.url = f"http://{self.host}:{self.port}{server_wrapper.prefix.rstrip('/')}/" + self.host, self.port, self.url = host_port_and_url(self._test_driver) self.test_data_resolver = TestDataResolver() self.keepOutdir = setup_keep_outdir() From 3f50b253273a43b345ce62231a130f19d0293a16 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Wed, 9 Oct 2024 10:47:27 -0400 Subject: [PATCH 2/5] pytest-y API test fixtures --- lib/galaxy/tool_util/verify/interactor.py | 10 +- lib/galaxy_test/api/conftest.py | 161 ++++++++ lib/galaxy_test/api/test_tools.py | 27 +- lib/galaxy_test/base/api_asserts.py | 15 +- lib/galaxy_test/base/decorators.py | 8 + lib/galaxy_test/base/interactor.py | 5 +- lib/galaxy_test/base/populators.py | 455 +++++++++++++++++++++- pytest.ini | 1 + 8 files changed, 636 insertions(+), 46 deletions(-) create mode 100644 lib/galaxy_test/api/conftest.py diff --git a/lib/galaxy/tool_util/verify/interactor.py b/lib/galaxy/tool_util/verify/interactor.py index 0185b6aff5b8..cccb1aaa9f58 100644 --- a/lib/galaxy/tool_util/verify/interactor.py +++ b/lib/galaxy/tool_util/verify/interactor.py @@ -390,14 +390,17 @@ def get_history(self, history_name: str = "test_history") -> Optional[Dict[str, @contextlib.contextmanager def test_history( - self, require_new: bool = True, cleanup_callback: Optional[Callable[[str], None]] = None + self, + require_new: bool = True, + cleanup_callback: Optional[Callable[[str], None]] = None, + name: Optional[str] = None, ) -> Generator[str, None, None]: history_id = None if not require_new: history_id = DEFAULT_TARGET_HISTORY cleanup = 
CLEANUP_TEST_HISTORIES - history_id = history_id or self.new_history() + history_id = history_id or self.new_history(name) try: yield history_id except Exception: @@ -407,7 +410,8 @@ def test_history( if cleanup and cleanup_callback is not None: cleanup_callback(history_id) - def new_history(self, history_name: str = "test_history", publish_history: bool = False) -> str: + def new_history(self, history_name: Optional[str] = None, publish_history: bool = False) -> str: + history_name = history_name or "test_history" create_response = self._post("histories", {"name": history_name}) try: create_response.raise_for_status() diff --git a/lib/galaxy_test/api/conftest.py b/lib/galaxy_test/api/conftest.py new file mode 100644 index 000000000000..e67769896c08 --- /dev/null +++ b/lib/galaxy_test/api/conftest.py @@ -0,0 +1,161 @@ +"""Fixtures for a version of API testing that relies more heavily on pytest injection.""" + +import os +from dataclasses import dataclass +from typing import ( + Any, + Iterator, + List, + Optional, +) + +import pytest + +from galaxy.tool_util.verify.test_data import TestDataResolver +from galaxy_test.base.api import ( + AnonymousGalaxyInteractor, + ApiTestInteractor, +) +from galaxy_test.base.api_util import ( + get_admin_api_key, + get_user_api_key, +) +from galaxy_test.base.env import setup_keep_outdir +from galaxy_test.base.populators import ( + _raise_skip_if, + DatasetCollectionPopulator, + DatasetPopulator, + get_tool_ids, + RequiredTool, + TargetHistory, +) +from galaxy_test.base.testcase import host_port_and_url + + +@dataclass +class ApiConfigObject: + host: str + port: Optional[str] + url: str + user_api_key: Optional[str] + admin_api_key: Optional[str] + test_data_resolver: Any + keepOutdir: Any + + +@pytest.fixture(scope="session") +def api_test_config_object(real_driver) -> ApiConfigObject: + host, port, url = host_port_and_url(real_driver) + user_api_key = get_user_api_key() + admin_api_key = get_admin_api_key() + test_data_resolver = 
TestDataResolver() + keepOutdir = setup_keep_outdir() + return ApiConfigObject( + host, + port, + url, + user_api_key, + admin_api_key, + test_data_resolver, + keepOutdir, + ) + + +@pytest.fixture(scope="session") +def galaxy_interactor(api_test_config_object: ApiConfigObject) -> ApiTestInteractor: + return ApiTestInteractor(api_test_config_object) + + +@pytest.fixture(scope="session") +def dataset_populator(galaxy_interactor: ApiTestInteractor) -> DatasetPopulator: + return DatasetPopulator(galaxy_interactor) + + +@pytest.fixture(scope="session") +def dataset_collection_populator(galaxy_interactor: ApiTestInteractor) -> DatasetCollectionPopulator: + return DatasetCollectionPopulator(galaxy_interactor) + + +@pytest.fixture(scope="session") +def anonymous_galaxy_interactor(api_test_config_object: ApiConfigObject) -> AnonymousGalaxyInteractor: + return AnonymousGalaxyInteractor(api_test_config_object) + + +_celery_app = None +_celery_worker = None + + +@pytest.fixture(autouse=True, scope="session") +def request_celery_app(celery_session_app, celery_config): + try: + global _celery_app + _celery_app = celery_session_app + yield + finally: + if os.environ.get("GALAXY_TEST_EXTERNAL") is None: + from galaxy.celery import celery_app + + celery_app.fork_pool.stop() + celery_app.fork_pool.join(timeout=5) + + +@pytest.fixture(autouse=True, scope="session") +def request_celery_worker(celery_session_worker, celery_config, celery_worker_parameters): + global _celery_worker + _celery_worker = celery_session_worker + + +@pytest.fixture(scope="session", autouse=True) +def celery_worker_parameters(): + return { + "queues": ("galaxy.internal", "galaxy.external"), + } + + +@pytest.fixture(scope="session") +def celery_parameters(): + return { + "task_create_missing_queues": True, + "task_default_queue": "galaxy.internal", + } + + +@pytest.fixture +def history_id(dataset_populator: DatasetPopulator, request) -> Iterator[str]: + history_name = f"API Test History for 
{request.node.nodeid}" + with dataset_populator.test_history(name=history_name) as history_id: + yield history_id + + +@pytest.fixture +def target_history( + dataset_populator: DatasetPopulator, dataset_collection_populator: DatasetCollectionPopulator, history_id: str +) -> TargetHistory: + return TargetHistory(dataset_populator, dataset_collection_populator, history_id) + + +@pytest.fixture +def required_tool(dataset_populator: DatasetPopulator, history_id: str, required_tool_ids: List[str]) -> RequiredTool: + if len(required_tool_ids) != 1: + raise AssertionError("required_tool fixture must only be used on methods that require a single tool") + tool_id = required_tool_ids[0] + tool = RequiredTool(dataset_populator, tool_id, history_id) + return tool + + +@pytest.fixture(autouse=True) +def check_required_tools(anonymous_galaxy_interactor, request): + for marker in request.node.iter_markers(): + if marker.name == "requires_tool_id": + tool_id = marker.args[0] + _raise_skip_if(tool_id not in get_tool_ids(anonymous_galaxy_interactor)) + + +@pytest.fixture +def required_tool_ids(request) -> List[str]: + tool_ids = [] + for marker in request.node.iter_markers(): + if marker.name == "requires_tool_id": + tool_id = marker.args[0] + tool_ids.append(tool_id) + return tool_ids diff --git a/lib/galaxy_test/api/test_tools.py b/lib/galaxy_test/api/test_tools.py index 921d706b79cf..af3034e014e1 100644 --- a/lib/galaxy_test/api/test_tools.py +++ b/lib/galaxy_test/api/test_tools.py @@ -3254,31 +3254,8 @@ def __build_group_list(self, history_id): hdca_list_id = response.json()["outputs"][0]["id"] return hdca_list_id - def __build_nested_list(self, history_id): - response = self.dataset_collection_populator.upload_collection( - history_id, - "list:paired", - elements=[ - { - "name": "test0", - "elements": [ - {"src": "pasted", "paste_content": "123\n", "name": "forward", "ext": "txt"}, - {"src": "pasted", "paste_content": "456\n", "name": "reverse", "ext": "txt"}, - ], - }, - { - 
"name": "test1", - "elements": [ - {"src": "pasted", "paste_content": "789\n", "name": "forward", "ext": "txt"}, - {"src": "pasted", "paste_content": "0ab\n", "name": "reverse", "ext": "txt"}, - ], - }, - ], - wait=True, - ) - self._assert_status_code_is(response, 200) - hdca_list_id = response.json()["outputs"][0]["id"] - return hdca_list_id + def __build_nested_list(self, history_id: str) -> str: + return self.dataset_collection_populator.example_list_of_pairs(history_id) def _build_pair(self, history_id, contents, run_cat=False): create_response = self.dataset_collection_populator.create_pair_in_history( diff --git a/lib/galaxy_test/base/api_asserts.py b/lib/galaxy_test/base/api_asserts.py index 8e5796791e98..fd423ee2735b 100644 --- a/lib/galaxy_test/base/api_asserts.py +++ b/lib/galaxy_test/base/api_asserts.py @@ -24,8 +24,8 @@ def assert_status_code_is(response: Response, expected_status_code: int, failure def assert_status_code_is_ok(response: Response, failure_message: Optional[str] = None): """Assert that the supplied response is okay. - The easier alternative ``response.raise_for_status()`` might be - preferable generally. + This is an alternative to ``response.raise_for_status()`` with a more detailed + error message. .. seealso:: :py:meth:`requests.Response.raise_for_status()` """ @@ -35,6 +35,17 @@ def assert_status_code_is_ok(response: Response, failure_message: Optional[str] _report_status_code_error(response, "2XX", failure_message) +def assert_status_code_is_not_ok(response: Response, failure_message: Optional[str] = None): + """Assert that the supplied response is not okay. + + .. 
seealso:: :py:meth:`assert_status_code_is_ok` + """ + response_status_code = response.status_code + is_two_hundred_status_code = response_status_code >= 200 and response_status_code <= 300 + if is_two_hundred_status_code: + _report_status_code_error(response, "2XX", failure_message) + + def _report_status_code_error( response: Response, expected_status_code: Union[str, int], failure_message: Optional[str] ): diff --git a/lib/galaxy_test/base/decorators.py b/lib/galaxy_test/base/decorators.py index 324d6e9ea884..521fa1ae0963 100644 --- a/lib/galaxy_test/base/decorators.py +++ b/lib/galaxy_test/base/decorators.py @@ -67,6 +67,14 @@ def wrapped_method(*args, **kwargs): return wrapped_method +def requires_tool_id(tool_id: str): + + def method_wrapper(method): + return getattr(pytest.mark, "requires_tool_id")(tool_id)(method) + + return method_wrapper + + def requires_new_history(method): return _wrap_method_with_galaxy_requirement(method, "new_history") diff --git a/lib/galaxy_test/base/interactor.py b/lib/galaxy_test/base/interactor.py index 45c055ce817b..4dabb5963b07 100644 --- a/lib/galaxy_test/base/interactor.py +++ b/lib/galaxy_test/base/interactor.py @@ -4,9 +4,12 @@ class TestCaseGalaxyInteractor(GalaxyInteractorApi): def __init__(self, functional_test_case, test_user=None, api_key=None): self.functional_test_case = functional_test_case + admin_api_key = getattr(functional_test_case, "master_api_key", None) or getattr( + functional_test_case, "admin_api_key", None + ) super().__init__( galaxy_url=functional_test_case.url, - master_api_key=getattr(functional_test_case, "master_api_key", None), + master_api_key=admin_api_key, api_key=api_key or getattr(functional_test_case, "user_api_key", None), test_user=test_user, keep_outputs_dir=getattr(functional_test_case, "keepOutdir", None), diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py index a31c5fe73956..d917ca7081e6 100644 --- a/lib/galaxy_test/base/populators.py +++ 
b/lib/galaxy_test/base/populators.py
@@ -57,11 +57,12 @@
 from typing import (
     Any,
     Callable,
+    cast,
     Dict,
     Generator,
     List,
     NamedTuple,
     Optional,
     Set,
     Tuple,
     Union,
@@ -80,7 +81,11 @@
 from pydantic import UUID4
 from requests import Response
 from rocrate.rocrate import ROCrate
-from typing_extensions import Literal
+from typing_extensions import (
+    Literal,
+    Self,
+    TypedDict,
+)
 
 from galaxy.schema.schema import (
     CreateToolLandingRequestPayload,
@@ -118,6 +123,7 @@
 from galaxy_test.base.json_schema_utils import JsonSchemaValidator
 from . import api_asserts
 from .api import (
+    AnonymousGalaxyInteractor,
     ApiTestInteractor,
     HasAnonymousGalaxyInteractor,
 )
@@ -155,6 +161,15 @@ def wrapped_method(*args, **kwargs):
     return wrapped_method
 
 
+def get_tool_ids(interactor: AnonymousGalaxyInteractor):
+    index = interactor.get("tools", data=dict(in_panel=False))
+    api_asserts.assert_status_code_is_ok(index, "Failed to fetch toolbox for target Galaxy.")
+    tools = index.json()
+    # In panels by default, so flatten out sections...
+    tool_ids = [itemgetter("id")(_) for _ in tools]
+    return tool_ids
+
+
 def skip_without_tool(tool_id: str):
     """Decorate an API test method as requiring a specific tool.
 
@@ -162,18 +177,10 @@ def method_wrapper(method):
-        def get_tool_ids(api_test_case: HasAnonymousGalaxyInteractor):
-            interactor = api_test_case.anonymous_galaxy_interactor
-            index = interactor.get("tools", data=dict(in_panel=False))
-            api_asserts.assert_status_code_is_ok(index, "Failed to fetch toolbox for target Galaxy.")
-            tools = index.json()
-            # In panels by default, so flatten out sections...
- tool_ids = [itemgetter("id")(_) for _ in tools] - return tool_ids @wraps(method) def wrapped_method(api_test_case, *args, **kwargs): - _raise_skip_if(tool_id not in get_tool_ids(api_test_case)) + _raise_skip_if(tool_id not in get_tool_ids(api_test_case.anonymous_galaxy_interactor)) return method(api_test_case, *args, **kwargs) return wrapped_method @@ -684,6 +691,11 @@ def wait_for_job( def get_job_details(self, job_id: str, full: bool = False) -> Response: return self._get(f"jobs/{job_id}", {"full": full}) + def job_outputs(self, job_id: str) -> List[Dict[str, Any]]: + outputs = self._get(f"jobs/{job_id}/outputs") + outputs.raise_for_status() + return outputs.json() + def compute_hash( self, dataset_id: str, @@ -851,19 +863,27 @@ def _cleanup_history(self, history_id: str) -> None: @contextlib.contextmanager def test_history_for(self, method) -> Generator[str, None, None]: require_new_history = has_requirement(method, "new_history") - with self.test_history(require_new=require_new_history) as history_id: + name = f"API Test History for {method.__name__}" + with self.test_history(require_new=require_new_history, name=name) as history_id: yield history_id @contextlib.contextmanager - def test_history(self, require_new: bool = True) -> Generator[str, None, None]: + def test_history(self, require_new: bool = True, name: Optional[str] = None) -> Generator[str, None, None]: with self._test_history(require_new=require_new, cleanup_callback=self._cleanup_history) as history_id: yield history_id @contextlib.contextmanager def _test_history( - self, require_new: bool = True, cleanup_callback: Optional[Callable[[str], None]] = None + self, + require_new: bool = True, + cleanup_callback: Optional[Callable[[str], None]] = None, + name: Optional[str] = None, ) -> Generator[str, None, None]: - history_id = self.new_history() + if name is not None: + kwds = {"name": name} + else: + kwds = {} + history_id = self.new_history(**kwds) try: yield history_id except Exception: @@ 
-983,6 +1003,9 @@ def tools_post(self, payload: dict, url="tools") -> Response: tool_response = self._post(url, data=payload) return tool_response + def describe_tool_execution(self, tool_id: str) -> "DescribeToolExecution": + return DescribeToolExecution(self, tool_id) + def materialize_dataset_instance( self, history_id: str, id: str, source: str = "hda", validate_hashes: bool = False ): @@ -1737,7 +1760,10 @@ def _summarize_history(self, history_id): @contextlib.contextmanager def _test_history( - self, require_new: bool = True, cleanup_callback: Optional[Callable[[str], None]] = None + self, + require_new: bool = True, + cleanup_callback: Optional[Callable[[str], None]] = None, + name: Optional[str] = None, ) -> Generator[str, None, None]: with self.galaxy_interactor.test_history( require_new=require_new, cleanup_callback=cleanup_callback @@ -2917,6 +2943,32 @@ def create_nested_collection( ) return self.__create(payload) + def example_list_of_pairs(self, history_id: str) -> str: + response = self.upload_collection( + history_id, + "list:paired", + elements=[ + { + "name": "test0", + "elements": [ + {"src": "pasted", "paste_content": "123\n", "name": "forward", "ext": "txt"}, + {"src": "pasted", "paste_content": "456\n", "name": "reverse", "ext": "txt"}, + ], + }, + { + "name": "test1", + "elements": [ + {"src": "pasted", "paste_content": "789\n", "name": "forward", "ext": "txt"}, + {"src": "pasted", "paste_content": "0ab\n", "name": "reverse", "ext": "txt"}, + ], + }, + ], + wait=True, + ) + api_asserts.assert_status_code_is_ok(response) + hdca_id = response.json()["outputs"][0]["id"] + return hdca_id + def create_list_of_pairs_in_history(self, history_id, **kwds): return self.upload_collection( history_id, @@ -3350,6 +3402,290 @@ def _store_payload( return payload +class DescribeToolExecutionOutput: + + def __init__(self, dataset_populator: BaseDatasetPopulator, history_id: str, hda_id: str): + self._dataset_populator = dataset_populator + self._history_id = 
history_id + self._hda_id = hda_id + + @property + def details(self) -> Dict[str, Any]: + dataset_details = self._dataset_populator.get_history_dataset_details(self._history_id, dataset_id=self._hda_id) + return dataset_details + + @property + def contents(self) -> str: + return self._dataset_populator.get_history_dataset_content(history_id=self._history_id, dataset_id=self._hda_id) + + def with_contents(self, expected_contents: str) -> Self: + contents = self.contents + if contents != expected_contents: + raise AssertionError(f"Output dataset had contents {contents} but expected {expected_contents}") + return self + + def with_contents_stripped(self, expected_contents: str) -> Self: + contents = self.contents + if contents.strip() != expected_contents: + raise AssertionError(f"Output dataset had contents {contents} but expected {expected_contents}") + return self + + def containing(self, expected_contents: str) -> Self: + contents = self.contents + if expected_contents not in contents: + raise AssertionError( + f"Output dataset had contents {contents} which does not contain the expected text {expected_contents}" + ) + return self + + def with_file_ext(self, expected_ext: str) -> Self: + ext = self.details["file_ext"] + if ext != expected_ext: + raise AssertionError(f"Output dataset had file extension {ext}, not the expected extension {expected_ext}") + return self + + # aliases that might help make tests more like English in particular cases. 
Declaring them explicitly
+    # instead quick little aliases because of https://github.com/python/mypy/issues/6700
+    def assert_contains(self, expected_contents: str) -> Self:
+        return self.containing(expected_contents)
+
+    def assert_has_contents(self, expected_contents: str) -> Self:
+        return self.with_contents(expected_contents)
+
+
+class DescribeToolExecutionOutputCollection:
+
+    def __init__(self, dataset_populator: BaseDatasetPopulator, history_id: str, hdca_id: str):
+        self._dataset_populator = dataset_populator
+        self._history_id = history_id
+        self._hdca_id = hdca_id
+
+    @property
+    def details(self) -> Dict[str, Any]:
+        collection_details = self._dataset_populator.get_history_collection_details(
+            self._history_id, content_id=self._hdca_id
+        )
+        return collection_details
+
+    @property
+    def elements(self) -> List[Dict[str, Any]]:
+        return self.details["elements"]
+
+    def with_n_elements(self, n: int) -> Self:
+        count = len(self.elements)
+        if count != n:
+            raise AssertionError(f"Collection contained {count} elements and not the expected {n} elements")
+        return self
+
+    def with_element_dict(self, index: Union[str, int]) -> Dict[str, Any]:
+        elements = self.elements
+        if isinstance(index, int):
+            element_dict = elements[index]
+        else:
+            element_dict = [e for e in elements if e["element_identifier"] == index][0]
+        return element_dict
+
+    def with_dataset_element(self, index: Union[str, int]) -> "DescribeToolExecutionOutput":
+        element_dict = self.with_element_dict(index)
+        element_object = element_dict["object"]
+        return DescribeToolExecutionOutput(self._dataset_populator, self._history_id, element_object["id"])
+
+    def named(self, expected_name: str) -> Self:
+        name = self.details["name"]
+        if name != expected_name:
+            raise AssertionError(f"Dataset collection named {name} did not have expected name {expected_name}.")
+        return self
+
+    # aliases that might help make tests more like English in particular cases.
+    def assert_has_dataset_element(self, index: Union[str, int]) -> "DescribeToolExecutionOutput":
+        return self.with_dataset_element(index)
+
+
+class DescribeJob:
+
+    def __init__(self, dataset_populator: BaseDatasetPopulator, history_id: str, job_id: str):
+        self._dataset_populator = dataset_populator
+        self._history_id = history_id
+        self._job_id = job_id
+        self._final_details: Optional[Dict[str, Any]] = None
+
+    def _wait_for(self):
+        if self._final_details is None:
+            self._dataset_populator.wait_for_job(self._job_id, assert_ok=False)
+            self._final_details = self._dataset_populator.get_job_details(self._job_id).json()
+
+    @property
+    def final_details(self) -> Dict[str, Any]:
+        self._wait_for()
+        final_details = self._final_details
+        assert final_details
+        return final_details
+
+    @property
+    def final_state(self) -> str:
+        final_state = self.final_details["state"]
+        assert final_state
+        return final_state
+
+    def with_final_state(self, expected_state: str) -> Self:
+        final_state = self.final_state
+        if final_state != expected_state:
+            raise AssertionError(
+                f"Expected job {self._job_id} to end with state {expected_state} but it ended with state {final_state}"
+            )
+        return self
+
+    @property
+    def with_single_output(self) -> DescribeToolExecutionOutput:
+        return self.with_output(0)
+
+    def with_output(self, output: Union[str, int]) -> DescribeToolExecutionOutput:
+        self.with_final_state("ok")
+        outputs = self._dataset_populator.job_outputs(self._job_id)
+        by_name = isinstance(output, str)
+        dataset_id: Optional[str] = None
+        if by_name:
+            for output_assoc in outputs:
+                if output_assoc["name"] == output:
+                    dataset_id = output_assoc["dataset"]["id"]
+        else:
+            assert isinstance(output, int)
+            dataset_id = outputs[output]["dataset"]["id"]
+        if dataset_id is None:
+            raise AssertionError(f"Could not find job output identified by {output}")
+        return DescribeToolExecutionOutput(self._dataset_populator, self._history_id, dataset_id)
+
+    # aliases that might help make tests more like English in particular cases.
+    def assert_has_output(self, output: Union[str, int]) -> DescribeToolExecutionOutput:
+        return self.with_output(output)
+
+    @property
+    def assert_has_single_output(self) -> DescribeToolExecutionOutput:
+        return self.with_single_output
+
+
+class DescribeFailure:
+    def __init__(self, response: Response):
+        self._response = response
+
+    def with_status_code(self, code: int) -> Self:
+        api_asserts.assert_status_code_is(self._response, code)
+        return self
+
+    def with_error_containing(self, message: str) -> Self:
+        assert message in self._response.text
+        return self
+
+
+class RequiredTool:
+
+    def __init__(self, dataset_populator: BaseDatasetPopulator, tool_id: str, default_history_id: Optional[str]):
+        self._dataset_populator = dataset_populator
+        self._tool_id = tool_id
+        self._default_history_id = default_history_id
+
+    @property
+    def execute(self) -> "DescribeToolExecution":
+        execution = DescribeToolExecution(self._dataset_populator, self._tool_id)
+        if self._default_history_id:
+            execution.in_history(self._default_history_id)
+        return execution
+
+
+class DescribeToolExecution:
+    _history_id: Optional[str] = None
+    _execute_response: Optional[Response] = None
+    _input_format: Optional[str] = None
+    _inputs: Dict[str, Any]
+
+    def __init__(self, dataset_populator: BaseDatasetPopulator, tool_id: str):
+        self._dataset_populator = dataset_populator
+        self._tool_id = tool_id
+        self._inputs = {}
+
+    def in_history(self, has_history_id: Union[str, "TargetHistory"]) -> Self:
+        if isinstance(has_history_id, str):
+            self._history_id = has_history_id
+        else:
+            self._history_id = has_history_id._history_id
+        return self
+
+    def with_inputs(self, inputs: Dict[str, Any]) -> Self:
+        self._inputs = inputs
+        return self
+
+    def with_nested_inputs(self, inputs: Dict[str, Any]) -> Self:
+        self._inputs = inputs
+        self._input_format = "21.01"
+        return self
+
+    def _execute(self):
+        kwds = {}
+        if self._input_format is not None:
+            kwds["input_format"] = self._input_format
+        history_id = self._ensure_history_id
+        self._execute_response = self._dataset_populator.run_tool_raw(
+            self._tool_id, self._inputs, history_id, assert_ok=False, **kwds
+        )
+
+    @property
+    def _ensure_history_id(self) -> str:
+        history_id = self._history_id
+        if history_id is None:
+            raise AssertionError("Problem building test execution - no history ID has been specified.")
+        return history_id
+
+    def _ensure_executed(self) -> None:
+        if self._execute_response is None:
+            self._execute()
+
+    def _assert_executed_ok(self) -> Dict[str, Any]:
+        self._ensure_executed()
+        execute_response = self._execute_response
+        assert execute_response is not None
+        api_asserts.assert_status_code_is_ok(execute_response)
+        return execute_response.json()
+
+    def assert_has_n_jobs(self, n: int) -> Self:
+        response = self._assert_executed_ok()
+        jobs = response["jobs"]
+        if len(jobs) != n:
+            raise AssertionError(f"Expected tool execution to produce {n} jobs but it produced {len(jobs)}")
+        return self
+
+    def assert_creates_n_implicit_collections(self, n: int) -> Self:
+        response = self._assert_executed_ok()
+        collections = response["implicit_collections"]
+        if len(collections) != n:
+            raise AssertionError(
+                f"Expected tool execution to produce {n} implicit collections but it produced {len(collections)}"
+            )
+        return self
+
+    def assert_creates_implicit_collection(self, index: Union[str, int]) -> "DescribeToolExecutionOutputCollection":
+        response = self._assert_executed_ok()
+        collections = response["implicit_collections"]
+        assert isinstance(index, int)  # TODO: implement and then prefer str.
+ history_id = self._ensure_history_id + return DescribeToolExecutionOutputCollection(self._dataset_populator, history_id, collections[index]["id"]) + + @property + def assert_has_single_job(self) -> DescribeJob: + return self.assert_has_n_jobs(1).assert_has_job(0) + + def assert_has_job(self, job_index: int = 0) -> DescribeJob: + response = self._assert_executed_ok() + job = response["jobs"][job_index] + history_id = self._ensure_history_id + return DescribeJob(self._dataset_populator, history_id, job["id"]) + + @property + def assert_fails(self) -> DescribeFailure: + self._ensure_executed() + execute_response = self._execute_response + assert execute_response is not None + api_asserts.assert_status_code_is_not_ok(execute_response) + return DescribeFailure(execute_response) + + class GiHttpMixin: """Mixin for adapting Galaxy testing populators helpers to bioblend.""" @@ -3430,6 +3766,95 @@ def __init__(self, gi): self.dataset_populator = GiDatasetPopulator(gi) +ListContentsDescription = Union[List[str], List[Tuple[str, str]]] + + +class TargetHistory: + + def __init__( + self, + dataset_populator: DatasetPopulator, + dataset_collection_populator: DatasetCollectionPopulator, + history_id: str, + ): + self._dataset_populator = dataset_populator + self._dataset_collection_populator = dataset_collection_populator + self._history_id = history_id + + @property + def id(self) -> str: + return self._history_id + + def with_dataset( + self, + content: str, + named: Optional[str] = None, + ) -> "HasSrcDict": + kwd = {} + if named is not None: + kwd["name"] = named + new_dataset = self._dataset_populator.new_dataset( + history_id=self._history_id, + content=content, + assert_ok=True, + wait=True, + **kwd, + ) + return HasSrcDict("hda", new_dataset) + + def with_pair(self, contents: Optional[List[str]] = None) -> "HasSrcDict": + return self._fetch_response( + self._dataset_collection_populator.create_pair_in_history( + self._history_id, contents=contents, direct_upload=True, 
wait=True + ) + ) + + def with_list(self, contents: Optional[ListContentsDescription] = None) -> "HasSrcDict": + return self._fetch_response( + self._dataset_collection_populator.create_list_in_history( + self._history_id, contents=contents, direct_upload=True, wait=True + ) + ) + + def with_example_list_of_pairs(self) -> "HasSrcDict": + return HasSrcDict("hdca", self._dataset_collection_populator.example_list_of_pairs(self._history_id)) + + @classmethod + def _fetch_response(clz, response: Response) -> "HasSrcDict": + api_asserts.assert_status_code_is_ok(response) + hdca = response.json()["output_collections"][0] + return HasSrcDict("hdca", hdca) + + def execute(self, tool_id: str) -> "DescribeToolExecution": + return self._dataset_populator.describe_tool_execution(tool_id).in_history(self) + + +class SrcDict(TypedDict): + src: str + id: str + + +class HasSrcDict: + api_object: Union[str, Dict[str, Any]] + + def __init__(self, src_type: str, api_object: Union[str, Dict[str, Any]]): + self.src_type = src_type + self.api_object = api_object + + @property + def id(self) -> str: + has_id = self.api_object + return has_id if isinstance(has_id, str) else cast(str, has_id["id"]) + + @property + def src_dict(self) -> SrcDict: + return SrcDict({"src": self.src_type, "id": self.id}) + + @property + def to_dict(self): + return self.api_object + + def wait_on(function: Callable, desc: str, timeout: timeout_type = DEFAULT_TIMEOUT): return tool_util_wait_on(function, desc, timeout) diff --git a/pytest.ini b/pytest.ini index 7c1d40306310..31665649c393 100644 --- a/pytest.ini +++ b/pytest.ini @@ -10,6 +10,7 @@ markers = external_dependency_management: slow tests which resolves dependencies with e.g. 
conda require_new_history: test that needs to be given a new history tool: marks test as a tool test + requires_tool_id: marks API test as requiring specified tool id gtn_screenshot: marks test as a screenshot producer for galaxy training network local: mark indicates, that it is sufficient to run test locally to get relevant artifacts (e.g. screenshots) external: mark indicates, that test has to be run against external production server to get relevant artifacts (e.g. screenshots) From 687f3661f114b15e923324339edd4a17feac6c71 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Wed, 9 Oct 2024 12:11:25 -0400 Subject: [PATCH 3/5] Migrate various tool execute tests to more declarative style. --- lib/galaxy_test/api/test_tool_execute.py | 220 +++++++++++++ lib/galaxy_test/api/test_tools.py | 380 +---------------------- lib/galaxy_test/base/decorators.py | 2 +- lib/galaxy_test/base/populators.py | 4 +- 4 files changed, 231 insertions(+), 375 deletions(-) create mode 100644 lib/galaxy_test/api/test_tool_execute.py diff --git a/lib/galaxy_test/api/test_tool_execute.py b/lib/galaxy_test/api/test_tool_execute.py new file mode 100644 index 000000000000..b77ebcbb0cd1 --- /dev/null +++ b/lib/galaxy_test/api/test_tool_execute.py @@ -0,0 +1,220 @@ +"""Test tool execution pieces. + +Longer term ideally we would separate all the tool tests in test_tools.py that +describe tool execution into this file and make sure we have parallel or matching +tests for both the legacy tool execution API and the tool request API. We would then +keep things like testing other tool APIs in ./test_tools.py (index, search, tool test +files, etc..). 
+""" + +from galaxy_test.base.decorators import requires_tool_id +from galaxy_test.base.populators import ( + RequiredTool, + TargetHistory, +) + + +@requires_tool_id("multi_data_param") +def test_multidata_param(target_history: TargetHistory, required_tool: RequiredTool): + hda1 = target_history.with_dataset("1\t2\t3").src_dict + hda2 = target_history.with_dataset("4\t5\t6").src_dict + execution = required_tool.execute.with_inputs( + { + "f1": {"batch": False, "values": [hda1, hda2]}, + "f2": {"batch": False, "values": [hda2, hda1]}, + } + ) + execution.assert_has_job(0).with_output("out1").with_contents("1\t2\t3\n4\t5\t6\n") + execution.assert_has_job(0).with_output("out2").with_contents("4\t5\t6\n1\t2\t3\n") + + +@requires_tool_id("expression_forty_two") +def test_galaxy_expression_tool_simplest(required_tool: RequiredTool): + required_tool.execute.assert_has_single_job.with_single_output.with_contents("42") + + +@requires_tool_id("expression_parse_int") +def test_galaxy_expression_tool_simple(required_tool: RequiredTool): + execution = required_tool.execute.with_inputs({"input1": "7"}) + execution.assert_has_single_job.with_single_output.with_contents("7") + + +@requires_tool_id("expression_log_line_count") +def test_galaxy_expression_metadata(target_history: TargetHistory, required_tool: RequiredTool): + hda1 = target_history.with_dataset("1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14").src_dict + execution = required_tool.execute.with_inputs({"input1": hda1}) + execution.assert_has_single_job.with_single_output.with_contents("3") + + +@requires_tool_id("multi_select") +def test_multi_select_as_list(required_tool: RequiredTool): + execution = required_tool.execute.with_inputs({"select_ex": ["--ex1", "ex2"]}) + execution.assert_has_single_job.with_output("output").with_contents("--ex1,ex2") + + +@requires_tool_id("multi_select") +def test_multi_select_optional(required_tool: RequiredTool): + execution = required_tool.execute.with_inputs( + { + "select_ex": 
["--ex1"], + "select_optional": None, + } + ) + job = execution.assert_has_single_job + job.assert_has_output("output").with_contents("--ex1") + job.assert_has_output("output2").with_contents_stripped("None") + + +@requires_tool_id("identifier_single") +def test_identifier_outside_map(target_history: TargetHistory, required_tool: RequiredTool): + hda = target_history.with_dataset("123", named="Plain HDA") + execute = required_tool.execute.with_inputs({"input1": hda.src_dict}) + execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("Plain HDA") + + +@requires_tool_id("identifier_multiple") +def test_identifier_in_multiple_reduce(target_history: TargetHistory, required_tool: RequiredTool): + hdca = target_history.with_pair() + execute = required_tool.execute.with_inputs({"input1": hdca.src_dict}) + execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("forward\nreverse") + + +@requires_tool_id("identifier_in_conditional") +def test_identifier_map_over_multiple_input_in_conditional_legacy_format( + target_history: TargetHistory, required_tool: RequiredTool +): + hdca = target_history.with_pair() + execute = required_tool.execute.with_inputs( + { + "outer_cond|input1": hdca.src_dict, + } + ) + execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("forward\nreverse") + + +@requires_tool_id("identifier_in_conditional") +def test_identifier_map_over_multiple_input_in_conditional_21_01_format( + target_history: TargetHistory, required_tool: RequiredTool +): + hdca = target_history.with_pair() + execute = required_tool.execute.with_nested_inputs( + { + "outer_cond": { + "multi_input": True, + "input1": hdca.src_dict, + }, + } + ) + execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("forward\nreverse") + + +@requires_tool_id("identifier_multiple_in_repeat") +def test_identifier_multiple_reduce_in_repeat_new_payload_form( + target_history: TargetHistory, required_tool: 
RequiredTool +): + hdca = target_history.with_pair() + execute = required_tool.execute.with_nested_inputs( + { + "the_repeat": [{"the_data": {"input1": hdca.src_dict}}], + } + ) + execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("forward\nreverse") + + +@requires_tool_id("output_action_change_format") +def test_map_over_with_output_format_actions(target_history: TargetHistory, required_tool: RequiredTool): + hdca = target_history.with_pair() + for use_action in ["do", "dont"]: + execute = required_tool.execute.with_inputs( + { + "input_cond|dispatch": use_action, + "input_cond|input": {"batch": True, "values": [hdca.src_dict]}, + } + ) + execute.assert_has_n_jobs(2).assert_creates_n_implicit_collections(1) + expected_extension = "txt" if (use_action == "do") else "data" + execute.assert_has_job(0).with_single_output.with_file_ext(expected_extension) + execute.assert_has_job(1).with_single_output.with_file_ext(expected_extension) + + +@requires_tool_id("output_action_change_format_paired") +def test_map_over_with_nested_paired_output_format_actions(target_history: TargetHistory, required_tool: RequiredTool): + hdca = target_history.with_example_list_of_pairs() + execute = required_tool.execute.with_inputs( + {"input": {"batch": True, "values": [dict(map_over_type="paired", **hdca.src_dict)]}} + ) + execute.assert_has_n_jobs(2).assert_creates_n_implicit_collections(1) + execute.assert_has_job(0).with_single_output.with_file_ext("txt") + execute.assert_has_job(1).with_single_output.with_file_ext("txt") + + +@requires_tool_id("identifier_collection") +def test_identifier_with_data_collection(target_history: TargetHistory, required_tool: RequiredTool): + contents = [("foo", "text for foo element"), ("bar", "more text for bar element")] + hdca = target_history.with_list(contents) + execute = required_tool.execute.with_inputs({"input1": hdca.src_dict}) + execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("foo\nbar") 
+ + +@requires_tool_id("identifier_in_actions") +def test_identifier_in_actions(target_history: TargetHistory, required_tool: RequiredTool): + contents = [("foo", "text for foo element"), ("bar", "more text for bar element")] + hdca = target_history.with_list(contents) + + execute = required_tool.execute.with_inputs({"input": {"batch": True, "values": [hdca.src_dict]}}) + + output = execute.assert_has_job(0).assert_has_single_output + assert output.details["metadata_column_names"][1] == "foo", output.details + + output = execute.assert_has_job(1).assert_has_single_output + assert output.details["metadata_column_names"][1] == "bar", output.details + + +@requires_tool_id("identifier_single_in_repeat") +def test_identifier_single_in_repeat(target_history: TargetHistory, required_tool: RequiredTool): + hdca = target_history.with_pair() + execute = required_tool.execute.with_inputs( + {"the_repeat_0|the_data|input1": {"batch": True, "values": [hdca.src_dict]}} + ) + execute.assert_has_n_jobs(2).assert_creates_n_implicit_collections(1) + output_collection = execute.assert_creates_implicit_collection(0) + output_collection.assert_has_dataset_element("forward").with_contents_stripped("forward") + output_collection.assert_has_dataset_element("reverse").with_contents_stripped("reverse") + + +@requires_tool_id("identifier_multiple_in_conditional") +def test_identifier_multiple_in_conditional(target_history: TargetHistory, required_tool: RequiredTool): + hda = target_history.with_dataset("123", named="Normal HDA1") + execute = required_tool.execute.with_inputs( + { + "outer_cond|inner_cond|input1": hda.src_dict, + } + ) + execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("Normal HDA1") + + +@requires_tool_id("identifier_multiple") +def test_identifier_with_multiple_normal_datasets(target_history: TargetHistory, required_tool: RequiredTool): + hda1 = target_history.with_dataset("123", named="Normal HDA1") + hda2 = target_history.with_dataset("456", 
named="Normal HDA2") + execute = required_tool.execute.with_inputs({"input1": [hda1.src_dict, hda2.src_dict]}) + execute.assert_has_single_job.assert_has_single_output.with_contents_stripped("Normal HDA1\nNormal HDA2") + + +@requires_tool_id("cat1") +def test_map_over_empty_collection(target_history: TargetHistory, required_tool: RequiredTool): + hdca = target_history.with_list([]) + inputs = { + "input1": {"batch": True, "values": [hdca.src_dict]}, + } + execute = required_tool.execute.with_inputs(inputs) + execute.assert_has_n_jobs(0) + execute.assert_creates_implicit_collection(0).named("Concatenate datasets on collection 1") + + +@requires_tool_id("gx_repeat_boolean_min") +def test_optional_repeats_with_mins_filled_id(target_history: TargetHistory, required_tool: RequiredTool): + # we have a tool test for this but I wanted to verify it wasn't just the + # tool test framework filling in a default. Creating a raw request here + # verifies that currently select parameters don't require a selection. 
+ required_tool.execute.assert_has_single_job.with_single_output.containing("false").containing("length: 2") diff --git a/lib/galaxy_test/api/test_tools.py b/lib/galaxy_test/api/test_tools.py index af3034e014e1..46c6cdad3473 100644 --- a/lib/galaxy_test/api/test_tools.py +++ b/lib/galaxy_test/api/test_tools.py @@ -963,45 +963,6 @@ def test_drill_down_first_by_default(self): output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output) assert output1_content.strip() == "parameter: aba" - @skip_without_tool("multi_select") - def test_multi_select_as_list(self): - with self.dataset_populator.test_history(require_new=False) as history_id: - inputs = { - "select_ex": ["--ex1", "ex2"], - } - response = self._run("multi_select", history_id, inputs, assert_ok=True) - output = response["outputs"][0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output) - - assert output1_content == "--ex1,ex2" - - @skip_without_tool("multi_select") - def test_multi_select_optional(self): - with self.dataset_populator.test_history(require_new=False) as history_id: - inputs = { - "select_ex": ["--ex1"], - "select_optional": None, - } - response = self._run("multi_select", history_id, inputs, assert_ok=True) - output = response["outputs"] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output[0]) - output2_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output[1]) - assert output1_content.strip() == "--ex1" - assert output2_content.strip() == "None", output2_content - - @skip_without_tool("gx_repeat_boolean_min") - def test_optional_repeats_with_mins_filled_id(self): - # we have a tool test for this but I wanted to verify it wasn't just the - # tool test framework filling in a default. Creating a raw request here - # verifies that currently select parameters don't require a selection. 
- with self.dataset_populator.test_history(require_new=False) as history_id: - inputs: Dict[str, Any] = {} - response = self._run("gx_repeat_boolean_min", history_id, inputs, assert_ok=True) - output = response["outputs"][0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output) - assert "false" in output1_content - assert "length: 2" in output1_content - def test_data_column_defaults(self): for input_format in ["legacy", "21.01"]: tabular_contents = "1\t2\t3\t\n4\t5\t6\n" @@ -1078,23 +1039,6 @@ def test_library_data_param(self): output_multiple_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output[1]) assert output_multiple_content == "TestData\nTestData\n", output_multiple_content - @skip_without_tool("multi_data_param") - def test_multidata_param(self): - with self.dataset_populator.test_history(require_new=False) as history_id: - hda1 = dataset_to_param(self.dataset_populator.new_dataset(history_id, content="1\t2\t3")) - hda2 = dataset_to_param(self.dataset_populator.new_dataset(history_id, content="4\t5\t6")) - inputs = { - "f1": {"batch": False, "values": [hda1, hda2]}, - "f2": {"batch": False, "values": [hda2, hda1]}, - } - response = self._run("multi_data_param", history_id, inputs, assert_ok=True) - output1 = response["outputs"][0] - output2 = response["outputs"][1] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - output2_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output2) - assert output1_content == "1\t2\t3\n4\t5\t6\n", output1_content - assert output2_content == "4\t5\t6\n1\t2\t3\n", output2_content - @skip_without_tool("cat1") def test_run_cat1(self): with self.dataset_populator.test_history(require_new=False) as history_id: @@ -1837,60 +1781,6 @@ def test_map_over_collection(self, history_id): } self._run_and_check_simple_collection_mapping(history_id, inputs) - 
@skip_without_tool("cat1") - def test_map_over_empty_collection(self, history_id): - response = self.dataset_collection_populator.create_list_in_history(history_id, contents=[], wait=True).json() - hdca_id = response["output_collections"][0]["id"] - inputs = { - "input1": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]}, - } - create = self._run_cat1(history_id, inputs=inputs, assert_ok=True) - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 0 - assert len(outputs) == 0 - assert len(implicit_collections) == 1 - - empty_output = implicit_collections[0] - assert empty_output["name"] == "Concatenate datasets on collection 1", empty_output - - @skip_without_tool("output_action_change_format") - def test_map_over_with_output_format_actions(self, history_id): - for use_action in ["do", "dont"]: - hdca_id = self._build_pair(history_id, ["123", "456"]) - inputs = { - "input_cond|dispatch": use_action, - "input_cond|input": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]}, - } - create = self._run("output_action_change_format", history_id, inputs).json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 2 - assert len(outputs) == 2 - assert len(implicit_collections) == 1 - output1 = outputs[0] - output2 = outputs[1] - output1_details = self.dataset_populator.get_history_dataset_details(history_id, dataset=output1) - output2_details = self.dataset_populator.get_history_dataset_details(history_id, dataset=output2) - assert output1_details["file_ext"] == "txt" if (use_action == "do") else "data" - assert output2_details["file_ext"] == "txt" if (use_action == "do") else "data" - - @skip_without_tool("output_action_change_format_paired") - def test_map_over_with_nested_paired_output_format_actions(self, history_id): - hdca_id = self.__build_nested_list(history_id) - inputs = {"input": {"batch": 
True, "values": [dict(map_over_type="paired", src="hdca", id=hdca_id)]}} - create = self._run("output_action_change_format_paired", history_id, inputs).json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 2 - assert len(outputs) == 2 - assert len(implicit_collections) == 1 - for output in outputs: - assert output["file_ext"] == "txt", output - @skip_without_tool("output_filter_with_input") def test_map_over_with_output_filter_no_filtering(self, history_id): hdca_id = self.dataset_collection_populator.create_list_in_history(history_id, wait=True).json()["outputs"][0][ @@ -2032,85 +1922,6 @@ def test_list_selectable_in_multidata_input(self, history_id): build = self.dataset_populator.build_tool_state("identifier_multiple", history_id) assert len(build["inputs"][0]["options"]["hdca"]) == 1 - @skip_without_tool("identifier_multiple") - def test_identifier_in_multiple_reduce(self, history_id): - hdca_id = self._build_pair(history_id, ["123", "456"]) - inputs = { - "input1": {"src": "hdca", "id": hdca_id}, - } - create_response = self._run("identifier_multiple", history_id, inputs) - self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 1 - assert len(outputs) == 1 - assert len(implicit_collections) == 0 - output1 = outputs[0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - assert output1_content.strip() == "forward\nreverse" - - @skip_without_tool("identifier_in_conditional") - def test_identifier_map_over_multiple_input_in_conditional(self, history_id): - hdca_id = self._build_pair(history_id, ["123", "456"]) - inputs = { - "outer_cond|input1": {"src": "hdca", "id": hdca_id}, - } - create_response = self._run("identifier_in_conditional", history_id, inputs) - 
self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 1 - assert len(outputs) == 1 - assert len(implicit_collections) == 0 - output1 = outputs[0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - assert output1_content.strip() == "forward\nreverse" - - @skip_without_tool("identifier_in_conditional") - def test_identifier_map_over_multiple_input_in_conditional_new_payload_form(self, history_id): - hdca_id = self._build_pair(history_id, ["123", "456"]) - inputs = { - "outer_cond": { - "multi_input": True, - "input1": {"id": hdca_id, "src": "hdca"}, - }, - } - create_response = self._run("identifier_in_conditional", history_id, inputs, input_format="21.01") - self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 1 - assert len(outputs) == 1 - assert len(implicit_collections) == 0 - output1 = outputs[0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - assert output1_content.strip() == "forward\nreverse" - - @skip_without_tool("identifier_multiple_in_repeat") - def test_identifier_multiple_reduce_in_repeat_new_payload_form(self, history_id): - hdca_id = self._build_pair(history_id, ["123", "456"]) - inputs = { - "the_repeat": [{"the_data": {"input1": {"src": "hdca", "id": hdca_id}}}], - } - create_response = self._run("identifier_multiple_in_repeat", history_id, inputs, input_format="21.01") - self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 1 - assert len(outputs) == 1 - assert 
len(implicit_collections) == 0 - output1 = outputs[0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - assert output1_content.strip() == "forward\nreverse" - @skip_without_tool("identifier_in_conditional") def test_identifier_map_over_input_in_conditional(self, history_id): # Run cat tool, so HDA names are different from element identifiers @@ -2119,21 +1930,14 @@ def test_identifier_map_over_input_in_conditional(self, history_id): "outer_cond|input1": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]}, "outer_cond|multi_input": False, } - create_response = self._run("identifier_in_conditional", history_id, inputs) - self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 2 - assert len(outputs) == 2 - assert len(implicit_collections) == 1 - output1 = outputs[0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - assert output1_content.strip() == "forward" - output2 = outputs[1] - output2_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output2) - assert output2_content.strip() == "reverse" + execute = ( + self.dataset_populator.describe_tool_execution("identifier_in_conditional") + .in_history(history_id) + .with_inputs(inputs) + ) + collection = execute.assert_has_n_jobs(2).assert_creates_implicit_collection(0) + collection.assert_has_dataset_element("forward").with_contents_stripped("forward") + collection.assert_has_dataset_element("reverse").with_contents_stripped("reverse") @skip_without_tool("identifier_multiple_in_conditional") def test_identifier_multiple_reduce_in_conditional(self, history_id): @@ -2154,138 +1958,6 @@ def test_identifier_multiple_reduce_in_conditional(self, history_id): output1_content = 
self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) assert output1_content.strip() == "forward\nreverse" - @skip_without_tool("identifier_multiple_in_repeat") - def test_identifier_multiple_reduce_in_repeat(self, history_id): - hdca_id = self._build_pair(history_id, ["123", "456"]) - inputs = { - "the_repeat_0|the_data|input1": {"src": "hdca", "id": hdca_id}, - } - create_response = self._run("identifier_multiple_in_repeat", history_id, inputs) - self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 1 - assert len(outputs) == 1 - assert len(implicit_collections) == 0 - output1 = outputs[0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - assert output1_content.strip() == "forward\nreverse" - - @skip_without_tool("identifier_single_in_repeat") - def test_identifier_single_in_repeat(self, history_id): - hdca_id = self._build_pair(history_id, ["123", "456"]) - inputs = {"the_repeat_0|the_data|input1": {"batch": True, "values": [{"src": "hdca", "id": hdca_id}]}} - create_response = self._run("identifier_single_in_repeat", history_id, inputs) - self._assert_status_code_is(create_response, 200) - create = create_response.json() - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 2 - assert len(implicit_collections) == 1 - output_collection = implicit_collections[0] - elements = output_collection["elements"] - assert len(elements) == 2 - forward_output = elements[0]["object"] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=forward_output) - assert output1_content.strip() == "forward", output1_content - - @skip_without_tool("identifier_multiple_in_conditional") - def test_identifier_multiple_in_conditional(self, history_id): - new_dataset1 = 
self.dataset_populator.new_dataset(history_id, content="123", name="Normal HDA1") - inputs = { - "outer_cond|inner_cond|input1": {"src": "hda", "id": new_dataset1["id"]}, - } - create_response = self._run("identifier_multiple_in_conditional", history_id, inputs) - self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 1 - assert len(outputs) == 1 - assert len(implicit_collections) == 0 - output1 = outputs[0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - assert output1_content.strip() == "Normal HDA1" - - @skip_without_tool("identifier_multiple") - def test_identifier_with_multiple_normal_datasets(self, history_id): - new_dataset1 = self.dataset_populator.new_dataset(history_id, content="123", name="Normal HDA1") - new_dataset2 = self.dataset_populator.new_dataset(history_id, content="456", name="Normal HDA2") - inputs = {"input1": [{"src": "hda", "id": new_dataset1["id"]}, {"src": "hda", "id": new_dataset2["id"]}]} - create_response = self._run("identifier_multiple", history_id, inputs) - self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - jobs = create["jobs"] - implicit_collections = create["implicit_collections"] - assert len(jobs) == 1 - assert len(outputs) == 1 - assert len(implicit_collections) == 0 - output1 = outputs[0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - assert output1_content.strip() == "Normal HDA1\nNormal HDA2" - - @skip_without_tool("identifier_collection") - def test_identifier_with_data_collection(self, history_id): - element_identifiers = self.dataset_collection_populator.list_identifiers(history_id) - - payload = dict( - instance_type="history", - history_id=history_id, - 
element_identifiers=element_identifiers, - collection_type="list", - ) - - create_response = self._post("dataset_collections", payload, json=True) - dataset_collection = create_response.json() - - inputs = { - "input1": {"src": "hdca", "id": dataset_collection["id"]}, - } - - self.dataset_populator.wait_for_history(history_id, assert_ok=True) - create_response = self._run("identifier_collection", history_id, inputs) - self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - jobs = create["jobs"] - assert len(jobs) == 1 - assert len(outputs) == 1 - output1 = outputs[0] - output1_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output1) - assert output1_content.strip() == "\n".join(d["name"] for d in element_identifiers) - - @skip_without_tool("identifier_in_actions") - def test_identifier_in_actions(self, history_id): - element_identifiers = self.dataset_collection_populator.list_identifiers(history_id, contents=["1\t2"]) - - payload = dict( - instance_type="history", - history_id=history_id, - element_identifiers=element_identifiers, - collection_type="list", - ) - - create_response = self._post("dataset_collections", payload, json=True) - dataset_collection = create_response.json() - - inputs = { - "input": {"batch": True, "values": [{"src": "hdca", "id": dataset_collection["id"]}]}, - } - - self.dataset_populator.wait_for_history(history_id, assert_ok=True) - create_response = self._run("identifier_in_actions", history_id, inputs) - self._assert_status_code_is(create_response, 200) - create = create_response.json() - outputs = create["outputs"] - output1 = outputs[0] - - output_details = self.dataset_populator.get_history_dataset_details(history_id, dataset=output1) - assert output_details["metadata_column_names"][1] == "data1", output_details - @skip_without_tool("cat1") def test_map_over_nested_collections(self, history_id): hdca_id = self.__build_nested_list(history_id) 
@@ -2903,42 +2575,6 @@ def test_group_tag_selection_multiple(self, history_id): output_content = self.dataset_populator.get_history_dataset_content(history_id, dataset=output) assert output_content.strip() == "123\n456\n456\n0ab" - @skip_without_tool("expression_forty_two") - def test_galaxy_expression_tool_simplest(self): - history_id = self.dataset_populator.new_history() - run_response = self._run("expression_forty_two", history_id) - self._assert_status_code_is(run_response, 200) - self.dataset_populator.wait_for_history(history_id, assert_ok=True) - output_content = self.dataset_populator.get_history_dataset_content(history_id) - assert output_content == "42" - - @skip_without_tool("expression_parse_int") - def test_galaxy_expression_tool_simple(self): - history_id = self.dataset_populator.new_history() - inputs = { - "input1": "7", - } - run_response = self._run("expression_parse_int", history_id, inputs) - self._assert_status_code_is(run_response, 200) - self.dataset_populator.wait_for_history(history_id, assert_ok=True) - output_content = self.dataset_populator.get_history_dataset_content(history_id) - assert output_content == "7" - - @skip_without_tool("expression_log_line_count") - def test_galaxy_expression_metadata(self): - history_id = self.dataset_populator.new_history() - new_dataset1 = self.dataset_populator.new_dataset( - history_id, content="1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\n13\n14" - ) - inputs = { - "input1": dataset_to_param(new_dataset1), - } - run_response = self._run("expression_log_line_count", history_id, inputs) - self._assert_status_code_is(run_response, 200) - self.dataset_populator.wait_for_history(history_id, assert_ok=True) - output_content = self.dataset_populator.get_history_dataset_content(history_id) - assert output_content == "3" - @skip_without_tool("cat1") def test_run_deferred_dataset(self, history_id): details = self.dataset_populator.create_deferred_hda( diff --git a/lib/galaxy_test/base/decorators.py 
b/lib/galaxy_test/base/decorators.py index 521fa1ae0963..95ea1a18ab28 100644 --- a/lib/galaxy_test/base/decorators.py +++ b/lib/galaxy_test/base/decorators.py @@ -70,7 +70,7 @@ def wrapped_method(*args, **kwargs): def requires_tool_id(tool_id: str): def method_wrapper(method): - return getattr(pytest.mark, "requires_tool_id")(tool_id)(method) + return pytest.mark.requires_tool_id(tool_id)(method) return method_wrapper diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py index d917ca7081e6..3ac65db5afd8 100644 --- a/lib/galaxy_test/base/populators.py +++ b/lib/galaxy_test/base/populators.py @@ -63,7 +63,6 @@ List, NamedTuple, Optional, - Self, Set, Tuple, Union, @@ -84,6 +83,7 @@ from rocrate.rocrate import ROCrate from typing_extensions import ( Literal, + Self, TypedDict, ) @@ -3531,7 +3531,7 @@ def with_final_state(self, expected_state: str) -> Self: final_state = self.final_state if final_state != expected_state: raise AssertionError( - f"Expected job {self._job_id} to end with state {state} but it ended with state {final_state}" + f"Expected job {self._job_id} to end with state {expected_state} but it ended with state {final_state}" ) return self From 2509234dc0c81cf8431baacf39a20ff15b07b75a Mon Sep 17 00:00:00 2001 From: John Chilton Date: Fri, 11 Oct 2024 19:48:53 -0400 Subject: [PATCH 4/5] Add a test flag to force tests not to skip on required tools. I will occasionally see tests that skip instead of failing because the tool required to run the test stopped loading - maybe a parsing error or a misconfiguration around sample data tables. I don't think this should be the default but we should make sure all our CI tests are running properly. 
--- lib/galaxy_test/api/conftest.py | 4 ++-- lib/galaxy_test/base/env.py | 3 +++ lib/galaxy_test/base/populators.py | 13 ++++++++++++- 3 files changed, 17 insertions(+), 3 deletions(-) diff --git a/lib/galaxy_test/api/conftest.py b/lib/galaxy_test/api/conftest.py index e67769896c08..e8cc4e2dcd42 100644 --- a/lib/galaxy_test/api/conftest.py +++ b/lib/galaxy_test/api/conftest.py @@ -22,7 +22,7 @@ ) from galaxy_test.base.env import setup_keep_outdir from galaxy_test.base.populators import ( - _raise_skip_if, + check_missing_tool, DatasetCollectionPopulator, DatasetPopulator, get_tool_ids, @@ -148,7 +148,7 @@ def check_required_tools(anonymous_galaxy_interactor, request): for marker in request.node.iter_markers(): if marker.name == "requires_tool_id": tool_id = marker.args[0] - _raise_skip_if(tool_id not in get_tool_ids(anonymous_galaxy_interactor)) + check_missing_tool(tool_id not in get_tool_ids(anonymous_galaxy_interactor)) @pytest.fixture diff --git a/lib/galaxy_test/base/env.py b/lib/galaxy_test/base/env.py index 27a149b8f270..8ad389445701 100644 --- a/lib/galaxy_test/base/env.py +++ b/lib/galaxy_test/base/env.py @@ -10,7 +10,10 @@ Tuple, ) +from galaxy.util import asbool + DEFAULT_WEB_HOST = socket.gethostbyname("localhost") +REQUIRE_ALL_NEEDED_TOOLS = asbool(os.environ.get("GALAXY_TEST_REQUIRE_ALL_NEEDED_TOOLS", "0")) GalaxyTarget = Tuple[str, Optional[str], str] diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py index 3ac65db5afd8..e2c9e1544971 100644 --- a/lib/galaxy_test/base/populators.py +++ b/lib/galaxy_test/base/populators.py @@ -128,6 +128,7 @@ HasAnonymousGalaxyInteractor, ) from .api_util import random_name +from .env import REQUIRE_ALL_NEEDED_TOOLS FILE_URL = "https://raw.githubusercontent.com/galaxyproject/galaxy/dev/test-data/4.bed" FILE_MD5 = "37b59762b59fff860460522d271bc111" @@ -180,7 +181,7 @@ def method_wrapper(method): @wraps(method) def wrapped_method(api_test_case, *args, **kwargs): - _raise_skip_if(tool_id 
not in get_tool_ids(api_test_case.anonymous_galaxy_interactor)) + check_missing_tool(tool_id not in get_tool_ids(api_test_case.anonymous_galaxy_interactor)) return method(api_test_case, *args, **kwargs) return wrapped_method @@ -268,6 +269,16 @@ def _raise_skip_if(check, *args): raise unittest.SkipTest(*args) +def check_missing_tool(check): + if check: + if REQUIRE_ALL_NEEDED_TOOLS: + raise AssertionError("Test requires a missing tool and GALAXY_TEST_REQUIRE_ALL_NEEDED_TOOLS is enabled") + else: + raise unittest.SkipTest( + "Missing tool required to run test, skipping. If this is not intended, ensure GALAXY_TEST_TOOL_CONF if set contains the required tool_conf.xml target and the tool properly parses and loads in Galaxy's test configuration" + ) + + def conformance_tests_gen(directory, filename="conformance_tests.yaml"): conformance_tests_path = os.path.join(directory, filename) with open(conformance_tests_path) as f: From b6d8442c8ea4f93c9b8ea42935e09840ef79a6b8 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 14 Oct 2024 10:50:53 -0400 Subject: [PATCH 5/5] Remove some celery cruft. 
--- lib/galaxy_test/api/conftest.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/lib/galaxy_test/api/conftest.py b/lib/galaxy_test/api/conftest.py index e8cc4e2dcd42..5c0b36753995 100644 --- a/lib/galaxy_test/api/conftest.py +++ b/lib/galaxy_test/api/conftest.py @@ -81,15 +81,9 @@ def anonymous_galaxy_interactor(api_test_config_object: ApiConfigObject) -> Anon return AnonymousGalaxyInteractor(api_test_config_object) -_celery_app = None -_celery_worker = None - - @pytest.fixture(autouse=True, scope="session") def request_celery_app(celery_session_app, celery_config): try: - global _celery_app - _celery_app = celery_session_app yield finally: if os.environ.get("GALAXY_TEST_EXTERNAL") is None: @@ -101,8 +95,7 @@ def request_celery_app(celery_session_app, celery_config): @pytest.fixture(autouse=True, scope="session") def request_celery_worker(celery_session_worker, celery_config, celery_worker_parameters): - global _celery_worker - _celery_worker = celery_session_worker + yield @pytest.fixture(scope="session", autouse=True)