From 2f01f544e41d4bc95a559e06799ac47c20291590 Mon Sep 17 00:00:00 2001 From: John Chilton Date: Mon, 26 Aug 2024 16:51:33 -0400 Subject: [PATCH] Rework test case validation schema. --- lib/galaxy/tool_util/parameters/__init__.py | 2 + lib/galaxy/tool_util/parameters/case.py | 24 ++- lib/galaxy/tool_util/parameters/models.py | 13 +- lib/galaxy/tool_util/parser/interface.py | 202 ++++++++++++++++-- lib/galaxy/tool_util/parser/xml.py | 6 +- lib/galaxy/tool_util/verify/interactor.py | 8 +- .../tool_util/parameter_specification.yml | 61 +++++- .../tool_util/test_parameter_test_cases.py | 86 +++++++- .../tool_util/test_test_definition_parsing.py | 20 +- test/unit/tool_util/util.py | 24 +++ 10 files changed, 391 insertions(+), 55 deletions(-) diff --git a/lib/galaxy/tool_util/parameters/__init__.py b/lib/galaxy/tool_util/parameters/__init__.py index 423d4e950bf5..ac6ca0dbbf3c 100644 --- a/lib/galaxy/tool_util/parameters/__init__.py +++ b/lib/galaxy/tool_util/parameters/__init__.py @@ -62,6 +62,7 @@ repeat_inputs_to_array, validate_explicit_conditional_test_value, visit_input_values, + VISITOR_NO_REPLACEMENT, ) __all__ = ( @@ -116,6 +117,7 @@ "keys_starting_with", "visit_input_values", "repeat_inputs_to_array", + "VISITOR_NO_REPLACEMENT", "decode", "encode", "WorkflowStepToolState", diff --git a/lib/galaxy/tool_util/parameters/case.py b/lib/galaxy/tool_util/parameters/case.py index 3c0459d033a3..cd8aa4f71e04 100644 --- a/lib/galaxy/tool_util/parameters/case.py +++ b/lib/galaxy/tool_util/parameters/case.py @@ -12,10 +12,13 @@ from packaging.version import Version from galaxy.tool_util.parser.interface import ( + TestCollectionDef, ToolSource, ToolSourceTest, ToolSourceTestInput, ToolSourceTestInputs, + xml_data_input_to_json, + XmlTestCollectionDefDict, ) from galaxy.util import asbool from .factory import input_models_for_tool_source @@ -25,6 +28,7 @@ ConditionalWhen, DataCollectionParameterModel, DataColumnParameterModel, + DataParameterModel, FloatParameterModel, IntegerParameterModel, RepeatParameterModel, @@ -249,8 +253,26 @@ def _merge_into_state( else: test_input = _input_for(state_path, inputs) if test_input is not None: + input_value: Any if isinstance(tool_input, (DataCollectionParameterModel,)): - input_value = test_input.get("attributes", {}).get("collection") + input_value = TestCollectionDef.from_dict( + cast(XmlTestCollectionDefDict, test_input.get("attributes", {}).get("collection")) + ).test_format_to_dict() + elif isinstance(tool_input, (DataParameterModel,)): + data_tool_input = cast(DataParameterModel, tool_input) + if data_tool_input.multiple: + value = test_input["value"] + input_value_list = [] + if value: + test_input_values = cast(str, value).split(",") + for test_input_value in test_input_values: + instance_test_input = test_input.copy() + instance_test_input["value"] = test_input_value + input_value = xml_data_input_to_json(test_input) + input_value_list.append(input_value) + input_value = input_value_list + else: + input_value = xml_data_input_to_json(test_input) else: input_value = test_input["value"] input_value = legacy_from_string(tool_input, input_value, warnings, profile) diff --git a/lib/galaxy/tool_util/parameters/models.py b/lib/galaxy/tool_util/parameters/models.py index 976195ef440f..30d6b5a5a483 100644 --- a/lib/galaxy/tool_util/parameters/models.py +++ b/lib/galaxy/tool_util/parameters/models.py @@ -41,7 +41,8 @@ from galaxy.exceptions import RequestParameterInvalidException from galaxy.tool_util.parser.interface import ( DrillDownOptionsDict, - 
TestCollectionDefDict, + JsonTestCollectionDefDict, + JsonTestDatasetDefDict, ) from ._types import ( cast_as_type, @@ -312,9 +313,9 @@ def py_type_internal(self) -> Type: def py_type_test_case(self) -> Type: base_model: Type if self.multiple: - base_model = str + base_model = list_type(JsonTestDatasetDefDict) else: - base_model = str + base_model = JsonTestDatasetDefDict return optional_if_needed(base_model, self.optional) def pydantic_template(self, state_representation: StateRepresentationT) -> DynamicModelInformation: @@ -372,7 +373,7 @@ def pydantic_template(self, state_representation: StateRepresentationT) -> Dynam elif state_representation == "workflow_step_linked": return dynamic_model_information_from_py_type(self, ConnectedValue) elif state_representation == "test_case_xml": - return dynamic_model_information_from_py_type(self, TestCollectionDefDict) + return dynamic_model_information_from_py_type(self, JsonTestCollectionDefDict) else: raise NotImplementedError( f"Have not implemented data collection parameter models for state representation {state_representation}" @@ -1179,9 +1180,7 @@ def to_simple_model(input_parameter: Union[ToolParameterModel, ToolParameterT]) return cast(ToolParameterT, input_parameter) -def simple_input_models( - parameters: Union[List[ToolParameterModel], List[ToolParameterT]] -) -> Iterable[ToolParameterT]: +def simple_input_models(parameters: Union[List[ToolParameterModel], List[ToolParameterT]]) -> Iterable[ToolParameterT]: return [to_simple_model(m) for m in parameters] diff --git a/lib/galaxy/tool_util/parser/interface.py b/lib/galaxy/tool_util/parser/interface.py index a7b3896c289f..f7f6a9a8bbe5 100644 --- a/lib/galaxy/tool_util/parser/interface.py +++ b/lib/galaxy/tool_util/parser/interface.py @@ -8,6 +8,7 @@ from os.path import join from typing import ( Any, + cast, Dict, List, Optional, @@ -550,7 +551,8 @@ def parse_input_sources(self) -> List[InputSource]: """Return a list of InputSource objects.""" -TestCollectionDefElementObject = Union["TestCollectionDefDict", "ToolSourceTestInput"] +AnyTestCollectionDefDict = Union["JsonTestCollectionDefDict", "XmlTestCollectionDefDict"] +TestCollectionDefElementObject = Union[AnyTestCollectionDefDict, "ToolSourceTestInput"] TestCollectionAttributeDict = Dict[str, Any] CollectionType = str @@ -560,7 +562,16 @@ class TestCollectionDefElementDict(TypedDict): element_definition: TestCollectionDefElementObject -class TestCollectionDefDict(TypedDict): +class TestCollectionDefElementInternal(TypedDict): + element_identifier: str + element_definition: Union["TestCollectionDef", "ToolSourceTestInput"] + + +# two versions of collection inputs can be parsed out, XmlTestCollectionDefDict is historically +# used by tools and Galaxy internals and exposed in the API via the test definition endpoints for +# tool execution. JsonTestCollectionDefDict is the format consumed by Planemo that mirrors a CWL +# way of defining inputs. 
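+#
+# Illustrative sketch only (hypothetical values, not taken from any real tool test) of the two shapes:
+#
+#   XmlTestCollectionDefDict (historical form, exposed via the API):
+#     {"model_class": "TestCollectionDef", "collection_type": "paired", "name": "pair", "attributes": {},
+#      "elements": [{"element_identifier": "forward", "element_definition": {...}}, ...]}
+#
+#   JsonTestCollectionDefDict (CWL-style form consumed by Planemo):
+#     {"class": "Collection", "collection_type": "paired",
+#      "elements": [{"identifier": "forward", "class": "File", "path": "1_f.bed"}, ...]}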
+class XmlTestCollectionDefDict(TypedDict): model_class: Literal["TestCollectionDef"] attributes: TestCollectionAttributeDict collection_type: CollectionType @@ -568,8 +579,96 @@ class TestCollectionDefDict(TypedDict): name: str +JsonTestDatasetDefDict = TypedDict( + "JsonTestDatasetDefDict", + { + "class": Literal["File"], + "path": NotRequired[Optional[str]], + "location": NotRequired[Optional[str]], + "name": NotRequired[Optional[str]], + "dbkey": NotRequired[Optional[str]], + "filetype": NotRequired[Optional[str]], + "composite_data": NotRequired[Optional[List[str]]], + "tags": NotRequired[Optional[List[str]]], + }, +) + +JsonTestCollectionDefElementDict = Union[ + "JsonTestCollectionDefDatasetElementDict", "JsonTestCollectionDefCollectionElementDict" +] +JsonTestCollectionDefDatasetElementDict = TypedDict( + "JsonTestCollectionDefDatasetElementDict", + { + "identifier": str, + "class": Literal["File"], + "path": NotRequired[Optional[str]], + "location": NotRequired[Optional[str]], + "name": NotRequired[Optional[str]], + "dbkey": NotRequired[Optional[str]], + "filetype": NotRequired[Optional[str]], + "composite_data": NotRequired[Optional[List[str]]], + "tags": NotRequired[Optional[List[str]]], + }, +) +BaseJsonTestCollectionDefCollectionElementDict = TypedDict( + "BaseJsonTestCollectionDefCollectionElementDict", + { + "class": Literal["Collection"], + "collection_type": str, + "elements": NotRequired[Optional[List[JsonTestCollectionDefElementDict]]], + }, +) +JsonTestCollectionDefCollectionElementDict = TypedDict( + "JsonTestCollectionDefCollectionElementDict", + { + "identifier": str, + "class": Literal["Collection"], + "collection_type": str, + "elements": NotRequired[Optional[List[JsonTestCollectionDefElementDict]]], + }, +) +JsonTestCollectionDefDict = TypedDict( + "JsonTestCollectionDefDict", + { + "class": Literal["Collection"], + "collection_type": str, + "elements": NotRequired[Optional[List[JsonTestCollectionDefElementDict]]], + "name": NotRequired[Optional[str]], + }, +) + + +def xml_data_input_to_json(xml_input: ToolSourceTestInput) -> "JsonTestDatasetDefDict": + attributes = xml_input["attributes"] + as_dict: JsonTestDatasetDefDict = { + "class": "File", + } + value = xml_input["value"] + if value: + as_dict["path"] = value + _copy_if_exists(attributes, as_dict, "location") + _copy_if_exists(attributes, as_dict, "dbkey") + _copy_if_exists(attributes, as_dict, "ftype", "filetype") + _copy_if_exists(attributes, as_dict, "composite_data", only_if_value=True) + tags = attributes.get("tags") + if tags: + as_dict["tags"] = [t.strip() for t in tags.split(",")] + return as_dict + + +def _copy_if_exists(attributes, as_dict, name: str, as_name: Optional[str] = None, only_if_value: bool = False): + if name in attributes: + value = attributes[name] + if not value and only_if_value: + return + if as_name is None: + as_name = name + as_dict[as_name] = value + + class TestCollectionDef: __test__ = False # Prevent pytest from discovering this class (issue #12071) + elements: List[TestCollectionDefElementInternal] def __init__(self, attrib, name, collection_type, elements): self.attrib = attrib @@ -577,7 +676,40 @@ def __init__(self, attrib, name, collection_type, elements): self.elements = elements self.name = name - def to_dict(self) -> TestCollectionDefDict: + def _test_format_to_dict(self) -> "BaseJsonTestCollectionDefCollectionElementDict": + + def to_element(xml_element_dict: "TestCollectionDefElementInternal") -> "JsonTestCollectionDefElementDict": + identifier = 
xml_element_dict["element_identifier"] + element_object = xml_element_dict["element_definition"] + as_dict: JsonTestCollectionDefElementDict + + if isinstance(element_object, TestCollectionDef): + as_dict = JsonTestCollectionDefCollectionElementDict( + identifier=identifier, **element_object._test_format_to_dict() + ) + else: + as_dict = JsonTestCollectionDefDatasetElementDict( + identifier=identifier, + **xml_data_input_to_json(cast(ToolSourceTestInput, element_object)), + ) + return as_dict + + test_format_dict = BaseJsonTestCollectionDefCollectionElementDict( + { + "class": "Collection", + "elements": list(map(to_element, self.elements)), + "collection_type": self.collection_type, + } + ) + return test_format_dict + + def test_format_to_dict(self) -> JsonTestCollectionDefDict: + test_format_dict = JsonTestCollectionDefDict(**self._test_format_to_dict()) + if self.name: + test_format_dict["name"] = self.name + return test_format_dict + + def to_dict(self) -> XmlTestCollectionDefDict: def element_to_dict(element_dict): element_identifier, element_def = element_dict["element_identifier"], element_dict["element_definition"] if isinstance(element_def, TestCollectionDef): @@ -596,23 +728,53 @@ def element_to_dict(element_dict): } @staticmethod - def from_dict(as_dict: TestCollectionDefDict): - assert as_dict["model_class"] == "TestCollectionDef" - - def element_from_dict(element_dict): - if "element_definition" not in element_dict: - raise Exception(f"Invalid element_dict {element_dict}") - element_def = element_dict["element_definition"] - if element_def.get("model_class", None) == "TestCollectionDef": - element_def = TestCollectionDef.from_dict(element_def) - return {"element_identifier": element_dict["element_identifier"], "element_definition": element_def} - - return TestCollectionDef( - attrib=as_dict["attributes"], - name=as_dict["name"], - elements=list(map(element_from_dict, as_dict["elements"] or [])), - collection_type=as_dict["collection_type"], - ) + def from_dict( + as_dict: Union[AnyTestCollectionDefDict, JsonTestCollectionDefCollectionElementDict] + ) -> "TestCollectionDef": + if "model_class" in as_dict: + xml_as_dict = cast(XmlTestCollectionDefDict, as_dict) + assert xml_as_dict["model_class"] == "TestCollectionDef" + + def element_from_dict(element_dict) -> TestCollectionDefElementInternal: + if "element_definition" not in element_dict: + raise Exception(f"Invalid element_dict {element_dict}") + element_def = element_dict["element_definition"] + if element_def.get("model_class", None) == "TestCollectionDef": + element_def = TestCollectionDef.from_dict(element_def) + return {"element_identifier": element_dict["element_identifier"], "element_definition": element_def} + + return TestCollectionDef( + attrib=xml_as_dict["attributes"], + name=xml_as_dict.get("name", "Unnamed Collection"), + elements=list(map(element_from_dict, xml_as_dict["elements"] or [])), + collection_type=xml_as_dict["collection_type"], + ) + else: + json_as_dict = cast(JsonTestCollectionDefDict, as_dict) + + def element_from_dict_json( + element_dict: JsonTestCollectionDefElementDict, + ) -> TestCollectionDefElementInternal: + element_class = element_dict.get("class") + identifier = element_dict["identifier"] + element_def: Union[TestCollectionDef, ToolSourceTestInput] + if element_class == "Collection": + collection_element_dict = cast(JsonTestCollectionDefCollectionElementDict, element_dict) + element_def = TestCollectionDef.from_dict(collection_element_dict) + else: + dataset_element_dict = 
cast(JsonTestCollectionDefDatasetElementDict, element_dict) + value = dataset_element_dict["path"] # todo handle location + name = dataset_element_dict.get("name") or "Unnamed Collection" + element_def = {"name": name, "value": value, "attributes": {}} + return TestCollectionDefElementInternal(element_identifier=identifier, element_definition=element_def) + + elements = list(map(element_from_dict_json, json_as_dict.get("elements") or [])) + return TestCollectionDef( + attrib={}, + name=json_as_dict.get("name") or "Unnamed Collection", + elements=elements, + collection_type=json_as_dict["collection_type"], + ) def collect_inputs(self): inputs = [] diff --git a/lib/galaxy/tool_util/parser/xml.py b/lib/galaxy/tool_util/parser/xml.py index 8e098236a110..e9500e7ad21b 100644 --- a/lib/galaxy/tool_util/parser/xml.py +++ b/lib/galaxy/tool_util/parser/xml.py @@ -45,7 +45,6 @@ PageSource, PagesSource, RequiredFiles, - TestCollectionDefDict, TestCollectionDefElementDict, TestCollectionDefElementObject, TestCollectionOutputDef, @@ -58,6 +57,7 @@ ToolSourceTestOutputAttributes, ToolSourceTestOutputs, ToolSourceTests, + XmlTestCollectionDefDict, XrefDict, ) from .output_actions import ToolOutputActionGroup @@ -1005,7 +1005,7 @@ def __parse_inputs_elems(test_elem, i) -> ToolSourceTestInputs: return raw_inputs -def _test_collection_def_dict(elem: Element) -> TestCollectionDefDict: +def _test_collection_def_dict(elem: Element) -> XmlTestCollectionDefDict: elements: List[TestCollectionDefElementDict] = [] attrib: Dict[str, Any] = _element_to_dict(elem) collection_type = attrib["type"] @@ -1021,7 +1021,7 @@ def _test_collection_def_dict(elem: Element) -> TestCollectionDefDict: element_definition = __parse_param_elem(element) elements.append({"element_identifier": element_identifier, "element_definition": element_definition}) - return TestCollectionDefDict( + return XmlTestCollectionDefDict( model_class="TestCollectionDef", attributes=attrib, collection_type=collection_type, diff --git a/lib/galaxy/tool_util/verify/interactor.py b/lib/galaxy/tool_util/verify/interactor.py index 781fc3bd30fb..9e1dd5bf87d4 100644 --- a/lib/galaxy/tool_util/verify/interactor.py +++ b/lib/galaxy/tool_util/verify/interactor.py @@ -38,10 +38,10 @@ from galaxy.tool_util.parser.interface import ( AssertionList, TestCollectionDef, - TestCollectionDefDict, TestCollectionOutputDef, TestSourceTestOutputColllection, ToolSourceTestOutputs, + XmlTestCollectionDefDict, ) from galaxy.util import requests from galaxy.util.bunch import Bunch @@ -588,7 +588,9 @@ def _ensure_valid_location_in(self, test_data: dict) -> Optional[str]: raise ValueError(f"Invalid `location` URL: `{location}`") return location - def run_tool(self, testdef: "ToolTestDescription", history_id: str, resource_parameters: Optional[Dict[str, Any]] = None) -> RunToolResponse: + def run_tool( + self, testdef: "ToolTestDescription", history_id: str, resource_parameters: Optional[Dict[str, Any]] = None + ) -> RunToolResponse: # We need to handle the case where we've uploaded a valid compressed file since the upload # tool will have uncompressed it on the fly. 
resource_parameters = resource_parameters or {} @@ -1754,7 +1756,7 @@ def expanded_inputs_from_json(expanded_inputs_json: ExpandedToolInputsJsonified) loaded_inputs: ExpandedToolInputs = {} for key, value in expanded_inputs_json.items(): if isinstance(value, dict) and value.get("model_class"): - collection_def_dict = cast(TestCollectionDefDict, value) + collection_def_dict = cast(XmlTestCollectionDefDict, value) loaded_inputs[key] = TestCollectionDef.from_dict(collection_def_dict) else: loaded_inputs[key] = value diff --git a/test/unit/tool_util/parameter_specification.yml b/test/unit/tool_util/parameter_specification.yml index da3671057075..8497843a427c 100644 --- a/test/unit/tool_util/parameter_specification.yml +++ b/test/unit/tool_util/parameter_specification.yml @@ -533,6 +533,13 @@ gx_data: # expanded out. - parameter: {__class__: "Batch", values: [{src: hdca, id: 5}]} - parameter: {src: hda, id: abcdabcd} + test_case_xml_valid: + - parameter: {class: File, path: foo.bed} + - parameter: {class: File, location: "https://raw.githubusercontent.com/galaxyproject/planemo/7be1bf5b3971a43eaa73f483125bfb8cabf1c440/tests/data/hello.txt"} + test_case_xml_invalid: + - parameter: foo.bed + - parameter: null + - {} workflow_step_valid: - {} workflow_step_invalid: @@ -592,6 +599,11 @@ gx_data_optional: - {src: hda, id: 7} - parameter: {__class__: "Batch", values: [{src: hdca, id: 5}]} - parameter: {__class__: 'ConnectedValueX'} + test_case_xml_valid: + - {} + - parameter: {class: "File", path: "1.bed"} + test_case_xml_invalid: + - parameter: {class: "NotAFile", path: "1.bed"} gx_data_multiple: request_valid: @@ -690,6 +702,49 @@ gx_data_collection: - parameter: {__class__: 'ConnectedValue'} workflow_step_linked_invalid: - {} + test_case_xml_valid: + - parameter: {class: Collection, collection_type: list, elements: []} + - parameter: + class: Collection + collection_type: list + elements: + - {identifier: "first", path: "1.bed", class: File} + - {identifier: "second", path: "2.bed", class: File} + - parameter: + name: "A nested listed with a name" + class: Collection + collection_type: "list:paired" + elements: + - class: Collection + collection_type: paired + identifier: first_el + elements: + - {identifier: "forward", path: "1_f.bed", class: File} + - {identifier: "reverse", path: "1_r.bed", class: File} + test_case_xml_invalid: + # collection type is required + - parameter: {class: Collection, elements: []} + - parameter: + class: Collection + collection_type: list + elements: + - {identifier: "first", path: "1.bed", class: NotAFile} + - parameter: + class: Collection + collection_type: list + elements: + - {identifier: "first", pathmisspelled: "1.bed", class: File} + - parameter: + name: "A nested listed with a name" + class: Collection + collection_type: "list:paired" + elements: + - class: Collection + collection_type: paired + identifier: first_el + elements: + - {identifier: "forward", path: "1_f.bed", class: File} + - {identifier: "reverse", path: "1_r.bed", class: FileX} gx_data_collection_optional: request_valid: @@ -995,11 +1050,11 @@ gx_data_column: request_internal_invalid: - { ref_parameter: {src: hda, id: 123}, parameter: "0" } test_case_xml_valid: - - { ref_parameter: "1.bed", parameter: 3 } + - { ref_parameter: {class: "File", path: "1.bed"}, parameter: 3 } test_case_xml_invalid: - - { ref_parameter: "1.bed", parameter: "3" } + - { ref_parameter: {class: "File", path: "1.bed"}, parameter: "3" } test_case_xml_invalid: - - { ref_parameter: "1.bed", parameter: "c2: With name" } + - { 
ref_parameter: {class: "File", path: "1.bed"}, parameter: "c2: With name" } gx_data_column_optional: request_valid: diff --git a/test/unit/tool_util/test_parameter_test_cases.py b/test/unit/tool_util/test_parameter_test_cases.py index aa5db0319259..909c0bfd2da5 100644 --- a/test/unit/tool_util/test_parameter_test_cases.py +++ b/test/unit/tool_util/test_parameter_test_cases.py @@ -8,13 +8,18 @@ from galaxy.tool_util.models import parse_tool from galaxy.tool_util.parameters.case import ( test_case_state as case_state, + TestCaseStateAndWarnings, TestCaseStateValidationResult, validate_test_cases_for_tool_source, ) from galaxy.tool_util.parser.factory import get_tool_source -from galaxy.tool_util.parser.interface import ToolSourceTest +from galaxy.tool_util.parser.interface import ( + ToolSource, + ToolSourceTest, +) from galaxy.tool_util.unittest_utils import functional_test_tool_directory from galaxy.tool_util.verify.parse import parse_tool_test_descriptions +from .util import dict_verify_each # legacy tools allows specifying parameter and repeat parameters without # qualification. This was problematic and could result in ambigious specifications. @@ -116,6 +121,73 @@ def test_validate_framework_test_tools(): raise Exception(f"Failed to validate {tool_path}: {str(e)}") +def test_test_case_state_conversion(): + tool_source = tool_source_for("collection_nested_test") + test_cases: List[ToolSourceTest] = tool_source.parse_tests_to_dict()["tests"] + state = case_state_for(tool_source, test_cases[0]) + expectations = [ + (["f1", "collection_type"], "list:paired"), + (["f1", "class"], "Collection"), + (["f1", "elements", 0, "class"], "Collection"), + (["f1", "elements", 0, "collection_type"], "paired"), + (["f1", "elements", 0, "elements", 0, "class"], "File"), + (["f1", "elements", 0, "elements", 0, "path"], "simple_line.txt"), + (["f1", "elements", 0, "elements", 0, "identifier"], "forward"), + ] + dict_verify_each(state.tool_state.input_state, expectations) + + tool_source = tool_source_for("dbkey_filter_input") + test_cases = tool_source.parse_tests_to_dict()["tests"] + state = case_state_for(tool_source, test_cases[0]) + expectations = [ + (["inputs", "class"], "File"), + (["inputs", "dbkey"], "hg19"), + ] + dict_verify_each(state.tool_state.input_state, expectations) + + tool_source = tool_source_for("discover_metadata_files") + test_cases = tool_source.parse_tests_to_dict()["tests"] + state = case_state_for(tool_source, test_cases[0]) + expectations = [ + (["input_bam", "class"], "File"), + (["input_bam", "filetype"], "bam"), + ] + dict_verify_each(state.tool_state.input_state, expectations) + + tool_source = tool_source_for("remote_test_data_location") + test_cases = tool_source.parse_tests_to_dict()["tests"] + state = case_state_for(tool_source, test_cases[0]) + expectations = [ + (["input", "class"], "File"), + ( + ["input", "location"], + "https://raw.githubusercontent.com/galaxyproject/planemo/7be1bf5b3971a43eaa73f483125bfb8cabf1c440/tests/data/hello.txt", + ), + ] + dict_verify_each(state.tool_state.input_state, expectations) + + tool_source = tool_source_for("composite") + test_cases = tool_source.parse_tests_to_dict()["tests"] + state = case_state_for(tool_source, test_cases[0]) + expectations = [ + (["input", "class"], "File"), + (["input", "filetype"], "velvet"), + (["input", "composite_data", 0], "velveth_test1/Sequences"), + ] + dict_verify_each(state.tool_state.input_state, expectations) + + tool_source = tool_source_for("parameters/gx_group_tag") + test_cases = 
tool_source.parse_tests_to_dict()["tests"] + state = case_state_for(tool_source, test_cases[0]) + expectations = [ + (["ref_parameter", "class"], "Collection"), + (["ref_parameter", "collection_type"], "paired"), + (["ref_parameter", "elements", 0, "identifier"], "forward"), + (["ref_parameter", "elements", 0, "tags", 0], "group:type:single"), + ] + dict_verify_each(state.tool_state.input_state, expectations) + + def _validate_path(tool_path: str): tool_source = get_tool_source(tool_path) parsed_tool = parse_tool(tool_source) @@ -130,7 +202,17 @@ def _validate_path(tool_path: str): def validate_test_cases_for(tool_name: str, **kwd) -> List[TestCaseStateValidationResult]: + return validate_test_cases_for_tool_source(tool_source_for(tool_name), **kwd) + + +def case_state_for(tool_source: ToolSource, test_case: ToolSourceTest) -> TestCaseStateAndWarnings: + parsed_tool = parse_tool(tool_source) + profile = tool_source.parse_profile() + return case_state(test_case, parsed_tool.inputs, profile) + + +def tool_source_for(tool_name: str) -> ToolSource: test_tool_directory = functional_test_tool_directory() tool_path = os.path.join(test_tool_directory, f"{tool_name}.xml") tool_source = get_tool_source(tool_path) - return validate_test_cases_for_tool_source(tool_source, **kwd) + return tool_source diff --git a/test/unit/tool_util/test_test_definition_parsing.py b/test/unit/tool_util/test_test_definition_parsing.py index f7c2c3bd28b9..6576660c3b00 100644 --- a/test/unit/tool_util/test_test_definition_parsing.py +++ b/test/unit/tool_util/test_test_definition_parsing.py @@ -1,6 +1,5 @@ """Tool test parsing to dicts logic.""" -import json import os from typing import ( Any, @@ -17,6 +16,7 @@ in_packages, ) from galaxy.util.unittest import TestCase +from .util import dict_verify_each # Not the whole response, just some keys and such to test... 
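# Each expectation is a (path, expected_value) pair; dict_verify_each (now shared from
# test/unit/tool_util/util.py) walks the path into the parsed test dict and asserts equality,
# e.g. a hypothetical (["inputs", 0, "name"], "p1") would check target["inputs"][0]["name"] == "p1".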
SIMPLE_CONSTRUCTS_EXPECTATIONS_0 = [ @@ -117,18 +117,6 @@ def test_bigwigtowig_converter(self): self._verify_each(test_dicts[1].to_dict(), BIGWIG_TO_WIG_EXPECTATIONS) def _verify_each(self, target_dict: dict, expectations: List[Any]): - assert_json_encodable(target_dict) - for path, expectation in expectations: - exception = target_dict.get("exception") - assert not exception, f"Test failed to generate with exception {exception}" - self._verify(target_dict, path, expectation) - - def _verify(self, target_dict: dict, expectation_path: List[str], expectation: Any): - rest = target_dict - for path_part in expectation_path: - rest = rest[path_part] - assert rest == expectation, f"{rest} != {expectation} for {expectation_path}" - - -def assert_json_encodable(as_dict: dict): - json.dumps(as_dict) + exception = target_dict.get("exception") + assert not exception, f"Test failed to generate with exception {exception}" + dict_verify_each(target_dict, expectations) diff --git a/test/unit/tool_util/util.py b/test/unit/tool_util/util.py index c94345b29f54..b03aff0fc15f 100644 --- a/test/unit/tool_util/util.py +++ b/test/unit/tool_util/util.py @@ -1,11 +1,35 @@ +import json from contextlib import contextmanager from os import environ +from typing import ( + Any, + List, +) import pytest external_dependency_management = pytest.mark.external_dependency_management +def dict_verify_each(target_dict: dict, expectations: List[Any]): + assert_json_encodable(target_dict) + for path, expectation in expectations: + exception = target_dict.get("exception") + assert not exception, f"Test failed to generate with exception {exception}" + dict_verify(target_dict, path, expectation) + + +def dict_verify(target_dict: dict, expectation_path: List[str], expectation: Any): + rest = target_dict + for path_part in expectation_path: + rest = rest[path_part] + assert rest == expectation, f"{rest} != {expectation} for {expectation_path}" + + +def assert_json_encodable(as_dict: dict): + json.dumps(as_dict) + + @contextmanager def modify_environ(values, keys_to_remove=None): """