diff --git a/lib/galaxy/tool_util/parser/interface.py b/lib/galaxy/tool_util/parser/interface.py index 092938836a6c..889c3be8f96b 100644 --- a/lib/galaxy/tool_util/parser/interface.py +++ b/lib/galaxy/tool_util/parser/interface.py @@ -18,7 +18,10 @@ import packaging.version from pydantic import BaseModel -from typing_extensions import TypedDict +from typing_extensions import ( + NotRequired, + TypedDict, +) from galaxy.util import Element from galaxy.util.path import safe_walk @@ -43,8 +46,47 @@ class AssertionDict(TypedDict): AssertionList = Optional[List[AssertionDict]] XmlInt = Union[str, int] -ToolSourceTestInputs = Any -ToolSourceTestOutputs = Any + +class ToolSourceTestOutputAttributes(TypedDict): + object: NotRequired[Optional[Any]] + compare: str + lines_diff: int + delta: int + delta_frac: Optional[float] + sort: bool + decompress: bool + location: NotRequired[Optional[str]] + ftype: NotRequired[Optional[str]] + eps: float + metric: str + pin_labels: Optional[Any] + count: Optional[int] + metadata: Dict[str, Any] + md5: Optional[str] + checksum: Optional[str] + primary_datasets: Dict[str, Any] + elements: Dict[str, Any] + assert_list: AssertionList + extra_files: List[Dict[str, Any]] + + +class ToolSourceTestOutput(TypedDict): + name: str + value: Optional[str] + attributes: ToolSourceTestOutputAttributes + + +ToolSourceTestInputAttributes = Dict[str, Any] + + +class ToolSourceTestInput(TypedDict): + name: str + value: Optional[Any] + attributes: ToolSourceTestInputAttributes + + +ToolSourceTestInputs = List[ToolSourceTestInput] +ToolSourceTestOutputs = List[ToolSourceTestOutput] TestSourceTestOutputColllection = Any diff --git a/lib/galaxy/tool_util/parser/util.py b/lib/galaxy/tool_util/parser/util.py index f9d09e457af7..53009057128e 100644 --- a/lib/galaxy/tool_util/parser/util.py +++ b/lib/galaxy/tool_util/parser/util.py @@ -20,6 +20,8 @@ DEFAULT_METRIC = "mae" DEFAULT_EPS = 0.01 DEFAULT_PIN_LABELS = None +DEFAULT_SORT = False +DEFAULT_DECOMPRESS = 
False def is_dict(item): diff --git a/lib/galaxy/tool_util/parser/xml.py b/lib/galaxy/tool_util/parser/xml.py index a1e0e86d9c19..7a5864ac8fcc 100644 --- a/lib/galaxy/tool_util/parser/xml.py +++ b/lib/galaxy/tool_util/parser/xml.py @@ -18,11 +18,13 @@ from galaxy.tool_util.deps import requirements from galaxy.tool_util.parser.util import ( + DEFAULT_DECOMPRESS, DEFAULT_DELTA, DEFAULT_DELTA_FRAC, DEFAULT_EPS, DEFAULT_METRIC, DEFAULT_PIN_LABELS, + DEFAULT_SORT, ) from galaxy.util import ( Element, @@ -43,6 +45,11 @@ TestCollectionOutputDef, ToolSource, ToolSourceTest, + ToolSourceTestInput, + ToolSourceTestInputs, + ToolSourceTestOutput, + ToolSourceTestOutputAttributes, + ToolSourceTestOutputs, ToolSourceTests, XrefDict, ) @@ -726,16 +733,16 @@ def _test_elem_to_dict(test_elem, i, profile=None) -> ToolSourceTest: return rval -def __parse_input_elems(test_elem, i): +def __parse_input_elems(test_elem, i) -> ToolSourceTestInputs: __expand_input_elems(test_elem) return __parse_inputs_elems(test_elem, i) -def __parse_output_elems(test_elem): - outputs = [] +def __parse_output_elems(test_elem) -> ToolSourceTestOutputs: + outputs: ToolSourceTestOutputs = [] for output_elem in test_elem.findall("output"): name, file, attributes = __parse_output_elem(output_elem) - outputs.append({"name": name, "value": file, "attributes": attributes}) + outputs.append(ToolSourceTestOutput({"name": name, "value": file, "attributes": attributes})) return outputs @@ -786,56 +793,62 @@ def __parse_element_tests(parent_element, profile=None): return element_tests -def __parse_test_attributes(output_elem, attrib, parse_elements=False, parse_discovered_datasets=False, profile=None): +VALUE_OBJECT_UNSET = object() + + +def __parse_test_attributes( + output_elem, attrib, parse_elements=False, parse_discovered_datasets=False, profile=None +) -> Tuple[Optional[str], ToolSourceTestOutputAttributes]: assert_list = __parse_assert_list(output_elem) # Allow either file or value to specify a target file to 
compare result with # file was traditionally used by outputs and value by extra files. - file = attrib.pop("file", attrib.pop("value", None)) + file: Optional[str] = attrib.pop("file", attrib.pop("value", None)) # File no longer required if an list of assertions was present. - attributes = {} + value_object: Any = VALUE_OBJECT_UNSET if "value_json" in attrib: - attributes["object"] = json.loads(attrib.pop("value_json")) + value_object = json.loads(attrib.pop("value_json")) # Method of comparison - attributes["compare"] = attrib.pop("compare", "diff").lower() + compare: str = attrib.pop("compare", "diff").lower() # Number of lines to allow to vary in logs (for dates, etc) - attributes["lines_diff"] = int(attrib.pop("lines_diff", "0")) + lines_diff: int = int(attrib.pop("lines_diff", "0")) # Allow a file size to vary if sim_size compare - attributes["delta"] = int(attrib.pop("delta", DEFAULT_DELTA)) - attributes["delta_frac"] = float(attrib["delta_frac"]) if "delta_frac" in attrib else DEFAULT_DELTA_FRAC - attributes["sort"] = string_as_bool(attrib.pop("sort", False)) - attributes["decompress"] = string_as_bool(attrib.pop("decompress", False)) + delta: int = int(attrib.pop("delta", DEFAULT_DELTA)) + delta_frac: Optional[float] = float(attrib["delta_frac"]) if "delta_frac" in attrib else DEFAULT_DELTA_FRAC + sort: bool = string_as_bool(attrib.pop("sort", DEFAULT_SORT)) + decompress: bool = string_as_bool(attrib.pop("decompress", DEFAULT_DECOMPRESS)) # `location` may contain an URL to a remote file that will be used to download `file` (if not already present on disk). 
- location = attrib.get("location") - # Parameters for "image_diff" comparison - attributes["metric"] = attrib.pop("metric", DEFAULT_METRIC) - attributes["eps"] = float(attrib.pop("eps", DEFAULT_EPS)) - attributes["pin_labels"] = attrib.pop("pin_labels", DEFAULT_PIN_LABELS) + location: Optional[str] = attrib.get("location") if location and file is None: file = os.path.basename(location) # If no file specified, try to get filename from URL last component - attributes["location"] = location + # Parameters for "image_diff" comparison + metric: str = attrib.pop("metric", DEFAULT_METRIC) + eps: float = float(attrib.pop("eps", DEFAULT_EPS)) + pin_labels: Optional[Any] = attrib.pop("pin_labels", DEFAULT_PIN_LABELS) + count: Optional[int] = None try: - attributes["count"] = int(attrib.pop("count")) + count = int(attrib.pop("count")) except KeyError: - attributes["count"] = None - extra_files = [] + pass + extra_files: List[Dict[str, Any]] = [] + ftype: Optional[str] = None if "ftype" in attrib: - attributes["ftype"] = attrib["ftype"] + ftype = attrib["ftype"] for extra in output_elem.findall("extra_files"): extra_files.append(__parse_extra_files_elem(extra)) - metadata = {} + metadata: Dict[str, Any] = {} for metadata_elem in output_elem.findall("metadata"): metadata[metadata_elem.get("name")] = metadata_elem.get("value") md5sum = attrib.get("md5", None) checksum = attrib.get("checksum", None) - element_tests = {} + element_tests: Dict[str, Any] = {} if parse_elements: element_tests = __parse_element_tests(output_elem, profile=profile) - primary_datasets = {} + primary_datasets: Dict[str, Any] = {} if parse_discovered_datasets: for primary_elem in output_elem.findall("discovered_dataset") or []: primary_attrib = dict(primary_elem.attrib) @@ -846,22 +859,39 @@ def __parse_test_attributes(output_elem, attrib, parse_elements=False, parse_dis has_checksum = md5sum or checksum has_nested_tests = extra_files or element_tests or primary_datasets - has_object = "object" in 
attributes + has_object = not (value_object is VALUE_OBJECT_UNSET) if not (assert_list or file or metadata or has_checksum or has_nested_tests or has_object): raise Exception( "Test output defines nothing to check (e.g. must have a 'file' check against, assertions to check, metadata or checksum tests, etc...)" ) - attributes["assert_list"] = assert_list - attributes["extra_files"] = extra_files - attributes["metadata"] = metadata - attributes["md5"] = md5sum - attributes["checksum"] = checksum - attributes["elements"] = element_tests - attributes["primary_datasets"] = primary_datasets + attributes = ToolSourceTestOutputAttributes( + ftype=ftype, + compare=compare, + lines_diff=lines_diff, + delta=delta, + delta_frac=delta_frac, + sort=sort, + decompress=decompress, + metric=metric, + eps=eps, + pin_labels=pin_labels, + location=location, + count=count, + metadata=metadata, + md5=md5sum, + checksum=checksum, + primary_datasets=primary_datasets, + elements=element_tests, + assert_list=assert_list, + extra_files=extra_files, + ) + if value_object is not VALUE_OBJECT_UNSET: + attributes["object"] = value_object return file, attributes -def __parse_assert_list(output_elem): +def __parse_assert_list(output_elem) -> AssertionList: assert_elem = output_elem.find("assert_contents") return __parse_assert_list_from_elem(assert_elem) @@ -952,15 +982,15 @@ def _copy_to_dict_if_present(elem, rval, attributes): return rval -def __parse_inputs_elems(test_elem, i): - raw_inputs = [] +def __parse_inputs_elems(test_elem, i) -> ToolSourceTestInputs: + raw_inputs: ToolSourceTestInputs = [] for param_elem in test_elem.findall("param"): raw_inputs.append(__parse_param_elem(param_elem, i)) return raw_inputs -def __parse_param_elem(param_elem, i=0): +def __parse_param_elem(param_elem, i=0) -> ToolSourceTestInput: attrib = dict(param_elem.attrib) if "values" in attrib: value = attrib["values"].split(",") diff --git a/lib/galaxy/tool_util/parser/yaml.py 
b/lib/galaxy/tool_util/parser/yaml.py index 5393b05b3953..c354bfd33d49 100644 --- a/lib/galaxy/tool_util/parser/yaml.py +++ b/lib/galaxy/tool_util/parser/yaml.py @@ -11,8 +11,10 @@ from galaxy.tool_util.deps import requirements from galaxy.tool_util.parser.util import ( + DEFAULT_DECOMPRESS, DEFAULT_DELTA, DEFAULT_DELTA_FRAC, + DEFAULT_SORT, ) from .interface import ( AssertionDict, @@ -250,7 +252,8 @@ def _parse_test(i, test_dict) -> ToolSourceTest: "lines_diff": 0, "delta": DEFAULT_DELTA, "delta_frac": DEFAULT_DELTA_FRAC, - "sort": False, + "sort": DEFAULT_SORT, + "decompress": DEFAULT_DECOMPRESS, } # TODO attributes["extra_files"] = [] diff --git a/lib/galaxy/tool_util/verify/_types.py b/lib/galaxy/tool_util/verify/_types.py index e7362fafd7b8..26bb7161715d 100644 --- a/lib/galaxy/tool_util/verify/_types.py +++ b/lib/galaxy/tool_util/verify/_types.py @@ -7,6 +7,10 @@ Tuple, ) +# inputs that have been processed with parse.py and expanded out +ExpandedToolInputs = Dict[str, Any] +# ExpandedToolInputs where any model objects have been json-ified with to_dict() +ExpandedToolInputsJsonified = Dict[str, Any] ExtraFileInfoDictT = Dict[str, Any] RequiredFileTuple = Tuple[str, ExtraFileInfoDictT] RequiredFilesT = List[RequiredFileTuple] diff --git a/lib/galaxy/tool_util/verify/interactor.py b/lib/galaxy/tool_util/verify/interactor.py index 52b0218cedfb..3bc5750e8c57 100644 --- a/lib/galaxy/tool_util/verify/interactor.py +++ b/lib/galaxy/tool_util/verify/interactor.py @@ -40,6 +40,8 @@ TestCollectionDef, TestCollectionOutputDef, TestSourceTestOutputColllection, + ToolSourceTestInputs, + ToolSourceTestOutputs, ) from galaxy.util import requests from galaxy.util.bunch import Bunch @@ -49,6 +51,8 @@ ) from . 
import verify from ._types import ( + ExpandedToolInputs, + ExpandedToolInputsJsonified, RequiredDataTablesT, RequiredFilesT, RequiredLocFileT, @@ -96,8 +100,8 @@ def __getitem__(self, item): class ValidToolTestDict(TypedDict): - inputs: Any - outputs: Any + inputs: ExpandedToolInputs + outputs: ToolSourceTestOutputs output_collections: List[TestSourceTestOutputColllection] stdout: NotRequired[AssertionList] stderr: NotRequired[AssertionList] @@ -231,7 +235,7 @@ def get_tests_summary(self): assert response.status_code == 200, f"Non 200 response from tool tests available API. [{response.content}]" return response.json() - def get_tool_tests(self, tool_id: str, tool_version: Optional[str] = None) -> ToolTestDictsT: + def get_tool_tests(self, tool_id: str, tool_version: Optional[str] = None) -> List["ToolTestDescriptionDict"]: url = f"tools/{tool_id}/test_data" params = {"tool_version": tool_version} if tool_version else None response = self._get(url, data=params) @@ -1344,7 +1348,7 @@ def verify_tool( client_test_config: Optional[TestConfig] = None, skip_with_reference_data: bool = False, skip_on_dynamic_param_errors: bool = False, - _tool_test_dicts: Optional[ToolTestDictsT] = None, # extension point only for tests + _tool_test_dicts: Optional[List["ToolTestDescriptionDict"]] = None, # extension point only for tests ): if resource_parameters is None: resource_parameters = {} @@ -1385,7 +1389,7 @@ def verify_tool( return tool_test_dict.setdefault("maxseconds", maxseconds) - testdef = ToolTestDescription(cast(ToolTestDict, tool_test_dict)) + testdef = ToolTestDescription(tool_test_dict) _handle_def_errors(testdef) created_history = False @@ -1664,9 +1668,12 @@ def __init__(self, output_exceptions, job_stdio): class ToolTestDescriptionDict(TypedDict): + tool_id: str + tool_version: Optional[str] name: str - inputs: Any - outputs: Any + test_index: int + inputs: ExpandedToolInputsJsonified + outputs: ToolSourceTestOutputs output_collections: 
List[TestSourceTestOutputColllection] stdout: Optional[AssertionList] stderr: Optional[AssertionList] @@ -1680,10 +1687,138 @@ class ToolTestDescriptionDict(TypedDict): required_data_tables: List[Any] required_loc_files: List[str] error: bool - tool_id: str - tool_version: Optional[str] - test_index: int exception: Optional[str] + maxseconds: NotRequired[Optional[int]] + + +DEFAULT_NUM_OUTPUTS: Optional[int] = None +DEFAULT_OUTPUT_COLLECTIONS: List[TestSourceTestOutputColllection] = [] +DEFAULT_REQUIRED_FILES: RequiredFilesT = [] +DEFAULT_REQUIRED_DATA_TABLES: RequiredDataTablesT = [] +DEFAULT_REQUIRED_LOC_FILES: RequiredLocFileT = [] +DEFAULT_COMMAND_LINE: Optional[AssertionList] = [] +DEFAULT_COMMAND_VERSION: Optional[AssertionList] = [] +DEFAULT_STDOUT: Optional[AssertionList] = [] +DEFAULT_STDERR: Optional[AssertionList] = [] +DEFAULT_OUTPUTS: ToolSourceTestOutputs = [] +DEFAULT_EXPECT_EXIT_CODE: Optional[int] = None +DEFAULT_EXPECT_FAILURE: bool = False +DEFAULT_EXPECT_TEST_FAILURE: bool = False +DEFAULT_EXCEPTION: Optional[str] = None + + +def adapt_tool_source_dict(processed_dict: ToolTestDict) -> "ToolTestDescriptionDict": + """Convert the dictionaries parsed from tool sources (ToolTestDict) to a ToolTestDescriptionDict. + + ToolTestDescription is used inside and outside of Galaxy, so convert the dictionaries to the format + produced by ToolTestDescription.to_dict() and then construct a ToolTestDescription from that. 
+ """ + test_index: int = _get_test_index(processed_dict) + name = _get_test_name(processed_dict, test_index) + error_in_test_definition = processed_dict["error"] + + exception: Optional[str] = DEFAULT_EXCEPTION + output_collections: List[TestSourceTestOutputColllection] = [] + num_outputs: Optional[int] = DEFAULT_NUM_OUTPUTS + required_files: RequiredFilesT = DEFAULT_REQUIRED_FILES + required_data_tables: RequiredDataTablesT = DEFAULT_REQUIRED_DATA_TABLES + required_loc_files: RequiredLocFileT = DEFAULT_REQUIRED_LOC_FILES + command_line: Optional[AssertionList] = DEFAULT_COMMAND_LINE + command_version: Optional[AssertionList] = DEFAULT_COMMAND_VERSION + stdout: Optional[AssertionList] = DEFAULT_STDERR + stderr: Optional[AssertionList] = DEFAULT_STDERR + outputs: ToolSourceTestOutputs = DEFAULT_OUTPUTS + expect_exit_code: Optional[int] = DEFAULT_EXPECT_EXIT_CODE + expect_failure: bool = DEFAULT_EXPECT_FAILURE + expect_test_failure: bool = DEFAULT_EXPECT_TEST_FAILURE + inputs: ExpandedToolInputsJsonified = {} + + if not error_in_test_definition: + processed_test_dict = cast(ValidToolTestDict, processed_dict) + maxseconds = _get_maxseconds(processed_test_dict) + output_collections = processed_test_dict.get("output_collections", []) + if "num_outputs" in processed_test_dict and processed_test_dict["num_outputs"]: + num_outputs = int(processed_test_dict["num_outputs"]) + + required_files = processed_test_dict.get("required_files", DEFAULT_REQUIRED_FILES) + required_data_tables = processed_test_dict.get("required_data_tables", DEFAULT_REQUIRED_DATA_TABLES) + required_loc_files = processed_test_dict.get("required_loc_files", DEFAULT_REQUIRED_LOC_FILES) + command_line = processed_test_dict.get("command_line", DEFAULT_COMMAND_LINE) + command_version = processed_test_dict.get("command_version", DEFAULT_COMMAND_VERSION) + stdout = processed_test_dict.get("stdout", DEFAULT_STDOUT) + stderr = processed_test_dict.get("stderr", DEFAULT_STDERR) + outputs = 
processed_test_dict.get("outputs", DEFAULT_OUTPUTS) + raw_expect_exit_code: Optional[Union[str, int]] = processed_test_dict.get( + "expect_exit_code", DEFAULT_EXPECT_EXIT_CODE + ) + if raw_expect_exit_code is not None: + expect_exit_code = int(raw_expect_exit_code) + + expect_failure = processed_test_dict.get("expect_failure", DEFAULT_EXPECT_FAILURE) + expect_test_failure = processed_test_dict.get("expect_test_failure", DEFAULT_EXPECT_TEST_FAILURE) + inputs = expanded_inputs_from_json(processed_test_dict.get("inputs", {})) + else: + invalid_test_dict = cast(InvalidToolTestDict, processed_dict) + maxseconds = DEFAULT_TOOL_TEST_WAIT + exception = invalid_test_dict.get("exception", DEFAULT_EXCEPTION) + + return ToolTestDescriptionDict( + test_index=test_index, + name=name, + error=error_in_test_definition, + maxseconds=maxseconds, + tool_id=processed_dict["tool_id"], + tool_version=processed_dict.get("tool_version"), + exception=exception, + num_outputs=num_outputs, + required_files=required_files, + required_data_tables=required_data_tables, + required_loc_files=required_loc_files, + command_line=command_line, + command_version=command_version, + stdout=stdout, + stderr=stderr, + outputs=outputs, + output_collections=output_collections, + expect_exit_code=expect_exit_code, + expect_failure=expect_failure, + expect_test_failure=expect_test_failure, + inputs=inputs, + ) + + +def _get_test_index(test_dict: Union[ToolTestDict, ToolTestDescriptionDict]) -> int: + assert "test_index" in test_dict, "Invalid processed test description, must have a 'test_index' for naming, etc.." 
+ return test_dict["test_index"] + + +def _get_test_name(test_dict: Union[ToolTestDict, ToolTestDescriptionDict], test_index: int) -> str: + name = cast(str, test_dict.get("name", f"Test-{test_index + 1}")) + return name + + +def _get_maxseconds(test_dict: Union[ToolTestDict, ToolTestDescriptionDict]) -> int: + return int(test_dict.get("maxseconds") or DEFAULT_TOOL_TEST_WAIT or 86400) + + +def expanded_inputs_from_json(expanded_inputs_json: ExpandedToolInputsJsonified) -> ExpandedToolInputs: + loaded_inputs: ExpandedToolInputs = {} + for key, value in expanded_inputs_json.items(): + if isinstance(value, dict) and value.get("model_class"): + loaded_inputs[key] = TestCollectionDef.from_dict(value) + else: + loaded_inputs[key] = value + return loaded_inputs + + +def expanded_inputs_to_json(expanded_inputs: ExpandedToolInputs) -> ExpandedToolInputsJsonified: + inputs_dict: ExpandedToolInputsJsonified = {} + for key, value in expanded_inputs.items(): + if hasattr(value, "to_dict"): + inputs_dict[key] = value.to_dict() + else: + inputs_dict[key] = value + return inputs_dict class ToolTestDescription: @@ -1694,6 +1829,9 @@ class ToolTestDescription: """ name: str + tool_id: str + tool_version: Optional[str] + test_index: int num_outputs: Optional[int] stdout: Optional[AssertionList] stderr: Optional[AssertionList] @@ -1706,69 +1844,38 @@ class ToolTestDescription: expect_failure: bool expect_test_failure: bool exception: Optional[str] + inputs: ExpandedToolInputs + outputs: ToolSourceTestOutputs output_collections: List[TestCollectionOutputDef] + maxseconds: Optional[int] - def __init__(self, processed_test_dict: ToolTestDict): - assert ( - "test_index" in processed_test_dict - ), "Invalid processed test description, must have a 'test_index' for naming, etc.." 
- test_index = processed_test_dict["test_index"] - name = cast(str, processed_test_dict.get("name", f"Test-{test_index + 1}")) - error_in_test_definition = processed_test_dict["error"] - num_outputs: Optional[int] = None - if not error_in_test_definition: - processed_test_dict = cast(ValidToolTestDict, processed_test_dict) - maxseconds = int(processed_test_dict.get("maxseconds") or DEFAULT_TOOL_TEST_WAIT or 86400) - output_collections = processed_test_dict.get("output_collections", []) - if "num_outputs" in processed_test_dict and processed_test_dict["num_outputs"]: - num_outputs = int(processed_test_dict["num_outputs"]) - self.required_files = processed_test_dict.get("required_files", []) - self.required_data_tables = processed_test_dict.get("required_data_tables", []) - self.required_loc_files = processed_test_dict.get("required_loc_files", []) - self.command_line = processed_test_dict.get("command_line", None) - self.command_version = processed_test_dict.get("command_version", None) - self.stdout = processed_test_dict.get("stdout", None) - self.stderr = processed_test_dict.get("stderr", None) - else: - processed_test_dict = cast(InvalidToolTestDict, processed_test_dict) - maxseconds = DEFAULT_TOOL_TEST_WAIT - output_collections = [] - self.required_files = [] - self.required_data_tables = [] - self.required_loc_files = [] - self.command_line = None - self.command_version = None - self.stdout = None - self.stderr = None - - self.test_index = test_index - assert ( - "tool_id" in processed_test_dict - ), "Invalid processed test description, must have a 'tool_id' for naming, etc.." 
- self.tool_id = processed_test_dict["tool_id"] - self.tool_version = processed_test_dict.get("tool_version") - self.name = name - self.maxseconds = maxseconds - - inputs = processed_test_dict.get("inputs", {}) - loaded_inputs = {} - for key, value in inputs.items(): - if isinstance(value, dict) and value.get("model_class"): - loaded_inputs[key] = TestCollectionDef.from_dict(value) - else: - loaded_inputs[key] = value - - self.inputs = loaded_inputs - self.outputs = processed_test_dict.get("outputs", []) - self.num_outputs = num_outputs - - self.error = processed_test_dict.get("error", False) - self.exception = cast(Optional[str], processed_test_dict.get("exception", None)) + @staticmethod + def from_tool_source_dict(processed_test_dict: ToolTestDict) -> "ToolTestDescription": + return ToolTestDescription(adapt_tool_source_dict(processed_test_dict)) + def __init__(self, json_dict: ToolTestDescriptionDict): + self.test_index = _get_test_index(json_dict) + self.name = _get_test_name(json_dict, self.test_index) + self.error = json_dict["error"] + self.exception = json_dict.get("exception", DEFAULT_EXCEPTION) + output_collections = json_dict.get("output_collections", DEFAULT_OUTPUT_COLLECTIONS) self.output_collections = [TestCollectionOutputDef.from_dict(d) for d in output_collections] - self.expect_exit_code = cast(Optional[int], processed_test_dict.get("expect_exit_code", None)) - self.expect_failure = cast(bool, processed_test_dict.get("expect_failure", False)) - self.expect_test_failure = cast(bool, processed_test_dict.get("expect_test_failure", False)) + self.num_outputs = json_dict.get("num_outputs", DEFAULT_NUM_OUTPUTS) + self.required_files = json_dict.get("required_files", DEFAULT_REQUIRED_FILES) + self.required_data_tables = json_dict.get("required_data_tables", DEFAULT_REQUIRED_DATA_TABLES) + self.required_loc_files = json_dict.get("required_loc_files", DEFAULT_REQUIRED_LOC_FILES) + self.command_line = json_dict.get("command_line", DEFAULT_COMMAND_LINE) + 
self.command_version = json_dict.get("command_version", DEFAULT_COMMAND_VERSION) + self.stdout = json_dict.get("stdout", DEFAULT_STDOUT) + self.stderr = json_dict.get("stderr", DEFAULT_STDERR) + self.outputs = json_dict.get("outputs", DEFAULT_OUTPUTS) + self.expect_exit_code = json_dict.get("expect_exit_code", DEFAULT_EXPECT_EXIT_CODE) + self.expect_failure = json_dict.get("expect_failure", DEFAULT_EXPECT_FAILURE) + self.expect_test_failure = json_dict.get("expect_test_failure", DEFAULT_EXPECT_TEST_FAILURE) + self.inputs = json_dict.get("inputs", {}) + self.tool_id = json_dict["tool_id"] + self.tool_version = json_dict.get("tool_version") + self.maxseconds = _get_maxseconds(json_dict) def test_data(self): """ @@ -1777,15 +1884,9 @@ def test_data(self): return test_data_iter(self.required_files) def to_dict(self) -> ToolTestDescriptionDict: - inputs_dict = {} - for key, value in self.inputs.items(): - if hasattr(value, "to_dict"): - inputs_dict[key] = value.to_dict() - else: - inputs_dict[key] = value - + inputs = expanded_inputs_to_json(self.inputs) return { - "inputs": inputs_dict, + "inputs": inputs, "outputs": self.outputs, "output_collections": [_.to_dict() for _ in self.output_collections], "num_outputs": self.num_outputs, @@ -1805,6 +1906,7 @@ def to_dict(self) -> ToolTestDescriptionDict: "required_loc_files": self.required_loc_files, "error": self.error, "exception": self.exception, + "maxseconds": self.maxseconds, } diff --git a/lib/galaxy/tool_util/verify/parse.py b/lib/galaxy/tool_util/verify/parse.py index 00415cc76535..7df3a11d8b1f 100644 --- a/lib/galaxy/tool_util/verify/parse.py +++ b/lib/galaxy/tool_util/verify/parse.py @@ -31,6 +31,7 @@ unicodify, ) from ._types import ( + ExpandedToolInputs, ExtraFileInfoDictT, RequiredDataTablesT, RequiredFilesT, @@ -120,7 +121,7 @@ def _description_from_tool_source( } ) - return ToolTestDescription(processed_test_dict) + return ToolTestDescription.from_tool_source_dict(processed_test_dict) def 
_process_raw_inputs( @@ -131,14 +132,14 @@ def _process_raw_inputs( required_data_tables: RequiredDataTablesT, required_loc_files: RequiredLocFileT, parent_context: Optional[AnyParamContext] = None, -): +) -> ExpandedToolInputs: """ Recursively expand flat list of inputs into "tree" form of flat list (| using to nest to new levels) structure and expand dataset information as proceeding to populate self.required_files. """ parent_context = parent_context or RootParamContext() - expanded_inputs = {} + expanded_inputs: ExpandedToolInputs = {} for input_source in input_sources: input_type = input_source.parse_input_type() name = input_source.parse_name() diff --git a/lib/galaxy/tool_util/verify/script.py b/lib/galaxy/tool_util/verify/script.py index f14f0f68df4f..fbb67fc11b37 100644 --- a/lib/galaxy/tool_util/verify/script.py +++ b/lib/galaxy/tool_util/verify/script.py @@ -26,7 +26,7 @@ from galaxy.tool_util.verify.interactor import ( DictClientTestConfig, GalaxyInteractorApi, - ToolTestDictsT, + ToolTestDescriptionDict, verify_tool, ) @@ -341,9 +341,11 @@ def build_case_references( test_references.append(test_reference) else: assert tool_id - tool_test_dicts: ToolTestDictsT = galaxy_interactor.get_tool_tests(tool_id, tool_version=tool_version) + tool_test_dicts: List[ToolTestDescriptionDict] = galaxy_interactor.get_tool_tests( + tool_id, tool_version=tool_version + ) for i, tool_test_dict in enumerate(tool_test_dicts): - this_tool_version = tool_test_dict.get("tool_version", tool_version) + this_tool_version = tool_test_dict.get("tool_version") or tool_version this_test_index = i if test_index == ALL_TESTS or i == test_index: test_reference = TestReference(tool_id, this_tool_version, this_test_index) diff --git a/lib/galaxy/webapps/galaxy/api/tools.py b/lib/galaxy/webapps/galaxy/api/tools.py index ea78cd9f5696..59ad93ef5f54 100644 --- a/lib/galaxy/webapps/galaxy/api/tools.py +++ b/lib/galaxy/webapps/galaxy/api/tools.py @@ -31,6 +31,7 @@ FetchDataFormPayload, 
FetchDataPayload, ) +from galaxy.tool_util.verify.interactor import ToolTestDescriptionDict from galaxy.tools.evaluation import global_tool_errors from galaxy.util.zipstream import ZipstreamWrapper from galaxy.web import ( @@ -316,7 +317,7 @@ def tests_summary(self, trans: GalaxyWebTransaction, **kwd): return test_counts_by_tool @expose_api_anonymous_and_sessionless - def test_data(self, trans: GalaxyWebTransaction, id, **kwd): + def test_data(self, trans: GalaxyWebTransaction, id, **kwd) -> List[ToolTestDescriptionDict]: """ GET /api/tools/{tool_id}/test_data?tool_version={tool_version}