From 87014307e85a460f331caa8d344d022cc9cd59c4 Mon Sep 17 00:00:00 2001 From: mvdbeek Date: Fri, 27 Oct 2023 18:39:33 +0200 Subject: [PATCH] Render invocation messages in test reports --- planemo/galaxy/activity.py | 2 + planemo/reports/build_report.py | 43 +++++++++- planemo/reports/macros.tmpl | 10 +++ planemo/reports/report_markdown.tpl | 7 +- .../dataset_failed-test.yml | 2 + .../dataset_failed.yml | 19 +++++ .../invalid_when_expression-test.yml | 10 +++ .../invalid_when_expression.yml | 13 ++++ .../output_not_found-test.yml | 5 ++ .../output_not_found.yml | 15 ++++ .../functional_test_tools/job_properties.xml | 78 +++++++++++++++++++ tests/test_cmd_test.py | 57 ++++++++++++++ 12 files changed, 258 insertions(+), 3 deletions(-) create mode 100644 tests/data/scheduling_failure_workflows/dataset_failed-test.yml create mode 100644 tests/data/scheduling_failure_workflows/dataset_failed.yml create mode 100644 tests/data/scheduling_failure_workflows/invalid_when_expression-test.yml create mode 100644 tests/data/scheduling_failure_workflows/invalid_when_expression.yml create mode 100644 tests/data/scheduling_failure_workflows/output_not_found-test.yml create mode 100644 tests/data/scheduling_failure_workflows/output_not_found.yml create mode 100644 tests/data/tools/functional_test_tools/job_properties.xml diff --git a/planemo/galaxy/activity.py b/planemo/galaxy/activity.py index 6dead53da..3547fdcf1 100644 --- a/planemo/galaxy/activity.py +++ b/planemo/galaxy/activity.py @@ -697,6 +697,8 @@ def collect_invocation_details(self, invocation_id=None): "invocation_state": self.invocation_state, "history_state": self.history_state, "error_message": self.error_message, + # Messages are only present from 23.0 onward + "messages": invocation.get("messages", []), }, } return invocation_details diff --git a/planemo/reports/build_report.py b/planemo/reports/build_report.py index adc0d018c..32b6f37a1 100644 --- a/planemo/reports/build_report.py +++ b/planemo/reports/build_report.py 
@@ -9,6 +9,46 @@ TITLE = "Results (powered by Planemo)" +cancel_fragment = "Invocation scheduling cancelled because" +fail_fragment = "Invocation scheduling failed because" + + +def render_message_to_string(invocation_message): + # ChatGPT did a reasonable job of translating this from https://github.com/galaxyproject/galaxy/blob/d92bbb144ffcda7e17368cf43dd25c8a9a3a7dd6/client/src/components/WorkflowInvocationState/InvocationMessage.vue#L93-L172 + reason = invocation_message["reason"] + if reason == "user_request": + return f"{cancel_fragment} user requested cancellation." + elif reason == "history_deleted": + return f"{cancel_fragment} the history of the invocation was deleted." + elif reason == "cancelled_on_review": + return f"{cancel_fragment} the invocation was paused at step {invocation_message['workflow_step_id'] + 1} and not approved." + elif reason == "collection_failed": + return f"{fail_fragment} step {invocation_message['workflow_step_id'] + 1} requires a dataset collection created by step {invocation_message['dependent_workflow_step_id'] + 1}, but dataset collection entered a failed state." + elif reason == "dataset_failed": + if invocation_message.get("dependent_workflow_step_id") is not None: + return f"{fail_fragment} step {invocation_message['workflow_step_id'] + 1} requires a dataset created by step {invocation_message['dependent_workflow_step_id'] + 1}, but dataset entered a failed state." + else: + return f"{fail_fragment} step {invocation_message['workflow_step_id'] + 1} requires a dataset, but dataset entered a failed state." + elif reason == "job_failed": + return f"{fail_fragment} step {invocation_message['workflow_step_id'] + 1} depends on job(s) created in step {invocation_message['dependent_workflow_step_id'] + 1}, but a job for that step failed." 
+ elif reason == "output_not_found": + return f"{fail_fragment} step {invocation_message['workflow_step_id'] + 1} depends on output '{invocation_message['output_name']}' of step {invocation_message['dependent_workflow_step_id'] + 1}, but this step did not produce an output of that name." + elif reason == "expression_evaluation_failed": + return f"{fail_fragment} step {invocation_message['workflow_step_id'] + 1} contains an expression that could not be evaluated." + elif reason == "when_not_boolean": + return f"{fail_fragment} step {invocation_message['workflow_step_id'] + 1} is a conditional step and the result of the when expression is not a boolean type." + elif reason == "unexpected_failure": + at_step = "" + if invocation_message.get("workflow_step_id") is not None: + at_step = f" at step {invocation_message['workflow_step_id'] + 1}" + if "details" in invocation_message and invocation_message["details"]: + return f"{fail_fragment} an unexpected failure occurred{at_step}: '{invocation_message['details']}'" + return f"{fail_fragment} an unexpected failure occurred{at_step}." + elif reason == "workflow_output_not_found": + return f"Defined workflow output '{invocation_message['output_name']}' was not found in step {invocation_message['workflow_step_id'] + 1}." + else: + return reason + def build_report(structured_data, report_type="html", execution_type="Test", **kwds): """Use report_{report_type}.tpl to build page for report.""" @@ -19,12 +59,12 @@ def build_report(structured_data, report_type="html", execution_type="Test", **k __fix_test_ids(environment) environment = __inject_summary(environment) + environment["execution_type"] = execution_type if report_type == "html": # The HTML report format needs a lot of extra, custom data. # IMO, this seems to suggest it should be embedded. 
environment["title"] = None - environment["execution_type"] = execution_type markdown = template_data(environment, "report_markdown.tpl") environment["title"] = " ".join((environment["execution_type"], TITLE)) environment["raw_data"] = base64.b64encode(markdown.encode("utf-8")).decode("utf-8") @@ -50,6 +90,7 @@ def template_data(environment, template_name, **kwds): env_kwargs["trim_blocks"] = True env = Environment(loader=PackageLoader("planemo", "reports"), **env_kwargs) env.filters["strip_control_characters"] = lambda x: strip_control_characters(x) if x else x + env.globals["render_message_to_string"] = render_message_to_string template = env.get_template(template_name) return template.render(**environment) diff --git a/planemo/reports/macros.tmpl b/planemo/reports/macros.tmpl index dd40f95af..7edb924d2 100644 --- a/planemo/reports/macros.tmpl +++ b/planemo/reports/macros.tmpl @@ -65,4 +65,14 @@ {% endif %} {% endfor %} +{% endmacro %} + + +{% macro render_invocation_messages(messages, summary_label='Invocation Messages') %} + * {{summary_label}} +{% for message in messages %} + + - {{ render_message_to_string(message) }} + +{% endfor %} {% endmacro %} \ No newline at end of file diff --git a/planemo/reports/report_markdown.tpl b/planemo/reports/report_markdown.tpl index e9f104950..2888d5354 100644 --- a/planemo/reports/report_markdown.tpl +++ b/planemo/reports/report_markdown.tpl @@ -1,4 +1,4 @@ -{% from 'macros.tmpl' import render_invocation_details, render_job_parameters, render_steps %} +{% from 'macros.tmpl' import render_invocation_details, render_invocation_messages, render_job_parameters, render_steps %} {% if title %} # {{ execution_type }} {{ title }} @@ -32,7 +32,8 @@ {% set display_job_attributes = {'command_line': 'Command Line', 'exit_code': 'Exit Code', 'stderr': 'Standard Error', 'stdout': 'Standard Output', 'traceback': 'Traceback'} %} {% for status, desc in {'error': 'Errored', 'failure': 'Failed', 'success': 'Passed'}.items() if state[status]%} 
-
{{ desc }} {{ execution_type }}s +{% set expanded = "open" if status in ("error", "failure") else "" %} +
{{ desc }} {{ execution_type }}s {% for test in raw_data.tests %} {% if test.data.status == status %} {% if test.data.status == 'success' %} @@ -75,6 +76,8 @@ #### Workflow invocation details +{{render_invocation_messages(test.data.invocation_details.details.messages)}} + {{render_steps(test.data.invocation_details.steps.values(), display_job_attributes)}} {{render_invocation_details(test.data.invocation_details.details)}} diff --git a/tests/data/scheduling_failure_workflows/dataset_failed-test.yml b/tests/data/scheduling_failure_workflows/dataset_failed-test.yml new file mode 100644 index 000000000..88dd44bc6 --- /dev/null +++ b/tests/data/scheduling_failure_workflows/dataset_failed-test.yml @@ -0,0 +1,2 @@ +- job: {} + outputs: {} diff --git a/tests/data/scheduling_failure_workflows/dataset_failed.yml b/tests/data/scheduling_failure_workflows/dataset_failed.yml new file mode 100644 index 000000000..51b61619f --- /dev/null +++ b/tests/data/scheduling_failure_workflows/dataset_failed.yml @@ -0,0 +1,19 @@ +class: GalaxyWorkflow +steps: + job_props: + tool_id: job_properties + state: + thebool: true + failbool: true + apply: + tool_id: __APPLY_RULES__ + in: + input: job_props/list_output + state: + rules: + rules: + - type: add_column_metadata + value: identifier0 + mapping: + - type: list_identifiers + columns: [0] diff --git a/tests/data/scheduling_failure_workflows/invalid_when_expression-test.yml b/tests/data/scheduling_failure_workflows/invalid_when_expression-test.yml new file mode 100644 index 000000000..1233e3010 --- /dev/null +++ b/tests/data/scheduling_failure_workflows/invalid_when_expression-test.yml @@ -0,0 +1,10 @@ +- job: + some_file: + class: File + path: ../hello.txt + should_run: true + outputs: + some_output: + asserts: + has_text: + text: "Hello World!" 
diff --git a/tests/data/scheduling_failure_workflows/invalid_when_expression.yml b/tests/data/scheduling_failure_workflows/invalid_when_expression.yml new file mode 100644 index 000000000..2a1586aa5 --- /dev/null +++ b/tests/data/scheduling_failure_workflows/invalid_when_expression.yml @@ -0,0 +1,13 @@ +class: GalaxyWorkflow +inputs: + should_run: + type: boolean + some_file: + type: data +steps: + cat1: + tool_id: cat1 + in: + input1: some_file + should_run: should_run + when: $(:syntaxError:) diff --git a/tests/data/scheduling_failure_workflows/output_not_found-test.yml b/tests/data/scheduling_failure_workflows/output_not_found-test.yml new file mode 100644 index 000000000..d7934f9db --- /dev/null +++ b/tests/data/scheduling_failure_workflows/output_not_found-test.yml @@ -0,0 +1,5 @@ +- job: + data_input: + path: ../hello.txt + class: File + outputs: {} diff --git a/tests/data/scheduling_failure_workflows/output_not_found.yml b/tests/data/scheduling_failure_workflows/output_not_found.yml new file mode 100644 index 000000000..a212bd017 --- /dev/null +++ b/tests/data/scheduling_failure_workflows/output_not_found.yml @@ -0,0 +1,15 @@ +class: GalaxyWorkflow +inputs: + data_input: data +steps: + cat1: + tool_id: cat1 + in: + input1: data_input + outputs: + out_file1: + rename: "my new name" + first_cat1: + tool_id: cat1 + in: + input1: cat1/does_not_exist diff --git a/tests/data/tools/functional_test_tools/job_properties.xml b/tests/data/tools/functional_test_tools/job_properties.xml new file mode 100644 index 000000000..19ee032ab --- /dev/null +++ b/tests/data/tools/functional_test_tools/job_properties.xml @@ -0,0 +1,78 @@ + + + + + echo 'v1.1' + &2 && + echo 'This is a line of text.' > '$out_file1' && + cp '$out_file1' '$one' && + cp '$out_file1' '$two' && + sleep $sleepsecs +#else + echo 'The bool is not true' && + echo 'The bool is very not true' 1>&2 && + echo 'This is a different line of text.' 
> '$out_file1' && + sleep $sleepsecs && + sh -c 'exit 2' +#end if +#if $failbool + ## use ';' to concatenate commands so that the next one is run independently + ## of the exit code of the previous one + ; exit 127 +#end if + ]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/test_cmd_test.py b/tests/test_cmd_test.py index e02658514..59fcc5e09 100644 --- a/tests/test_cmd_test.py +++ b/tests/test_cmd_test.py @@ -18,6 +18,8 @@ TEST_TOOLS_DIR, ) +SCHEDULING_WORKFLOWS_PATH = os.path.join(TEST_DATA_DIR, "scheduling_failure_workflows") +FUNCTIONAL_TEST_TOOLS = os.path.join(TEST_DATA_DIR, "tools", "functional_test_tools") FETCH_DATA_DATA_MANAGER_TEST_PATH = "data_manager/data_manager_fetch_genome_dbkeys_all_fasta/data_manager/data_manager_fetch_genome_all_fasta_dbkeys.xml" BOWTIE2_DATA_MANAGER_TEST_PATH = ( "data_manager/data_manager_bowtie2_index_builder/data_manager/bowtie2_index_builder.xml" @@ -377,3 +379,58 @@ def test_workflow_with_identical_output_names(self): test_command = self.append_profile_argument_if_needed(test_command) test_command.append(test_artifact) self._check_exit_code(test_command, exit_code=0) + + @skip_if_environ("PLANEMO_SKIP_GALAXY_TEST") + def test_scheduling_error_invalid_when_expression(self): + with self._isolate() as test_dir: + test_artifact = os.path.join(SCHEDULING_WORKFLOWS_PATH, "invalid_when_expression.yml") + markdown_output_path = os.path.join(test_dir, "test_output.md") + test_command = self._test_command() + test_command = self.append_profile_argument_if_needed(test_command) + test_command.append(test_artifact) + test_command.append("--test_output_markdown") + test_command.append(markdown_output_path) + self._check_exit_code(test_command, exit_code=1) + with open(markdown_output_path) as out: + markdown_content = out.read() + assert ( + "Invocation scheduling failed because step 3 contains an expression that could not be evaluated" + in markdown_content + ) 
+ + @skip_if_environ("PLANEMO_SKIP_GALAXY_TEST") + def test_scheduling_error_output_not_found(self): + with self._isolate() as test_dir: + test_artifact = os.path.join(SCHEDULING_WORKFLOWS_PATH, "output_not_found.yml") + markdown_output_path = os.path.join(test_dir, "test_output.md") + test_command = self._test_command() + test_command = self.append_profile_argument_if_needed(test_command) + test_command.append(test_artifact) + test_command.append("--test_output_markdown") + test_command.append(markdown_output_path) + self._check_exit_code(test_command, exit_code=1) + with open(markdown_output_path) as out: + markdown_content = out.read() + assert ( + "Invocation scheduling failed because step 3 depends on output 'does_not_exist' of step 2, but this step did not produce an output of that name" + in markdown_content + ) + + @skip_if_environ("PLANEMO_SKIP_GALAXY_TEST") + def test_scheduling_error_dataset_failed(self): + job_properties = os.path.join(FUNCTIONAL_TEST_TOOLS, "job_properties.xml") + with self._isolate() as test_dir: + test_artifact = os.path.join(SCHEDULING_WORKFLOWS_PATH, "dataset_failed.yml") + markdown_output_path = os.path.join(test_dir, "test_output.md") + test_command = self._test_command("--extra_tools", job_properties) + test_command = self.append_profile_argument_if_needed(test_command) + test_command.append(test_artifact) + test_command.append("--test_output_markdown") + test_command.append(markdown_output_path) + self._check_exit_code(test_command, exit_code=1) + with open(markdown_output_path) as out: + markdown_content = out.read() + assert ( + "Invocation scheduling failed because step 2 requires a dataset, but dataset entered a failed state." + in markdown_content + )