diff --git a/lib/galaxy/tools/actions/__init__.py b/lib/galaxy/tools/actions/__init__.py
index 1a16ed58af3c..826b5d54059a 100644
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -681,6 +681,7 @@ def handle_output(name, output, hidden=None):
                 output_collection.mark_as_populated()
             for hdca in output_collections.out_collection_instances.values():
                 hdca.visible = False
+                hdca.collection.mark_as_populated()
             object_store_populator = ObjectStorePopulator(trans.app, trans.user)
             for data in out_data.values():
                 data.set_skipped(object_store_populator)
diff --git a/lib/galaxy_test/api/test_workflows.py b/lib/galaxy_test/api/test_workflows.py
index 519281ded0a9..8c9b7187006d 100644
--- a/lib/galaxy_test/api/test_workflows.py
+++ b/lib/galaxy_test/api/test_workflows.py
@@ -2455,6 +2455,30 @@ def test_run_nested_conditional_workflow_steps(self):
                 if step["workflow_step_label"] == "cat1":
                     assert sum(1 for j in step["jobs"] if j["state"] == "skipped") == 1
 
+    def test_run_workflow_conditional_subworkflow_step_with_hdca_creation(self):
+        # Regression test, ensures scheduling proceeds even if a skipped step creates a collection
+        with self.dataset_populator.test_history() as history_id:
+            self._run_workflow(
+                """
+class: GalaxyWorkflow
+inputs: []
+steps:
+  conditional_subworkflow_step:
+    when: $(false)
+    run:
+      class: GalaxyWorkflow
+      inputs: []
+      steps:
+        create_collection:
+          tool_id: create_input_collection
+        flatten_collection:
+          tool_id: cat_list
+          in:
+            input1: create_collection/output
+""",
+                history_id=history_id,
+            )
+
     def test_run_workflow_conditional_step_map_over_expression_tool(self):
         with self.dataset_populator.test_history() as history_id:
             summary = self._run_workflow(
diff --git a/lib/galaxy_test/base/populators.py b/lib/galaxy_test/base/populators.py
index d373b23d389d..0347fbdfadb2 100644
--- a/lib/galaxy_test/base/populators.py
+++ b/lib/galaxy_test/base/populators.py
@@ -1770,7 +1770,11 @@ def upload_yaml_workflow(self, yaml_content: YamlContentT, **kwds) -> str:
         return workflow_id
 
     def wait_for_invocation(
-        self, workflow_id: str, invocation_id: str, timeout: timeout_type = DEFAULT_TIMEOUT, assert_ok: bool = True
+        self,
+        workflow_id: Optional[str],
+        invocation_id: str,
+        timeout: timeout_type = DEFAULT_TIMEOUT,
+        assert_ok: bool = True,
     ) -> str:
         url = f"invocations/{invocation_id}"
 
@@ -1818,7 +1822,7 @@ def invocation_count():
 
     def wait_for_workflow(
         self,
-        workflow_id: str,
+        workflow_id: Optional[str],
         invocation_id: str,
         history_id: str,
         assert_ok: bool = True,
@@ -1827,6 +1831,9 @@ def wait_for_workflow(
         """Wait for a workflow invocation to completely schedule and then history to be complete."""
         self.wait_for_invocation(workflow_id, invocation_id, timeout=timeout, assert_ok=assert_ok)
+        for step in self.get_invocation(invocation_id)["steps"]:
+            if step["subworkflow_invocation_id"]:
+                self.wait_for_invocation(None, step["subworkflow_invocation_id"], timeout=timeout, assert_ok=assert_ok)
         self.dataset_populator.wait_for_history_jobs(history_id, assert_ok=assert_ok, timeout=timeout)
 
     def get_invocation(self, invocation_id, step_details=False):
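
Why the one-line mark_as_populated() call in the first hunk matters: the workflow scheduler
defers any step whose input collection is not yet populated, and a skipped job never runs the
code path that would populate its output HDCA, so downstream steps (like flatten_collection in
the new regression test) could wait forever. Below is a minimal toy sketch of that gate, not
Galaxy's actual scheduler or model code; the names Collection and step_ready are hypothetical
stand-ins, and only mark_as_populated mirrors a real method name from the patch.

    # Toy model of the scheduling gate the fix satisfies (hypothetical names).
    class Collection:
        def __init__(self) -> None:
            # Nothing ever sets this flag when the creating job is skipped.
            self.populated = False

        def mark_as_populated(self) -> None:
            self.populated = True

    def step_ready(input_collection: Collection) -> bool:
        # The scheduler defers a step until all of its input collections are populated.
        return input_collection.populated

    hdca_collection = Collection()
    assert not step_ready(hdca_collection)  # pre-fix: scheduling stalls here indefinitely
    hdca_collection.mark_as_populated()     # what the skipped-job path now does per HDCA
    assert step_ready(hdca_collection)

The populators.py change follows the same logic from the test side: wait_for_workflow now also
waits on each step's subworkflow invocation, so a hang like the one above surfaces as a test
timeout instead of passing silently.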