diff --git a/client/src/components/History/CurrentHistory/HistoryNavigation.vue b/client/src/components/History/CurrentHistory/HistoryNavigation.vue
index 4413c2401f31..ef9dc11ae0c9 100644
--- a/client/src/components/History/CurrentHistory/HistoryNavigation.vue
+++ b/client/src/components/History/CurrentHistory/HistoryNavigation.vue
@@ -19,6 +19,7 @@ import {
faUserLock,
} from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/vue-fontawesome";
+import axios from "axios";
import {
BButton,
BButtonGroup,
@@ -35,9 +36,12 @@ import { computed, ref } from "vue";
import { canMutateHistory, type HistorySummary } from "@/api";
import { iframeRedirect } from "@/components/plugins/legacyNavigation";
+import { useToast } from "@/composables/toast";
+import { getAppRoot } from "@/onload/loadConfig";
import { useHistoryStore } from "@/stores/historyStore";
import { useUserStore } from "@/stores/userStore";
import localize from "@/utils/localization";
+import { rethrowSimple } from "@/utils/simple-error";
import CopyModal from "@/components/History/Modals/CopyModal.vue";
import SelectorModal from "@/components/History/Modals/SelectorModal.vue";
@@ -81,6 +85,8 @@ const showCopyModal = ref(false);
const purgeHistory = ref(false);
+const toast = useToast();
+
const userStore = useUserStore();
const historyStore = useHistoryStore();
@@ -122,6 +128,16 @@ function userTitle(title: string) {
return localize(title);
}
}
+
+async function resumePausedJobs() {
+ const url = `${getAppRoot()}history/resume_paused_jobs?current=True`;
+ try {
+ const response = await axios.get(url);
+ toast.success(response.data.message);
+ } catch (e) {
+ rethrowSimple(e);
+ }
+}
@@ -205,7 +221,7 @@ function userTitle(title: string) {
+ @click="resumePausedJobs()">
Resume Paused Jobs
diff --git a/client/src/components/Workflow/Editor/NodeOutput.vue b/client/src/components/Workflow/Editor/NodeOutput.vue
index 7f0e5c68ec06..b3fe8a014f3a 100644
--- a/client/src/components/Workflow/Editor/NodeOutput.vue
+++ b/client/src/components/Workflow/Editor/NodeOutput.vue
@@ -310,7 +310,7 @@ const outputDetails = computed(() => {
const outputType =
collectionType && collectionType.isCollection && collectionType.collectionType
? `output is ${collectionTypeToDescription(collectionType)}`
- : `output is dataset`;
+ : `output is ${terminal.value.type || "dataset"}`;
if (isMultiple.value) {
if (!collectionType) {
collectionType = NULL_COLLECTION_TYPE_DESCRIPTION;
diff --git a/client/src/components/WorkflowInvocationState/WorkflowInvocationState.test.ts b/client/src/components/WorkflowInvocationState/WorkflowInvocationState.test.ts
index 3b1a1b4cb6ea..a6c7ad6b29ec 100644
--- a/client/src/components/WorkflowInvocationState/WorkflowInvocationState.test.ts
+++ b/client/src/components/WorkflowInvocationState/WorkflowInvocationState.test.ts
@@ -51,7 +51,11 @@ const invocationJobsSummaryById = {
// Mock the invocation store to return the expected invocation data given the invocation ID
jest.mock("@/stores/invocationStore", () => {
const originalModule = jest.requireActual("@/stores/invocationStore");
- const mockFetchInvocationForId = jest.fn();
+ const mockFetchInvocationForId = jest.fn().mockImplementation((fetchParams) => {
+ if (fetchParams.id === "error-invocation") {
+ throw new Error("User does not own specified item.");
+ }
+ });
const mockFetchInvocationJobsSummaryForId = jest.fn();
return {
...originalModule,
@@ -106,8 +110,8 @@ describe("WorkflowInvocationState check invocation and job terminal states", ()
const wrapper = await mountWorkflowInvocationState(invocationData.id);
expect(isInvocationAndJobTerminal(wrapper)).toBe(true);
- // Neither the invocation nor the jobs summary should be fetched for terminal invocations
- assertInvocationFetched(0);
+ // Invocation is fetched once and the jobs summary isn't fetched at all for terminal invocations
+ assertInvocationFetched(1);
assertJobsSummaryFetched(0);
});
@@ -115,17 +119,23 @@ describe("WorkflowInvocationState check invocation and job terminal states", ()
const wrapper = await mountWorkflowInvocationState("not-fetched-invocation");
expect(isInvocationAndJobTerminal(wrapper)).toBe(false);
- // Both, the invocation and jobs summary should be fetched once if the invocation is not in the store
+ // Invocation is fetched once and the jobs summary is then never fetched if the invocation is not in the store
assertInvocationFetched(1);
- assertJobsSummaryFetched(1);
+ assertJobsSummaryFetched(0);
+
+ // expect there to be an alert for the missing invocation
+ const alert = wrapper.find("balert-stub");
+ expect(alert.attributes("variant")).toBe("info");
+ const span = alert.find("span");
+ expect(span.text()).toBe("Invocation not found.");
});
it("determines that invocation is not terminal with non-terminal state", async () => {
const wrapper = await mountWorkflowInvocationState("non-terminal-id");
expect(isInvocationAndJobTerminal(wrapper)).toBe(false);
- // Only the invocation should be fetched for non-terminal invocations
- assertInvocationFetched(1);
+ // Only the invocation is fetched for non-terminal invocations; once for the initial fetch and then for the polling
+ assertInvocationFetched(2);
assertJobsSummaryFetched(0);
});
@@ -133,10 +143,24 @@ describe("WorkflowInvocationState check invocation and job terminal states", ()
const wrapper = await mountWorkflowInvocationState("non-terminal-jobs");
expect(isInvocationAndJobTerminal(wrapper)).toBe(false);
- // Only the jobs summary should be fetched, not the invocation since it is in scheduled/terminal state
- assertInvocationFetched(0);
+ // Only the jobs summary should be polled, the invocation is initially fetched only since it is in scheduled/terminal state
+ assertInvocationFetched(1);
assertJobsSummaryFetched(1);
});
+
+ it("determines that errored invocation fetches are handled correctly", async () => {
+ const wrapper = await mountWorkflowInvocationState("error-invocation");
+ expect(isInvocationAndJobTerminal(wrapper)).toBe(false);
+
+ // Invocation is fetched once and the jobs summary isn't fetched at all for errored invocations
+ assertInvocationFetched(1);
+ assertJobsSummaryFetched(0);
+
+ // expect there to be an alert for the handled error
+ const alert = wrapper.find("balert-stub");
+ expect(alert.attributes("variant")).toBe("danger");
+ expect(alert.text()).toBe("User does not own specified item.");
+ });
});
describe("WorkflowInvocationState check 'Report' tab disabled state", () => {
diff --git a/client/src/components/WorkflowInvocationState/WorkflowInvocationState.vue b/client/src/components/WorkflowInvocationState/WorkflowInvocationState.vue
index 603c51236c34..f02558ad365d 100644
--- a/client/src/components/WorkflowInvocationState/WorkflowInvocationState.vue
+++ b/client/src/components/WorkflowInvocationState/WorkflowInvocationState.vue
@@ -11,6 +11,7 @@ import { useAnimationFrameResizeObserver } from "@/composables/sensors/animation
import { useInvocationStore } from "@/stores/invocationStore";
import { useWorkflowStore } from "@/stores/workflowStore";
import localize from "@/utils/localization";
+import { errorMessageAsString } from "@/utils/simple-error";
import { cancelWorkflowScheduling } from "./services";
import { isTerminal, jobCount, runningCount } from "./util";
@@ -49,6 +50,8 @@ const invocationStore = useInvocationStore();
const stepStatesInterval = ref(undefined);
const jobStatesInterval = ref(undefined);
+const initialLoading = ref(true);
+const errorMessage = ref(null);
// after the report tab is first activated, no longer lazy-render it from then on
const reportActive = ref(false);
@@ -69,8 +72,10 @@ useAnimationFrameResizeObserver(scrollableDiv, ({ clientSize, scrollSize }) => {
isScrollable.value = scrollSize.height >= clientSize.height + 1;
});
-const invocation = computed(
- () => invocationStore.getInvocationById(props.invocationId) as WorkflowInvocationElementView
+const invocation = computed(() =>
+ !initialLoading.value && !errorMessage.value
+ ? (invocationStore.getInvocationById(props.invocationId) as WorkflowInvocationElementView)
+ : null
);
const invocationState = computed(() => invocation.value?.state || "new");
const invocationAndJobTerminal = computed(() => invocationSchedulingTerminal.value && jobStatesTerminal.value);
@@ -105,9 +110,19 @@ const workflowStore = useWorkflowStore();
const isDeletedWorkflow = computed(() => getWorkflow()?.deleted === true);
const workflowVersion = computed(() => getWorkflow()?.version);
-onMounted(() => {
- pollStepStatesUntilTerminal();
- pollJobStatesUntilTerminal();
+onMounted(async () => {
+ try {
+ await invocationStore.fetchInvocationForId({ id: props.invocationId });
+ initialLoading.value = false;
+ if (invocation.value) {
+ await pollStepStatesUntilTerminal();
+ await pollJobStatesUntilTerminal();
+ }
+ } catch (e) {
+ errorMessage.value = errorMessageAsString(e);
+ } finally {
+ initialLoading.value = false;
+ }
});
onUnmounted(() => {
@@ -116,7 +131,7 @@ onUnmounted(() => {
});
async function pollStepStatesUntilTerminal() {
- if (!invocation.value || !invocationSchedulingTerminal.value) {
+ if (!invocationSchedulingTerminal.value) {
await invocationStore.fetchInvocationForId({ id: props.invocationId });
stepStatesInterval.value = setTimeout(pollStepStatesUntilTerminal, 3000);
}
@@ -152,7 +167,7 @@ function getWorkflowId() {
}
function getWorkflowName() {
- return workflowStore.getStoredWorkflowNameByInstanceId(invocation.value?.workflow_id);
+ return workflowStore.getStoredWorkflowNameByInstanceId(invocation.value?.workflow_id || "");
}
@@ -261,7 +276,13 @@ function getWorkflowName() {
-
+
+
+ {{ errorMessage }}
+
+
+ Invocation not found.
+
diff --git a/lib/galaxy/managers/workflows.py b/lib/galaxy/managers/workflows.py
index a1534d6440cc..56ef059b9dc6 100644
--- a/lib/galaxy/managers/workflows.py
+++ b/lib/galaxy/managers/workflows.py
@@ -14,6 +14,7 @@
)
import sqlalchemy
+import yaml
from gxformat2 import (
from_galaxy_native,
ImporterGalaxyInterface,
@@ -635,9 +636,12 @@ def normalize_workflow_format(self, trans, as_dict):
galaxy_interface = Format2ConverterGalaxyInterface()
import_options = ImportOptions()
import_options.deduplicate_subworkflows = True
- as_dict = python_to_workflow(
- as_dict, galaxy_interface, workflow_directory=workflow_directory, import_options=import_options
- )
+ try:
+ as_dict = python_to_workflow(
+ as_dict, galaxy_interface, workflow_directory=workflow_directory, import_options=import_options
+ )
+ except yaml.scanner.ScannerError as e:
+ raise exceptions.MalformedContents(str(e))
return RawWorkflowDescription(as_dict, workflow_path)
diff --git a/lib/galaxy/tools/cross_product_flat.xml b/lib/galaxy/tools/cross_product_flat.xml
index 891772c78a6c..62b510a08c75 100644
--- a/lib/galaxy/tools/cross_product_flat.xml
+++ b/lib/galaxy/tools/cross_product_flat.xml
@@ -72,12 +72,66 @@
Synopsis
========
+@CROSS_PRODUCT_INTRO@
+====================
+How to use this tool
+====================
-===========
-Description
-===========
+@GALAXY_DOT_PRODUCT_SEMANTICS@
+Running input lists through this tool produces new dataset lists (described in detail below) that when using
+the same natural element-wise matching "map over" semantics described above produce every combination of the
+elements of the two lists compared against each other. Running a tool with these two outputs instead of the initial
+two inputs produces a list of the comparison of each combination of pairs from the respective inputs.
+
+.. image:: ${static_path}/images/tools/collection_ops/flat_crossproduct_output.png
+ :alt: The Flat Cartesian Product of Two Collections
+ :width: 500
+
+The result of running a subsequent tool with the outputs produced by this tool will be a much larger list
+whose element identifiers are the concatenation of the combinations of the element identifiers from the
+two input lists.
+
+.. image:: ${static_path}/images/tools/collection_ops/flat_crossproduct_separator.png
+ :alt: Flat Cross Product Identifier Separator
+ :width: 500
+
+============================================
+What this tool does (technical details)
+============================================
+
+This tool consumes two lists - we will call them ``input_a`` and ``input_b``. If ``input_a``
+has length ``n`` and dataset elements identified as ``a1``, ``a2``, ... ``an`` and ``input_b``
+has length ``m`` and dataset elements identified as ``b1``, ``b2``, ... ``bm``, then this tool
+produces a pair of larger lists - each of size ``n*m``.
+
+Both output lists will be the same length and contain the same set of element identifiers in the
+same order. If the index ``k`` can be described as ``(i-1)*n + (j-1)`` where ``1 <= i <= m`` and ``1 <= j <= n``
+then the element identifier for this kth element is the concatenation of the element identifier for
+the ith item of ``input_a`` and the jth item of ``input_b``.
+
+In the first output list, this kth element will be the ith element of ``input_a``. In the second
+output list, the kth element will be the jth element of ``input_b``.
+
+.. image:: ${static_path}/images/tools/collection_ops/flat_cross_product_outputs.png
+ :alt: Flat Cross Product Outputs
+ :width: 500
+
+These list structures might appear to be a little odd, but they have the very useful property
+that if you match up corresponding elements of the lists the result is that each combination of
+elements in ``input_a`` and ``input_b`` is matched up once.
+
+.. image:: ${static_path}/images/tools/collection_ops/flat_cross_product_matched.png
+ :alt: Flat Cross Product Matching Datasets
+ :width: 500
+
+Running a downstream comparison tool that compares two datasets with these two lists produces a
+new list with every combination of comparisons.
+
+.. image:: ${static_path}/images/tools/collection_ops/flat_cross_product_downstream.png
+ :alt: Flat Cross Product All-vs-All Result
+ :width: 500
----
diff --git a/lib/galaxy/tools/cross_product_nested.xml b/lib/galaxy/tools/cross_product_nested.xml
index b4ba4d596de5..4bf89c2f020a 100644
--- a/lib/galaxy/tools/cross_product_nested.xml
+++ b/lib/galaxy/tools/cross_product_nested.xml
@@ -76,12 +76,69 @@
Synopsis
========
+@CROSS_PRODUCT_INTRO@
+====================
+How to use this tool
+====================
-===========
-Description
-===========
+@GALAXY_DOT_PRODUCT_SEMANTICS@
+Running input lists through this tool produces new list structures (described in detail below) that when using
+the same natural element-wise matching "map over" semantics described above produce every combination of the
+elements of the two lists compared against each other. Running a tool with these two outputs instead of the initial
+two inputs produces a nested list structure where the jth element of the inner list of the ith element of the outer
+list is a comparison of the ith element of the first list to the jth element of the second list.
+Put more simply, the result is a nested list where the identifiers of an element describe which inputs were
+matched to produce the comparison output found at that element.
+
+.. image:: ${static_path}/images/tools/collection_ops/nested_crossproduct_output.png
+ :alt: The Cartesian Product of Two Collections
+ :width: 500
+
+============================================
+What this tool does (technical details)
+============================================
+
+This tool consumes two flat lists. We will call the input collections ``input_a`` and ``input_b``. If ``input_a``
+has length ``n`` and dataset elements identified as ``a1``, ``a2``, ... ``an`` and ``input_b``
+has length ``m`` and dataset elements identified as ``b1``, ``b2``, ... ``bm``, then this tool
+produces a pair of output nested lists (specifically of the ``list:list`` collection type) where
+the outer list is of length ``n`` and each inner list has a length of ``m`` (a ``n X m`` nested list). The jth element
+inside the outer list's ith element is a pseudo copy of the ith dataset of ``input_a``. One
+way to think about the output nested lists is as matrices. Here is a diagram of the first output
+showing the element identifiers of the outer and inner lists along with what dataset is being
+"copied" into this new collection.
+
+.. image:: ${static_path}/images/tools/collection_ops/nested_cross_product_out_1.png
+ :alt: Nested Cross Product First Output
+ :width: 500
+
+The second output is a nested list of pseudo copies of the elements of ``input_b`` instead of
+``input_a``. In particular the outer list is again of length ``n`` and each inner list is again
+of length ``m`` but this time the jth element inside the outer list's ith element is a pseudo copy
+of the jth dataset of ``input_b``. Here is the matrix of these outputs.
+
+.. image:: ${static_path}/images/tools/collection_ops/nested_cross_product_out_2.png
+ :alt: Nested Cross Product Second Output
+ :width: 500
+
+These nested list structures might appear to be a little odd, but they have the very useful property
+that if you match up corresponding elements of the nested lists the result is that each combination of
+elements in ``input_a`` and ``input_b`` is matched up once. The following diagram describes these matching
+datasets.
+
+.. image:: ${static_path}/images/tools/collection_ops/nested_cross_product_matching.png
+ :alt: Matching Inputs
+ :width: 500
+
+Running a tool that compares two datasets with these two nested lists produces a new nested list
+as described above. The following diagram shows the structure of this output and how the element
+identifiers are preserved and indicate what comparison was performed.
+
+.. image:: ${static_path}/images/tools/collection_ops/nested_cross_product_output.png
+ :alt: Matching Inputs
+ :width: 500
----
diff --git a/lib/galaxy/tools/model_operation_macros.xml b/lib/galaxy/tools/model_operation_macros.xml
index a3f16c5398e3..a3089ea0687d 100644
--- a/lib/galaxy/tools/model_operation_macros.xml
+++ b/lib/galaxy/tools/model_operation_macros.xml
@@ -4,6 +4,38 @@
class="ModelOperationToolAction"/>
This tool will create new history datasets copied from your input collections but your quota usage will not increase.
+
+
+
+
operation_3436
diff --git a/lib/galaxy/webapps/galaxy/controllers/history.py b/lib/galaxy/webapps/galaxy/controllers/history.py
index 2fa7e4130e81..ce4112871279 100644
--- a/lib/galaxy/webapps/galaxy/controllers/history.py
+++ b/lib/galaxy/webapps/galaxy/controllers/history.py
@@ -244,7 +244,7 @@ def resume_paused_jobs(self, trans, current=False, ids=None, **kwargs):
history = trans.get_history()
if history:
history.resume_paused_jobs()
- return trans.show_ok_message("Your jobs have been resumed.")
+ return {"message": "Your jobs have been resumed.", "status": "success"}
raise exceptions.RequestParameterInvalidException(
"You can currently only resume all the datasets of the current history."
)
diff --git a/lib/galaxy/workflow/modules.py b/lib/galaxy/workflow/modules.py
index eeece34fe554..d8e73f71f8ee 100644
--- a/lib/galaxy/workflow/modules.py
+++ b/lib/galaxy/workflow/modules.py
@@ -32,6 +32,7 @@
from galaxy.model import (
PostJobAction,
Workflow,
+ WorkflowInvocationStep,
WorkflowStep,
WorkflowStepConnection,
)
@@ -43,6 +44,7 @@
InvocationCancellationReviewFailed,
InvocationFailureDatasetFailed,
InvocationFailureExpressionEvaluationFailed,
+ InvocationFailureOutputNotFound,
InvocationFailureWhenNotBoolean,
)
from galaxy.tool_util.cwl.util import set_basename_and_derived_properties
@@ -762,7 +764,7 @@ def get_content_id(self):
return self.trans.security.encode_id(self.subworkflow.id)
def execute(
- self, trans, progress: "WorkflowProgress", invocation_step, use_cached_job: bool = False
+ self, trans, progress: "WorkflowProgress", invocation_step: WorkflowInvocationStep, use_cached_job: bool = False
) -> Optional[bool]:
"""Execute the given workflow step in the given workflow invocation.
Use the supplied workflow progress object to track outputs, find
@@ -822,7 +824,17 @@ def execute(
workflow_output_label = (
workflow_output.label or f"{workflow_output.workflow_step.order_index}:{workflow_output.output_name}"
)
- replacement = subworkflow_progress.get_replacement_workflow_output(workflow_output)
+ try:
+ replacement = subworkflow_progress.get_replacement_workflow_output(workflow_output)
+ except KeyError:
+ raise FailWorkflowEvaluation(
+ why=InvocationFailureOutputNotFound(
+ reason=FailureReason.output_not_found,
+ workflow_step_id=workflow_output.workflow_step_id,
+ output_name=workflow_output.output_name,
+ dependent_workflow_step_id=step.id,
+ )
+ )
outputs[workflow_output_label] = replacement
progress.set_step_outputs(invocation_step, outputs)
return None
@@ -974,8 +986,11 @@ def execute(
progress.set_outputs_for_input(invocation_step, step_outputs)
return None
- def recover_mapping(self, invocation_step, progress):
- progress.set_outputs_for_input(invocation_step, already_persisted=True)
+ def recover_mapping(self, invocation_step: WorkflowInvocationStep, progress: "WorkflowProgress"):
+ super().recover_mapping(invocation_step, progress)
+ progress.set_outputs_for_input(
+ invocation_step, progress.outputs.get(invocation_step.workflow_step_id), already_persisted=True
+ )
def get_export_state(self):
return self._parse_state_into_dict()
diff --git a/lib/galaxy_test/api/test_workflows.py b/lib/galaxy_test/api/test_workflows.py
index a3e1a4a8bab7..39b288ef7fd0 100644
--- a/lib/galaxy_test/api/test_workflows.py
+++ b/lib/galaxy_test/api/test_workflows.py
@@ -1,4 +1,5 @@
import base64
+import io
import json
import os
import shutil
@@ -7154,6 +7155,84 @@ def test_invocation_with_collection_mapping(self):
assert invocation_steps[1]["state"] == "ok"
+ def test_data_input_recovery_on_delayed_input(self):
+ self.workflow_populator.run_workflow(
+ """
+class: GalaxyWorkflow
+inputs: {}
+outputs:
+ the_output:
+ outputSource: child/output
+steps:
+ running_output:
+ tool_id: job_properties
+ tool_state:
+ failbool: false
+ sleepsecs: 3
+ thebool: false
+ child:
+ in:
+ input_dataset:
+ source: running_output/out_file1
+ run:
+ class: GalaxyWorkflow
+ inputs:
+ input_dataset: data
+ run_step:
+ default: false
+ optional: true
+ type: boolean
+ outputs:
+ output:
+ outputSource: conditional_cat/out_file1
+ steps:
+ conditional_cat:
+ tool_id: cat
+ when: $(inputs.when)
+ in:
+ input1: input_dataset
+ when:
+ source: run_step"""
+ )
+
+ def test_subworkflow_output_not_found_fails(self):
+ # This test might start failing if we ever validate connections before attempting to schedule
+ summary = self.workflow_populator.run_workflow(
+ """
+class: GalaxyWorkflow
+inputs:
+ input: data
+outputs:
+ the_output:
+ outputSource: child/output
+steps:
+ child:
+ in:
+ input_dataset:
+ source: input
+ run:
+ class: GalaxyWorkflow
+ inputs:
+ input_dataset: data
+ outputs:
+ output:
+ outputSource: cat/out_file_that_doesnt_exist
+ steps:
+ cat:
+ tool_id: cat
+ in:
+ input1: input_dataset
+test_data:
+ input:
+ value: 1.fasta
+ type: File
+ """,
+ assert_ok=False,
+ )
+ invocation = self.workflow_populator.get_invocation(summary.invocation_id)
+ assert invocation["state"] == "failed"
+ assert invocation["messages"][0]["reason"] == "output_not_found"
+
def _run_mapping_workflow(self):
history_id = self.dataset_populator.new_history()
summary = self._run_workflow(
@@ -7456,6 +7535,11 @@ def test_subworkflow_tags(self):
subworkflow = downloaded_workflow["steps"]["1"]["subworkflow"]
assert subworkflow["tags"] == []
+ def test_upload_malformated_yaml(self):
+ malformated_yaml = "class: GalaxyWorkflow:\n a-1:()"
+ r = self._post("workflows", files={"archive_file": io.StringIO(malformated_yaml)})
+ assert r.status_code == 400
+
class TestAdminWorkflowsApi(BaseWorkflowsApiTestCase):
require_admin_user = True
diff --git a/lib/galaxy_test/selenium/test_invocation_grid.py b/lib/galaxy_test/selenium/test_invocation_grid.py
index 32db11471280..5316b1740e13 100644
--- a/lib/galaxy_test/selenium/test_invocation_grid.py
+++ b/lib/galaxy_test/selenium/test_invocation_grid.py
@@ -22,10 +22,11 @@ def test_grid(self):
invocations=30,
)
gx_selenium_context.navigate_to_invocations()
+ invocations = gx_selenium_context.components.invocations
+ invocations.invocations_table.wait_for_visible()
# shows a maximum of 25 invocations per page
self._assert_showing_n_invocations(25)
- invocations = gx_selenium_context.components.invocations
invocations.pager.wait_for_visible()
self.screenshot("invocations_paginated_first_page")
self._next_page(invocations)
diff --git a/static/images/tools/collection_ops/dot_product.png b/static/images/tools/collection_ops/dot_product.png
new file mode 100644
index 000000000000..d5af4cc4df1b
Binary files /dev/null and b/static/images/tools/collection_ops/dot_product.png differ
diff --git a/static/images/tools/collection_ops/flat_cross_product_downstream.png b/static/images/tools/collection_ops/flat_cross_product_downstream.png
new file mode 100644
index 000000000000..4a86769453ea
Binary files /dev/null and b/static/images/tools/collection_ops/flat_cross_product_downstream.png differ
diff --git a/static/images/tools/collection_ops/flat_cross_product_matched.png b/static/images/tools/collection_ops/flat_cross_product_matched.png
new file mode 100644
index 000000000000..b03d0a62f223
Binary files /dev/null and b/static/images/tools/collection_ops/flat_cross_product_matched.png differ
diff --git a/static/images/tools/collection_ops/flat_cross_product_outputs.png b/static/images/tools/collection_ops/flat_cross_product_outputs.png
new file mode 100644
index 000000000000..6b2fed6eb612
Binary files /dev/null and b/static/images/tools/collection_ops/flat_cross_product_outputs.png differ
diff --git a/static/images/tools/collection_ops/flat_crossproduct_output.png b/static/images/tools/collection_ops/flat_crossproduct_output.png
new file mode 100644
index 000000000000..ba32e2c5a9d8
Binary files /dev/null and b/static/images/tools/collection_ops/flat_crossproduct_output.png differ
diff --git a/static/images/tools/collection_ops/flat_crossproduct_separator.png b/static/images/tools/collection_ops/flat_crossproduct_separator.png
new file mode 100644
index 000000000000..d843acebe571
Binary files /dev/null and b/static/images/tools/collection_ops/flat_crossproduct_separator.png differ
diff --git a/static/images/tools/collection_ops/nested_cross_product_matching.png b/static/images/tools/collection_ops/nested_cross_product_matching.png
new file mode 100644
index 000000000000..9a1a9f7c5b17
Binary files /dev/null and b/static/images/tools/collection_ops/nested_cross_product_matching.png differ
diff --git a/static/images/tools/collection_ops/nested_cross_product_out_1.png b/static/images/tools/collection_ops/nested_cross_product_out_1.png
new file mode 100644
index 000000000000..d28486693ea1
Binary files /dev/null and b/static/images/tools/collection_ops/nested_cross_product_out_1.png differ
diff --git a/static/images/tools/collection_ops/nested_cross_product_out_2.png b/static/images/tools/collection_ops/nested_cross_product_out_2.png
new file mode 100644
index 000000000000..322842362cfe
Binary files /dev/null and b/static/images/tools/collection_ops/nested_cross_product_out_2.png differ
diff --git a/static/images/tools/collection_ops/nested_cross_product_output.png b/static/images/tools/collection_ops/nested_cross_product_output.png
new file mode 100644
index 000000000000..3efeb8eb39e6
Binary files /dev/null and b/static/images/tools/collection_ops/nested_cross_product_output.png differ
diff --git a/static/images/tools/collection_ops/nested_crossproduct_output.png b/static/images/tools/collection_ops/nested_crossproduct_output.png
new file mode 100644
index 000000000000..8ce2a14128c8
Binary files /dev/null and b/static/images/tools/collection_ops/nested_crossproduct_output.png differ