diff --git a/.github/workflows/framework.yaml b/.github/workflows/framework_tools.yaml
similarity index 95%
rename from .github/workflows/framework.yaml
rename to .github/workflows/framework_tools.yaml
index dcc5e7614b15..a06f6e469aad 100644
--- a/.github/workflows/framework.yaml
+++ b/.github/workflows/framework_tools.yaml
@@ -1,4 +1,4 @@
-name: Framework tests
+name: Tool framework tests
on:
push:
paths-ignore:
@@ -64,9 +64,9 @@ jobs:
uses: actions/cache@v4
with:
path: 'galaxy root/.venv'
- key: gxy-venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('galaxy root/requirements.txt') }}-framework
+ key: gxy-venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('galaxy root/requirements.txt') }}-framework-tools
- name: Run tests
- run: ./run_tests.sh --coverage --framework
+ run: ./run_tests.sh --coverage --framework-tools
working-directory: 'galaxy root'
- uses: codecov/codecov-action@v3
with:
diff --git a/.github/workflows/framework_workflows.yaml b/.github/workflows/framework_workflows.yaml
new file mode 100644
index 000000000000..accbbd4c3736
--- /dev/null
+++ b/.github/workflows/framework_workflows.yaml
@@ -0,0 +1,79 @@
+name: Workflow framework tests
+on:
+ push:
+ paths-ignore:
+ - 'client/**'
+ - 'doc/**'
+ - 'lib/galaxy_test/selenium/**'
+ pull_request:
+ paths-ignore:
+ - 'client/**'
+ - 'doc/**'
+ - 'lib/galaxy_test/selenium/**'
+ schedule:
+ # Run at midnight UTC every Tuesday
+ - cron: '0 0 * * 2'
+env:
+ GALAXY_TEST_DBURI: 'postgresql://postgres:postgres@localhost:5432/galaxy?client_encoding=utf8'
+ GALAXY_TEST_RAISE_EXCEPTION_ON_HISTORYLESS_HDA: '1'
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+jobs:
+ test:
+ name: Test
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ['3.8']
+ services:
+ postgres:
+ image: postgres:13
+ env:
+ POSTGRES_USER: postgres
+ POSTGRES_PASSWORD: postgres
+ POSTGRES_DB: postgres
+ ports:
+ - 5432:5432
+ steps:
+ - if: github.event_name == 'schedule'
+ run: |
+ echo "GALAXY_CONFIG_OVERRIDE_METADATA_STRATEGY=extended" >> $GITHUB_ENV
+ echo "GALAXY_CONFIG_OVERRIDE_OUTPUTS_TO_WORKING_DIRECTORY=true" >> $GITHUB_ENV
+ - uses: actions/checkout@v4
+ with:
+ path: 'galaxy root'
+ - uses: actions/setup-node@v4
+ with:
+ node-version: '18.12.1'
+ cache: 'yarn'
+ cache-dependency-path: 'galaxy root/client/yarn.lock'
+ - uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Get full Python version
+ id: full-python-version
+ shell: bash
+ run: echo "version=$(python -c 'import sys; print("-".join(str(v) for v in sys.version_info))')" >> $GITHUB_OUTPUT
+ - name: Cache pip dir
+ uses: actions/cache@v4
+ with:
+ path: ~/.cache/pip
+ key: pip-cache-${{ matrix.python-version }}-${{ hashFiles('galaxy root/requirements.txt') }}
+ - name: Cache galaxy venv
+ uses: actions/cache@v4
+ with:
+ path: 'galaxy root/.venv'
+ key: gxy-venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('galaxy root/requirements.txt') }}-framework-workflows
+ - name: Run tests
+ run: ./run_tests.sh --coverage --framework-workflows
+ working-directory: 'galaxy root'
+ - uses: codecov/codecov-action@v3
+ with:
+ flags: framework
+ working-directory: 'galaxy root'
+ - uses: actions/upload-artifact@v4
+ if: failure()
+ with:
+ name: Framework test results (${{ matrix.python-version }})
+ path: 'galaxy root/run_framework_workflows_tests.html'
diff --git a/lib/galaxy/tool_util/parser/yaml.py b/lib/galaxy/tool_util/parser/yaml.py
index c354bfd33d49..6813db0f5211 100644
--- a/lib/galaxy/tool_util/parser/yaml.py
+++ b/lib/galaxy/tool_util/parser/yaml.py
@@ -310,6 +310,8 @@ def expand_dict_form(item):
return assert_list or None # XML variant is None if no assertions made
+# Planemo depends on this and was never updated unfortunately.
+# https://github.com/galaxyproject/planemo/blob/master/planemo/test/_check_output.py
__to_test_assert_list = to_test_assert_list
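
A minimal sketch of how this parser helper is consumed, mirroring the call added to `verify_file_contents_against_dict` below (the sample assertion values are illustrative, not from the patch):

```python
# Hypothetical usage: to_test_assert_list converts the YAML-style "asserts"
# block of a test definition into the internal assertion list that the
# verify machinery expects under the "assert_list" key.
from galaxy.tool_util.parser.yaml import to_test_assert_list

test_properties = {
    "asserts": [
        {"that": "has_text", "text": "samp1"},
    ],
}
test_properties["assert_list"] = to_test_assert_list(test_properties["asserts"])
```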
diff --git a/lib/galaxy/tool_util/verify/__init__.py b/lib/galaxy/tool_util/verify/__init__.py
index ec84f23ae345..2bcb31106bfb 100644
--- a/lib/galaxy/tool_util/verify/__init__.py
+++ b/lib/galaxy/tool_util/verify/__init__.py
@@ -45,6 +45,7 @@
DEFAULT_METRIC,
DEFAULT_PIN_LABELS,
)
+from galaxy.tool_util.parser.yaml import to_test_assert_list
from galaxy.util import unicodify
from galaxy.util.compression_utils import get_fileobj
from .asserts import verify_assertions
@@ -56,6 +57,8 @@
log = logging.getLogger(__name__)
DEFAULT_TEST_DATA_RESOLVER = TestDataResolver()
+GetFilenameT = Optional[Callable[[str], str]]
+GetLocationT = Optional[Callable[[str], str]]
def verify(
@@ -64,7 +67,7 @@ def verify(
attributes: Optional[Dict[str, Any]],
filename: Optional[str] = None,
get_filecontent: Optional[Callable[[str], bytes]] = None,
- get_filename: Optional[Callable[[str], str]] = None,
+ get_filename: GetFilenameT = None,
keep_outputs_dir: Optional[str] = None,
verify_extra_files: Optional[Callable] = None,
mode="file",
@@ -585,3 +588,55 @@ def files_image_diff(file1: str, file2: str, attributes: Optional[Dict[str, Any]
distance_eps = attributes.get("eps", DEFAULT_EPS)
if distance > distance_eps:
raise AssertionError(f"Image difference {distance} exceeds eps={distance_eps}.")
+
+
+# TODO: After a tool-util release including this is published, refactor planemo.test._check_output
+# to use this function. There is already a comment there about breaking fewer abstractions.
+# https://github.com/galaxyproject/planemo/blob/master/planemo/test/_check_output.py
+def verify_file_path_against_dict(
+ get_filename: GetFilenameT,
+ get_location: GetLocationT,
+ path: str,
+ test_properties,
+ test_data_target_dir: Optional[str] = None,
+) -> None:
+ with open(path, "rb") as f:
+ output_content = f.read()
+ item_label = f"Output with path {path}"
+ verify_file_contents_against_dict(
+ get_filename, get_location, item_label, output_content, test_properties, test_data_target_dir
+ )
+
+
+def verify_file_contents_against_dict(
+ get_filename: GetFilenameT,
+ get_location: GetLocationT,
+ item_label: str,
+ output_content: bytes,
+ test_properties,
+ test_data_target_dir: Optional[str] = None,
+) -> None:
+ # Support Galaxy-like file location (using "file") or CWL-like ("path" or "location").
+ expected_file = test_properties.get("file", None)
+ if expected_file is None:
+ expected_file = test_properties.get("path", None)
+ if expected_file is None:
+ location = test_properties.get("location")
+ if location:
+ if location.startswith(("http://", "https://")):
+ assert get_location
+ expected_file = get_location(location)
+ else:
+ expected_file = location.split("file://", 1)[-1]
+
+ if "asserts" in test_properties:
+ test_properties["assert_list"] = to_test_assert_list(test_properties["asserts"])
+ verify(
+ item_label,
+ output_content,
+ attributes=test_properties,
+ filename=expected_file,
+ get_filename=get_filename,
+ keep_outputs_dir=test_data_target_dir,
+ verify_extra_files=None,
+ )
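
A hedged usage sketch for the new helper; only behavior visible in the function above is assumed. It accepts Galaxy-style `file` or CWL-style `path`/`location` keys and runs any `asserts` against the raw output bytes:

```python
from galaxy.tool_util.verify import verify_file_contents_against_dict

# Assertion-only check: no expected file is named, so get_filename and
# get_location are never called and can be None.
test_properties = {
    "asserts": [{"that": "has_text", "text": "samp1"}],
}
verify_file_contents_against_dict(
    get_filename=None,
    get_location=None,
    item_label="Output named out",
    output_content=b"samp1\t10.0\nsamp2\t20.0\n",
    test_properties=test_properties,
)
```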
diff --git a/lib/galaxy/tool_util/verify/interactor.py b/lib/galaxy/tool_util/verify/interactor.py
index 77a27609a3ad..e403d08fa1aa 100644
--- a/lib/galaxy/tool_util/verify/interactor.py
+++ b/lib/galaxy/tool_util/verify/interactor.py
@@ -346,18 +346,7 @@ def _verify_metadata(self, history_id, hid, attributes):
`dbkey` and `tags` all map to the API description directly. Other metadata attributes
are assumed to be datatype-specific and mapped with a prefix of `metadata_`.
"""
- metadata = attributes.get("metadata", {}).copy()
- for key in metadata.copy().keys():
- if key not in ["name", "info", "tags", "created_from_basename"]:
- new_key = f"metadata_{key}"
- metadata[new_key] = metadata[key]
- del metadata[key]
- elif key == "info":
- metadata["misc_info"] = metadata["info"]
- del metadata["info"]
- expected_file_type = attributes.get("ftype", None)
- if expected_file_type:
- metadata["file_ext"] = expected_file_type
+ metadata = get_metadata_to_test(attributes)
if metadata:
@@ -370,29 +359,7 @@ def wait_for_content():
return None
dataset = wait_on(wait_for_content, desc="dataset metadata", timeout=10)
-
- for key, value in metadata.items():
- try:
- dataset_value = dataset.get(key, None)
-
- def compare(val, expected):
- if str(val) != str(expected):
- raise Exception(
- f"Dataset metadata verification for [{key}] failed, expected [{value}] but found [{dataset_value}]. Dataset API value was [{dataset}]." # noqa: B023
- )
-
- if isinstance(dataset_value, list):
- value = str(value).split(",")
- if len(value) != len(dataset_value):
- raise Exception(
- f"Dataset metadata verification for [{key}] failed, expected [{value}] but found [{dataset_value}], lists differ in length. Dataset API value was [{dataset}]."
- )
- for val, expected in zip(dataset_value, value):
- compare(val, expected)
- else:
- compare(dataset_value, value)
- except KeyError:
- raise Exception(f"Failed to verify dataset metadata, metadata key [{key}] was not found.")
+ compare_expected_metadata_to_api_response(metadata, dataset)
def wait_for_job(self, job_id: str, history_id: Optional[str] = None, maxseconds=DEFAULT_TOOL_TEST_WAIT) -> None:
self.wait_for(lambda: self.__job_ready(job_id, history_id), maxseconds=maxseconds)
@@ -1934,3 +1901,45 @@ def test_data_iter(required_files):
raise Exception(f"edit_attributes type ({edit_att.get('type', None)}) is unimplemented")
yield data_dict
+
+
+def compare_expected_metadata_to_api_response(metadata: dict, dataset: dict) -> None:
+ for key, value in metadata.items():
+ try:
+ dataset_value = dataset.get(key, None)
+
+ def compare(val, expected):
+ if str(val) != str(expected):
+ raise Exception(
+ f"Dataset metadata verification for [{key}] failed, expected [{value}] but found [{dataset_value}]. Dataset API value was [{dataset}]." # noqa: B023
+ )
+
+ if isinstance(dataset_value, list):
+ value = str(value).split(",")
+ if len(value) != len(dataset_value):
+ raise Exception(
+ f"Dataset metadata verification for [{key}] failed, expected [{value}] but found [{dataset_value}], lists differ in length. Dataset API value was [{dataset}]."
+ )
+ for val, expected in zip(dataset_value, value):
+ compare(val, expected)
+ else:
+ compare(dataset_value, value)
+ except KeyError:
+ raise Exception(f"Failed to verify dataset metadata, metadata key [{key}] was not found.")
+
+
+def get_metadata_to_test(test_properties: dict) -> dict:
+ """Fetch out metadata to test from test property dict and adapt it to keys the API produces."""
+ metadata = test_properties.get("metadata", {}).copy()
+ for key in metadata.copy().keys():
+ if key not in ["name", "info", "tags", "created_from_basename"]:
+ new_key = f"metadata_{key}"
+ metadata[new_key] = metadata[key]
+ del metadata[key]
+ elif key == "info":
+ metadata["misc_info"] = metadata["info"]
+ del metadata["info"]
+ expected_file_type = test_properties.get("ftype", None)
+ if expected_file_type:
+ metadata["file_ext"] = expected_file_type
+ return metadata
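
A small worked example of the key adaptation `get_metadata_to_test` performs (the input values are illustrative):

```python
from galaxy.tool_util.verify.interactor import get_metadata_to_test

# Datatype-specific keys gain a "metadata_" prefix, "info" maps to
# "misc_info", and an expected "ftype" surfaces as "file_ext".
test_properties = {
    "ftype": "fasta",
    "metadata": {"name": "the_dataset_pair suffix", "info": "mapped", "sequences": 2},
}
assert get_metadata_to_test(test_properties) == {
    "name": "the_dataset_pair suffix",
    "misc_info": "mapped",
    "metadata_sequences": 2,
    "file_ext": "fasta",
}
```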
diff --git a/lib/galaxy_test/api/test_workflows.py b/lib/galaxy_test/api/test_workflows.py
index c42da227984e..88d4112966fd 100644
--- a/lib/galaxy_test/api/test_workflows.py
+++ b/lib/galaxy_test/api/test_workflows.py
@@ -1495,36 +1495,6 @@ def test_run_workflow_by_name(self):
def test_run_workflow(self):
self.__run_cat_workflow(inputs_by="step_id")
- @skip_without_tool("multiple_versions")
- def test_run_versioned_tools(self):
- with self.dataset_populator.test_history() as history_01_id:
- workflow_version_01 = self._upload_yaml_workflow(
- """
-class: GalaxyWorkflow
-steps:
- multiple:
- tool_id: multiple_versions
- tool_version: "0.1"
- state:
- inttest: 0
-"""
- )
- self.workflow_populator.invoke_workflow_and_wait(workflow_version_01, history_id=history_01_id)
-
- with self.dataset_populator.test_history() as history_02_id:
- workflow_version_02 = self._upload_yaml_workflow(
- """
-class: GalaxyWorkflow
-steps:
- multiple:
- tool_id: multiple_versions
- tool_version: "0.2"
- state:
- inttest: 1
-"""
- )
- self.workflow_populator.invoke_workflow_and_wait(workflow_version_02, history_id=history_02_id)
-
def __run_cat_workflow(self, inputs_by):
workflow = self.workflow_populator.load_workflow(name="test_for_run")
workflow["steps"]["0"]["uuid"] = str(uuid4())
@@ -2771,93 +2741,6 @@ def test_run_subworkflow_replacement_parameters(self):
details = self.dataset_populator.get_history_dataset_details(history_id)
assert details["name"] == "moocow suffix"
- @skip_without_tool("create_2")
- def test_placements_from_text_inputs(self):
- with self.dataset_populator.test_history() as history_id:
- run_def = """
-class: GalaxyWorkflow
-inputs: []
-steps:
- create_2:
- tool_id: create_2
- state:
- sleep_time: 0
- outputs:
- out_file1:
- rename: "${replaceme} name"
- out_file2:
- rename: "${replaceme} name 2"
-test_data:
- replacement_parameters:
- replaceme: moocow
-"""
-
- self._run_jobs(run_def, history_id=history_id)
- details = self.dataset_populator.get_history_dataset_details(history_id)
- assert details["name"] == "moocow name 2"
-
- run_def = """
-class: GalaxyWorkflow
-inputs:
- replaceme: text
-steps:
- create_2:
- tool_id: create_2
- state:
- sleep_time: 0
- outputs:
- out_file1:
- rename: "${replaceme} name"
- out_file2:
- rename: "${replaceme} name 2"
-test_data:
- replaceme:
- value: moocow
- type: raw
-"""
- self._run_jobs(run_def, history_id=history_id)
- details = self.dataset_populator.get_history_dataset_details(history_id)
- assert details["name"] == "moocow name 2", details["name"]
-
- def test_placements_from_text_inputs_nested(self):
- with self.dataset_populator.test_history() as history_id:
- run_def = """
-class: GalaxyWorkflow
-inputs:
- replacemeouter: text
-steps:
- nested_workflow:
- run:
- class: GalaxyWorkflow
- inputs:
- replacemeinner: text
- outputs:
- workflow_output_1:
- outputSource: create_2/out_file1
- workflow_output_2:
- outputSource: create_2/out_file2
- steps:
- create_2:
- tool_id: create_2
- state:
- sleep_time: 0
- outputs:
- out_file1:
- rename: "${replacemeinner} name"
- out_file2:
- rename: "${replacemeinner} name 2"
- in:
- replacemeinner: replacemeouter
-
-test_data:
- replacemeouter:
- value: moocow
- type: raw
-"""
- self._run_jobs(run_def, history_id=history_id)
- details = self.dataset_populator.get_history_dataset_details(history_id)
- assert details["name"] == "moocow name 2", details["name"]
-
@skip_without_tool("random_lines1")
def test_run_runtime_parameters_after_pause(self):
with self.dataset_populator.test_history() as history_id:
@@ -2917,111 +2800,6 @@ def run_test(workflow_text):
run_test(NESTED_WORKFLOW_AUTO_LABELS_MODERN_SYNTAX)
- @skip_without_tool("cat1")
- @skip_without_tool("collection_paired_test")
- def test_workflow_run_zip_collections(self):
- with self.dataset_populator.test_history() as history_id:
- workflow_id = self._upload_yaml_workflow(
- """
-class: GalaxyWorkflow
-inputs:
- test_input_1: data
- test_input_2: data
-steps:
- first_cat:
- tool_id: cat1
- in:
- input1: test_input_1
- zip_it:
- tool_id: "__ZIP_COLLECTION__"
- in:
- input_forward: first_cat/out_file1
- input_reverse: test_input_2
- concat_pair:
- tool_id: collection_paired_test
- in:
- f1: zip_it/output
-"""
- )
- hda1 = self.dataset_populator.new_dataset(history_id, content="samp1\t10.0\nsamp2\t20.0\n")
- hda2 = self.dataset_populator.new_dataset(history_id, content="samp1\t20.0\nsamp2\t40.0\n")
- self.dataset_populator.wait_for_history(history_id, assert_ok=True)
- inputs = {
- "0": self._ds_entry(hda1),
- "1": self._ds_entry(hda2),
- }
- invocation_id = self.__invoke_workflow(workflow_id, inputs=inputs, history_id=history_id)
- self.workflow_populator.wait_for_invocation_and_jobs(history_id, workflow_id, invocation_id)
- content = self.dataset_populator.get_history_dataset_content(history_id)
- assert content.strip() == "samp1\t10.0\nsamp2\t20.0\nsamp1\t20.0\nsamp2\t40.0"
-
- @skip_without_tool("collection_paired_test")
- def test_workflow_flatten(self):
- with self.dataset_populator.test_history() as history_id:
- self._run_jobs(
- """
-class: GalaxyWorkflow
-steps:
- nested:
- tool_id: collection_creates_dynamic_nested
- state:
- sleep_time: 0
- foo: 'dummy'
- flatten:
- tool_id: '__FLATTEN__'
- state:
- input:
- $link: nested/list_output
- join_identifier: '-'
-""",
- test_data={},
- history_id=history_id,
- )
- details = self.dataset_populator.get_history_collection_details(history_id, hid=14)
- assert details["collection_type"] == "list"
- elements = details["elements"]
- identifiers = [e["element_identifier"] for e in elements]
- assert len(identifiers) == 6
- assert "oe1-ie1" in identifiers
-
- @skip_without_tool("collection_paired_test")
- def test_workflow_flatten_with_mapped_over_execution(self):
- with self.dataset_populator.test_history() as history_id:
- self._run_jobs(
- r"""
-class: GalaxyWorkflow
-inputs:
- input_fastqs: collection
-steps:
- split_up:
- tool_id: collection_split_on_column
- in:
- input1: input_fastqs
- flatten:
- tool_id: '__FLATTEN__'
- in:
- input: split_up/split_output
- join_identifier: '-'
-test_data:
- input_fastqs:
- collection_type: list
- elements:
- - identifier: samp1
- content: "0\n1"
-""",
- history_id=history_id,
- )
- history = self._get(f"histories/{history_id}/contents").json()
- flattened_collection = history[-1]
- assert flattened_collection["history_content_type"] == "dataset_collection"
- assert flattened_collection["collection_type"] == "list"
- assert flattened_collection["element_count"] == 2
- nested_collection = self.dataset_populator.get_history_collection_details(history_id, hid=3)
- assert nested_collection["collection_type"] == "list:list"
- assert nested_collection["element_count"] == 1
- assert nested_collection["elements"][0]["object"]["populated"]
- assert nested_collection["elements"][0]["object"]["element_count"] == 2
-
@skip_without_tool("cat")
def test_workflow_invocation_report_1(self):
test_data = """
@@ -6249,52 +6027,6 @@ def test_run_rename_based_on_input_conditional_legacy_pja_reference(self):
name = content["name"]
assert name == "fastq1 suffix", name
- @skip_without_tool("mapper2")
- def test_run_rename_based_on_input_collection(self):
- with self.dataset_populator.test_history() as history_id:
- self._run_jobs(
- """
-class: GalaxyWorkflow
-inputs:
- fasta_input: data
- fastq_inputs: data
-steps:
- mapping:
- tool_id: mapper2
- state:
- fastq_input:
- fastq_input_selector: paired_collection
- fastq_input1:
- $link: fastq_inputs
- reference:
- $link: fasta_input
- outputs:
- out_file1:
- rename: "#{fastq_input.fastq_input1 | basename} suffix"
-""",
- test_data="""
-fasta_input:
- value: 1.fasta
- type: File
- name: fasta1
- file_type: fasta
-fastq_inputs:
- collection_type: list
- name: the_dataset_pair
- elements:
- - identifier: forward
- value: 1.fastq
- type: File
- - identifier: reverse
- value: 1.fastq
- type: File
-""",
- history_id=history_id,
- )
- content = self.dataset_populator.get_history_dataset_details(history_id, wait=True, assert_ok=True)
- name = content["name"]
- assert name == "the_dataset_pair suffix", name
-
@skip_without_tool("collection_creates_pair")
def test_run_hide_on_collection_output(self):
with self.dataset_populator.test_history() as history_id:
@@ -7184,44 +6916,6 @@ def test_defaults_editor(self):
put_response = self._update_workflow(workflow_id, workflow_object)
assert put_response.status_code == 200
- def test_empty_collection_sort(self, history_id):
- self._run_workflow(
- """class: GalaxyWorkflow
-inputs:
- input: collection
- filter_file: data
-steps:
- filter_collection:
- tool_id: __FILTER_FROM_FILE__
- in:
- input: input
- how|filter_source: filter_file
- sort_collection_1:
- tool_id: __SORTLIST__
- in:
- input: filter_collection/output_filtered
- sort_collection_2:
- tool_id: __SORTLIST__
- in:
- input: filter_collection/output_discarded
- merge_collection:
- tool_id: __MERGE_COLLECTION__
- in:
- inputs_0|input: sort_collection_1/output
- inputs_1|input: sort_collection_2/output
-test_data:
- input:
- collection_type: list
- elements:
- - identifier: i1
- content: "0"
- filter_file: i1
-""",
- history_id=history_id,
- wait=True,
- assert_ok=True,
- )
-
@skip_without_tool("random_lines1")
def test_run_replace_params_over_default_delayed(self):
with self.dataset_populator.test_history() as history_id:
diff --git a/lib/galaxy_test/driver/driver_util.py b/lib/galaxy_test/driver/driver_util.py
index 77ab88ca0682..a2ee29f5f774 100644
--- a/lib/galaxy_test/driver/driver_util.py
+++ b/lib/galaxy_test/driver/driver_util.py
@@ -850,7 +850,8 @@ def _configure(self, config_object=None) -> None:
default_tool_conf: Optional[str]
datatypes_conf_override: Optional[str]
- if getattr(config_object, "framework_tool_and_types", False):
+ framework_tools_and_types = getattr(config_object, "framework_tool_and_types", False)
+ if framework_tools_and_types:
default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
else:
diff --git a/lib/galaxy_test/workflow/__init__.py b/lib/galaxy_test/workflow/__init__.py
new file mode 100644
index 000000000000..2355ab50ab9a
--- /dev/null
+++ b/lib/galaxy_test/workflow/__init__.py
@@ -0,0 +1,7 @@
+"""Framework tests for checking workflow correctness and defining expected workflow behavior.
+
+This is meant to grow into the workflow-based mirror of what the framework tests are for tools.
+api/test_workflows.py is still the place to test exceptional conditions, errors, etc., but tests
+of normal operation, where semantics can be verified with simple inputs and outputs, can now be
+placed here.
+"""
diff --git a/lib/galaxy_test/workflow/conftest.py b/lib/galaxy_test/workflow/conftest.py
new file mode 100644
index 000000000000..1075bb4a374d
--- /dev/null
+++ b/lib/galaxy_test/workflow/conftest.py
@@ -0,0 +1,22 @@
+import os
+
+import pytest
+
+from galaxy_test.driver.driver_util import GalaxyTestDriver
+
+
+class ConfigObject:
+ framework_tool_and_types = True
+
+
+@pytest.fixture(scope="session")
+def real_driver():
+ if not os.environ.get("GALAXY_TEST_ENVIRONMENT_CONFIGURED"):
+ driver = GalaxyTestDriver()
+ driver.setup(ConfigObject)
+ try:
+ yield driver
+ finally:
+ driver.tear_down()
+ else:
+ yield None
diff --git a/lib/galaxy_test/workflow/empty_collection_sort.gxwf-tests.yml b/lib/galaxy_test/workflow/empty_collection_sort.gxwf-tests.yml
new file mode 100644
index 000000000000..ad83bb676b67
--- /dev/null
+++ b/lib/galaxy_test/workflow/empty_collection_sort.gxwf-tests.yml
@@ -0,0 +1,16 @@
+- doc: |
+    Test to verify that collection operations like the sort tool work with empty collections.
+ job:
+ input:
+ collection_type: list
+ elements:
+ - identifier: i1
+ content: "0"
+ filter_file: i1
+ outputs:
+ output:
+ elements:
+ i1:
+ asserts:
+ - that: has_text
+ text: "0"
diff --git a/lib/galaxy_test/workflow/empty_collection_sort.gxwf.yml b/lib/galaxy_test/workflow/empty_collection_sort.gxwf.yml
new file mode 100644
index 000000000000..7520bcd5d68f
--- /dev/null
+++ b/lib/galaxy_test/workflow/empty_collection_sort.gxwf.yml
@@ -0,0 +1,26 @@
+class: GalaxyWorkflow
+inputs:
+ input: collection
+ filter_file: data
+outputs:
+ output:
+ outputSource: merge_collection/output
+steps:
+ filter_collection:
+ tool_id: __FILTER_FROM_FILE__
+ in:
+ input: input
+ how|filter_source: filter_file
+ sort_collection_1:
+ tool_id: __SORTLIST__
+ in:
+ input: filter_collection/output_filtered
+ sort_collection_2:
+ tool_id: __SORTLIST__
+ in:
+ input: filter_collection/output_discarded
+ merge_collection:
+ tool_id: __MERGE_COLLECTION__
+ in:
+ inputs_0|input: sort_collection_1/output
+ inputs_1|input: sort_collection_2/output
diff --git a/lib/galaxy_test/workflow/flatten_collection.gxwf-tests.yml b/lib/galaxy_test/workflow/flatten_collection.gxwf-tests.yml
new file mode 100644
index 000000000000..1a2b5c65b596
--- /dev/null
+++ b/lib/galaxy_test/workflow/flatten_collection.gxwf-tests.yml
@@ -0,0 +1,18 @@
+- doc: |
+    Test to verify the collection flatten operation in the context of a workflow.
+ job: {}
+ outputs:
+ out:
+ elements:
+ 'oe1-ie1':
+ asserts:
+ - that: has_text
+ text: "A"
+ 'oe1-ie2':
+ asserts:
+ - that: has_text
+ text: "B"
+ 'oe2-ie1':
+ asserts:
+ - that: has_text
+ text: "C"
diff --git a/lib/galaxy_test/workflow/flatten_collection.gxwf.yml b/lib/galaxy_test/workflow/flatten_collection.gxwf.yml
new file mode 100644
index 000000000000..993bd9dac005
--- /dev/null
+++ b/lib/galaxy_test/workflow/flatten_collection.gxwf.yml
@@ -0,0 +1,17 @@
+class: GalaxyWorkflow
+inputs: {}
+outputs:
+ out:
+ outputSource: flatten/output
+steps:
+ nested:
+ tool_id: collection_creates_dynamic_nested
+ state:
+ sleep_time: 0
+ foo: 'dummy'
+ flatten:
+ tool_id: '__FLATTEN__'
+ state:
+ join_identifier: '-'
+ in:
+ input: nested/list_output
diff --git a/lib/galaxy_test/workflow/flatten_collection_over_execution.gxwf-tests.yml b/lib/galaxy_test/workflow/flatten_collection_over_execution.gxwf-tests.yml
new file mode 100644
index 000000000000..5c54212815e9
--- /dev/null
+++ b/lib/galaxy_test/workflow/flatten_collection_over_execution.gxwf-tests.yml
@@ -0,0 +1,19 @@
+- doc: |
+    Test to verify the collection flatten operation applied mid-workflow.
+ job:
+ input_fastqs:
+ collection_type: list
+ elements:
+ - identifier: samp1
+ content: "0 mycoolline\n1 mysecondline\n"
+ outputs:
+ out:
+ elements:
+ 'samp1-0':
+ asserts:
+ - that: has_text
+ text: "mycoolline"
+ 'samp1-1':
+ asserts:
+ - that: has_text
+ text: "mysecondline"
diff --git a/lib/galaxy_test/workflow/flatten_collection_over_execution.gxwf.yml b/lib/galaxy_test/workflow/flatten_collection_over_execution.gxwf.yml
new file mode 100644
index 000000000000..172eaa7d4b26
--- /dev/null
+++ b/lib/galaxy_test/workflow/flatten_collection_over_execution.gxwf.yml
@@ -0,0 +1,17 @@
+class: GalaxyWorkflow
+inputs:
+ input_fastqs: collection
+outputs:
+ out:
+ outputSource: flatten/output
+steps:
+ split_up:
+ tool_id: collection_split_on_column
+ in:
+ input1: input_fastqs
+ flatten:
+ tool_id: '__FLATTEN__'
+ state:
+ join_identifier: '-'
+ in:
+ input: split_up/split_output
diff --git a/lib/galaxy_test/workflow/multi_select_mapping.gxwf-tests.yml b/lib/galaxy_test/workflow/multi_select_mapping.gxwf-tests.yml
new file mode 100644
index 000000000000..065849a34082
--- /dev/null
+++ b/lib/galaxy_test/workflow/multi_select_mapping.gxwf-tests.yml
@@ -0,0 +1,27 @@
+- doc: |
+    Test to verify that mapping a list over a multi-select parameter maps over the job.
+    Ideally there would be a way to specify that the list should instead be reduced
+    into the parameter; an expression tool that combines the list into a single
+    string might work.
+ job:
+ input:
+ type: collection
+ collection_type: list
+ elements:
+ - identifier: the_example_2
+ content: '"ex2"'
+ ext: 'expression.json'
+ - identifier: the_example_5
+ content: '"ex5"'
+ ext: 'expression.json'
+ outputs:
+ output:
+ elements:
+ the_example_2:
+ asserts:
+ - that: has_text
+ text: 'ex2'
+ the_example_5:
+ asserts:
+ - that: has_text
+ text: 'ex5'
diff --git a/lib/galaxy_test/workflow/multi_select_mapping.gxwf.yml b/lib/galaxy_test/workflow/multi_select_mapping.gxwf.yml
new file mode 100644
index 000000000000..f810d022fb1a
--- /dev/null
+++ b/lib/galaxy_test/workflow/multi_select_mapping.gxwf.yml
@@ -0,0 +1,14 @@
+class: GalaxyWorkflow
+inputs:
+ input:
+ type: collection
+ collection_type: list
+outputs:
+ output:
+ outputSource: map_over_multi_select/output2
+steps:
+ map_over_multi_select:
+ tool_id: multi_select
+ in:
+ select_ex: input
+ select_optional: input
diff --git a/lib/galaxy_test/workflow/multiple_versions.gxwf-tests.yml b/lib/galaxy_test/workflow/multiple_versions.gxwf-tests.yml
new file mode 100644
index 000000000000..1d1db5dba980
--- /dev/null
+++ b/lib/galaxy_test/workflow/multiple_versions.gxwf-tests.yml
@@ -0,0 +1,19 @@
+- doc: |
+    Test that workflows can run multiple versions of a tool and that the specified version is the one actually run.
+ job:
+ input:
+ value: 1
+ type: raw
+ outputs:
+ output_1:
+ asserts:
+ - that: has_text
+ text: 'Version 0.1'
+ - that: not_has_text
+ text: 'Version 0.2'
+ output_2:
+ asserts:
+ - that: has_text
+ text: 'Version 0.2'
+ - that: not_has_text
+ text: 'Version 0.1'
diff --git a/lib/galaxy_test/workflow/multiple_versions.gxwf.yml b/lib/galaxy_test/workflow/multiple_versions.gxwf.yml
new file mode 100644
index 000000000000..abcdd44e2ca8
--- /dev/null
+++ b/lib/galaxy_test/workflow/multiple_versions.gxwf.yml
@@ -0,0 +1,20 @@
+class: GalaxyWorkflow
+inputs:
+ input:
+ type: int
+outputs:
+ output_1:
+ outputSource: multiple_1/out_file1
+ output_2:
+ outputSource: multiple_2/out_file1
+steps:
+ multiple_1:
+ tool_id: multiple_versions
+ tool_version: "0.1"
+ in:
+ inttest: input
+ multiple_2:
+ tool_id: multiple_versions
+ tool_version: "0.2"
+ in:
+ inttest: input
diff --git a/lib/galaxy_test/workflow/rename_based_on_input_collection.gxwf-tests.yml b/lib/galaxy_test/workflow/rename_based_on_input_collection.gxwf-tests.yml
new file mode 100644
index 000000000000..a6b357635316
--- /dev/null
+++ b/lib/galaxy_test/workflow/rename_based_on_input_collection.gxwf-tests.yml
@@ -0,0 +1,22 @@
+- doc: |
+ Test output dataset renaming when the target basename is based on an input collection.
+ job:
+ fasta_input:
+ value: 1.fasta
+ type: File
+ name: fasta1
+ file_type: fasta
+ fastq_inputs:
+ collection_type: list
+ name: the_dataset_pair
+ elements:
+ - identifier: forward
+ value: 1.fastq
+ type: File
+ - identifier: reverse
+ value: 1.fastq
+ type: File
+ outputs:
+ output:
+ metadata:
+ name: 'the_dataset_pair suffix'
diff --git a/lib/galaxy_test/workflow/rename_based_on_input_collection.gxwf.yml b/lib/galaxy_test/workflow/rename_based_on_input_collection.gxwf.yml
new file mode 100644
index 000000000000..0d6a19abfdd3
--- /dev/null
+++ b/lib/galaxy_test/workflow/rename_based_on_input_collection.gxwf.yml
@@ -0,0 +1,20 @@
+class: GalaxyWorkflow
+inputs:
+ fasta_input: data
+ fastq_inputs: data
+outputs:
+ output:
+ outputSource: mapping/out_file1
+steps:
+ mapping:
+ tool_id: mapper2
+ state:
+ fastq_input:
+ fastq_input_selector: paired_collection
+ fastq_input1:
+ $link: fastq_inputs
+ reference:
+ $link: fasta_input
+ outputs:
+ out_file1:
+ rename: "#{fastq_input.fastq_input1 | basename} suffix"
diff --git a/lib/galaxy_test/workflow/replacement_parameters_legacy.gxwf-tests.yml b/lib/galaxy_test/workflow/replacement_parameters_legacy.gxwf-tests.yml
new file mode 100644
index 000000000000..a3120f277072
--- /dev/null
+++ b/lib/galaxy_test/workflow/replacement_parameters_legacy.gxwf-tests.yml
@@ -0,0 +1,12 @@
+- doc: |
+    Test to verify that the legacy behavior of implicit runtime parameters continues to function correctly.
+ job:
+ replacement_parameters:
+ replaceme: moocow
+ outputs:
+ out1:
+ metadata:
+ name: 'moocow name'
+ out2:
+ metadata:
+ name: 'moocow name 2'
diff --git a/lib/galaxy_test/workflow/replacement_parameters_legacy.gxwf.yml b/lib/galaxy_test/workflow/replacement_parameters_legacy.gxwf.yml
new file mode 100644
index 000000000000..834721c3cc2b
--- /dev/null
+++ b/lib/galaxy_test/workflow/replacement_parameters_legacy.gxwf.yml
@@ -0,0 +1,17 @@
+class: GalaxyWorkflow
+inputs: {}
+outputs:
+ out1:
+ outputSource: create_2/out_file1
+ out2:
+ outputSource: create_2/out_file2
+steps:
+ create_2:
+ tool_id: create_2
+ state:
+ sleep_time: 0
+ outputs:
+ out_file1:
+ rename: "${replaceme} name"
+ out_file2:
+ rename: "${replaceme} name 2"
diff --git a/lib/galaxy_test/workflow/replacement_parameters_nested.gxwf-tests.yml b/lib/galaxy_test/workflow/replacement_parameters_nested.gxwf-tests.yml
new file mode 100644
index 000000000000..c5152f0e4add
--- /dev/null
+++ b/lib/galaxy_test/workflow/replacement_parameters_nested.gxwf-tests.yml
@@ -0,0 +1,13 @@
+- doc: |
+ Test to verify text parameters can be implicitly used as replacement parameters in subworkflows if they are connected up.
+ job:
+ replacemeouter:
+ value: moocow
+ type: raw
+ outputs:
+ out1:
+ metadata:
+ name: 'moocow name'
+ out2:
+ metadata:
+ name: 'moocow name 2'
diff --git a/lib/galaxy_test/workflow/replacement_parameters_nested.gxwf.yml b/lib/galaxy_test/workflow/replacement_parameters_nested.gxwf.yml
new file mode 100644
index 000000000000..d783195ae584
--- /dev/null
+++ b/lib/galaxy_test/workflow/replacement_parameters_nested.gxwf.yml
@@ -0,0 +1,31 @@
+class: GalaxyWorkflow
+inputs:
+ replacemeouter: text
+outputs:
+ out1:
+ outputSource: nested_workflow/workflow_output_1
+ out2:
+ outputSource: nested_workflow/workflow_output_2
+steps:
+ nested_workflow:
+ run:
+ class: GalaxyWorkflow
+ inputs:
+ replacemeinner: text
+ outputs:
+ workflow_output_1:
+ outputSource: create_2/out_file1
+ workflow_output_2:
+ outputSource: create_2/out_file2
+ steps:
+ create_2:
+ tool_id: create_2
+ state:
+ sleep_time: 0
+ outputs:
+ out_file1:
+ rename: "${replacemeinner} name"
+ out_file2:
+ rename: "${replacemeinner} name 2"
+ in:
+ replacemeinner: replacemeouter
diff --git a/lib/galaxy_test/workflow/replacement_parameters_text.gxwf-tests.yml b/lib/galaxy_test/workflow/replacement_parameters_text.gxwf-tests.yml
new file mode 100644
index 000000000000..b76353ae9325
--- /dev/null
+++ b/lib/galaxy_test/workflow/replacement_parameters_text.gxwf-tests.yml
@@ -0,0 +1,13 @@
+- doc: |
+ Test to verify text parameters can be implicitly used as replacement parameters in workflow step PJAs.
+ job:
+ replaceme:
+ value: moocow
+ type: raw
+ outputs:
+ out1:
+ metadata:
+ name: 'moocow name'
+ out2:
+ metadata:
+ name: 'moocow name 2'
diff --git a/lib/galaxy_test/workflow/replacement_parameters_text.gxwf.yml b/lib/galaxy_test/workflow/replacement_parameters_text.gxwf.yml
new file mode 100644
index 000000000000..18f73aadb699
--- /dev/null
+++ b/lib/galaxy_test/workflow/replacement_parameters_text.gxwf.yml
@@ -0,0 +1,18 @@
+class: GalaxyWorkflow
+inputs:
+ replaceme: text
+outputs:
+ out1:
+ outputSource: create_2/out_file1
+ out2:
+ outputSource: create_2/out_file2
+steps:
+ create_2:
+ tool_id: create_2
+ state:
+ sleep_time: 0
+ outputs:
+ out_file1:
+ rename: "${replaceme} name"
+ out_file2:
+ rename: "${replaceme} name 2"
diff --git a/lib/galaxy_test/workflow/tests.py b/lib/galaxy_test/workflow/tests.py
new file mode 100644
index 000000000000..f0702fb8240b
--- /dev/null
+++ b/lib/galaxy_test/workflow/tests.py
@@ -0,0 +1,136 @@
+import glob
+import os
+import tempfile
+from pathlib import Path
+
+import pytest
+import requests
+import yaml
+from gxformat2.yaml import ordered_load
+
+from galaxy.tool_util.parser.interface import TestCollectionOutputDef
+from galaxy.tool_util.verify import verify_file_contents_against_dict
+from galaxy.tool_util.verify.interactor import (
+ compare_expected_metadata_to_api_response,
+ get_metadata_to_test,
+ verify_collection,
+)
+from galaxy_test.api._framework import ApiTestCase
+from galaxy_test.base.populators import (
+ DatasetCollectionPopulator,
+ DatasetPopulator,
+ RunJobsSummary,
+ WorkflowPopulator,
+)
+
+SCRIPT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
+
+
+def find_workflows():
+ return [Path(p) for p in glob.glob(f"{SCRIPT_DIRECTORY}/*.gxwf.yml")]
+
+
+def pytest_generate_tests(metafunc):
+ parameter_combinations = []
+ test_ids = []
+ for workflow_path in find_workflows():
+ for index, test_job in enumerate(_test_jobs(workflow_path)):
+ parameter_combinations.append([workflow_path, test_job])
+ workflow_test_name = workflow_path.name[0 : -len(".gxwf.yml")]
+ test_ids.append(f"{workflow_test_name}_{index}")
+ if "workflow_path" in metafunc.fixturenames:
+ metafunc.parametrize("workflow_path,test_job", parameter_combinations, ids=test_ids)
+
+
+class TestWorkflow(ApiTestCase):
+ framework_tool_and_types = True
+
+ def setUp(self):
+ super().setUp()
+ self.workflow_populator = WorkflowPopulator(self.galaxy_interactor)
+ self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
+ self.dataset_collection_populator = DatasetCollectionPopulator(self.galaxy_interactor)
+
+ @pytest.mark.workflow
+ def test_workflow(self, workflow_path: Path, test_job):
+ with workflow_path.open() as f:
+ yaml_content = ordered_load(f)
+ with self.dataset_populator.test_history() as history_id:
+ run_summary = self.workflow_populator.run_workflow(
+ yaml_content,
+ test_data=test_job["job"],
+ history_id=history_id,
+ )
+ self._verify(run_summary, test_job["outputs"])
+
+ def _verify(self, run_summary: RunJobsSummary, output_definitions):
+ for output_name, output_definition in output_definitions.items():
+ self._verify_output(run_summary, output_name, output_definition)
+
+ def _verify_output(self, run_summary: RunJobsSummary, output_name, test_properties):
+ is_collection_test = "elements" in test_properties
+ item_label = f"Output named {output_name}"
+
+        def get_filename(name):
+            return tempfile.NamedTemporaryFile(prefix=f"gx_workflow_framework_test_file_{output_name}", delete=False).name
+
+ def verify_dataset(dataset: dict, test_properties: dict):
+ output_content = self.dataset_populator.get_history_dataset_content(
+ run_summary.history_id, dataset=dataset, type="bytes"
+ )
+ verify_file_contents_against_dict(get_filename, _get_location, item_label, output_content, test_properties)
+ metadata = get_metadata_to_test(test_properties)
+ if metadata:
+ dataset_details = self.dataset_populator.get_history_dataset_details(
+ run_summary.history_id, content_id=dataset["id"]
+ )
+ compare_expected_metadata_to_api_response(metadata, dataset_details)
+
+ if is_collection_test:
+ test_properties["name"] = output_name
+            # Map the preferred key "elements" onto "element_tests", in accordance with work in https://github.com/galaxyproject/planemo/pull/1417
+ test_properties["element_tests"] = test_properties["elements"]
+ output_def = TestCollectionOutputDef.from_dict(test_properties)
+
+ invocation_details = self.workflow_populator.get_invocation(run_summary.invocation_id, step_details=True)
+ assert output_name in invocation_details["output_collections"]
+ test_output = invocation_details["output_collections"][output_name]
+ output_collection = self.dataset_populator.get_history_collection_details(
+ run_summary.history_id, content_id=test_output["id"]
+ )
+
+ def verify_dataset_element(element, test_properties, element_outfile):
+ hda = element["object"]
+ verify_dataset(hda, test_properties)
+
+ verify_collection(output_def, output_collection, verify_dataset_element)
+ else:
+ test_properties["name"] = output_name
+ invocation_details = self.workflow_populator.get_invocation(run_summary.invocation_id, step_details=True)
+ assert output_name in invocation_details["outputs"]
+ test_output = invocation_details["outputs"][output_name]
+ verify_dataset(test_output, test_properties)
+
+
+def _test_jobs(workflow_path: Path) -> list:
+ test_path = _workflow_test_path(workflow_path)
+ with test_path.open() as f:
+ jobs = yaml.safe_load(f)
+ return jobs
+
+
+def _workflow_test_path(workflow_path: Path) -> Path:
+ base_name = workflow_path.name[0 : -len(".gxwf.yml")]
+ test_path = workflow_path.parent / f"{base_name}.gxwf-tests.yml"
+ return test_path
+
+
+def _get_location(location: str) -> str:
+ data_file = tempfile.NamedTemporaryFile(prefix="gx_workflow_framework_test_file_", delete=False)
+ with requests.get(location, stream=True) as r:
+ r.raise_for_status()
+
+ for chunk in r.iter_content():
+ if chunk:
+ data_file.write(chunk)
+ return data_file.name
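
Since `pytest_generate_tests` above parametrizes one test per workflow/job pair with ids of the form `<workflow name>_<job index>`, individual cases can be selected by id. A sketch, assuming paths relative to the Galaxy root:

```python
import pytest

# Run only the first test job defined for flatten_collection.gxwf.yml;
# the id follows the f"{workflow_test_name}_{index}" scheme above.
pytest.main([
    "lib/galaxy_test/workflow/tests.py",
    "-k", "flatten_collection_0",
])
```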
diff --git a/lib/galaxy_test/workflow/zip_collection.gxwf-tests.yml b/lib/galaxy_test/workflow/zip_collection.gxwf-tests.yml
new file mode 100644
index 000000000000..e0e4ffc5ba69
--- /dev/null
+++ b/lib/galaxy_test/workflow/zip_collection.gxwf-tests.yml
@@ -0,0 +1,10 @@
+- doc: |
+ Test simple use of __ZIP_COLLECTION__ in a workflow.
+ job:
+ test_input_1: "samp1\t10.0\nsamp2\t20.0\n"
+ test_input_2: "samp1\t20.0\nsamp2\t40.0\n"
+ outputs:
+ out:
+ asserts:
+ - that: has_text
+ text: "samp1\t10.0\nsamp2\t20.0\nsamp1\t20.0\nsamp2\t40.0"
diff --git a/lib/galaxy_test/workflow/zip_collection.gxwf.yml b/lib/galaxy_test/workflow/zip_collection.gxwf.yml
new file mode 100644
index 000000000000..151e0ffd1aa6
--- /dev/null
+++ b/lib/galaxy_test/workflow/zip_collection.gxwf.yml
@@ -0,0 +1,21 @@
+class: GalaxyWorkflow
+inputs:
+ test_input_1: data
+ test_input_2: data
+outputs:
+ out:
+ outputSource: concat_pair/out1
+steps:
+ first_cat:
+ tool_id: cat
+ in:
+ input1: test_input_1
+ zip_it:
+ tool_id: "__ZIP_COLLECTION__"
+ in:
+ input_forward: first_cat/out_file1
+ input_reverse: test_input_2
+ concat_pair:
+ tool_id: collection_paired_test
+ in:
+ f1: zip_it/output
diff --git a/run_tests.sh b/run_tests.sh
index 2e04eab5eee9..c5b6104a5827 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -14,6 +14,8 @@ cat <
+