diff --git a/lib/galaxy/celery/tasks.py b/lib/galaxy/celery/tasks.py
index acc65a593c0c..e1ac7456ad41 100644
--- a/lib/galaxy/celery/tasks.py
+++ b/lib/galaxy/celery/tasks.py
@@ -291,6 +291,7 @@ def abort_when_job_stops(function: Callable, session: galaxy_scoped_session, job
return future.result(timeout=1)
except TimeoutError:
if is_aborted(session, job_id):
+ future.cancel()
return
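For context, the hunk above boils down to polling a worker future once per second and bailing out when the job is marked as aborted; `future.cancel()` is best effort and only stops work that has not started yet. A minimal, self-contained sketch of that pattern follows — `run_unless_aborted` and `is_aborted` here are illustrative stand-ins, not Galaxy's actual `abort_when_job_stops` helper or its database check:

```python
from concurrent.futures import ThreadPoolExecutor, TimeoutError
import time


def is_aborted(job_id: int) -> bool:
    # Stand-in for Galaxy's check against the job table; purely illustrative.
    return False


def run_unless_aborted(function, job_id, *args, **kwargs):
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(function, *args, **kwargs)
        while True:
            try:
                # Wake up once per second to re-check the abort flag.
                return future.result(timeout=1)
            except TimeoutError:
                if is_aborted(job_id):
                    # Best effort: cancel() only prevents the call from starting
                    # if it is still queued; a call already running keeps going.
                    future.cancel()
                    return


if __name__ == "__main__":
    print(run_unless_aborted(lambda: (time.sleep(2), "done")[1], job_id=1))
```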
diff --git a/lib/galaxy/exceptions/__init__.py b/lib/galaxy/exceptions/__init__.py
index 5806ed157c21..8deab77bc17b 100644
--- a/lib/galaxy/exceptions/__init__.py
+++ b/lib/galaxy/exceptions/__init__.py
@@ -16,6 +16,8 @@
and messages.
"""
+from typing import Optional
+
from .error_codes import (
error_codes_by_name,
ErrorCode,
@@ -30,7 +32,7 @@ class MessageException(Exception):
# Error code information embedded into API json responses.
err_code: ErrorCode = error_codes_by_name["UNKNOWN"]
- def __init__(self, err_msg=None, type="info", **extra_error_info):
+ def __init__(self, err_msg: Optional[str] = None, type="info", **extra_error_info):
self.err_msg = err_msg or self.err_code.default_error_message
self.type = type
self.extra_error_info = extra_error_info
@@ -64,7 +66,7 @@ class AcceptedRetryLater(MessageException):
err_code = error_codes_by_name["ACCEPTED_RETRY_LATER"]
retry_after: int
- def __init__(self, msg, retry_after=60):
+ def __init__(self, msg: Optional[str] = None, retry_after=60):
super().__init__(msg)
self.retry_after = retry_after
@@ -136,7 +138,7 @@ class ToolMissingException(MessageException):
status_code = 400
err_code = error_codes_by_name["USER_TOOL_MISSING_PROBLEM"]
- def __init__(self, err_msg=None, type="info", tool_id=None, **extra_error_info):
+ def __init__(self, err_msg: Optional[str] = None, type="info", tool_id=None, **extra_error_info):
super().__init__(err_msg, type, **extra_error_info)
self.tool_id = tool_id
@@ -152,7 +154,7 @@ class ToolInputsNotReadyException(MessageException):
class ToolInputsNotOKException(MessageException):
- def __init__(self, err_msg=None, type="info", *, src: str, id: int, **extra_error_info):
+ def __init__(self, err_msg: Optional[str] = None, type="info", *, src: str, id: int, **extra_error_info):
super().__init__(err_msg, type, **extra_error_info)
self.src = src
self.id = id
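The `Optional[str]` annotations match the existing fallback behaviour: passing `None` (or nothing) makes the exception pick up its error code's `default_error_message`. A small self-contained sketch of that behaviour, using a simplified `ErrorCode` stand-in with made-up values rather than Galaxy's real `error_codes` module:

```python
from dataclasses import dataclass
from typing import Optional


@dataclass(frozen=True)
class ErrorCode:
    # Simplified stand-in; the real codes live in galaxy.exceptions.error_codes.
    code: int
    default_error_message: str


class MessageException(Exception):
    status_code = 400
    err_code = ErrorCode(0, "Unknown error occurred while processing request.")

    def __init__(self, err_msg: Optional[str] = None, type="info", **extra_error_info):
        # None (or an empty string) falls back to the error code's default.
        self.err_msg = err_msg or self.err_code.default_error_message
        self.type = type
        self.extra_error_info = extra_error_info


class AcceptedRetryLater(MessageException):
    status_code = 202
    err_code = ErrorCode(202001, "Retry the request later.")  # illustrative values

    def __init__(self, msg: Optional[str] = None, retry_after: int = 60):
        super().__init__(msg)
        self.retry_after = retry_after


print(AcceptedRetryLater().err_msg)      # -> "Retry the request later."
print(MessageException("boom").err_msg)  # -> "boom"
```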
diff --git a/lib/galaxy/tools/__init__.py b/lib/galaxy/tools/__init__.py
index 6e64d3a11011..cf625953a15f 100644
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -3356,9 +3356,15 @@ def produce_outputs(self, trans, out_data, output_collections, incoming, history
if how == "first":
extracted_element = collection.first_dataset_element
elif how == "by_identifier":
- extracted_element = collection[incoming["which"]["identifier"]]
+ try:
+ extracted_element = collection[incoming["which"]["identifier"]]
+ except KeyError as e:
+ raise exceptions.MessageException(e.args[0]) from e
elif how == "by_index":
- extracted_element = collection[int(incoming["which"]["index"])]
+ try:
+ extracted_element = collection[int(incoming["which"]["index"])]
+ except KeyError as e:
+ raise exceptions.MessageException(e.args[0]) from e
else:
raise exceptions.MessageException("Invalid tool parameters.")
extracted = extracted_element.element_object
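The motivation for this hunk: indexing a `DatasetCollection` with an unknown identifier or index raises a plain `KeyError`, which surfaces as an opaque 500; wrapping it in `MessageException` keeps the message but maps it to a 400 (the new API test below asserts on exactly this message). A toy sketch of the translation, with simplified stand-ins for both the collection and the exception class:

```python
class MessageException(Exception):
    # Stand-in: Galaxy's version carries err_code/err_msg and maps to HTTP 400.
    status_code = 400


class DatasetCollection:
    # Toy stand-in for the model class; only __getitem__ matters here.
    def __init__(self, elements):
        self._elements = list(elements)

    def __getitem__(self, key):
        try:
            return self._elements[key]
        except IndexError:
            # Mirrors the message format the new test asserts on.
            raise KeyError(f"Dataset collection has no element_index with key {key}.")


collection = DatasetCollection(["forward", "reverse"])
try:
    try:
        collection[100]
    except KeyError as e:
        # Same translation as in produce_outputs(): keep the user-facing message,
        # switch the exception type so the API answers with a 400, not a 500.
        raise MessageException(e.args[0]) from e
except MessageException as exc:
    print(exc.status_code, exc)
    # -> 400 Dataset collection has no element_index with key 100.
```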
diff --git a/lib/galaxy/tools/extract_dataset.xml b/lib/galaxy/tools/extract_dataset.xml
index 3e066ebebec5..37986eb722f9 100644
--- a/lib/galaxy/tools/extract_dataset.xml
+++ b/lib/galaxy/tools/extract_dataset.xml
@@ -19,7 +19,7 @@
-
+
@@ -52,9 +52,9 @@ Description
The tool allow extracting datasets based on position (**The first dataset** and **Select by index** options) or name (**Select by element identifier** option). This tool effectively collapses the inner-most collection into a dataset. For nested collections (e.g a list of lists of lists: outer:middle:inner, extracting the inner dataset element) a new list is created where the selected element takes the position of the inner-most collection (so outer:middle, where middle is not a collection but the inner dataset element).
-.. class:: warningmark
+.. class:: warningmark
-**Note**: Dataset index (numbering) begins with 0 (zero).
+**Note**: Dataset index (numbering) begins with 0 (zero).
.. class:: infomark
diff --git a/lib/galaxy/webapps/base/api.py b/lib/galaxy/webapps/base/api.py
index df779127f0c3..3da3ba3d3900 100644
--- a/lib/galaxy/webapps/base/api.py
+++ b/lib/galaxy/webapps/base/api.py
@@ -198,6 +198,11 @@ async def validate_exception_middleware(request: Request, exc: RequestValidation
@app.exception_handler(MessageException)
async def message_exception_middleware(request: Request, exc: MessageException) -> Response:
+ # Intentionally not logging the traceback here, as the full context will be
+ # dispatched to Sentry if configured. This just makes the logs less opaque
+ # when a 500 shows up.
+ if exc.status_code >= 500:
+ log.info(f"MessageException: {exc}")
return get_error_response_for_request(request, exc)
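For reference, the handler change in isolation: only `MessageException`s that map to a 5xx status leave a line in the plain logs, while every exception still goes through the usual error-response path. A self-contained FastAPI sketch, with simplified stand-ins for `MessageException` and `get_error_response_for_request`:

```python
import logging

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

log = logging.getLogger(__name__)
app = FastAPI()


class MessageException(Exception):
    # Simplified stand-in for galaxy.exceptions.MessageException.
    status_code = 500

    def __init__(self, err_msg: str = "Internal server error"):
        super().__init__(err_msg)
        self.err_msg = err_msg


def get_error_response_for_request(request: Request, exc: MessageException) -> JSONResponse:
    # Stand-in for Galaxy's helper; it just shapes the JSON error body.
    return JSONResponse(status_code=exc.status_code, content={"err_msg": exc.err_msg})


@app.exception_handler(MessageException)
async def message_exception_middleware(request: Request, exc: MessageException) -> JSONResponse:
    # Tracebacks are left to Sentry (when configured); this only leaves a
    # breadcrumb in the plain logs for server-side failures.
    if exc.status_code >= 500:
        log.info(f"MessageException: {exc}")
    return get_error_response_for_request(request, exc)


@app.get("/boom")
async def boom():
    raise MessageException("something broke server-side")
```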
diff --git a/lib/galaxy/workflow/modules.py b/lib/galaxy/workflow/modules.py
index 46926aaca35c..d7dff3e5ee53 100644
--- a/lib/galaxy/workflow/modules.py
+++ b/lib/galaxy/workflow/modules.py
@@ -2573,11 +2573,13 @@ def populate_module_and_state(
step_args = param_map.get(step.id, {})
step_errors = module_injector.compute_runtime_state(step, step_args=step_args)
if step_errors:
- raise exceptions.MessageException(step_errors, err_data={step.order_index: step_errors})
+ raise exceptions.MessageException(
+ "Error computing workflow step runtime state", err_data={step.order_index: step_errors}
+ )
if step.upgrade_messages:
if allow_tool_state_corrections:
log.debug('Workflow step "%i" had upgrade messages: %s', step.id, step.upgrade_messages)
else:
raise exceptions.MessageException(
- step.upgrade_messages, err_data={step.order_index: step.upgrade_messages}
+ "Workflow step has upgrade messages", err_data={step.order_index: step.upgrade_messages}
)
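The point of this change is that `err_msg` ends up as the top-level, human-readable string in the JSON error body, while the structured per-step details travel in `err_data` keyed by the step's `order_index`, which is how clients match errors to steps. A small sketch of the resulting shape; the step index and error dict are invented for illustration and `MessageException` is again a simplified stand-in:

```python
from typing import Optional


class MessageException(Exception):
    # Simplified stand-in for galaxy.exceptions.MessageException.
    def __init__(self, err_msg: Optional[str] = None, type="info", **extra_error_info):
        super().__init__(err_msg)
        self.err_msg = err_msg
        self.type = type
        self.extra_error_info = extra_error_info


step_order_index = 2                                  # hypothetical step position
step_errors = {"input_dataset": "Value is required"}  # hypothetical runtime-state errors

exc = MessageException(
    "Error computing workflow step runtime state",
    err_data={step_order_index: step_errors},
)

# Roughly what a client sees after the API serializes the exception:
print({"err_msg": exc.err_msg, **exc.extra_error_info})
# -> {'err_msg': 'Error computing workflow step runtime state',
#     'err_data': {2: {'input_dataset': 'Value is required'}}}
```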
diff --git a/lib/galaxy_test/api/test_tools.py b/lib/galaxy_test/api/test_tools.py
index f5ba53508b81..78a4b3e05c92 100644
--- a/lib/galaxy_test/api/test_tools.py
+++ b/lib/galaxy_test/api/test_tools.py
@@ -701,7 +701,7 @@ def test_database_operation_tool_with_pending_inputs(self):
hdca1_id = self.dataset_collection_populator.create_list_in_history(
history_id, contents=["a\nb\nc\nd", "e\nf\ng\nh"], wait=True
).json()["outputs"][0]["id"]
- self.dataset_populator.run_tool(
+ run_response = self.dataset_populator.run_tool(
tool_id="cat_data_and_sleep",
inputs={
"sleep_time": 15,
@@ -709,15 +709,34 @@ def test_database_operation_tool_with_pending_inputs(self):
},
history_id=history_id,
)
+ output_hdca_id = run_response["implicit_collections"][0]["id"]
run_response = self.dataset_populator.run_tool(
tool_id="__EXTRACT_DATASET__",
inputs={
- "data_collection": {"src": "hdca", "id": hdca1_id},
+ "data_collection": {"src": "hdca", "id": output_hdca_id},
},
history_id=history_id,
)
assert run_response["outputs"][0]["state"] != "ok"
+ @skip_without_tool("__EXTRACT_DATASET__")
+ def test_extract_dataset_invalid_element_identifier(self):
+ with self.dataset_populator.test_history(require_new=False) as history_id:
+ hdca1_id = self.dataset_collection_populator.create_list_in_history(
+ history_id, contents=["a\nb\nc\nd", "e\nf\ng\nh"], wait=True
+ ).json()["outputs"][0]["id"]
+ run_response = self.dataset_populator.run_tool_raw(
+ tool_id="__EXTRACT_DATASET__",
+ inputs={
+ "data_collection": {"src": "hdca", "id": hdca1_id},
+ "which": {"which_dataset": "by_index", "index": 100},
+ },
+ history_id=history_id,
+ input_format="21.01",
+ )
+ assert run_response.status_code == 400
+ assert run_response.json()["err_msg"] == "Dataset collection has no element_index with key 100."
+
@skip_without_tool("__FILTER_FAILED_DATASETS__")
def test_filter_failed_list(self):
with self.dataset_populator.test_history(require_new=False) as history_id: