diff --git a/lib/galaxy/celery/tasks.py b/lib/galaxy/celery/tasks.py
index fc60f6921327..657ae3ef909b 100644
--- a/lib/galaxy/celery/tasks.py
+++ b/lib/galaxy/celery/tasks.py
@@ -288,6 +288,7 @@ def abort_when_job_stops(function: Callable, session: galaxy_scoped_session, job
return future.result(timeout=1)
except TimeoutError:
if is_aborted(session, job_id):
+ future.cancel()
return
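With only the `return` here, an abort could leave the submitted call free to start later if it was still queued; calling `future.cancel()` first keeps a not-yet-started task from ever running (it cannot stop one that is already executing). A minimal sketch of the same poll-and-cancel pattern, with a plain callable standing in for Galaxy's `is_aborted(session, job_id)` check and a hypothetical `run_unless_aborted` wrapper:

```python
# Sketch only: mirrors the poll-and-cancel flow above with stand-in names.
from concurrent.futures import ThreadPoolExecutor, TimeoutError
from typing import Any, Callable


def run_unless_aborted(function: Callable[[], Any], aborted: Callable[[], bool]) -> Any:
    with ThreadPoolExecutor(max_workers=1) as pool:
        future = pool.submit(function)
        while True:
            try:
                # Poll once per second so an abort request is noticed promptly.
                return future.result(timeout=1)
            except TimeoutError:
                if aborted():
                    # cancel() cannot interrupt a task that already started,
                    # but it prevents a still-queued task from ever running.
                    future.cancel()
                    return None
```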
diff --git a/lib/galaxy/tools/__init__.py b/lib/galaxy/tools/__init__.py
index 57aa93d8000e..20d7e4f47608 100644
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -3356,9 +3356,15 @@ def produce_outputs(self, trans, out_data, output_collections, incoming, history
if how == "first":
extracted_element = collection.first_dataset_element
elif how == "by_identifier":
- extracted_element = collection[incoming["which"]["identifier"]]
+ try:
+ extracted_element = collection[incoming["which"]["identifier"]]
+ except KeyError as e:
+ raise exceptions.MessageException(e.args[0])
elif how == "by_index":
- extracted_element = collection[int(incoming["which"]["index"])]
+ try:
+ extracted_element = collection[int(incoming["which"]["index"])]
+ except KeyError as e:
+ raise exceptions.MessageException(e.args[0])
else:
raise exceptions.MessageException("Invalid tool parameters.")
extracted = extracted_element.element_object
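Without the `try`/`except`, a bad identifier or index propagated as a bare `KeyError` instead of a user-facing message; re-raising it as `MessageException` lets the API report it as a client error, which the new test below checks via the 400 status and `err_msg`. A self-contained sketch of the lookup-and-wrap pattern, with a plain dict standing in for the collection lookup and a stand-in exception class:

```python
# Sketch only: MessageException here stands in for galaxy.exceptions.MessageException.
class MessageException(Exception):
    """Stand-in for the user-facing Galaxy exception type."""


def extract_element(elements: dict, key: str):
    try:
        return elements[key]
    except KeyError as e:
        # Surface the failed lookup as a readable message instead of a bare KeyError.
        # (The change above re-raises e.args[0] directly because the collection lookup
        # already produces a descriptive message, e.g.
        # "Dataset collection has no element_index with key 100.")
        raise MessageException(f"Collection has no element with key {key!r}.") from e
```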
diff --git a/lib/galaxy/tools/extract_dataset.xml b/lib/galaxy/tools/extract_dataset.xml
index 3e066ebebec5..37986eb722f9 100644
--- a/lib/galaxy/tools/extract_dataset.xml
+++ b/lib/galaxy/tools/extract_dataset.xml
@@ -19,7 +19,7 @@
-
+
@@ -52,9 +52,9 @@ Description
The tool allows extracting datasets based on position (**The first dataset** and **Select by index** options) or name (**Select by element identifier** option). This tool effectively collapses the inner-most collection into a dataset. For nested collections (e.g. a list of lists of lists: outer:middle:inner, extracting the inner dataset element) a new list is created where the selected element takes the position of the inner-most collection (so outer:middle, where middle is not a collection but the inner dataset element).

-.. class:: warningmark
+.. class:: warningmark

-**Note**: Dataset index (numbering) begins with 0 (zero).
+**Note**: Dataset index (numbering) begins with 0 (zero).

.. class:: infomark
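For reference, the two selection modes take payloads shaped roughly like the following, following the `21.01` input format used by the test added below; `hdca_id` and `sample1` are placeholders:

```python
# Hypothetical example payloads for the __EXTRACT_DATASET__ tool; the key names
# ("which_dataset", "index", "identifier") come from the code and test in this diff.
by_index = {
    "data_collection": {"src": "hdca", "id": "hdca_id"},
    "which": {"which_dataset": "by_index", "index": 0},  # indices are zero-based, per the note above
}
by_identifier = {
    "data_collection": {"src": "hdca", "id": "hdca_id"},
    "which": {"which_dataset": "by_identifier", "identifier": "sample1"},
}
```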
diff --git a/lib/galaxy_test/api/test_tools.py b/lib/galaxy_test/api/test_tools.py
index 7befe7f221b1..3e99d0115ac1 100644
--- a/lib/galaxy_test/api/test_tools.py
+++ b/lib/galaxy_test/api/test_tools.py
@@ -701,7 +701,7 @@ def test_database_operation_tool_with_pending_inputs(self):
hdca1_id = self.dataset_collection_populator.create_list_in_history(
history_id, contents=["a\nb\nc\nd", "e\nf\ng\nh"], wait=True
).json()["outputs"][0]["id"]
- self.dataset_populator.run_tool(
+ run_response = self.dataset_populator.run_tool(
tool_id="cat_data_and_sleep",
inputs={
"sleep_time": 15,
@@ -709,15 +709,34 @@ def test_database_operation_tool_with_pending_inputs(self):
},
history_id=history_id,
)
+ output_hdca_id = run_response["implicit_collections"][0]["id"]
run_response = self.dataset_populator.run_tool(
tool_id="__EXTRACT_DATASET__",
inputs={
- "data_collection": {"src": "hdca", "id": hdca1_id},
+ "data_collection": {"src": "hdca", "id": output_hdca_id},
},
history_id=history_id,
)
assert run_response["outputs"][0]["state"] != "ok"

+ @skip_without_tool("__EXTRACT_DATASET__")
+ def test_extract_dataset_invalid_element_identifier(self):
+ with self.dataset_populator.test_history(require_new=False) as history_id:
+ hdca1_id = self.dataset_collection_populator.create_list_in_history(
+ history_id, contents=["a\nb\nc\nd", "e\nf\ng\nh"], wait=True
+ ).json()["outputs"][0]["id"]
+ run_response = self.dataset_populator.run_tool_raw(
+ tool_id="__EXTRACT_DATASET__",
+ inputs={
+ "data_collection": {"src": "hdca", "id": hdca1_id},
+ "which": {"which_dataset": "by_index", "index": 100},
+ },
+ history_id=history_id,
+ input_format="21.01",
+ )
+ assert run_response.status_code == 400
+ assert run_response.json()["err_msg"] == "Dataset collection has no element_index with key 100."
+
@skip_without_tool("__FILTER_FAILED_DATASETS__")
def test_filter_failed_list(self):
with self.dataset_populator.test_history(require_new=False) as history_id: