Commit 6d0dc8f

Merge branch 'release_24.1' into dev

mvdbeek committed Jul 10, 2024
2 parents: 5acd5c1 + b4af9b3

Showing 5 changed files with 95 additions and 16 deletions.
17 changes: 17 additions & 0 deletions lib/galaxy/model/__init__.py
@@ -6498,6 +6498,23 @@ def attribute_columns(column_collection, attributes, nesting_level=None):
q = q.order_by(*order_by_columns)
return q

@property
def elements_deleted(self):
if not hasattr(self, "_elements_deleted"):
if session := object_session(self):
stmt = self._build_nested_collection_attributes_stmt(
hda_attributes=("deleted",), dataset_attributes=("deleted",)
)
stmt = stmt.exists().where(or_(HistoryDatasetAssociation.deleted == true(), Dataset.deleted == true()))
self._elements_deleted = session.execute(select(stmt)).scalar()
else:
self._elements_deleted = False
for dataset_instance in self.dataset_instances:
if dataset_instance.deleted or dataset_instance.dataset.deleted:
self._elements_deleted = True
break
return self._elements_deleted

@property
def dataset_states_and_extensions_summary(self):
if not hasattr(self, "_dataset_states_and_extensions_summary"):
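The new `elements_deleted` property caches whether any dataset in the (possibly nested) collection is deleted: it issues a single `SELECT EXISTS(...)` query when the object is attached to a session and falls back to walking the in-memory instances otherwise. A minimal sketch of that EXISTS pattern with a toy SQLAlchemy model (the `Element` class and column names below are illustrative, not Galaxy's schema):

```python
# Toy model demonstrating the "SELECT EXISTS(...)" pattern used by elements_deleted;
# Element and collection_id are made-up names, not Galaxy's actual tables.
from sqlalchemy import Boolean, Column, Integer, create_engine, select, true
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Element(Base):
    __tablename__ = "element"
    id = Column(Integer, primary_key=True)
    collection_id = Column(Integer, index=True)
    deleted = Column(Boolean, default=False, nullable=False)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all(
        [Element(collection_id=1, deleted=False), Element(collection_id=1, deleted=True)]
    )
    session.commit()

    # Build the inner query, wrap it in EXISTS, and select the boolean result:
    # one round trip, no element rows materialized.
    exists_clause = (
        select(Element.id)
        .where(Element.collection_id == 1)
        .where(Element.deleted == true())
        .exists()
    )
    any_deleted = session.execute(select(exists_clause)).scalar()
    print(bool(any_deleted))  # True
```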
53 changes: 40 additions & 13 deletions lib/galaxy/tools/parameters/basic.py
@@ -2189,19 +2189,37 @@ def from_json(self, value, trans, other_values=None):
dataset_matcher_factory = get_dataset_matcher_factory(trans)
dataset_matcher = dataset_matcher_factory.dataset_matcher(self, other_values)
for v in rval:
if v:
if hasattr(v, "deleted") and v.deleted:
if isinstance(v, DatasetCollectionElement):
if hda := v.hda:
v = hda
elif ldda := v.ldda:
v = ldda
elif collection := v.child_collection:
v = collection
elif not v.collection and v.collection.populated_optimized:
raise ParameterValueError("the selected collection has not been populated.", self.name)
else:
raise ParameterValueError("Collection element in unexpected state", self.name)
if isinstance(v, DatasetInstance):
if v.deleted:
raise ParameterValueError("the previously selected dataset has been deleted.", self.name)
elif hasattr(v, "dataset") and v.dataset.state in [Dataset.states.ERROR, Dataset.states.DISCARDED]:
elif v.dataset and v.dataset.state in [Dataset.states.ERROR, Dataset.states.DISCARDED]:
raise ParameterValueError(
"the previously selected dataset has entered an unusable state", self.name
)
elif hasattr(v, "dataset"):
if isinstance(v, DatasetCollectionElement):
v = v.hda
match = dataset_matcher.hda_match(v)
if match and match.implicit_conversion:
v.implicit_conversion = True # type:ignore[union-attr]
match = dataset_matcher.hda_match(v)
if match and match.implicit_conversion:
v.implicit_conversion = True # type:ignore[union-attr]
elif isinstance(v, HistoryDatasetCollectionAssociation):
if v.deleted:
raise ParameterValueError("the previously selected dataset collection has been deleted.", self.name)
v = v.collection
if isinstance(v, DatasetCollection):
if v.elements_deleted:
raise ParameterValueError(
"the previously selected dataset collection has elements that are deleted.", self.name
)

if not self.multiple:
if len(rval) > 1:
raise ParameterValueError("more than one dataset supplied to single input dataset parameter", self.name)
@@ -2498,10 +2516,19 @@ def from_json(self, value, trans, other_values=None):
rval = session.get(HistoryDatasetCollectionAssociation, int(value[len("hdca:") :]))
else:
rval = session.get(HistoryDatasetCollectionAssociation, int(value))
if rval and isinstance(rval, HistoryDatasetCollectionAssociation):
if rval.deleted:
raise ParameterValueError("the previously selected dataset collection has been deleted", self.name)
# TODO: Handle error states, implement error states ...
if rval:
if isinstance(rval, HistoryDatasetCollectionAssociation):
if rval.deleted:
raise ParameterValueError("the previously selected dataset collection has been deleted", self.name)
if rval.collection.elements_deleted:
raise ParameterValueError(
"the previously selected dataset collection has elements that are deleted.", self.name
)
if isinstance(rval, DatasetCollectionElement):
if (child_collection := rval.child_collection) and child_collection.elements_deleted:
raise ParameterValueError(
"the previously selected dataset collection has elements that are deleted.", self.name
)
return rval

def to_text(self, value):
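The first hunk above unwraps a `DatasetCollectionElement` to whichever payload it actually carries (HDA, LDDA, or a nested collection) using assignment expressions before validating its state. A small generic sketch of that unwrapping style, with made-up names rather than Galaxy classes:

```python
# Generic sketch of the walrus-operator unwrapping chain used above;
# Wrapper and its attributes are illustrative, not Galaxy's models.
from dataclasses import dataclass
from typing import Optional


@dataclass
class Wrapper:
    hda: Optional[str] = None
    ldda: Optional[str] = None
    child_collection: Optional[list] = None


def unwrap(v: Wrapper):
    # Each `:=` both tests the attribute and binds it, so the first non-None
    # payload wins and is available inside the matching branch.
    if hda := v.hda:
        return hda
    elif ldda := v.ldda:
        return ldda
    elif collection := v.child_collection:
        return collection
    raise ValueError("element in unexpected state")


print(unwrap(Wrapper(ldda="library dataset")))  # "library dataset"
```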
2 changes: 1 addition & 1 deletion lib/galaxy/webapps/galaxy/api/jobs.py
@@ -98,7 +98,7 @@
)

ViewQueryParam: JobIndexViewEnum = Query(
default="collection",
default=JobIndexViewEnum.collection,
title="View",
description="Determines columns to return. Defaults to 'collection'.",
)
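The change above replaces a bare string default with the corresponding enum member, so the declared parameter type and its default agree. A minimal FastAPI sketch of an enum-typed query parameter (the `ViewEnum` class and `/jobs` route here are illustrative, not Galaxy's actual endpoint):

```python
# Minimal sketch of an Enum-typed query parameter with an Enum default;
# ViewEnum and the route are made up for illustration.
from enum import Enum

from fastapi import FastAPI, Query


class ViewEnum(str, Enum):
    collection = "collection"
    admin_job_list = "admin_job_list"


app = FastAPI()


@app.get("/jobs")
def index(view: ViewEnum = Query(default=ViewEnum.collection, title="View")):
    # FastAPI validates ?view=... against the enum and lists the choices in the docs;
    # use view.value wherever downstream code expects a plain string.
    return {"view": view.value}
```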
4 changes: 2 additions & 2 deletions lib/galaxy/webapps/galaxy/services/jobs.py
@@ -86,7 +86,7 @@ def index(
# TODO: optimize if this crucial
if check_security_of_jobs and not security_check(trans, job.history, check_accessible=True):
raise exceptions.ItemAccessibilityException("Cannot access the request job objects.")
job_dict = job.to_dict(view, system_details=is_admin)
job_dict = job.to_dict(view.value, system_details=is_admin)
if view == JobIndexViewEnum.admin_job_list:
job_dict["decoded_job_id"] = job.id
if user_details:
@@ -97,7 +97,7 @@

def _check_nonadmin_access(
self,
view: str,
view: JobIndexViewEnum,
user_details: bool,
decoded_user_id: Optional[DecodedDatabaseIdField],
trans_user_id: Optional[int],
35 changes: 35 additions & 0 deletions lib/galaxy_test/api/test_jobs.py
@@ -62,6 +62,24 @@ def test_admin_job_list(self, history_id):
job = jobs[0]
self._assert_has_keys(job, "command_line", "external_id", "handler")

@pytest.mark.require_new_history
def test_job_list_collection_view(self, history_id):
self.__history_with_new_dataset(history_id)
jobs_response = self._get("jobs?view=collection")
self._assert_status_code_is_ok(jobs_response)
jobs = jobs_response.json()
job = jobs[0]
self._assert_has_keys(job, "id", "tool_id", "state")

@pytest.mark.require_new_history
def test_job_list_default_view(self, history_id):
self.__history_with_new_dataset(history_id)
jobs_response = self._get(f"jobs?history_id={history_id}")
self._assert_status_code_is_ok(jobs_response)
jobs = jobs_response.json()
job = jobs[0]
self._assert_has_keys(job, "id", "tool_id", "state")

@pytest.mark.require_new_history
def test_index_state_filter(self, history_id):
# Initial number of ok jobs
@@ -576,6 +594,23 @@ def paths_deleted():
if output_dataset_paths_exist:
wait_on(paths_deleted, "path deletion")

def test_submission_on_collection_with_deleted_element(self, history_id):
hdca = self.dataset_collection_populator.create_list_of_list_in_history(history_id=history_id, wait=True).json()
hda_id = hdca["elements"][0]["object"]["elements"][0]["object"]["id"]
self.dataset_populator.delete_dataset(history_id=history_id, content_id=hda_id)
response = self.dataset_populator.run_tool_raw(
"is_of_type",
inputs={
"collection": {"batch": True, "values": [{"src": "hdca", "id": hdca["id"], "map_over_type": "list"}]},
},
history_id=history_id,
)
assert response.status_code == 400
assert (
response.json()["err_msg"]
== "parameter 'collection': the previously selected dataset collection has elements that are deleted."
)

@pytest.mark.require_new_history
@skip_without_tool("create_2")
def test_purging_output_cleaned_after_ok_run(self, history_id):
