diff --git a/lib/galaxy/model/__init__.py b/lib/galaxy/model/__init__.py
index 586543f0f9c4..7f3159b131e7 100644
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -6498,6 +6498,23 @@ def attribute_columns(column_collection, attributes, nesting_level=None):
         q = q.order_by(*order_by_columns)
         return q
 
+    @property
+    def elements_deleted(self):
+        if not hasattr(self, "_elements_deleted"):
+            if session := object_session(self):
+                stmt = self._build_nested_collection_attributes_stmt(
+                    hda_attributes=("deleted",), dataset_attributes=("deleted",)
+                )
+                stmt = stmt.exists().where(or_(HistoryDatasetAssociation.deleted == true(), Dataset.deleted == true()))
+                self._elements_deleted = session.execute(select(stmt)).scalar()
+            else:
+                self._elements_deleted = False
+                for dataset_instance in self.dataset_instances:
+                    if dataset_instance.deleted or dataset_instance.dataset.deleted:
+                        self._elements_deleted = True
+                        break
+        return self._elements_deleted
+
     @property
     def dataset_states_and_extensions_summary(self):
         if not hasattr(self, "_dataset_states_and_extensions_summary"):
diff --git a/lib/galaxy/tools/parameters/basic.py b/lib/galaxy/tools/parameters/basic.py
index 8d243cb88014..4a20224a7953 100644
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -2189,19 +2189,37 @@ def from_json(self, value, trans, other_values=None):
         dataset_matcher_factory = get_dataset_matcher_factory(trans)
         dataset_matcher = dataset_matcher_factory.dataset_matcher(self, other_values)
         for v in rval:
-            if v:
-                if hasattr(v, "deleted") and v.deleted:
+            if isinstance(v, DatasetCollectionElement):
+                if hda := v.hda:
+                    v = hda
+                elif ldda := v.ldda:
+                    v = ldda
+                elif collection := v.child_collection:
+                    v = collection
+                elif v.collection and not v.collection.populated_optimized:
+                    raise ParameterValueError("the selected collection has not been populated.", self.name)
+                else:
+                    raise ParameterValueError("Collection element in unexpected state", self.name)
+            if isinstance(v, DatasetInstance):
+                if v.deleted:
                     raise ParameterValueError("the previously selected dataset has been deleted.", self.name)
-                elif hasattr(v, "dataset") and v.dataset.state in [Dataset.states.ERROR, Dataset.states.DISCARDED]:
+                elif v.dataset and v.dataset.state in [Dataset.states.ERROR, Dataset.states.DISCARDED]:
                     raise ParameterValueError(
                         "the previously selected dataset has entered an unusable state", self.name
                     )
-                elif hasattr(v, "dataset"):
-                    if isinstance(v, DatasetCollectionElement):
-                        v = v.hda
-                    match = dataset_matcher.hda_match(v)
-                    if match and match.implicit_conversion:
-                        v.implicit_conversion = True  # type:ignore[union-attr]
+                match = dataset_matcher.hda_match(v)
+                if match and match.implicit_conversion:
+                    v.implicit_conversion = True  # type:ignore[union-attr]
+            elif isinstance(v, HistoryDatasetCollectionAssociation):
+                if v.deleted:
+                    raise ParameterValueError("the previously selected dataset collection has been deleted.", self.name)
+                v = v.collection
+            if isinstance(v, DatasetCollection):
+                if v.elements_deleted:
+                    raise ParameterValueError(
+                        "the previously selected dataset collection has elements that are deleted.", self.name
+                    )
+
         if not self.multiple:
             if len(rval) > 1:
                 raise ParameterValueError("more than one dataset supplied to single input dataset parameter", self.name)
@@ -2498,10 +2516,19 @@ def from_json(self, value, trans, other_values=None):
                 rval = session.get(HistoryDatasetCollectionAssociation, int(value[len("hdca:") :]))
             else:
                 rval = session.get(HistoryDatasetCollectionAssociation, int(value))
-        if rval and isinstance(rval, HistoryDatasetCollectionAssociation):
-            if rval.deleted:
-                raise ParameterValueError("the previously selected dataset collection has been deleted", self.name)
-            # TODO: Handle error states, implement error states ...
+        if rval:
+            if isinstance(rval, HistoryDatasetCollectionAssociation):
+                if rval.deleted:
+                    raise ParameterValueError("the previously selected dataset collection has been deleted", self.name)
+                if rval.collection.elements_deleted:
+                    raise ParameterValueError(
+                        "the previously selected dataset collection has elements that are deleted.", self.name
+                    )
+            if isinstance(rval, DatasetCollectionElement):
+                if (child_collection := rval.child_collection) and child_collection.elements_deleted:
+                    raise ParameterValueError(
+                        "the previously selected dataset collection has elements that are deleted.", self.name
+                    )
         return rval
 
     def to_text(self, value):
diff --git a/lib/galaxy/webapps/galaxy/api/jobs.py b/lib/galaxy/webapps/galaxy/api/jobs.py
index 6c6f7757f9a7..6aebebe5ec3c 100644
--- a/lib/galaxy/webapps/galaxy/api/jobs.py
+++ b/lib/galaxy/webapps/galaxy/api/jobs.py
@@ -98,7 +98,7 @@
 )
 
 ViewQueryParam: JobIndexViewEnum = Query(
-    default="collection",
+    default=JobIndexViewEnum.collection,
     title="View",
     description="Determines columns to return. Defaults to 'collection'.",
 )
diff --git a/lib/galaxy/webapps/galaxy/services/jobs.py b/lib/galaxy/webapps/galaxy/services/jobs.py
index 9ecfb718b963..5c39175567bf 100644
--- a/lib/galaxy/webapps/galaxy/services/jobs.py
+++ b/lib/galaxy/webapps/galaxy/services/jobs.py
@@ -86,7 +86,7 @@ def index(
                 # TODO: optimize if this crucial
                 if check_security_of_jobs and not security_check(trans, job.history, check_accessible=True):
                     raise exceptions.ItemAccessibilityException("Cannot access the request job objects.")
-            job_dict = job.to_dict(view, system_details=is_admin)
+            job_dict = job.to_dict(view.value, system_details=is_admin)
             if view == JobIndexViewEnum.admin_job_list:
                 job_dict["decoded_job_id"] = job.id
             if user_details:
@@ -97,7 +97,7 @@ def index(
 
     def _check_nonadmin_access(
         self,
-        view: str,
+        view: JobIndexViewEnum,
         user_details: bool,
         decoded_user_id: Optional[DecodedDatabaseIdField],
         trans_user_id: Optional[int],
diff --git a/lib/galaxy_test/api/test_jobs.py b/lib/galaxy_test/api/test_jobs.py
index 09193ea2065e..82f9ecbab416 100644
--- a/lib/galaxy_test/api/test_jobs.py
+++ b/lib/galaxy_test/api/test_jobs.py
@@ -62,6 +62,24 @@ def test_admin_job_list(self, history_id):
         job = jobs[0]
         self._assert_has_keys(job, "command_line", "external_id", "handler")
 
+    @pytest.mark.require_new_history
+    def test_job_list_collection_view(self, history_id):
+        self.__history_with_new_dataset(history_id)
+        jobs_response = self._get("jobs?view=collection")
+        self._assert_status_code_is_ok(jobs_response)
+        jobs = jobs_response.json()
+        job = jobs[0]
+        self._assert_has_keys(job, "id", "tool_id", "state")
+
+    @pytest.mark.require_new_history
+    def test_job_list_default_view(self, history_id):
+        self.__history_with_new_dataset(history_id)
+        jobs_response = self._get(f"jobs?history_id={history_id}")
+        self._assert_status_code_is_ok(jobs_response)
+        jobs = jobs_response.json()
+        job = jobs[0]
+        self._assert_has_keys(job, "id", "tool_id", "state")
+
     @pytest.mark.require_new_history
     def test_index_state_filter(self, history_id):
         # Initial number of ok jobs
@@ -576,6 +594,23 @@ def paths_deleted():
         if output_dataset_paths_exist:
             wait_on(paths_deleted, "path deletion")
 
+    def test_submission_on_collection_with_deleted_element(self, history_id):
+        hdca = self.dataset_collection_populator.create_list_of_list_in_history(history_id=history_id, wait=True).json()
+        hda_id = hdca["elements"][0]["object"]["elements"][0]["object"]["id"]
+        self.dataset_populator.delete_dataset(history_id=history_id, content_id=hda_id)
+        response = self.dataset_populator.run_tool_raw(
+            "is_of_type",
+            inputs={
+                "collection": {"batch": True, "values": [{"src": "hdca", "id": hdca["id"], "map_over_type": "list"}]},
+            },
+            history_id=history_id,
+        )
+        assert response.status_code == 400
+        assert (
+            response.json()["err_msg"]
+            == "parameter 'collection': the previously selected dataset collection has elements that are deleted."
+        )
+
     @pytest.mark.require_new_history
     @skip_without_tool("create_2")
     def test_purging_output_cleaned_after_ok_run(self, history_id):
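Note on the `elements_deleted` property added above: when the collection is bound to a session, it avoids loading every element by wrapping the nested-attributes statement in an SQL EXISTS and fetching a single scalar, falling back to a Python loop over `dataset_instances` only for detached objects. Below is a minimal, self-contained sketch of that SQLAlchemy pattern; the `Collection`/`Element` models and the in-memory SQLite engine are illustrative assumptions (SQLAlchemy 2.x style), not Galaxy's actual models.

```python
# Illustrative sketch only: toy Collection/Element models stand in for
# Galaxy's DatasetCollection / HistoryDatasetAssociation.
from sqlalchemy import ForeignKey, create_engine, select, true
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class Collection(Base):
    __tablename__ = "collection"
    id: Mapped[int] = mapped_column(primary_key=True)


class Element(Base):
    __tablename__ = "element"
    id: Mapped[int] = mapped_column(primary_key=True)
    collection_id: Mapped[int] = mapped_column(ForeignKey("collection.id"))
    deleted: Mapped[bool] = mapped_column(default=False)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Collection(id=1))
    session.add_all([Element(collection_id=1), Element(collection_id=1, deleted=True)])
    session.commit()

    # Build the element query, wrap it in EXISTS with the deleted filter, and
    # fetch one scalar -- the same shape as the property's session branch.
    element_stmt = select(Element.id).where(Element.collection_id == 1)
    exists_stmt = element_stmt.exists().where(Element.deleted == true())
    elements_deleted = session.execute(select(exists_stmt)).scalar()
    assert elements_deleted  # True: one element is flagged deleted
```

EXISTS lets the database stop at the first deleted row, so the check stays cheap even for large collections, and the `hasattr` guard in the property memoizes the result per instance.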