Commit 51b723b

Merge pull request #18608 from nsoranzo/mypy_1.11.0

Fixes for errors reported by mypy 1.11.0

nsoranzo authored Aug 2, 2024
2 parents 297aeae + aeb8880

Showing 46 changed files with 600 additions and 426 deletions.
4 changes: 2 additions & 2 deletions lib/galaxy/authnz/__init__.py
@@ -43,7 +43,7 @@ def __init__(self, provider, config, backend_config):
     def refresh(self, trans, token):
         raise NotImplementedError()
 
-    def authenticate(self, provider, trans):
+    def authenticate(self, trans, idphint=None):
         """Runs for authentication process. Checks the database if a
         valid identity exists in the database; if yes, then the user
         is authenticated, if not, it generates a provider-specific
@@ -72,7 +72,7 @@ def callback(self, state_token: str, authz_code: str, trans, login_redirect_url)
         """
         raise NotImplementedError()
 
-    def disconnect(self, provider, trans, disconnect_redirect_url=None):
+    def disconnect(self, provider, trans, disconnect_redirect_url=None, email=None, association_id=None):
         raise NotImplementedError()
 
     def logout(self, trans, post_user_logout_href=None):

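These two hunks widen the abstract base class in lib/galaxy/authnz/__init__.py to the union of the parameters its concrete backends (Custos, PSA) actually need. mypy rejects an override whose parameters do not line up with the method it overrides, since a call made through the base type could bind arguments to the wrong slots. A minimal sketch of the rule, using hypothetical classes rather than Galaxy code:

class IdentityProvider:
    # The base declares the widest signature every backend must accept.
    def disconnect(self, provider, trans, disconnect_redirect_url=None, email=None, association_id=None):
        raise NotImplementedError()


class NarrowBackend(IdentityProvider):
    # mypy: error: Signature of "disconnect" incompatible with supertype
    # "IdentityProvider"  [override]
    def disconnect(self, provider, trans, disconnect_redirect_url=None):
        return None

Subclasses may add extra optional parameters of their own, but they cannot drop or reorder ones the base declares, which is why the backends below are updated in lockstep.
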
2 changes: 1 addition & 1 deletion lib/galaxy/authnz/custos_authnz.py
@@ -343,7 +343,7 @@ def create_user(self, token, trans, login_redirect_url):
         trans.sa_session.commit()
         return login_redirect_url, user
 
-    def disconnect(self, provider, trans, email=None, disconnect_redirect_url=None):
+    def disconnect(self, provider, trans, disconnect_redirect_url=None, email=None, association_id=None):
         try:
             user = trans.user
             index = 0

2 changes: 1 addition & 1 deletion lib/galaxy/authnz/managers.py
@@ -432,7 +432,7 @@ def disconnect(self, provider, trans, email=None, disconnect_redirect_url=None,
             if success is False:
                 return False, message, None
             elif provider in KEYCLOAK_BACKENDS:
-                return backend.disconnect(provider, trans, email, disconnect_redirect_url)
+                return backend.disconnect(provider, trans, disconnect_redirect_url, email=email)
             return backend.disconnect(provider, trans, disconnect_redirect_url)
         except Exception:
             msg = f"An error occurred when disconnecting authentication with `{provider}` identity provider for user `{trans.user.username}`"

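The call-site change follows from the reordered disconnect signature: disconnect_redirect_url now comes before email, so the Keycloak branch passes email by keyword instead of positionally. With several optional parameters, keyword calls keep the bindings explicit; a tiny self-contained illustration (not Galaxy code):

def disconnect(provider, trans, disconnect_redirect_url=None, email=None, association_id=None):
    return {"redirect": disconnect_redirect_url, "email": email}


# A positional third argument binds to disconnect_redirect_url, whatever it holds:
print(disconnect("keycloak", None, "user@example.org"))
# {'redirect': 'user@example.org', 'email': None}

# A keyword argument cannot land in the wrong slot:
print(disconnect("keycloak", None, email="user@example.org"))
# {'redirect': None, 'email': 'user@example.org'}
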
4 changes: 2 additions & 2 deletions lib/galaxy/authnz/psa_authnz.py
@@ -193,7 +193,7 @@ def refresh(self, trans, user_authnz_token):
             return True
         return False
 
-    def authenticate(self, trans):
+    def authenticate(self, trans, idphint=None):
         on_the_fly_config(trans.sa_session)
         strategy = Strategy(trans.request, trans.session, Storage, self.config)
         backend = self._load_backend(strategy, self.config["redirect_uri"])
@@ -224,7 +224,7 @@ def callback(self, state_token, authz_code, trans, login_redirect_url):
 
         return redirect_url, self.config.get("user", None)
 
-    def disconnect(self, provider, trans, disconnect_redirect_url=None, association_id=None):
+    def disconnect(self, provider, trans, disconnect_redirect_url=None, email=None, association_id=None):
         on_the_fly_config(trans.sa_session)
         self.config[setting_name("DISCONNECT_REDIRECT_URL")] = (
             disconnect_redirect_url if disconnect_redirect_url is not None else ()

6 changes: 3 additions & 3 deletions lib/galaxy/job_execution/setup.py
@@ -31,7 +31,7 @@
     MetadataFile,
 )
 from galaxy.util import safe_makedirs
-from galaxy.util.dictifiable import Dictifiable
+from galaxy.util.dictifiable import UsesDictVisibleKeys
 
 TOOL_PROVIDED_JOB_METADATA_FILE = "galaxy.json"
 TOOL_PROVIDED_JOB_METADATA_KEYS = ["name", "info", "dbkey", "created_from_basename"]
@@ -62,7 +62,7 @@ def set_job_outputs(self, job_outputs: List[JobOutput]) -> None:
         self.output_hdas_and_paths = {t.output_name: (t.dataset, t.dataset_path) for t in job_outputs}
 
 
-class JobIO(Dictifiable):
+class JobIO(UsesDictVisibleKeys):
     dict_collection_visible_keys = (
         "job_id",
         "working_directory",
@@ -168,7 +168,7 @@ def from_dict(cls, io_dict, sa_session):
         return cls(sa_session=sa_session, **io_dict)
 
     def to_dict(self):
-        io_dict = super().to_dict()
+        io_dict = super()._dictify_view_keys()
         # dict_for will always add `model_class`, we don't need or want it
         io_dict.pop("model_class")
        io_dict["user_context"] = self.user_context.to_dict()

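JobIO only needs the declared-keys serialization, so it swaps the full Dictifiable mixin for UsesDictVisibleKeys and calls its helper directly instead of overriding and re-calling to_dict(). Roughly, such a mixin turns dict_collection_visible_keys into a dict view; the sketch below is an assumption for illustration, and the real implementation lives in galaxy.util.dictifiable:

class UsesDictVisibleKeys:
    # Subclasses list the attributes that belong in the dict view.
    dict_collection_visible_keys: tuple = ()

    def _dictify_view_keys(self) -> dict:
        # Collect each declared key, tagging the class name the way the diff
        # above implies (JobIO.to_dict() pops "model_class" back out).
        rval = {key: getattr(self, key, None) for key in self.dict_collection_visible_keys}
        rval["model_class"] = self.__class__.__name__
        return rval


class JobIOSketch(UsesDictVisibleKeys):
    dict_collection_visible_keys = ("job_id", "working_directory")

    def __init__(self, job_id: int, working_directory: str):
        self.job_id = job_id
        self.working_directory = working_directory


print(JobIOSketch(1, "/tmp/job_1")._dictify_view_keys())
# {'job_id': 1, 'working_directory': '/tmp/job_1', 'model_class': 'JobIOSketch'}
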
8 changes: 3 additions & 5 deletions lib/galaxy/jobs/__init__.py
@@ -1319,7 +1319,7 @@ def working_directory(self):
         )
         return self.__working_directory
 
-    def working_directory_exists(self):
+    def working_directory_exists(self) -> bool:
         job = self.get_job()
         return self.app.object_store.exists(job, base_dir="job_work", dir_only=True, obj_dir=True)
 
@@ -2117,7 +2117,7 @@ def check_tool_output(self, tool_stdout, tool_stderr, tool_exit_code, job, job_s
 
         return state
 
-    def cleanup(self, delete_files=True):
+    def cleanup(self, delete_files: bool = True) -> None:
         # At least one of these tool cleanup actions (job import), is needed
         # for the tool to work properly, that is why one might want to run
         # cleanup but not delete files.
@@ -2130,9 +2130,7 @@ def cleanup(self, delete_files=True):
                 if e.errno != errno.ENOENT:
                     raise
            if delete_files:
-                self.object_store.delete(
-                    self.get_job(), base_dir="job_work", entire_dir=True, dir_only=True, obj_dir=True
-                )
+                self.object_store.delete(self.get_job(), base_dir="job_work", entire_dir=True, obj_dir=True)
         except Exception:
             log.exception("Unable to cleanup job %d", self.job_id)
 

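The return-type annotations are routine typing hardening; the more interesting hunk collapses the delete() call and drops dir_only=True, presumably because the object store's delete() does not accept that keyword alongside entire_dir (an assumption; the authoritative signature is in galaxy.objectstore). Passing a keyword the callee does not declare is exactly what mypy reports as a [call-arg] error; a hypothetical stand-in:

def delete(obj, base_dir: str = "", entire_dir: bool = False, obj_dir: bool = False) -> None:
    """Hypothetical stand-in for an object-store delete()."""


delete(None, base_dir="job_work", entire_dir=True, obj_dir=True)  # accepted
# delete(None, base_dir="job_work", entire_dir=True, dir_only=True)
# ^ mypy: error: Unexpected keyword argument "dir_only" for "delete"  [call-arg]
# (and a TypeError at runtime, which is why the call is commented out)
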
24 changes: 16 additions & 8 deletions lib/galaxy/jobs/runners/aws.py
@@ -401,11 +401,13 @@ def recover(self, job, job_wrapper):
         msg = "(name!r/runner!r) is still in {state!s} state, adding to the runner monitor queue"
         job_id = job.get_job_runner_external_id()
         job_name = self.JOB_NAME_PREFIX + job_wrapper.get_id_tag()
-        ajs = AsynchronousJobState(files_dir=job_wrapper.working_directory, job_wrapper=job_wrapper)
-        ajs.job_id = str(job_id)
-        ajs.job_name = job_name
-        ajs.job_wrapper = job_wrapper
-        ajs.job_destination = job_wrapper.job_destination
+        ajs = AsynchronousJobState(
+            files_dir=job_wrapper.working_directory,
+            job_wrapper=job_wrapper,
+            job_id=str(job_id),
+            job_name=job_name,
+            job_destination=job_wrapper.job_destination,
+        )
         if job.state in (model.Job.states.RUNNING, model.Job.states.STOPPED):
             log.debug(msg.format(name=job.id, runner=job.job_runner_name, state=job.state))
             ajs.old_state = model.Job.states.RUNNING
@@ -417,14 +419,20 @@ def recover(self, job, job_wrapper):
             ajs.running = False
             self.monitor_queue.put(ajs)
 
-    def fail_job(self, job_state, exception=False):
+    def fail_job(self, job_state: JobState, exception=False, message="Job failed", full_status=None):
         if getattr(job_state, "stop_job", True):
             self.stop_job(job_state.job_wrapper)
         job_state.job_wrapper.reclaim_ownership()
         self._handle_runner_state("failure", job_state)
         if not job_state.runner_state_handled:
-            job_state.job_wrapper.fail(getattr(job_state, "fail_message", "Job failed"), exception=exception)
-            self._finish_or_resubmit_job(job_state, "", job_state.fail_message, job_id=job_state.job_id)
+            full_status = full_status or {}
+            tool_stdout = full_status.get("stdout")
+            tool_stderr = full_status.get("stderr")
+            fail_message = getattr(job_state, "fail_message", message)
+            job_state.job_wrapper.fail(
+                fail_message, tool_stdout=tool_stdout, tool_stderr=tool_stderr, exception=exception
+            )
+            self._finish_or_resubmit_job(job_state, "", fail_message)
         if job_state.job_wrapper.cleanup_job == "always":
             job_state.cleanup()
 

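Two things happen here: fail_job gains the message/full_status parameters (presumably matching the base runner's signature, in keeping with the rest of this commit), and AsynchronousJobState is now populated through its constructor instead of attribute-by-attribute assignment, which also drops the redundant job_wrapper re-assignment the old code carried. Constructor arguments give a type checker one declared signature to validate; a hypothetical sketch:

from dataclasses import dataclass
from typing import Optional


@dataclass
class JobStateSketch:
    # Hypothetical stand-in for AsynchronousJobState's constructor parameters.
    files_dir: str
    job_id: Optional[str] = None
    job_name: Optional[str] = None


# Constructor keywords are checked against the declared fields:
ajs = JobStateSketch(files_dir="/tmp/work", job_id="42", job_name="galaxy_42")

# Post-construction assignment works too, but a misspelled attribute such as
# `ajs.job_nmae = ...` only surfaces when a checker (or __slots__) catches it.
ajs.job_name = "galaxy_42"
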
2 changes: 1 addition & 1 deletion lib/galaxy/managers/base.py
@@ -697,7 +697,7 @@ def serialize(self, item, keys, **context):
             try:
                 returned[key] = self.serializers[key](item, key, **context)
             except SkipAttribute:
-                # dont add this key if the deserializer threw this
+                # don't add this key if the serializer threw this
                 pass
         elif key in self.serializable_keyset:
             returned[key] = self.default_serializer(item, key, **context)

4 changes: 2 additions & 2 deletions lib/galaxy/managers/configuration.py
@@ -104,8 +104,8 @@ def __init__(self, app):
         self.default_view = "all"
         self.add_view("all", list(self.serializers.keys()))
 
-    def default_serializer(self, config, key):
-        return getattr(config, key, None)
+    def default_serializer(self, item, key, **context):
+        return getattr(item, key, None)
 
     def add_serializers(self):
         def _defaults_to(default) -> base.Serializer:

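This file and the manager files below make the same move: overrides rename their first parameter (config, dataset, dataset_assoc, hda, history) to item so it matches the base manager's signature. mypy compares parameter names as well as counts, because a caller holding the base type may pass them as keywords; a hypothetical sketch:

class ModelSerializer:
    def default_serializer(self, item, key, **context):
        return getattr(item, key, None)


class ConfigSerializerSketch(ModelSerializer):
    # mypy: error: Signature of "default_serializer" incompatible with
    # supertype "ModelSerializer"  [override]
    # Renaming "item" to "config" breaks callers that write
    # serializer.default_serializer(item=..., key=...).
    def default_serializer(self, config, key, **context):
        return getattr(config, key, None)
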
32 changes: 16 additions & 16 deletions lib/galaxy/managers/datasets.py
@@ -81,26 +81,26 @@ def create(self, manage_roles=None, access_roles=None, flush=True, **kwargs):
             session.commit()
         return dataset
 
-    def copy(self, dataset, **kwargs):
+    def copy(self, item, **kwargs):
         raise exceptions.NotImplemented("Datasets cannot be copied")
 
-    def purge(self, dataset, flush=True):
+    def purge(self, item, flush=True, **kwargs):
         """
         Remove the object_store/file for this dataset from storage and mark
         as purged.
 
         :raises exceptions.ConfigDoesNotAllowException: if the instance doesn't allow
         """
-        self.error_unless_dataset_purge_allowed(dataset)
+        self.error_unless_dataset_purge_allowed(item)
 
         # the following also marks dataset as purged and deleted
-        dataset.full_delete()
-        self.session().add(dataset)
+        item.full_delete()
+        self.session().add(item)
         if flush:
             session = self.session()
             with transaction(session):
                 session.commit()
-        return dataset
+        return item
 
     def purge_datasets(self, request: PurgeDatasetsTaskRequest):
         """
@@ -376,7 +376,7 @@ def delete(self, item, flush: bool = True, stop_job: bool = False, **kwargs):
             self.stop_creating_job(item, flush=flush)
         return item
 
-    def purge(self, dataset_assoc, flush=True):
+    def purge(self, item, flush=True, **kwargs):
         """
         Purge this DatasetInstance and the dataset underlying it.
         """
@@ -388,15 +388,15 @@ def purge(self, dataset_assoc, flush=True):
         # so that job cleanup associated with stop_creating_job will see
         # the dataset as purged.
         flush_required = not self.app.config.track_jobs_in_database
-        super().purge(dataset_assoc, flush=flush or flush_required)
+        super().purge(item, flush=flush or flush_required, **kwargs)
 
-        # stop any jobs outputing the dataset_assoc
-        self.stop_creating_job(dataset_assoc, flush=True)
+        # stop any jobs outputing the dataset association
+        self.stop_creating_job(item, flush=True)
 
         # more importantly, purge underlying dataset as well
-        if dataset_assoc.dataset.user_can_purge:
-            self.dataset_manager.purge(dataset_assoc.dataset)
-        return dataset_assoc
+        if item.dataset.user_can_purge:
+            self.dataset_manager.purge(item.dataset, flush=flush, **kwargs)
+        return item
 
     def by_user(self, user):
         raise exceptions.NotImplemented("Abstract Method")
@@ -782,19 +782,19 @@ def add_serializers(self):
         # remove the single nesting key here
         del self.serializers["metadata"]
 
-    def serialize(self, dataset_assoc, keys, **context):
+    def serialize(self, item, keys, **context):
         """
         Override to add metadata as flattened keys on the serialized DatasetInstance.
         """
         # if 'metadata' isn't removed from keys here serialize will retrieve the un-serializable MetadataCollection
         # TODO: remove these when metadata is sub-object
         KEYS_HANDLED_SEPARATELY = ("metadata",)
         left_to_handle = self._pluck_from_list(keys, KEYS_HANDLED_SEPARATELY)
-        serialized = super().serialize(dataset_assoc, keys, **context)
+        serialized = super().serialize(item, keys, **context)
 
         # add metadata directly to the dict instead of as a sub-object
         if "metadata" in left_to_handle:
-            metadata = self._prefixed_metadata(dataset_assoc)
+            metadata = self._prefixed_metadata(item)
             serialized.update(metadata)
         return serialized
 

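Besides the dataset_assoc to item renames, purge() now accepts and forwards **kwargs, so an option introduced at one layer (for example, the user that the history manager passes down to the HDA manager below) travels through the super() chain without each class re-declaring it. A small self-contained sketch of the pattern:

class BaseManager:
    def purge(self, item, flush=True, **kwargs):
        print(f"purging {item!r}, flush={flush}, extras={kwargs}")
        return item


class DatasetAssocManagerSketch(BaseManager):
    def purge(self, item, flush=True, **kwargs):
        # Forward everything so options added by callers keep working.
        return super().purge(item, flush=flush, **kwargs)


DatasetAssocManagerSketch().purge("hda-1", user="alice")
# purging 'hda-1', flush=True, extras={'user': 'alice'}
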
6 changes: 3 additions & 3 deletions lib/galaxy/managers/hdas.py
@@ -228,14 +228,14 @@ def copy(
         return copy
 
     # .... deletion and purging
-    def purge(self, hda, flush=True, **kwargs):
+    def purge(self, item, flush=True, **kwargs):
         if self.app.config.enable_celery_tasks:
             from galaxy.celery.tasks import purge_hda
 
             user = kwargs.get("user")
-            return purge_hda.delay(hda_id=hda.id, task_user_id=getattr(user, "id", None))
+            return purge_hda.delay(hda_id=item.id, task_user_id=getattr(user, "id", None))
         else:
-            self._purge(hda, flush=flush)
+            self._purge(item, flush=flush)
 
     def _purge(self, hda, flush=True):
         """

8 changes: 4 additions & 4 deletions lib/galaxy/managers/histories.py
@@ -289,19 +289,19 @@ def most_recent(self, user, filters=None, current_history=None):
         return self.session().scalars(stmt).first()
 
     # .... purgable
-    def purge(self, history, flush=True, **kwargs):
+    def purge(self, item, flush=True, **kwargs):
         """
         Purge this history and all HDAs, Collections, and Datasets inside this history.
         """
-        self.error_unless_mutable(history)
+        self.error_unless_mutable(item)
         self.hda_manager.dataset_manager.error_unless_dataset_purge_allowed()
         # First purge all the datasets
-        for hda in history.datasets:
+        for hda in item.datasets:
             if not hda.purged:
                 self.hda_manager.purge(hda, flush=True, **kwargs)
 
         # Now mark the history as purged
-        super().purge(history, flush=flush, **kwargs)
+        super().purge(item, flush=flush, **kwargs)
 
     # .... current
     # TODO: make something to bypass the anon user + current history permissions issue

16 changes: 8 additions & 8 deletions lib/galaxy/managers/rbac_secured.py
@@ -42,10 +42,10 @@ def error_unless_permitted(self, item, user, trans=None):
             error_info = dict(model_class=item.__class__, id=getattr(item, "id", None))
             raise self.permission_failed_error_class(**error_info)
 
-    def grant(self, item, user, flush=True):
+    def grant(self, item, user, flush: bool = True):
         raise NotImplementedError("abstract parent class")
 
-    def revoke(self, item, user, flush=True):
+    def revoke(self, item, user, flush: bool = True):
         raise NotImplementedError("abstract parent class")
 
     def _role_is_permitted(self, item, role):
@@ -197,13 +197,13 @@ def is_permitted(self, dataset, user, trans=None):
                 return True
         return False
 
-    def grant(self, dataset, user, flush=True):
+    def grant(self, item, user, flush: bool = True):
         private_role = self._user_private_role(user)
-        return self._grant_role(dataset, private_role, flush=flush)
+        return self._grant_role(item, private_role, flush=flush)
 
-    def revoke(self, dataset, user, flush=True):
+    def revoke(self, item, user, flush: bool = True):
         private_role = self._user_private_role(user)
-        return self._revoke_role(dataset, private_role, flush=flush)
+        return self._revoke_role(item, private_role, flush=flush)
 
     # ---- private
     def _role_is_permitted(self, dataset, role):
@@ -253,13 +253,13 @@ def is_permitted(self, dataset, user, trans=None):
             or self._user_has_all_roles(user, current_roles)
         )
 
-    def grant(self, item, user):
+    def grant(self, item, user, flush: bool = True):
         pass
         # not so easy
         # need to check for a sharing role
         # then add the new user to it
 
-    def revoke(self, item, user):
+    def revoke(self, item, user, flush: bool = True):
         pass
         # not so easy
 

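The sharing-permission overrides at the bottom are deliberate no-ops, but they still have to accept flush: an override that drops a parameter declared by the base class breaks any caller working through the base type, and mypy flags it. A compact hypothetical sketch:

class RBACPermissionSketch:
    def grant(self, item, user, flush: bool = True):
        raise NotImplementedError("abstract parent class")


class NoOpPermission(RBACPermissionSketch):
    # Fine: flush is accepted (and ignored), so base-typed callers still work.
    def grant(self, item, user, flush: bool = True):
        pass


class BrokenPermission(RBACPermissionSketch):
    # mypy: error: Signature of "grant" incompatible with supertype
    # "RBACPermissionSketch"  [override]
    def grant(self, item, user):
        pass
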
[Diff truncated: the remaining changed files are not shown here.]
