
Merge pull request #17132 from jdavcs/dev_sa20_fix23
Towards SQLAlchemy 2.0: fix last cases of RemovedIn20Warning
mvdbeek authored Dec 12, 2023
2 parents 1fb0986 + 4664534 commit 819a799
Showing 13 changed files with 38 additions and 33 deletions.
4 changes: 2 additions & 2 deletions lib/galaxy/jobs/handler.py
@@ -879,7 +879,7 @@ def get_user_job_count_per_destination(self, user_id):
)
.group_by(model.Job.table.c.destination_id)
)
- for row in result:
+ for row in result.mappings():
# Add the count from the database to the cached count
rval[row["destination_id"]] = rval.get(row["destination_id"], 0) + row["job_count"]
return rval
@@ -897,7 +897,7 @@ def __cache_user_job_count_per_destination(self):
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING))))
.group_by(model.Job.table.c.user_id, model.Job.table.c.destination_id)
)
- for row in result:
+ for row in result.mappings():
if row["user_id"] not in self.user_job_count_per_destination:
self.user_job_count_per_destination[row["user_id"]] = {}
self.user_job_count_per_destination[row["user_id"]][row["destination_id"]] = row["job_count"]
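The pattern above: in SQLAlchemy 1.4/2.0, iterating a Result yields tuple-like Row objects, so string-keyed access goes through the .mappings() view. A minimal self-contained sketch (table and column names are stand-ins, not Galaxy's):

    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite://")
    with engine.connect() as conn:
        conn.execute(text("CREATE TABLE job (destination_id TEXT)"))
        conn.execute(text("INSERT INTO job VALUES ('slurm'), ('slurm'), ('local')"))
        result = conn.execute(
            text("SELECT destination_id, count(*) AS job_count FROM job GROUP BY destination_id")
        )
        for row in result.mappings():  # each row is a dict-like RowMapping
            print(row["destination_id"], row["job_count"])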
2 changes: 1 addition & 1 deletion lib/galaxy/managers/history_contents.py
@@ -186,7 +186,7 @@ def active_counts(self, history):
cast(func.sum(subquery.c.active), Integer).label("active"),
)
returned = self.app.model.context.execute(statement).one()
- return dict(returned)
+ return dict(returned._mapping)

def _active_counts_statement(self, model_class, history_id):
deleted_attr = model_class.deleted
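Same warning, single-row variant: Row no longer acts as a dict, so dict(row) becomes dict(row._mapping). A minimal sketch with stand-in labels:

    from sqlalchemy import create_engine, literal, select

    engine = create_engine("sqlite://")
    with engine.connect() as conn:
        returned = conn.execute(select(literal(1).label("active"), literal(0).label("deleted"))).one()
        counts = dict(returned._mapping)  # {'active': 1, 'deleted': 0}
        # dict(returned) emits RemovedIn20Warning on 1.4 and fails on 2.0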
6 changes: 3 additions & 3 deletions lib/galaxy/managers/notification.py
@@ -204,7 +204,7 @@ def get_user_total_unread_notification_count(self, user: User) -> int:

def get_broadcasted_notification(self, notification_id: int, active_only: Optional[bool] = True):
stmt = (
- select(self.broadcast_notification_columns)
+ select(*self.broadcast_notification_columns)
.select_from(Notification)
.where(
and_(
@@ -333,7 +333,7 @@ def _user_notifications_query(
self, user: User, since: Optional[datetime] = None, active_only: Optional[bool] = True
):
stmt = (
- select(self.user_notification_columns)
+ select(*self.user_notification_columns)
.select_from(Notification)
.join(
UserNotificationAssociation,
@@ -356,7 +356,7 @@ def _user_notifications_query(

def _broadcasted_notifications_query(self, since: Optional[datetime] = None, active_only: Optional[bool] = True):
stmt = (
- select(self.broadcast_notification_columns)
+ select(*self.broadcast_notification_columns)
.select_from(Notification)
.where(Notification.category == MandatoryNotificationCategory.broadcast)
)
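These three changes are the 2.0 calling convention for select(): column expressions are passed positionally rather than as one list. A sketch (the column list is a stand-in for the manager's broadcast_notification_columns):

    from sqlalchemy import column, select

    notification_columns = [column("id"), column("subject")]  # stand-in column list
    stmt = select(*notification_columns)   # 2.0 style: positional expressions
    # stmt = select(notification_columns)  # legacy 1.x list style, removed in 2.0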
2 changes: 1 addition & 1 deletion lib/galaxy/managers/workflows.py
@@ -186,7 +186,7 @@ def index_query(

latest_workflow_load = joinedload(StoredWorkflow.latest_workflow)
if not payload.skip_step_counts:
- latest_workflow_load = latest_workflow_load.undefer("step_count")
+ latest_workflow_load = latest_workflow_load.undefer(Workflow.step_count)
latest_workflow_load = latest_workflow_load.lazyload(Workflow.steps)

stmt = stmt.options(joinedload(StoredWorkflow.annotations))
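String arguments to loader options are removed in 2.0, so undefer("step_count") becomes the attribute-bound undefer(Workflow.step_count); the controller changes further down (users_shared_with_count, username, average_rating) follow the same rule. A minimal sketch with a stand-in Workflow model, not Galaxy's:

    from sqlalchemy import Column, Integer, select
    from sqlalchemy.orm import declarative_base, deferred, undefer

    Base = declarative_base()

    class Workflow(Base):  # stand-in model
        __tablename__ = "workflow"
        id = Column(Integer, primary_key=True)
        step_count = deferred(Column(Integer))

    # stmt = select(Workflow).options(undefer("step_count"))       # string form, removed in 2.0
    stmt = select(Workflow).options(undefer(Workflow.step_count))  # attribute-bound form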
6 changes: 4 additions & 2 deletions lib/galaxy/model/__init__.py
@@ -1354,7 +1354,9 @@ class Job(Base, JobLike, UsesCreateAndUpdateTime, Dictifiable, Serializable):
"DataManagerJobAssociation", back_populates="job", uselist=False, cascade_backrefs=False
)
history_dataset_collection_associations = relationship("HistoryDatasetCollectionAssociation", back_populates="job")
- workflow_invocation_step = relationship("WorkflowInvocationStep", back_populates="job", uselist=False)
+ workflow_invocation_step = relationship(
+     "WorkflowInvocationStep", back_populates="job", uselist=False, cascade_backrefs=False
+ )

any_output_dataset_collection_instances_deleted: column_property # defined at the end of this module
any_output_dataset_deleted: column_property # defined at the end of this module
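cascade_backrefs=False opts the relationship into the 2.0 behavior on 1.4: assigning through a backref or back_populates no longer sweeps the assigned object into the other object's Session, so callers must add it explicitly (the parameter itself disappears in 2.0, where this is the only behavior). A runnable sketch with stand-in Job/Step models, not Galaxy's:

    from sqlalchemy import Column, ForeignKey, Integer, create_engine
    from sqlalchemy.orm import Session, declarative_base, relationship

    Base = declarative_base()

    class Job(Base):
        __tablename__ = "job"
        id = Column(Integer, primary_key=True)
        step = relationship("Step", back_populates="job", uselist=False, cascade_backrefs=False)

    class Step(Base):
        __tablename__ = "step"
        id = Column(Integer, primary_key=True)
        job_id = Column(ForeignKey("job.id"))
        job = relationship("Job", back_populates="step")

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        job = Job()
        session.add(job)
        step = Step()
        step.job = job     # with cascade_backrefs=False, `step` is NOT swept into the session
        session.add(step)  # the explicit add is now required
        session.commit()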
@@ -6947,7 +6949,7 @@ def contains_collection(self, collection_id):
# join parents to hdca, look for matching hdca_id
hdca = aliased(HDCA, name="hdca")
jointohdca = parents_cte.join(hdca, hdca.collection_id == parents_cte.c.dataset_collection_id)
- qry = Query(hdca.id).select_entity_from(jointohdca).filter(hdca.id == self.id)
+ qry = Query(hdca.id).select_from(jointohdca).filter(hdca.id == self.id)

results = qry.with_session(sa_session).all()
return len(results) > 0
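Query.select_entity_from() is removed in 2.0; here select_from() covers the use case. A sketch of the same shape, with a stand-in HDCA model and CTE rather than Galaxy's:

    from sqlalchemy import Column, Integer, create_engine, select
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class HDCA(Base):  # stand-in model
        __tablename__ = "hdca"
        id = Column(Integer, primary_key=True)
        collection_id = Column(Integer)

    # A CTE standing in for parents_cte in the code above:
    parents_cte = select(HDCA.collection_id.label("dataset_collection_id")).cte("parents")
    joined = parents_cte.join(HDCA, HDCA.collection_id == parents_cte.c.dataset_collection_id)

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        # select_entity_from(joined) is removed in 2.0; select_from(joined) takes its place:
        rows = session.query(HDCA.id).select_from(joined).filter(HDCA.id == 1).all()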
4 changes: 2 additions & 2 deletions lib/galaxy/model/database_utils.py
@@ -88,7 +88,7 @@ def create(self, encoding, template):
preparer = IdentifierPreparer(engine.dialect)
template = template or "template1"
database, template = preparer.quote(self.database), preparer.quote(template)
stmt = f"CREATE DATABASE {database} ENCODING '{encoding}' TEMPLATE {template}"
stmt = text(f"CREATE DATABASE {database} ENCODING '{encoding}' TEMPLATE {template}")
with engine.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
conn.execute(stmt)

@@ -131,7 +131,7 @@ def create(self, encoding, *arg):
with sqlalchemy_engine(self.url) as engine:
preparer = IdentifierPreparer(engine.dialect)
database = preparer.quote(self.database)
stmt = f"CREATE DATABASE {database} CHARACTER SET = '{encoding}'"
stmt = text(f"CREATE DATABASE {database} CHARACTER SET = '{encoding}'")
with engine.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
conn.execute(stmt)

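Driving these two changes: in 2.0, Connection.execute() no longer accepts a raw SQL string; statements must be wrapped in text() (the same fix appears in model_testing_utils.py below). Minimal sketch:

    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite://")
    with engine.connect() as conn:
        conn.execute(text("SELECT 1"))  # 2.0: passing the plain string "SELECT 1" raises an error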
2 changes: 1 addition & 1 deletion lib/galaxy/model/migrations/base.py
@@ -434,7 +434,7 @@ def get_url_string(engine: Engine) -> str:


def load_metadata(metadata: MetaData, engine: Engine) -> None:
- with engine.connect() as conn:
+ with engine.begin() as conn:
metadata.create_all(bind=conn)


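engine.connect() loses library-level autocommit in 2.0, so DDL issued on a bare connection is rolled back when the connection closes; engine.begin() opens a real transaction and commits on successful exit. The test changes at the end of this diff apply the same fix. Minimal sketch:

    from sqlalchemy import Column, Integer, MetaData, Table, create_engine

    engine = create_engine("sqlite://")
    metadata = MetaData()
    Table("t", metadata, Column("id", Integer, primary_key=True))

    with engine.begin() as conn:        # opens a transaction, commits on exit
        metadata.create_all(bind=conn)  # under 2.0, engine.connect() would roll this back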
1 change: 1 addition & 0 deletions lib/galaxy/model/store/__init__.py
@@ -1060,6 +1060,7 @@ def attach_workflow_step(imported_object, attrs):
if "job" in step_attrs:
job = object_import_tracker.jobs_by_key[step_attrs["job"][object_key]]
imported_invocation_step.job = job
+ ensure_object_added_to_session(imported_invocation_step, object_in_session=job)
elif "implicit_collection_jobs" in step_attrs:
icj = object_import_tracker.implicit_collection_jobs_by_key[
step_attrs["implicit_collection_jobs"][object_key]
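The added call compensates for cascade_backrefs=False on Job.workflow_invocation_step: assigning .job no longer cascades the step into the job's session. A hedged sketch of what a helper like this plausibly does (assumed implementation, not Galaxy's actual code):

    from sqlalchemy.orm import object_session

    def ensure_object_added_to_session(obj, object_in_session=None):
        # Find the session holding the related object and add `obj` to it explicitly,
        # since backref assignment no longer cascades it in. (Assumed implementation.)
        session = object_session(object_in_session) if object_in_session is not None else None
        if session is not None:
            session.add(obj)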
3 changes: 2 additions & 1 deletion lib/galaxy/model/unittest_utils/model_testing_utils.py
@@ -18,6 +18,7 @@
make_url,
)
from sqlalchemy.sql.compiler import IdentifierPreparer
+ from sqlalchemy.sql.expression import text

from galaxy.model.database_utils import (
create_database,
@@ -235,7 +236,7 @@ def _drop_database(connection_url, database_name):
engine = create_engine(connection_url, isolation_level="AUTOCOMMIT")
preparer = IdentifierPreparer(engine.dialect)
database_name = preparer.quote(database_name)
stmt = f"DROP DATABASE IF EXISTS {database_name}"
stmt = text(f"DROP DATABASE IF EXISTS {database_name}")
with engine.connect() as conn:
conn.execute(stmt)
engine.dispose()
15 changes: 8 additions & 7 deletions lib/galaxy/quota/__init__.py
@@ -165,9 +165,9 @@ def get_quota(self, user, quota_source_label=None) -> Optional[int]:
label_cond="IS NULL" if quota_source_label is None else " = :label"
)
)
- conn = self.sa_session.connection()
- with conn.begin():
-     res = conn.execute(query, is_true=True, user_id=user.id, label=quota_source_label).fetchone()
+ engine = self.sa_session.get_bind()
+ with engine.connect() as conn:
+     res = conn.execute(query, {"is_true": True, "user_id": user.id, "label": quota_source_label}).fetchone()
if res:
return int(res[0]) if res[0] else None
else:
@@ -188,10 +188,11 @@ def _default_quota(self, default_type, quota_source_label):
AND default_quota.quota_source_label {label_condition}
"""
)

- conn = self.sa_session.connection()
- with conn.begin():
-     res = conn.execute(query, is_true=True, label=quota_source_label, default_type=default_type).fetchone()
+ engine = self.sa_session.get_bind()
+ with engine.connect() as conn:
+     res = conn.execute(
+         query, {"is_true": True, "label": quota_source_label, "default_type": default_type}
+     ).fetchone()
if res:
return res[0]
else:
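Two 2.0 changes at once in this file: bound parameters go in a single dictionary instead of keyword arguments, and the connection comes from the engine explicitly rather than from Session.connection() wrapped in begin(). Minimal sketch of the parameter style:

    from sqlalchemy import create_engine, text

    engine = create_engine("sqlite://")
    query = text("SELECT :user_id + 1")
    with engine.connect() as conn:
        # conn.execute(query, user_id=41)  # 1.x keyword style, removed in 2.0
        res = conn.execute(query, {"user_id": 41}).fetchone()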
2 changes: 1 addition & 1 deletion lib/galaxy/webapps/galaxy/controllers/history.py
@@ -116,7 +116,7 @@ def sort(self, trans, query, ascending, column_name=None):
def build_initial_query(self, trans, **kwargs):
# Override to preload sharing information used when fetching data for grid.
query = super().build_initial_query(trans, **kwargs)
- query = query.options(undefer("users_shared_with_count"))
+ query = query.options(undefer(self.model_class.users_shared_with_count))
return query

# Grid definition
4 changes: 2 additions & 2 deletions lib/galaxy/webapps/galaxy/controllers/page.py
@@ -168,9 +168,9 @@ def build_initial_query(self, trans, **kwargs):
.join("user")
.filter(model.User.deleted == false())
.options(
joinedload(self.model_class.user).load_only("username"),
joinedload(self.model_class.user).load_only(self.model_class.username),
joinedload(self.model_class.annotations),
undefer("average_rating"),
undefer(self.model_class.average_rating),
)
)

20 changes: 10 additions & 10 deletions test/unit/data/model/migrations/test_migrations.py
@@ -190,7 +190,7 @@ def test_is_empty(self, url_factory, metadata_state1_gxy): # noqa: F811
with create_and_drop_database(db_url):
with disposing_engine(db_url) as engine:
assert DatabaseStateCache(engine).is_database_empty()
- with engine.connect() as conn:
+ with engine.begin() as conn:
metadata.create_all(bind=conn)
assert not DatabaseStateCache(engine).is_database_empty()

@@ -199,7 +199,7 @@ def test_has_alembic_version_table(self, url_factory, metadata_state4_gxy): # n
with create_and_drop_database(db_url):
with disposing_engine(db_url) as engine:
assert not DatabaseStateCache(engine).has_alembic_version_table()
- with engine.connect() as conn:
+ with engine.begin() as conn:
metadata.create_all(bind=conn)
assert DatabaseStateCache(engine).has_alembic_version_table()

@@ -208,7 +208,7 @@ def test_has_sqlalchemymigrate_version_table(self, url_factory, metadata_state2_
with create_and_drop_database(db_url):
with disposing_engine(db_url) as engine:
assert not DatabaseStateCache(engine).has_sqlalchemymigrate_version_table()
- with engine.connect() as conn:
+ with engine.begin() as conn:
metadata.create_all(bind=conn)
assert DatabaseStateCache(engine).has_sqlalchemymigrate_version_table()

@@ -229,7 +229,7 @@ def test_only_contains_kombu_tables(self, url_factory, metadata_state0kombu): #
with create_and_drop_database(db_url):
with disposing_engine(db_url) as engine:
assert DatabaseStateCache(engine).is_database_empty()
- with engine.connect() as conn:
+ with engine.begin() as conn:
metadata.create_all(bind=conn)
assert not DatabaseStateCache(engine).is_database_empty()
assert DatabaseStateCache(engine).contains_only_kombu_tables()
@@ -733,7 +733,7 @@ def _get_paths_to_version_locations():

def load_sqlalchemymigrate_version(db_url, version):
with disposing_engine(db_url) as engine:
- with engine.connect() as conn:
+ with engine.begin() as conn:
sql_delete = text(f"delete from {SQLALCHEMYMIGRATE_TABLE}") # there can be only 1 row
sql_insert = text(f"insert into {SQLALCHEMYMIGRATE_TABLE} values('_', '_', {version})")
conn.execute(sql_delete)
@@ -747,7 +747,7 @@ def test_load_sqlalchemymigrate_version(url_factory, metadata_state2_gxy): # no
load_metadata(metadata_state2_gxy, engine)
sql = text(f"select version from {SQLALCHEMYMIGRATE_TABLE}")
version = 42
- with engine.connect() as conn:
+ with engine.begin() as conn:
result = conn.execute(sql).scalar()
assert result != version
load_sqlalchemymigrate_version(db_url, version)
@@ -766,7 +766,7 @@ def database_is_empty_or_contains_kombu_tables(db_url):
(ref: https://github.com/galaxyproject/galaxy/issues/13689)
"""
with disposing_engine(db_url) as engine:
- with engine.connect() as conn:
+ with engine.begin() as conn:
metadata = MetaData()
metadata.reflect(bind=conn)
return not bool(metadata.tables) or metadata_contains_only_kombu_tables(metadata)
@@ -865,7 +865,7 @@ def is_metadata_loaded(db_url, metadata):
# True if the set of tables from the up-to-date state metadata (state6)
# is a subset of the metadata reflected from `db_url`.
with disposing_engine(db_url) as engine:
- with engine.connect() as conn:
+ with engine.begin() as conn:
db_metadata = MetaData()
db_metadata.reflect(bind=conn)
tables = _get_tablenames(metadata)
@@ -885,7 +885,7 @@ def test_is_metadata_loaded(url_factory, metadata_state1_gxy): # noqa F811
with create_and_drop_database(db_url):
assert not is_metadata_loaded(db_url, metadata)
with disposing_engine(db_url) as engine:
- with engine.connect() as conn:
+ with engine.begin() as conn:
metadata.create_all(bind=conn)
assert is_metadata_loaded(db_url, metadata)

@@ -896,7 +896,7 @@ def test_is_multiple_metadata_loaded(url_factory, metadata_state1_gxy, metadata_
with create_and_drop_database(db_url):
assert not is_metadata_loaded(db_url, metadata)
with disposing_engine(db_url) as engine:
- with engine.connect() as conn:
+ with engine.begin() as conn:
metadata_state1_gxy.create_all(bind=conn)
metadata_state1_tsi.create_all(bind=conn)
assert is_metadata_loaded(db_url, metadata)

