Skip to content

Commit

Permalink
Use the new enginefacade from oslo_db
Browse files Browse the repository at this point in the history
As described in blueprint [1], the existing style of oslo_db session
handling (e.g., context.session.begin()) introduces potential issues;
see also the oslo.db enginefacade spec [2]. Notably, unit tests failed
during the Caracal release, though no definitive deployment impact has
been identified yet.

To future-proof the code and align with recommended practices,
we are migrating to the enginefacade pattern now.
This involves replacing the pattern:
    with context.session.begin():
        context.session.add(obj)
with the equivalent enginefacade form:
    with db_api.CONTEXT_WRITER.using(context):
        context.session.add(obj)

[1] https://blueprints.launchpad.net/neutron/+spec/enginefacade-switch
[2] Oslo db spec: http://specs.openstack.org/openstack/oslo-specs/specs/kilo/make-enginefacade-a-facade.html
  • Loading branch information
sven-rosenzweig committed Nov 22, 2024
1 parent 3ad7390 commit ba42dfd
Show file tree
Hide file tree
Showing 6 changed files with 39 additions and 21 deletions.
16 changes: 14 additions & 2 deletions networking_ccloud/db/db_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ def __init__(self, *args, **kwargs):
self.drv_conf = get_driver_config()

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_hosts_on_segments(self, context, segment_ids=None, network_ids=None, physical_networks=None, level=1,
driver=None):
"""Get all binding hosts plus their segment info
Expand Down Expand Up @@ -126,6 +127,7 @@ def get_hosts_on_network(self, context, network_id):
return net_hosts[network_id]

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_top_level_vxlan_segments(self, context, network_ids):
query = context.session.query(segment_models.NetworkSegment)
query = query.filter_by(network_type=nl_const.TYPE_VXLAN, physical_network=None, segment_index=0)
Expand All @@ -138,6 +140,7 @@ def get_top_level_vxlan_segments(self, context, network_ids):
return net_seg

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_segment_by_host(self, context, network_id, physical_network, network_type=nl_const.TYPE_VLAN):
"""Return a single segment defined by network, host and network_type"""
query = context.session.query(segment_models.NetworkSegment)
Expand All @@ -150,6 +153,7 @@ def get_segment_by_host(self, context, network_id, physical_network, network_typ
return None

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_segments_by_physnet_network_tuples(self, context, physnet_networks, network_type=nl_const.TYPE_VLAN):
"""Get all segments which have one of the given combinations of physnet and network_id"""
query = context.session.query(segment_models.NetworkSegment)
Expand All @@ -162,6 +166,7 @@ def get_segments_by_physnet_network_tuples(self, context, physnet_networks, netw
return result

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_azs_for_network(self, context, network_id, extra_binding_hosts=None):
"""Get all AZs in this network bound on this driver"""
# get binding hosts on network
Expand All @@ -179,6 +184,7 @@ def get_azs_for_network(self, context, network_id, extra_binding_hosts=None):
return azs

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_interconnects(self, context, network_id=None, device_type=None, host=None):
query = context.session.query(cc_models.CCNetworkInterconnects)
filter_args = {}
Expand Down Expand Up @@ -209,7 +215,7 @@ def ensure_interconnect_for_network(self, context, device_type, network_id, az,
(but also services this AZ) - this can only happen for Transits as they are
the only devices servicing a different AZ.
"""
with context.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(context):
query = context.session.query(cc_models.CCNetworkInterconnects)
query = query.filter_by(device_type=device_type, network_id=network_id, availability_zone=az)
if query.count() > 0:
Expand Down Expand Up @@ -255,7 +261,7 @@ def ensure_interconnect_for_network(self, context, device_type, network_id, az,
availability_zone=az, host=host)
context.session.add(transit_alloc)

return new_interconnect_allocated, transit_alloc
return new_interconnect_allocated, transit_alloc

def ensure_transit_for_network(self, context, network_id, az):
return self.ensure_interconnect_for_network(context, cc_const.DEVICE_TYPE_TRANSIT, network_id, az)
Expand All @@ -264,6 +270,7 @@ def ensure_bgw_for_network(self, context, network_id, az):
return self.ensure_interconnect_for_network(context, cc_const.DEVICE_TYPE_BGW, network_id, az)

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER
def remove_interconnect_from_network(self, context, device_type, network_id, az):
"""Remove a transit from a network"""
query = context.session.query(cc_models.CCNetworkInterconnects)
Expand All @@ -280,6 +287,7 @@ def remove_bgw_from_network(self, context, network_id, az):
return self.remove_interconnect_from_network(context, cc_const.DEVICE_TYPE_BGW, network_id, az)

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_gateways_for_networks(self, context, network_ids, external_only=True):
fields = [
models_v2.Subnet.network_id, models_v2.Subnet.cidr, models_v2.Subnet.gateway_ip,
Expand Down Expand Up @@ -324,6 +332,7 @@ def get_gateways_for_network(self, context, network_id, *args, **kwargs):
net_gws = self.get_gateways_for_networks(context, [network_id], *args, **kwargs)
return net_gws.get(network_id)

@db_api.CONTEXT_READER
def get_subnet_l3_config_for_networks(self, context, network_ids):
"""Get l3 config (cidrs, az locality) for networks, grouped by subnet pools"""
fields = [
Expand Down Expand Up @@ -361,6 +370,7 @@ def get_subnet_l3_config_for_networks(self, context, network_ids):

return result

@db_api.CONTEXT_READER
def get_subnetpool_details(self, context, subnetpool_ids):
# get az from tags
fields = [models_v2.SubnetPool.id, tag_models.Tag.tag]
Expand Down Expand Up @@ -403,6 +413,7 @@ def get_subnetpool_details(self, context, subnetpool_ids):
return result

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_subport_trunk_vlan_id(self, context, port_id):
query = context.session.query(trunk_models.SubPort.segmentation_id)
query = query.filter(trunk_models.SubPort.port_id == port_id)
Expand All @@ -412,6 +423,7 @@ def get_subport_trunk_vlan_id(self, context, port_id):
return None

@db_api.retry_if_session_inactive()
@db_api.CONTEXT_READER
def get_trunks_with_binding_host(self, context, host):
fields = [
trunk_models.Trunk.id,
Expand Down
3 changes: 2 additions & 1 deletion networking_ccloud/tests/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from neutron.common import config
from neutron import policy
from neutron.plugins.ml2 import models as ml2_models
from neutron_lib.db import api as db_api
from neutron_lib import context
from oslo_config import cfg, fixture as config_fixture
from oslotest import base
Expand Down Expand Up @@ -53,7 +54,7 @@ def _make_port_with_binding(self, segments, host, **kwargs):
if not port:
port = self._make_port('json', segments[0][0]['network_id'], host=host, **kwargs)['port']
ctx = context.get_admin_context()
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
pbinding = ml2_models.PortBinding(port_id=port['id'], host=host, profile=profile, vif_type=vif_type)
ctx.session.add(pbinding)

Expand Down
3 changes: 2 additions & 1 deletion networking_ccloud/tests/common/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,11 +11,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import api as db_api
from neutron.db import models_v2


def fix_net_mtu(ctx, network, mtu=1500):
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
net = ctx.session.query(models_v2.Network).get(network['id'])
net.mtu = mtu
ctx.session.add(net)
13 changes: 7 additions & 6 deletions networking_ccloud/tests/unit/db/test_db_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
from neutron.services.trunk import models as trunk_models
from neutron.tests.unit.extensions import test_segment
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from oslo_config import cfg

Expand Down Expand Up @@ -117,7 +118,7 @@ def setUp(self):
fix_net_mtu(ctx, self._net_c)
self._port_c_1 = self._make_port('json', self._net_c['id'])['port'] # bindings don't matter
self._port_c_2 = self._make_port('json', self._net_c['id'])['port'] # bindings don't matter
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
subport = trunk_models.SubPort(port_id=self._port_b_5['id'], segmentation_type='vlan', segmentation_id=1000)
trunk = trunk_models.Trunk(name='random-trunk', port_id=self._port_c_1['id'], sub_ports=[subport])
ctx.session.add(trunk)
Expand All @@ -133,7 +134,7 @@ def setUp(self):
self._subnetpool_reg = self._make_subnetpool("json", prefixes=["1.1.0.0/16", "2.2.0.0/16"], tenant_id="foo",
name="sp")['subnetpool']
self._net_c = self._make_network(name="c", admin_state_up=True, fmt='json')['network']
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
ctx.session.add(extnet_models.ExternalNetwork(network_id=self._net_c['id']))

self._subnet_c_1 = self._make_subnet("json", {"network": self._net_c}, "1.1.1.1", "1.1.1.0/24",
Expand All @@ -146,7 +147,7 @@ def setUp(self):
name="sp")['subnetpool']

self._net_d = self._make_network(name="d", admin_state_up=True, fmt='json')['network']
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
net = ctx.session.query(models_v2.Network).get(self._net_d['id'])
net.availability_zone_hints = '["qa-de-1d"]'
ctx.session.add(extnet_models.ExternalNetwork(network_id=self._net_d['id']))
Expand All @@ -165,7 +166,7 @@ def setUp(self):
subnetpool_id=self._subnetpool_az['id'], as_admin=True)['subnet']

# fix segment index
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
objs = ctx.session.query(segment_models.NetworkSegment).filter_by(physical_network=None,
network_type='vxlan')
objs.update({'segment_index': 0})
Expand Down Expand Up @@ -328,7 +329,7 @@ def test_get_subport_trunk_vlan_id(self):
with self.port() as trunkport, self.port() as subport:
self.assertIsNone(self._db.get_subport_trunk_vlan_id(ctx, subport['port']['id']))

with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
subport = trunk_models.SubPort(port_id=subport['port']['id'], segmentation_type='vlan',
segmentation_id=1000)
trunk = trunk_models.Trunk(name='random-trunk', port_id=trunkport['port']['id'], sub_ports=[subport])
Expand All @@ -340,7 +341,7 @@ def test_get_subport_trunk_vlan_id(self):
def test_get_trunks_with_binding_host(self):
ctx = context.get_admin_context()
with self.port() as trunkport1, self.port() as trunkport2:
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
trunk1 = trunk_models.Trunk(name='random-trunk1', port_id=trunkport1['port']['id'], sub_ports=[])
binding1 = (ctx.session.query(ml2_models.PortBinding)
.filter(ml2_models.PortBinding.port_id == trunkport1['port']['id']).first())
Expand Down
14 changes: 8 additions & 6 deletions networking_ccloud/tests/unit/extensions/test_extensions.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
from neutron.tests.unit.extensions import test_segment
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.db import api as db_api
from neutron_lib import context
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api as ml2_api
Expand Down Expand Up @@ -165,7 +166,7 @@ def setUp(self):
self._subnet_b_1 = self._make_subnet("json", network={'network': self._net_b}, subnetpool_id=self._snp_b['id'],
cidr="1.1.1.0/24", gateway="1.1.1.1", as_admin=True)

with self.ctx.session.begin():
with db_api.CONTEXT_WRITER.using(self.ctx):
self.ctx.session.add(extnet_models.ExternalNetwork(network_id=self._net_b['id']))
ascope = ascope_models.AddressScope(name="seagull", ip_version=4)
self.ctx.session.add(ascope)
Expand Down Expand Up @@ -223,11 +224,12 @@ def test_network_ensure_interconnects(self):
self._make_segment(network_id=network_id, network_type='vxlan',
segmentation_id=424242,
tenant_id="test-tenant", fmt='json')['segment']
objs = self.ctx.session.query(segment_models.NetworkSegment).filter_by(physical_network=None,
network_type='vxlan')
objs.update({'segment_index': 0})
from neutron_lib.db import api as db_api
with db_api.CONTEXT_WRITER.using(self.ctx):
objs = self.ctx.session.query(segment_models.NetworkSegment).filter_by(physical_network=None,
network_type='vxlan')
objs.update({'segment_index': 0})

# make sure nothing is allocated
self.assertEqual([], self.db.get_interconnects(self.ctx, network_id))

# make apicall
Expand Down Expand Up @@ -541,7 +543,7 @@ def test_network_move_gateway_to_fabric_no_move_if_already_moved(self):
cfg.CONF.set_override('handle_all_l3_gateways', False, group='ml2_cc_fabric')
with self.network() as net:
net_id = net['network']['id']
with self.ctx.session.begin():
with db_api.CONTEXT_WRITER.using(self.ctx):
self.ctx.session.add(extnet_models.ExternalNetwork(network_id=net_id))
self.tag_plugin.update_tag(self.ctx, "networks", net_id, cc_const.L3_GATEWAY_TAG)
resp = self.app.put(f"/cc-fabric/networks/{net_id}/move_gateway_to_fabric", expect_errors=True)
Expand Down
11 changes: 6 additions & 5 deletions networking_ccloud/tests/unit/ml2/test_mech_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@

from neutron_lib.api.definitions import external_net as extnet_api
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib.db import api as db_api
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib import context
Expand Down Expand Up @@ -137,7 +138,7 @@ def setUp(self):
self.mech_driver = mm.mech_drivers[cc_const.CC_DRIVER_NAME].obj

ctx = context.get_admin_context()
with ctx.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(ctx):
self._address_scope = ascope_models.AddressScope(name="the-open-sea", ip_version=4)
ctx.session.add(self._address_scope)

Expand Down Expand Up @@ -191,7 +192,7 @@ def test_bind_port_trunking_direct_level_1(self):

with mock.patch.object(CCFabricSwitchAgentRPCClient, 'apply_config_update') as mock_acu:
def _create_trunk(port, network, **kwargs):
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
subport = trunk_models.SubPort(port_id=port['port']['id'], segmentation_type='vlan',
segmentation_id=1234)
trunk_port = self._make_port(net_id=network['network']['id'], fmt="json")
Expand Down Expand Up @@ -616,7 +617,7 @@ def test_create_subnet_network_no_az_snp_az_fails(self):
with self.subnetpool(["1.1.0.0/16", "1.2.0.0/24"], address_scope_id=self._address_scope['id'], name="foo",
tenant_id="foo", admin=True) as snp:
ctx = context.get_admin_context()
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
snp_db = ctx.session.query(models_v2.SubnetPool).get(snp['subnetpool']['id'])
ctx.session.add(tag_models.Tag(standard_attr_id=snp_db.standard_attr_id,
tag="availability-zone::qa-de-1a"))
Expand Down Expand Up @@ -717,7 +718,7 @@ def test_bind_port_external_network_az_local(self):
with self.network(availability_zone_hints=["qa-de-1a"], **net_kwargs) as network:
with self.subnetpool(["1.1.0.0/16", "1.2.0.0/24"], address_scope_id=self._address_scope.id, name="foo",
tenant_id="foo", admin=True) as snp:
with ctx.session.begin():
with db_api.CONTEXT_WRITER.using(ctx):
snp_db = ctx.session.query(models_v2.SubnetPool).get(snp['subnetpool']['id'])
ctx.session.add(tag_models.Tag(standard_attr_id=snp_db.standard_attr_id,
tag="availability-zone::qa-de-1a"))
Expand Down Expand Up @@ -908,7 +909,7 @@ def setUp(self):
self.context = context.get_admin_context()

ctx = context.get_admin_context()
with ctx.session.begin(subtransactions=True):
with db_api.CONTEXT_WRITER.using(ctx):
self._address_scope = ascope_models.AddressScope(name="the-open-sea", ip_version=4)
ctx.session.add(self._address_scope)

Expand Down

0 comments on commit ba42dfd

Please sign in to comment.