Skip to content

Commit

Permalink
Implement trunk extension
Browse files Browse the repository at this point in the history
Implement the trunk service extension. A trunk can be created on any
port that is in a direct binding hostgroup. Only one trunk per hostgroup
is possible. If the host is part of a metagroup, only VLANs not used by
the metagroup's VLAN pool can be used as targets, so that trunk traffic
does not interfere with VMs placed on the host(s).

The trunk plugin modifies the subports by setting binding host, vnic
type and binding profile. The binding profile contains details
about the specific VLAN translation, but this is only informational, as
the "real" info will be fetched from the respective trunk/subport tables
in the DB.
  • Loading branch information
sebageek committed Dec 1, 2023
1 parent 64cc9f7 commit 6b89e46
Show file tree
Hide file tree
Showing 12 changed files with 424 additions and 10 deletions.
10 changes: 10 additions & 0 deletions networking_ccloud/common/config/config_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -456,6 +456,16 @@ def has_switches_as_member(self, drv_conf, switch_names):
return True
return False

def get_parent_metagroup(self, drv_conf):
    """Return the Hostgroup acting as this group's metagroup, else None.

    A metagroup can never itself be part of another metagroup, so for
    metagroups this always resolves to None. Otherwise the first
    hostgroup listing one of our binding hosts among its members is
    returned.
    """
    if self.metagroup:
        return None

    parents = (candidate for candidate in drv_conf.hostgroups
               if any(host in candidate.members for host in self.binding_hosts))
    return next(parents, None)


class VRF(pydantic.BaseModel):
name: str
Expand Down
1 change: 1 addition & 0 deletions networking_ccloud/common/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,3 +42,4 @@
PLATFORM_EOS: SWITCH_AGENT_EOS_TOPIC,
PLATFORM_NXOS: SWITCH_AGENT_NXOS_TOPIC,
}
TRUNK_PROFILE = 'cc-fabric_trunk_ro'
8 changes: 8 additions & 0 deletions networking_ccloud/common/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,3 +51,11 @@ class SwitchConnectionError(Exception):
class SubnetSubnetPoolAZAffinityError(n_exc.BadRequest):
message = ("The subnet's network %(network_id)s has AZ hint %(net_az_hint)s, "
"the subnet's subnetpool %(subnetpool_id)s has AZ %(subnetpool_az)s set, which do not match")


class GenericTrunkException(n_exc.NeutronException):
message = "%(msg)s"


class BadTrunkRequest(n_exc.BadRequest):
message = "Bad request for %(trunk_port_id)s: %(reason)s"
35 changes: 33 additions & 2 deletions networking_ccloud/db/db_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,8 +103,6 @@ def get_hosts_on_segments(self, context, segment_ids=None, network_ids=None, phy

hosts = net_hosts.setdefault(network_id, {})
if host not in hosts:
# FIXME: do we want to take the trunk segmentation id from the SubPort table
# or alternatively from the port's binding profile?
hosts[host] = dict(segment_id=segment_id, network_id=network_id, segmentation_id=segmentation_id,
physical_network=physnet, driver=driver, level=level,
trunk_segmentation_id=trunk_seg_id, is_bgw=False)
Expand Down Expand Up @@ -401,3 +399,36 @@ def get_subnetpool_details(self, context, subnetpool_ids):
result[snp_id]['cidrs'].append(cidr)

return result

@db_api.retry_if_session_inactive()
def get_subport_trunk_vlan_id(self, context, port_id):
    """Return the trunk segmentation id of a subport.

    Looks the port up in the trunk SubPort table; returns None when the
    port is not a subport of any trunk.
    """
    row = (context.session.query(trunk_models.SubPort.segmentation_id)
           .filter(trunk_models.SubPort.port_id == port_id)
           .first())
    return row.segmentation_id if row else None

@db_api.retry_if_session_inactive()
def get_trunks_with_binding_host(self, context, host):
    """Return ids of all trunks whose parent port is bound to ``host``.

    The SQL filter is only a coarse pre-selection (exact binding-host
    match OR the host appearing anywhere inside the binding profile);
    every candidate row is then re-checked in Python against the host
    extracted from its binding profile.
    """
    candidates = (context.session.query(trunk_models.Trunk.id,
                                        trunk_models.Trunk.port_id,
                                        ml2_models.PortBinding.host,
                                        ml2_models.PortBinding.profile)
                  .join(ml2_models.PortBinding,
                        trunk_models.Trunk.port_id == ml2_models.PortBinding.port_id)
                  .filter(sa.or_(ml2_models.PortBinding.host == host,
                                 ml2_models.PortBinding.profile.like(f"%{host}%"))))

    matching_trunk_ids = []
    for trunk_id, port_id, bound_host, profile in candidates.all():
        # a host specified in the binding profile overrides the binding host
        effective_host = helper.get_binding_host_from_profile(profile, port_id) or bound_host
        if effective_host == host:
            matching_trunk_ids.append(trunk_id)
    return matching_trunk_ids
12 changes: 10 additions & 2 deletions networking_ccloud/ml2/mech_driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api as ml2_api
from neutron_lib import rpc as n_rpc
from neutron_lib.services.trunk import constants as trunk_const
from oslo_config import cfg
from oslo_log import log as logging

Expand All @@ -33,6 +34,7 @@
from networking_ccloud.ml2.agent.common import messages as agent_msg
from networking_ccloud.ml2.driver_rpc_api import CCFabricDriverAPI
from networking_ccloud.ml2.plugin import FabricPlugin
from networking_ccloud.services.trunk.driver import CCTrunkDriver


LOG = logging.getLogger(__name__)
Expand Down Expand Up @@ -85,6 +87,7 @@ def initialize(self):
self._agents = {}

fabricoperations.register_api_extension()
self.trunk_driver = CCTrunkDriver.create()

LOG.info("CC-Fabric ml2 driver initialized")

Expand Down Expand Up @@ -235,11 +238,16 @@ def _bind_port_direct(self, context, binding_host, hg_config):
context.current['id'], config_physnet, context.segments_to_bind)
return

# FIXME: trunk ports
trunk_vlan = None
if context.current['device_owner'] == trunk_const.TRUNK_SUBPORT_OWNER:
if hg_config.direct_binding and not hg_config.role:
trunk_vlan = self.fabric_plugin.get_subport_trunk_vlan_id(context._plugin_context,
context.current['id'])

net_external = context.network.current[extnet_api.EXTERNAL]
self.handle_binding_host_changed(context._plugin_context, context.current['network_id'], binding_host,
hg_config, context.binding_levels[0][ml2_api.BOUND_SEGMENT], segment,
net_external=net_external)
net_external=net_external, trunk_vlan=trunk_vlan)

vif_details = {} # no vif-details needed yet
context.set_binding(segment['id'], cc_const.VIF_TYPE_CC_FABRIC, vif_details, nl_const.ACTIVE)
Expand Down
5 changes: 3 additions & 2 deletions networking_ccloud/ml2/plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,7 @@ def allocate_and_configure_interconnects(self, context, network):
device_segment = self._plugin.type_manager.allocate_dynamic_segment(context, network_id,
segment_spec)
device_segment['is_bgw'] = device_type == cc_const.DEVICE_TYPE_BGW
device_segment['trunk_segmentation_id'] = None
self.add_segments_to_config(context, scul, {network_id: {device.host: device_segment}})

if device_type == cc_const.DEVICE_TYPE_TRANSIT:
Expand Down Expand Up @@ -173,11 +174,11 @@ def add_segments_to_config(self, context, scul, net_segments):
LOG.error("Got a port binding for binding host %s in network %s, which was not found in config",
binding_host, network_id)
continue
# FIXME: handle trunk_vlans
trunk_vlan = segment_1['trunk_segmentation_id']
# FIXME: exclude_hosts
# FIXME: direct binding hosts? are they included?
gateways = net_gateways.get(network_id)
scul.add_binding_host_to_config(hg_config, network_id, vni, vlan,
scul.add_binding_host_to_config(hg_config, network_id, vni, vlan, trunk_vlan,
gateways=gateways, is_bgw=segment_1['is_bgw'])
if gateways:
l3_net_switch_map.setdefault(network_id, set()).update(hg_config.get_switch_names(self.drv_conf))
Expand Down
Empty file.
Empty file.
228 changes: 228 additions & 0 deletions networking_ccloud/services/trunk/driver.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,228 @@
# Copyright 2023 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.services.trunk.drivers import base
from neutron_lib.api.definitions import port as p_api
from neutron_lib.api.definitions import portbindings as pb_api
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as nl_const
from neutron_lib.plugins import directory
from neutron_lib.services.trunk import constants as trunk_const
from oslo_config import cfg
from oslo_log import log as logging

from networking_ccloud.common.config import get_driver_config
from networking_ccloud.common import constants as cc_const
from networking_ccloud.common.exceptions import BadTrunkRequest
from networking_ccloud.common import helper
from networking_ccloud.ml2.plugin import FabricPlugin

LOG = logging.getLogger(__name__)

# vif types bound by the cc-fabric mech driver; only trunks whose parent
# port carries one of these vif types are handled by this driver
SUPPORTED_INTERFACES = (
    cc_const.VIF_TYPE_CC_FABRIC,
)
# only vlan segmentation is accepted for subports
SUPPORTED_SEGMENTATION_TYPES = (
    trunk_const.SEGMENTATION_TYPE_VLAN,
)


class CCTrunkDriver(base.DriverBase):
    """Trunk service driver for the cc-fabric ml2 mechanism driver.

    Subports are "bound" by copying binding host, vnic type and binding
    profile from the trunk's parent port onto them; the profile entry
    written under cc_const.TRUNK_PROFILE is informational only — the
    authoritative VLAN translation data lives in the trunk/subport DB
    tables.
    """

    @property
    def is_loaded(self):
        # active only when the cc-fabric mech driver is configured in ml2;
        # NoSuchOptError means ml2 options are not registered at all
        try:
            return cc_const.CC_DRIVER_NAME in cfg.CONF.ml2.mechanism_drivers
        except cfg.NoSuchOptError:
            return False

    @classmethod
    def create(cls):
        """Alternate constructor wiring in our driver name and supported types."""
        return cls(cc_const.CC_DRIVER_NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES,
                   can_trunk_bound_port=True)

    def _get_parent_port(self, context, parent_port_id):
        """Fetch the trunk's parent port if this driver is responsible for it.

        Returns None when the parent port's vif type is not one of
        SUPPORTED_INTERFACES, i.e. the trunk belongs to another driver.
        """
        port = self.core_plugin.get_port(context, parent_port_id)

        # FIXME: normally we also should be able to work with unbound trunks
        if not self.is_interface_compatible(port[pb_api.VIF_TYPE]):
            LOG.debug("Parent port %s vif type %s not compatible", parent_port_id, port[pb_api.VIF_TYPE])
            return None
        return port

    @registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT])
    def register(self, resource, event, trigger, payload=None):
        """Hook into the trunk plugin and subscribe to trunk/subport events."""
        super().register(resource, event, trigger, payload=payload)

        # resolved here (not __init__) because plugins exist only after init
        self.core_plugin = directory.get_plugin()
        self.drv_conf = get_driver_config()
        self.fabric_plugin = FabricPlugin()

        registry.subscribe(self.trunk_valid_precommit, resources.TRUNK, events.PRECOMMIT_CREATE)
        registry.subscribe(self.trunk_create, resources.TRUNK, events.AFTER_CREATE)
        registry.subscribe(self.trunk_delete, resources.TRUNK, events.AFTER_DELETE)

        registry.subscribe(self.subport_valid_precommit, resources.SUBPORTS, events.PRECOMMIT_CREATE)
        registry.subscribe(self.subport_create, resources.SUBPORTS, events.AFTER_CREATE)
        registry.subscribe(self.subport_delete, resources.SUBPORTS, events.AFTER_DELETE)

    def trunk_valid_precommit(self, resource, event, trunk_plugin, payload):
        # trunk creation: validate the desired trunk plus all its subports
        self.validate_trunk(payload.context, payload.desired_state, payload.desired_state.sub_ports)

    def subport_valid_precommit(self, resource, event, trunk_plugin, payload):
        # subport addition: validate existing trunk state plus the new subports
        self.validate_trunk(payload.context, payload.states[0], payload.metadata['subports'])

    def validate_trunk(self, context, trunk, subports):
        """Validate a trunk and its (new) subports, raising BadTrunkRequest on any violation.

        Checks: parent port is ours and directly bound, hostgroup has no
        special role, only one trunk per host, no network on two subports,
        and no subport VLAN colliding with a parent metagroup's VLAN range.
        """
        trunk_port = self._get_parent_port(context, trunk.port_id)
        if not trunk_port:
            LOG.debug("Not responsible for trunk on port %s", trunk.port_id)
            return

        # we can only trunk direct bindings
        trunk_host = helper.get_binding_host_from_port(trunk_port)
        LOG.info("Validating trunk for trunk %s port %s host %s", trunk.id, trunk.port_id, trunk_host)
        hg_config = self.drv_conf.get_hostgroup_by_host(trunk_host)
        if not hg_config:
            raise BadTrunkRequest(trunk_port_id=trunk.port_id,
                                  reason=f"No hostgroup config found for host {trunk_host}")

        if not hg_config.direct_binding:
            raise BadTrunkRequest(trunk_port_id=trunk.port_id,
                                  reason=f"Hostgroup {trunk_host} is not a direct binding hostgroup "
                                         "(maybe a metagroup?), only direct binding hostgroups can be trunked")

        # hostgroups with a role (e.g. infra-special) are excluded from trunking
        if hg_config.role is not None:
            raise BadTrunkRequest(trunk_port_id=trunk.port_id,
                                  reason=f"Hostgroup {trunk_host} is of role {hg_config.role} "
                                         "and can therefore not be trunked")

        # only one trunk per host; exclude our own id so updates re-validate cleanly
        trunks_on_host = self.fabric_plugin.get_trunks_with_binding_host(context, trunk_host)
        trunks = set(trunks_on_host) - set([trunk.id])
        if trunks:
            raise BadTrunkRequest(trunk_port_id=trunk.port_id,
                                  reason=f"Host {trunk_host} already has trunk {' '.join(trunks)} connected to it")

        # subport validation
        # if this hostgroup sits inside a metagroup, collect the VLANs managed
        # by its switchgroup — those are reserved for the metagroup's VLAN pool
        parent_hg = hg_config.get_parent_metagroup(self.drv_conf)
        meta_hg_vlans = []
        if parent_hg:
            meta_hg_vlans = hg_config.get_any_switchgroup(self.drv_conf).get_managed_vlans(self.drv_conf,
                                                                                           with_infra_nets=True)

        # map network_id -> subport port_id to detect duplicate networks
        subport_nets = {}
        # existing subports
        for existing_subport in trunk.sub_ports:
            subport_port = self.core_plugin.get_port(context, existing_subport.port_id)
            subport_nets[subport_port['network_id']] = existing_subport.port_id

        # new subports
        for subport in subports:
            # don't allow a network to be on two subports
            subport_port = self.core_plugin.get_port(context, subport.port_id)
            sp_net = subport_port['network_id']
            if sp_net in subport_nets and subport_nets[sp_net] != subport.port_id:
                raise BadTrunkRequest(trunk_port_id=trunk.port_id,
                                      reason=f"Network {sp_net} cannot be on two subports, "
                                             f"{subport_nets[sp_net]} and port {subport.port_id}")
            subport_nets[sp_net] = subport.port_id

            # for hostgroups that are in a metagroup we don't want to trunk anything that trunks toward a vlan id
            # that might be used by the metagroup
            if subport.segmentation_id in meta_hg_vlans:
                sg_name = hg_config.get_any_switchgroup(self.drv_conf).name
                raise BadTrunkRequest(trunk_port_id=trunk.port_id,
                                      reason=f"Subport {subport.port_id} segmentation id {subport.segmentation_id} "
                                             f"collides with vlan range of switchgroup {sg_name}")

    def trunk_create(self, resource, event, trunk_plugin, payload):
        """AFTER_CREATE: bind initial subports and set trunk status."""
        trunk_port = self._get_parent_port(payload.context, payload.states[0].port_id)
        if not trunk_port:
            return
        self._bind_subports(payload.context, trunk_port, payload.states[0], payload.states[0].sub_ports)
        # ACTIVE only when the trunk was created with at least one subport
        status = trunk_const.TRUNK_ACTIVE_STATUS if len(payload.states[0].sub_ports) else trunk_const.TRUNK_DOWN_STATUS
        payload.states[0].update(status=status)

    def trunk_delete(self, resource, event, trunk_plugin, payload):
        """AFTER_DELETE: unbind all subports of the deleted trunk."""
        trunk_port = self._get_parent_port(payload.context, payload.states[0].port_id)
        if not trunk_port:
            return
        self._unbind_subports(payload.context, trunk_port, payload.states[0], payload.states[0].sub_ports)

    def subport_create(self, resource, event, trunk_plugin, payload):
        """AFTER_CREATE on subports: bind the newly added subports."""
        trunk_port = self._get_parent_port(payload.context, payload.states[0].port_id)
        if not trunk_port:
            return
        self._bind_subports(payload.context, trunk_port, payload.states[0], payload.metadata['subports'])

    def subport_delete(self, resource, event, trunk_plugin, payload):
        """AFTER_DELETE on subports: unbind the removed subports."""
        trunk_port = self._get_parent_port(payload.context, payload.states[0].port_id)
        if not trunk_port:
            return
        self._unbind_subports(payload.context, trunk_port, payload.states[0], payload.metadata['subports'])

    def _bind_subports(self, context, trunk_port, trunk, subports):
        """Bind subports by copying host/vnic-type/profile from the parent port."""
        for subport in subports:
            LOG.info("Adding subport %s trunk port %s of trunk %s", subport.port_id, trunk.port_id, trunk.id)
            # NOTE(review): assumes the parent port always carries a binding
            # profile dict; if it were None the item-assign below would fail — confirm
            binding_profile = trunk_port.get(pb_api.PROFILE)

            # informational only — the authoritative data is in the trunk/subport tables
            binding_profile[cc_const.TRUNK_PROFILE] = {
                'segmentation_type': subport.segmentation_type,
                'segmentation_id': subport.segmentation_id,
                'trunk_id': trunk.id,
            }

            port_data = {
                p_api.RESOURCE_NAME: {
                    pb_api.HOST_ID: trunk_port.get(pb_api.HOST_ID),
                    pb_api.VNIC_TYPE: trunk_port.get(pb_api.VNIC_TYPE),
                    pb_api.PROFILE: binding_profile,
                    'device_owner': trunk_const.TRUNK_SUBPORT_OWNER,
                    'device_id': trunk_port.get('device_id'),
                }
            }
            self.core_plugin.update_port(context, subport.port_id, port_data)
        if len(subports) > 0:
            trunk.update(status=trunk_const.TRUNK_ACTIVE_STATUS)

    def _unbind_subports(self, context, trunk_port, trunk, subports):
        """Unbind subports: clear binding info and set the ports DOWN."""
        for subport in subports:
            LOG.info("Removing subport %s trunk port %s of trunk %s", subport.port_id, trunk.port_id, trunk.id)
            binding_profile = trunk_port.get(pb_api.PROFILE)

            # drop the informational trunk entry from the profile, if present
            if cc_const.TRUNK_PROFILE in binding_profile:
                del binding_profile[cc_const.TRUNK_PROFILE]

            port_data = {
                p_api.RESOURCE_NAME: {
                    pb_api.HOST_ID: None,
                    pb_api.VNIC_TYPE: None,
                    pb_api.PROFILE: binding_profile,
                    'device_owner': '',
                    'device_id': '',
                    'status': nl_const.PORT_STATUS_DOWN,
                },
            }
            self.core_plugin.update_port(context, subport.port_id, port_data)

        # trunk.sub_ports still includes the removed subports at this point,
        # so the difference is the number of subports that remain
        if len(trunk.sub_ports) - len(subports) > 0:
            trunk.update(status=trunk_const.TRUNK_ACTIVE_STATUS)
        else:
            # NOTE(review): the log claims the trunk is set to DOWN, but no
            # trunk.update(status=...) happens on this branch — confirm whether
            # that is intentional (e.g. trunk already deleted in AFTER_DELETE)
            LOG.info("Last subport was removed from trunk %s, setting it to state DOWN", trunk.id)
Loading

0 comments on commit 6b89e46

Please sign in to comment.