Skip to content

Commit

Permalink
Add NFS network for Manila to uni04delta
Browse files Browse the repository at this point in the history
Manila Tempest tests need to connect to the NFS share
exported by Ganesha via a special (OpenStack) network [1].

This patch adds the NFS storage network with VLAN 24 and
subnet 172.21.0.0/24 in uni04delta. The NFS network is
connected to Ceph and Compute EDPM nodes. An NNCP, NAD,
L2Advertisement and IPAddressPool are defined for the
NFS network so that a pod in k8s can connect to it, such
as the tempest pod which will perform the storage tests.

In order to make these changes, uni04delta now keeps its
own copy of the nncp and networking directories since
they differ (by the new network) from the generic ones
in the lib directory.

TODO: update Manila CRDs to use this network.

[1] https://opendev.org/openstack/manila-tempest-plugin/src/branch/master/manila_tempest_tests/config.py#L99

Jira: https://issues.redhat.com/browse/OSPRH-7417
Depends-On: openstack-k8s-operators/ci-framework#2273

Signed-off-by: John Fulton <[email protected]>
  • Loading branch information
fultonj committed Sep 5, 2024
1 parent c1d4fa5 commit 8e59010
Show file tree
Hide file tree
Showing 19 changed files with 1,432 additions and 7 deletions.
87 changes: 87 additions & 0 deletions automation/net-env/uni04delta.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,16 @@ instances:
prefix_length_v4: 24
skip_nm: false
vlan_id: 123
nfs:
interface_name: eth1.124
ip_v4: 172.21.0.111
mac_addr: '52:54:00:1b:1c:e5'
mtu: 1500
network_name: nfs
parent_interface: eth1
prefix_length_v4: 24
skip_nm: false
vlan_id: 124
ceph-1:
hostname: ceph-1
name: ceph-1
Expand Down Expand Up @@ -110,6 +120,16 @@ instances:
prefix_length_v4: 24
skip_nm: false
vlan_id: 123
nfs:
interface_name: eth1.124
ip_v4: 172.21.0.112
mac_addr: '52:54:00:1b:1c:e6'
mtu: 1500
network_name: nfs
parent_interface: eth1
prefix_length_v4: 24
skip_nm: false
vlan_id: 124
ceph-2:
hostname: ceph-2
name: ceph-2
Expand Down Expand Up @@ -165,6 +185,16 @@ instances:
prefix_length_v4: 24
skip_nm: false
vlan_id: 123
nfs:
interface_name: eth1.124
ip_v4: 172.21.0.113
mac_addr: '52:54:00:1b:1c:e7'
mtu: 1500
network_name: nfs
parent_interface: eth1
prefix_length_v4: 24
skip_nm: false
vlan_id: 124
controller-0:
hostname: controller-0
name: controller-0
Expand Down Expand Up @@ -235,6 +265,16 @@ instances:
prefix_length_v4: 24
skip_nm: false
vlan_id: 122
nfs:
interface_name: enp6s0.124
ip_v4: 172.21.0.10
mac_addr: '52:54:00:18:a1:f6'
mtu: 1500
network_name: nfs
parent_interface: enp6s0
prefix_length_v4: 24
skip_nm: false
vlan_id: 124
ocp-1:
hostname: osasinfra-master-1
name: ocp-1
Expand Down Expand Up @@ -292,6 +332,16 @@ instances:
prefix_length_v4: 24
skip_nm: false
vlan_id: 122
nfs:
interface_name: enp6s0.124
ip_v4: 172.21.0.11
mac_addr: '52:54:00:18:a1:f7'
mtu: 1500
network_name: nfs
parent_interface: enp6s0
prefix_length_v4: 24
skip_nm: false
vlan_id: 124
ocp-2:
hostname: osasinfra-master-2
name: ocp-2
Expand Down Expand Up @@ -349,6 +399,16 @@ instances:
prefix_length_v4: 24
skip_nm: false
vlan_id: 122
nfs:
interface_name: enp6s0.124
ip_v4: 172.21.0.12
mac_addr: '52:54:00:18:a1:f8'
mtu: 1500
network_name: nfs
parent_interface: enp6s0
prefix_length_v4: 24
skip_nm: false
vlan_id: 124
networker-0:
hostname: networker-0
name: networker-0
Expand Down Expand Up @@ -447,6 +507,15 @@ instances:
parent_interface: eth1
skip_nm: false
vlan_id: 22
nfs:
interface_name: eth1.24
ip_v4: 172.21.0.100
mac_addr: '52:54:00:1b:1c:e8'
mtu: 1500
network_name: nfs
parent_interface: eth1
skip_nm: false
vlan_id: 24
compute-1:
hostname: compute-1
name: compute-1
Expand Down Expand Up @@ -485,6 +554,15 @@ instances:
parent_interface: eth1
skip_nm: false
vlan_id: 22
nfs:
interface_name: eth1.24
ip_v4: 172.21.0.101
mac_addr: '52:54:00:1b:1c:e9'
mtu: 1500
network_name: nfs
parent_interface: eth1
skip_nm: false
vlan_id: 24
compute-2:
hostname: compute-2
name: compute-2
Expand Down Expand Up @@ -523,6 +601,15 @@ instances:
parent_interface: eth1
skip_nm: false
vlan_id: 22
nfs:
interface_name: eth1.24
ip_v4: 172.21.0.102
mac_addr: '52:54:00:1b:1c:e0'
mtu: 1500
network_name: nfs
parent_interface: eth1
skip_nm: false
vlan_id: 24
networks:
ctlplane:
dns_v4:
Expand Down
6 changes: 3 additions & 3 deletions dt/uni04delta/kustomization.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -17,9 +17,9 @@ transformers:
create: true
components:
- ../../lib/networking/metallb
- ../../lib/networking/netconfig
- ../../lib/networking/nad
- networking/metallb
- networking/netconfig
- networking/nad
- ../../lib/control-plane

patches:
Expand Down
8 changes: 8 additions & 0 deletions dt/uni04delta/networking/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
---
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component

components:
- metallb
- nad
- netconfig
181 changes: 181 additions & 0 deletions dt/uni04delta/networking/metallb/kustomization.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,181 @@
---
apiVersion: kustomize.config.k8s.io/v1alpha1
kind: Component

resources:
- metallb_l2advertisement.yaml
- ocp_ip_pools.yaml

patches:
- target:
kind: IPAddressPool
labelSelector: "osp/lb-addresses-type=standard"
path: ocp_ip_pool_template.yaml

replacements:
# IPAddressPool addresses
- source:
kind: ConfigMap
name: network-values
fieldPath: data.ctlplane.lb_addresses
targets:
- select:
kind: IPAddressPool
name: ctlplane
fieldPaths:
- spec.addresses
- source:
kind: ConfigMap
name: network-values
fieldPath: data.internalapi.lb_addresses
targets:
- select:
kind: IPAddressPool
name: internalapi
fieldPaths:
- spec.addresses
- source:
kind: ConfigMap
name: network-values
fieldPath: data.storage.lb_addresses
targets:
- select:
kind: IPAddressPool
name: storage
fieldPaths:
- spec.addresses
- source:
kind: ConfigMap
name: network-values
fieldPath: data.tenant.lb_addresses
targets:
- select:
kind: IPAddressPool
name: tenant
fieldPaths:
- spec.addresses

# Loadbalancer address pools
- source:
kind: ConfigMap
name: network-values
fieldPath: data.ctlplane.lb_addresses
targets:
- select:
group: metallb.io
kind: IPAddressPool
name: ctlplane
fieldPaths:
- spec.addresses
- source:
kind: ConfigMap
name: network-values
fieldPath: data.internalapi.lb_addresses
targets:
- select:
group: metallb.io
kind: IPAddressPool
name: internalapi
fieldPaths:
- spec.addresses
- source:
kind: ConfigMap
name: network-values
fieldPath: data.tenant.lb_addresses
targets:
- select:
group: metallb.io
kind: IPAddressPool
name: tenant
fieldPaths:
- spec.addresses
- source:
kind: ConfigMap
name: network-values
fieldPath: data.ctlplane.lb_addresses
targets:
- select:
group: metallb.io
kind: IPAddressPool
name: ctlplane
fieldPaths:
- spec.addresses
- source:
kind: ConfigMap
name: network-values
fieldPath: data.storage.lb_addresses
targets:
- select:
group: metallb.io
kind: IPAddressPool
name: storage
fieldPaths:
- spec.addresses
- source:
kind: ConfigMap
name: network-values
fieldPath: data.nfs.lb_addresses
targets:
- select:
group: metallb.io
kind: IPAddressPool
name: nfs
fieldPaths:
- spec.addresses

# Loadbalancer interfaces
- source:
kind: ConfigMap
name: network-values
fieldPath: data.bridgeName
targets:
- select:
group: metallb.io
kind: L2Advertisement
name: ctlplane
fieldPaths:
- spec.interfaces.0
- source:
kind: ConfigMap
name: network-values
fieldPath: data.tenant.iface
targets:
- select:
group: metallb.io
kind: L2Advertisement
name: tenant
fieldPaths:
- spec.interfaces.0
- source:
kind: ConfigMap
name: network-values
fieldPath: data.storage.iface
targets:
- select:
group: metallb.io
kind: L2Advertisement
name: storage
fieldPaths:
- spec.interfaces.0
- source:
kind: ConfigMap
name: network-values
fieldPath: data.internalapi.iface
targets:
- select:
group: metallb.io
kind: L2Advertisement
name: internalapi
fieldPaths:
- spec.interfaces.0
- source:
kind: ConfigMap
name: network-values
fieldPath: data.nfs.iface
targets:
- select:
group: metallb.io
kind: L2Advertisement
name: nfs
fieldPaths:
- spec.interfaces.0
Loading

0 comments on commit 8e59010

Please sign in to comment.